Merge branch '3.14-fixes' into mips-for-linux-next
author Ralf Baechle <ralf@linux-mips.org>
Mon, 31 Mar 2014 16:17:33 +0000 (18:17 +0200)
committer Ralf Baechle <ralf@linux-mips.org>
Mon, 31 Mar 2014 16:17:33 +0000 (18:17 +0200)
179 files changed:
arch/mips/Kconfig
arch/mips/Makefile
arch/mips/alchemy/Kconfig
arch/mips/alchemy/Platform
arch/mips/alchemy/common/setup.c
arch/mips/alchemy/common/sleeper.S
arch/mips/alchemy/devboards/Makefile
arch/mips/alchemy/devboards/db1000.c
arch/mips/alchemy/devboards/db1200.c
arch/mips/alchemy/devboards/db1235.c [deleted file]
arch/mips/alchemy/devboards/db1300.c
arch/mips/alchemy/devboards/db1550.c
arch/mips/alchemy/devboards/db1xxx.c [new file with mode: 0644]
arch/mips/ar7/time.c
arch/mips/bcm47xx/Makefile
arch/mips/bcm47xx/bcm47xx_private.h
arch/mips/bcm47xx/board.c
arch/mips/bcm47xx/buttons.c
arch/mips/bcm47xx/leds.c
arch/mips/bcm47xx/setup.c
arch/mips/bcm47xx/workarounds.c [new file with mode: 0644]
arch/mips/bcm63xx/cpu.c
arch/mips/configs/db1000_defconfig [deleted file]
arch/mips/configs/db1235_defconfig [deleted file]
arch/mips/configs/db1xxx_defconfig [new file with mode: 0644]
arch/mips/configs/loongson3_defconfig [new file with mode: 0644]
arch/mips/configs/malta_defconfig
arch/mips/configs/malta_kvm_defconfig
arch/mips/configs/malta_kvm_guest_defconfig
arch/mips/configs/maltaaprp_defconfig
arch/mips/configs/maltasmtc_defconfig
arch/mips/configs/maltasmvp_defconfig
arch/mips/configs/maltasmvp_eva_defconfig [new file with mode: 0644]
arch/mips/configs/maltaup_defconfig
arch/mips/include/asm/asm-eva.h [new file with mode: 0644]
arch/mips/include/asm/asm.h
arch/mips/include/asm/asmmacro-32.h
arch/mips/include/asm/asmmacro.h
arch/mips/include/asm/atomic.h
arch/mips/include/asm/bitops.h
arch/mips/include/asm/bootinfo.h
arch/mips/include/asm/checksum.h
arch/mips/include/asm/cmpxchg.h
arch/mips/include/asm/cpu-features.h
arch/mips/include/asm/cpu-info.h
arch/mips/include/asm/cpu-type.h
arch/mips/include/asm/cpu.h
arch/mips/include/asm/dma-mapping.h
arch/mips/include/asm/fpu.h
arch/mips/include/asm/futex.h
arch/mips/include/asm/fw/fw.h
arch/mips/include/asm/gcmpregs.h [deleted file]
arch/mips/include/asm/gic.h
arch/mips/include/asm/io.h
arch/mips/include/asm/local.h
arch/mips/include/asm/mach-bcm47xx/bcm47xx_board.h
arch/mips/include/asm/mach-db1x00/db1200.h [deleted file]
arch/mips/include/asm/mach-db1x00/db1300.h [deleted file]
arch/mips/include/asm/mach-loongson/boot_param.h [new file with mode: 0644]
arch/mips/include/asm/mach-loongson/dma-coherence.h
arch/mips/include/asm/mach-loongson/irq.h [new file with mode: 0644]
arch/mips/include/asm/mach-loongson/loongson.h
arch/mips/include/asm/mach-loongson/machine.h
arch/mips/include/asm/mach-loongson/pci.h
arch/mips/include/asm/mach-loongson/spaces.h [new file with mode: 0644]
arch/mips/include/asm/mach-malta/kernel-entry-init.h
arch/mips/include/asm/mach-malta/spaces.h [new file with mode: 0644]
arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h
arch/mips/include/asm/mips-boards/malta.h
arch/mips/include/asm/mips-boards/piix4.h
arch/mips/include/asm/mips-cm.h [new file with mode: 0644]
arch/mips/include/asm/mips-cpc.h [new file with mode: 0644]
arch/mips/include/asm/mips_mt.h
arch/mips/include/asm/mipsmtregs.h
arch/mips/include/asm/mipsregs.h
arch/mips/include/asm/module.h
arch/mips/include/asm/msa.h [new file with mode: 0644]
arch/mips/include/asm/page.h
arch/mips/include/asm/pgtable-bits.h
arch/mips/include/asm/processor.h
arch/mips/include/asm/ptrace.h
arch/mips/include/asm/r4kcache.h
arch/mips/include/asm/sigcontext.h
arch/mips/include/asm/smp-cps.h [new file with mode: 0644]
arch/mips/include/asm/smp-ops.h
arch/mips/include/asm/smp.h
arch/mips/include/asm/stackframe.h
arch/mips/include/asm/switch_to.h
arch/mips/include/asm/syscall.h
arch/mips/include/asm/thread_info.h
arch/mips/include/asm/uaccess.h
arch/mips/include/uapi/asm/inst.h
arch/mips/include/uapi/asm/sigcontext.h
arch/mips/kernel/Makefile
arch/mips/kernel/asm-offsets.c
arch/mips/kernel/bmips_vec.S
arch/mips/kernel/cps-vec.S [new file with mode: 0644]
arch/mips/kernel/cpu-probe.c
arch/mips/kernel/ftrace.c
arch/mips/kernel/genex.S
arch/mips/kernel/head.S
arch/mips/kernel/idle.c
arch/mips/kernel/irq-gic.c
arch/mips/kernel/kgdb.c
arch/mips/kernel/mips-cm.c [new file with mode: 0644]
arch/mips/kernel/mips-cpc.c [new file with mode: 0644]
arch/mips/kernel/mips_ksyms.c
arch/mips/kernel/perf_event_mipsxx.c
arch/mips/kernel/proc.c
arch/mips/kernel/process.c
arch/mips/kernel/ptrace.c
arch/mips/kernel/ptrace32.c
arch/mips/kernel/r4k_fpu.S
arch/mips/kernel/r4k_switch.S
arch/mips/kernel/scall32-o32.S
arch/mips/kernel/scall64-64.S
arch/mips/kernel/scall64-n32.S
arch/mips/kernel/scall64-o32.S
arch/mips/kernel/signal.c
arch/mips/kernel/signal32.c
arch/mips/kernel/smp-cmp.c
arch/mips/kernel/smp-cps.c [new file with mode: 0644]
arch/mips/kernel/smp-gic.c [new file with mode: 0644]
arch/mips/kernel/smp-mt.c
arch/mips/kernel/smtc-proc.c
arch/mips/kernel/spram.c
arch/mips/kernel/syscall.c
arch/mips/kernel/traps.c
arch/mips/kernel/unaligned.c
arch/mips/lasat/picvue_proc.c
arch/mips/lib/csum_partial.S
arch/mips/lib/memcpy.S
arch/mips/lib/memset.S
arch/mips/lib/strlen_user.S
arch/mips/lib/strncpy_user.S
arch/mips/lib/strnlen_user.S
arch/mips/loongson/Kconfig
arch/mips/loongson/Makefile
arch/mips/loongson/Platform
arch/mips/loongson/common/Makefile
arch/mips/loongson/common/dma-swiotlb.c [new file with mode: 0644]
arch/mips/loongson/common/env.c
arch/mips/loongson/common/init.c
arch/mips/loongson/common/machtype.c
arch/mips/loongson/common/mem.c
arch/mips/loongson/common/pci.c
arch/mips/loongson/common/reset.c
arch/mips/loongson/common/serial.c
arch/mips/loongson/common/setup.c
arch/mips/loongson/common/uart_base.c
arch/mips/loongson/loongson-3/Makefile [new file with mode: 0644]
arch/mips/loongson/loongson-3/irq.c [new file with mode: 0644]
arch/mips/loongson/loongson-3/smp.c [new file with mode: 0644]
arch/mips/loongson/loongson-3/smp.h [new file with mode: 0644]
arch/mips/math-emu/cp1emu.c
arch/mips/math-emu/kernel_linkage.c
arch/mips/mm/c-r4k.c
arch/mips/mm/cache.c
arch/mips/mm/init.c
arch/mips/mm/sc-mips.c
arch/mips/mm/tlb-r4k.c
arch/mips/mm/tlbex.c
arch/mips/mti-malta/malta-init.c
arch/mips/mti-malta/malta-int.c
arch/mips/mti-malta/malta-memory.c
arch/mips/mti-malta/malta-setup.c
arch/mips/mti-sead3/sead3-mtd.c
arch/mips/oprofile/common.c
arch/mips/oprofile/op_model_mipsxx.c
arch/mips/pci/Makefile
arch/mips/pci/fixup-loongson3.c [new file with mode: 0644]
arch/mips/pci/fixup-malta.c
arch/mips/pci/ops-loongson3.c [new file with mode: 0644]
arch/mips/pci/pci-alchemy.c
arch/mips/pci/pci-malta.c
arch/mips/pmcs-msp71xx/msp_setup.c
drivers/edac/octeon_edac-lmc.c
drivers/gpio/gpio-vr41xx.c
drivers/spi/spi-au1550.c

index 7111d74a01a0ecb378b0e82e86a1a18287d5ce7c..fc9be7c8c5aaf6d2baf57ff1b2ff37afec811f38 100644 (file)
@@ -10,6 +10,7 @@ config MIPS
        select HAVE_PERF_EVENTS
        select PERF_USE_VMALLOC
        select HAVE_ARCH_KGDB
+       select HAVE_ARCH_SECCOMP_FILTER
        select HAVE_ARCH_TRACEHOOK
        select ARCH_HAVE_CUSTOM_GPIO_H
        select HAVE_FUNCTION_TRACER
@@ -62,6 +63,7 @@ config MIPS_ALCHEMY
        select CEVT_R4K
        select CSRC_R4K
        select IRQ_CPU
+       select DMA_MAYBE_COHERENT       # Au1000,1500,1100 aren't, rest is
        select SYS_HAS_CPU_MIPS32_R1
        select SYS_SUPPORTS_32BIT_KERNEL
        select SYS_SUPPORTS_APM_EMULATION
@@ -235,7 +237,6 @@ config MACH_JZ4740
        select IRQ_CPU
        select ARCH_REQUIRE_GPIOLIB
        select SYS_HAS_EARLY_PRINTK
-       select HAVE_PWM
        select HAVE_CLK
        select GENERIC_IRQ_CHIP
 
@@ -320,6 +321,7 @@ config MIPS_MALTA
        select SWAP_IO_SPACE
        select SYS_HAS_CPU_MIPS32_R1
        select SYS_HAS_CPU_MIPS32_R2
+       select SYS_HAS_CPU_MIPS32_R3_5
        select SYS_HAS_CPU_MIPS64_R1
        select SYS_HAS_CPU_MIPS64_R2
        select SYS_HAS_CPU_NEVADA
@@ -329,6 +331,7 @@ config MIPS_MALTA
        select SYS_SUPPORTS_BIG_ENDIAN
        select SYS_SUPPORTS_LITTLE_ENDIAN
        select SYS_SUPPORTS_MIPS_CMP
+       select SYS_SUPPORTS_MIPS_CPS
        select SYS_SUPPORTS_MULTITHREADING
        select SYS_SUPPORTS_SMARTMIPS
        select SYS_SUPPORTS_ZBOOT
@@ -783,7 +786,6 @@ config NLM_XLP_BOARD
        select CEVT_R4K
        select CSRC_R4K
        select IRQ_CPU
-       select ARCH_SUPPORTS_MSI
        select ZONE_DMA32 if 64BIT
        select SYNC_R4K
        select SYS_HAS_EARLY_PRINTK
@@ -869,6 +871,7 @@ config CEVT_R4K
        bool
 
 config CEVT_GIC
+       select MIPS_CM
        bool
 
 config CEVT_SB1250
@@ -887,6 +890,7 @@ config CSRC_R4K
        bool
 
 config CSRC_GIC
+       select MIPS_CM
        bool
 
 config CSRC_SB1250
@@ -1031,6 +1035,7 @@ config IRQ_GT641XX
        bool
 
 config IRQ_GIC
+       select MIPS_CM
        bool
 
 config PCI_GT64XXX_PCI0
@@ -1149,6 +1154,18 @@ choice
        prompt "CPU type"
        default CPU_R4X00
 
+config CPU_LOONGSON3
+       bool "Loongson 3 CPU"
+       depends on SYS_HAS_CPU_LOONGSON3
+       select CPU_SUPPORTS_64BIT_KERNEL
+       select CPU_SUPPORTS_HIGHMEM
+       select CPU_SUPPORTS_HUGEPAGES
+       select WEAK_ORDERING
+       select WEAK_REORDERING_BEYOND_LLSC
+       help
+               The Loongson 3 processor implements the MIPS64R2 instruction
+               set with many extensions.
+
 config CPU_LOONGSON2E
        bool "Loongson 2E"
        depends on SYS_HAS_CPU_LOONGSON2E
@@ -1204,6 +1221,7 @@ config CPU_MIPS32_R2
        select CPU_HAS_PREFETCH
        select CPU_SUPPORTS_32BIT_KERNEL
        select CPU_SUPPORTS_HIGHMEM
+       select CPU_SUPPORTS_MSA
        select HAVE_KVM
        help
          Choose this option to build a kernel for release 2 or later of the
@@ -1239,6 +1257,7 @@ config CPU_MIPS64_R2
        select CPU_SUPPORTS_64BIT_KERNEL
        select CPU_SUPPORTS_HIGHMEM
        select CPU_SUPPORTS_HUGEPAGES
+       select CPU_SUPPORTS_MSA
        help
          Choose this option to build a kernel for release 2 or later of the
          MIPS64 architecture.  Many modern embedded systems with a 64-bit
@@ -1397,7 +1416,6 @@ config CPU_CAVIUM_OCTEON
        select LIBFDT
        select USE_OF
        select USB_EHCI_BIG_ENDIAN_MMIO
-       select SYS_HAS_DMA_OPS
        select MIPS_L1_CACHE_SHIFT_7
        help
          The Cavium Octeon processor is a highly integrated chip containing
@@ -1449,6 +1467,26 @@ config CPU_XLP
          Netlogic Microsystems XLP processors.
 endchoice
 
+config CPU_MIPS32_3_5_FEATURES
+       bool "MIPS32 Release 3.5 Features"
+       depends on SYS_HAS_CPU_MIPS32_R3_5
+       depends on CPU_MIPS32_R2
+       help
+         Choose this option to build a kernel for release 2 or later of the
+         MIPS32 architecture including features from the 3.5 release such as
+         support for Enhanced Virtual Addressing (EVA).
+
+config CPU_MIPS32_3_5_EVA
+       bool "Enhanced Virtual Addressing (EVA)"
+       depends on CPU_MIPS32_3_5_FEATURES
+       select EVA
+       default y
+       help
+         Choose this option if you want to enable the Enhanced Virtual
+         Addressing (EVA) on your MIPS32 core (such as proAptiv).
+         One of its primary benefits is an increase in the maximum size
+         of lowmem (up to 3GB). If unsure, say 'N' here.
+
 if CPU_LOONGSON2F
 config CPU_NOP_WORKAROUNDS
        bool
@@ -1524,6 +1562,10 @@ config CPU_BMIPS5000
        select SYS_SUPPORTS_SMP
        select SYS_SUPPORTS_HOTPLUG_CPU
 
+config SYS_HAS_CPU_LOONGSON3
+       bool
+       select CPU_SUPPORTS_CPUFREQ
+
 config SYS_HAS_CPU_LOONGSON2E
        bool
 
@@ -1542,6 +1584,9 @@ config SYS_HAS_CPU_MIPS32_R1
 config SYS_HAS_CPU_MIPS32_R2
        bool
 
+config SYS_HAS_CPU_MIPS32_R3_5
+       bool
+
 config SYS_HAS_CPU_MIPS64_R1
        bool
 
@@ -1658,6 +1703,9 @@ config CPU_MIPSR2
        bool
        default y if CPU_MIPS32_R2 || CPU_MIPS64_R2 || CPU_CAVIUM_OCTEON
 
+config EVA
+       bool
+
 config SYS_SUPPORTS_32BIT_KERNEL
        bool
 config SYS_SUPPORTS_64BIT_KERNEL
@@ -1730,7 +1778,7 @@ choice
 
 config PAGE_SIZE_4KB
        bool "4kB"
-       depends on !CPU_LOONGSON2
+       depends on !CPU_LOONGSON2 && !CPU_LOONGSON3
        help
         This option select the standard 4kB Linux page size.  On some
         R3000-family processors this is the only available page size.  Using
@@ -1871,6 +1919,7 @@ config MIPS_MT_SMP
        select CPU_MIPSR2_IRQ_VI
        select CPU_MIPSR2_IRQ_EI
        select SYNC_R4K
+       select MIPS_GIC_IPI
        select MIPS_MT
        select SMP
        select SMP_UP
@@ -1888,6 +1937,7 @@ config MIPS_MT_SMTC
        bool "Use all TCs on all VPEs for SMP (DEPRECATED)"
        depends on CPU_MIPS32_R2
        depends on SYS_SUPPORTS_MULTITHREADING
+       depends on !MIPS_CPS
        select CPU_MIPSR2_IRQ_VI
        select CPU_MIPSR2_IRQ_EI
        select MIPS_MT
@@ -1995,13 +2045,45 @@ config MIPS_VPE_APSP_API_MT
        depends on MIPS_VPE_APSP_API && !MIPS_CMP
 
 config MIPS_CMP
-       bool "MIPS CMP support"
-       depends on SYS_SUPPORTS_MIPS_CMP && MIPS_MT_SMP
+       bool "MIPS CMP framework support (DEPRECATED)"
+       depends on SYS_SUPPORTS_MIPS_CMP && !MIPS_MT_SMTC
+       select MIPS_GIC_IPI
        select SYNC_R4K
        select WEAK_ORDERING
        default n
        help
-         Enable Coherency Manager processor (CMP) support.
+         Select this if you are using a bootloader which implements the "CMP
+         framework" protocol (ie. YAMON) and want your kernel to make use of
+         its ability to start secondary CPUs.
+
+         Unless you have a specific need, you should use CONFIG_MIPS_CPS
+         instead of this.
+
+config MIPS_CPS
+       bool "MIPS Coherent Processing System support"
+       depends on SYS_SUPPORTS_MIPS_CPS
+       select MIPS_CM
+       select MIPS_CPC
+       select MIPS_GIC_IPI
+       select SMP
+       select SYNC_R4K if (CEVT_R4K || CSRC_R4K)
+       select SYS_SUPPORTS_SMP
+       select WEAK_ORDERING
+       help
+         Select this if you wish to run an SMP kernel across multiple cores
+         within a MIPS Coherent Processing System. When this option is
+         enabled the kernel will probe for other cores and boot them with
+         no external assistance. It is safe to enable this when hardware
+         support is unavailable.
+
+config MIPS_GIC_IPI
+       bool
+
+config MIPS_CM
+       bool
+
+config MIPS_CPC
+       bool
 
 config SB1_PASS_1_WORKAROUNDS
        bool
@@ -2044,6 +2126,21 @@ config CPU_MICROMIPS
          When this option is enabled the kernel will be built using the
          microMIPS ISA
 
+config CPU_HAS_MSA
+       bool "Support for the MIPS SIMD Architecture"
+       depends on CPU_SUPPORTS_MSA
+       default y
+       help
+         MIPS SIMD Architecture (MSA) introduces 128 bit wide vector registers
+         and a set of SIMD instructions to operate on them. When this option
+         is enabled the kernel will support allocating & switching MSA
+         vector register contexts. If you know that your kernel will only be
+         running on CPUs which do not support MSA or that your userland will
+         not be making use of it then you may wish to say N here to reduce
+         the size & complexity of your kernel.
+
+         If unsure, say Y.
+
 config CPU_HAS_WB
        bool
 
@@ -2095,7 +2192,7 @@ config CPU_R4400_WORKAROUNDS
 #
 config HIGHMEM
        bool "High Memory Support"
-       depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM
+       depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !CPU_MIPS32_3_5_EVA
 
 config CPU_SUPPORTS_HIGHMEM
        bool
@@ -2109,6 +2206,9 @@ config SYS_SUPPORTS_SMARTMIPS
 config SYS_SUPPORTS_MICROMIPS
        bool
 
+config CPU_SUPPORTS_MSA
+       bool
+
 config ARCH_FLATMEM_ENABLE
        def_bool y
        depends on !NUMA && !CPU_LOONGSON2
@@ -2182,6 +2282,9 @@ config SMP_UP
 config SYS_SUPPORTS_MIPS_CMP
        bool
 
+config SYS_SUPPORTS_MIPS_CPS
+       bool
+
 config SYS_SUPPORTS_SMP
        bool
 
@@ -2414,6 +2517,17 @@ config PCI
          your box. Other bus systems are ISA, EISA, or VESA. If you have PCI,
          say Y, otherwise N.
 
+config HT_PCI
+       bool "Support for HT-linked PCI"
+       default y
+       depends on CPU_LOONGSON3
+       select PCI
+       select PCI_DOMAINS
+       help
+         Loongson family machines use Hyper-Transport bus for inter-core
+         connection and device connection. The PCI bus is a subordinate
+         linked at HT. Choose Y for Loongson-3 based machines.
+
 config PCI_DOMAINS
        bool
 
index 9b8556de99937698653ade0f0ddbca55b7f198d9..1a5b4032cb662b4db660acebba0cacb7752a66f6 100644 (file)
@@ -119,6 +119,11 @@ cflags-$(CONFIG_CPU_MICROMIPS) += $(call cc-option,-mmicromips)
 cflags-$(CONFIG_SB1XXX_CORELIS)        += $(call cc-option,-mno-sched-prolog) \
                                   -fno-omit-frame-pointer
 
+ifeq ($(CONFIG_CPU_HAS_MSA),y)
+toolchain-msa                  := $(call cc-option-yn,-mhard-float -mfp64 -mmsa)
+cflags-$(toolchain-msa)                += -DTOOLCHAIN_SUPPORTS_MSA
+endif
+
 #
 # CPU-dependent compiler/assembler options for optimization.
 #
index 7032ac7ecd1bb2f8083b848ca5ca01572dc4cd44..b9628983d62097cbfb6bcd842f6165118993cfeb 100644 (file)
@@ -16,36 +16,29 @@ config ALCHEMY_GPIO_INDIRECT
 choice
        prompt "Machine type"
        depends on MIPS_ALCHEMY
-       default MIPS_DB1000
+       default MIPS_DB1XXX
 
 config MIPS_MTX1
        bool "4G Systems MTX-1 board"
-       select DMA_NONCOHERENT
        select HW_HAS_PCI
        select ALCHEMY_GPIOINT_AU1000
        select SYS_SUPPORTS_LITTLE_ENDIAN
        select SYS_HAS_EARLY_PRINTK
 
-config MIPS_DB1000
-       bool "Alchemy DB1000/DB1500/DB1100 PB1500/1100 boards"
-       select ALCHEMY_GPIOINT_AU1000
-       select DMA_NONCOHERENT
-       select HW_HAS_PCI
-       select SYS_SUPPORTS_BIG_ENDIAN
-       select SYS_SUPPORTS_LITTLE_ENDIAN
-       select SYS_HAS_EARLY_PRINTK
-
-config MIPS_DB1235
-       bool "Alchemy DB1200/PB1200/DB1300/DB1550/PB1550 boards"
+config MIPS_DB1XXX
+       bool "Alchemy DB1XXX / PB1XXX boards"
        select ARCH_REQUIRE_GPIOLIB
        select HW_HAS_PCI
-       select DMA_COHERENT
        select SYS_SUPPORTS_LITTLE_ENDIAN
        select SYS_HAS_EARLY_PRINTK
+       help
+         Select this option if you have one of the following Alchemy
+         development boards:  DB1000 DB1500 DB1100 DB1550 DB1200 DB1300
+                              PB1500 PB1100 PB1550 PB1200
+         Board type is autodetected during boot.
 
 config MIPS_XXS1500
        bool "MyCable XXS1500 board"
-       select DMA_NONCOHERENT
        select ALCHEMY_GPIOINT_AU1000
        select SYS_SUPPORTS_LITTLE_ENDIAN
        select SYS_HAS_EARLY_PRINTK
@@ -54,7 +47,6 @@ config MIPS_GPR
        bool "Trapeze ITS GPR board"
        select ALCHEMY_GPIOINT_AU1000
        select HW_HAS_PCI
-       select DMA_NONCOHERENT
        select SYS_SUPPORTS_LITTLE_ENDIAN
        select SYS_HAS_EARLY_PRINTK
 
index b3afcdd8d77af9380c154227a17c6a5ee944d8d2..33c9da3b077ba70d5abf3945d949835e0b9373bf 100644 (file)
@@ -5,18 +5,12 @@ platform-$(CONFIG_MIPS_ALCHEMY) += alchemy/common/
 
 
 #
-# AMD Alchemy Db1000/Db1500/Pb1500/Db1100/Pb1100 eval boards
+# AMD Alchemy Db1000/Db1500/Pb1500/Db1100/Pb1100
+#             Db1550/Pb1550/Db1200/Pb1200/Db1300
 #
-platform-$(CONFIG_MIPS_DB1000) += alchemy/devboards/
-cflags-$(CONFIG_MIPS_DB1000)   += -I$(srctree)/arch/mips/include/asm/mach-db1x00
-load-$(CONFIG_MIPS_DB1000)     += 0xffffffff80100000
-
-#
-# AMD Alchemy Db1200/Pb1200/Db1550/Pb1550/Db1300 eval boards
-#
-platform-$(CONFIG_MIPS_DB1235) += alchemy/devboards/
-cflags-$(CONFIG_MIPS_DB1235)   += -I$(srctree)/arch/mips/include/asm/mach-db1x00
-load-$(CONFIG_MIPS_DB1235)     += 0xffffffff80100000
+platform-$(CONFIG_MIPS_DB1XXX) += alchemy/devboards/
+cflags-$(CONFIG_MIPS_DB1XXX)   += -I$(srctree)/arch/mips/include/asm/mach-db1x00
+load-$(CONFIG_MIPS_DB1XXX)     += 0xffffffff80100000
 
 #
 # 4G-Systems MTX-1 "MeshCube" wireless router
index 62b4e7bbeab9efde6e40953d34ef1f44bfbda02b..566a1743f68537f6118e0a29d9ca684d3ba955aa 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/jiffies.h>
 #include <linux/module.h>
 
+#include <asm/dma-coherence.h>
 #include <asm/mipsregs.h>
 #include <asm/time.h>
 
@@ -59,6 +60,15 @@ void __init plat_mem_setup(void)
                /* Clear to obtain best system bus performance */
                clear_c0_config(1 << 19); /* Clear Config[OD] */
 
+       hw_coherentio = 0;
+       coherentio = 1;
+       switch (alchemy_get_cputype()) {
+       case ALCHEMY_CPU_AU1000:
+       case ALCHEMY_CPU_AU1500:
+       case ALCHEMY_CPU_AU1100:
+               coherentio = 0;
+       }
+
        board_setup();  /* board specific setup */
 
        /* IO/MEM resources. */
index 706d933e00855ef507fafd13abf42539eea20084..c73d81270b42000202e41965fbacde519ec8c40f 100644 (file)
@@ -95,7 +95,7 @@ LEAF(alchemy_sleep_au1000)
 
        /* cache following instructions, as memory gets put to sleep */
        la      t0, 1f
-       .set    mips3
+       .set    arch=r4000
        cache   0x14, 0(t0)
        cache   0x14, 32(t0)
        cache   0x14, 64(t0)
@@ -121,7 +121,7 @@ LEAF(alchemy_sleep_au1550)
 
        /* cache following instructions, as memory gets put to sleep */
        la      t0, 1f
-       .set    mips3
+       .set    arch=r4000
        cache   0x14, 0(t0)
        cache   0x14, 32(t0)
        cache   0x14, 64(t0)
@@ -163,7 +163,7 @@ LEAF(alchemy_sleep_au1300)
        la      t1, 4f
        subu    t2, t1, t0
 
-       .set    mips3
+       .set    arch=r4000
 
 1:     cache   0x14, 0(t0)
        subu    t2, t2, 32
index 15bf7306648b3c269bcb97cdf5441147be089e3e..9da3659a9d1ca7eda8a34a66e903fd60e7552230 100644 (file)
@@ -2,7 +2,5 @@
 # Alchemy Develboards
 #
 
-obj-y += bcsr.o platform.o
+obj-y += bcsr.o platform.o db1000.o db1200.o db1300.o db1550.o db1xxx.o
 obj-$(CONFIG_PM)               += pm.o
-obj-$(CONFIG_MIPS_DB1000)      += db1000.o
-obj-$(CONFIG_MIPS_DB1235)      += db1235.o db1200.o db1300.o db1550.o
index 5483906e0f86d19acafb160596c673e83481e8ff..92dd929d40575ed4bcff00392bdd4c347fcec628 100644 (file)
 
 #define F_SWAPPED (bcsr_read(BCSR_STATUS) & BCSR_STATUS_DB1000_SWAPBOOT)
 
-struct pci_dev;
+const char *get_system_type(void);
 
-static const char *board_type_str(void)
+int __init db1000_board_setup(void)
 {
+       /* initialize board register space */
+       bcsr_init(DB1000_BCSR_PHYS_ADDR,
+                 DB1000_BCSR_PHYS_ADDR + DB1000_BCSR_HEXLED_OFS);
+
        switch (BCSR_WHOAMI_BOARD(bcsr_read(BCSR_WHOAMI))) {
        case BCSR_WHOAMI_DB1000:
-               return "DB1000";
        case BCSR_WHOAMI_DB1500:
-               return "DB1500";
        case BCSR_WHOAMI_DB1100:
-               return "DB1100";
        case BCSR_WHOAMI_PB1500:
        case BCSR_WHOAMI_PB1500R2:
-               return "PB1500";
        case BCSR_WHOAMI_PB1100:
-               return "PB1100";
-       default:
-               return "(unknown)";
+               pr_info("AMD Alchemy %s Board\n", get_system_type());
+               return 0;
        }
+       return -ENODEV;
 }
 
-const char *get_system_type(void)
-{
-       return board_type_str();
-}
-
-void __init board_setup(void)
-{
-       /* initialize board register space */
-       bcsr_init(DB1000_BCSR_PHYS_ADDR,
-                 DB1000_BCSR_PHYS_ADDR + DB1000_BCSR_HEXLED_OFS);
-
-       printk(KERN_INFO "AMD Alchemy %s Board\n", board_type_str());
-}
-
-
 static int db1500_map_pci_irq(const struct pci_dev *d, u8 slot, u8 pin)
 {
        if ((slot < 12) || (slot > 13) || pin == 0)
@@ -114,17 +99,10 @@ static struct platform_device db1500_pci_host_dev = {
        .resource       = alchemy_pci_host_res,
 };
 
-static int __init db1500_pci_init(void)
+int __init db1500_pci_setup(void)
 {
-       int id = BCSR_WHOAMI_BOARD(bcsr_read(BCSR_WHOAMI));
-       if ((id == BCSR_WHOAMI_DB1500) || (id == BCSR_WHOAMI_PB1500) ||
-           (id == BCSR_WHOAMI_PB1500R2))
-               return platform_device_register(&db1500_pci_host_dev);
-       return 0;
+       return platform_device_register(&db1500_pci_host_dev);
 }
-/* must be arch_initcall; MIPS PCI scans busses in a subsys_initcall */
-arch_initcall(db1500_pci_init);
-
 
 static struct resource au1100_lcd_resources[] = {
        [0] = {
@@ -513,7 +491,7 @@ static struct platform_device *db1100_devs[] = {
        &db1000_irda_dev,
 };
 
-static int __init db1000_dev_init(void)
+int __init db1000_dev_setup(void)
 {
        int board = BCSR_WHOAMI_BOARD(bcsr_read(BCSR_WHOAMI));
        int c0, c1, d0, d1, s0, s1, flashsize = 32,  twosocks = 1;
@@ -623,4 +601,3 @@ static int __init db1000_dev_init(void)
        db1x_register_norflash(flashsize << 20, 4 /* 32bit */, F_SWAPPED);
        return 0;
 }
-device_initcall(db1000_dev_init);
index a84d98b8f96e8fbe2abfb4652f03c579ff3f3ab1..9e46667f2597b7526fb4c974b8c65a1e56537ded 100644 (file)
 #include <linux/spi/spi.h>
 #include <linux/spi/flash.h>
 #include <linux/smc91x.h>
+#include <linux/ata_platform.h>
 #include <asm/mach-au1x00/au1000.h>
 #include <asm/mach-au1x00/au1100_mmc.h>
 #include <asm/mach-au1x00/au1xxx_dbdma.h>
+#include <asm/mach-au1x00/au1xxx_psc.h>
 #include <asm/mach-au1x00/au1200fb.h>
 #include <asm/mach-au1x00/au1550_spi.h>
 #include <asm/mach-db1x00/bcsr.h>
-#include <asm/mach-db1x00/db1200.h>
 
 #include "platform.h"
 
+#define BCSR_INT_IDE           0x0001
+#define BCSR_INT_ETH           0x0002
+#define BCSR_INT_PC0           0x0004
+#define BCSR_INT_PC0STSCHG     0x0008
+#define BCSR_INT_PC1           0x0010
+#define BCSR_INT_PC1STSCHG     0x0020
+#define BCSR_INT_DC            0x0040
+#define BCSR_INT_FLASHBUSY     0x0080
+#define BCSR_INT_PC0INSERT     0x0100
+#define BCSR_INT_PC0EJECT      0x0200
+#define BCSR_INT_PC1INSERT     0x0400
+#define BCSR_INT_PC1EJECT      0x0800
+#define BCSR_INT_SD0INSERT     0x1000
+#define BCSR_INT_SD0EJECT      0x2000
+#define BCSR_INT_SD1INSERT     0x4000
+#define BCSR_INT_SD1EJECT      0x8000
+
+#define DB1200_IDE_PHYS_ADDR   0x18800000
+#define DB1200_IDE_REG_SHIFT   5
+#define DB1200_IDE_PHYS_LEN    (16 << DB1200_IDE_REG_SHIFT)
+#define DB1200_ETH_PHYS_ADDR   0x19000300
+#define DB1200_NAND_PHYS_ADDR  0x20000000
+
+#define PB1200_IDE_PHYS_ADDR   0x0C800000
+#define PB1200_ETH_PHYS_ADDR   0x0D000300
+#define PB1200_NAND_PHYS_ADDR  0x1C000000
+
+#define DB1200_INT_BEGIN       (AU1000_MAX_INTR + 1)
+#define DB1200_IDE_INT         (DB1200_INT_BEGIN + 0)
+#define DB1200_ETH_INT         (DB1200_INT_BEGIN + 1)
+#define DB1200_PC0_INT         (DB1200_INT_BEGIN + 2)
+#define DB1200_PC0_STSCHG_INT  (DB1200_INT_BEGIN + 3)
+#define DB1200_PC1_INT         (DB1200_INT_BEGIN + 4)
+#define DB1200_PC1_STSCHG_INT  (DB1200_INT_BEGIN + 5)
+#define DB1200_DC_INT          (DB1200_INT_BEGIN + 6)
+#define DB1200_FLASHBUSY_INT   (DB1200_INT_BEGIN + 7)
+#define DB1200_PC0_INSERT_INT  (DB1200_INT_BEGIN + 8)
+#define DB1200_PC0_EJECT_INT   (DB1200_INT_BEGIN + 9)
+#define DB1200_PC1_INSERT_INT  (DB1200_INT_BEGIN + 10)
+#define DB1200_PC1_EJECT_INT   (DB1200_INT_BEGIN + 11)
+#define DB1200_SD0_INSERT_INT  (DB1200_INT_BEGIN + 12)
+#define DB1200_SD0_EJECT_INT   (DB1200_INT_BEGIN + 13)
+#define PB1200_SD1_INSERT_INT  (DB1200_INT_BEGIN + 14)
+#define PB1200_SD1_EJECT_INT   (DB1200_INT_BEGIN + 15)
+#define DB1200_INT_END         (DB1200_INT_BEGIN + 15)
+
 const char *get_system_type(void);
 
 static int __init db1200_detect_board(void)
@@ -89,6 +136,15 @@ int __init db1200_board_setup(void)
                return -ENODEV;
 
        whoami = bcsr_read(BCSR_WHOAMI);
+       switch (BCSR_WHOAMI_BOARD(whoami)) {
+       case BCSR_WHOAMI_PB1200_DDR1:
+       case BCSR_WHOAMI_PB1200_DDR2:
+       case BCSR_WHOAMI_DB1200:
+               break;
+       default:
+               return -ENODEV;
+       }
+
        printk(KERN_INFO "Alchemy/AMD/RMI %s Board, CPLD Rev %d"
                "  Board-ID %d  Daughtercard ID %d\n", get_system_type(),
                (whoami >> 4) & 0xf, (whoami >> 8) & 0xf, whoami & 0xf);
@@ -275,32 +331,38 @@ static struct platform_device db1200_eth_dev = {
 
 /**********************************************************************/
 
+static struct pata_platform_info db1200_ide_info = {
+       .ioport_shift   = DB1200_IDE_REG_SHIFT,
+};
+
+#define IDE_ALT_START  (14 << DB1200_IDE_REG_SHIFT)
 static struct resource db1200_ide_res[] = {
        [0] = {
                .start  = DB1200_IDE_PHYS_ADDR,
-               .end    = DB1200_IDE_PHYS_ADDR + DB1200_IDE_PHYS_LEN - 1,
+               .end    = DB1200_IDE_PHYS_ADDR + IDE_ALT_START - 1,
                .flags  = IORESOURCE_MEM,
        },
        [1] = {
+               .start  = DB1200_IDE_PHYS_ADDR + IDE_ALT_START,
+               .end    = DB1200_IDE_PHYS_ADDR + DB1200_IDE_PHYS_LEN - 1,
+               .flags  = IORESOURCE_MEM,
+       },
+       [2] = {
                .start  = DB1200_IDE_INT,
                .end    = DB1200_IDE_INT,
                .flags  = IORESOURCE_IRQ,
        },
-       [2] = {
-               .start  = AU1200_DSCR_CMD0_DMA_REQ1,
-               .end    = AU1200_DSCR_CMD0_DMA_REQ1,
-               .flags  = IORESOURCE_DMA,
-       },
 };
 
 static u64 au1200_ide_dmamask = DMA_BIT_MASK(32);
 
 static struct platform_device db1200_ide_dev = {
-       .name           = "au1200-ide",
+       .name           = "pata_platform",
        .id             = 0,
        .dev = {
                .dma_mask               = &au1200_ide_dmamask,
                .coherent_dma_mask      = DMA_BIT_MASK(32),
+               .platform_data          = &db1200_ide_info,
        },
        .num_resources  = ARRAY_SIZE(db1200_ide_res),
        .resource       = db1200_ide_res,
diff --git a/arch/mips/alchemy/devboards/db1235.c b/arch/mips/alchemy/devboards/db1235.c
deleted file mode 100644 (file)
index bac19dc..0000000
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * DB1200/PB1200 / DB1550 / DB1300 board support.
- *
- * These 4 boards can reliably be supported in a single kernel image.
- */
-
-#include <asm/mach-au1x00/au1000.h>
-#include <asm/mach-db1x00/bcsr.h>
-
-int __init db1200_board_setup(void);
-int __init db1200_dev_setup(void);
-int __init db1300_board_setup(void);
-int __init db1300_dev_setup(void);
-int __init db1550_board_setup(void);
-int __init db1550_dev_setup(void);
-int __init db1550_pci_setup(int);
-
-static const char *board_type_str(void)
-{
-       switch (BCSR_WHOAMI_BOARD(bcsr_read(BCSR_WHOAMI))) {
-       case BCSR_WHOAMI_PB1200_DDR1:
-       case BCSR_WHOAMI_PB1200_DDR2:
-               return "PB1200";
-       case BCSR_WHOAMI_DB1200:
-               return "DB1200";
-       case BCSR_WHOAMI_DB1300:
-               return "DB1300";
-       case BCSR_WHOAMI_DB1550:
-               return "DB1550";
-       case BCSR_WHOAMI_PB1550_SDR:
-       case BCSR_WHOAMI_PB1550_DDR:
-               return "PB1550";
-       default:
-               return "(unknown)";
-       }
-}
-
-const char *get_system_type(void)
-{
-       return board_type_str();
-}
-
-void __init board_setup(void)
-{
-       int ret;
-
-       switch (alchemy_get_cputype()) {
-       case ALCHEMY_CPU_AU1550:
-               ret = db1550_board_setup();
-               break;
-       case ALCHEMY_CPU_AU1200:
-               ret = db1200_board_setup();
-               break;
-       case ALCHEMY_CPU_AU1300:
-               ret = db1300_board_setup();
-               break;
-       default:
-               pr_err("unsupported CPU on board\n");
-               ret = -ENODEV;
-       }
-       if (ret)
-               panic("cannot initialize board support");
-}
-
-int __init db1235_arch_init(void)
-{
-       int id = BCSR_WHOAMI_BOARD(bcsr_read(BCSR_WHOAMI));
-       if (id == BCSR_WHOAMI_DB1550)
-               return db1550_pci_setup(0);
-       else if ((id == BCSR_WHOAMI_PB1550_SDR) ||
-                (id == BCSR_WHOAMI_PB1550_DDR))
-               return db1550_pci_setup(1);
-
-       return 0;
-}
-arch_initcall(db1235_arch_init);
-
-int __init db1235_dev_init(void)
-{
-       switch (BCSR_WHOAMI_BOARD(bcsr_read(BCSR_WHOAMI))) {
-       case BCSR_WHOAMI_PB1200_DDR1:
-       case BCSR_WHOAMI_PB1200_DDR2:
-       case BCSR_WHOAMI_DB1200:
-               return db1200_dev_setup();
-       case BCSR_WHOAMI_DB1300:
-               return db1300_dev_setup();
-       case BCSR_WHOAMI_DB1550:
-       case BCSR_WHOAMI_PB1550_SDR:
-       case BCSR_WHOAMI_PB1550_DDR:
-               return db1550_dev_setup();
-       }
-       return 0;
-}
-device_initcall(db1235_dev_init);
index 6167e73eef9cb4d2e305109fbf0351a0373b6d76..1aed6be4de108c5e6819b79b450a21fd22f01f36 100644 (file)
 #include <asm/mach-au1x00/au1200fb.h>
 #include <asm/mach-au1x00/au1xxx_dbdma.h>
 #include <asm/mach-au1x00/au1xxx_psc.h>
-#include <asm/mach-db1x00/db1300.h>
 #include <asm/mach-db1x00/bcsr.h>
 #include <asm/mach-au1x00/prom.h>
 
 #include "platform.h"
 
+/* FPGA (external mux) interrupt sources */
+#define DB1300_FIRST_INT       (ALCHEMY_GPIC_INT_LAST + 1)
+#define DB1300_IDE_INT         (DB1300_FIRST_INT + 0)
+#define DB1300_ETH_INT         (DB1300_FIRST_INT + 1)
+#define DB1300_CF_INT          (DB1300_FIRST_INT + 2)
+#define DB1300_VIDEO_INT       (DB1300_FIRST_INT + 4)
+#define DB1300_HDMI_INT                (DB1300_FIRST_INT + 5)
+#define DB1300_DC_INT          (DB1300_FIRST_INT + 6)
+#define DB1300_FLASH_INT       (DB1300_FIRST_INT + 7)
+#define DB1300_CF_INSERT_INT   (DB1300_FIRST_INT + 8)
+#define DB1300_CF_EJECT_INT    (DB1300_FIRST_INT + 9)
+#define DB1300_AC97_INT                (DB1300_FIRST_INT + 10)
+#define DB1300_AC97_PEN_INT    (DB1300_FIRST_INT + 11)
+#define DB1300_SD1_INSERT_INT  (DB1300_FIRST_INT + 12)
+#define DB1300_SD1_EJECT_INT   (DB1300_FIRST_INT + 13)
+#define DB1300_OTG_VBUS_OC_INT (DB1300_FIRST_INT + 14)
+#define DB1300_HOST_VBUS_OC_INT (DB1300_FIRST_INT + 15)
+#define DB1300_LAST_INT                (DB1300_FIRST_INT + 15)
+
+/* SMSC9210 CS */
+#define DB1300_ETH_PHYS_ADDR   0x19000000
+#define DB1300_ETH_PHYS_END    0x197fffff
+
+/* ATA CS */
+#define DB1300_IDE_PHYS_ADDR   0x18800000
+#define DB1300_IDE_REG_SHIFT   5
+#define DB1300_IDE_PHYS_LEN    (16 << DB1300_IDE_REG_SHIFT)
+
+/* NAND CS */
+#define DB1300_NAND_PHYS_ADDR  0x20000000
+#define DB1300_NAND_PHYS_END   0x20000fff
+
+
 static struct i2c_board_info db1300_i2c_devs[] __initdata = {
        { I2C_BOARD_INFO("wm8731", 0x1b), },    /* I2S audio codec */
        { I2C_BOARD_INFO("ne1619", 0x2d), },    /* adm1025-compat hwmon */
@@ -759,11 +791,15 @@ int __init db1300_board_setup(void)
 {
        unsigned short whoami;
 
-       db1300_gpio_config();
        bcsr_init(DB1300_BCSR_PHYS_ADDR,
                  DB1300_BCSR_PHYS_ADDR + DB1300_BCSR_HEXLED_OFS);
 
        whoami = bcsr_read(BCSR_WHOAMI);
+       if (BCSR_WHOAMI_BOARD(whoami) != BCSR_WHOAMI_DB1300)
+               return -ENODEV;
+
+       db1300_gpio_config();
+
        printk(KERN_INFO "NetLogic DBAu1300 Development Platform.\n\t"
                "BoardID %d   CPLD Rev %d   DaughtercardID %d\n",
                BCSR_WHOAMI_BOARD(whoami), BCSR_WHOAMI_CPLD(whoami),
index 016cddacd7ea34bb40717c183d94f12f99b75050..bbd8d988470290acef6b2d7a7c845d89eefcea7c 100644 (file)
@@ -62,10 +62,16 @@ int __init db1550_board_setup(void)
                  DB1550_BCSR_PHYS_ADDR + DB1550_BCSR_HEXLED_OFS);
 
        whoami = bcsr_read(BCSR_WHOAMI); /* PB1550 hexled offset differs */
-       if ((BCSR_WHOAMI_BOARD(whoami) == BCSR_WHOAMI_PB1550_SDR) ||
-           (BCSR_WHOAMI_BOARD(whoami) == BCSR_WHOAMI_PB1550_DDR))
+       switch (BCSR_WHOAMI_BOARD(whoami)) {
+       case BCSR_WHOAMI_PB1550_SDR:
+       case BCSR_WHOAMI_PB1550_DDR:
                bcsr_init(PB1550_BCSR_PHYS_ADDR,
                          PB1550_BCSR_PHYS_ADDR + PB1550_BCSR_HEXLED_OFS);
+       case BCSR_WHOAMI_DB1550:
+               break;
+       default:
+               return -ENODEV;
+       }
 
        pr_info("Alchemy/AMD %s Board, CPLD Rev %d Board-ID %d  "       \
                "Daughtercard ID %d\n", get_system_type(),
diff --git a/arch/mips/alchemy/devboards/db1xxx.c b/arch/mips/alchemy/devboards/db1xxx.c
new file mode 100644 (file)
index 0000000..2d47f95
--- /dev/null
@@ -0,0 +1,121 @@
+/*
+ * Alchemy DB/PB1xxx board support.
+ */
+
+#include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-db1x00/bcsr.h>
+
+int __init db1000_board_setup(void);
+int __init db1000_dev_setup(void);
+int __init db1500_pci_setup(void);
+int __init db1200_board_setup(void);
+int __init db1200_dev_setup(void);
+int __init db1300_board_setup(void);
+int __init db1300_dev_setup(void);
+int __init db1550_board_setup(void);
+int __init db1550_dev_setup(void);
+int __init db1550_pci_setup(int);
+
+static const char *board_type_str(void)
+{
+       switch (BCSR_WHOAMI_BOARD(bcsr_read(BCSR_WHOAMI))) {
+       case BCSR_WHOAMI_DB1000:
+               return "DB1000";
+       case BCSR_WHOAMI_DB1500:
+               return "DB1500";
+       case BCSR_WHOAMI_DB1100:
+               return "DB1100";
+       case BCSR_WHOAMI_PB1500:
+       case BCSR_WHOAMI_PB1500R2:
+               return "PB1500";
+       case BCSR_WHOAMI_PB1100:
+               return "PB1100";
+       case BCSR_WHOAMI_PB1200_DDR1:
+       case BCSR_WHOAMI_PB1200_DDR2:
+               return "PB1200";
+       case BCSR_WHOAMI_DB1200:
+               return "DB1200";
+       case BCSR_WHOAMI_DB1300:
+               return "DB1300";
+       case BCSR_WHOAMI_DB1550:
+               return "DB1550";
+       case BCSR_WHOAMI_PB1550_SDR:
+       case BCSR_WHOAMI_PB1550_DDR:
+               return "PB1550";
+       default:
+               return "(unknown)";
+       }
+}
+
+const char *get_system_type(void)
+{
+       return board_type_str();
+}
+
+void __init board_setup(void)
+{
+       int ret;
+
+       switch (alchemy_get_cputype()) {
+       case ALCHEMY_CPU_AU1000:
+       case ALCHEMY_CPU_AU1500:
+       case ALCHEMY_CPU_AU1100:
+               ret = db1000_board_setup();
+               break;
+       case ALCHEMY_CPU_AU1550:
+               ret = db1550_board_setup();
+               break;
+       case ALCHEMY_CPU_AU1200:
+               ret = db1200_board_setup();
+               break;
+       case ALCHEMY_CPU_AU1300:
+               ret = db1300_board_setup();
+               break;
+       default:
+               pr_err("unsupported CPU on board\n");
+               ret = -ENODEV;
+       }
+       if (ret)
+               panic("cannot initialize board support");
+}
+
+static int __init db1xxx_arch_init(void)
+{
+       int id = BCSR_WHOAMI_BOARD(bcsr_read(BCSR_WHOAMI));
+       if (id == BCSR_WHOAMI_DB1550)
+               return db1550_pci_setup(0);
+       else if ((id == BCSR_WHOAMI_PB1550_SDR) ||
+                (id == BCSR_WHOAMI_PB1550_DDR))
+               return db1550_pci_setup(1);
+       else if ((id == BCSR_WHOAMI_DB1500) || (id == BCSR_WHOAMI_PB1500) ||
+                (id == BCSR_WHOAMI_PB1500R2))
+               return db1500_pci_setup();
+
+       return 0;
+}
+arch_initcall(db1xxx_arch_init);
+
+static int __init db1xxx_dev_init(void)
+{
+       switch (BCSR_WHOAMI_BOARD(bcsr_read(BCSR_WHOAMI))) {
+       case BCSR_WHOAMI_DB1000:
+       case BCSR_WHOAMI_DB1500:
+       case BCSR_WHOAMI_DB1100:
+       case BCSR_WHOAMI_PB1500:
+       case BCSR_WHOAMI_PB1500R2:
+       case BCSR_WHOAMI_PB1100:
+               return db1000_dev_setup();
+       case BCSR_WHOAMI_PB1200_DDR1:
+       case BCSR_WHOAMI_PB1200_DDR2:
+       case BCSR_WHOAMI_DB1200:
+               return db1200_dev_setup();
+       case BCSR_WHOAMI_DB1300:
+               return db1300_dev_setup();
+       case BCSR_WHOAMI_DB1550:
+       case BCSR_WHOAMI_PB1550_SDR:
+       case BCSR_WHOAMI_PB1550_DDR:
+               return db1550_dev_setup();
+       }
+       return 0;
+}
+device_initcall(db1xxx_dev_init);
index 1dc6c3b37f91dd086505276373d44687474531dd..22c93213b233db0090a2c8175cee864f2cf62619 100644 (file)
@@ -18,6 +18,7 @@
  * Setting up the clock on the MIPS boards.
  */
 
+#include <linux/init.h>
 #include <linux/time.h>
 #include <linux/err.h>
 #include <linux/clk.h>
index 4688b6a6211b7adeaf22ef8b4ba57f1b5c18ebbd..d58c51b5e501b81e6a528a107e2082b2a6694795 100644 (file)
@@ -4,4 +4,4 @@
 #
 
 obj-y                          += irq.o nvram.o prom.o serial.o setup.o time.o sprom.o
-obj-y                          += board.o buttons.o leds.o
+obj-y                          += board.o buttons.o leds.o workarounds.o
index 5c94acebf76a7223a5cda0e640ac36ed796f0279..0194c3b9a729acd979dfe858b45d96ada5d49f03 100644 (file)
@@ -9,4 +9,7 @@ int __init bcm47xx_buttons_register(void);
 /* leds.c */
 void __init bcm47xx_leds_register(void);
 
+/* workarounds.c */
+void __init bcm47xx_workarounds(void);
+
 #endif
index cdd8246f92b33f8494712247a16348a8b41f27f1..44ab1be68c3cda8d05111766d77ee5e5865b92bc 100644 (file)
@@ -72,7 +72,11 @@ struct bcm47xx_board_type_list1 bcm47xx_board_list_hardware_version[] __initcons
        {{BCM47XX_BOARD_ASUS_WL500W, "Asus WL500W"}, "WL500gW-"},
        {{BCM47XX_BOARD_ASUS_WL520GC, "Asus WL520GC"}, "WL520GC-"},
        {{BCM47XX_BOARD_ASUS_WL520GU, "Asus WL520GU"}, "WL520GU-"},
+       {{BCM47XX_BOARD_BELKIN_F7D3301, "Belkin F7D3301"}, "F7D3301"},
+       {{BCM47XX_BOARD_BELKIN_F7D3302, "Belkin F7D3302"}, "F7D3302"},
        {{BCM47XX_BOARD_BELKIN_F7D4301, "Belkin F7D4301"}, "F7D4301"},
+       {{BCM47XX_BOARD_BELKIN_F7D4302, "Belkin F7D4302"}, "F7D4302"},
+       {{BCM47XX_BOARD_BELKIN_F7D4401, "Belkin F7D4401"}, "F7D4401"},
        { {0}, NULL},
 };
 
@@ -176,7 +180,16 @@ struct bcm47xx_board_type_list3 bcm47xx_board_list_board[] __initconst = {
        {{BCM47XX_BOARD_PHICOMM_M1, "Phicomm M1"}, "0x0590", "80", "0x1104"},
        {{BCM47XX_BOARD_ZTE_H218N, "ZTE H218N"}, "0x053d", "1234", "0x1305"},
        {{BCM47XX_BOARD_NETGEAR_WNR3500L, "Netgear WNR3500L"}, "0x04CF", "3500", "02"},
-       {{BCM47XX_BOARD_LINKSYS_WRT54GSV1, "Linksys WRT54GS V1"}, "0x0101", "42", "0x10"},
+       {{BCM47XX_BOARD_LINKSYS_WRT54G, "Linksys WRT54G/GS/GL"}, "0x0101", "42", "0x10"},
+       {{BCM47XX_BOARD_LINKSYS_WRT54G, "Linksys WRT54G/GS/GL"}, "0x0467", "42", "0x10"},
+       {{BCM47XX_BOARD_LINKSYS_WRT54G, "Linksys WRT54G/GS/GL"}, "0x0708", "42", "0x10"},
+       { {0}, NULL},
+};
+
+/* boardtype, boardrev */
+static const
+struct bcm47xx_board_type_list2 bcm47xx_board_list_board_type_rev[] __initconst = {
+       {{BCM47XX_BOARD_SIEMENS_SE505V2, "Siemens SE505 V2"}, "0x0101", "0x10"},
        { {0}, NULL},
 };
 
@@ -273,6 +286,16 @@ static __init const struct bcm47xx_board_type *bcm47xx_board_get_nvram(void)
                                return &e3->board;
                }
        }
+
+       if (bcm47xx_nvram_getenv("boardtype", buf1, sizeof(buf1)) >= 0 &&
+           bcm47xx_nvram_getenv("boardrev", buf2, sizeof(buf2)) >= 0 &&
+           bcm47xx_nvram_getenv("boardnum", buf3, sizeof(buf3)) ==  -ENOENT) {
+               for (e2 = bcm47xx_board_list_board_type_rev; e2->value1; e2++) {
+                       if (!strcmp(buf1, e2->value1) &&
+                           !strcmp(buf2, e2->value2))
+                               return &e2->board;
+               }
+       }
        return bcm47xx_board_unknown;
 }
 
index 872c62e93e0e4c07f05c1e3b23dad6ef4efe90d9..49a1ce06844ba7a68e864ba61ab39c6ffe0434ac 100644 (file)
@@ -258,6 +258,18 @@ bcm47xx_buttons_linksys_wrt310nv1[] __initconst = {
        BCM47XX_GPIO_KEY(8, KEY_UNKNOWN),
 };
 
+static const struct gpio_keys_button
+bcm47xx_buttons_linksys_wrt54g3gv2[] __initconst = {
+       BCM47XX_GPIO_KEY(5, KEY_WIMAX),
+       BCM47XX_GPIO_KEY(6, KEY_RESTART),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_linksys_wrt54gsv1[] __initconst = {
+       BCM47XX_GPIO_KEY(4, KEY_WPS_BUTTON),
+       BCM47XX_GPIO_KEY(6, KEY_RESTART),
+};
+
 static const struct gpio_keys_button
 bcm47xx_buttons_linksys_wrt610nv1[] __initconst = {
        BCM47XX_GPIO_KEY(6, KEY_RESTART),
@@ -270,6 +282,12 @@ bcm47xx_buttons_linksys_wrt610nv2[] __initconst = {
        BCM47XX_GPIO_KEY(6, KEY_RESTART),
 };
 
+static const struct gpio_keys_button
+bcm47xx_buttons_linksys_wrtsl54gs[] __initconst = {
+       BCM47XX_GPIO_KEY(4, KEY_WPS_BUTTON),
+       BCM47XX_GPIO_KEY(6, KEY_RESTART),
+};
+
 /* Motorola */
 
 static const struct gpio_keys_button
@@ -402,7 +420,11 @@ int __init bcm47xx_buttons_register(void)
                err = bcm47xx_copy_bdata(bcm47xx_buttons_asus_wlhdd);
                break;
 
+       case BCM47XX_BOARD_BELKIN_F7D3301:
+       case BCM47XX_BOARD_BELKIN_F7D3302:
        case BCM47XX_BOARD_BELKIN_F7D4301:
+       case BCM47XX_BOARD_BELKIN_F7D4302:
+       case BCM47XX_BOARD_BELKIN_F7D4401:
                err = bcm47xx_copy_bdata(bcm47xx_buttons_belkin_f7d4301);
                break;
 
@@ -479,12 +501,21 @@ int __init bcm47xx_buttons_register(void)
        case BCM47XX_BOARD_LINKSYS_WRT310NV1:
                err = bcm47xx_copy_bdata(bcm47xx_buttons_linksys_wrt310nv1);
                break;
+       case BCM47XX_BOARD_LINKSYS_WRT54G:
+               err = bcm47xx_copy_bdata(bcm47xx_buttons_linksys_wrt54gsv1);
+               break;
+       case BCM47XX_BOARD_LINKSYS_WRT54G3GV2:
+               err = bcm47xx_copy_bdata(bcm47xx_buttons_linksys_wrt54g3gv2);
+               break;
        case BCM47XX_BOARD_LINKSYS_WRT610NV1:
                err = bcm47xx_copy_bdata(bcm47xx_buttons_linksys_wrt610nv1);
                break;
        case BCM47XX_BOARD_LINKSYS_WRT610NV2:
                err = bcm47xx_copy_bdata(bcm47xx_buttons_linksys_wrt610nv2);
                break;
+       case BCM47XX_BOARD_LINKSYS_WRTSL54GS:
+               err = bcm47xx_copy_bdata(bcm47xx_buttons_linksys_wrtsl54gs);
+               break;
 
        case BCM47XX_BOARD_MOTOROLA_WE800G:
                err = bcm47xx_copy_bdata(bcm47xx_buttons_motorola_we800g);
index 647d155270664dafbff5dc737f3cd24a561f06ba..adcb547a91c394cad116fd3a6c622318bb88515f 100644 (file)
@@ -291,6 +291,21 @@ bcm47xx_leds_linksys_wrt310nv1[] __initconst = {
        BCM47XX_GPIO_LED(9, "blue", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
 };
 
+static const struct gpio_led
+bcm47xx_leds_linksys_wrt54gsv1[] __initconst = {
+       BCM47XX_GPIO_LED(0, "unk", "dmz", 1, LEDS_GPIO_DEFSTATE_OFF),
+       BCM47XX_GPIO_LED(1, "unk", "power", 0, LEDS_GPIO_DEFSTATE_ON),
+       BCM47XX_GPIO_LED(5, "white", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+       BCM47XX_GPIO_LED(7, "orange", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_linksys_wrt54g3gv2[] __initconst = {
+       BCM47XX_GPIO_LED(1, "unk", "power", 0, LEDS_GPIO_DEFSTATE_ON),
+       BCM47XX_GPIO_LED(2, "green", "3g", 0, LEDS_GPIO_DEFSTATE_OFF),
+       BCM47XX_GPIO_LED(3, "blue", "3g", 0, LEDS_GPIO_DEFSTATE_OFF),
+};
+
 static const struct gpio_led
 bcm47xx_leds_linksys_wrt610nv1[] __initconst = {
        BCM47XX_GPIO_LED(0, "unk", "usb",  1, LEDS_GPIO_DEFSTATE_OFF),
@@ -308,6 +323,15 @@ bcm47xx_leds_linksys_wrt610nv2[] __initconst = {
        BCM47XX_GPIO_LED(7, "unk", "usb", 0, LEDS_GPIO_DEFSTATE_OFF),
 };
 
+static const struct gpio_led
+bcm47xx_leds_linksys_wrtsl54gs[] __initconst = {
+       BCM47XX_GPIO_LED(0, "unk", "wlan", 1, LEDS_GPIO_DEFSTATE_OFF),
+       BCM47XX_GPIO_LED(1, "unk", "power", 0, LEDS_GPIO_DEFSTATE_ON),
+       BCM47XX_GPIO_LED(2, "white", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+       BCM47XX_GPIO_LED(3, "orange", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+       BCM47XX_GPIO_LED(7, "unk", "dmz", 1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
 /* Motorola */
 
 static const struct gpio_led
@@ -359,6 +383,14 @@ bcm47xx_leds_netgear_wnr834bv2[] __initconst = {
        BCM47XX_GPIO_LED(7, "unk", "connected", 0, LEDS_GPIO_DEFSTATE_OFF),
 };
 
+/* Siemens */
+static const struct gpio_led
+bcm47xx_leds_siemens_se505v2[] __initconst = {
+       BCM47XX_GPIO_LED(0, "unk", "dmz", 1, LEDS_GPIO_DEFSTATE_OFF),
+       BCM47XX_GPIO_LED(3, "unk", "wlan", 1, LEDS_GPIO_DEFSTATE_OFF),
+       BCM47XX_GPIO_LED(5, "unk", "power", 1, LEDS_GPIO_DEFSTATE_ON),
+};
+
 /* SimpleTech */
 
 static const struct gpio_led
@@ -425,7 +457,11 @@ void __init bcm47xx_leds_register(void)
                bcm47xx_set_pdata(bcm47xx_leds_asus_wlhdd);
                break;
 
+       case BCM47XX_BOARD_BELKIN_F7D3301:
+       case BCM47XX_BOARD_BELKIN_F7D3302:
        case BCM47XX_BOARD_BELKIN_F7D4301:
+       case BCM47XX_BOARD_BELKIN_F7D4302:
+       case BCM47XX_BOARD_BELKIN_F7D4401:
                bcm47xx_set_pdata(bcm47xx_leds_belkin_f7d4301);
                break;
 
@@ -502,12 +538,21 @@ void __init bcm47xx_leds_register(void)
        case BCM47XX_BOARD_LINKSYS_WRT310NV1:
                bcm47xx_set_pdata(bcm47xx_leds_linksys_wrt310nv1);
                break;
+       case BCM47XX_BOARD_LINKSYS_WRT54G:
+               bcm47xx_set_pdata(bcm47xx_leds_linksys_wrt54gsv1);
+               break;
+       case BCM47XX_BOARD_LINKSYS_WRT54G3GV2:
+               bcm47xx_set_pdata(bcm47xx_leds_linksys_wrt54g3gv2);
+               break;
        case BCM47XX_BOARD_LINKSYS_WRT610NV1:
                bcm47xx_set_pdata(bcm47xx_leds_linksys_wrt610nv1);
                break;
        case BCM47XX_BOARD_LINKSYS_WRT610NV2:
                bcm47xx_set_pdata(bcm47xx_leds_linksys_wrt610nv2);
                break;
+       case BCM47XX_BOARD_LINKSYS_WRTSL54GS:
+               bcm47xx_set_pdata(bcm47xx_leds_linksys_wrtsl54gs);
+               break;
 
        case BCM47XX_BOARD_MOTOROLA_WE800G:
                bcm47xx_set_pdata(bcm47xx_leds_motorola_we800g);
@@ -529,6 +574,10 @@ void __init bcm47xx_leds_register(void)
                bcm47xx_set_pdata(bcm47xx_leds_netgear_wnr834bv2);
                break;
 
+       case BCM47XX_BOARD_SIEMENS_SE505V2:
+               bcm47xx_set_pdata(bcm47xx_leds_siemens_se505v2);
+               break;
+
        case BCM47XX_BOARD_SIMPLETECH_SIMPLESHARE:
                bcm47xx_set_pdata(bcm47xx_leds_simpletech_simpleshare);
                break;
index 025be218ea1518bebfac47c2ec2cef514fff4e83..63a4b0e915dc14ec33576e464f5803284523c72b 100644 (file)
@@ -212,7 +212,7 @@ void __init plat_mem_setup(void)
 {
        struct cpuinfo_mips *c = &current_cpu_data;
 
-       if (c->cputype == CPU_74K) {
+       if ((c->cputype == CPU_74K) || (c->cputype == CPU_1074K)) {
                printk(KERN_INFO "bcm47xx: using bcma bus\n");
 #ifdef CONFIG_BCM47XX_BCMA
                bcm47xx_bus_type = BCM47XX_BUS_TYPE_BCMA;
@@ -282,6 +282,7 @@ static int __init bcm47xx_register_bus_complete(void)
        }
        bcm47xx_buttons_register();
        bcm47xx_leds_register();
+       bcm47xx_workarounds();
 
        fixed_phy_add(PHY_POLL, 0, &bcm47xx_fixed_phy_status);
        return 0;
diff --git a/arch/mips/bcm47xx/workarounds.c b/arch/mips/bcm47xx/workarounds.c
new file mode 100644 (file)
index 0000000..e81ce46
--- /dev/null
@@ -0,0 +1,31 @@
+#include "bcm47xx_private.h"
+
+#include <linux/gpio.h>
+#include <bcm47xx_board.h>
+#include <bcm47xx.h>
+
+static void __init bcm47xx_workarounds_netgear_wnr3500l(void)
+{
+       const int usb_power = 12;
+       int err;
+
+       err = gpio_request_one(usb_power, GPIOF_OUT_INIT_HIGH, "usb_power");
+       if (err)
+               pr_err("Failed to request USB power gpio: %d\n", err);
+       else
+               gpio_free(usb_power);
+}
+
+void __init bcm47xx_workarounds(void)
+{
+       enum bcm47xx_board board = bcm47xx_board_get();
+
+       switch (board) {
+       case BCM47XX_BOARD_NETGEAR_WNR3500L:
+               bcm47xx_workarounds_netgear_wnr3500l();
+               break;
+       default:
+               /* No workaround(s) needed */
+               break;
+       }
+}
index 1b1b8a89959bb1f3998726f83675de7f9cde0784..fd4e76c00a42d44dfdd638e18861f53b41c76218 100644 (file)
@@ -299,14 +299,13 @@ static unsigned int detect_memory_size(void)
 void __init bcm63xx_cpu_init(void)
 {
        unsigned int tmp;
-       struct cpuinfo_mips *c = &current_cpu_data;
        unsigned int cpu = smp_processor_id();
        u32 chipid_reg;
 
        /* soc registers location depends on cpu type */
        chipid_reg = 0;
 
-       switch (c->cputype) {
+       switch (current_cpu_type()) {
        case CPU_BMIPS3300:
                if ((read_c0_prid() & PRID_IMP_MASK) != PRID_IMP_BMIPS3300_ALT)
                        __cpu_name[cpu] = "Broadcom BCM6338";
diff --git a/arch/mips/configs/db1000_defconfig b/arch/mips/configs/db1000_defconfig
deleted file mode 100644 (file)
index bac26b9..0000000
+++ /dev/null
@@ -1,359 +0,0 @@
-CONFIG_MIPS=y
-CONFIG_MIPS_ALCHEMY=y
-CONFIG_MIPS_DB1000=y
-CONFIG_SCHED_OMIT_FRAME_POINTER=y
-CONFIG_TICK_ONESHOT=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_HZ_100=y
-CONFIG_HZ=100
-CONFIG_PREEMPT_NONE=y
-CONFIG_EXPERIMENTAL=y
-CONFIG_BROKEN_ON_SMP=y
-CONFIG_INIT_ENV_ARG_LIMIT=32
-CONFIG_CROSS_COMPILE=""
-CONFIG_LOCALVERSION="-db1x00"
-CONFIG_LOCALVERSION_AUTO=y
-CONFIG_KERNEL_LZMA=y
-CONFIG_DEFAULT_HOSTNAME="db1x00"
-CONFIG_SWAP=y
-CONFIG_SYSVIPC=y
-CONFIG_SYSVIPC_SYSCTL=y
-CONFIG_FHANDLE=y
-CONFIG_AUDIT=y
-CONFIG_TINY_RCU=y
-CONFIG_LOG_BUF_SHIFT=18
-CONFIG_NAMESPACES=y
-CONFIG_UTS_NS=y
-CONFIG_IPC_NS=y
-CONFIG_USER_NS=y
-CONFIG_PID_NS=y
-CONFIG_NET_NS=y
-CONFIG_SYSCTL=y
-CONFIG_EXPERT=y
-CONFIG_KALLSYMS=y
-CONFIG_KALLSYMS_ALL=y
-CONFIG_HOTPLUG=y
-CONFIG_PRINTK=y
-CONFIG_BUG=y
-CONFIG_ELF_CORE=y
-CONFIG_BASE_FULL=y
-CONFIG_FUTEX=y
-CONFIG_EPOLL=y
-CONFIG_SIGNALFD=y
-CONFIG_TIMERFD=y
-CONFIG_EVENTFD=y
-CONFIG_SHMEM=y
-CONFIG_AIO=y
-CONFIG_EMBEDDED=y
-CONFIG_HAVE_PERF_EVENTS=y
-CONFIG_PERF_USE_VMALLOC=y
-CONFIG_PCI_QUIRKS=y
-CONFIG_SLAB=y
-CONFIG_SLABINFO=y
-CONFIG_BLOCK=y
-CONFIG_LBDAF=y
-CONFIG_BLK_DEV_BSG=y
-CONFIG_BLK_DEV_BSGLIB=y
-CONFIG_IOSCHED_NOOP=y
-CONFIG_DEFAULT_NOOP=y
-CONFIG_DEFAULT_IOSCHED="noop"
-CONFIG_FREEZER=y
-CONFIG_PCI=y
-CONFIG_PCI_DOMAINS=y
-CONFIG_PCCARD=y
-CONFIG_PCMCIA=y
-CONFIG_PCMCIA_LOAD_CIS=y
-CONFIG_PCMCIA_ALCHEMY_DEVBOARD=y
-CONFIG_BINFMT_ELF=y
-CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
-CONFIG_SUSPEND=y
-CONFIG_SUSPEND_FREEZER=y
-CONFIG_PM_SLEEP=y
-CONFIG_PM_RUNTIME=y
-CONFIG_PM=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_XFRM=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_IP_PNP_RARP=y
-CONFIG_NET_IPIP=y
-CONFIG_INET_TUNNEL=y
-CONFIG_INET_LRO=y
-CONFIG_TCP_CONG_CUBIC=y
-CONFIG_DEFAULT_TCP_CONG="cubic"
-CONFIG_IPV6=y
-CONFIG_INET6_XFRM_MODE_TRANSPORT=y
-CONFIG_INET6_XFRM_MODE_TUNNEL=y
-CONFIG_INET6_XFRM_MODE_BEET=y
-CONFIG_IPV6_SIT=y
-CONFIG_IPV6_NDISC_NODETYPE=y
-CONFIG_STP=y
-CONFIG_GARP=y
-CONFIG_BRIDGE=y
-CONFIG_BRIDGE_IGMP_SNOOPING=y
-CONFIG_VLAN_8021Q=y
-CONFIG_VLAN_8021Q_GVRP=y
-CONFIG_LLC=y
-CONFIG_LLC2=y
-CONFIG_DNS_RESOLVER=y
-CONFIG_BT=y
-CONFIG_BT_L2CAP=y
-CONFIG_BT_SCO=y
-CONFIG_BT_RFCOMM=y
-CONFIG_BT_RFCOMM_TTY=y
-CONFIG_BT_BNEP=y
-CONFIG_BT_BNEP_MC_FILTER=y
-CONFIG_BT_BNEP_PROTO_FILTER=y
-CONFIG_BT_HIDP=y
-CONFIG_BT_HCIBTUSB=y
-CONFIG_UEVENT_HELPER_PATH=""
-CONFIG_STANDALONE=y
-CONFIG_PREVENT_FIRMWARE_BUILD=y
-CONFIG_FW_LOADER=y
-CONFIG_MTD=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_BLKDEVS=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_CFI=y
-CONFIG_MTD_GEN_PROBE=y
-CONFIG_MTD_CFI_ADV_OPTIONS=y
-CONFIG_MTD_CFI_NOSWAP=y
-CONFIG_MTD_CFI_GEOMETRY=y
-CONFIG_MTD_MAP_BANK_WIDTH_1=y
-CONFIG_MTD_MAP_BANK_WIDTH_2=y
-CONFIG_MTD_MAP_BANK_WIDTH_4=y
-CONFIG_MTD_CFI_I1=y
-CONFIG_MTD_CFI_I2=y
-CONFIG_MTD_CFI_I4=y
-CONFIG_MTD_CFI_I8=y
-CONFIG_MTD_CFI_INTELEXT=y
-CONFIG_MTD_CFI_AMDSTD=y
-CONFIG_MTD_CFI_UTIL=y
-CONFIG_MTD_PHYSMAP=y
-CONFIG_SCSI_MOD=y
-CONFIG_SCSI=y
-CONFIG_SCSI_DMA=y
-CONFIG_SCSI_PROC_FS=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_CHR_DEV_SG=y
-CONFIG_SCSI_MULTI_LUN=y
-CONFIG_SCSI_CONSTANTS=y
-CONFIG_ATA=y
-CONFIG_ATA_VERBOSE_ERROR=y
-CONFIG_ATA_SFF=y
-CONFIG_ATA_BMDMA=y
-CONFIG_PATA_HPT37X=y
-CONFIG_PATA_PCMCIA=y
-CONFIG_MD=y
-CONFIG_BLK_DEV_DM=y
-CONFIG_FIREWIRE=y
-CONFIG_FIREWIRE_OHCI=y
-CONFIG_FIREWIRE_OHCI_DEBUG=y
-CONFIG_FIREWIRE_NET=y
-CONFIG_NETDEVICES=y
-CONFIG_MII=y
-CONFIG_PHYLIB=y
-CONFIG_NET_ETHERNET=y
-CONFIG_MIPS_AU1X00_ENET=y
-CONFIG_NET_PCMCIA=y
-CONFIG_PCMCIA_3C589=y
-CONFIG_PCMCIA_PCNET=y
-CONFIG_PPP=y
-CONFIG_PPP_MULTILINK=y
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=y
-CONFIG_PPP_SYNC_TTY=y
-CONFIG_PPP_DEFLATE=y
-CONFIG_PPP_BSDCOMP=y
-CONFIG_PPP_MPPE=y
-CONFIG_PPPOE=y
-CONFIG_INPUT=y
-CONFIG_INPUT_EVDEV=y
-CONFIG_INPUT_MISC=y
-CONFIG_INPUT_UINPUT=y
-CONFIG_VT=y
-CONFIG_CONSOLE_TRANSLATIONS=y
-CONFIG_VT_CONSOLE=y
-CONFIG_HW_CONSOLE=y
-CONFIG_UNIX98_PTYS=y
-CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
-CONFIG_DEVKMEM=y
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_NR_UARTS=4
-CONFIG_SERIAL_8250_RUNTIME_UARTS=4
-CONFIG_SERIAL_CORE=y
-CONFIG_SERIAL_CORE_CONSOLE=y
-CONFIG_TTY_PRINTK=y
-CONFIG_DEVPORT=y
-CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
-CONFIG_FB=y
-CONFIG_FB_CFB_FILLRECT=y
-CONFIG_FB_CFB_COPYAREA=y
-CONFIG_FB_CFB_IMAGEBLIT=y
-CONFIG_FB_AU1100=y
-CONFIG_DUMMY_CONSOLE=y
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
-CONFIG_FONTS=y
-CONFIG_FONT_8x16=y
-CONFIG_SOUND=y
-CONFIG_SND=y
-CONFIG_SND_TIMER=y
-CONFIG_SND_PCM=y
-CONFIG_SND_JACK=y
-CONFIG_SND_SEQUENCER=y
-CONFIG_SND_HRTIMER=y
-CONFIG_SND_SEQ_HRTIMER_DEFAULT=y
-CONFIG_SND_DYNAMIC_MINORS=y
-CONFIG_SND_VMASTER=y
-CONFIG_SND_AC97_CODEC=y
-CONFIG_SND_SOC=y
-CONFIG_SND_SOC_AC97_BUS=y
-CONFIG_SND_SOC_AU1XAUDIO=y
-CONFIG_SND_SOC_AU1XAC97C=y
-CONFIG_SND_SOC_DB1000=y
-CONFIG_SND_SOC_AC97_CODEC=y
-CONFIG_AC97_BUS=y
-CONFIG_HID_SUPPORT=y
-CONFIG_HID=y
-CONFIG_HIDRAW=y
-CONFIG_USB_HID=y
-CONFIG_USB_SUPPORT=y
-CONFIG_USB=y
-CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_EHCI_ROOT_HUB_TT=y
-CONFIG_USB_EHCI_TT_NEWSCHED=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_OHCI_HCD_PLATFORM=y
-CONFIG_USB_UHCI_HCD=y
-CONFIG_USB_STORAGE=y
-CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=y
-CONFIG_LEDS_TRIGGERS=y
-CONFIG_RTC_LIB=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_HCTOSYS=y
-CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
-CONFIG_RTC_INTF_SYSFS=y
-CONFIG_RTC_INTF_PROC=y
-CONFIG_RTC_INTF_DEV=y
-CONFIG_RTC_DRV_AU1XXX=y
-CONFIG_EXT4_FS=y
-CONFIG_EXT4_USE_FOR_EXT23=y
-CONFIG_EXT4_FS_XATTR=y
-CONFIG_EXT4_FS_POSIX_ACL=y
-CONFIG_EXT4_FS_SECURITY=y
-CONFIG_JBD2=y
-CONFIG_FS_MBCACHE=y
-CONFIG_FS_POSIX_ACL=y
-CONFIG_EXPORTFS=y
-CONFIG_FILE_LOCKING=y
-CONFIG_FSNOTIFY=y
-CONFIG_DNOTIFY=y
-CONFIG_INOTIFY_USER=y
-CONFIG_GENERIC_ACL=y
-CONFIG_PROC_FS=y
-CONFIG_PROC_KCORE=y
-CONFIG_PROC_SYSCTL=y
-CONFIG_SYSFS=y
-CONFIG_TMPFS=y
-CONFIG_TMPFS_POSIX_ACL=y
-CONFIG_TMPFS_XATTR=y
-CONFIG_MISC_FILESYSTEMS=y
-CONFIG_JFFS2_FS=y
-CONFIG_JFFS2_FS_DEBUG=0
-CONFIG_JFFS2_FS_WRITEBUFFER=y
-CONFIG_JFFS2_SUMMARY=y
-CONFIG_JFFS2_FS_XATTR=y
-CONFIG_JFFS2_FS_POSIX_ACL=y
-CONFIG_JFFS2_FS_SECURITY=y
-CONFIG_JFFS2_COMPRESSION_OPTIONS=y
-CONFIG_JFFS2_ZLIB=y
-CONFIG_JFFS2_LZO=y
-CONFIG_JFFS2_RTIME=y
-CONFIG_JFFS2_RUBIN=y
-CONFIG_JFFS2_CMODE_PRIORITY=y
-CONFIG_SQUASHFS=y
-CONFIG_SQUASHFS_ZLIB=y
-CONFIG_SQUASHFS_LZO=y
-CONFIG_SQUASHFS_XZ=y
-CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
-CONFIG_NETWORK_FILESYSTEMS=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-CONFIG_NFS_V4=y
-CONFIG_NFS_V4_1=y
-CONFIG_PNFS_FILE_LAYOUT=y
-CONFIG_PNFS_BLOCK=y
-CONFIG_ROOT_NFS=y
-CONFIG_NFS_USE_KERNEL_DNS=y
-CONFIG_NFS_USE_NEW_IDMAPPER=y
-CONFIG_NFSD=y
-CONFIG_NFSD_V2_ACL=y
-CONFIG_NFSD_V3=y
-CONFIG_NFSD_V3_ACL=y
-CONFIG_NFSD_V4=y
-CONFIG_LOCKD=y
-CONFIG_LOCKD_V4=y
-CONFIG_NFS_ACL_SUPPORT=y
-CONFIG_NFS_COMMON=y
-CONFIG_SUNRPC=y
-CONFIG_SUNRPC_GSS=y
-CONFIG_SUNRPC_BACKCHANNEL=y
-CONFIG_MSDOS_PARTITION=y
-CONFIG_NLS=y
-CONFIG_NLS_DEFAULT="iso8859-1"
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_CODEPAGE_850=y
-CONFIG_NLS_CODEPAGE_1250=y
-CONFIG_NLS_ASCII=y
-CONFIG_NLS_ISO8859_1=y
-CONFIG_NLS_ISO8859_15=y
-CONFIG_NLS_UTF8=y
-CONFIG_HAVE_ARCH_KGDB=y
-CONFIG_EARLY_PRINTK=y
-CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE="noirqdebug rootwait root=/dev/sda1 rootfstype=ext4 console=ttyS0,115200 video=au1100fb:panel:CRT_800x600_16"
-CONFIG_DEBUG_ZBOOT=y
-CONFIG_KEYS=y
-CONFIG_KEYS_DEBUG_PROC_KEYS=y
-CONFIG_SECURITYFS=y
-CONFIG_DEFAULT_SECURITY_DAC=y
-CONFIG_DEFAULT_SECURITY=""
-CONFIG_CRYPTO=y
-CONFIG_CRYPTO_ALGAPI=y
-CONFIG_CRYPTO_ALGAPI2=y
-CONFIG_CRYPTO_AEAD2=y
-CONFIG_CRYPTO_BLKCIPHER=y
-CONFIG_CRYPTO_BLKCIPHER2=y
-CONFIG_CRYPTO_HASH=y
-CONFIG_CRYPTO_HASH2=y
-CONFIG_CRYPTO_RNG=y
-CONFIG_CRYPTO_RNG2=y
-CONFIG_CRYPTO_PCOMP2=y
-CONFIG_CRYPTO_MANAGER=y
-CONFIG_CRYPTO_MANAGER2=y
-CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y
-CONFIG_CRYPTO_WORKQUEUE=y
-CONFIG_CRYPTO_ECB=y
-CONFIG_CRYPTO_SHA1=y
-CONFIG_CRYPTO_AES=y
-CONFIG_CRYPTO_ANSI_CPRNG=y
-CONFIG_BITREVERSE=y
-CONFIG_CRC_CCITT=y
-CONFIG_CRC16=y
-CONFIG_CRC_ITU_T=y
-CONFIG_CRC32=y
-CONFIG_ZLIB_INFLATE=y
-CONFIG_ZLIB_DEFLATE=y
-CONFIG_LZO_COMPRESS=y
-CONFIG_LZO_DECOMPRESS=y
-CONFIG_XZ_DEC=y
diff --git a/arch/mips/configs/db1235_defconfig b/arch/mips/configs/db1235_defconfig
deleted file mode 100644 (file)
index 28e49f2..0000000
+++ /dev/null
@@ -1,434 +0,0 @@
-CONFIG_MIPS_ALCHEMY=y
-CONFIG_MIPS_DB1235=y
-CONFIG_COMPACTION=y
-CONFIG_KSM=y
-CONFIG_HZ_100=y
-CONFIG_EXPERIMENTAL=y
-CONFIG_LOCALVERSION="-db1235"
-CONFIG_KERNEL_LZMA=y
-CONFIG_DEFAULT_HOSTNAME="db1235"
-CONFIG_SYSVIPC=y
-CONFIG_POSIX_MQUEUE=y
-CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_BSD_PROCESS_ACCT_V3=y
-CONFIG_FHANDLE=y
-CONFIG_TASKSTATS=y
-CONFIG_TASK_DELAY_ACCT=y
-CONFIG_AUDIT=y
-CONFIG_AUDIT_LOGINUID_IMMUTABLE=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_LOG_BUF_SHIFT=16
-CONFIG_NAMESPACES=y
-CONFIG_EMBEDDED=y
-CONFIG_SLAB=y
-CONFIG_JUMP_LABEL=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_LDM_PARTITION=y
-CONFIG_EFI_PARTITION=y
-CONFIG_PCI=y
-CONFIG_PCCARD=y
-CONFIG_PCMCIA_ALCHEMY_DEVBOARD=y
-CONFIG_PM_RUNTIME=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_UNIX_DIAG=y
-CONFIG_XFRM_USER=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_ADVANCED_ROUTER=y
-CONFIG_IP_MULTIPLE_TABLES=y
-CONFIG_IP_ROUTE_MULTIPATH=y
-CONFIG_IP_ROUTE_VERBOSE=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_IP_PNP_RARP=y
-CONFIG_NET_IPIP=y
-CONFIG_NET_IPGRE_DEMUX=y
-CONFIG_NET_IPGRE=y
-CONFIG_NET_IPGRE_BROADCAST=y
-CONFIG_IP_MROUTE=y
-CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
-CONFIG_IP_PIMSM_V1=y
-CONFIG_IP_PIMSM_V2=y
-CONFIG_ARPD=y
-CONFIG_SYN_COOKIES=y
-CONFIG_NET_IPVTI=y
-CONFIG_INET_AH=y
-CONFIG_INET_ESP=y
-CONFIG_INET_IPCOMP=y
-CONFIG_INET_UDP_DIAG=y
-CONFIG_TCP_CONG_ADVANCED=y
-CONFIG_TCP_CONG_HSTCP=y
-CONFIG_TCP_CONG_HYBLA=y
-CONFIG_TCP_CONG_SCALABLE=y
-CONFIG_TCP_CONG_LP=y
-CONFIG_TCP_CONG_VENO=y
-CONFIG_TCP_CONG_YEAH=y
-CONFIG_TCP_CONG_ILLINOIS=y
-CONFIG_DEFAULT_HYBLA=y
-CONFIG_TCP_MD5SIG=y
-CONFIG_IPV6_PRIVACY=y
-CONFIG_IPV6_ROUTER_PREF=y
-CONFIG_IPV6_ROUTE_INFO=y
-CONFIG_IPV6_OPTIMISTIC_DAD=y
-CONFIG_INET6_AH=y
-CONFIG_INET6_ESP=y
-CONFIG_INET6_IPCOMP=y
-CONFIG_IPV6_MIP6=y
-CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=y
-CONFIG_IPV6_SIT_6RD=y
-CONFIG_IPV6_TUNNEL=y
-CONFIG_IPV6_MULTIPLE_TABLES=y
-CONFIG_IPV6_SUBTREES=y
-CONFIG_IPV6_MROUTE=y
-CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y
-CONFIG_IPV6_PIMSM_V2=y
-CONFIG_NETFILTER=y
-CONFIG_NF_CONNTRACK=y
-CONFIG_NF_CONNTRACK_EVENTS=y
-CONFIG_NF_CONNTRACK_TIMEOUT=y
-CONFIG_NF_CONNTRACK_TIMESTAMP=y
-CONFIG_NF_CT_PROTO_DCCP=y
-CONFIG_NF_CT_PROTO_SCTP=y
-CONFIG_NF_CT_PROTO_UDPLITE=y
-CONFIG_NF_CONNTRACK_AMANDA=y
-CONFIG_NF_CONNTRACK_FTP=y
-CONFIG_NF_CONNTRACK_H323=y
-CONFIG_NF_CONNTRACK_IRC=y
-CONFIG_NF_CONNTRACK_NETBIOS_NS=y
-CONFIG_NF_CONNTRACK_SNMP=y
-CONFIG_NF_CONNTRACK_PPTP=y
-CONFIG_NF_CONNTRACK_SANE=y
-CONFIG_NF_CONNTRACK_SIP=y
-CONFIG_NF_CONNTRACK_TFTP=y
-CONFIG_NF_CT_NETLINK=y
-CONFIG_NF_CT_NETLINK_TIMEOUT=y
-CONFIG_NF_CT_NETLINK_HELPER=y
-CONFIG_NETFILTER_NETLINK_QUEUE_CT=y
-CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
-CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
-CONFIG_NETFILTER_XT_TARGET_HMARK=y
-CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
-CONFIG_NETFILTER_XT_TARGET_LED=y
-CONFIG_NETFILTER_XT_TARGET_LOG=y
-CONFIG_NETFILTER_XT_TARGET_MARK=y
-CONFIG_NETFILTER_XT_TARGET_NFLOG=y
-CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
-CONFIG_NETFILTER_XT_TARGET_TEE=y
-CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
-CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=y
-CONFIG_NETFILTER_XT_MATCH_CLUSTER=y
-CONFIG_NETFILTER_XT_MATCH_COMMENT=y
-CONFIG_NETFILTER_XT_MATCH_CONNBYTES=y
-CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
-CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
-CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
-CONFIG_NETFILTER_XT_MATCH_CPU=y
-CONFIG_NETFILTER_XT_MATCH_DCCP=y
-CONFIG_NETFILTER_XT_MATCH_DEVGROUP=y
-CONFIG_NETFILTER_XT_MATCH_DSCP=y
-CONFIG_NETFILTER_XT_MATCH_ESP=y
-CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
-CONFIG_NETFILTER_XT_MATCH_HELPER=y
-CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
-CONFIG_NETFILTER_XT_MATCH_LENGTH=y
-CONFIG_NETFILTER_XT_MATCH_LIMIT=y
-CONFIG_NETFILTER_XT_MATCH_MAC=y
-CONFIG_NETFILTER_XT_MATCH_MARK=y
-CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
-CONFIG_NETFILTER_XT_MATCH_NFACCT=y
-CONFIG_NETFILTER_XT_MATCH_OSF=y
-CONFIG_NETFILTER_XT_MATCH_OWNER=y
-CONFIG_NETFILTER_XT_MATCH_POLICY=y
-CONFIG_NETFILTER_XT_MATCH_PHYSDEV=y
-CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
-CONFIG_NETFILTER_XT_MATCH_QUOTA=y
-CONFIG_NETFILTER_XT_MATCH_RATEEST=y
-CONFIG_NETFILTER_XT_MATCH_REALM=y
-CONFIG_NETFILTER_XT_MATCH_RECENT=y
-CONFIG_NETFILTER_XT_MATCH_SCTP=y
-CONFIG_NETFILTER_XT_MATCH_STATE=y
-CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
-CONFIG_NETFILTER_XT_MATCH_STRING=y
-CONFIG_NETFILTER_XT_MATCH_TCPMSS=y
-CONFIG_NETFILTER_XT_MATCH_TIME=y
-CONFIG_NETFILTER_XT_MATCH_U32=y
-CONFIG_NF_CONNTRACK_IPV4=y
-CONFIG_IP_NF_IPTABLES=y
-CONFIG_IP_NF_MATCH_AH=y
-CONFIG_IP_NF_MATCH_ECN=y
-CONFIG_IP_NF_MATCH_RPFILTER=y
-CONFIG_IP_NF_MATCH_TTL=y
-CONFIG_IP_NF_FILTER=y
-CONFIG_IP_NF_TARGET_REJECT=y
-CONFIG_IP_NF_TARGET_ULOG=y
-CONFIG_NF_NAT=y
-CONFIG_IP_NF_TARGET_MASQUERADE=y
-CONFIG_IP_NF_TARGET_NETMAP=y
-CONFIG_IP_NF_TARGET_REDIRECT=y
-CONFIG_IP_NF_MANGLE=y
-CONFIG_IP_NF_TARGET_CLUSTERIP=y
-CONFIG_IP_NF_TARGET_ECN=y
-CONFIG_IP_NF_TARGET_TTL=y
-CONFIG_IP_NF_RAW=y
-CONFIG_IP_NF_ARPTABLES=y
-CONFIG_IP_NF_ARPFILTER=y
-CONFIG_IP_NF_ARP_MANGLE=y
-CONFIG_NF_CONNTRACK_IPV6=y
-CONFIG_IP6_NF_IPTABLES=y
-CONFIG_IP6_NF_MATCH_AH=y
-CONFIG_IP6_NF_MATCH_EUI64=y
-CONFIG_IP6_NF_MATCH_FRAG=y
-CONFIG_IP6_NF_MATCH_OPTS=y
-CONFIG_IP6_NF_MATCH_HL=y
-CONFIG_IP6_NF_MATCH_IPV6HEADER=y
-CONFIG_IP6_NF_MATCH_MH=y
-CONFIG_IP6_NF_MATCH_RPFILTER=y
-CONFIG_IP6_NF_MATCH_RT=y
-CONFIG_IP6_NF_TARGET_HL=y
-CONFIG_IP6_NF_FILTER=y
-CONFIG_IP6_NF_TARGET_REJECT=y
-CONFIG_IP6_NF_MANGLE=y
-CONFIG_IP6_NF_RAW=y
-CONFIG_BRIDGE_NF_EBTABLES=y
-CONFIG_BRIDGE_EBT_BROUTE=y
-CONFIG_BRIDGE_EBT_T_FILTER=y
-CONFIG_BRIDGE_EBT_T_NAT=y
-CONFIG_BRIDGE_EBT_802_3=y
-CONFIG_BRIDGE_EBT_AMONG=y
-CONFIG_BRIDGE_EBT_ARP=y
-CONFIG_BRIDGE_EBT_IP=y
-CONFIG_BRIDGE_EBT_IP6=y
-CONFIG_BRIDGE_EBT_LIMIT=y
-CONFIG_BRIDGE_EBT_MARK=y
-CONFIG_BRIDGE_EBT_PKTTYPE=y
-CONFIG_BRIDGE_EBT_STP=y
-CONFIG_BRIDGE_EBT_VLAN=y
-CONFIG_BRIDGE_EBT_ARPREPLY=y
-CONFIG_BRIDGE_EBT_DNAT=y
-CONFIG_BRIDGE_EBT_MARK_T=y
-CONFIG_BRIDGE_EBT_REDIRECT=y
-CONFIG_BRIDGE_EBT_SNAT=y
-CONFIG_BRIDGE_EBT_LOG=y
-CONFIG_BRIDGE_EBT_NFLOG=y
-CONFIG_L2TP=y
-CONFIG_L2TP_V3=y
-CONFIG_L2TP_IP=y
-CONFIG_L2TP_ETH=y
-CONFIG_BRIDGE=y
-CONFIG_VLAN_8021Q=y
-CONFIG_VLAN_8021Q_GVRP=y
-CONFIG_LLC2=y
-CONFIG_NET_SCHED=y
-CONFIG_NET_SCH_CBQ=y
-CONFIG_NET_SCH_HTB=y
-CONFIG_NET_SCH_HFSC=y
-CONFIG_NET_SCH_PRIO=y
-CONFIG_NET_SCH_MULTIQ=y
-CONFIG_NET_SCH_RED=y
-CONFIG_NET_SCH_SFB=y
-CONFIG_NET_SCH_SFQ=y
-CONFIG_NET_SCH_TEQL=y
-CONFIG_NET_SCH_TBF=y
-CONFIG_NET_SCH_GRED=y
-CONFIG_NET_SCH_DSMARK=y
-CONFIG_NET_SCH_NETEM=y
-CONFIG_NET_SCH_DRR=y
-CONFIG_NET_SCH_MQPRIO=y
-CONFIG_NET_SCH_CHOKE=y
-CONFIG_NET_SCH_QFQ=y
-CONFIG_NET_SCH_CODEL=y
-CONFIG_NET_SCH_FQ_CODEL=y
-CONFIG_NET_SCH_INGRESS=y
-CONFIG_NET_SCH_PLUG=y
-CONFIG_NET_CLS_BASIC=y
-CONFIG_NET_CLS_TCINDEX=y
-CONFIG_NET_CLS_ROUTE4=y
-CONFIG_NET_CLS_FW=y
-CONFIG_NET_CLS_U32=y
-CONFIG_CLS_U32_PERF=y
-CONFIG_CLS_U32_MARK=y
-CONFIG_NET_CLS_RSVP=y
-CONFIG_NET_CLS_RSVP6=y
-CONFIG_NET_CLS_FLOW=y
-CONFIG_NET_EMATCH=y
-CONFIG_NET_EMATCH_CMP=y
-CONFIG_NET_EMATCH_NBYTE=y
-CONFIG_NET_EMATCH_U32=y
-CONFIG_NET_EMATCH_META=y
-CONFIG_NET_EMATCH_TEXT=y
-CONFIG_NET_CLS_ACT=y
-CONFIG_NET_ACT_POLICE=y
-CONFIG_NET_ACT_GACT=y
-CONFIG_GACT_PROB=y
-CONFIG_NET_ACT_MIRRED=y
-CONFIG_NET_ACT_NAT=y
-CONFIG_NET_ACT_PEDIT=y
-CONFIG_NET_ACT_SIMP=y
-CONFIG_NET_ACT_SKBEDIT=y
-CONFIG_NET_ACT_CSUM=y
-CONFIG_NET_CLS_IND=y
-CONFIG_BT=y
-CONFIG_BT_RFCOMM=y
-CONFIG_BT_RFCOMM_TTY=y
-CONFIG_BT_BNEP=y
-CONFIG_BT_BNEP_MC_FILTER=y
-CONFIG_BT_BNEP_PROTO_FILTER=y
-CONFIG_BT_HIDP=y
-CONFIG_BT_HCIBTUSB=y
-CONFIG_CFG80211=y
-CONFIG_CFG80211_CERTIFICATION_ONUS=y
-CONFIG_CFG80211_WEXT=y
-CONFIG_MAC80211=y
-CONFIG_MAC80211_LEDS=y
-CONFIG_RFKILL=y
-CONFIG_RFKILL_INPUT=y
-CONFIG_DEVTMPFS=y
-CONFIG_DEVTMPFS_MOUNT=y
-CONFIG_MTD=y
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_CFI=y
-CONFIG_MTD_CFI_AMDSTD=y
-CONFIG_MTD_PHYSMAP=y
-CONFIG_MTD_M25P80=y
-CONFIG_MTD_NAND=y
-CONFIG_MTD_NAND_PLATFORM=y
-CONFIG_EEPROM_AT24=y
-CONFIG_EEPROM_AT25=y
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDE_AU1XXX=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_CHR_DEV_SG=y
-CONFIG_SCSI_MULTI_LUN=y
-CONFIG_ATA=y
-CONFIG_PATA_HPT37X=y
-CONFIG_PATA_PCMCIA=y
-CONFIG_PATA_PLATFORM=y
-CONFIG_NETDEVICES=y
-CONFIG_MIPS_AU1X00_ENET=y
-CONFIG_SMC91X=y
-CONFIG_SMSC911X=y
-CONFIG_AMD_PHY=y
-CONFIG_SMSC_PHY=y
-CONFIG_RT2X00=y
-CONFIG_RT73USB=y
-CONFIG_INPUT_EVDEV=y
-CONFIG_INPUT_TOUCHSCREEN=y
-CONFIG_TOUCHSCREEN_WM97XX=y
-CONFIG_INPUT_MISC=y
-CONFIG_INPUT_UINPUT=y
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_TTY_PRINTK=y
-CONFIG_I2C=y
-CONFIG_I2C_CHARDEV=y
-CONFIG_I2C_AU1550=y
-CONFIG_SPI=y
-CONFIG_SPI_AU1550=y
-CONFIG_GPIO_SYSFS=y
-CONFIG_SENSORS_ADM1025=y
-CONFIG_SENSORS_LM70=y
-CONFIG_SOUND=y
-CONFIG_SND=y
-CONFIG_SND_HRTIMER=y
-CONFIG_SND_DYNAMIC_MINORS=y
-CONFIG_SND_SOC=y
-CONFIG_SND_SOC_AU1XPSC=y
-CONFIG_SND_SOC_DB1200=y
-CONFIG_HIDRAW=y
-CONFIG_UHID=y
-CONFIG_USB_HIDDEV=y
-CONFIG_USB=y
-CONFIG_USB_DYNAMIC_MINORS=y
-CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_EHCI_HCD_PLATFORM=y
-CONFIG_USB_EHCI_ROOT_HUB_TT=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_OHCI_HCD_PLATFORM=y
-CONFIG_USB_STORAGE=y
-CONFIG_MMC=y
-CONFIG_MMC_AU1X=y
-CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_AU1XXX=y
-CONFIG_EXT4_FS=y
-CONFIG_EXT4_FS_POSIX_ACL=y
-CONFIG_EXT4_FS_SECURITY=y
-CONFIG_XFS_FS=y
-CONFIG_XFS_POSIX_ACL=y
-CONFIG_VFAT_FS=y
-CONFIG_TMPFS=y
-CONFIG_TMPFS_POSIX_ACL=y
-CONFIG_CONFIGFS_FS=y
-CONFIG_JFFS2_FS=y
-CONFIG_JFFS2_SUMMARY=y
-CONFIG_JFFS2_FS_XATTR=y
-CONFIG_JFFS2_COMPRESSION_OPTIONS=y
-CONFIG_JFFS2_LZO=y
-CONFIG_JFFS2_CMODE_FAVOURLZO=y
-CONFIG_SQUASHFS=y
-CONFIG_SQUASHFS_LZO=y
-CONFIG_SQUASHFS_XZ=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3_ACL=y
-CONFIG_NFS_V4=y
-CONFIG_NFS_V4_1=y
-CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org"
-CONFIG_ROOT_NFS=y
-CONFIG_NFSD=y
-CONFIG_NFSD_V3_ACL=y
-CONFIG_NFSD_V4=y
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_CODEPAGE_850=y
-CONFIG_NLS_CODEPAGE_852=y
-CONFIG_NLS_CODEPAGE_1250=y
-CONFIG_NLS_ASCII=y
-CONFIG_NLS_ISO8859_1=y
-CONFIG_NLS_ISO8859_2=y
-CONFIG_NLS_ISO8859_15=y
-CONFIG_NLS_UTF8=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_STRIP_ASM_SYMS=y
-CONFIG_SECURITYFS=y
-CONFIG_CRYPTO_USER=y
-CONFIG_CRYPTO_NULL=y
-CONFIG_CRYPTO_CRYPTD=y
-CONFIG_CRYPTO_CCM=y
-CONFIG_CRYPTO_GCM=y
-CONFIG_CRYPTO_CTS=y
-CONFIG_CRYPTO_LRW=y
-CONFIG_CRYPTO_PCBC=y
-CONFIG_CRYPTO_XTS=y
-CONFIG_CRYPTO_XCBC=y
-CONFIG_CRYPTO_VMAC=y
-CONFIG_CRYPTO_MD4=y
-CONFIG_CRYPTO_MICHAEL_MIC=y
-CONFIG_CRYPTO_RMD128=y
-CONFIG_CRYPTO_RMD160=y
-CONFIG_CRYPTO_RMD256=y
-CONFIG_CRYPTO_RMD320=y
-CONFIG_CRYPTO_SHA256=y
-CONFIG_CRYPTO_SHA512=y
-CONFIG_CRYPTO_TGR192=y
-CONFIG_CRYPTO_WP512=y
-CONFIG_CRYPTO_ANUBIS=y
-CONFIG_CRYPTO_BLOWFISH=y
-CONFIG_CRYPTO_CAMELLIA=y
-CONFIG_CRYPTO_CAST5=y
-CONFIG_CRYPTO_CAST6=y
-CONFIG_CRYPTO_FCRYPT=y
-CONFIG_CRYPTO_KHAZAD=y
-CONFIG_CRYPTO_SALSA20=y
-CONFIG_CRYPTO_SEED=y
-CONFIG_CRYPTO_SERPENT=y
-CONFIG_CRYPTO_TEA=y
-CONFIG_CRYPTO_TWOFISH=y
-CONFIG_CRYPTO_ZLIB=y
-CONFIG_CRYPTO_LZO=y
-CONFIG_CRYPTO_USER_API_HASH=y
-CONFIG_CRYPTO_USER_API_SKCIPHER=y
diff --git a/arch/mips/configs/db1xxx_defconfig b/arch/mips/configs/db1xxx_defconfig
new file mode 100644 (file)
index 0000000..c99b6ee
--- /dev/null
@@ -0,0 +1,245 @@
+CONFIG_MIPS_ALCHEMY=y
+CONFIG_MIPS_DB1XXX=y
+CONFIG_CMA=y
+CONFIG_CMA_DEBUG=y
+CONFIG_HZ_100=y
+CONFIG_LOCALVERSION="-db1xxx"
+CONFIG_KERNEL_XZ=y
+CONFIG_DEFAULT_HOSTNAME="db1xxx"
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_LOG_BUF_SHIFT=16
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
+CONFIG_MEMCG_KMEM=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_CFS_BANDWIDTH=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_BLK_CGROUP=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_EMBEDDED=y
+CONFIG_SLAB=y
+CONFIG_BLK_DEV_BSGLIB=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_DEFAULT_NOOP=y
+CONFIG_PCI=y
+CONFIG_PCI_REALLOC_ENABLE_AUTO=y
+CONFIG_PCCARD=y
+CONFIG_PCMCIA_ALCHEMY_DEVBOARD=y
+CONFIG_PM_RUNTIME=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=y
+CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=y
+CONFIG_XFRM_USER=y
+CONFIG_XFRM_SUB_POLICY=y
+CONFIG_XFRM_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_FIB_TRIE_STATS=y
+CONFIG_NET_IPIP=y
+CONFIG_NET_IPGRE_DEMUX=y
+CONFIG_NET_IPGRE=y
+CONFIG_NET_IPGRE_BROADCAST=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+CONFIG_INET_IPCOMP=y
+CONFIG_INET_UDP_DIAG=y
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_TCP_CONG_VENO=y
+CONFIG_DEFAULT_VENO=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=y
+CONFIG_IPV6_VTI=y
+CONFIG_IPV6_SIT_6RD=y
+CONFIG_IPV6_GRE=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_IPV6_MROUTE=y
+CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y
+CONFIG_IPV6_PIMSM_V2=y
+CONFIG_BRIDGE=y
+CONFIG_NETLINK_MMAP=y
+CONFIG_NETLINK_DIAG=y
+CONFIG_IRDA=y
+CONFIG_IRLAN=y
+CONFIG_IRCOMM=y
+CONFIG_IRDA_ULTRA=y
+CONFIG_IRDA_CACHE_LAST_LSAP=y
+CONFIG_IRDA_FAST_RR=y
+CONFIG_AU1000_FIR=y
+CONFIG_BT=y
+CONFIG_BT_RFCOMM=y
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=y
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_HIDP=y
+CONFIG_BT_HCIBTUSB=y
+CONFIG_CFG80211=y
+CONFIG_CFG80211_WEXT=y
+CONFIG_MAC80211=y
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_ADV_OPTIONS=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_M25P80=y
+CONFIG_MTD_SST25L=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_ECC_BCH=y
+CONFIG_MTD_NAND_AU1550=y
+CONFIG_MTD_NAND_PLATFORM=y
+CONFIG_EEPROM_AT24=y
+CONFIG_EEPROM_AT25=y
+CONFIG_SCSI_TGT=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_ATA=y
+CONFIG_PATA_HPT37X=y
+CONFIG_PATA_HPT3X2N=y
+CONFIG_PATA_PCMCIA=y
+CONFIG_PATA_PLATFORM=y
+CONFIG_NETDEVICES=y
+CONFIG_NLMON=y
+CONFIG_PCMCIA_3C589=y
+CONFIG_MIPS_AU1X00_ENET=y
+CONFIG_SMC91X=y
+CONFIG_SMSC911X=y
+CONFIG_AMD_PHY=y
+CONFIG_SMSC_PHY=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_GPIO=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_ADS7846=y
+CONFIG_TOUCHSCREEN_WM97XX=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_TTY_PRINTK=y
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_AU1550=y
+CONFIG_SPI=y
+CONFIG_SPI_AU1550=y
+CONFIG_SPI_GPIO=y
+CONFIG_SENSORS_ADM1025=y
+CONFIG_SENSORS_LM70=y
+CONFIG_FB=y
+CONFIG_FB_AU1100=y
+CONFIG_FB_AU1200=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SEQUENCER=y
+CONFIG_SND_HRTIMER=y
+CONFIG_SND_DYNAMIC_MINORS=y
+CONFIG_SND_AC97_POWER_SAVE=y
+CONFIG_SND_AC97_POWER_SAVE_DEFAULT=1
+CONFIG_SND_SOC=y
+CONFIG_SND_SOC_AU1XPSC=y
+CONFIG_SND_SOC_AU1XAUDIO=y
+CONFIG_SND_SOC_DB1000=y
+CONFIG_SND_SOC_DB1200=y
+CONFIG_HIDRAW=y
+CONFIG_UHID=y
+CONFIG_HID_LOGITECH=y
+CONFIG_HID_LOGITECH_DJ=y
+CONFIG_USB_HIDDEV=y
+CONFIG_USB=y
+CONFIG_USB_DYNAMIC_MINORS=y
+CONFIG_USB_OTG=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_ROOT_HUB_TT=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_MMC=y
+CONFIG_MMC_CLKGATE=y
+CONFIG_SDIO_UART=y
+CONFIG_MMC_AU1X=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_AU1XXX=y
+CONFIG_FIRMWARE_MEMMAP=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_XFS_FS=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_FANOTIFY=y
+CONFIG_FUSE_FS=y
+CONFIG_CUSE=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_CONFIGFS_FS=y
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_SUMMARY=y
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+CONFIG_JFFS2_LZO=y
+CONFIG_JFFS2_RUBIN=y
+CONFIG_SQUASHFS=y
+CONFIG_SQUASHFS_FILE_DIRECT=y
+CONFIG_SQUASHFS_XATTR=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_SQUASHFS_XZ=y
+CONFIG_F2FS_FS=y
+CONFIG_F2FS_FS_SECURITY=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_V4_1=y
+CONFIG_NFS_V4_2=y
+CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="local"
+CONFIG_NFS_V4_1_MIGRATION=y
+CONFIG_NFSD=y
+CONFIG_NFSD_V3_ACL=y
+CONFIG_NFSD_V4=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_850=y
+CONFIG_NLS_CODEPAGE_852=y
+CONFIG_NLS_CODEPAGE_1250=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_2=y
+CONFIG_NLS_ISO8859_15=y
+CONFIG_NLS_UTF8=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_SECURITYFS=y
+CONFIG_CRYPTO_USER=y
+CONFIG_CRYPTO_CRYPTD=y
+CONFIG_CRYPTO_USER_API_HASH=y
+CONFIG_CRYPTO_USER_API_SKCIPHER=y
+CONFIG_CRC32_SLICEBY4=y
+CONFIG_FONTS=y
+CONFIG_FONT_8x8=y
diff --git a/arch/mips/configs/loongson3_defconfig b/arch/mips/configs/loongson3_defconfig
new file mode 100644 (file)
index 0000000..ea1761f
--- /dev/null
@@ -0,0 +1,362 @@
+CONFIG_MACH_LOONGSON=y
+CONFIG_SWIOTLB=y
+CONFIG_LEMOTE_MACH3A=y
+CONFIG_CPU_LOONGSON3=y
+CONFIG_64BIT=y
+CONFIG_PAGE_SIZE_16KB=y
+CONFIG_KSM=y
+CONFIG_SMP=y
+CONFIG_NR_CPUS=4
+CONFIG_HZ_256=y
+CONFIG_PREEMPT=y
+CONFIG_KEXEC=y
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_KERNEL_LZMA=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_CPUSETS=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
+CONFIG_BLK_CGROUP=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_RD_BZIP2=y
+CONFIG_RD_LZMA=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_EMBEDDED=y
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_BLK_DEV_INTEGRITY=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_IOSCHED_DEADLINE=m
+CONFIG_CFQ_GROUP_IOSCHED=y
+CONFIG_PCI=y
+CONFIG_HT_PCI=y
+CONFIG_PCIEPORTBUS=y
+CONFIG_HOTPLUG_PCI_PCIE=y
+# CONFIG_PCIEAER is not set
+CONFIG_PCIEASPM_PERFORMANCE=y
+CONFIG_HOTPLUG_PCI=y
+CONFIG_HOTPLUG_PCI_SHPC=m
+CONFIG_BINFMT_MISC=m
+CONFIG_MIPS32_COMPAT=y
+CONFIG_MIPS32_O32=y
+CONFIG_MIPS32_N32=y
+CONFIG_PM_RUNTIME=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_NETFILTER=y
+CONFIG_NETFILTER_NETLINK_LOG=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_DCCP=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+CONFIG_IP_VS=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_ULOG=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_IP_SCTP=m
+CONFIG_L2TP=m
+CONFIG_BRIDGE=m
+CONFIG_CFG80211=m
+CONFIG_CFG80211_WEXT=y
+CONFIG_MAC80211=m
+CONFIG_RFKILL=m
+CONFIG_RFKILL_INPUT=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_MTD=m
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_RAID_ATTRS=m
+CONFIG_SCSI_TGT=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=m
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SPI_ATTRS=m
+CONFIG_SCSI_FC_ATTRS=m
+CONFIG_ISCSI_TCP=m
+CONFIG_MEGARAID_NEWGEN=y
+CONFIG_MEGARAID_MM=y
+CONFIG_MEGARAID_MAILBOX=y
+CONFIG_MEGARAID_LEGACY=y
+CONFIG_MEGARAID_SAS=y
+CONFIG_ATA=y
+CONFIG_SATA_AHCI=y
+CONFIG_PATA_ATIIXP=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=m
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+CONFIG_MD_RAID10=m
+CONFIG_MD_RAID456=m
+CONFIG_MD_MULTIPATH=m
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_ZERO=m
+CONFIG_TARGET_CORE=m
+CONFIG_TCM_IBLOCK=m
+CONFIG_TCM_FILEIO=m
+CONFIG_TCM_PSCSI=m
+CONFIG_LOOPBACK_TARGET=m
+CONFIG_ISCSI_TARGET=m
+CONFIG_NETDEVICES=y
+CONFIG_TUN=m
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+# CONFIG_NET_VENDOR_AMD is not set
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_ATHEROS is not set
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_CIRRUS is not set
+# CONFIG_NET_VENDOR_CISCO is not set
+# CONFIG_NET_VENDOR_DEC is not set
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_EXAR is not set
+# CONFIG_NET_VENDOR_HP is not set
+CONFIG_E1000=y
+CONFIG_E1000E=y
+CONFIG_IGB=y
+CONFIG_IXGB=y
+CONFIG_IXGBE=y
+# CONFIG_NET_VENDOR_I825XX is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MELLANOX is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+# CONFIG_NET_PACKET_ENGINE is not set
+# CONFIG_NET_VENDOR_QLOGIC is not set
+CONFIG_8139CP=m
+CONFIG_8139TOO=m
+CONFIG_R8169=y
+# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_TOSHIBA is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+CONFIG_PPP=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MPPE=m
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOE=m
+CONFIG_PPPOL2TP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+CONFIG_ATH_CARDS=m
+CONFIG_ATH9K=m
+CONFIG_HOSTAP=m
+CONFIG_INPUT_POLLDEV=m
+CONFIG_INPUT_SPARSEKMAP=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_XTKBD=m
+CONFIG_MOUSE_PS2_SENTELIC=y
+CONFIG_MOUSE_SERIAL=m
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=m
+CONFIG_SERIO_SERPORT=m
+CONFIG_SERIO_RAW=m
+CONFIG_LEGACY_PTY_COUNT=16
+CONFIG_SERIAL_NONSTANDARD=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=16
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_8250_RSA=y
+CONFIG_HW_RANDOM=y
+CONFIG_RAW_DRIVER=m
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_PIIX4=y
+CONFIG_SENSORS_LM75=m
+CONFIG_SENSORS_LM93=m
+CONFIG_SENSORS_W83627HF=m
+CONFIG_MEDIA_SUPPORT=m
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_USB_SUPPORT=y
+CONFIG_USB_VIDEO_CLASS=m
+CONFIG_DRM=y
+CONFIG_DRM_RADEON=y
+CONFIG_VIDEO_OUTPUT_CONTROL=y
+CONFIG_FB_RADEON=y
+CONFIG_LCD_CLASS_DEVICE=y
+CONFIG_LCD_PLATFORM=m
+CONFIG_BACKLIGHT_GENERIC=m
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
+CONFIG_LOGO=y
+CONFIG_SOUND=y
+CONFIG_SND=m
+CONFIG_SND_SEQUENCER=m
+CONFIG_SND_SEQ_DUMMY=m
+# CONFIG_SND_ISA is not set
+CONFIG_SND_HDA_INTEL=m
+CONFIG_SND_HDA_PATCH_LOADER=y
+CONFIG_SND_HDA_CODEC_REALTEK=m
+CONFIG_SND_HDA_CODEC_CONEXANT=m
+# CONFIG_SND_USB is not set
+CONFIG_HID_A4TECH=m
+CONFIG_HID_SUNPLUS=m
+CONFIG_USB=y
+CONFIG_USB_MON=y
+CONFIG_USB_XHCI_HCD=m
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_ROOT_HUB_TT=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_UHCI_HCD=m
+CONFIG_USB_STORAGE=m
+CONFIG_USB_SERIAL=m
+CONFIG_USB_SERIAL_OPTION=m
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_DMADEVICES=y
+CONFIG_PM_DEVFREQ=y
+CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
+CONFIG_DEVFREQ_GOV_PERFORMANCE=y
+CONFIG_DEVFREQ_GOV_POWERSAVE=y
+CONFIG_DEVFREQ_GOV_USERSPACE=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_QUOTA=y
+# CONFIG_PRINT_QUOTA_WARNING is not set
+CONFIG_AUTOFS4_FS=y
+CONFIG_FUSE_FS=m
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=936
+CONFIG_FAT_DEFAULT_IOCHARSET="gb2312"
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_CONFIGFS_FS=y
+CONFIG_CRAMFS=m
+CONFIG_SQUASHFS=y
+CONFIG_SQUASHFS_XATTR=y
+CONFIG_NFS_FS=m
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=m
+CONFIG_NFSD=m
+CONFIG_NFSD_V3_ACL=y
+CONFIG_NFSD_V4=y
+CONFIG_CIFS=m
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_936=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_UTF8=y
+CONFIG_PRINTK_TIME=y
+CONFIG_FRAME_WARN=1024
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_PREEMPT is not set
+# CONFIG_RCU_CPU_STALL_VERBOSE is not set
+# CONFIG_FTRACE is not set
+CONFIG_SECURITY=y
+CONFIG_SECURITYFS=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_PATH=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM=y
+CONFIG_SECURITY_SELINUX_DISABLE=y
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_CRYPTO_AUTHENC=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_DEFLATE=m
index ce1d3eeeb7373373fa0f07056ff3ca39e0f0c496..b745b6a9f32281da968cb78b3812e685b8c5bd79 100644 (file)
@@ -1,7 +1,9 @@
 CONFIG_MIPS_MALTA=y
 CONFIG_CPU_LITTLE_ENDIAN=y
 CONFIG_CPU_MIPS32_R2=y
+CONFIG_PAGE_SIZE_16KB=y
 CONFIG_MIPS_MT_SMP=y
+CONFIG_NR_CPUS=8
 CONFIG_HZ_100=y
 CONFIG_SYSVIPC=y
 CONFIG_NO_HZ=y
@@ -42,7 +44,6 @@ CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_TCP_MD5SIG=y
-CONFIG_IPV6_PRIVACY=y
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_IPV6_ROUTE_INFO=y
 CONFIG_IPV6_OPTIMISTIC_DAD=y
@@ -68,7 +69,6 @@ CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_CT_NETLINK=m
-CONFIG_NETFILTER_TPROXY=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
 CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
@@ -125,7 +125,6 @@ CONFIG_IP_VS_SH=m
 CONFIG_IP_VS_SED=m
 CONFIG_IP_VS_NQ=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_IP_NF_QUEUE=m
 CONFIG_IP_NF_IPTABLES=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
@@ -185,7 +184,6 @@ CONFIG_ATALK=m
 CONFIG_DEV_APPLETALK=m
 CONFIG_IPDDP=m
 CONFIG_IPDDP_ENCAP=y
-CONFIG_IPDDP_DECAP=y
 CONFIG_PHONET=m
 CONFIG_NET_SCHED=y
 CONFIG_NET_SCH_CBQ=m
@@ -226,9 +224,9 @@ CONFIG_MAC80211_RC_DEFAULT_PID=y
 CONFIG_MAC80211_MESH=y
 CONFIG_RFKILL=m
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
 CONFIG_CONNECTOR=m
 CONFIG_MTD=y
-CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_OOPS=m
 CONFIG_MTD_CFI=y
@@ -328,7 +326,6 @@ CONFIG_LIBERTAS=m
 # CONFIG_INPUT_KEYBOARD is not set
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO_I8042 is not set
-CONFIG_VT_HW_CONSOLE_BINDING=y
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 # CONFIG_HWMON is not set
index 341bb47204d6e6b7e0c83db988e6537b8c82e1e2..4f7d952d85177b90a928d3b6c121ca3301cd4d9e 100644 (file)
@@ -3,6 +3,7 @@ CONFIG_CPU_LITTLE_ENDIAN=y
 CONFIG_CPU_MIPS32_R2=y
 CONFIG_PAGE_SIZE_16KB=y
 CONFIG_MIPS_MT_SMP=y
+CONFIG_NR_CPUS=8
 CONFIG_HZ_100=y
 CONFIG_SYSVIPC=y
 CONFIG_NO_HZ=y
@@ -44,7 +45,6 @@ CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_TCP_MD5SIG=y
-CONFIG_IPV6_PRIVACY=y
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_IPV6_ROUTE_INFO=y
 CONFIG_IPV6_OPTIMISTIC_DAD=y
@@ -70,7 +70,6 @@ CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_CT_NETLINK=m
-CONFIG_NETFILTER_TPROXY=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
 CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
@@ -127,7 +126,6 @@ CONFIG_IP_VS_SH=m
 CONFIG_IP_VS_SED=m
 CONFIG_IP_VS_NQ=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_IP_NF_QUEUE=m
 CONFIG_IP_NF_IPTABLES=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
@@ -187,7 +185,6 @@ CONFIG_ATALK=m
 CONFIG_DEV_APPLETALK=m
 CONFIG_IPDDP=m
 CONFIG_IPDDP_ENCAP=y
-CONFIG_IPDDP_DECAP=y
 CONFIG_PHONET=m
 CONFIG_NET_SCHED=y
 CONFIG_NET_SCH_CBQ=m
@@ -228,9 +225,9 @@ CONFIG_MAC80211_RC_DEFAULT_PID=y
 CONFIG_MAC80211_MESH=y
 CONFIG_RFKILL=m
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
 CONFIG_CONNECTOR=m
 CONFIG_MTD=y
-CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_OOPS=m
 CONFIG_MTD_CFI=y
@@ -300,6 +297,7 @@ CONFIG_IFB=m
 CONFIG_MACVLAN=m
 CONFIG_TUN=m
 CONFIG_VETH=m
+CONFIG_VHOST_NET=m
 CONFIG_PCNET32=y
 CONFIG_CHELSIO_T3=m
 CONFIG_AX88796=m
@@ -329,7 +327,6 @@ CONFIG_LIBERTAS=m
 # CONFIG_INPUT_KEYBOARD is not set
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO_I8042 is not set
-CONFIG_VT_HW_CONSOLE_BINDING=y
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 # CONFIG_HWMON is not set
@@ -453,4 +450,3 @@ CONFIG_VIRTUALIZATION=y
 CONFIG_KVM=m
 CONFIG_KVM_MIPS_DYN_TRANS=y
 CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS=y
-CONFIG_VHOST_NET=m
index 2b8558b7108058307cecae5c54b80826c6c12d6b..e36681c24ddcfc48901a3a5bbf20173f2a077d9b 100644 (file)
@@ -44,7 +44,6 @@ CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_TCP_MD5SIG=y
-CONFIG_IPV6_PRIVACY=y
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_IPV6_ROUTE_INFO=y
 CONFIG_IPV6_OPTIMISTIC_DAD=y
@@ -70,7 +69,6 @@ CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_CT_NETLINK=m
-CONFIG_NETFILTER_TPROXY=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
 CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
@@ -127,7 +125,6 @@ CONFIG_IP_VS_SH=m
 CONFIG_IP_VS_SED=m
 CONFIG_IP_VS_NQ=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_IP_NF_QUEUE=m
 CONFIG_IP_NF_IPTABLES=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
@@ -187,7 +184,6 @@ CONFIG_ATALK=m
 CONFIG_DEV_APPLETALK=m
 CONFIG_IPDDP=m
 CONFIG_IPDDP_ENCAP=y
-CONFIG_IPDDP_DECAP=y
 CONFIG_PHONET=m
 CONFIG_NET_SCHED=y
 CONFIG_NET_SCH_CBQ=m
@@ -228,9 +224,9 @@ CONFIG_MAC80211_RC_DEFAULT_PID=y
 CONFIG_MAC80211_MESH=y
 CONFIG_RFKILL=m
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
 CONFIG_CONNECTOR=m
 CONFIG_MTD=y
-CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_OOPS=m
 CONFIG_MTD_CFI=y
@@ -331,7 +327,6 @@ CONFIG_LIBERTAS=m
 # CONFIG_INPUT_KEYBOARD is not set
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO_I8042 is not set
-CONFIG_VT_HW_CONSOLE_BINDING=y
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 # CONFIG_HWMON is not set
index 93057a760dfa09bbe997a5fb55f8eba4212da17a..fb042ce86b4bc1da95d3b9297e6dfe88ed2f5dad 100644 (file)
@@ -44,7 +44,6 @@ CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
 # CONFIG_INET_LRO is not set
-CONFIG_IPV6_PRIVACY=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
@@ -55,7 +54,6 @@ CONFIG_ATALK=m
 CONFIG_DEV_APPLETALK=m
 CONFIG_IPDDP=m
 CONFIG_IPDDP_ENCAP=y
-CONFIG_IPDDP_DECAP=y
 CONFIG_NET_SCHED=y
 CONFIG_NET_SCH_CBQ=m
 CONFIG_NET_SCH_HTB=m
@@ -80,6 +78,7 @@ CONFIG_NET_CLS_ACT=y
 CONFIG_NET_ACT_POLICE=y
 CONFIG_NET_CLS_IND=y
 # CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_CRYPTOLOOP=m
 CONFIG_IDE=y
index 4e54b75d89be33af5f991e2bc162f45d649a6d6c..eb316447588cfa5fff62104128c86eb9f2f9ee58 100644 (file)
@@ -1,6 +1,7 @@
 CONFIG_MIPS_MALTA=y
 CONFIG_CPU_LITTLE_ENDIAN=y
 CONFIG_CPU_MIPS32_R2=y
+CONFIG_PAGE_SIZE_16KB=y
 CONFIG_MIPS_MT_SMTC=y
 # CONFIG_MIPS_MT_FPAFF is not set
 CONFIG_NR_CPUS=9
@@ -45,7 +46,6 @@ CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
 # CONFIG_INET_LRO is not set
-CONFIG_IPV6_PRIVACY=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
@@ -56,7 +56,6 @@ CONFIG_ATALK=m
 CONFIG_DEV_APPLETALK=m
 CONFIG_IPDDP=m
 CONFIG_IPDDP_ENCAP=y
-CONFIG_IPDDP_DECAP=y
 CONFIG_NET_SCHED=y
 CONFIG_NET_SCH_CBQ=m
 CONFIG_NET_SCH_HTB=m
@@ -81,6 +80,7 @@ CONFIG_NET_CLS_ACT=y
 CONFIG_NET_ACT_POLICE=y
 CONFIG_NET_CLS_IND=y
 # CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_CRYPTOLOOP=m
 CONFIG_IDE=y
index d75931850392a1fa485c95963a57690a2ab8bc6f..10ef3bed5f437534cd9ba7d98403662a242dfd7b 100644 (file)
@@ -1,10 +1,11 @@
 CONFIG_MIPS_MALTA=y
 CONFIG_CPU_LITTLE_ENDIAN=y
 CONFIG_CPU_MIPS32_R2=y
+CONFIG_PAGE_SIZE_16KB=y
 CONFIG_MIPS_MT_SMP=y
 CONFIG_SCHED_SMT=y
 CONFIG_MIPS_CMP=y
-CONFIG_NR_CPUS=2
+CONFIG_NR_CPUS=8
 CONFIG_HZ_100=y
 CONFIG_LOCALVERSION="cmp"
 CONFIG_SYSVIPC=y
@@ -47,7 +48,6 @@ CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
 # CONFIG_INET_LRO is not set
-CONFIG_IPV6_PRIVACY=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
@@ -82,6 +82,7 @@ CONFIG_NET_CLS_ACT=y
 CONFIG_NET_ACT_POLICE=y
 CONFIG_NET_CLS_IND=y
 # CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_CRYPTOLOOP=m
 CONFIG_IDE=y
diff --git a/arch/mips/configs/maltasmvp_eva_defconfig b/arch/mips/configs/maltasmvp_eva_defconfig
new file mode 100644 (file)
index 0000000..2d3002c
--- /dev/null
@@ -0,0 +1,200 @@
+CONFIG_MIPS_MALTA=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_CPU_MIPS32_3_5_FEATURES=y
+CONFIG_PAGE_SIZE_16KB=y
+CONFIG_MIPS_MT_SMP=y
+CONFIG_SCHED_SMT=y
+CONFIG_MIPS_CMP=y
+CONFIG_NR_CPUS=8
+CONFIG_HZ_100=y
+CONFIG_LOCALVERSION="cmp"
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_EMBEDDED=y
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PCI=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+# CONFIG_INET_LRO is not set
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=m
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_CLS_IND=y
+# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_IDE=y
+# CONFIG_IDE_PROC_FS is not set
+# CONFIG_IDEPCI_PCIBUS_ORDER is not set
+CONFIG_BLK_DEV_GENERIC=y
+CONFIG_BLK_DEV_PIIX=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+CONFIG_PCNET32=y
+# CONFIG_NET_VENDOR_ATHEROS is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_CISCO is not set
+# CONFIG_NET_VENDOR_DEC is not set
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_EXAR is not set
+# CONFIG_NET_VENDOR_HP is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MELLANOX is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+# CONFIG_NET_PACKET_ENGINE is not set
+# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
+# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_TOSHIBA is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_WLAN is not set
+# CONFIG_VT is not set
+CONFIG_LEGACY_PTY_COUNT=4
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_HW_RANDOM=y
+# CONFIG_HWMON is not set
+CONFIG_VIDEO_OUTPUT_CONTROL=m
+CONFIG_FB=y
+CONFIG_FIRMWARE_EDID=y
+CONFIG_FB_MATROX=y
+CONFIG_FB_MATROX_G=y
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_UHCI_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_IDE_DISK=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_BACKLIGHT=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_XFS_FS=y
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_CIFS=m
+CONFIG_CIFS_WEAK_PW_HASH=y
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_ISO8859_1=m
+# CONFIG_FTRACE is not set
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
index 9868fc9c11338746180614e3967cacab72927b8e..62344648eb7a31a0e134db2d7eaf828dd8a012aa 100644 (file)
@@ -43,7 +43,6 @@ CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
 # CONFIG_INET_LRO is not set
-CONFIG_IPV6_PRIVACY=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
@@ -54,7 +53,6 @@ CONFIG_ATALK=m
 CONFIG_DEV_APPLETALK=m
 CONFIG_IPDDP=m
 CONFIG_IPDDP_ENCAP=y
-CONFIG_IPDDP_DECAP=y
 CONFIG_NET_SCHED=y
 CONFIG_NET_SCH_CBQ=m
 CONFIG_NET_SCH_HTB=m
@@ -79,6 +77,7 @@ CONFIG_NET_CLS_ACT=y
 CONFIG_NET_ACT_POLICE=y
 CONFIG_NET_CLS_IND=y
 # CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_CRYPTOLOOP=m
 CONFIG_IDE=y
diff --git a/arch/mips/include/asm/asm-eva.h b/arch/mips/include/asm/asm-eva.h
new file mode 100644 (file)
index 0000000..e41c56e
--- /dev/null
@@ -0,0 +1,135 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2014 Imagination Technologies Ltd.
+ *
+ */
+
+#ifndef __ASM_ASM_EVA_H
+#define __ASM_ASM_EVA_H
+
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_EVA
+
+#define __BUILD_EVA_INSN(insn, reg, addr)                              \
+                               "       .set    push\n"                 \
+                               "       .set    mips0\n"                \
+                               "       .set    eva\n"                  \
+                               "       "insn" "reg", "addr "\n"        \
+                               "       .set    pop\n"
+
+#define user_cache(op, base)           __BUILD_EVA_INSN("cachee", op, base)
+#define user_ll(reg, addr)             __BUILD_EVA_INSN("lle", reg, addr)
+#define user_sc(reg, addr)             __BUILD_EVA_INSN("sce", reg, addr)
+#define user_lw(reg, addr)             __BUILD_EVA_INSN("lwe", reg, addr)
+#define user_lwl(reg, addr)            __BUILD_EVA_INSN("lwle", reg, addr)
+#define user_lwr(reg, addr)            __BUILD_EVA_INSN("lwre", reg, addr)
+#define user_lh(reg, addr)             __BUILD_EVA_INSN("lhe", reg, addr)
+#define user_lb(reg, addr)             __BUILD_EVA_INSN("lbe", reg, addr)
+#define user_lbu(reg, addr)            __BUILD_EVA_INSN("lbue", reg, addr)
+/* No 64-bit EVA instruction for loading double words */
+#define user_ld(reg, addr)             user_lw(reg, addr)
+#define user_sw(reg, addr)             __BUILD_EVA_INSN("swe", reg, addr)
+#define user_swl(reg, addr)            __BUILD_EVA_INSN("swle", reg, addr)
+#define user_swr(reg, addr)            __BUILD_EVA_INSN("swre", reg, addr)
+#define user_sh(reg, addr)             __BUILD_EVA_INSN("she", reg, addr)
+#define user_sb(reg, addr)             __BUILD_EVA_INSN("sbe", reg, addr)
+/* No 64-bit EVA instruction for storing double words */
+#define user_sd(reg, addr)             user_sw(reg, addr)
+
+#else
+
+#define user_cache(op, base)           "cache " op ", " base "\n"
+#define user_ll(reg, addr)             "ll " reg ", " addr "\n"
+#define user_sc(reg, addr)             "sc " reg ", " addr "\n"
+#define user_lw(reg, addr)             "lw " reg ", " addr "\n"
+#define user_lwl(reg, addr)            "lwl " reg ", " addr "\n"
+#define user_lwr(reg, addr)            "lwr " reg ", " addr "\n"
+#define user_lh(reg, addr)             "lh " reg ", " addr "\n"
+#define user_lb(reg, addr)             "lb " reg ", " addr "\n"
+#define user_lbu(reg, addr)            "lbu " reg ", " addr "\n"
+#define user_sw(reg, addr)             "sw " reg ", " addr "\n"
+#define user_swl(reg, addr)            "swl " reg ", " addr "\n"
+#define user_swr(reg, addr)            "swr " reg ", " addr "\n"
+#define user_sh(reg, addr)             "sh " reg ", " addr "\n"
+#define user_sb(reg, addr)             "sb " reg ", " addr "\n"
+
+#ifdef CONFIG_32BIT
+/*
+ * No 'sd' or 'ld' instructions in 32-bit but the code will
+ * do the correct thing
+ */
+#define user_sd(reg, addr)             user_sw(reg, addr)
+#define user_ld(reg, addr)             user_lw(reg, addr)
+#else
+#define user_sd(reg, addr)             "sd " reg", " addr "\n"
+#define user_ld(reg, addr)             "ld " reg", " addr "\n"
+#endif /* CONFIG_32BIT */
+
+#endif /* CONFIG_EVA */
+
+#else /* __ASSEMBLY__ */
+
+#ifdef CONFIG_EVA
+
+#define __BUILD_EVA_INSN(insn, reg, addr)                      \
+                               .set    push;                   \
+                               .set    mips0;                  \
+                               .set    eva;                    \
+                               insn reg, addr;                 \
+                               .set    pop;
+
+#define user_cache(op, base)           __BUILD_EVA_INSN(cachee, op, base)
+#define user_ll(reg, addr)             __BUILD_EVA_INSN(lle, reg, addr)
+#define user_sc(reg, addr)             __BUILD_EVA_INSN(sce, reg, addr)
+#define user_lw(reg, addr)             __BUILD_EVA_INSN(lwe, reg, addr)
+#define user_lwl(reg, addr)            __BUILD_EVA_INSN(lwle, reg, addr)
+#define user_lwr(reg, addr)            __BUILD_EVA_INSN(lwre, reg, addr)
+#define user_lh(reg, addr)             __BUILD_EVA_INSN(lhe, reg, addr)
+#define user_lb(reg, addr)             __BUILD_EVA_INSN(lbe, reg, addr)
+#define user_lbu(reg, addr)            __BUILD_EVA_INSN(lbue, reg, addr)
+/* No 64-bit EVA instruction for loading double words */
+#define user_ld(reg, addr)             user_lw(reg, addr)
+#define user_sw(reg, addr)             __BUILD_EVA_INSN(swe, reg, addr)
+#define user_swl(reg, addr)            __BUILD_EVA_INSN(swle, reg, addr)
+#define user_swr(reg, addr)            __BUILD_EVA_INSN(swre, reg, addr)
+#define user_sh(reg, addr)             __BUILD_EVA_INSN(she, reg, addr)
+#define user_sb(reg, addr)             __BUILD_EVA_INSN(sbe, reg, addr)
+/* No 64-bit EVA instruction for storing double words */
+#define user_sd(reg, addr)             user_sw(reg, addr)
+#else
+
+#define user_cache(op, base)           cache op, base
+#define user_ll(reg, addr)             ll reg, addr
+#define user_sc(reg, addr)             sc reg, addr
+#define user_lw(reg, addr)             lw reg, addr
+#define user_lwl(reg, addr)            lwl reg, addr
+#define user_lwr(reg, addr)            lwr reg, addr
+#define user_lh(reg, addr)             lh reg, addr
+#define user_lb(reg, addr)             lb reg, addr
+#define user_lbu(reg, addr)            lbu reg, addr
+#define user_sw(reg, addr)             sw reg, addr
+#define user_swl(reg, addr)            swl reg, addr
+#define user_swr(reg, addr)            swr reg, addr
+#define user_sh(reg, addr)             sh reg, addr
+#define user_sb(reg, addr)             sb reg, addr
+
+#ifdef CONFIG_32BIT
+/*
+ * No 'sd' or 'ld' instructions in 32-bit but the code will
+ * do the correct thing
+ */
+#define user_sd(reg, addr)             user_sw(reg, addr)
+#define user_ld(reg, addr)             user_lw(reg, addr)
+#else
+#define user_sd(reg, addr)             sd reg, addr
+#define user_ld(reg, addr)             ld reg, addr
+#endif /* CONFIG_32BIT */
+
+#endif /* CONFIG_EVA */
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_ASM_EVA_H */
index 879691d194af426f5532d47c4e467491bf785038..7c26b28bf2526856f7a96e1a2c39c61b78bf4e59 100644 (file)
@@ -18,6 +18,7 @@
 #define __ASM_ASM_H
 
 #include <asm/sgidefs.h>
+#include <asm/asm-eva.h>
 
 #ifndef CAT
 #ifdef __STDC__
@@ -145,19 +146,27 @@ symbol            =       value
 
 #define PREF(hint,addr)                                        \
                .set    push;                           \
-               .set    mips4;                          \
+               .set    arch=r5000;                     \
                pref    hint, addr;                     \
                .set    pop
 
+#define PREFE(hint, addr)                              \
+               .set    push;                           \
+               .set    mips0;                          \
+               .set    eva;                            \
+               prefe   hint, addr;                     \
+               .set    pop
+
 #define PREFX(hint,addr)                               \
                .set    push;                           \
-               .set    mips4;                          \
+               .set    arch=r5000;                     \
                prefx   hint, addr;                     \
                .set    pop
 
 #else /* !CONFIG_CPU_HAS_PREFETCH */
 
 #define PREF(hint, addr)
+#define PREFE(hint, addr)
 #define PREFX(hint, addr)
 
 #endif /* !CONFIG_CPU_HAS_PREFETCH */
index 70e1f176f123c12c07faf378dc0fac16b0a67bec..e38c2811d4e23645e962df3b2f27fee3cdfb9804 100644 (file)
 
        .macro  fpu_save_single thread tmp=t0
        cfc1    \tmp,  fcr31
-       swc1    $f0,  THREAD_FPR0(\thread)
-       swc1    $f1,  THREAD_FPR1(\thread)
-       swc1    $f2,  THREAD_FPR2(\thread)
-       swc1    $f3,  THREAD_FPR3(\thread)
-       swc1    $f4,  THREAD_FPR4(\thread)
-       swc1    $f5,  THREAD_FPR5(\thread)
-       swc1    $f6,  THREAD_FPR6(\thread)
-       swc1    $f7,  THREAD_FPR7(\thread)
-       swc1    $f8,  THREAD_FPR8(\thread)
-       swc1    $f9,  THREAD_FPR9(\thread)
-       swc1    $f10, THREAD_FPR10(\thread)
-       swc1    $f11, THREAD_FPR11(\thread)
-       swc1    $f12, THREAD_FPR12(\thread)
-       swc1    $f13, THREAD_FPR13(\thread)
-       swc1    $f14, THREAD_FPR14(\thread)
-       swc1    $f15, THREAD_FPR15(\thread)
-       swc1    $f16, THREAD_FPR16(\thread)
-       swc1    $f17, THREAD_FPR17(\thread)
-       swc1    $f18, THREAD_FPR18(\thread)
-       swc1    $f19, THREAD_FPR19(\thread)
-       swc1    $f20, THREAD_FPR20(\thread)
-       swc1    $f21, THREAD_FPR21(\thread)
-       swc1    $f22, THREAD_FPR22(\thread)
-       swc1    $f23, THREAD_FPR23(\thread)
-       swc1    $f24, THREAD_FPR24(\thread)
-       swc1    $f25, THREAD_FPR25(\thread)
-       swc1    $f26, THREAD_FPR26(\thread)
-       swc1    $f27, THREAD_FPR27(\thread)
-       swc1    $f28, THREAD_FPR28(\thread)
-       swc1    $f29, THREAD_FPR29(\thread)
-       swc1    $f30, THREAD_FPR30(\thread)
-       swc1    $f31, THREAD_FPR31(\thread)
+       swc1    $f0,  THREAD_FPR0_LS64(\thread)
+       swc1    $f1,  THREAD_FPR1_LS64(\thread)
+       swc1    $f2,  THREAD_FPR2_LS64(\thread)
+       swc1    $f3,  THREAD_FPR3_LS64(\thread)
+       swc1    $f4,  THREAD_FPR4_LS64(\thread)
+       swc1    $f5,  THREAD_FPR5_LS64(\thread)
+       swc1    $f6,  THREAD_FPR6_LS64(\thread)
+       swc1    $f7,  THREAD_FPR7_LS64(\thread)
+       swc1    $f8,  THREAD_FPR8_LS64(\thread)
+       swc1    $f9,  THREAD_FPR9_LS64(\thread)
+       swc1    $f10, THREAD_FPR10_LS64(\thread)
+       swc1    $f11, THREAD_FPR11_LS64(\thread)
+       swc1    $f12, THREAD_FPR12_LS64(\thread)
+       swc1    $f13, THREAD_FPR13_LS64(\thread)
+       swc1    $f14, THREAD_FPR14_LS64(\thread)
+       swc1    $f15, THREAD_FPR15_LS64(\thread)
+       swc1    $f16, THREAD_FPR16_LS64(\thread)
+       swc1    $f17, THREAD_FPR17_LS64(\thread)
+       swc1    $f18, THREAD_FPR18_LS64(\thread)
+       swc1    $f19, THREAD_FPR19_LS64(\thread)
+       swc1    $f20, THREAD_FPR20_LS64(\thread)
+       swc1    $f21, THREAD_FPR21_LS64(\thread)
+       swc1    $f22, THREAD_FPR22_LS64(\thread)
+       swc1    $f23, THREAD_FPR23_LS64(\thread)
+       swc1    $f24, THREAD_FPR24_LS64(\thread)
+       swc1    $f25, THREAD_FPR25_LS64(\thread)
+       swc1    $f26, THREAD_FPR26_LS64(\thread)
+       swc1    $f27, THREAD_FPR27_LS64(\thread)
+       swc1    $f28, THREAD_FPR28_LS64(\thread)
+       swc1    $f29, THREAD_FPR29_LS64(\thread)
+       swc1    $f30, THREAD_FPR30_LS64(\thread)
+       swc1    $f31, THREAD_FPR31_LS64(\thread)
        sw      \tmp, THREAD_FCR31(\thread)
        .endm
 
        .macro  fpu_restore_single thread tmp=t0
        lw      \tmp, THREAD_FCR31(\thread)
-       lwc1    $f0,  THREAD_FPR0(\thread)
-       lwc1    $f1,  THREAD_FPR1(\thread)
-       lwc1    $f2,  THREAD_FPR2(\thread)
-       lwc1    $f3,  THREAD_FPR3(\thread)
-       lwc1    $f4,  THREAD_FPR4(\thread)
-       lwc1    $f5,  THREAD_FPR5(\thread)
-       lwc1    $f6,  THREAD_FPR6(\thread)
-       lwc1    $f7,  THREAD_FPR7(\thread)
-       lwc1    $f8,  THREAD_FPR8(\thread)
-       lwc1    $f9,  THREAD_FPR9(\thread)
-       lwc1    $f10, THREAD_FPR10(\thread)
-       lwc1    $f11, THREAD_FPR11(\thread)
-       lwc1    $f12, THREAD_FPR12(\thread)
-       lwc1    $f13, THREAD_FPR13(\thread)
-       lwc1    $f14, THREAD_FPR14(\thread)
-       lwc1    $f15, THREAD_FPR15(\thread)
-       lwc1    $f16, THREAD_FPR16(\thread)
-       lwc1    $f17, THREAD_FPR17(\thread)
-       lwc1    $f18, THREAD_FPR18(\thread)
-       lwc1    $f19, THREAD_FPR19(\thread)
-       lwc1    $f20, THREAD_FPR20(\thread)
-       lwc1    $f21, THREAD_FPR21(\thread)
-       lwc1    $f22, THREAD_FPR22(\thread)
-       lwc1    $f23, THREAD_FPR23(\thread)
-       lwc1    $f24, THREAD_FPR24(\thread)
-       lwc1    $f25, THREAD_FPR25(\thread)
-       lwc1    $f26, THREAD_FPR26(\thread)
-       lwc1    $f27, THREAD_FPR27(\thread)
-       lwc1    $f28, THREAD_FPR28(\thread)
-       lwc1    $f29, THREAD_FPR29(\thread)
-       lwc1    $f30, THREAD_FPR30(\thread)
-       lwc1    $f31, THREAD_FPR31(\thread)
+       lwc1    $f0,  THREAD_FPR0_LS64(\thread)
+       lwc1    $f1,  THREAD_FPR1_LS64(\thread)
+       lwc1    $f2,  THREAD_FPR2_LS64(\thread)
+       lwc1    $f3,  THREAD_FPR3_LS64(\thread)
+       lwc1    $f4,  THREAD_FPR4_LS64(\thread)
+       lwc1    $f5,  THREAD_FPR5_LS64(\thread)
+       lwc1    $f6,  THREAD_FPR6_LS64(\thread)
+       lwc1    $f7,  THREAD_FPR7_LS64(\thread)
+       lwc1    $f8,  THREAD_FPR8_LS64(\thread)
+       lwc1    $f9,  THREAD_FPR9_LS64(\thread)
+       lwc1    $f10, THREAD_FPR10_LS64(\thread)
+       lwc1    $f11, THREAD_FPR11_LS64(\thread)
+       lwc1    $f12, THREAD_FPR12_LS64(\thread)
+       lwc1    $f13, THREAD_FPR13_LS64(\thread)
+       lwc1    $f14, THREAD_FPR14_LS64(\thread)
+       lwc1    $f15, THREAD_FPR15_LS64(\thread)
+       lwc1    $f16, THREAD_FPR16_LS64(\thread)
+       lwc1    $f17, THREAD_FPR17_LS64(\thread)
+       lwc1    $f18, THREAD_FPR18_LS64(\thread)
+       lwc1    $f19, THREAD_FPR19_LS64(\thread)
+       lwc1    $f20, THREAD_FPR20_LS64(\thread)
+       lwc1    $f21, THREAD_FPR21_LS64(\thread)
+       lwc1    $f22, THREAD_FPR22_LS64(\thread)
+       lwc1    $f23, THREAD_FPR23_LS64(\thread)
+       lwc1    $f24, THREAD_FPR24_LS64(\thread)
+       lwc1    $f25, THREAD_FPR25_LS64(\thread)
+       lwc1    $f26, THREAD_FPR26_LS64(\thread)
+       lwc1    $f27, THREAD_FPR27_LS64(\thread)
+       lwc1    $f28, THREAD_FPR28_LS64(\thread)
+       lwc1    $f29, THREAD_FPR29_LS64(\thread)
+       lwc1    $f30, THREAD_FPR30_LS64(\thread)
+       lwc1    $f31, THREAD_FPR31_LS64(\thread)
        ctc1    \tmp, fcr31
        .endm
 
index 4225e99bd7bfdbe75b0e47cd8b974c2dd92e1ddc..b464b8b1147a175fd9315ffa5afaab0c75d2c63a 100644 (file)
 
        .macro  fpu_save_16even thread tmp=t0
        cfc1    \tmp, fcr31
-       sdc1    $f0,  THREAD_FPR0(\thread)
-       sdc1    $f2,  THREAD_FPR2(\thread)
-       sdc1    $f4,  THREAD_FPR4(\thread)
-       sdc1    $f6,  THREAD_FPR6(\thread)
-       sdc1    $f8,  THREAD_FPR8(\thread)
-       sdc1    $f10, THREAD_FPR10(\thread)
-       sdc1    $f12, THREAD_FPR12(\thread)
-       sdc1    $f14, THREAD_FPR14(\thread)
-       sdc1    $f16, THREAD_FPR16(\thread)
-       sdc1    $f18, THREAD_FPR18(\thread)
-       sdc1    $f20, THREAD_FPR20(\thread)
-       sdc1    $f22, THREAD_FPR22(\thread)
-       sdc1    $f24, THREAD_FPR24(\thread)
-       sdc1    $f26, THREAD_FPR26(\thread)
-       sdc1    $f28, THREAD_FPR28(\thread)
-       sdc1    $f30, THREAD_FPR30(\thread)
+       sdc1    $f0,  THREAD_FPR0_LS64(\thread)
+       sdc1    $f2,  THREAD_FPR2_LS64(\thread)
+       sdc1    $f4,  THREAD_FPR4_LS64(\thread)
+       sdc1    $f6,  THREAD_FPR6_LS64(\thread)
+       sdc1    $f8,  THREAD_FPR8_LS64(\thread)
+       sdc1    $f10, THREAD_FPR10_LS64(\thread)
+       sdc1    $f12, THREAD_FPR12_LS64(\thread)
+       sdc1    $f14, THREAD_FPR14_LS64(\thread)
+       sdc1    $f16, THREAD_FPR16_LS64(\thread)
+       sdc1    $f18, THREAD_FPR18_LS64(\thread)
+       sdc1    $f20, THREAD_FPR20_LS64(\thread)
+       sdc1    $f22, THREAD_FPR22_LS64(\thread)
+       sdc1    $f24, THREAD_FPR24_LS64(\thread)
+       sdc1    $f26, THREAD_FPR26_LS64(\thread)
+       sdc1    $f28, THREAD_FPR28_LS64(\thread)
+       sdc1    $f30, THREAD_FPR30_LS64(\thread)
        sw      \tmp, THREAD_FCR31(\thread)
        .endm
 
        .macro  fpu_save_16odd thread
        .set    push
        .set    mips64r2
-       sdc1    $f1,  THREAD_FPR1(\thread)
-       sdc1    $f3,  THREAD_FPR3(\thread)
-       sdc1    $f5,  THREAD_FPR5(\thread)
-       sdc1    $f7,  THREAD_FPR7(\thread)
-       sdc1    $f9,  THREAD_FPR9(\thread)
-       sdc1    $f11, THREAD_FPR11(\thread)
-       sdc1    $f13, THREAD_FPR13(\thread)
-       sdc1    $f15, THREAD_FPR15(\thread)
-       sdc1    $f17, THREAD_FPR17(\thread)
-       sdc1    $f19, THREAD_FPR19(\thread)
-       sdc1    $f21, THREAD_FPR21(\thread)
-       sdc1    $f23, THREAD_FPR23(\thread)
-       sdc1    $f25, THREAD_FPR25(\thread)
-       sdc1    $f27, THREAD_FPR27(\thread)
-       sdc1    $f29, THREAD_FPR29(\thread)
-       sdc1    $f31, THREAD_FPR31(\thread)
+       sdc1    $f1,  THREAD_FPR1_LS64(\thread)
+       sdc1    $f3,  THREAD_FPR3_LS64(\thread)
+       sdc1    $f5,  THREAD_FPR5_LS64(\thread)
+       sdc1    $f7,  THREAD_FPR7_LS64(\thread)
+       sdc1    $f9,  THREAD_FPR9_LS64(\thread)
+       sdc1    $f11, THREAD_FPR11_LS64(\thread)
+       sdc1    $f13, THREAD_FPR13_LS64(\thread)
+       sdc1    $f15, THREAD_FPR15_LS64(\thread)
+       sdc1    $f17, THREAD_FPR17_LS64(\thread)
+       sdc1    $f19, THREAD_FPR19_LS64(\thread)
+       sdc1    $f21, THREAD_FPR21_LS64(\thread)
+       sdc1    $f23, THREAD_FPR23_LS64(\thread)
+       sdc1    $f25, THREAD_FPR25_LS64(\thread)
+       sdc1    $f27, THREAD_FPR27_LS64(\thread)
+       sdc1    $f29, THREAD_FPR29_LS64(\thread)
+       sdc1    $f31, THREAD_FPR31_LS64(\thread)
        .set    pop
        .endm
 
 
        .macro  fpu_restore_16even thread tmp=t0
        lw      \tmp, THREAD_FCR31(\thread)
-       ldc1    $f0,  THREAD_FPR0(\thread)
-       ldc1    $f2,  THREAD_FPR2(\thread)
-       ldc1    $f4,  THREAD_FPR4(\thread)
-       ldc1    $f6,  THREAD_FPR6(\thread)
-       ldc1    $f8,  THREAD_FPR8(\thread)
-       ldc1    $f10, THREAD_FPR10(\thread)
-       ldc1    $f12, THREAD_FPR12(\thread)
-       ldc1    $f14, THREAD_FPR14(\thread)
-       ldc1    $f16, THREAD_FPR16(\thread)
-       ldc1    $f18, THREAD_FPR18(\thread)
-       ldc1    $f20, THREAD_FPR20(\thread)
-       ldc1    $f22, THREAD_FPR22(\thread)
-       ldc1    $f24, THREAD_FPR24(\thread)
-       ldc1    $f26, THREAD_FPR26(\thread)
-       ldc1    $f28, THREAD_FPR28(\thread)
-       ldc1    $f30, THREAD_FPR30(\thread)
+       ldc1    $f0,  THREAD_FPR0_LS64(\thread)
+       ldc1    $f2,  THREAD_FPR2_LS64(\thread)
+       ldc1    $f4,  THREAD_FPR4_LS64(\thread)
+       ldc1    $f6,  THREAD_FPR6_LS64(\thread)
+       ldc1    $f8,  THREAD_FPR8_LS64(\thread)
+       ldc1    $f10, THREAD_FPR10_LS64(\thread)
+       ldc1    $f12, THREAD_FPR12_LS64(\thread)
+       ldc1    $f14, THREAD_FPR14_LS64(\thread)
+       ldc1    $f16, THREAD_FPR16_LS64(\thread)
+       ldc1    $f18, THREAD_FPR18_LS64(\thread)
+       ldc1    $f20, THREAD_FPR20_LS64(\thread)
+       ldc1    $f22, THREAD_FPR22_LS64(\thread)
+       ldc1    $f24, THREAD_FPR24_LS64(\thread)
+       ldc1    $f26, THREAD_FPR26_LS64(\thread)
+       ldc1    $f28, THREAD_FPR28_LS64(\thread)
+       ldc1    $f30, THREAD_FPR30_LS64(\thread)
        ctc1    \tmp, fcr31
        .endm
 
        .macro  fpu_restore_16odd thread
        .set    push
        .set    mips64r2
-       ldc1    $f1,  THREAD_FPR1(\thread)
-       ldc1    $f3,  THREAD_FPR3(\thread)
-       ldc1    $f5,  THREAD_FPR5(\thread)
-       ldc1    $f7,  THREAD_FPR7(\thread)
-       ldc1    $f9,  THREAD_FPR9(\thread)
-       ldc1    $f11, THREAD_FPR11(\thread)
-       ldc1    $f13, THREAD_FPR13(\thread)
-       ldc1    $f15, THREAD_FPR15(\thread)
-       ldc1    $f17, THREAD_FPR17(\thread)
-       ldc1    $f19, THREAD_FPR19(\thread)
-       ldc1    $f21, THREAD_FPR21(\thread)
-       ldc1    $f23, THREAD_FPR23(\thread)
-       ldc1    $f25, THREAD_FPR25(\thread)
-       ldc1    $f27, THREAD_FPR27(\thread)
-       ldc1    $f29, THREAD_FPR29(\thread)
-       ldc1    $f31, THREAD_FPR31(\thread)
+       ldc1    $f1,  THREAD_FPR1_LS64(\thread)
+       ldc1    $f3,  THREAD_FPR3_LS64(\thread)
+       ldc1    $f5,  THREAD_FPR5_LS64(\thread)
+       ldc1    $f7,  THREAD_FPR7_LS64(\thread)
+       ldc1    $f9,  THREAD_FPR9_LS64(\thread)
+       ldc1    $f11, THREAD_FPR11_LS64(\thread)
+       ldc1    $f13, THREAD_FPR13_LS64(\thread)
+       ldc1    $f15, THREAD_FPR15_LS64(\thread)
+       ldc1    $f17, THREAD_FPR17_LS64(\thread)
+       ldc1    $f19, THREAD_FPR19_LS64(\thread)
+       ldc1    $f21, THREAD_FPR21_LS64(\thread)
+       ldc1    $f23, THREAD_FPR23_LS64(\thread)
+       ldc1    $f25, THREAD_FPR25_LS64(\thread)
+       ldc1    $f27, THREAD_FPR27_LS64(\thread)
+       ldc1    $f29, THREAD_FPR29_LS64(\thread)
+       ldc1    $f31, THREAD_FPR31_LS64(\thread)
        .set    pop
        .endm
 
        fpu_restore_16even \thread \tmp
        .endm
 
+#ifdef CONFIG_CPU_MIPSR2
+       .macro  _EXT    rd, rs, p, s
+       ext     \rd, \rs, \p, \s
+       .endm
+#else /* !CONFIG_CPU_MIPSR2 */
+       .macro  _EXT    rd, rs, p, s
+       srl     \rd, \rs, \p
+       andi    \rd, \rd, (1 << \s) - 1
+       .endm
+#endif /* !CONFIG_CPU_MIPSR2 */
+
 /*
  * Temporary until all gas have MT ASE support
  */
         .word  0x41800000 | (\rt << 16) | (\rd << 11) | (\u << 5) | (\sel)
        .endm
 
+#ifdef TOOLCHAIN_SUPPORTS_MSA
+       .macro  ld_d    wd, off, base
+       .set    push
+       .set    mips32r2
+       .set    msa
+       ld.d    $w\wd, \off(\base)
+       .set    pop
+       .endm
+
+       .macro  st_d    wd, off, base
+       .set    push
+       .set    mips32r2
+       .set    msa
+       st.d    $w\wd, \off(\base)
+       .set    pop
+       .endm
+
+       .macro  copy_u_w        rd, ws, n
+       .set    push
+       .set    mips32r2
+       .set    msa
+       copy_u.w \rd, $w\ws[\n]
+       .set    pop
+       .endm
+
+       .macro  copy_u_d        rd, ws, n
+       .set    push
+       .set    mips64r2
+       .set    msa
+       copy_u.d \rd, $w\ws[\n]
+       .set    pop
+       .endm
+
+       .macro  insert_w        wd, n, rs
+       .set    push
+       .set    mips32r2
+       .set    msa
+       insert.w $w\wd[\n], \rs
+       .set    pop
+       .endm
+
+       .macro  insert_d        wd, n, rs
+       .set    push
+       .set    mips64r2
+       .set    msa
+       insert.d $w\wd[\n], \rs
+       .set    pop
+       .endm
+#else
+       /*
+        * Temporary until all toolchains in use include MSA support.
+        */
+       .macro  cfcmsa  rd, cs
+       .set    push
+       .set    noat
+       .word   0x787e0059 | (\cs << 11)
+       move    \rd, $1
+       .set    pop
+       .endm
+
+       .macro  ctcmsa  cd, rs
+       .set    push
+       .set    noat
+       move    $1, \rs
+       .word   0x783e0819 | (\cd << 6)
+       .set    pop
+       .endm
+
+       .macro  ld_d    wd, off, base
+       .set    push
+       .set    noat
+       add     $1, \base, \off
+       .word   0x78000823 | (\wd << 6)
+       .set    pop
+       .endm
+
+       .macro  st_d    wd, off, base
+       .set    push
+       .set    noat
+       add     $1, \base, \off
+       .word   0x78000827 | (\wd << 6)
+       .set    pop
+       .endm
+
+       .macro  copy_u_w        rd, ws, n
+       .set    push
+       .set    noat
+       .word   0x78f00059 | (\n << 16) | (\ws << 11)
+       /* move triggers an assembler bug... */
+       or      \rd, $1, zero
+       .set    pop
+       .endm
+
+       .macro  copy_u_d        rd, ws, n
+       .set    push
+       .set    noat
+       .word   0x78f80059 | (\n << 16) | (\ws << 11)
+       /* move triggers an assembler bug... */
+       or      \rd, $1, zero
+       .set    pop
+       .endm
+
+       .macro  insert_w        wd, n, rs
+       .set    push
+       .set    noat
+       /* move triggers an assembler bug... */
+       or      $1, \rs, zero
+       .word   0x79300819 | (\n << 16) | (\wd << 6)
+       .set    pop
+       .endm
+
+       .macro  insert_d        wd, n, rs
+       .set    push
+       .set    noat
+       /* move triggers an assembler bug... */
+       or      $1, \rs, zero
+       .word   0x79380819 | (\n << 16) | (\wd << 6)
+       .set    pop
+       .endm
+#endif
+
+       .macro  msa_save_all    thread
+       st_d    0, THREAD_FPR0, \thread
+       st_d    1, THREAD_FPR1, \thread
+       st_d    2, THREAD_FPR2, \thread
+       st_d    3, THREAD_FPR3, \thread
+       st_d    4, THREAD_FPR4, \thread
+       st_d    5, THREAD_FPR5, \thread
+       st_d    6, THREAD_FPR6, \thread
+       st_d    7, THREAD_FPR7, \thread
+       st_d    8, THREAD_FPR8, \thread
+       st_d    9, THREAD_FPR9, \thread
+       st_d    10, THREAD_FPR10, \thread
+       st_d    11, THREAD_FPR11, \thread
+       st_d    12, THREAD_FPR12, \thread
+       st_d    13, THREAD_FPR13, \thread
+       st_d    14, THREAD_FPR14, \thread
+       st_d    15, THREAD_FPR15, \thread
+       st_d    16, THREAD_FPR16, \thread
+       st_d    17, THREAD_FPR17, \thread
+       st_d    18, THREAD_FPR18, \thread
+       st_d    19, THREAD_FPR19, \thread
+       st_d    20, THREAD_FPR20, \thread
+       st_d    21, THREAD_FPR21, \thread
+       st_d    22, THREAD_FPR22, \thread
+       st_d    23, THREAD_FPR23, \thread
+       st_d    24, THREAD_FPR24, \thread
+       st_d    25, THREAD_FPR25, \thread
+       st_d    26, THREAD_FPR26, \thread
+       st_d    27, THREAD_FPR27, \thread
+       st_d    28, THREAD_FPR28, \thread
+       st_d    29, THREAD_FPR29, \thread
+       st_d    30, THREAD_FPR30, \thread
+       st_d    31, THREAD_FPR31, \thread
+       .endm
+
+       .macro  msa_restore_all thread
+       ld_d    0, THREAD_FPR0, \thread
+       ld_d    1, THREAD_FPR1, \thread
+       ld_d    2, THREAD_FPR2, \thread
+       ld_d    3, THREAD_FPR3, \thread
+       ld_d    4, THREAD_FPR4, \thread
+       ld_d    5, THREAD_FPR5, \thread
+       ld_d    6, THREAD_FPR6, \thread
+       ld_d    7, THREAD_FPR7, \thread
+       ld_d    8, THREAD_FPR8, \thread
+       ld_d    9, THREAD_FPR9, \thread
+       ld_d    10, THREAD_FPR10, \thread
+       ld_d    11, THREAD_FPR11, \thread
+       ld_d    12, THREAD_FPR12, \thread
+       ld_d    13, THREAD_FPR13, \thread
+       ld_d    14, THREAD_FPR14, \thread
+       ld_d    15, THREAD_FPR15, \thread
+       ld_d    16, THREAD_FPR16, \thread
+       ld_d    17, THREAD_FPR17, \thread
+       ld_d    18, THREAD_FPR18, \thread
+       ld_d    19, THREAD_FPR19, \thread
+       ld_d    20, THREAD_FPR20, \thread
+       ld_d    21, THREAD_FPR21, \thread
+       ld_d    22, THREAD_FPR22, \thread
+       ld_d    23, THREAD_FPR23, \thread
+       ld_d    24, THREAD_FPR24, \thread
+       ld_d    25, THREAD_FPR25, \thread
+       ld_d    26, THREAD_FPR26, \thread
+       ld_d    27, THREAD_FPR27, \thread
+       ld_d    28, THREAD_FPR28, \thread
+       ld_d    29, THREAD_FPR29, \thread
+       ld_d    30, THREAD_FPR30, \thread
+       ld_d    31, THREAD_FPR31, \thread
+       .endm
+
 #endif /* _ASM_ASMMACRO_H */
index 7eed2f261710c54932ef3b031653c37dce7d6380..e8eb3d53a241614dac724103806ce94e296954da 100644 (file)
@@ -53,7 +53,7 @@ static __inline__ void atomic_add(int i, atomic_t * v)
                int temp;
 
                __asm__ __volatile__(
-               "       .set    mips3                                   \n"
+               "       .set    arch=r4000                              \n"
                "1:     ll      %0, %1          # atomic_add            \n"
                "       addu    %0, %2                                  \n"
                "       sc      %0, %1                                  \n"
@@ -66,7 +66,7 @@ static __inline__ void atomic_add(int i, atomic_t * v)
 
                do {
                        __asm__ __volatile__(
-                       "       .set    mips3                           \n"
+                       "       .set    arch=r4000                      \n"
                        "       ll      %0, %1          # atomic_add    \n"
                        "       addu    %0, %2                          \n"
                        "       sc      %0, %1                          \n"
@@ -96,7 +96,7 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
                int temp;
 
                __asm__ __volatile__(
-               "       .set    mips3                                   \n"
+               "       .set    arch=r4000                              \n"
                "1:     ll      %0, %1          # atomic_sub            \n"
                "       subu    %0, %2                                  \n"
                "       sc      %0, %1                                  \n"
@@ -109,7 +109,7 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
 
                do {
                        __asm__ __volatile__(
-                       "       .set    mips3                           \n"
+                       "       .set    arch=r4000                      \n"
                        "       ll      %0, %1          # atomic_sub    \n"
                        "       subu    %0, %2                          \n"
                        "       sc      %0, %1                          \n"
@@ -139,7 +139,7 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
                int temp;
 
                __asm__ __volatile__(
-               "       .set    mips3                                   \n"
+               "       .set    arch=r4000                              \n"
                "1:     ll      %1, %2          # atomic_add_return     \n"
                "       addu    %0, %1, %3                              \n"
                "       sc      %0, %2                                  \n"
@@ -153,7 +153,7 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
 
                do {
                        __asm__ __volatile__(
-                       "       .set    mips3                           \n"
+                       "       .set    arch=r4000                      \n"
                        "       ll      %1, %2  # atomic_add_return     \n"
                        "       addu    %0, %1, %3                      \n"
                        "       sc      %0, %2                          \n"
@@ -188,7 +188,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
                int temp;
 
                __asm__ __volatile__(
-               "       .set    mips3                                   \n"
+               "       .set    arch=r4000                              \n"
                "1:     ll      %1, %2          # atomic_sub_return     \n"
                "       subu    %0, %1, %3                              \n"
                "       sc      %0, %2                                  \n"
@@ -205,7 +205,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
 
                do {
                        __asm__ __volatile__(
-                       "       .set    mips3                           \n"
+                       "       .set    arch=r4000                      \n"
                        "       ll      %1, %2  # atomic_sub_return     \n"
                        "       subu    %0, %1, %3                      \n"
                        "       sc      %0, %2                          \n"
@@ -248,7 +248,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
                int temp;
 
                __asm__ __volatile__(
-               "       .set    mips3                                   \n"
+               "       .set    arch=r4000                              \n"
                "1:     ll      %1, %2          # atomic_sub_if_positive\n"
                "       subu    %0, %1, %3                              \n"
                "       bltz    %0, 1f                                  \n"
@@ -266,7 +266,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
                int temp;
 
                __asm__ __volatile__(
-               "       .set    mips3                                   \n"
+               "       .set    arch=r4000                              \n"
                "1:     ll      %1, %2          # atomic_sub_if_positive\n"
                "       subu    %0, %1, %3                              \n"
                "       bltz    %0, 1f                                  \n"
@@ -420,7 +420,7 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
                long temp;
 
                __asm__ __volatile__(
-               "       .set    mips3                                   \n"
+               "       .set    arch=r4000                              \n"
                "1:     lld     %0, %1          # atomic64_add          \n"
                "       daddu   %0, %2                                  \n"
                "       scd     %0, %1                                  \n"
@@ -433,7 +433,7 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
 
                do {
                        __asm__ __volatile__(
-                       "       .set    mips3                           \n"
+                       "       .set    arch=r4000                      \n"
                        "       lld     %0, %1          # atomic64_add  \n"
                        "       daddu   %0, %2                          \n"
                        "       scd     %0, %1                          \n"
@@ -463,7 +463,7 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
                long temp;
 
                __asm__ __volatile__(
-               "       .set    mips3                                   \n"
+               "       .set    arch=r4000                              \n"
                "1:     lld     %0, %1          # atomic64_sub          \n"
                "       dsubu   %0, %2                                  \n"
                "       scd     %0, %1                                  \n"
@@ -476,7 +476,7 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
 
                do {
                        __asm__ __volatile__(
-                       "       .set    mips3                           \n"
+                       "       .set    arch=r4000                      \n"
                        "       lld     %0, %1          # atomic64_sub  \n"
                        "       dsubu   %0, %2                          \n"
                        "       scd     %0, %1                          \n"
@@ -506,7 +506,7 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
                long temp;
 
                __asm__ __volatile__(
-               "       .set    mips3                                   \n"
+               "       .set    arch=r4000                              \n"
                "1:     lld     %1, %2          # atomic64_add_return   \n"
                "       daddu   %0, %1, %3                              \n"
                "       scd     %0, %2                                  \n"
@@ -520,7 +520,7 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 
                do {
                        __asm__ __volatile__(
-                       "       .set    mips3                           \n"
+                       "       .set    arch=r4000                      \n"
                        "       lld     %1, %2  # atomic64_add_return   \n"
                        "       daddu   %0, %1, %3                      \n"
                        "       scd     %0, %2                          \n"
@@ -556,7 +556,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
                long temp;
 
                __asm__ __volatile__(
-               "       .set    mips3                                   \n"
+               "       .set    arch=r4000                              \n"
                "1:     lld     %1, %2          # atomic64_sub_return   \n"
                "       dsubu   %0, %1, %3                              \n"
                "       scd     %0, %2                                  \n"
@@ -571,7 +571,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 
                do {
                        __asm__ __volatile__(
-                       "       .set    mips3                           \n"
+                       "       .set    arch=r4000                      \n"
                        "       lld     %1, %2  # atomic64_sub_return   \n"
                        "       dsubu   %0, %1, %3                      \n"
                        "       scd     %0, %2                          \n"
@@ -615,7 +615,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
                long temp;
 
                __asm__ __volatile__(
-               "       .set    mips3                                   \n"
+               "       .set    arch=r4000                              \n"
                "1:     lld     %1, %2          # atomic64_sub_if_positive\n"
                "       dsubu   %0, %1, %3                              \n"
                "       bltz    %0, 1f                                  \n"
@@ -633,7 +633,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
                long temp;
 
                __asm__ __volatile__(
-               "       .set    mips3                                   \n"
+               "       .set    arch=r4000                              \n"
                "1:     lld     %1, %2          # atomic64_sub_if_positive\n"
                "       dsubu   %0, %1, %3                              \n"
                "       bltz    %0, 1f                                  \n"
index 71305a8b3d78b8053b46a78d63f332cffc3a95db..6a65d49e2c0d4e5284173189b3f33316d4e069bb 100644 (file)
@@ -79,7 +79,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
 
        if (kernel_uses_llsc && R10000_LLSC_WAR) {
                __asm__ __volatile__(
-               "       .set    mips3                                   \n"
+               "       .set    arch=r4000                              \n"
                "1:     " __LL "%0, %1                  # set_bit       \n"
                "       or      %0, %2                                  \n"
                "       " __SC  "%0, %1                                 \n"
@@ -101,7 +101,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
        } else if (kernel_uses_llsc) {
                do {
                        __asm__ __volatile__(
-                       "       .set    mips3                           \n"
+                       "       .set    arch=r4000                      \n"
                        "       " __LL "%0, %1          # set_bit       \n"
                        "       or      %0, %2                          \n"
                        "       " __SC  "%0, %1                         \n"
@@ -131,7 +131,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
 
        if (kernel_uses_llsc && R10000_LLSC_WAR) {
                __asm__ __volatile__(
-               "       .set    mips3                                   \n"
+               "       .set    arch=r4000                              \n"
                "1:     " __LL "%0, %1                  # clear_bit     \n"
                "       and     %0, %2                                  \n"
                "       " __SC "%0, %1                                  \n"
@@ -153,7 +153,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
        } else if (kernel_uses_llsc) {
                do {
                        __asm__ __volatile__(
-                       "       .set    mips3                           \n"
+                       "       .set    arch=r4000                      \n"
                        "       " __LL "%0, %1          # clear_bit     \n"
                        "       and     %0, %2                          \n"
                        "       " __SC "%0, %1                          \n"
@@ -197,7 +197,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
                unsigned long temp;
 
                __asm__ __volatile__(
-               "       .set    mips3                           \n"
+               "       .set    arch=r4000                      \n"
                "1:     " __LL "%0, %1          # change_bit    \n"
                "       xor     %0, %2                          \n"
                "       " __SC  "%0, %1                         \n"
@@ -211,7 +211,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 
                do {
                        __asm__ __volatile__(
-                       "       .set    mips3                           \n"
+                       "       .set    arch=r4000                      \n"
                        "       " __LL "%0, %1          # change_bit    \n"
                        "       xor     %0, %2                          \n"
                        "       " __SC  "%0, %1                         \n"
@@ -244,7 +244,7 @@ static inline int test_and_set_bit(unsigned long nr,
                unsigned long temp;
 
                __asm__ __volatile__(
-               "       .set    mips3                                   \n"
+               "       .set    arch=r4000                              \n"
                "1:     " __LL "%0, %1          # test_and_set_bit      \n"
                "       or      %2, %0, %3                              \n"
                "       " __SC  "%2, %1                                 \n"
@@ -260,7 +260,7 @@ static inline int test_and_set_bit(unsigned long nr,
 
                do {
                        __asm__ __volatile__(
-                       "       .set    mips3                           \n"
+                       "       .set    arch=r4000                      \n"
                        "       " __LL "%0, %1  # test_and_set_bit      \n"
                        "       or      %2, %0, %3                      \n"
                        "       " __SC  "%2, %1                         \n"
@@ -298,7 +298,7 @@ static inline int test_and_set_bit_lock(unsigned long nr,
                unsigned long temp;
 
                __asm__ __volatile__(
-               "       .set    mips3                                   \n"
+               "       .set    arch=r4000                              \n"
                "1:     " __LL "%0, %1          # test_and_set_bit      \n"
                "       or      %2, %0, %3                              \n"
                "       " __SC  "%2, %1                                 \n"
@@ -314,7 +314,7 @@ static inline int test_and_set_bit_lock(unsigned long nr,
 
                do {
                        __asm__ __volatile__(
-                       "       .set    mips3                           \n"
+                       "       .set    arch=r4000                      \n"
                        "       " __LL "%0, %1  # test_and_set_bit      \n"
                        "       or      %2, %0, %3                      \n"
                        "       " __SC  "%2, %1                         \n"
@@ -353,7 +353,7 @@ static inline int test_and_clear_bit(unsigned long nr,
                unsigned long temp;
 
                __asm__ __volatile__(
-               "       .set    mips3                                   \n"
+               "       .set    arch=r4000                              \n"
                "1:     " __LL  "%0, %1         # test_and_clear_bit    \n"
                "       or      %2, %0, %3                              \n"
                "       xor     %2, %3                                  \n"
@@ -386,7 +386,7 @@ static inline int test_and_clear_bit(unsigned long nr,
 
                do {
                        __asm__ __volatile__(
-                       "       .set    mips3                           \n"
+                       "       .set    arch=r4000                      \n"
                        "       " __LL  "%0, %1 # test_and_clear_bit    \n"
                        "       or      %2, %0, %3                      \n"
                        "       xor     %2, %3                          \n"
@@ -427,7 +427,7 @@ static inline int test_and_change_bit(unsigned long nr,
                unsigned long temp;
 
                __asm__ __volatile__(
-               "       .set    mips3                                   \n"
+               "       .set    arch=r4000                              \n"
                "1:     " __LL  "%0, %1         # test_and_change_bit   \n"
                "       xor     %2, %0, %3                              \n"
                "       " __SC  "%2, %1                                 \n"
@@ -443,7 +443,7 @@ static inline int test_and_change_bit(unsigned long nr,
 
                do {
                        __asm__ __volatile__(
-                       "       .set    mips3                           \n"
+                       "       .set    arch=r4000                      \n"
                        "       " __LL  "%0, %1 # test_and_change_bit   \n"
                        "       xor     %2, %0, %3                      \n"
                        "       " __SC  "\t%2, %1                       \n"
index 4d2cdea5aa37f46e05e1e06e7f8d378bf9331c13..1f7ca8b004042c76131f2bc4e8a7af94ea19f7c9 100644 (file)
 /*
  * Valid machtype for Loongson family
  */
-#define MACH_LOONGSON_UNKNOWN  0
-#define MACH_LEMOTE_FL2E       1
-#define MACH_LEMOTE_FL2F       2
-#define MACH_LEMOTE_ML2F7      3
-#define MACH_LEMOTE_YL2F89     4
-#define MACH_DEXXON_GDIUM2F10  5
-#define MACH_LEMOTE_NAS               6
-#define MACH_LEMOTE_LL2F       7
-#define MACH_LOONGSON_END      8
+enum loongson_machine_type {
+       MACH_LOONGSON_UNKNOWN,
+       MACH_LEMOTE_FL2E,
+       MACH_LEMOTE_FL2F,
+       MACH_LEMOTE_ML2F7,
+       MACH_LEMOTE_YL2F89,
+       MACH_DEXXON_GDIUM2F10,
+       MACH_LEMOTE_NAS,
+       MACH_LEMOTE_LL2F,
+       MACH_LEMOTE_A1004,
+       MACH_LEMOTE_A1101,
+       MACH_LEMOTE_A1201,
+       MACH_LEMOTE_A1205,
+       MACH_LOONGSON_END
+};
 
 /*
  * Valid machtype for group INGENIC
@@ -112,6 +118,8 @@ extern void prom_free_prom_memory(void);
 extern void free_init_pages(const char *what,
                            unsigned long begin, unsigned long end);
 
+extern void (*free_init_pages_eva)(void *begin, void *end);
+
 /*
  * Initial kernel command line, usually setup by prom_init()
  */
index ac3d2b8a20d4bfa483fcb55d57e92670c2c08dd2..3418c51e11512ed2a3957448fbb68d83ebac1858 100644 (file)
@@ -7,6 +7,7 @@
  * Copyright (C) 1999 Silicon Graphics, Inc.
  * Copyright (C) 2001 Thiemo Seufer.
  * Copyright (C) 2002 Maciej W. Rozycki
+ * Copyright (C) 2014 Imagination Technologies Ltd.
  */
 #ifndef _ASM_CHECKSUM_H
 #define _ASM_CHECKSUM_H
  */
 __wsum csum_partial(const void *buff, int len, __wsum sum);
 
-__wsum __csum_partial_copy_user(const void *src, void *dst,
-                               int len, __wsum sum, int *err_ptr);
+__wsum __csum_partial_copy_kernel(const void *src, void *dst,
+                                 int len, __wsum sum, int *err_ptr);
 
+__wsum __csum_partial_copy_from_user(const void *src, void *dst,
+                                    int len, __wsum sum, int *err_ptr);
+__wsum __csum_partial_copy_to_user(const void *src, void *dst,
+                                  int len, __wsum sum, int *err_ptr);
 /*
  * this is a new version of the above that records errors it finds in *errp,
  * but continues and zeros the rest of the buffer.
@@ -41,8 +46,26 @@ __wsum csum_partial_copy_from_user(const void __user *src, void *dst, int len,
                                   __wsum sum, int *err_ptr)
 {
        might_fault();
-       return __csum_partial_copy_user((__force void *)src, dst,
-                                       len, sum, err_ptr);
+       if (segment_eq(get_fs(), get_ds()))
+               return __csum_partial_copy_kernel((__force void *)src, dst,
+                                                 len, sum, err_ptr);
+       else
+               return __csum_partial_copy_from_user((__force void *)src, dst,
+                                                    len, sum, err_ptr);
+}
+
+#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
+static inline
+__wsum csum_and_copy_from_user(const void __user *src, void *dst,
+                              int len, __wsum sum, int *err_ptr)
+{
+       if (access_ok(VERIFY_READ, src, len))
+               return csum_partial_copy_from_user(src, dst, len, sum,
+                                                  err_ptr);
+       if (len)
+               *err_ptr = -EFAULT;
+
+       return sum;
 }
 
 /*
@@ -54,9 +77,16 @@ __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len,
                             __wsum sum, int *err_ptr)
 {
        might_fault();
-       if (access_ok(VERIFY_WRITE, dst, len))
-               return __csum_partial_copy_user(src, (__force void *)dst,
-                                               len, sum, err_ptr);
+       if (access_ok(VERIFY_WRITE, dst, len)) {
+               if (segment_eq(get_fs(), get_ds()))
+                       return __csum_partial_copy_kernel(src,
+                                                         (__force void *)dst,
+                                                         len, sum, err_ptr);
+               else
+                       return __csum_partial_copy_to_user(src,
+                                                          (__force void *)dst,
+                                                          len, sum, err_ptr);
+       }
        if (len)
                *err_ptr = -EFAULT;
 
index 466069bd846596c4e2940be2f42a5aaa82830bec..eefcaa363a875f3f0a0f0c727bcd0cefe2e5a701 100644 (file)
@@ -22,11 +22,11 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
                unsigned long dummy;
 
                __asm__ __volatile__(
-               "       .set    mips3                                   \n"
+               "       .set    arch=r4000                              \n"
                "1:     ll      %0, %3                  # xchg_u32      \n"
                "       .set    mips0                                   \n"
                "       move    %2, %z4                                 \n"
-               "       .set    mips3                                   \n"
+               "       .set    arch=r4000                              \n"
                "       sc      %2, %1                                  \n"
                "       beqzl   %2, 1b                                  \n"
                "       .set    mips0                                   \n"
@@ -38,11 +38,11 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
 
                do {
                        __asm__ __volatile__(
-                       "       .set    mips3                           \n"
+                       "       .set    arch=r4000                      \n"
                        "       ll      %0, %3          # xchg_u32      \n"
                        "       .set    mips0                           \n"
                        "       move    %2, %z4                         \n"
-                       "       .set    mips3                           \n"
+                       "       .set    arch=r4000                      \n"
                        "       sc      %2, %1                          \n"
                        "       .set    mips0                           \n"
                        : "=&r" (retval), "=m" (*m), "=&r" (dummy)
@@ -74,7 +74,7 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
                unsigned long dummy;
 
                __asm__ __volatile__(
-               "       .set    mips3                                   \n"
+               "       .set    arch=r4000                              \n"
                "1:     lld     %0, %3                  # xchg_u64      \n"
                "       move    %2, %z4                                 \n"
                "       scd     %2, %1                                  \n"
@@ -88,7 +88,7 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
 
                do {
                        __asm__ __volatile__(
-                       "       .set    mips3                           \n"
+                       "       .set    arch=r4000                      \n"
                        "       lld     %0, %3          # xchg_u64      \n"
                        "       move    %2, %z4                         \n"
                        "       scd     %2, %1                          \n"
@@ -145,12 +145,12 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
                __asm__ __volatile__(                                   \
                "       .set    push                            \n"     \
                "       .set    noat                            \n"     \
-               "       .set    mips3                           \n"     \
+               "       .set    arch=r4000                      \n"     \
                "1:     " ld "  %0, %2          # __cmpxchg_asm \n"     \
                "       bne     %0, %z3, 2f                     \n"     \
                "       .set    mips0                           \n"     \
                "       move    $1, %z4                         \n"     \
-               "       .set    mips3                           \n"     \
+               "       .set    arch=r4000                      \n"     \
                "       " st "  $1, %1                          \n"     \
                "       beqzl   $1, 1b                          \n"     \
                "2:                                             \n"     \
@@ -162,12 +162,12 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
                __asm__ __volatile__(                                   \
                "       .set    push                            \n"     \
                "       .set    noat                            \n"     \
-               "       .set    mips3                           \n"     \
+               "       .set    arch=r4000                      \n"     \
                "1:     " ld "  %0, %2          # __cmpxchg_asm \n"     \
                "       bne     %0, %z3, 2f                     \n"     \
                "       .set    mips0                           \n"     \
                "       move    $1, %z4                         \n"     \
-               "       .set    mips3                           \n"     \
+               "       .set    arch=r4000                      \n"     \
                "       " st "  $1, %1                          \n"     \
                "       beqz    $1, 1b                          \n"     \
                "       .set    pop                             \n"     \
index 6e70b03b6aab8dbcad47703bc9d3a325adec859b..f56cc975b92f8522ca37e344a9e0146ba6f80bf1 100644 (file)
@@ -26,7 +26,9 @@
 #ifndef cpu_has_segments
 #define cpu_has_segments       (cpu_data[0].options & MIPS_CPU_SEGMENTS)
 #endif
-
+#ifndef cpu_has_eva
+#define cpu_has_eva            (cpu_data[0].options & MIPS_CPU_EVA)
+#endif
 
 /*
  * For the moment we don't consider R6000 and R8000 so we can assume that
 #define cpu_has_vz             (cpu_data[0].ases & MIPS_ASE_VZ)
 #endif
 
+#if defined(CONFIG_CPU_HAS_MSA) && !defined(cpu_has_msa)
+# define cpu_has_msa           (cpu_data[0].ases & MIPS_ASE_MSA)
+#elif !defined(cpu_has_msa)
+# define cpu_has_msa           0
+#endif
+
 #endif /* __ASM_CPU_FEATURES_H */
index 8f7adf0ac1e383539e742df12133bfc3c0fdbda4..dc2135be2a3a4ed2c044870e3c8c8b24881535f6 100644 (file)
@@ -49,6 +49,7 @@ struct cpuinfo_mips {
        unsigned long           ases;
        unsigned int            processor_id;
        unsigned int            fpu_id;
+       unsigned int            msa_id;
        unsigned int            cputype;
        int                     isa_level;
        int                     tlbsize;
@@ -95,4 +96,31 @@ extern void cpu_report(void);
 extern const char *__cpu_name[];
 #define cpu_name_string()      __cpu_name[smp_processor_id()]
 
+struct seq_file;
+struct notifier_block;
+
+extern int register_proc_cpuinfo_notifier(struct notifier_block *nb);
+extern int proc_cpuinfo_notifier_call_chain(unsigned long val, void *v);
+
+#define proc_cpuinfo_notifier(fn, pri)                                 \
+({                                                                     \
+       static struct notifier_block fn##_nb = {                        \
+               .notifier_call = fn,                                    \
+               .priority = pri                                         \
+       };                                                              \
+                                                                       \
+       register_proc_cpuinfo_notifier(&fn##_nb);                       \
+})
+
+struct proc_cpuinfo_notifier_args {
+       struct seq_file *m;
+       unsigned long n;
+};
+
+#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
+# define cpu_vpe_id(cpuinfo)   ((cpuinfo)->vpe_id)
+#else
+# define cpu_vpe_id(cpuinfo)   0
+#endif
+
 #endif /* __ASM_CPU_INFO_H */
index 02f591bd95ca635b82e6016eedcffa5ea1978b93..721906130a573852b654b4495d66cc7877e533c0 100644 (file)
@@ -20,6 +20,10 @@ static inline int __pure __get_cpu_type(const int cpu_type)
        case CPU_LOONGSON2:
 #endif
 
+#ifdef CONFIG_SYS_HAS_CPU_LOONGSON3
+       case CPU_LOONGSON3:
+#endif
+
 #ifdef CONFIG_SYS_HAS_CPU_LOONGSON1B
        case CPU_LOONGSON1:
 #endif
@@ -46,6 +50,8 @@ static inline int __pure __get_cpu_type(const int cpu_type)
        case CPU_M14KEC:
        case CPU_INTERAPTIV:
        case CPU_PROAPTIV:
+       case CPU_P5600:
+       case CPU_M5150:
 #endif
 
 #ifdef CONFIG_SYS_HAS_CPU_MIPS64_R1
index 76411df3d971efee351615d05824cbfa3be72a85..530eb8b3a68e7e6576d9dc8c5c7e16482ee5a439 100644 (file)
 #define PRID_IMP_RM7000                0x2700
 #define PRID_IMP_NEVADA                0x2800          /* RM5260 ??? */
 #define PRID_IMP_RM9000                0x3400
-#define PRID_IMP_LOONGSON1     0x4200
+#define PRID_IMP_LOONGSON_32   0x4200  /* Loongson-1 */
 #define PRID_IMP_R5432         0x5400
 #define PRID_IMP_R5500         0x5500
-#define PRID_IMP_LOONGSON2     0x6300
+#define PRID_IMP_LOONGSON_64   0x6300  /* Loongson-2/3 */
 
 #define PRID_IMP_UNKNOWN       0xff00
 
 #define PRID_IMP_INTERAPTIV_MP 0xa100
 #define PRID_IMP_PROAPTIV_UP   0xa200
 #define PRID_IMP_PROAPTIV_MP   0xa300
+#define PRID_IMP_M5150         0xa700
+#define PRID_IMP_P5600         0xa800
 
 /*
  * These are the PRID's for when 23:16 == PRID_COMP_SIBYTE
 #define PRID_REV_LOONGSON1B    0x0020
 #define PRID_REV_LOONGSON2E    0x0002
 #define PRID_REV_LOONGSON2F    0x0003
+#define PRID_REV_LOONGSON3A    0x0005
 
 /*
  * Older processors used to encode processor version and revision in two
@@ -296,14 +299,14 @@ enum cpu_type_enum {
        CPU_4KC, CPU_4KEC, CPU_4KSC, CPU_24K, CPU_34K, CPU_1004K, CPU_74K,
        CPU_ALCHEMY, CPU_PR4450, CPU_BMIPS32, CPU_BMIPS3300, CPU_BMIPS4350,
        CPU_BMIPS4380, CPU_BMIPS5000, CPU_JZRISC, CPU_LOONGSON1, CPU_M14KC,
-       CPU_M14KEC, CPU_INTERAPTIV, CPU_PROAPTIV,
+       CPU_M14KEC, CPU_INTERAPTIV, CPU_P5600, CPU_PROAPTIV, CPU_1074K, CPU_M5150,
 
        /*
         * MIPS64 class processors
         */
        CPU_5KC, CPU_5KE, CPU_20KC, CPU_25KF, CPU_SB1, CPU_SB1A, CPU_LOONGSON2,
-       CPU_CAVIUM_OCTEON, CPU_CAVIUM_OCTEON_PLUS, CPU_CAVIUM_OCTEON2,
-       CPU_CAVIUM_OCTEON3, CPU_XLR, CPU_XLP,
+       CPU_LOONGSON3, CPU_CAVIUM_OCTEON, CPU_CAVIUM_OCTEON_PLUS,
+       CPU_CAVIUM_OCTEON2, CPU_CAVIUM_OCTEON3, CPU_XLR, CPU_XLP,
 
        CPU_LAST
 };
@@ -358,6 +361,7 @@ enum cpu_type_enum {
 #define MIPS_CPU_MICROMIPS     0x01000000 /* CPU has microMIPS capability */
 #define MIPS_CPU_TLBINV                0x02000000 /* CPU supports TLBINV/F */
 #define MIPS_CPU_SEGMENTS      0x04000000 /* CPU supports Segmentation Control registers */
+#define MIPS_CPU_EVA           0x80000000 /* CPU supports Enhanced Virtual Addressing */
 
 /*
  * CPU ASE encodings
@@ -370,5 +374,6 @@ enum cpu_type_enum {
 #define MIPS_ASE_MIPSMT                0x00000020 /* CPU supports MIPS MT */
 #define MIPS_ASE_DSP2P         0x00000040 /* Signal Processing ASE Rev 2 */
 #define MIPS_ASE_VZ            0x00000080 /* Virtualization ASE */
+#define MIPS_ASE_MSA           0x00000100 /* MIPS SIMD Architecture */
 
 #endif /* _ASM_CPU_H */
index 84238c574d5e6bff1db1db79d58a5ddbd4436422..06412aa9e3fb18aebe8185178d439c692d0e10c3 100644 (file)
@@ -49,9 +49,14 @@ static inline int dma_mapping_error(struct device *dev, u64 mask)
 static inline int
 dma_set_mask(struct device *dev, u64 mask)
 {
+       struct dma_map_ops *ops = get_dma_ops(dev);
+
        if(!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;
 
+       if (ops->set_dma_mask)
+               return ops->set_dma_mask(dev, mask);
+
        *dev->dma_mask = mask;
 
        return 0;
index 58e50cbdb1a6d577ef6ffbac115efcd593b6dedc..4d86b72750c701701f597387cce73b2a274f5bbf 100644 (file)
@@ -180,7 +180,7 @@ static inline void restore_fp(struct task_struct *tsk)
                _restore_fp(tsk);
 }
 
-static inline fpureg_t *get_fpu_regs(struct task_struct *tsk)
+static inline union fpureg *get_fpu_regs(struct task_struct *tsk)
 {
        if (tsk == current) {
                preempt_disable();
index 6ea15815d3ee2f83456f665b5fdfcacf2a537d9a..194cda0396a345f2d8cd890677028ab5d5da98fa 100644 (file)
@@ -12,6 +12,7 @@
 
 #include <linux/futex.h>
 #include <linux/uaccess.h>
+#include <asm/asm-eva.h>
 #include <asm/barrier.h>
 #include <asm/errno.h>
 #include <asm/war.h>
                __asm__ __volatile__(                                   \
                "       .set    push                            \n"     \
                "       .set    noat                            \n"     \
-               "       .set    mips3                           \n"     \
+               "       .set    arch=r4000                      \n"     \
                "1:     ll      %1, %4  # __futex_atomic_op     \n"     \
                "       .set    mips0                           \n"     \
                "       " insn  "                               \n"     \
-               "       .set    mips3                           \n"     \
+               "       .set    arch=r4000                      \n"     \
                "2:     sc      $1, %2                          \n"     \
                "       beqzl   $1, 1b                          \n"     \
                __WEAK_LLSC_MB                                          \
                __asm__ __volatile__(                                   \
                "       .set    push                            \n"     \
                "       .set    noat                            \n"     \
-               "       .set    mips3                           \n"     \
-               "1:     ll      %1, %4  # __futex_atomic_op     \n"     \
+               "       .set    arch=r4000                      \n"     \
+               "1:     "user_ll("%1", "%4")" # __futex_atomic_op\n"    \
                "       .set    mips0                           \n"     \
                "       " insn  "                               \n"     \
-               "       .set    mips3                           \n"     \
-               "2:     sc      $1, %2                          \n"     \
+               "       .set    arch=r4000                      \n"     \
+               "2:     "user_sc("$1", "%2")"                   \n"     \
                "       beqz    $1, 1b                          \n"     \
                __WEAK_LLSC_MB                                          \
                "3:                                             \n"     \
@@ -146,12 +147,12 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
                "# futex_atomic_cmpxchg_inatomic                        \n"
                "       .set    push                                    \n"
                "       .set    noat                                    \n"
-               "       .set    mips3                                   \n"
+               "       .set    arch=r4000                              \n"
                "1:     ll      %1, %3                                  \n"
                "       bne     %1, %z4, 3f                             \n"
                "       .set    mips0                                   \n"
                "       move    $1, %z5                                 \n"
-               "       .set    mips3                                   \n"
+               "       .set    arch=r4000                              \n"
                "2:     sc      $1, %2                                  \n"
                "       beqzl   $1, 1b                                  \n"
                __WEAK_LLSC_MB
@@ -173,13 +174,13 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
                "# futex_atomic_cmpxchg_inatomic                        \n"
                "       .set    push                                    \n"
                "       .set    noat                                    \n"
-               "       .set    mips3                                   \n"
-               "1:     ll      %1, %3                                  \n"
+               "       .set    arch=r4000                              \n"
+               "1:     "user_ll("%1", "%3")"                           \n"
                "       bne     %1, %z4, 3f                             \n"
                "       .set    mips0                                   \n"
                "       move    $1, %z5                                 \n"
-               "       .set    mips3                                   \n"
-               "2:     sc      $1, %2                                  \n"
+               "       .set    arch=r4000                              \n"
+               "2:     "user_sc("$1", "%2")"                           \n"
                "       beqz    $1, 1b                                  \n"
                __WEAK_LLSC_MB
                "3:                                                     \n"
index d6c50a7e9edebf13e68338758b297667d87087f2..f3e6978aad704c4a15bad1971c55625f2e378f6e 100644 (file)
@@ -38,7 +38,7 @@ extern int *_fw_envp;
 
 extern void fw_init_cmdline(void);
 extern char *fw_getcmdline(void);
-extern fw_memblock_t *fw_getmdesc(void);
+extern fw_memblock_t *fw_getmdesc(int);
 extern void fw_meminit(void);
 extern char *fw_getenv(char *name);
 extern unsigned long fw_getenvl(char *name);
diff --git a/arch/mips/include/asm/gcmpregs.h b/arch/mips/include/asm/gcmpregs.h
deleted file mode 100644 (file)
index a7359f7..0000000
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2000, 07 MIPS Technologies, Inc.
- *
- * Multiprocessor Subsystem Register Definitions
- *
- */
-#ifndef _ASM_GCMPREGS_H
-#define _ASM_GCMPREGS_H
-
-
-/* Offsets to major blocks within GCMP from GCMP base */
-#define GCMP_GCB_OFS           0x0000 /* Global Control Block */
-#define GCMP_CLCB_OFS          0x2000 /* Core Local Control Block */
-#define GCMP_COCB_OFS          0x4000 /* Core Other Control Block */
-#define GCMP_GDB_OFS           0x8000 /* Global Debug Block */
-
-/* Offsets to individual GCMP registers from GCMP base */
-#define GCMPOFS(block, tag, reg)       \
-       (GCMP_##block##_OFS + GCMP_##tag##_##reg##_OFS)
-#define GCMPOFSn(block, tag, reg, n) \
-       (GCMP_##block##_OFS + GCMP_##tag##_##reg##_OFS(n))
-
-#define GCMPGCBOFS(reg)                GCMPOFS(GCB, GCB, reg)
-#define GCMPGCBOFSn(reg, n)    GCMPOFSn(GCB, GCB, reg, n)
-#define GCMPCLCBOFS(reg)       GCMPOFS(CLCB, CCB, reg)
-#define GCMPCOCBOFS(reg)       GCMPOFS(COCB, CCB, reg)
-#define GCMPGDBOFS(reg)                GCMPOFS(GDB, GDB, reg)
-
-/* GCMP register access */
-#define GCMPGCB(reg)                   REGP(_gcmp_base, GCMPGCBOFS(reg))
-#define GCMPGCBn(reg, n)              REGP(_gcmp_base, GCMPGCBOFSn(reg, n))
-#define GCMPCLCB(reg)                  REGP(_gcmp_base, GCMPCLCBOFS(reg))
-#define GCMPCOCB(reg)                  REGP(_gcmp_base, GCMPCOCBOFS(reg))
-#define GCMPGDB(reg)                   REGP(_gcmp_base, GCMPGDBOFS(reg))
-
-/* Mask generation */
-#define GCMPMSK(block, reg, bits)      (MSK(bits)<<GCMP_##block##_##reg##_SHF)
-#define GCMPGCBMSK(reg, bits)          GCMPMSK(GCB, reg, bits)
-#define GCMPCCBMSK(reg, bits)          GCMPMSK(CCB, reg, bits)
-#define GCMPGDBMSK(reg, bits)          GCMPMSK(GDB, reg, bits)
-
-/* GCB registers */
-#define GCMP_GCB_GC_OFS                        0x0000  /* Global Config Register */
-#define         GCMP_GCB_GC_NUMIOCU_SHF        8
-#define         GCMP_GCB_GC_NUMIOCU_MSK        GCMPGCBMSK(GC_NUMIOCU, 4)
-#define         GCMP_GCB_GC_NUMCORES_SHF       0
-#define         GCMP_GCB_GC_NUMCORES_MSK       GCMPGCBMSK(GC_NUMCORES, 8)
-#define GCMP_GCB_GCMPB_OFS             0x0008          /* Global GCMP Base */
-#define         GCMP_GCB_GCMPB_GCMPBASE_SHF    15
-#define         GCMP_GCB_GCMPB_GCMPBASE_MSK    GCMPGCBMSK(GCMPB_GCMPBASE, 17)
-#define         GCMP_GCB_GCMPB_CMDEFTGT_SHF    0
-#define         GCMP_GCB_GCMPB_CMDEFTGT_MSK    GCMPGCBMSK(GCMPB_CMDEFTGT, 2)
-#define         GCMP_GCB_GCMPB_CMDEFTGT_DISABLED       0
-#define         GCMP_GCB_GCMPB_CMDEFTGT_MEM            1
-#define         GCMP_GCB_GCMPB_CMDEFTGT_IOCU1          2
-#define         GCMP_GCB_GCMPB_CMDEFTGT_IOCU2          3
-#define GCMP_GCB_CCMC_OFS              0x0010  /* Global CM Control */
-#define GCMP_GCB_GCSRAP_OFS            0x0020  /* Global CSR Access Privilege */
-#define         GCMP_GCB_GCSRAP_CMACCESS_SHF   0
-#define         GCMP_GCB_GCSRAP_CMACCESS_MSK   GCMPGCBMSK(GCSRAP_CMACCESS, 8)
-#define GCMP_GCB_GCMPREV_OFS           0x0030  /* GCMP Revision Register */
-#define GCMP_GCB_GCMEM_OFS             0x0040  /* Global CM Error Mask */
-#define GCMP_GCB_GCMEC_OFS             0x0048  /* Global CM Error Cause */
-#define         GCMP_GCB_GMEC_ERROR_TYPE_SHF   27
-#define         GCMP_GCB_GMEC_ERROR_TYPE_MSK   GCMPGCBMSK(GMEC_ERROR_TYPE, 5)
-#define         GCMP_GCB_GMEC_ERROR_INFO_SHF   0
-#define         GCMP_GCB_GMEC_ERROR_INFO_MSK   GCMPGCBMSK(GMEC_ERROR_INFO, 27)
-#define GCMP_GCB_GCMEA_OFS             0x0050  /* Global CM Error Address */
-#define GCMP_GCB_GCMEO_OFS             0x0058  /* Global CM Error Multiple */
-#define         GCMP_GCB_GMEO_ERROR_2ND_SHF    0
-#define         GCMP_GCB_GMEO_ERROR_2ND_MSK    GCMPGCBMSK(GMEO_ERROR_2ND, 5)
-#define GCMP_GCB_GICBA_OFS             0x0080  /* Global Interrupt Controller Base Address */
-#define         GCMP_GCB_GICBA_BASE_SHF        17
-#define         GCMP_GCB_GICBA_BASE_MSK        GCMPGCBMSK(GICBA_BASE, 15)
-#define         GCMP_GCB_GICBA_EN_SHF          0
-#define         GCMP_GCB_GICBA_EN_MSK          GCMPGCBMSK(GICBA_EN, 1)
-
-/* GCB Regions */
-#define GCMP_GCB_CMxBASE_OFS(n)                (0x0090+16*(n))         /* Global Region[0-3] Base Address */
-#define         GCMP_GCB_CMxBASE_BASE_SHF      16
-#define         GCMP_GCB_CMxBASE_BASE_MSK      GCMPGCBMSK(CMxBASE_BASE, 16)
-#define GCMP_GCB_CMxMASK_OFS(n)                (0x0098+16*(n))         /* Global Region[0-3] Address Mask */
-#define         GCMP_GCB_CMxMASK_MASK_SHF      16
-#define         GCMP_GCB_CMxMASK_MASK_MSK      GCMPGCBMSK(CMxMASK_MASK, 16)
-#define         GCMP_GCB_CMxMASK_CMREGTGT_SHF  0
-#define         GCMP_GCB_CMxMASK_CMREGTGT_MSK  GCMPGCBMSK(CMxMASK_CMREGTGT, 2)
-#define         GCMP_GCB_CMxMASK_CMREGTGT_MEM   0
-#define         GCMP_GCB_CMxMASK_CMREGTGT_MEM1  1
-#define         GCMP_GCB_CMxMASK_CMREGTGT_IOCU1 2
-#define         GCMP_GCB_CMxMASK_CMREGTGT_IOCU2 3
-
-
-/* Core local/Core other control block registers */
-#define GCMP_CCB_RESETR_OFS            0x0000                  /* Reset Release */
-#define         GCMP_CCB_RESETR_INRESET_SHF    0
-#define         GCMP_CCB_RESETR_INRESET_MSK    GCMPCCBMSK(RESETR_INRESET, 16)
-#define GCMP_CCB_COHCTL_OFS            0x0008                  /* Coherence Control */
-#define         GCMP_CCB_COHCTL_DOMAIN_SHF     0
-#define         GCMP_CCB_COHCTL_DOMAIN_MSK     GCMPCCBMSK(COHCTL_DOMAIN, 8)
-#define GCMP_CCB_CFG_OFS               0x0010                  /* Config */
-#define         GCMP_CCB_CFG_IOCUTYPE_SHF      10
-#define         GCMP_CCB_CFG_IOCUTYPE_MSK      GCMPCCBMSK(CFG_IOCUTYPE, 2)
-#define          GCMP_CCB_CFG_IOCUTYPE_CPU     0
-#define          GCMP_CCB_CFG_IOCUTYPE_NCIOCU  1
-#define          GCMP_CCB_CFG_IOCUTYPE_CIOCU   2
-#define         GCMP_CCB_CFG_NUMVPE_SHF        0
-#define         GCMP_CCB_CFG_NUMVPE_MSK        GCMPCCBMSK(CFG_NUMVPE, 10)
-#define GCMP_CCB_OTHER_OFS             0x0018          /* Other Address */
-#define         GCMP_CCB_OTHER_CORENUM_SHF     16
-#define         GCMP_CCB_OTHER_CORENUM_MSK     GCMPCCBMSK(OTHER_CORENUM, 16)
-#define GCMP_CCB_RESETBASE_OFS         0x0020          /* Reset Exception Base */
-#define         GCMP_CCB_RESETBASE_BEV_SHF     12
-#define         GCMP_CCB_RESETBASE_BEV_MSK     GCMPCCBMSK(RESETBASE_BEV, 20)
-#define GCMP_CCB_ID_OFS                        0x0028          /* Identification */
-#define GCMP_CCB_DINTGROUP_OFS         0x0030          /* DINT Group Participate */
-#define GCMP_CCB_DBGGROUP_OFS          0x0100          /* DebugBreak Group */
-
-extern int __init gcmp_probe(unsigned long, unsigned long);
-extern int __init gcmp_niocu(void);
-extern void __init gcmp_setregion(int, unsigned long, unsigned long, int);
-#endif /* _ASM_GCMPREGS_H */
index b2e3e93dd7d88c570a6267b621cbf4521748e8c8..0827166905899db5c49042d0b2fa089810d46ef4 100644 (file)
@@ -11,6 +11,9 @@
 #ifndef _ASM_GICREGS_H
 #define _ASM_GICREGS_H
 
+#include <linux/bitmap.h>
+#include <linux/threads.h>
+
 #undef GICISBYTELITTLEENDIAN
 
 /* Constants */
index 3321dd5a8872d7014fcf7d8c167e603ca0d60143..933b50e125a0f30d7fc526a252d7774d31243df5 100644 (file)
@@ -331,7 +331,7 @@ static inline void pfx##write##bwlq(type val,                               \
                if (irq)                                                \
                        local_irq_save(__flags);                        \
                __asm__ __volatile__(                                   \
-                       ".set   mips3"          "\t\t# __writeq""\n\t"  \
+                       ".set   arch=r4000"     "\t\t# __writeq""\n\t"  \
                        "dsll32 %L0, %L0, 0"                    "\n\t"  \
                        "dsrl32 %L0, %L0, 0"                    "\n\t"  \
                        "dsll32 %M0, %M0, 0"                    "\n\t"  \
@@ -361,7 +361,7 @@ static inline type pfx##read##bwlq(const volatile void __iomem *mem)        \
                if (irq)                                                \
                        local_irq_save(__flags);                        \
                __asm__ __volatile__(                                   \
-                       ".set   mips3"          "\t\t# __readq" "\n\t"  \
+                       ".set   arch=r4000"     "\t\t# __readq" "\n\t"  \
                        "ld     %L0, %1"                        "\n\t"  \
                        "dsra32 %M0, %L0, 0"                    "\n\t"  \
                        "sll    %L0, %L0, 0"                    "\n\t"  \
@@ -584,7 +584,7 @@ static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int
  *
  * This API used to be exported; it now is for arch code internal use only.
  */
-#ifdef CONFIG_DMA_NONCOHERENT
+#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)
 
 extern void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
 extern void (*_dma_cache_wback)(unsigned long start, unsigned long size);
@@ -603,7 +603,7 @@ extern void (*_dma_cache_inv)(unsigned long start, unsigned long size);
 #define dma_cache_inv(start,size)      \
        do { (void) (start); (void) (size); } while (0)
 
-#endif /* CONFIG_DMA_NONCOHERENT */
+#endif /* CONFIG_DMA_NONCOHERENT || CONFIG_DMA_MAYBE_COHERENT */
 
 /*
  * Read a 32-bit register that requires a 64-bit read cycle on the bus.
index d44622cd74becb52ab33e80d8f21ab0e7d68ea05..46dfc3c1fd49777a41b3158c77b1fc5c49955087 100644 (file)
@@ -33,7 +33,7 @@ static __inline__ long local_add_return(long i, local_t * l)
                unsigned long temp;
 
                __asm__ __volatile__(
-               "       .set    mips3                                   \n"
+               "       .set    arch=r4000                              \n"
                "1:"    __LL    "%1, %2         # local_add_return      \n"
                "       addu    %0, %1, %3                              \n"
                        __SC    "%0, %2                                 \n"
@@ -47,7 +47,7 @@ static __inline__ long local_add_return(long i, local_t * l)
                unsigned long temp;
 
                __asm__ __volatile__(
-               "       .set    mips3                                   \n"
+               "       .set    arch=r4000                              \n"
                "1:"    __LL    "%1, %2         # local_add_return      \n"
                "       addu    %0, %1, %3                              \n"
                        __SC    "%0, %2                                 \n"
@@ -78,7 +78,7 @@ static __inline__ long local_sub_return(long i, local_t * l)
                unsigned long temp;
 
                __asm__ __volatile__(
-               "       .set    mips3                                   \n"
+               "       .set    arch=r4000                              \n"
                "1:"    __LL    "%1, %2         # local_sub_return      \n"
                "       subu    %0, %1, %3                              \n"
                        __SC    "%0, %2                                 \n"
@@ -92,7 +92,7 @@ static __inline__ long local_sub_return(long i, local_t * l)
                unsigned long temp;
 
                __asm__ __volatile__(
-               "       .set    mips3                                   \n"
+               "       .set    arch=r4000                              \n"
                "1:"    __LL    "%1, %2         # local_sub_return      \n"
                "       subu    %0, %1, %3                              \n"
                        __SC    "%0, %2                                 \n"
index 40005fb39618ad4c3b89055e86129c4f83a62cef..bba7399a49a37410d5163a55820c3199ae82767a 100644 (file)
@@ -27,7 +27,11 @@ enum bcm47xx_board {
        BCM47XX_BOARD_ASUS_WL700GE,
        BCM47XX_BOARD_ASUS_WLHDD,
 
+       BCM47XX_BOARD_BELKIN_F7D3301,
+       BCM47XX_BOARD_BELKIN_F7D3302,
        BCM47XX_BOARD_BELKIN_F7D4301,
+       BCM47XX_BOARD_BELKIN_F7D4302,
+       BCM47XX_BOARD_BELKIN_F7D4401,
 
        BCM47XX_BOARD_BUFFALO_WBR2_G54,
        BCM47XX_BOARD_BUFFALO_WHR2_A54G54,
@@ -66,7 +70,7 @@ enum bcm47xx_board {
        BCM47XX_BOARD_LINKSYS_WRT310NV1,
        BCM47XX_BOARD_LINKSYS_WRT310NV2,
        BCM47XX_BOARD_LINKSYS_WRT54G3GV2,
-       BCM47XX_BOARD_LINKSYS_WRT54GSV1,
+       BCM47XX_BOARD_LINKSYS_WRT54G,
        BCM47XX_BOARD_LINKSYS_WRT610NV1,
        BCM47XX_BOARD_LINKSYS_WRT610NV2,
        BCM47XX_BOARD_LINKSYS_WRTSL54GS,
@@ -94,6 +98,8 @@ enum bcm47xx_board {
 
        BCM47XX_BOARD_PHICOMM_M1,
 
+       BCM47XX_BOARD_SIEMENS_SE505V2,
+
        BCM47XX_BOARD_SIMPLETECH_SIMPLESHARE,
 
        BCM47XX_BOARD_ZTE_H218N,
diff --git a/arch/mips/include/asm/mach-db1x00/db1200.h b/arch/mips/include/asm/mach-db1x00/db1200.h
deleted file mode 100644 (file)
index d3cce73..0000000
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * AMD Alchemy DBAu1200 Reference Board
- * Board register defines.
- *
- * ########################################################################
- *
- *  This program is free software; you can distribute it and/or modify it
- *  under the terms of the GNU General Public License (Version 2) as
- *  published by the Free Software Foundation.
- *
- *  This program is distributed in the hope it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- *  for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- * ########################################################################
- *
- *
- */
-#ifndef __ASM_DB1200_H
-#define __ASM_DB1200_H
-
-#include <linux/types.h>
-#include <asm/mach-au1x00/au1000.h>
-#include <asm/mach-au1x00/au1xxx_psc.h>
-
-/* Bit positions for the different interrupt sources */
-#define BCSR_INT_IDE           0x0001
-#define BCSR_INT_ETH           0x0002
-#define BCSR_INT_PC0           0x0004
-#define BCSR_INT_PC0STSCHG     0x0008
-#define BCSR_INT_PC1           0x0010
-#define BCSR_INT_PC1STSCHG     0x0020
-#define BCSR_INT_DC            0x0040
-#define BCSR_INT_FLASHBUSY     0x0080
-#define BCSR_INT_PC0INSERT     0x0100
-#define BCSR_INT_PC0EJECT      0x0200
-#define BCSR_INT_PC1INSERT     0x0400
-#define BCSR_INT_PC1EJECT      0x0800
-#define BCSR_INT_SD0INSERT     0x1000
-#define BCSR_INT_SD0EJECT      0x2000
-#define BCSR_INT_SD1INSERT     0x4000
-#define BCSR_INT_SD1EJECT      0x8000
-
-#define IDE_REG_SHIFT          5
-
-#define DB1200_IDE_PHYS_ADDR   0x18800000
-#define DB1200_IDE_PHYS_LEN    (16 << IDE_REG_SHIFT)
-#define DB1200_ETH_PHYS_ADDR   0x19000300
-#define DB1200_NAND_PHYS_ADDR  0x20000000
-
-#define PB1200_IDE_PHYS_ADDR   0x0C800000
-#define PB1200_ETH_PHYS_ADDR   0x0D000300
-#define PB1200_NAND_PHYS_ADDR  0x1C000000
-
-/*
- * External Interrupts for DBAu1200 as of 8/6/2004.
- * Bit positions in the CPLD registers can be calculated by taking
- * the interrupt define and subtracting the DB1200_INT_BEGIN value.
- *
- *   Example: IDE bis pos is  = 64 - 64
- *           ETH bit pos is  = 65 - 64
- */
-enum external_db1200_ints {
-       DB1200_INT_BEGIN        = AU1000_MAX_INTR + 1,
-
-       DB1200_IDE_INT          = DB1200_INT_BEGIN,
-       DB1200_ETH_INT,
-       DB1200_PC0_INT,
-       DB1200_PC0_STSCHG_INT,
-       DB1200_PC1_INT,
-       DB1200_PC1_STSCHG_INT,
-       DB1200_DC_INT,
-       DB1200_FLASHBUSY_INT,
-       DB1200_PC0_INSERT_INT,
-       DB1200_PC0_EJECT_INT,
-       DB1200_PC1_INSERT_INT,
-       DB1200_PC1_EJECT_INT,
-       DB1200_SD0_INSERT_INT,
-       DB1200_SD0_EJECT_INT,
-       PB1200_SD1_INSERT_INT,
-       PB1200_SD1_EJECT_INT,
-
-       DB1200_INT_END          = DB1200_INT_BEGIN + 15,
-};
-
-#endif /* __ASM_DB1200_H */
diff --git a/arch/mips/include/asm/mach-db1x00/db1300.h b/arch/mips/include/asm/mach-db1x00/db1300.h
deleted file mode 100644 (file)
index 3d1ede4..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * NetLogic DB1300 board constants
- */
-
-#ifndef _DB1300_H_
-#define _DB1300_H_
-
-/* FPGA (external mux) interrupt sources */
-#define DB1300_FIRST_INT       (ALCHEMY_GPIC_INT_LAST + 1)
-#define DB1300_IDE_INT         (DB1300_FIRST_INT + 0)
-#define DB1300_ETH_INT         (DB1300_FIRST_INT + 1)
-#define DB1300_CF_INT          (DB1300_FIRST_INT + 2)
-#define DB1300_VIDEO_INT       (DB1300_FIRST_INT + 4)
-#define DB1300_HDMI_INT                (DB1300_FIRST_INT + 5)
-#define DB1300_DC_INT          (DB1300_FIRST_INT + 6)
-#define DB1300_FLASH_INT       (DB1300_FIRST_INT + 7)
-#define DB1300_CF_INSERT_INT   (DB1300_FIRST_INT + 8)
-#define DB1300_CF_EJECT_INT    (DB1300_FIRST_INT + 9)
-#define DB1300_AC97_INT                (DB1300_FIRST_INT + 10)
-#define DB1300_AC97_PEN_INT    (DB1300_FIRST_INT + 11)
-#define DB1300_SD1_INSERT_INT  (DB1300_FIRST_INT + 12)
-#define DB1300_SD1_EJECT_INT   (DB1300_FIRST_INT + 13)
-#define DB1300_OTG_VBUS_OC_INT (DB1300_FIRST_INT + 14)
-#define DB1300_HOST_VBUS_OC_INT (DB1300_FIRST_INT + 15)
-#define DB1300_LAST_INT                (DB1300_FIRST_INT + 15)
-
-/* SMSC9210 CS */
-#define DB1300_ETH_PHYS_ADDR   0x19000000
-#define DB1300_ETH_PHYS_END    0x197fffff
-
-/* ATA CS */
-#define DB1300_IDE_PHYS_ADDR   0x18800000
-#define DB1300_IDE_REG_SHIFT   5
-#define DB1300_IDE_PHYS_LEN    (16 << DB1300_IDE_REG_SHIFT)
-
-/* NAND CS */
-#define DB1300_NAND_PHYS_ADDR  0x20000000
-#define DB1300_NAND_PHYS_END   0x20000fff
-
-#endif /* _DB1300_H_ */
diff --git a/arch/mips/include/asm/mach-loongson/boot_param.h b/arch/mips/include/asm/mach-loongson/boot_param.h
new file mode 100644 (file)
index 0000000..829a7ec
--- /dev/null
@@ -0,0 +1,163 @@
+#ifndef __ASM_MACH_LOONGSON_BOOT_PARAM_H_
+#define __ASM_MACH_LOONGSON_BOOT_PARAM_H_
+
+#define SYSTEM_RAM_LOW         1
+#define SYSTEM_RAM_HIGH                2
+#define MEM_RESERVED           3
+#define PCI_IO                 4
+#define PCI_MEM                        5
+#define LOONGSON_CFG_REG       6
+#define VIDEO_ROM              7
+#define ADAPTER_ROM            8
+#define ACPI_TABLE             9
+#define MAX_MEMORY_TYPE                10
+
+#define LOONGSON3_BOOT_MEM_MAP_MAX 128
+struct efi_memory_map_loongson {
+       u16 vers;       /* version of efi_memory_map */
+       u32 nr_map;     /* number of memory_maps */
+       u32 mem_freq;   /* memory frequence */
+       struct mem_map {
+               u32 node_id;    /* node_id which memory attached to */
+               u32 mem_type;   /* system memory, pci memory, pci io, etc. */
+               u64 mem_start;  /* memory map start address */
+               u32 mem_size;   /* each memory_map size, not the total size */
+       } map[LOONGSON3_BOOT_MEM_MAP_MAX];
+} __packed;
+
+enum loongson_cpu_type {
+       Loongson_2E = 0,
+       Loongson_2F = 1,
+       Loongson_3A = 2,
+       Loongson_3B = 3,
+       Loongson_1A = 4,
+       Loongson_1B = 5
+};
+
+/*
+ * Capability and feature descriptor structure for MIPS CPU
+ */
+struct efi_cpuinfo_loongson {
+       u16 vers;     /* version of efi_cpuinfo_loongson */
+       u32 processor_id; /* PRID, e.g. 6305, 6306 */
+       u32 cputype;  /* Loongson_3A/3B, etc. */
+       u32 total_node;   /* num of total numa nodes */
+       u32 cpu_startup_core_id; /* Core id */
+       u32 cpu_clock_freq; /* cpu_clock */
+       u32 nr_cpus;
+} __packed;
+
+struct system_loongson {
+       u16 vers;     /* version of system_loongson */
+       u32 ccnuma_smp; /* 0: no numa; 1: has numa */
+       u32 sing_double_channel; /* 1:single; 2:double */
+} __packed;
+
+struct irq_source_routing_table {
+       u16 vers;
+       u16 size;
+       u16 rtr_bus;
+       u16 rtr_devfn;
+       u32 vendor;
+       u32 device;
+       u32 PIC_type;   /* conform use HT or PCI to route to CPU-PIC */
+       u64 ht_int_bit; /* 3A: 1<<24; 3B: 1<<16 */
+       u64 ht_enable;  /* irqs used in this PIC */
+       u32 node_id;    /* node id: 0x0-0; 0x1-1; 0x10-2; 0x11-3 */
+       u64 pci_mem_start_addr;
+       u64 pci_mem_end_addr;
+       u64 pci_io_start_addr;
+       u64 pci_io_end_addr;
+       u64 pci_config_addr;
+       u32 dma_mask_bits;
+} __packed;
+
+struct interface_info {
+       u16 vers; /* version of the specificition */
+       u16 size;
+       u8  flag;
+       char description[64];
+} __packed;
+
+#define MAX_RESOURCE_NUMBER 128
+struct resource_loongson {
+       u64 start; /* resource start address */
+       u64 end;   /* resource end address */
+       char name[64];
+       u32 flags;
+};
+
+struct archdev_data {};  /* arch specific additions */
+
+struct board_devices {
+       char name[64];    /* hold the device name */
+       u32 num_resources; /* number of device_resource */
+       /* for each device's resource */
+       struct resource_loongson resource[MAX_RESOURCE_NUMBER];
+       /* arch specific additions */
+       struct archdev_data archdata;
+};
+
+struct loongson_special_attribute {
+       u16 vers;     /* version of this special */
+       char special_name[64]; /* special_atribute_name */
+       u32 loongson_special_type; /* type of special device */
+       /* for each device's resource */
+       struct resource_loongson resource[MAX_RESOURCE_NUMBER];
+};
+
+struct loongson_params {
+       u64 memory_offset;      /* efi_memory_map_loongson struct offset */
+       u64 cpu_offset;         /* efi_cpuinfo_loongson struct offset */
+       u64 system_offset;      /* system_loongson struct offset */
+       u64 irq_offset;         /* irq_source_routing_table struct offset */
+       u64 interface_offset;   /* interface_info struct offset */
+       u64 special_offset;     /* loongson_special_attribute struct offset */
+       u64 boarddev_table_offset;  /* board_devices offset */
+};
+
+struct smbios_tables {
+       u16 vers;     /* version of smbios */
+       u64 vga_bios; /* vga_bios address */
+       struct loongson_params lp;
+};
+
+struct efi_reset_system_t {
+       u64 ResetCold;
+       u64 ResetWarm;
+       u64 ResetType;
+       u64 Shutdown;
+       u64 DoSuspend; /* NULL if not support */
+};
+
+struct efi_loongson {
+       u64 mps;        /* MPS table */
+       u64 acpi;       /* ACPI table (IA64 ext 0.71) */
+       u64 acpi20;     /* ACPI table (ACPI 2.0) */
+       struct smbios_tables smbios;    /* SM BIOS table */
+       u64 sal_systab; /* SAL system table */
+       u64 boot_info;  /* boot info table */
+};
+
+struct boot_params {
+       struct efi_loongson efi;
+       struct efi_reset_system_t reset_system;
+};
+
+struct loongson_system_configuration {
+       u32 nr_cpus;
+       enum loongson_cpu_type cputype;
+       u64 ht_control_base;
+       u64 pci_mem_start_addr;
+       u64 pci_mem_end_addr;
+       u64 pci_io_base;
+       u64 restart_addr;
+       u64 poweroff_addr;
+       u64 suspend_addr;
+       u64 vgabios_addr;
+       u32 dma_mask_bits;
+};
+
+extern struct efi_memory_map_loongson *loongson_memmap;
+extern struct loongson_system_configuration loongson_sysconf;
+#endif
index aeb2c05d61456de8b0143984fe1c9626e8acae6d..6a902751cc7f79034b802821857020a7386536e7 100644 (file)
 #ifndef __ASM_MACH_LOONGSON_DMA_COHERENCE_H
 #define __ASM_MACH_LOONGSON_DMA_COHERENCE_H
 
+#ifdef CONFIG_SWIOTLB
+#include <linux/swiotlb.h>
+#endif
+
 struct device;
 
+extern dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
+extern phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
 static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
                                          size_t size)
 {
+#ifdef CONFIG_CPU_LOONGSON3
+       return virt_to_phys(addr);
+#else
        return virt_to_phys(addr) | 0x80000000;
+#endif
 }
 
 static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
                                               struct page *page)
 {
+#ifdef CONFIG_CPU_LOONGSON3
+       return page_to_phys(page);
+#else
        return page_to_phys(page) | 0x80000000;
+#endif
 }
 
 static inline unsigned long plat_dma_addr_to_phys(struct device *dev,
        dma_addr_t dma_addr)
 {
-#if defined(CONFIG_CPU_LOONGSON2F) && defined(CONFIG_64BIT)
+#if defined(CONFIG_CPU_LOONGSON3) && defined(CONFIG_64BIT)
+       return dma_addr;
+#elif defined(CONFIG_CPU_LOONGSON2F) && defined(CONFIG_64BIT)
        return (dma_addr > 0x8fffffff) ? dma_addr : (dma_addr & 0x0fffffff);
 #else
        return dma_addr & 0x7fffffff;
@@ -55,7 +71,11 @@ static inline int plat_dma_supported(struct device *dev, u64 mask)
 
 static inline int plat_device_is_coherent(struct device *dev)
 {
+#ifdef CONFIG_DMA_NONCOHERENT
        return 0;
+#else
+       return 1;
+#endif /* CONFIG_DMA_NONCOHERENT */
 }
 
 #endif /* __ASM_MACH_LOONGSON_DMA_COHERENCE_H */
diff --git a/arch/mips/include/asm/mach-loongson/irq.h b/arch/mips/include/asm/mach-loongson/irq.h
new file mode 100644 (file)
index 0000000..34560bd
--- /dev/null
@@ -0,0 +1,44 @@
+#ifndef __ASM_MACH_LOONGSON_IRQ_H_
+#define __ASM_MACH_LOONGSON_IRQ_H_
+
+#include <boot_param.h>
+
+#ifdef CONFIG_CPU_LOONGSON3
+
+/* cpu core interrupt numbers */
+#define MIPS_CPU_IRQ_BASE 56
+
+#define LOONGSON_UART_IRQ   (MIPS_CPU_IRQ_BASE + 2) /* UART */
+#define LOONGSON_HT1_IRQ    (MIPS_CPU_IRQ_BASE + 3) /* HT1 */
+#define LOONGSON_TIMER_IRQ  (MIPS_CPU_IRQ_BASE + 7) /* CPU Timer */
+
+#define LOONGSON_HT1_CFG_BASE          loongson_sysconf.ht_control_base
+#define LOONGSON_HT1_INT_VECTOR_BASE   (LOONGSON_HT1_CFG_BASE + 0x80)
+#define LOONGSON_HT1_INT_EN_BASE       (LOONGSON_HT1_CFG_BASE + 0xa0)
+#define LOONGSON_HT1_INT_VECTOR(n)     \
+               LOONGSON3_REG32(LOONGSON_HT1_INT_VECTOR_BASE, 4 * (n))
+#define LOONGSON_HT1_INTN_EN(n)                \
+               LOONGSON3_REG32(LOONGSON_HT1_INT_EN_BASE, 4 * (n))
+
+#define LOONGSON_INT_ROUTER_OFFSET     0x1400
+#define LOONGSON_INT_ROUTER_INTEN      \
+         LOONGSON3_REG32(LOONGSON3_REG_BASE, LOONGSON_INT_ROUTER_OFFSET + 0x24)
+#define LOONGSON_INT_ROUTER_INTENSET   \
+         LOONGSON3_REG32(LOONGSON3_REG_BASE, LOONGSON_INT_ROUTER_OFFSET + 0x28)
+#define LOONGSON_INT_ROUTER_INTENCLR   \
+         LOONGSON3_REG32(LOONGSON3_REG_BASE, LOONGSON_INT_ROUTER_OFFSET + 0x2c)
+#define LOONGSON_INT_ROUTER_ENTRY(n)   \
+         LOONGSON3_REG8(LOONGSON3_REG_BASE, LOONGSON_INT_ROUTER_OFFSET + n)
+#define LOONGSON_INT_ROUTER_LPC                LOONGSON_INT_ROUTER_ENTRY(0x0a)
+#define LOONGSON_INT_ROUTER_HT1(n)     LOONGSON_INT_ROUTER_ENTRY(n + 0x18)
+
+#define LOONGSON_INT_CORE0_INT0                0x11 /* route to int 0 of core 0 */
+#define LOONGSON_INT_CORE0_INT1                0x21 /* route to int 1 of core 0 */
+
+#endif
+
+extern void fixup_irqs(void);
+extern void loongson3_ipi_interrupt(struct pt_regs *regs);
+
+#include_next <irq.h>
+#endif /* __ASM_MACH_LOONGSON_IRQ_H_ */
index b286534fef0899007bb0640c1e12415a73b81714..f3fd1eb8e3ddeb3695c49cc838b06841c610da44 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/init.h>
 #include <linux/irq.h>
 #include <linux/kconfig.h>
+#include <boot_param.h>
 
 /* loongson internal northbridge initialization */
 extern void bonito_irq_init(void);
@@ -24,8 +25,9 @@ extern void mach_prepare_reboot(void);
 extern void mach_prepare_shutdown(void);
 
 /* environment arguments from bootloader */
-extern unsigned long cpu_clock_freq;
-extern unsigned long memsize, highmemsize;
+extern u32 cpu_clock_freq;
+extern u32 memsize, highmemsize;
+extern struct plat_smp_ops loongson3_smp_ops;
 
 /* loongson-specific command line, env and memory initialization */
 extern void __init prom_init_memory(void);
@@ -61,6 +63,12 @@ extern int mach_i8259_irq(void);
 #define LOONGSON_REG(x) \
        (*(volatile u32 *)((char *)CKSEG1ADDR(LOONGSON_REG_BASE) + (x)))
 
+#define LOONGSON3_REG8(base, x) \
+       (*(volatile u8 *)((char *)TO_UNCAC(base) + (x)))
+
+#define LOONGSON3_REG32(base, x) \
+       (*(volatile u32 *)((char *)TO_UNCAC(base) + (x)))
+
 #define LOONGSON_IRQ_BASE      32
 #define LOONGSON2_PERFCNT_IRQ  (MIPS_CPU_IRQ_BASE + 6) /* cpu perf counter */
 
@@ -86,6 +94,10 @@ static inline void do_perfcnt_IRQ(void)
 #define LOONGSON_REG_BASE      0x1fe00000
 #define LOONGSON_REG_SIZE      0x00100000      /* 256Bytes + 256Bytes + ??? */
 #define LOONGSON_REG_TOP       (LOONGSON_REG_BASE+LOONGSON_REG_SIZE-1)
+/* Loongson-3 specific registers */
+#define LOONGSON3_REG_BASE     0x3ff00000
+#define LOONGSON3_REG_SIZE     0x00100000      /* 256Bytes + 256Bytes + ??? */
+#define LOONGSON3_REG_TOP      (LOONGSON3_REG_BASE+LOONGSON3_REG_SIZE-1)
 
 #define LOONGSON_LIO1_BASE     0x1ff00000
 #define LOONGSON_LIO1_SIZE     0x00100000      /* 1M */
@@ -101,7 +113,13 @@ static inline void do_perfcnt_IRQ(void)
 #define LOONGSON_PCICFG_BASE   0x1fe80000
 #define LOONGSON_PCICFG_SIZE   0x00000800      /* 2K */
 #define LOONGSON_PCICFG_TOP    (LOONGSON_PCICFG_BASE+LOONGSON_PCICFG_SIZE-1)
+
+#if defined(CONFIG_HT_PCI)
+#define LOONGSON_PCIIO_BASE    loongson_sysconf.pci_io_base
+#else
 #define LOONGSON_PCIIO_BASE    0x1fd00000
+#endif
+
 #define LOONGSON_PCIIO_SIZE    0x00100000      /* 1M */
 #define LOONGSON_PCIIO_TOP     (LOONGSON_PCIIO_BASE+LOONGSON_PCIIO_SIZE-1)
 
@@ -231,6 +249,9 @@ static inline void do_perfcnt_IRQ(void)
 #define LOONGSON_PXARB_CFG             LOONGSON_REG(LOONGSON_REGBASE + 0x68)
 #define LOONGSON_PXARB_STATUS          LOONGSON_REG(LOONGSON_REGBASE + 0x6c)
 
+/* Chip Config */
+#define LOONGSON_CHIPCFG0              LOONGSON_REG(LOONGSON_REGBASE + 0x80)
+
 /* pcimap */
 
 #define LOONGSON_PCIMAP_PCIMAP_LO0     0x0000003f
@@ -246,9 +267,6 @@ static inline void do_perfcnt_IRQ(void)
 #ifdef CONFIG_CPU_SUPPORTS_CPUFREQ
 #include <linux/cpufreq.h>
 extern struct cpufreq_frequency_table loongson2_clockmod_table[];
-
-/* Chip Config */
-#define LOONGSON_CHIPCFG0              LOONGSON_REG(LOONGSON_REGBASE + 0x80)
 #endif
 
 /*
index 3810d5ca84ac1c592b6fca12ec7ff7cf8435305c..1b1f592fa2be7e9eb128c5036653d0bd6ff4c608 100644 (file)
 
 #endif
 
+#ifdef CONFIG_LEMOTE_MACH3A
+
+#define LOONGSON_MACHTYPE MACH_LEMOTE_A1101
+
+#endif /* CONFIG_LEMOTE_MACH3A */
+
 #endif /* __ASM_MACH_LOONGSON_MACHINE_H */
index bc99dab4ef63b080eb0d7b2eaa318a8f2997a423..1212774f66ef4d810b24e22b524935248ee6dd3f 100644 (file)
@@ -40,8 +40,13 @@ extern struct pci_ops loongson_pci_ops;
 #else  /* loongson2f/32bit & loongson2e */
 
 /* this pci memory space is mapped by pcimap in pci.c */
+#ifdef CONFIG_CPU_LOONGSON3
+#define LOONGSON_PCI_MEM_START 0x40000000UL
+#define LOONGSON_PCI_MEM_END   0x7effffffUL
+#else
 #define LOONGSON_PCI_MEM_START LOONGSON_PCILO1_BASE
 #define LOONGSON_PCI_MEM_END   (LOONGSON_PCILO1_BASE + 0x04000000 * 2)
+#endif
 /* this is an offset from mips_io_port_base */
 #define LOONGSON_PCI_IO_START  0x00004000UL
 
diff --git a/arch/mips/include/asm/mach-loongson/spaces.h b/arch/mips/include/asm/mach-loongson/spaces.h
new file mode 100644 (file)
index 0000000..e2506ee
--- /dev/null
@@ -0,0 +1,9 @@
+#ifndef __ASM_MACH_LOONGSON_SPACES_H_
+#define __ASM_MACH_LOONGSON_SPACES_H_
+
+#if defined(CONFIG_64BIT)
+#define CAC_BASE        _AC(0x9800000000000000, UL)
+#endif /* CONFIG_64BIT */
+
+#include <asm/mach-generic/spaces.h>
+#endif
index 0b793e7bf67e4d77a960a419c32b73200f0d1506..7c5e17a178490164af87dc681957728697ca984d 100644 (file)
@@ -5,10 +5,80 @@
  *
  * Chris Dearman (chris@mips.com)
  * Copyright (C) 2007 Mips Technologies, Inc.
+ * Copyright (C) 2014 Imagination Technologies Ltd.
  */
 #ifndef __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H
 #define __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H
 
+       /*
+        * Prepare segments for EVA boot:
+        *
+        * This is in case the processor boots in legacy configuration
+        * (SI_EVAReset is de-asserted and CONFIG5.K == 0)
+        *
+        * On entry, t1 is loaded with CP0_CONFIG
+        *
+        * ========================= Mappings =============================
+        * Virtual memory           Physical memory           Mapping
+        * 0x00000000 - 0x7fffffff  0x80000000 - 0xffffffff   MUSUK (kuseg)
+        *                          Flat 2GB physical memory
+        *
+        * 0x80000000 - 0x9fffffff  0x00000000 - 0x1fffffff   MUSUK (kseg0)
+        * 0xa0000000 - 0xbfffffff  0x00000000 - 0x1fffffff   MUSUK (kseg1)
+        * 0xc0000000 - 0xdfffffff             -                 MK  (kseg2)
+        * 0xe0000000 - 0xffffffff             -                 MK  (kseg3)
+        *
+        *
+        * Lowmem is expanded to 2GB
+        */
+       .macro  eva_entry
+       /*
+        * Get Config.K0 value and use it to program
+        * the segmentation registers
+        */
+       andi    t1, 0x7 /* CCA */
+       move    t2, t1
+       ins     t2, t1, 16, 3
+       /* SegCtl0 */
+       li      t0, ((MIPS_SEGCFG_MK << MIPS_SEGCFG_AM_SHIFT) |         \
+               (0 << MIPS_SEGCFG_PA_SHIFT) |                           \
+               (1 << MIPS_SEGCFG_EU_SHIFT)) |                          \
+               (((MIPS_SEGCFG_MK << MIPS_SEGCFG_AM_SHIFT) |            \
+               (0 << MIPS_SEGCFG_PA_SHIFT) |                           \
+               (1 << MIPS_SEGCFG_EU_SHIFT)) << 16)
+       or      t0, t2
+       mtc0    t0, $5, 2
+
+       /* SegCtl1 */
+       li      t0, ((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) |      \
+               (0 << MIPS_SEGCFG_PA_SHIFT) |                           \
+               (2 << MIPS_SEGCFG_C_SHIFT) |                            \
+               (1 << MIPS_SEGCFG_EU_SHIFT)) |                          \
+               (((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) |         \
+               (0 << MIPS_SEGCFG_PA_SHIFT) |                           \
+               (1 << MIPS_SEGCFG_EU_SHIFT)) << 16)
+       ins     t0, t1, 16, 3
+       mtc0    t0, $5, 3
+
+       /* SegCtl2 */
+       li      t0, ((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) |      \
+               (6 << MIPS_SEGCFG_PA_SHIFT) |                           \
+               (1 << MIPS_SEGCFG_EU_SHIFT)) |                          \
+               (((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) |         \
+               (4 << MIPS_SEGCFG_PA_SHIFT) |                           \
+               (1 << MIPS_SEGCFG_EU_SHIFT)) << 16)
+       or      t0, t2
+       mtc0    t0, $5, 4
+
+       jal     mips_ihb
+       mfc0    t0, $16, 5
+       li      t2, 0x40000000      /* K bit */
+       or      t0, t0, t2
+       mtc0    t0, $16, 5
+       sync
+       jal     mips_ihb
+       .endm
+
        .macro  kernel_entry_setup
 #ifdef CONFIG_MIPS_MT_SMTC
        mfc0    t0, CP0_CONFIG
 nonmt_processor:
        .asciz  "SMTC kernel requires the MT ASE to run\n"
        __FINIT
-0:
 #endif
+
+#ifdef CONFIG_EVA
+       sync
+       ehb
+
+       mfc0    t1, CP0_CONFIG
+       bgez    t1, 9f
+       mfc0    t0, CP0_CONFIG, 1
+       bgez    t0, 9f
+       mfc0    t0, CP0_CONFIG, 2
+       bgez    t0, 9f
+       mfc0    t0, CP0_CONFIG, 3
+       sll     t0, t0, 6   /* SC bit */
+       bgez    t0, 9f
+
+       eva_entry
+       b       0f
+9:
+       /* Assume we came from YAMON... */
+       PTR_LA  v0, 0x9fc00534  /* YAMON print */
+       lw      v0, (v0)
+       move    a0, zero
+       PTR_LA  a1, nonsc_processor
+       jal     v0
+
+       PTR_LA  v0, 0x9fc00520  /* YAMON exit */
+       lw      v0, (v0)
+       li      a0, 1
+       jal     v0
+
+1:     b       1b
+       nop
+       __INITDATA
+nonsc_processor:
+       .asciz  "EVA kernel requires a MIPS core with Segment Control implemented\n"
+       __FINIT
+#endif /* CONFIG_EVA */
+0:
        .endm
 
 /*
  * Do SMP slave processor setup necessary before we can safely execute C code.
  */
        .macro  smp_slave_setup
+#ifdef CONFIG_EVA
+       sync
+       ehb
+       mfc0    t1, CP0_CONFIG
+       eva_entry
+#endif
        .endm
 
 #endif /* __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H */
diff --git a/arch/mips/include/asm/mach-malta/spaces.h b/arch/mips/include/asm/mach-malta/spaces.h
new file mode 100644 (file)
index 0000000..d7e5497
--- /dev/null
@@ -0,0 +1,46 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2014 Imagination Technologies Ltd.
+ */
+
+#ifndef _ASM_MALTA_SPACES_H
+#define _ASM_MALTA_SPACES_H
+
+#ifdef CONFIG_EVA
+
+/*
+ * Traditional Malta Board Memory Map for EVA
+ *
+ * 0x00000000 - 0x0fffffff: 1st RAM region, 256MB
+ * 0x10000000 - 0x1bffffff: GIC and CPC Control Registers
+ * 0x1c000000 - 0x1fffffff: I/O And Flash
+ * 0x20000000 - 0x7fffffff: 2nd RAM region, 1.5GB
+ * 0x80000000 - 0xffffffff: Physical memory aliases to 0x0 (2GB)
+ *
+ * The kernel is still located in 0x80000000(kseg0). However,
+ * the physical mask has been shifted to 0x80000000 which exploits the alias
+ * on the Malta board. As a result of which, we override the __pa_symbol
+ * to perform direct mapping from virtual to physical addresses. In other
+ * words, the 0x80000000 virtual address maps to 0x80000000 physical address
+ * which in turn aliases to 0x0. We do this in order to be able to use a flat
+ * 2GB of memory (0x80000000 - 0xffffffff) so we can avoid the I/O hole in
+ * 0x10000000 - 0x1fffffff.
+ * The last 64KB of physical memory are reserved for correct HIGHMEM
+ * macro arithmetic.
+ *
+ */
+
+#define PAGE_OFFSET    _AC(0x0, UL)
+#define PHYS_OFFSET    _AC(0x80000000, UL)
+#define HIGHMEM_START  _AC(0xffff0000, UL)
+
+#define __pa_symbol(x) (RELOC_HIDE((unsigned long)(x), 0))
+
+#endif /* CONFIG_EVA */
+
+#include <asm/mach-generic/spaces.h>
+
+#endif /* _ASM_MALTA_SPACES_H */
index 2dbc7a8cec1a4e62f46fad4e2624e699637dd779..fc946c8359952a5682e76aa361657eefd3a538c7 100644 (file)
@@ -76,7 +76,7 @@ static inline void set_value_reg32(volatile u32 *const addr,
 
        __asm__ __volatile__(
        "       .set    push                            \n"
-       "       .set    mips3                           \n"
+       "       .set    arch=r4000                      \n"
        "1:     ll      %0, %1  # set_value_reg32       \n"
        "       and     %0, %2                          \n"
        "       or      %0, %3                          \n"
@@ -98,7 +98,7 @@ static inline void set_reg32(volatile u32 *const addr,
 
        __asm__ __volatile__(
        "       .set    push                            \n"
-       "       .set    mips3                           \n"
+       "       .set    arch=r4000                      \n"
        "1:     ll      %0, %1          # set_reg32     \n"
        "       or      %0, %2                          \n"
        "       sc      %0, %1                          \n"
@@ -119,7 +119,7 @@ static inline void clear_reg32(volatile u32 *const addr,
 
        __asm__ __volatile__(
        "       .set    push                            \n"
-       "       .set    mips3                           \n"
+       "       .set    arch=r4000                      \n"
        "1:     ll      %0, %1          # clear_reg32   \n"
        "       and     %0, %2                          \n"
        "       sc      %0, %1                          \n"
@@ -140,7 +140,7 @@ static inline void toggle_reg32(volatile u32 *const addr,
 
        __asm__ __volatile__(
        "       .set    push                            \n"
-       "       .set    mips3                           \n"
+       "       .set    arch=r4000                      \n"
        "1:     ll      %0, %1          # toggle_reg32  \n"
        "       xor     %0, %2                          \n"
        "       sc      %0, %1                          \n"
@@ -216,7 +216,7 @@ static inline u32 blocking_read_reg32(volatile u32 *const addr)
 #define custom_read_reg32(address, tmp)                                \
        __asm__ __volatile__(                                   \
        "       .set    push                            \n"     \
-       "       .set    mips3                           \n"     \
+       "       .set    arch=r4000                      \n"     \
        "1:     ll      %0, %1  #custom_read_reg32      \n"     \
        "       .set    pop                             \n"     \
        : "=r" (tmp), "=m" (*address)                           \
@@ -225,7 +225,7 @@ static inline u32 blocking_read_reg32(volatile u32 *const addr)
 #define custom_write_reg32(address, tmp)                       \
        __asm__ __volatile__(                                   \
        "       .set    push                            \n"     \
-       "       .set    mips3                           \n"     \
+       "       .set    arch=r4000                      \n"     \
        "       sc      %0, %1  #custom_write_reg32     \n"     \
        "       "__beqz"%0, 1b                          \n"     \
        "       nop                                     \n"     \
index 722bc889eab555dfc29d2c30dbb01642df8119bb..fd9774269a5e5da0ad6cc48183e6e2304f455af9 100644 (file)
@@ -63,6 +63,11 @@ static inline unsigned long get_msc_port_base(unsigned long reg)
 #define GIC_BASE_ADDR                  0x1bdc0000
 #define GIC_ADDRSPACE_SZ               (128 * 1024)
 
+/*
+ * CPC Specific definitions
+ */
+#define CPC_BASE_ADDR                  0x1bde0000
+
 /*
  * MSC01 BIU Specific definitions
  * FIXME : These should be elsewhere ?
index 836e2ede24de11252a7a4cce4a4008ed7756fee1..9cf54041d416a5253ca8403fee795b0661342024 100644 (file)
@@ -50,4 +50,9 @@
 #define PIIX4_FUNC1_IDETIM_SECONDARY_HI                0x43
 #define   PIIX4_FUNC1_IDETIM_SECONDARY_HI_IDE_DECODE_EN        (1 << 7)
 
+/* Power Management Configuration Space */
+#define PIIX4_FUNC3_PMBA                       0x40
+#define PIIX4_FUNC3_PMREGMISC                  0x80
+#define   PIIX4_FUNC3_PMREGMISC_EN                     (1 << 0)
+
 #endif /* __ASM_MIPS_BOARDS_PIIX4_H */
diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h
new file mode 100644 (file)
index 0000000..6a9d2dd
--- /dev/null
@@ -0,0 +1,322 @@
+/*
+ * Copyright (C) 2013 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __MIPS_ASM_MIPS_CM_H__
+#define __MIPS_ASM_MIPS_CM_H__
+
+#include <linux/io.h>
+#include <linux/types.h>
+
+/* The base address of the CM GCR block */
+extern void __iomem *mips_cm_base;
+
+/* The base address of the CM L2-only sync region */
+extern void __iomem *mips_cm_l2sync_base;
+
+/**
+ * __mips_cm_phys_base - retrieve the physical base address of the CM
+ *
+ * This function returns the physical base address of the Coherence Manager
+ * global control block, or 0 if no Coherence Manager is present. It provides
+ * a default implementation which reads the CMGCRBase register where available,
+ * and may be overridden by platforms which determine this address in a
+ * different way by defining a function with the same prototype except for the
+ * name mips_cm_phys_base (without underscores).
+ */
+extern phys_t __mips_cm_phys_base(void);
+
+/**
+ * mips_cm_probe - probe for a Coherence Manager
+ *
+ * Attempt to detect the presence of a Coherence Manager. Returns 0 if a CM
+ * is successfully detected, else -errno.
+ */
+#ifdef CONFIG_MIPS_CM
+extern int mips_cm_probe(void);
+#else
+static inline int mips_cm_probe(void)
+{
+       return -ENODEV;
+}
+#endif
+
+/**
+ * mips_cm_present - determine whether a Coherence Manager is present
+ *
+ * Returns true if a CM is present in the system, else false.
+ */
+static inline bool mips_cm_present(void)
+{
+#ifdef CONFIG_MIPS_CM
+       return mips_cm_base != NULL;
+#else
+       return false;
+#endif
+}
+
+/**
+ * mips_cm_has_l2sync - determine whether an L2-only sync region is present
+ *
+ * Returns true if the system implements an L2-only sync region, else false.
+ */
+static inline bool mips_cm_has_l2sync(void)
+{
+#ifdef CONFIG_MIPS_CM
+       return mips_cm_l2sync_base != NULL;
+#else
+       return false;
+#endif
+}
+
+/* Offsets to register blocks from the CM base address */
+#define MIPS_CM_GCB_OFS                0x0000 /* Global Control Block */
+#define MIPS_CM_CLCB_OFS       0x2000 /* Core Local Control Block */
+#define MIPS_CM_COCB_OFS       0x4000 /* Core Other Control Block */
+#define MIPS_CM_GDB_OFS                0x6000 /* Global Debug Block */
+
+/* Total size of the CM memory mapped registers */
+#define MIPS_CM_GCR_SIZE       0x8000
+
+/* Size of the L2-only sync region */
+#define MIPS_CM_L2SYNC_SIZE    0x1000
+
+/* Macros to ease the creation of register access functions */
+#define BUILD_CM_R_(name, off)                                 \
+static inline u32 *addr_gcr_##name(void)                       \
+{                                                              \
+       return (u32 *)(mips_cm_base + (off));                   \
+}                                                              \
+                                                               \
+static inline u32 read_gcr_##name(void)                                \
+{                                                              \
+       return __raw_readl(addr_gcr_##name());                  \
+}
+
+#define BUILD_CM__W(name, off)                                 \
+static inline void write_gcr_##name(u32 value)                 \
+{                                                              \
+       __raw_writel(value, addr_gcr_##name());                 \
+}
+
+#define BUILD_CM_RW(name, off)                                 \
+       BUILD_CM_R_(name, off)                                  \
+       BUILD_CM__W(name, off)
+
+#define BUILD_CM_Cx_R_(name, off)                              \
+       BUILD_CM_R_(cl_##name, MIPS_CM_CLCB_OFS + (off))        \
+       BUILD_CM_R_(co_##name, MIPS_CM_COCB_OFS + (off))
+
+#define BUILD_CM_Cx__W(name, off)                              \
+       BUILD_CM__W(cl_##name, MIPS_CM_CLCB_OFS + (off))        \
+       BUILD_CM__W(co_##name, MIPS_CM_COCB_OFS + (off))
+
+#define BUILD_CM_Cx_RW(name, off)                              \
+       BUILD_CM_Cx_R_(name, off)                               \
+       BUILD_CM_Cx__W(name, off)
+
+/* GCB register accessor functions */
+BUILD_CM_R_(config,            MIPS_CM_GCB_OFS + 0x00)
+BUILD_CM_RW(base,              MIPS_CM_GCB_OFS + 0x08)
+BUILD_CM_RW(access,            MIPS_CM_GCB_OFS + 0x20)
+BUILD_CM_R_(rev,               MIPS_CM_GCB_OFS + 0x30)
+BUILD_CM_RW(error_mask,                MIPS_CM_GCB_OFS + 0x40)
+BUILD_CM_RW(error_cause,       MIPS_CM_GCB_OFS + 0x48)
+BUILD_CM_RW(error_addr,                MIPS_CM_GCB_OFS + 0x50)
+BUILD_CM_RW(error_mult,                MIPS_CM_GCB_OFS + 0x58)
+BUILD_CM_RW(l2_only_sync_base, MIPS_CM_GCB_OFS + 0x70)
+BUILD_CM_RW(gic_base,          MIPS_CM_GCB_OFS + 0x80)
+BUILD_CM_RW(cpc_base,          MIPS_CM_GCB_OFS + 0x88)
+BUILD_CM_RW(reg0_base,         MIPS_CM_GCB_OFS + 0x90)
+BUILD_CM_RW(reg0_mask,         MIPS_CM_GCB_OFS + 0x98)
+BUILD_CM_RW(reg1_base,         MIPS_CM_GCB_OFS + 0xa0)
+BUILD_CM_RW(reg1_mask,         MIPS_CM_GCB_OFS + 0xa8)
+BUILD_CM_RW(reg2_base,         MIPS_CM_GCB_OFS + 0xb0)
+BUILD_CM_RW(reg2_mask,         MIPS_CM_GCB_OFS + 0xb8)
+BUILD_CM_RW(reg3_base,         MIPS_CM_GCB_OFS + 0xc0)
+BUILD_CM_RW(reg3_mask,         MIPS_CM_GCB_OFS + 0xc8)
+BUILD_CM_R_(gic_status,                MIPS_CM_GCB_OFS + 0xd0)
+BUILD_CM_R_(cpc_status,                MIPS_CM_GCB_OFS + 0xf0)
+
+/* Core Local & Core Other register accessor functions */
+BUILD_CM_Cx_RW(reset_release,  0x00)
+BUILD_CM_Cx_RW(coherence,      0x08)
+BUILD_CM_Cx_R_(config,         0x10)
+BUILD_CM_Cx_RW(other,          0x18)
+BUILD_CM_Cx_RW(reset_base,     0x20)
+BUILD_CM_Cx_R_(id,             0x28)
+BUILD_CM_Cx_RW(reset_ext_base, 0x30)
+BUILD_CM_Cx_R_(tcid_0_priority,        0x40)
+BUILD_CM_Cx_R_(tcid_1_priority,        0x48)
+BUILD_CM_Cx_R_(tcid_2_priority,        0x50)
+BUILD_CM_Cx_R_(tcid_3_priority,        0x58)
+BUILD_CM_Cx_R_(tcid_4_priority,        0x60)
+BUILD_CM_Cx_R_(tcid_5_priority,        0x68)
+BUILD_CM_Cx_R_(tcid_6_priority,        0x70)
+BUILD_CM_Cx_R_(tcid_7_priority,        0x78)
+BUILD_CM_Cx_R_(tcid_8_priority,        0x80)
+
+/* GCR_CONFIG register fields */
+#define CM_GCR_CONFIG_NUMIOCU_SHF              8
+#define CM_GCR_CONFIG_NUMIOCU_MSK              (_ULCAST_(0xf) << 8)
+#define CM_GCR_CONFIG_PCORES_SHF               0
+#define CM_GCR_CONFIG_PCORES_MSK               (_ULCAST_(0xff) << 0)
+
+/* GCR_BASE register fields */
+#define CM_GCR_BASE_GCRBASE_SHF                        15
+#define CM_GCR_BASE_GCRBASE_MSK                        (_ULCAST_(0x1ffff) << 15)
+#define CM_GCR_BASE_CMDEFTGT_SHF               0
+#define CM_GCR_BASE_CMDEFTGT_MSK               (_ULCAST_(0x3) << 0)
+#define  CM_GCR_BASE_CMDEFTGT_DISABLED         0
+#define  CM_GCR_BASE_CMDEFTGT_MEM              1
+#define  CM_GCR_BASE_CMDEFTGT_IOCU0            2
+#define  CM_GCR_BASE_CMDEFTGT_IOCU1            3
+
+/* GCR_ACCESS register fields */
+#define CM_GCR_ACCESS_ACCESSEN_SHF             0
+#define CM_GCR_ACCESS_ACCESSEN_MSK             (_ULCAST_(0xff) << 0)
+
+/* GCR_REV register fields */
+#define CM_GCR_REV_MAJOR_SHF                   8
+#define CM_GCR_REV_MAJOR_MSK                   (_ULCAST_(0xff) << 8)
+#define CM_GCR_REV_MINOR_SHF                   0
+#define CM_GCR_REV_MINOR_MSK                   (_ULCAST_(0xff) << 0)
+
+/* GCR_ERROR_CAUSE register fields */
+#define CM_GCR_ERROR_CAUSE_ERRTYPE_SHF         27
+#define CM_GCR_ERROR_CAUSE_ERRTYPE_MSK         (_ULCAST_(0x1f) << 27)
+#define CM_GCR_ERROR_CAUSE_ERRINFO_SHF         0
+#define CM_GCR_ERROR_CAUSE_ERRINFO_MSK         (_ULCAST_(0x7ffffff) << 0)
+
+/* GCR_ERROR_MULT register fields */
+#define CM_GCR_ERROR_MULT_ERR2ND_SHF           0
+#define CM_GCR_ERROR_MULT_ERR2ND_MSK           (_ULCAST_(0x1f) << 0)
+
+/* GCR_L2_ONLY_SYNC_BASE register fields */
+#define CM_GCR_L2_ONLY_SYNC_BASE_SYNCBASE_SHF  12
+#define CM_GCR_L2_ONLY_SYNC_BASE_SYNCBASE_MSK  (_ULCAST_(0xfffff) << 12)
+#define CM_GCR_L2_ONLY_SYNC_BASE_SYNCEN_SHF    0
+#define CM_GCR_L2_ONLY_SYNC_BASE_SYNCEN_MSK    (_ULCAST_(0x1) << 0)
+
+/* GCR_GIC_BASE register fields */
+#define CM_GCR_GIC_BASE_GICBASE_SHF            17
+#define CM_GCR_GIC_BASE_GICBASE_MSK            (_ULCAST_(0x7fff) << 17)
+#define CM_GCR_GIC_BASE_GICEN_SHF              0
+#define CM_GCR_GIC_BASE_GICEN_MSK              (_ULCAST_(0x1) << 0)
+
+/* GCR_CPC_BASE register fields */
+#define CM_GCR_CPC_BASE_CPCBASE_SHF            17
+#define CM_GCR_CPC_BASE_CPCBASE_MSK            (_ULCAST_(0x7fff) << 17)
+#define CM_GCR_CPC_BASE_CPCEN_SHF              0
+#define CM_GCR_CPC_BASE_CPCEN_MSK              (_ULCAST_(0x1) << 0)
+
+/* GCR_REGn_BASE register fields */
+#define CM_GCR_REGn_BASE_BASEADDR_SHF          16
+#define CM_GCR_REGn_BASE_BASEADDR_MSK          (_ULCAST_(0xffff) << 16)
+
+/* GCR_REGn_MASK register fields */
+#define CM_GCR_REGn_MASK_ADDRMASK_SHF          16
+#define CM_GCR_REGn_MASK_ADDRMASK_MSK          (_ULCAST_(0xffff) << 16)
+#define CM_GCR_REGn_MASK_CCAOVR_SHF            5
+#define CM_GCR_REGn_MASK_CCAOVR_MSK            (_ULCAST_(0x3) << 5)
+#define CM_GCR_REGn_MASK_CCAOVREN_SHF          4
+#define CM_GCR_REGn_MASK_CCAOVREN_MSK          (_ULCAST_(0x1) << 4)
+#define CM_GCR_REGn_MASK_DROPL2_SHF            2
+#define CM_GCR_REGn_MASK_DROPL2_MSK            (_ULCAST_(0x1) << 2)
+#define CM_GCR_REGn_MASK_CMTGT_SHF             0
+#define CM_GCR_REGn_MASK_CMTGT_MSK             (_ULCAST_(0x3) << 0)
+#define  CM_GCR_REGn_MASK_CMTGT_DISABLED       (_ULCAST_(0x0) << 0)
+#define  CM_GCR_REGn_MASK_CMTGT_MEM            (_ULCAST_(0x1) << 0)
+#define  CM_GCR_REGn_MASK_CMTGT_IOCU0          (_ULCAST_(0x2) << 0)
+#define  CM_GCR_REGn_MASK_CMTGT_IOCU1          (_ULCAST_(0x3) << 0)
+
+/* GCR_GIC_STATUS register fields */
+#define CM_GCR_GIC_STATUS_EX_SHF               0
+#define CM_GCR_GIC_STATUS_EX_MSK               (_ULCAST_(0x1) << 0)
+
+/* GCR_CPC_STATUS register fields */
+#define CM_GCR_CPC_STATUS_EX_SHF               0
+#define CM_GCR_CPC_STATUS_EX_MSK               (_ULCAST_(0x1) << 0)
+
+/* GCR_Cx_COHERENCE register fields */
+#define CM_GCR_Cx_COHERENCE_COHDOMAINEN_SHF    0
+#define CM_GCR_Cx_COHERENCE_COHDOMAINEN_MSK    (_ULCAST_(0xff) << 0)
+
+/* GCR_Cx_CONFIG register fields */
+#define CM_GCR_Cx_CONFIG_IOCUTYPE_SHF          10
+#define CM_GCR_Cx_CONFIG_IOCUTYPE_MSK          (_ULCAST_(0x3) << 10)
+#define CM_GCR_Cx_CONFIG_PVPE_SHF              0
+#define CM_GCR_Cx_CONFIG_PVPE_MSK              (_ULCAST_(0x1ff) << 0)
+
+/* GCR_Cx_OTHER register fields */
+#define CM_GCR_Cx_OTHER_CORENUM_SHF            16
+#define CM_GCR_Cx_OTHER_CORENUM_MSK            (_ULCAST_(0xffff) << 16)
+
+/* GCR_Cx_RESET_BASE register fields */
+#define CM_GCR_Cx_RESET_BASE_BEVEXCBASE_SHF    12
+#define CM_GCR_Cx_RESET_BASE_BEVEXCBASE_MSK    (_ULCAST_(0xfffff) << 12)
+
+/* GCR_Cx_RESET_EXT_BASE register fields */
+#define CM_GCR_Cx_RESET_EXT_BASE_EVARESET_SHF  31
+#define CM_GCR_Cx_RESET_EXT_BASE_EVARESET_MSK  (_ULCAST_(0x1) << 31)
+#define CM_GCR_Cx_RESET_EXT_BASE_UEB_SHF       30
+#define CM_GCR_Cx_RESET_EXT_BASE_UEB_MSK       (_ULCAST_(0x1) << 30)
+#define CM_GCR_Cx_RESET_EXT_BASE_BEVEXCMASK_SHF        20
+#define CM_GCR_Cx_RESET_EXT_BASE_BEVEXCMASK_MSK        (_ULCAST_(0xff) << 20)
+#define CM_GCR_Cx_RESET_EXT_BASE_BEVEXCPA_SHF  1
+#define CM_GCR_Cx_RESET_EXT_BASE_BEVEXCPA_MSK  (_ULCAST_(0x7f) << 1)
+#define CM_GCR_Cx_RESET_EXT_BASE_PRESENT_SHF   0
+#define CM_GCR_Cx_RESET_EXT_BASE_PRESENT_MSK   (_ULCAST_(0x1) << 0)
+
+/**
+ * mips_cm_numcores - return the number of cores present in the system
+ *
+ * Returns the value of the PCORES field of the GCR_CONFIG register plus 1, or
+ * zero if no Coherence Manager is present.
+ */
+static inline unsigned mips_cm_numcores(void)
+{
+       if (!mips_cm_present())
+               return 0;
+
+       return ((read_gcr_config() & CM_GCR_CONFIG_PCORES_MSK)
+               >> CM_GCR_CONFIG_PCORES_SHF) + 1;
+}
+
+/**
+ * mips_cm_numiocu - return the number of IOCUs present in the system
+ *
+ * Returns the value of the NUMIOCU field of the GCR_CONFIG register, or zero
+ * if no Coherence Manager is present.
+ */
+static inline unsigned mips_cm_numiocu(void)
+{
+       if (!mips_cm_present())
+               return 0;
+
+       return (read_gcr_config() & CM_GCR_CONFIG_NUMIOCU_MSK)
+               >> CM_GCR_CONFIG_NUMIOCU_SHF;
+}
+
+/**
+ * mips_cm_l2sync - perform an L2-only sync operation
+ *
+ * If an L2-only sync region is present in the system then this function
+ * performs an L2-only sync and returns zero. Otherwise it returns -ENODEV.
+ */
+static inline int mips_cm_l2sync(void)
+{
+       if (!mips_cm_has_l2sync())
+               return -ENODEV;
+
+       writel(0, mips_cm_l2sync_base);
+       return 0;
+}
+
+#endif /* __MIPS_ASM_MIPS_CM_H__ */
diff --git a/arch/mips/include/asm/mips-cpc.h b/arch/mips/include/asm/mips-cpc.h
new file mode 100644 (file)
index 0000000..988507e
--- /dev/null
@@ -0,0 +1,150 @@
+/*
+ * Copyright (C) 2013 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __MIPS_ASM_MIPS_CPC_H__
+#define __MIPS_ASM_MIPS_CPC_H__
+
+#include <linux/io.h>
+#include <linux/types.h>
+
+/* The base address of the CPC registers */
+extern void __iomem *mips_cpc_base;
+
+/**
+ * mips_cpc_default_phys_base - retrieve the default physical base address of
+ *                              the CPC
+ *
+ * Returns the default physical base address of the Cluster Power Controller
+ * memory mapped registers. This is platform dependant & must therefore be
+ * implemented per-platform.
+ */
+extern phys_t mips_cpc_default_phys_base(void);
+
+/**
+ * mips_cpc_phys_base - retrieve the physical base address of the CPC
+ *
+ * This function returns the physical base address of the Cluster Power
+ * Controller memory mapped registers, or 0 if no Cluster Power Controller
+ * is present. It may be overridden by individual platforms which determine
+ * this address in a different way.
+ */
+extern phys_t __weak mips_cpc_phys_base(void);
+
+/**
+ * mips_cpc_probe - probe for a Cluster Power Controller
+ *
+ * Attempt to detect the presence of a Cluster Power Controller. Returns 0 if
+ * a CPC is successfully detected, else -errno.
+ */
+#ifdef CONFIG_MIPS_CPC
+extern int mips_cpc_probe(void);
+#else
+static inline int mips_cpc_probe(void)
+{
+       return -ENODEV;
+}
+#endif
+
+/**
+ * mips_cpc_present - determine whether a Cluster Power Controller is present
+ *
+ * Returns true if a CPC is present in the system, else false.
+ */
+static inline bool mips_cpc_present(void)
+{
+#ifdef CONFIG_MIPS_CPC
+       return mips_cpc_base != NULL;
+#else
+       return false;
+#endif
+}
+
+/* Offsets from the CPC base address to various control blocks */
+#define MIPS_CPC_GCB_OFS       0x0000
+#define MIPS_CPC_CLCB_OFS      0x2000
+#define MIPS_CPC_COCB_OFS      0x4000
+
+/* Macros to ease the creation of register access functions */
+#define BUILD_CPC_R_(name, off) \
+static inline u32 read_cpc_##name(void)                                \
+{                                                              \
+       return __raw_readl(mips_cpc_base + (off));              \
+}
+
+#define BUILD_CPC__W(name, off) \
+static inline void write_cpc_##name(u32 value)                 \
+{                                                              \
+       __raw_writel(value, mips_cpc_base + (off));             \
+}
+
+#define BUILD_CPC_RW(name, off)                                        \
+       BUILD_CPC_R_(name, off)                                 \
+       BUILD_CPC__W(name, off)
+
+#define BUILD_CPC_Cx_R_(name, off)                             \
+       BUILD_CPC_R_(cl_##name, MIPS_CPC_CLCB_OFS + (off))      \
+       BUILD_CPC_R_(co_##name, MIPS_CPC_COCB_OFS + (off))
+
+#define BUILD_CPC_Cx__W(name, off)                             \
+       BUILD_CPC__W(cl_##name, MIPS_CPC_CLCB_OFS + (off))      \
+       BUILD_CPC__W(co_##name, MIPS_CPC_COCB_OFS + (off))
+
+#define BUILD_CPC_Cx_RW(name, off)                             \
+       BUILD_CPC_Cx_R_(name, off)                              \
+       BUILD_CPC_Cx__W(name, off)
+
+/* GCB register accessor functions */
+BUILD_CPC_RW(access,           MIPS_CPC_GCB_OFS + 0x00)
+BUILD_CPC_RW(seqdel,           MIPS_CPC_GCB_OFS + 0x08)
+BUILD_CPC_RW(rail,             MIPS_CPC_GCB_OFS + 0x10)
+BUILD_CPC_RW(resetlen,         MIPS_CPC_GCB_OFS + 0x18)
+BUILD_CPC_R_(revision,         MIPS_CPC_GCB_OFS + 0x20)
+
+/* Core Local & Core Other accessor functions */
+BUILD_CPC_Cx_RW(cmd,           0x00)
+BUILD_CPC_Cx_RW(stat_conf,     0x08)
+BUILD_CPC_Cx_RW(other,         0x10)
+
+/* CPC_Cx_CMD register fields */
+#define CPC_Cx_CMD_SHF                         0
+#define CPC_Cx_CMD_MSK                         (_ULCAST_(0xf) << 0)
+#define  CPC_Cx_CMD_CLOCKOFF                   (_ULCAST_(0x1) << 0)
+#define  CPC_Cx_CMD_PWRDOWN                    (_ULCAST_(0x2) << 0)
+#define  CPC_Cx_CMD_PWRUP                      (_ULCAST_(0x3) << 0)
+#define  CPC_Cx_CMD_RESET                      (_ULCAST_(0x4) << 0)
+
+/* CPC_Cx_STAT_CONF register fields */
+#define CPC_Cx_STAT_CONF_PWRUPE_SHF            23
+#define CPC_Cx_STAT_CONF_PWRUPE_MSK            (_ULCAST_(0x1) << 23)
+#define CPC_Cx_STAT_CONF_SEQSTATE_SHF          19
+#define CPC_Cx_STAT_CONF_SEQSTATE_MSK          (_ULCAST_(0xf) << 19)
+#define  CPC_Cx_STAT_CONF_SEQSTATE_D0          (_ULCAST_(0x0) << 19)
+#define  CPC_Cx_STAT_CONF_SEQSTATE_U0          (_ULCAST_(0x1) << 19)
+#define  CPC_Cx_STAT_CONF_SEQSTATE_U1          (_ULCAST_(0x2) << 19)
+#define  CPC_Cx_STAT_CONF_SEQSTATE_U2          (_ULCAST_(0x3) << 19)
+#define  CPC_Cx_STAT_CONF_SEQSTATE_U3          (_ULCAST_(0x4) << 19)
+#define  CPC_Cx_STAT_CONF_SEQSTATE_U4          (_ULCAST_(0x5) << 19)
+#define  CPC_Cx_STAT_CONF_SEQSTATE_U5          (_ULCAST_(0x6) << 19)
+#define  CPC_Cx_STAT_CONF_SEQSTATE_U6          (_ULCAST_(0x7) << 19)
+#define  CPC_Cx_STAT_CONF_SEQSTATE_D1          (_ULCAST_(0x8) << 19)
+#define  CPC_Cx_STAT_CONF_SEQSTATE_D3          (_ULCAST_(0x9) << 19)
+#define  CPC_Cx_STAT_CONF_SEQSTATE_D2          (_ULCAST_(0xa) << 19)
+#define CPC_Cx_STAT_CONF_CLKGAT_IMPL_SHF       17
+#define CPC_Cx_STAT_CONF_CLKGAT_IMPL_MSK       (_ULCAST_(0x1) << 17)
+#define CPC_Cx_STAT_CONF_PWRDN_IMPL_SHF                16
+#define CPC_Cx_STAT_CONF_PWRDN_IMPL_MSK                (_ULCAST_(0x1) << 16)
+#define CPC_Cx_STAT_CONF_EJTAG_PROBE_SHF       15
+#define CPC_Cx_STAT_CONF_EJTAG_PROBE_MSK       (_ULCAST_(0x1) << 15)
+
+/* CPC_Cx_OTHER register fields */
+#define CPC_Cx_OTHER_CORENUM_SHF               16
+#define CPC_Cx_OTHER_CORENUM_MSK               (_ULCAST_(0xff) << 16)
+
+#endif /* __MIPS_ASM_MIPS_CPC_H__ */
index ac7935203f8993e272f96d54de6d78c5a574f221..a3df0c3faa0ee8be783d3d0fe1cf6b89fad6e7dd 100644 (file)
@@ -18,7 +18,12 @@ extern cpumask_t mt_fpu_cpumask;
 extern unsigned long mt_fpemul_threshold;
 
 extern void mips_mt_regdump(unsigned long previous_mvpcontrol_value);
+
+#ifdef CONFIG_MIPS_MT
 extern void mips_mt_set_cpuoptions(void);
+#else
+static inline void mips_mt_set_cpuoptions(void) { }
+#endif
 
 struct class;
 extern struct class *mt_class;
index 38b7704ee376917450621baf67bf635d23524792..6efa79a27b6a02e1fad11c60f5a801878f0d92a5 100644 (file)
 
 #ifndef __ASSEMBLY__
 
+static inline unsigned core_nvpes(void)
+{
+       unsigned conf0;
+
+       if (!cpu_has_mipsmt)
+               return 1;
+
+       conf0 = read_c0_mvpconf0();
+       return ((conf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
+}
+
 static inline unsigned int dvpe(void)
 {
        int res = 0;
index bbc3dd4294bc31be3908543be66d385932412ae0..3e025b5311db72306be7890c25f88737649de3d2 100644 (file)
 #define MIPS_CONF1_PC          (_ULCAST_(1) <<  4)
 #define MIPS_CONF1_MD          (_ULCAST_(1) <<  5)
 #define MIPS_CONF1_C2          (_ULCAST_(1) <<  6)
+#define MIPS_CONF1_DA_SHF      7
+#define MIPS_CONF1_DA_SZ       3
 #define MIPS_CONF1_DA          (_ULCAST_(7) <<  7)
+#define MIPS_CONF1_DL_SHF      10
+#define MIPS_CONF1_DL_SZ       3
 #define MIPS_CONF1_DL          (_ULCAST_(7) << 10)
+#define MIPS_CONF1_DS_SHF      13
+#define MIPS_CONF1_DS_SZ       3
 #define MIPS_CONF1_DS          (_ULCAST_(7) << 13)
+#define MIPS_CONF1_IA_SHF      16
+#define MIPS_CONF1_IA_SZ       3
 #define MIPS_CONF1_IA          (_ULCAST_(7) << 16)
+#define MIPS_CONF1_IL_SHF      19
+#define MIPS_CONF1_IL_SZ       3
 #define MIPS_CONF1_IL          (_ULCAST_(7) << 19)
+#define MIPS_CONF1_IS_SHF      22
+#define MIPS_CONF1_IS_SZ       3
 #define MIPS_CONF1_IS          (_ULCAST_(7) << 22)
 #define MIPS_CONF1_TLBS_SHIFT   (25)
 #define MIPS_CONF1_TLBS_SIZE    (6)
 
 #define MIPS_CONF7_RPS         (_ULCAST_(1) << 2)
 
+#define MIPS_CONF7_IAR         (_ULCAST_(1) << 10)
+#define MIPS_CONF7_AR          (_ULCAST_(1) << 16)
+
 /*  EntryHI bit definition */
 #define MIPS_ENTRYHI_EHINV     (_ULCAST_(1) << 10)
 
+/* CMGCRBase bit definitions */
+#define MIPS_CMGCRB_BASE       11
+#define MIPS_CMGCRF_BASE       (~_ULCAST_((1 << MIPS_CMGCRB_BASE) - 1))
+
 /*
  * Bits in the MIPS32/64 coprocessor 1 (FPU) revision register.
  */
@@ -1010,6 +1029,8 @@ do {                                                                      \
 
 #define read_c0_prid()         __read_32bit_c0_register($15, 0)
 
+#define read_c0_cmgcrbase()    __read_ulong_c0_register($15, 3)
+
 #define read_c0_config()       __read_32bit_c0_register($16, 0)
 #define read_c0_config1()      __read_32bit_c0_register($16, 1)
 #define read_c0_config2()      __read_32bit_c0_register($16, 2)
@@ -1883,6 +1904,7 @@ change_c0_##name(unsigned int change, unsigned int newbits)       \
 __BUILD_SET_C0(status)
 __BUILD_SET_C0(cause)
 __BUILD_SET_C0(config)
+__BUILD_SET_C0(config5)
 __BUILD_SET_C0(intcontrol)
 __BUILD_SET_C0(intctl)
 __BUILD_SET_C0(srsmap)
index 44b705d0826218a47ac6f78d5b068165b02732b9..c2edae382d5d5da707a7d137549cbcced4aac53d 100644 (file)
@@ -126,6 +126,8 @@ search_module_dbetables(unsigned long addr)
 #define MODULE_PROC_FAMILY "LOONGSON1 "
 #elif defined CONFIG_CPU_LOONGSON2
 #define MODULE_PROC_FAMILY "LOONGSON2 "
+#elif defined CONFIG_CPU_LOONGSON3
+#define MODULE_PROC_FAMILY "LOONGSON3 "
 #elif defined CONFIG_CPU_CAVIUM_OCTEON
 #define MODULE_PROC_FAMILY "OCTEON "
 #elif defined CONFIG_CPU_XLR
diff --git a/arch/mips/include/asm/msa.h b/arch/mips/include/asm/msa.h
new file mode 100644 (file)
index 0000000..a2aba6c
--- /dev/null
@@ -0,0 +1,203 @@
+/*
+ * Copyright (C) 2013 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+#ifndef _ASM_MSA_H
+#define _ASM_MSA_H
+
+#include <asm/mipsregs.h>
+
+extern void _save_msa(struct task_struct *);
+extern void _restore_msa(struct task_struct *);
+
+static inline void enable_msa(void)
+{
+       if (cpu_has_msa) {
+               set_c0_config5(MIPS_CONF5_MSAEN);
+               enable_fpu_hazard();
+       }
+}
+
+static inline void disable_msa(void)
+{
+       if (cpu_has_msa) {
+               clear_c0_config5(MIPS_CONF5_MSAEN);
+               disable_fpu_hazard();
+       }
+}
+
+static inline int is_msa_enabled(void)
+{
+       if (!cpu_has_msa)
+               return 0;
+
+       return read_c0_config5() & MIPS_CONF5_MSAEN;
+}
+
+static inline int thread_msa_context_live(void)
+{
+       /*
+        * Check cpu_has_msa only if it's a constant. This will allow the
+        * compiler to optimise out code for CPUs without MSA without adding
+        * an extra redundant check for CPUs with MSA.
+        */
+       if (__builtin_constant_p(cpu_has_msa) && !cpu_has_msa)
+               return 0;
+
+       return test_thread_flag(TIF_MSA_CTX_LIVE);
+}
+
+static inline void save_msa(struct task_struct *t)
+{
+       if (cpu_has_msa)
+               _save_msa(t);
+}
+
+static inline void restore_msa(struct task_struct *t)
+{
+       if (cpu_has_msa)
+               _restore_msa(t);
+}
+
+#ifdef TOOLCHAIN_SUPPORTS_MSA
+
+#define __BUILD_MSA_CTL_REG(name, cs)                          \
+static inline unsigned int read_msa_##name(void)               \
+{                                                              \
+       unsigned int reg;                                       \
+       __asm__ __volatile__(                                   \
+       "       .set    push\n"                                 \
+       "       .set    msa\n"                                  \
+       "       cfcmsa  %0, $" #cs "\n"                         \
+       "       .set    pop\n"                                  \
+       : "=r"(reg));                                           \
+       return reg;                                             \
+}                                                              \
+                                                               \
+static inline void write_msa_##name(unsigned int val)          \
+{                                                              \
+       __asm__ __volatile__(                                   \
+       "       .set    push\n"                                 \
+       "       .set    msa\n"                                  \
+       "       cfcmsa  $" #cs ", %0\n"                         \
+       "       .set    pop\n"                                  \
+       : : "r"(val));                                          \
+}
+
+#else /* !TOOLCHAIN_SUPPORTS_MSA */
+
+/*
+ * Define functions using .word for the c[ft]cmsa instructions in order to
+ * allow compilation with toolchains that do not support MSA. Once all
+ * toolchains in use support MSA these can be removed.
+ */
+
+#define __BUILD_MSA_CTL_REG(name, cs)                          \
+static inline unsigned int read_msa_##name(void)               \
+{                                                              \
+       unsigned int reg;                                       \
+       __asm__ __volatile__(                                   \
+       "       .set    push\n"                                 \
+       "       .set    noat\n"                                 \
+       "       .word   0x787e0059 | (" #cs " << 11)\n"         \
+       "       move    %0, $1\n"                               \
+       "       .set    pop\n"                                  \
+       : "=r"(reg));                                           \
+       return reg;                                             \
+}                                                              \
+                                                               \
+static inline void write_msa_##name(unsigned int val)          \
+{                                                              \
+       __asm__ __volatile__(                                   \
+       "       .set    push\n"                                 \
+       "       .set    noat\n"                                 \
+       "       move    $1, %0\n"                               \
+       "       .word   0x783e0819 | (" #cs " << 6)\n"          \
+       "       .set    pop\n"                                  \
+       : : "r"(val));                                          \
+}
+
+#endif /* !TOOLCHAIN_SUPPORTS_MSA */
+
+#define MSA_IR         0
+#define MSA_CSR                1
+#define MSA_ACCESS     2
+#define MSA_SAVE       3
+#define MSA_MODIFY     4
+#define MSA_REQUEST    5
+#define MSA_MAP                6
+#define MSA_UNMAP      7
+
+__BUILD_MSA_CTL_REG(ir, 0)
+__BUILD_MSA_CTL_REG(csr, 1)
+__BUILD_MSA_CTL_REG(access, 2)
+__BUILD_MSA_CTL_REG(save, 3)
+__BUILD_MSA_CTL_REG(modify, 4)
+__BUILD_MSA_CTL_REG(request, 5)
+__BUILD_MSA_CTL_REG(map, 6)
+__BUILD_MSA_CTL_REG(unmap, 7)
+
+/* MSA Implementation Register (MSAIR) */
+#define MSA_IR_REVB            0
+#define MSA_IR_REVF            (_ULCAST_(0xff) << MSA_IR_REVB)
+#define MSA_IR_PROCB           8
+#define MSA_IR_PROCF           (_ULCAST_(0xff) << MSA_IR_PROCB)
+#define MSA_IR_WRPB            16
+#define MSA_IR_WRPF            (_ULCAST_(0x1) << MSA_IR_WRPB)
+
+/* MSA Control & Status Register (MSACSR) */
+#define MSA_CSR_RMB            0
+#define MSA_CSR_RMF            (_ULCAST_(0x3) << MSA_CSR_RMB)
+#define MSA_CSR_RM_NEAREST     0
+#define MSA_CSR_RM_TO_ZERO     1
+#define MSA_CSR_RM_TO_POS      2
+#define MSA_CSR_RM_TO_NEG      3
+#define MSA_CSR_FLAGSB         2
+#define MSA_CSR_FLAGSF         (_ULCAST_(0x1f) << MSA_CSR_FLAGSB)
+#define MSA_CSR_FLAGS_IB       2
+#define MSA_CSR_FLAGS_IF       (_ULCAST_(0x1) << MSA_CSR_FLAGS_IB)
+#define MSA_CSR_FLAGS_UB       3
+#define MSA_CSR_FLAGS_UF       (_ULCAST_(0x1) << MSA_CSR_FLAGS_UB)
+#define MSA_CSR_FLAGS_OB       4
+#define MSA_CSR_FLAGS_OF       (_ULCAST_(0x1) << MSA_CSR_FLAGS_OB)
+#define MSA_CSR_FLAGS_ZB       5
+#define MSA_CSR_FLAGS_ZF       (_ULCAST_(0x1) << MSA_CSR_FLAGS_ZB)
+#define MSA_CSR_FLAGS_VB       6
+#define MSA_CSR_FLAGS_VF       (_ULCAST_(0x1) << MSA_CSR_FLAGS_VB)
+#define MSA_CSR_ENABLESB       7
+#define MSA_CSR_ENABLESF       (_ULCAST_(0x1f) << MSA_CSR_ENABLESB)
+#define MSA_CSR_ENABLES_IB     7
+#define MSA_CSR_ENABLES_IF     (_ULCAST_(0x1) << MSA_CSR_ENABLES_IB)
+#define MSA_CSR_ENABLES_UB     8
+#define MSA_CSR_ENABLES_UF     (_ULCAST_(0x1) << MSA_CSR_ENABLES_UB)
+#define MSA_CSR_ENABLES_OB     9
+#define MSA_CSR_ENABLES_OF     (_ULCAST_(0x1) << MSA_CSR_ENABLES_OB)
+#define MSA_CSR_ENABLES_ZB     10
+#define MSA_CSR_ENABLES_ZF     (_ULCAST_(0x1) << MSA_CSR_ENABLES_ZB)
+#define MSA_CSR_ENABLES_VB     11
+#define MSA_CSR_ENABLES_VF     (_ULCAST_(0x1) << MSA_CSR_ENABLES_VB)
+#define MSA_CSR_CAUSEB         12
+#define MSA_CSR_CAUSEF         (_ULCAST_(0x3f) << MSA_CSR_CAUSEB)
+#define MSA_CSR_CAUSE_IB       12
+#define MSA_CSR_CAUSE_IF       (_ULCAST_(0x1) << MSA_CSR_CAUSE_IB)
+#define MSA_CSR_CAUSE_UB       13
+#define MSA_CSR_CAUSE_UF       (_ULCAST_(0x1) << MSA_CSR_CAUSE_UB)
+#define MSA_CSR_CAUSE_OB       14
+#define MSA_CSR_CAUSE_OF       (_ULCAST_(0x1) << MSA_CSR_CAUSE_OB)
+#define MSA_CSR_CAUSE_ZB       15
+#define MSA_CSR_CAUSE_ZF       (_ULCAST_(0x1) << MSA_CSR_CAUSE_ZB)
+#define MSA_CSR_CAUSE_VB       16
+#define MSA_CSR_CAUSE_VF       (_ULCAST_(0x1) << MSA_CSR_CAUSE_VB)
+#define MSA_CSR_CAUSE_EB       17
+#define MSA_CSR_CAUSE_EF       (_ULCAST_(0x1) << MSA_CSR_CAUSE_EB)
+#define MSA_CSR_NXB            18
+#define MSA_CSR_NXF            (_ULCAST_(0x1) << MSA_CSR_NXB)
+#define MSA_CSR_FSB            24
+#define MSA_CSR_FSF            (_ULCAST_(0x1) << MSA_CSR_FSB)
+
+#endif /* _ASM_MSA_H */
index 5e08bcc74897ab35c377c2d3b4cca9e7a9829824..5699ec3a71af3b42c6afb98b33a87ceceb58c175 100644 (file)
@@ -190,7 +190,9 @@ typedef struct { unsigned long pgprot; } pgprot_t;
  * https://patchwork.linux-mips.org/patch/1541/
  */
 
+#ifndef __pa_symbol
 #define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0))
+#endif
 
 #define pfn_to_kaddr(pfn)      __va((pfn) << PAGE_SHIFT)
 
index 32aea4852fb0796045efcbc9fbd6414b67a4fe77..e592f3687d6f9151739d67bb5f8ed0a9c3158c79 100644 (file)
@@ -235,6 +235,15 @@ static inline uint64_t pte_to_entrylo(unsigned long pte_val)
 #define _CACHE_CACHABLE_NONCOHERENT (5<<_CACHE_SHIFT)
 #define _CACHE_UNCACHED_ACCELERATED (7<<_CACHE_SHIFT)
 
+#elif defined(CONFIG_CPU_LOONGSON3)
+
+/* Using COHERENT flag for NONCOHERENT doesn't hurt. */
+
+#define _CACHE_UNCACHED             (2<<_CACHE_SHIFT)  /* LOONGSON       */
+#define _CACHE_CACHABLE_NONCOHERENT (3<<_CACHE_SHIFT)  /* LOONGSON       */
+#define _CACHE_CACHABLE_COHERENT    (3<<_CACHE_SHIFT)  /* LOONGSON-3     */
+#define _CACHE_UNCACHED_ACCELERATED (7<<_CACHE_SHIFT)  /* LOONGSON       */
+
 #else
 
 #define _CACHE_CACHABLE_NO_WA      (0<<_CACHE_SHIFT)  /* R4600 only      */
index 3605b844ad873b72f0881fbd48de0e8c7a3538a7..ad70cba8daffaeaeb7f62a7a47308dc1597a8be4 100644 (file)
@@ -97,18 +97,48 @@ extern unsigned int vced_count, vcei_count;
 
 #define NUM_FPU_REGS   32
 
-typedef __u64 fpureg_t;
+#ifdef CONFIG_CPU_HAS_MSA
+# define FPU_REG_WIDTH 128
+#else
+# define FPU_REG_WIDTH 64
+#endif
+
+union fpureg {
+       __u32   val32[FPU_REG_WIDTH / 32];
+       __u64   val64[FPU_REG_WIDTH / 64];
+};
+
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+# define FPR_IDX(width, idx)   (idx)
+#else
+# define FPR_IDX(width, idx)   ((FPU_REG_WIDTH / (width)) - 1 - (idx))
+#endif
+
+#define BUILD_FPR_ACCESS(width) \
+static inline u##width get_fpr##width(union fpureg *fpr, unsigned idx) \
+{                                                                      \
+       return fpr->val##width[FPR_IDX(width, idx)];                    \
+}                                                                      \
+                                                                       \
+static inline void set_fpr##width(union fpureg *fpr, unsigned idx,     \
+                                 u##width val)                         \
+{                                                                      \
+       fpr->val##width[FPR_IDX(width, idx)] = val;                     \
+}
+
+BUILD_FPR_ACCESS(32)
+BUILD_FPR_ACCESS(64)
 
 /*
- * It would be nice to add some more fields for emulator statistics, but there
- * are a number of fixed offsets in offset.h and elsewhere that would have to
- * be recalculated by hand.  So the additional information will be private to
- * the FPU emulator for now.  See asm-mips/fpu_emulator.h.
+ * It would be nice to add some more fields for emulator statistics,
+ * the additional information is private to the FPU emulator for now.
+ * See arch/mips/include/asm/fpu_emulator.h.
  */
 
 struct mips_fpu_struct {
-       fpureg_t        fpr[NUM_FPU_REGS];
+       union fpureg    fpr[NUM_FPU_REGS];
        unsigned int    fcr31;
+       unsigned int    msacsr;
 };
 
 #define NUM_DSP_REGS   6
@@ -284,8 +314,9 @@ struct thread_struct {
         * Saved FPU/FPU emulator stuff                         \
         */                                                     \
        .fpu                    = {                             \
-               .fpr            = {0,},                         \
+               .fpr            = {{{0,},},},                   \
                .fcr31          = 0,                            \
+               .msacsr         = 0,                            \
        },                                                      \
        /*                                                      \
         * FPU affinity state (null if not FPAFF)               \
index 7bba9da110afab3f9c92eb02a52161ac23642d29..bf1ac8d3578387c19a98b5cd8177c05fa4a1b309 100644 (file)
@@ -82,7 +82,7 @@ static inline long regs_return_value(struct pt_regs *regs)
 #define instruction_pointer(regs) ((regs)->cp0_epc)
 #define profile_pc(regs) instruction_pointer(regs)
 
-extern asmlinkage void syscall_trace_enter(struct pt_regs *regs);
+extern asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall);
 extern asmlinkage void syscall_trace_leave(struct pt_regs *regs);
 
 extern void die(const char *, struct pt_regs *) __noreturn;
index c84caddb8bdedea260acac25a71c0d5a9c59ef19..ca64cbe44493bf05bd598115103fe7ba86eafa65 100644 (file)
@@ -17,6 +17,7 @@
 #include <asm/cpu-features.h>
 #include <asm/cpu-type.h>
 #include <asm/mipsmtregs.h>
+#include <asm/uaccess.h> /* for segment_eq() */
 
 /*
  * This macro return a properly sign-extended address suitable as base address
@@ -35,7 +36,7 @@
        __asm__ __volatile__(                                           \
        "       .set    push                                    \n"     \
        "       .set    noreorder                               \n"     \
-       "       .set    mips3\n\t                               \n"     \
+       "       .set    arch=r4000                              \n"     \
        "       cache   %0, %1                                  \n"     \
        "       .set    pop                                     \n"     \
        :                                                               \
@@ -203,7 +204,7 @@ static inline void flush_scache_line(unsigned long addr)
        __asm__ __volatile__(                                   \
        "       .set    push                    \n"             \
        "       .set    noreorder               \n"             \
-       "       .set    mips3                   \n"             \
+       "       .set    arch=r4000              \n"             \
        "1:     cache   %0, (%1)                \n"             \
        "2:     .set    pop                     \n"             \
        "       .section __ex_table,\"a\"       \n"             \
@@ -212,6 +213,20 @@ static inline void flush_scache_line(unsigned long addr)
        :                                                       \
        : "i" (op), "r" (addr))
 
+#define protected_cachee_op(op,addr)                           \
+       __asm__ __volatile__(                                   \
+       "       .set    push                    \n"             \
+       "       .set    noreorder               \n"             \
+       "       .set    mips0                   \n"             \
+       "       .set    eva                     \n"             \
+       "1:     cachee  %0, (%1)                \n"             \
+       "2:     .set    pop                     \n"             \
+       "       .section __ex_table,\"a\"       \n"             \
+       "       "STR(PTR)" 1b, 2b               \n"             \
+       "       .previous"                                      \
+       :                                                       \
+       : "i" (op), "r" (addr))
+
 /*
  * The next two are for badland addresses like signal trampolines.
  */
@@ -223,7 +238,11 @@ static inline void protected_flush_icache_line(unsigned long addr)
                break;
 
        default:
+#ifdef CONFIG_EVA
+               protected_cachee_op(Hit_Invalidate_I, addr);
+#else
                protected_cache_op(Hit_Invalidate_I, addr);
+#endif
                break;
        }
 }
@@ -356,6 +375,91 @@ static inline void invalidate_tcache_page(unsigned long addr)
                : "r" (base),                                           \
                  "i" (op));
 
+/*
+ * Perform the cache operation specified by op using a user mode virtual
+ * address while in kernel mode.
+ */
+#define cache16_unroll32_user(base,op)                                 \
+       __asm__ __volatile__(                                           \
+       "       .set push                                       \n"     \
+       "       .set noreorder                                  \n"     \
+       "       .set mips0                                      \n"     \
+       "       .set eva                                        \n"     \
+       "       cachee %1, 0x000(%0); cachee %1, 0x010(%0)      \n"     \
+       "       cachee %1, 0x020(%0); cachee %1, 0x030(%0)      \n"     \
+       "       cachee %1, 0x040(%0); cachee %1, 0x050(%0)      \n"     \
+       "       cachee %1, 0x060(%0); cachee %1, 0x070(%0)      \n"     \
+       "       cachee %1, 0x080(%0); cachee %1, 0x090(%0)      \n"     \
+       "       cachee %1, 0x0a0(%0); cachee %1, 0x0b0(%0)      \n"     \
+       "       cachee %1, 0x0c0(%0); cachee %1, 0x0d0(%0)      \n"     \
+       "       cachee %1, 0x0e0(%0); cachee %1, 0x0f0(%0)      \n"     \
+       "       cachee %1, 0x100(%0); cachee %1, 0x110(%0)      \n"     \
+       "       cachee %1, 0x120(%0); cachee %1, 0x130(%0)      \n"     \
+       "       cachee %1, 0x140(%0); cachee %1, 0x150(%0)      \n"     \
+       "       cachee %1, 0x160(%0); cachee %1, 0x170(%0)      \n"     \
+       "       cachee %1, 0x180(%0); cachee %1, 0x190(%0)      \n"     \
+       "       cachee %1, 0x1a0(%0); cachee %1, 0x1b0(%0)      \n"     \
+       "       cachee %1, 0x1c0(%0); cachee %1, 0x1d0(%0)      \n"     \
+       "       cachee %1, 0x1e0(%0); cachee %1, 0x1f0(%0)      \n"     \
+       "       .set pop                                        \n"     \
+               :                                                       \
+               : "r" (base),                                           \
+                 "i" (op));
+
+#define cache32_unroll32_user(base, op)                                        \
+       __asm__ __volatile__(                                           \
+       "       .set push                                       \n"     \
+       "       .set noreorder                                  \n"     \
+       "       .set mips0                                      \n"     \
+       "       .set eva                                        \n"     \
+       "       cachee %1, 0x000(%0); cachee %1, 0x020(%0)      \n"     \
+       "       cachee %1, 0x040(%0); cachee %1, 0x060(%0)      \n"     \
+       "       cachee %1, 0x080(%0); cachee %1, 0x0a0(%0)      \n"     \
+       "       cachee %1, 0x0c0(%0); cachee %1, 0x0e0(%0)      \n"     \
+       "       cachee %1, 0x100(%0); cachee %1, 0x120(%0)      \n"     \
+       "       cachee %1, 0x140(%0); cachee %1, 0x160(%0)      \n"     \
+       "       cachee %1, 0x180(%0); cachee %1, 0x1a0(%0)      \n"     \
+       "       cachee %1, 0x1c0(%0); cachee %1, 0x1e0(%0)      \n"     \
+       "       cachee %1, 0x200(%0); cachee %1, 0x220(%0)      \n"     \
+       "       cachee %1, 0x240(%0); cachee %1, 0x260(%0)      \n"     \
+       "       cachee %1, 0x280(%0); cachee %1, 0x2a0(%0)      \n"     \
+       "       cachee %1, 0x2c0(%0); cachee %1, 0x2e0(%0)      \n"     \
+       "       cachee %1, 0x300(%0); cachee %1, 0x320(%0)      \n"     \
+       "       cachee %1, 0x340(%0); cachee %1, 0x360(%0)      \n"     \
+       "       cachee %1, 0x380(%0); cachee %1, 0x3a0(%0)      \n"     \
+       "       cachee %1, 0x3c0(%0); cachee %1, 0x3e0(%0)      \n"     \
+       "       .set pop                                        \n"     \
+               :                                                       \
+               : "r" (base),                                           \
+                 "i" (op));
+
+#define cache64_unroll32_user(base, op)                                        \
+       __asm__ __volatile__(                                           \
+       "       .set push                                       \n"     \
+       "       .set noreorder                                  \n"     \
+       "       .set mips0                                      \n"     \
+       "       .set eva                                        \n"     \
+       "       cachee %1, 0x000(%0); cachee %1, 0x040(%0)      \n"     \
+       "       cachee %1, 0x080(%0); cachee %1, 0x0c0(%0)      \n"     \
+       "       cachee %1, 0x100(%0); cachee %1, 0x140(%0)      \n"     \
+       "       cachee %1, 0x180(%0); cachee %1, 0x1c0(%0)      \n"     \
+       "       cachee %1, 0x200(%0); cachee %1, 0x240(%0)      \n"     \
+       "       cachee %1, 0x280(%0); cachee %1, 0x2c0(%0)      \n"     \
+       "       cachee %1, 0x300(%0); cachee %1, 0x340(%0)      \n"     \
+       "       cachee %1, 0x380(%0); cachee %1, 0x3c0(%0)      \n"     \
+       "       cachee %1, 0x400(%0); cachee %1, 0x440(%0)      \n"     \
+       "       cachee %1, 0x480(%0); cachee %1, 0x4c0(%0)      \n"     \
+       "       cachee %1, 0x500(%0); cachee %1, 0x540(%0)      \n"     \
+       "       cachee %1, 0x580(%0); cachee %1, 0x5c0(%0)      \n"     \
+       "       cachee %1, 0x600(%0); cachee %1, 0x640(%0)      \n"     \
+       "       cachee %1, 0x680(%0); cachee %1, 0x6c0(%0)      \n"     \
+       "       cachee %1, 0x700(%0); cachee %1, 0x740(%0)      \n"     \
+       "       cachee %1, 0x780(%0); cachee %1, 0x7c0(%0)      \n"     \
+       "       .set pop                                        \n"     \
+               :                                                       \
+               : "r" (base),                                           \
+                 "i" (op));
+
 /* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
 #define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, extra)   \
 static inline void extra##blast_##pfx##cache##lsize(void)              \
@@ -429,6 +533,32 @@ __BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32
 __BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64, )
 __BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128, )
 
+#define __BUILD_BLAST_USER_CACHE(pfx, desc, indexop, hitop, lsize) \
+static inline void blast_##pfx##cache##lsize##_user_page(unsigned long page) \
+{                                                                      \
+       unsigned long start = page;                                     \
+       unsigned long end = page + PAGE_SIZE;                           \
+                                                                       \
+       __##pfx##flush_prologue                                         \
+                                                                       \
+       do {                                                            \
+               cache##lsize##_unroll32_user(start, hitop);             \
+               start += lsize * 32;                                    \
+       } while (start < end);                                          \
+                                                                       \
+       __##pfx##flush_epilogue                                         \
+}
+
+__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
+                        16)
+__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16)
+__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
+                        32)
+__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32)
+__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
+                        64)
+__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)
+
 /* build blast_xxx_range, protected_blast_xxx_range */
 #define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra)       \
 static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start, \
@@ -450,12 +580,51 @@ static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start,
        __##pfx##flush_epilogue                                         \
 }
 
+#ifndef CONFIG_EVA
+
 __BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, )
-__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, )
 __BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, )
+
+#else
+
+#define __BUILD_PROT_BLAST_CACHE_RANGE(pfx, desc, hitop)               \
+static inline void protected_blast_##pfx##cache##_range(unsigned long start,\
+                                                       unsigned long end) \
+{                                                                      \
+       unsigned long lsize = cpu_##desc##_line_size();                 \
+       unsigned long addr = start & ~(lsize - 1);                      \
+       unsigned long aend = (end - 1) & ~(lsize - 1);                  \
+                                                                       \
+       __##pfx##flush_prologue                                         \
+                                                                       \
+       if (segment_eq(get_fs(), USER_DS)) {                            \
+               while (1) {                                             \
+                       protected_cachee_op(hitop, addr);               \
+                       if (addr == aend)                               \
+                               break;                                  \
+                       addr += lsize;                                  \
+               }                                                       \
+       } else {                                                        \
+               while (1) {                                             \
+                       protected_cache_op(hitop, addr);                \
+                       if (addr == aend)                               \
+                               break;                                  \
+                       addr += lsize;                                  \
+               }                                                       \
+                                                                       \
+       }                                                               \
+       __##pfx##flush_epilogue                                         \
+}
+
+__BUILD_PROT_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D)
+__BUILD_PROT_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I)
+
+#endif
+__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, )
 __BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson2, \
        protected_, loongson2_)
 __BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , )
+__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, , )
 __BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , )
 /* blast_inv_dcache_range */
 __BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , )
index eeeb0f48c76754c2c9111b2f81528361545bb04e..f54bdbe85c0d859888495644a80484b5eaaff787 100644 (file)
@@ -32,6 +32,8 @@ struct sigcontext32 {
        __u32           sc_lo2;
        __u32           sc_hi3;
        __u32           sc_lo3;
+       __u64           sc_msaregs[32]; /* Most significant 64 bits */
+       __u32           sc_msa_csr;
 };
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 */
 #endif /* _ASM_SIGCONTEXT_H */
diff --git a/arch/mips/include/asm/smp-cps.h b/arch/mips/include/asm/smp-cps.h
new file mode 100644 (file)
index 0000000..d60d1a2
--- /dev/null
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2013 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __MIPS_ASM_SMP_CPS_H__
+#define __MIPS_ASM_SMP_CPS_H__
+
+#ifndef __ASSEMBLY__
+
+struct boot_config {
+       unsigned int core;
+       unsigned int vpe;
+       unsigned long pc;
+       unsigned long sp;
+       unsigned long gp;
+};
+
+extern struct boot_config mips_cps_bootcfg;
+
+extern void mips_cps_core_entry(void);
+
+#else /* __ASSEMBLY__ */
+
+.extern mips_cps_bootcfg;
+
+#endif /* __ASSEMBLY__ */
+#endif /* __MIPS_ASM_SMP_CPS_H__ */
index ef2a8041e78b02f7141e80b0a5440888520323f9..73d35b18fb64420aed254a316003d8282a0c5360 100644 (file)
@@ -13,6 +13,8 @@
 
 #include <linux/errno.h>
 
+#include <asm/mips-cm.h>
+
 #ifdef CONFIG_SMP
 
 #include <linux/cpumask.h>
@@ -43,6 +45,9 @@ static inline void plat_smp_setup(void)
        mp_ops->smp_setup();
 }
 
+extern void gic_send_ipi_single(int cpu, unsigned int action);
+extern void gic_send_ipi_mask(const struct cpumask *mask, unsigned int action);
+
 #else /* !CONFIG_SMP */
 
 struct plat_smp_ops;
@@ -76,6 +81,9 @@ static inline int register_cmp_smp_ops(void)
 #ifdef CONFIG_MIPS_CMP
        extern struct plat_smp_ops cmp_smp_ops;
 
+       if (!mips_cm_present())
+               return -ENODEV;
+
        register_smp_ops(&cmp_smp_ops);
 
        return 0;
@@ -97,4 +105,13 @@ static inline int register_vsmp_smp_ops(void)
 #endif
 }
 
+#ifdef CONFIG_MIPS_CPS
+extern int register_cps_smp_ops(void);
+#else
+static inline int register_cps_smp_ops(void)
+{
+       return -ENODEV;
+}
+#endif
+
 #endif /* __ASM_SMP_OPS_H */
index eb60087584844381a717bf6dc1ffdf39508dcf1a..efa02acd3dd5593849f470f90749f54b760d525f 100644 (file)
@@ -42,6 +42,7 @@ extern int __cpu_logical_map[NR_CPUS];
 #define SMP_ICACHE_FLUSH       0x4
 /* Used by kexec crashdump to save all cpu's state */
 #define SMP_DUMP               0x8
+#define SMP_ASK_C0COUNT                0x10
 
 extern volatile cpumask_t cpu_callin_map;
 
index 4857e2c8df5ae2eae1ac4bcca27b7dca586842aa..d301e108d5b82352d0fca294d96d9157913bc260 100644 (file)
 
                .macro  RESTORE_SP_AND_RET
                LONG_L  sp, PT_R29(sp)
-               .set    mips3
+               .set    arch=r4000
                eret
                .set    mips0
                .endm
index 278d45a097286034bfba656c9d626b1ff3d810b2..495c1041a2cc24e3ca1207ac31e7e3ff41f45382 100644 (file)
 #include <asm/watch.h>
 #include <asm/dsp.h>
 #include <asm/cop2.h>
+#include <asm/msa.h>
 
 struct task_struct;
 
+enum {
+       FP_SAVE_NONE    = 0,
+       FP_SAVE_VECTOR  = -1,
+       FP_SAVE_SCALAR  = 1,
+};
+
 /**
  * resume - resume execution of a task
  * @prev:      The task previously executed.
  * @next:      The task to begin executing.
  * @next_ti:   task_thread_info(next).
- * @usedfpu:   Non-zero if prev's FP context should be saved.
+ * @fp_save:   Which, if any, FP context to save for prev.
  *
  * This function is used whilst scheduling to save the context of prev & load
  * the context of next. Returns prev.
  */
 extern asmlinkage struct task_struct *resume(struct task_struct *prev,
                struct task_struct *next, struct thread_info *next_ti,
-               u32 usedfpu);
+               s32 fp_save);
 
 extern unsigned int ll_bit;
 extern struct task_struct *ll_task;
@@ -75,7 +82,8 @@ do {                                                                  \
 
 #define switch_to(prev, next, last)                                    \
 do {                                                                   \
-       u32 __usedfpu, __c0_stat;                                       \
+       u32 __c0_stat;                                                  \
+       s32 __fpsave = FP_SAVE_NONE;                                    \
        __mips_mt_fpaff_switch_to(prev);                                \
        if (cpu_has_dsp)                                                \
                __save_dsp(prev);                                       \
@@ -88,8 +96,12 @@ do {                                                                 \
                write_c0_status(__c0_stat & ~ST0_CU2);                  \
        }                                                               \
        __clear_software_ll_bit();                                      \
-       __usedfpu = test_and_clear_tsk_thread_flag(prev, TIF_USEDFPU);  \
-       (last) = resume(prev, next, task_thread_info(next), __usedfpu); \
+       if (test_and_clear_tsk_thread_flag(prev, TIF_USEDFPU))          \
+               __fpsave = FP_SAVE_SCALAR;                              \
+       if (test_and_clear_tsk_thread_flag(prev, TIF_USEDMSA))          \
+               __fpsave = FP_SAVE_VECTOR;                              \
+       (last) = resume(prev, next, task_thread_info(next), __fpsave);  \
+       disable_msa();                                                  \
 } while (0)
 
 #define finish_arch_switch(prev)                                       \
index f35b131977e62a3ef0d81c47da6827f47d527b71..6c488c85d79141b385d1d6c0c394aa4400518fbf 100644 (file)
 #include <linux/sched.h>
 #include <linux/uaccess.h>
 #include <asm/ptrace.h>
+#include <asm/unistd.h>
+
+#ifndef __NR_syscall /* Only defined if _MIPS_SIM == _MIPS_SIM_ABI32 */
+#define __NR_syscall 4000
+#endif
 
 static inline long syscall_get_nr(struct task_struct *task,
                                  struct pt_regs *regs)
 {
-       return regs->regs[2];
+       /* O32 ABI syscall() - Either 64-bit with O32 or 32-bit */
+       if ((config_enabled(CONFIG_32BIT) ||
+           test_tsk_thread_flag(task, TIF_32BIT_REGS)) &&
+           (regs->regs[2] == __NR_syscall))
+               return regs->regs[4];
+       else
+               return regs->regs[2];
 }
 
 static inline unsigned long mips_get_syscall_arg(unsigned long *arg,
@@ -68,6 +79,12 @@ static inline long syscall_get_return_value(struct task_struct *task,
        return regs->regs[2];
 }
 
+static inline void syscall_rollback(struct task_struct *task,
+                                   struct pt_regs *regs)
+{
+       /* Do nothing */
+}
+
 static inline void syscall_set_return_value(struct task_struct *task,
                                            struct pt_regs *regs,
                                            int error, long val)
@@ -87,6 +104,13 @@ static inline void syscall_get_arguments(struct task_struct *task,
                                         unsigned long *args)
 {
        int ret;
+       /* O32 ABI syscall() - Either 64-bit with O32 or 32-bit */
+       if ((config_enabled(CONFIG_32BIT) ||
+           test_tsk_thread_flag(task, TIF_32BIT_REGS)) &&
+           (regs->regs[2] == __NR_syscall)) {
+               i++;
+               n++;
+       }
 
        while (n--)
                ret |= mips_get_syscall_arg(args++, task, regs, i++);
@@ -103,11 +127,13 @@ extern const unsigned long sys_call_table[];
 extern const unsigned long sys32_call_table[];
 extern const unsigned long sysn32_call_table[];
 
-static inline int __syscall_get_arch(void)
+static inline int syscall_get_arch(struct task_struct *task,
+                                  struct pt_regs *regs)
 {
        int arch = EM_MIPS;
 #ifdef CONFIG_64BIT
-       arch |=  __AUDIT_ARCH_64BIT;
+       if (!test_tsk_thread_flag(task, TIF_32BIT_REGS))
+               arch |= __AUDIT_ARCH_64BIT;
 #endif
 #if defined(__LITTLE_ENDIAN)
        arch |=  __AUDIT_ARCH_LE;
index 24846f9053fe9af196996a43e7d77ab929f1d0ef..d2d961d6cb86fc1edded8f0ade0adeedc0c995be 100644 (file)
@@ -116,6 +116,8 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_LOAD_WATCH         25      /* If set, load watch registers */
 #define TIF_SYSCALL_TRACEPOINT 26      /* syscall tracepoint instrumentation */
 #define TIF_32BIT_FPREGS       27      /* 32-bit floating point registers */
+#define TIF_USEDMSA            29      /* MSA has been used this quantum */
+#define TIF_MSA_CTX_LIVE       30      /* MSA context must be preserved */
 #define TIF_SYSCALL_TRACE      31      /* syscall trace active */
 
 #define _TIF_SYSCALL_TRACE     (1<<TIF_SYSCALL_TRACE)
@@ -133,10 +135,13 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_FPUBOUND          (1<<TIF_FPUBOUND)
 #define _TIF_LOAD_WATCH                (1<<TIF_LOAD_WATCH)
 #define _TIF_32BIT_FPREGS      (1<<TIF_32BIT_FPREGS)
+#define _TIF_USEDMSA           (1<<TIF_USEDMSA)
+#define _TIF_MSA_CTX_LIVE      (1<<TIF_MSA_CTX_LIVE)
 #define _TIF_SYSCALL_TRACEPOINT        (1<<TIF_SYSCALL_TRACEPOINT)
 
 #define _TIF_WORK_SYSCALL_ENTRY        (_TIF_NOHZ | _TIF_SYSCALL_TRACE |       \
-                                _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)
+                                _TIF_SYSCALL_AUDIT | \
+                                _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
 
 /* work to do in syscall_trace_leave() */
 #define _TIF_WORK_SYSCALL_EXIT (_TIF_NOHZ | _TIF_SYSCALL_TRACE |       \
index f3fa3750f577c2414396943871a8f0bd6df6b928..a10951090234073cae8aa6ea9d8b5542faef6a22 100644 (file)
@@ -6,6 +6,7 @@
  * Copyright (C) 1996, 1997, 1998, 1999, 2000, 03, 04 by Ralf Baechle
  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
  * Copyright (C) 2007  Maciej W. Rozycki
+ * Copyright (C) 2014, Imagination Technologies Ltd.
  */
 #ifndef _ASM_UACCESS_H
 #define _ASM_UACCESS_H
@@ -13,6 +14,7 @@
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/thread_info.h>
+#include <asm/asm-eva.h>
 
 /*
  * The fs value determines whether argument validity checking should be
@@ -222,11 +224,44 @@ struct __large_struct { unsigned long buf[100]; };
  * Yuck.  We need two variants, one for 64bit operation and one
  * for 32 bit mode and old iron.
  */
+#ifndef CONFIG_EVA
+#define __get_kernel_common(val, size, ptr) __get_user_common(val, size, ptr)
+#else
+/*
+ * Kernel specific functions for EVA. We need to use normal load instructions
+ * to read data from kernel when operating in EVA mode. We use these macros to
+ * avoid redefining __get_user_asm for EVA.
+ */
+#undef _loadd
+#undef _loadw
+#undef _loadh
+#undef _loadb
 #ifdef CONFIG_32BIT
-#define __GET_USER_DW(val, ptr) __get_user_asm_ll32(val, ptr)
+#define _loadd                 _loadw
+#else
+#define _loadd(reg, addr)      "ld " reg ", " addr
+#endif
+#define _loadw(reg, addr)      "lw " reg ", " addr
+#define _loadh(reg, addr)      "lh " reg ", " addr
+#define _loadb(reg, addr)      "lb " reg ", " addr
+
+#define __get_kernel_common(val, size, ptr)                            \
+do {                                                                   \
+       switch (size) {                                                 \
+       case 1: __get_data_asm(val, _loadb, ptr); break;                \
+       case 2: __get_data_asm(val, _loadh, ptr); break;                \
+       case 4: __get_data_asm(val, _loadw, ptr); break;                \
+       case 8: __GET_DW(val, _loadd, ptr); break;                      \
+       default: __get_user_unknown(); break;                           \
+       }                                                               \
+} while (0)
+#endif
+
+#ifdef CONFIG_32BIT
+#define __GET_DW(val, insn, ptr) __get_data_asm_ll32(val, insn, ptr)
 #endif
 #ifdef CONFIG_64BIT
-#define __GET_USER_DW(val, ptr) __get_user_asm(val, "ld", ptr)
+#define __GET_DW(val, insn, ptr) __get_data_asm(val, insn, ptr)
 #endif
 
 extern void __get_user_unknown(void);
@@ -234,10 +269,10 @@ extern void __get_user_unknown(void);
 #define __get_user_common(val, size, ptr)                              \
 do {                                                                   \
        switch (size) {                                                 \
-       case 1: __get_user_asm(val, "lb", ptr); break;                  \
-       case 2: __get_user_asm(val, "lh", ptr); break;                  \
-       case 4: __get_user_asm(val, "lw", ptr); break;                  \
-       case 8: __GET_USER_DW(val, ptr); break;                         \
+       case 1: __get_data_asm(val, user_lb, ptr); break;               \
+       case 2: __get_data_asm(val, user_lh, ptr); break;               \
+       case 4: __get_data_asm(val, user_lw, ptr); break;               \
+       case 8: __GET_DW(val, user_ld, ptr); break;                     \
        default: __get_user_unknown(); break;                           \
        }                                                               \
 } while (0)
@@ -246,8 +281,12 @@ do {                                                                       \
 ({                                                                     \
        int __gu_err;                                                   \
                                                                        \
-       __chk_user_ptr(ptr);                                            \
-       __get_user_common((x), size, ptr);                              \
+       if (segment_eq(get_fs(), get_ds())) {                           \
+               __get_kernel_common((x), size, ptr);                    \
+       } else {                                                        \
+               __chk_user_ptr(ptr);                                    \
+               __get_user_common((x), size, ptr);                      \
+       }                                                               \
        __gu_err;                                                       \
 })
 
@@ -257,18 +296,22 @@ do {                                                                      \
        const __typeof__(*(ptr)) __user * __gu_ptr = (ptr);             \
                                                                        \
        might_fault();                                                  \
-       if (likely(access_ok(VERIFY_READ,  __gu_ptr, size)))            \
-               __get_user_common((x), size, __gu_ptr);                 \
+       if (likely(access_ok(VERIFY_READ,  __gu_ptr, size))) {          \
+               if (segment_eq(get_fs(), get_ds()))                     \
+                       __get_kernel_common((x), size, __gu_ptr);       \
+               else                                                    \
+                       __get_user_common((x), size, __gu_ptr);         \
+       }                                                               \
                                                                        \
        __gu_err;                                                       \
 })
 
-#define __get_user_asm(val, insn, addr)                                        \
+#define __get_data_asm(val, insn, addr)                                        \
 {                                                                      \
        long __gu_tmp;                                                  \
                                                                        \
        __asm__ __volatile__(                                           \
-       "1:     " insn "        %1, %3                          \n"     \
+       "1:     "insn("%1", "%3")"                              \n"     \
        "2:                                                     \n"     \
        "       .insn                                           \n"     \
        "       .section .fixup,\"ax\"                          \n"     \
@@ -287,7 +330,7 @@ do {                                                                        \
 /*
  * Get a long long 64 using 32 bit registers.
  */
-#define __get_user_asm_ll32(val, addr)                                 \
+#define __get_data_asm_ll32(val, insn, addr)                           \
 {                                                                      \
        union {                                                         \
                unsigned long long      l;                              \
@@ -295,8 +338,8 @@ do {                                                                        \
        } __gu_tmp;                                                     \
                                                                        \
        __asm__ __volatile__(                                           \
-       "1:     lw      %1, (%3)                                \n"     \
-       "2:     lw      %D1, 4(%3)                              \n"     \
+       "1:     " insn("%1", "(%3)")"                           \n"     \
+       "2:     " insn("%D1", "4(%3)")"                         \n"     \
        "3:                                                     \n"     \
        "       .insn                                           \n"     \
        "       .section        .fixup,\"ax\"                   \n"     \
@@ -315,30 +358,73 @@ do {                                                                      \
        (val) = __gu_tmp.t;                                             \
 }
 
+#ifndef CONFIG_EVA
+#define __put_kernel_common(ptr, size) __put_user_common(ptr, size)
+#else
+/*
+ * Kernel specific functions for EVA. We need to use normal load instructions
+ * to read data from kernel when operating in EVA mode. We use these macros to
+ * avoid redefining __get_data_asm for EVA.
+ */
+#undef _stored
+#undef _storew
+#undef _storeh
+#undef _storeb
+#ifdef CONFIG_32BIT
+#define _stored                        _storew
+#else
+#define _stored(reg, addr)     "ld " reg ", " addr
+#endif
+
+#define _storew(reg, addr)     "sw " reg ", " addr
+#define _storeh(reg, addr)     "sh " reg ", " addr
+#define _storeb(reg, addr)     "sb " reg ", " addr
+
+#define __put_kernel_common(ptr, size)                                 \
+do {                                                                   \
+       switch (size) {                                                 \
+       case 1: __put_data_asm(_storeb, ptr); break;                    \
+       case 2: __put_data_asm(_storeh, ptr); break;                    \
+       case 4: __put_data_asm(_storew, ptr); break;                    \
+       case 8: __PUT_DW(_stored, ptr); break;                          \
+       default: __put_user_unknown(); break;                           \
+       }                                                               \
+} while(0)
+#endif
+
 /*
  * Yuck.  We need two variants, one for 64bit operation and one
  * for 32 bit mode and old iron.
  */
 #ifdef CONFIG_32BIT
-#define __PUT_USER_DW(ptr) __put_user_asm_ll32(ptr)
+#define __PUT_DW(insn, ptr) __put_data_asm_ll32(insn, ptr)
 #endif
 #ifdef CONFIG_64BIT
-#define __PUT_USER_DW(ptr) __put_user_asm("sd", ptr)
+#define __PUT_DW(insn, ptr) __put_data_asm(insn, ptr)
 #endif
 
+#define __put_user_common(ptr, size)                                   \
+do {                                                                   \
+       switch (size) {                                                 \
+       case 1: __put_data_asm(user_sb, ptr); break;                    \
+       case 2: __put_data_asm(user_sh, ptr); break;                    \
+       case 4: __put_data_asm(user_sw, ptr); break;                    \
+       case 8: __PUT_DW(user_sd, ptr); break;                          \
+       default: __put_user_unknown(); break;                           \
+       }                                                               \
+} while (0)
+
 #define __put_user_nocheck(x, ptr, size)                               \
 ({                                                                     \
        __typeof__(*(ptr)) __pu_val;                                    \
        int __pu_err = 0;                                               \
                                                                        \
-       __chk_user_ptr(ptr);                                            \
        __pu_val = (x);                                                 \
-       switch (size) {                                                 \
-       case 1: __put_user_asm("sb", ptr); break;                       \
-       case 2: __put_user_asm("sh", ptr); break;                       \
-       case 4: __put_user_asm("sw", ptr); break;                       \
-       case 8: __PUT_USER_DW(ptr); break;                              \
-       default: __put_user_unknown(); break;                           \
+       if (segment_eq(get_fs(), get_ds())) {                           \
+               __put_kernel_common(ptr, size);                         \
+       } else {                                                        \
+               __chk_user_ptr(ptr);                                    \
+               __put_user_common(ptr, size);                           \
        }                                                               \
        __pu_err;                                                       \
 })
@@ -351,21 +437,19 @@ do {                                                                      \
                                                                        \
        might_fault();                                                  \
        if (likely(access_ok(VERIFY_WRITE,  __pu_addr, size))) {        \
-               switch (size) {                                         \
-               case 1: __put_user_asm("sb", __pu_addr); break;         \
-               case 2: __put_user_asm("sh", __pu_addr); break;         \
-               case 4: __put_user_asm("sw", __pu_addr); break;         \
-               case 8: __PUT_USER_DW(__pu_addr); break;                \
-               default: __put_user_unknown(); break;                   \
-               }                                                       \
+               if (segment_eq(get_fs(), get_ds()))                     \
+                       __put_kernel_common(__pu_addr, size);           \
+               else                                                    \
+                       __put_user_common(__pu_addr, size);             \
        }                                                               \
+                                                                       \
        __pu_err;                                                       \
 })
 
-#define __put_user_asm(insn, ptr)                                      \
+#define __put_data_asm(insn, ptr)                                      \
 {                                                                      \
        __asm__ __volatile__(                                           \
-       "1:     " insn "        %z2, %3         # __put_user_asm\n"     \
+       "1:     "insn("%z2", "%3")"     # __put_data_asm        \n"     \
        "2:                                                     \n"     \
        "       .insn                                           \n"     \
        "       .section        .fixup,\"ax\"                   \n"     \
@@ -380,11 +464,11 @@ do {                                                                      \
          "i" (-EFAULT));                                               \
 }
 
-#define __put_user_asm_ll32(ptr)                                       \
+#define __put_data_asm_ll32(insn, ptr)                                 \
 {                                                                      \
        __asm__ __volatile__(                                           \
-       "1:     sw      %2, (%3)        # __put_user_asm_ll32   \n"     \
-       "2:     sw      %D2, 4(%3)                              \n"     \
+       "1:     "insn("%2", "(%3)")"    # __put_data_asm_ll32   \n"     \
+       "2:     "insn("%D2", "4(%3)")"                          \n"     \
        "3:                                                     \n"     \
        "       .insn                                           \n"     \
        "       .section        .fixup,\"ax\"                   \n"     \
@@ -402,6 +486,11 @@ do {                                                                       \
 
 extern void __put_user_unknown(void);
 
+/*
+ * ul{b,h,w} are macros and there are no equivalent macros for EVA.
+ * EVA unaligned access is handled in the ADE exception handler.
+ */
+#ifndef CONFIG_EVA
 /*
  * put_user_unaligned: - Write a simple value into user space.
  * @x:  Value to copy to user space.
@@ -504,7 +593,7 @@ extern void __get_user_unaligned_unknown(void);
 #define __get_user_unaligned_common(val, size, ptr)                    \
 do {                                                                   \
        switch (size) {                                                 \
-       case 1: __get_user_asm(val, "lb", ptr); break;                  \
+       case 1: __get_data_asm(val, "lb", ptr); break;                  \
        case 2: __get_user_unaligned_asm(val, "ulh", ptr); break;       \
        case 4: __get_user_unaligned_asm(val, "ulw", ptr); break;       \
        case 8: __GET_USER_UNALIGNED_DW(val, ptr); break;               \
@@ -531,7 +620,7 @@ do {                                                                        \
        __gu_err;                                                       \
 })
 
-#define __get_user_unaligned_asm(val, insn, addr)                      \
+#define __get_data_unaligned_asm(val, insn, addr)                      \
 {                                                                      \
        long __gu_tmp;                                                  \
                                                                        \
@@ -594,19 +683,23 @@ do {                                                                      \
 #define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm("usd", ptr)
 #endif
 
+#define __put_user_unaligned_common(ptr, size)                         \
+do {                                                                   \
+       switch (size) {                                                 \
+       case 1: __put_data_asm("sb", ptr); break;                       \
+       case 2: __put_user_unaligned_asm("ush", ptr); break;            \
+       case 4: __put_user_unaligned_asm("usw", ptr); break;            \
+       case 8: __PUT_USER_UNALIGNED_DW(ptr); break;                    \
+       default: __put_user_unaligned_unknown(); break;                 \
+       }                                                               \
+} while (0)
+
 #define __put_user_unaligned_nocheck(x,ptr,size)                       \
 ({                                                                     \
        __typeof__(*(ptr)) __pu_val;                                    \
        int __pu_err = 0;                                               \
                                                                        \
        __pu_val = (x);                                                 \
-       switch (size) {                                                 \
-       case 1: __put_user_asm("sb", ptr); break;                       \
-       case 2: __put_user_unaligned_asm("ush", ptr); break;            \
-       case 4: __put_user_unaligned_asm("usw", ptr); break;            \
-       case 8: __PUT_USER_UNALIGNED_DW(ptr); break;                    \
-       default: __put_user_unaligned_unknown(); break;                 \
-       }                                                               \
+       __put_user_unaligned_common(ptr, size);                         \
        __pu_err;                                                       \
 })
 
@@ -616,15 +709,9 @@ do {                                                                       \
        __typeof__(*(ptr)) __pu_val = (x);                              \
        int __pu_err = -EFAULT;                                         \
                                                                        \
-       if (likely(access_ok(VERIFY_WRITE,  __pu_addr, size))) {        \
-               switch (size) {                                         \
-               case 1: __put_user_asm("sb", __pu_addr); break;         \
-               case 2: __put_user_unaligned_asm("ush", __pu_addr); break; \
-               case 4: __put_user_unaligned_asm("usw", __pu_addr); break; \
-               case 8: __PUT_USER_UNALGINED_DW(__pu_addr); break;      \
-               default: __put_user_unaligned_unknown(); break;         \
-               }                                                       \
-       }                                                               \
+       if (likely(access_ok(VERIFY_WRITE,  __pu_addr, size)))          \
+               __put_user_unaligned_common(__pu_addr, size);           \
+                                                                       \
        __pu_err;                                                       \
 })
 
@@ -669,6 +756,7 @@ do {                                                                        \
 }
 
 extern void __put_user_unaligned_unknown(void);
+#endif
 
 /*
  * We're generating jump to subroutines which will be outside the range of
@@ -693,6 +781,7 @@ extern void __put_user_unaligned_unknown(void);
 
 extern size_t __copy_user(void *__to, const void *__from, size_t __n);
 
+#ifndef CONFIG_EVA
 #define __invoke_copy_to_user(to, from, n)                             \
 ({                                                                     \
        register void __user *__cu_to_r __asm__("$4");                  \
@@ -711,6 +800,11 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n);
        __cu_len_r;                                                     \
 })
 
+#define __invoke_copy_to_kernel(to, from, n)                           \
+       __invoke_copy_to_user(to, from, n)
+
+#endif
+
 /*
  * __copy_to_user: - Copy a block of data into user space, with less checking.
  * @to:          Destination address, in user space.
@@ -735,7 +829,12 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n);
        __cu_from = (from);                                             \
        __cu_len = (n);                                                 \
        might_fault();                                                  \
-       __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \
+       if (segment_eq(get_fs(), get_ds()))                             \
+               __cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from,  \
+                                                  __cu_len);           \
+       else                                                            \
+               __cu_len = __invoke_copy_to_user(__cu_to, __cu_from,    \
+                                                __cu_len);             \
        __cu_len;                                                       \
 })
 
@@ -750,7 +849,12 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
        __cu_to = (to);                                                 \
        __cu_from = (from);                                             \
        __cu_len = (n);                                                 \
-       __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \
+       if (segment_eq(get_fs(), get_ds()))                             \
+               __cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from,  \
+                                                  __cu_len);           \
+       else                                                            \
+               __cu_len = __invoke_copy_to_user(__cu_to, __cu_from,    \
+                                                __cu_len);             \
        __cu_len;                                                       \
 })
 
@@ -763,8 +867,14 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
        __cu_to = (to);                                                 \
        __cu_from = (from);                                             \
        __cu_len = (n);                                                 \
-       __cu_len = __invoke_copy_from_user_inatomic(__cu_to, __cu_from, \
-                                                   __cu_len);          \
+       if (segment_eq(get_fs(), get_ds()))                             \
+               __cu_len = __invoke_copy_from_kernel_inatomic(__cu_to,  \
+                                                             __cu_from,\
+                                                             __cu_len);\
+       else                                                            \
+               __cu_len = __invoke_copy_from_user_inatomic(__cu_to,    \
+                                                           __cu_from,  \
+                                                           __cu_len);  \
        __cu_len;                                                       \
 })
 
@@ -790,14 +900,23 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
        __cu_to = (to);                                                 \
        __cu_from = (from);                                             \
        __cu_len = (n);                                                 \
-       if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) {               \
-               might_fault();                                          \
-               __cu_len = __invoke_copy_to_user(__cu_to, __cu_from,    \
-                                                __cu_len);             \
+       if (segment_eq(get_fs(), get_ds())) {                           \
+               __cu_len = __invoke_copy_to_kernel(__cu_to,             \
+                                                  __cu_from,           \
+                                                  __cu_len);           \
+       } else {                                                        \
+               if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) {       \
+                       might_fault();                                  \
+                       __cu_len = __invoke_copy_to_user(__cu_to,       \
+                                                        __cu_from,     \
+                                                        __cu_len);     \
+               }                                                       \
        }                                                               \
        __cu_len;                                                       \
 })
 
+#ifndef CONFIG_EVA
+
 #define __invoke_copy_from_user(to, from, n)                           \
 ({                                                                     \
        register void *__cu_to_r __asm__("$4");                         \
@@ -821,6 +940,17 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
        __cu_len_r;                                                     \
 })
 
+#define __invoke_copy_from_kernel(to, from, n)                         \
+       __invoke_copy_from_user(to, from, n)
+
+/* For userland <-> userland operations */
+#define ___invoke_copy_in_user(to, from, n)                            \
+       __invoke_copy_from_user(to, from, n)
+
+/* For kernel <-> kernel operations */
+#define ___invoke_copy_in_kernel(to, from, n)                          \
+       __invoke_copy_from_user(to, from, n)
+
 #define __invoke_copy_from_user_inatomic(to, from, n)                  \
 ({                                                                     \
        register void *__cu_to_r __asm__("$4");                         \
@@ -844,6 +974,97 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
        __cu_len_r;                                                     \
 })
 
+#define __invoke_copy_from_kernel_inatomic(to, from, n)                        \
+       __invoke_copy_from_user_inatomic(to, from, n)                   \
+
+#else
+
+/* EVA specific functions */
+
+extern size_t __copy_user_inatomic_eva(void *__to, const void *__from,
+                                      size_t __n);
+extern size_t __copy_from_user_eva(void *__to, const void *__from,
+                                  size_t __n);
+extern size_t __copy_to_user_eva(void *__to, const void *__from,
+                                size_t __n);
+extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
+
+#define __invoke_copy_from_user_eva_generic(to, from, n, func_ptr)     \
+({                                                                     \
+       register void *__cu_to_r __asm__("$4");                         \
+       register const void __user *__cu_from_r __asm__("$5");          \
+       register long __cu_len_r __asm__("$6");                         \
+                                                                       \
+       __cu_to_r = (to);                                               \
+       __cu_from_r = (from);                                           \
+       __cu_len_r = (n);                                               \
+       __asm__ __volatile__(                                           \
+       ".set\tnoreorder\n\t"                                           \
+       __MODULE_JAL(func_ptr)                                          \
+       ".set\tnoat\n\t"                                                \
+       __UA_ADDU "\t$1, %1, %2\n\t"                                    \
+       ".set\tat\n\t"                                                  \
+       ".set\treorder"                                                 \
+       : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)       \
+       :                                                               \
+       : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",  \
+         DADDI_SCRATCH, "memory");                                     \
+       __cu_len_r;                                                     \
+})
+
+#define __invoke_copy_to_user_eva_generic(to, from, n, func_ptr)       \
+({                                                                     \
+       register void *__cu_to_r __asm__("$4");                         \
+       register const void __user *__cu_from_r __asm__("$5");          \
+       register long __cu_len_r __asm__("$6");                         \
+                                                                       \
+       __cu_to_r = (to);                                               \
+       __cu_from_r = (from);                                           \
+       __cu_len_r = (n);                                               \
+       __asm__ __volatile__(                                           \
+       __MODULE_JAL(func_ptr)                                          \
+       : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)       \
+       :                                                               \
+       : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",  \
+         DADDI_SCRATCH, "memory");                                     \
+       __cu_len_r;                                                     \
+})
+
+/*
+ * Source or destination address is in userland. We need to go through
+ * the TLB
+ */
+#define __invoke_copy_from_user(to, from, n)                           \
+       __invoke_copy_from_user_eva_generic(to, from, n, __copy_from_user_eva)
+
+#define __invoke_copy_from_user_inatomic(to, from, n)                  \
+       __invoke_copy_from_user_eva_generic(to, from, n,                \
+                                           __copy_user_inatomic_eva)
+
+#define __invoke_copy_to_user(to, from, n)                             \
+       __invoke_copy_to_user_eva_generic(to, from, n, __copy_to_user_eva)
+
+#define ___invoke_copy_in_user(to, from, n)                            \
+       __invoke_copy_from_user_eva_generic(to, from, n, __copy_in_user_eva)
+
+/*
+ * Source or destination address in the kernel. We are not going through
+ * the TLB
+ */
+#define __invoke_copy_from_kernel(to, from, n)                         \
+       __invoke_copy_from_user_eva_generic(to, from, n, __copy_user)
+
+#define __invoke_copy_from_kernel_inatomic(to, from, n)                        \
+       __invoke_copy_from_user_eva_generic(to, from, n, __copy_user_inatomic)
+
+#define __invoke_copy_to_kernel(to, from, n)                           \
+       __invoke_copy_to_user_eva_generic(to, from, n, __copy_user)
+
+#define ___invoke_copy_in_kernel(to, from, n)                          \
+       __invoke_copy_from_user_eva_generic(to, from, n, __copy_user)
+
+#endif /* CONFIG_EVA */
+
 /*
  * __copy_from_user: - Copy a block of data from user space, with less checking.
  * @to:          Destination address, in kernel space.
@@ -901,10 +1122,17 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
        __cu_to = (to);                                                 \
        __cu_from = (from);                                             \
        __cu_len = (n);                                                 \
-       if (access_ok(VERIFY_READ, __cu_from, __cu_len)) {              \
-               might_fault();                                          \
-               __cu_len = __invoke_copy_from_user(__cu_to, __cu_from,  \
-                                                  __cu_len);           \
+       if (segment_eq(get_fs(), get_ds())) {                           \
+               __cu_len = __invoke_copy_from_kernel(__cu_to,           \
+                                                    __cu_from,         \
+                                                    __cu_len);         \
+       } else {                                                        \
+               if (access_ok(VERIFY_READ, __cu_from, __cu_len)) {      \
+                       might_fault();                                  \
+                       __cu_len = __invoke_copy_from_user(__cu_to,     \
+                                                          __cu_from,   \
+                                                          __cu_len);   \
+               }                                                       \
        }                                                               \
        __cu_len;                                                       \
 })
@@ -918,9 +1146,14 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
        __cu_to = (to);                                                 \
        __cu_from = (from);                                             \
        __cu_len = (n);                                                 \
-       might_fault();                                                  \
-       __cu_len = __invoke_copy_from_user(__cu_to, __cu_from,          \
-                                          __cu_len);                   \
+       if (segment_eq(get_fs(), get_ds())) {                           \
+               __cu_len = ___invoke_copy_in_kernel(__cu_to, __cu_from, \
+                                                   __cu_len);          \
+       } else {                                                        \
+               might_fault();                                          \
+               __cu_len = ___invoke_copy_in_user(__cu_to, __cu_from,   \
+                                                 __cu_len);            \
+       }                                                               \
        __cu_len;                                                       \
 })
 
@@ -933,11 +1166,17 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
        __cu_to = (to);                                                 \
        __cu_from = (from);                                             \
        __cu_len = (n);                                                 \
-       if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) &&       \
-                  access_ok(VERIFY_WRITE, __cu_to, __cu_len))) {       \
-               might_fault();                                          \
-               __cu_len = __invoke_copy_from_user(__cu_to, __cu_from,  \
-                                                  __cu_len);           \
+       if (segment_eq(get_fs(), get_ds())) {                           \
+               __cu_len = ___invoke_copy_in_kernel(__cu_to,__cu_from,  \
+                                                   __cu_len);          \
+       } else {                                                        \
+               if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) &&\
+                          access_ok(VERIFY_WRITE, __cu_to, __cu_len))) {\
+                       might_fault();                                  \
+                       __cu_len = ___invoke_copy_in_user(__cu_to,      \
+                                                         __cu_from,    \
+                                                         __cu_len);    \
+               }                                                       \
        }                                                               \
        __cu_len;                                                       \
 })
@@ -1007,16 +1246,28 @@ __strncpy_from_user(char *__to, const char __user *__from, long __len)
 {
        long res;
 
-       might_fault();
-       __asm__ __volatile__(
-               "move\t$4, %1\n\t"
-               "move\t$5, %2\n\t"
-               "move\t$6, %3\n\t"
-               __MODULE_JAL(__strncpy_from_user_nocheck_asm)
-               "move\t%0, $2"
-               : "=r" (res)
-               : "r" (__to), "r" (__from), "r" (__len)
-               : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
+       if (segment_eq(get_fs(), get_ds())) {
+               __asm__ __volatile__(
+                       "move\t$4, %1\n\t"
+                       "move\t$5, %2\n\t"
+                       "move\t$6, %3\n\t"
+                       __MODULE_JAL(__strncpy_from_kernel_nocheck_asm)
+                       "move\t%0, $2"
+                       : "=r" (res)
+                       : "r" (__to), "r" (__from), "r" (__len)
+                       : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
+       } else {
+               might_fault();
+               __asm__ __volatile__(
+                       "move\t$4, %1\n\t"
+                       "move\t$5, %2\n\t"
+                       "move\t$6, %3\n\t"
+                       __MODULE_JAL(__strncpy_from_user_nocheck_asm)
+                       "move\t%0, $2"
+                       : "=r" (res)
+                       : "r" (__to), "r" (__from), "r" (__len)
+                       : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
+       }
 
        return res;
 }
@@ -1044,16 +1295,28 @@ strncpy_from_user(char *__to, const char __user *__from, long __len)
 {
        long res;
 
-       might_fault();
-       __asm__ __volatile__(
-               "move\t$4, %1\n\t"
-               "move\t$5, %2\n\t"
-               "move\t$6, %3\n\t"
-               __MODULE_JAL(__strncpy_from_user_asm)
-               "move\t%0, $2"
-               : "=r" (res)
-               : "r" (__to), "r" (__from), "r" (__len)
-               : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
+       if (segment_eq(get_fs(), get_ds())) {
+               __asm__ __volatile__(
+                       "move\t$4, %1\n\t"
+                       "move\t$5, %2\n\t"
+                       "move\t$6, %3\n\t"
+                       __MODULE_JAL(__strncpy_from_kernel_asm)
+                       "move\t%0, $2"
+                       : "=r" (res)
+                       : "r" (__to), "r" (__from), "r" (__len)
+                       : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
+       } else {
+               might_fault();
+               __asm__ __volatile__(
+                       "move\t$4, %1\n\t"
+                       "move\t$5, %2\n\t"
+                       "move\t$6, %3\n\t"
+                       __MODULE_JAL(__strncpy_from_user_asm)
+                       "move\t%0, $2"
+                       : "=r" (res)
+                       : "r" (__to), "r" (__from), "r" (__len)
+                       : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
+       }
 
        return res;
 }
@@ -1063,14 +1326,24 @@ static inline long __strlen_user(const char __user *s)
 {
        long res;
 
-       might_fault();
-       __asm__ __volatile__(
-               "move\t$4, %1\n\t"
-               __MODULE_JAL(__strlen_user_nocheck_asm)
-               "move\t%0, $2"
-               : "=r" (res)
-               : "r" (s)
-               : "$2", "$4", __UA_t0, "$31");
+       if (segment_eq(get_fs(), get_ds())) {
+               __asm__ __volatile__(
+                       "move\t$4, %1\n\t"
+                       __MODULE_JAL(__strlen_kernel_nocheck_asm)
+                       "move\t%0, $2"
+                       : "=r" (res)
+                       : "r" (s)
+                       : "$2", "$4", __UA_t0, "$31");
+       } else {
+               might_fault();
+               __asm__ __volatile__(
+                       "move\t$4, %1\n\t"
+                       __MODULE_JAL(__strlen_user_nocheck_asm)
+                       "move\t%0, $2"
+                       : "=r" (res)
+                       : "r" (s)
+                       : "$2", "$4", __UA_t0, "$31");
+       }
 
        return res;
 }
@@ -1093,14 +1366,24 @@ static inline long strlen_user(const char __user *s)
 {
        long res;
 
-       might_fault();
-       __asm__ __volatile__(
-               "move\t$4, %1\n\t"
-               __MODULE_JAL(__strlen_user_asm)
-               "move\t%0, $2"
-               : "=r" (res)
-               : "r" (s)
-               : "$2", "$4", __UA_t0, "$31");
+       if (segment_eq(get_fs(), get_ds())) {
+               __asm__ __volatile__(
+                       "move\t$4, %1\n\t"
+                       __MODULE_JAL(__strlen_kernel_asm)
+                       "move\t%0, $2"
+                       : "=r" (res)
+                       : "r" (s)
+                       : "$2", "$4", __UA_t0, "$31");
+       } else {
+               might_fault();
+               __asm__ __volatile__(
+                       "move\t$4, %1\n\t"
+                       __MODULE_JAL(__strlen_user_asm)
+                       "move\t%0, $2"
+                       : "=r" (res)
+                       : "r" (s)
+                       : "$2", "$4", __UA_t0, "$31");
+       }
 
        return res;
 }
@@ -1110,15 +1393,26 @@ static inline long __strnlen_user(const char __user *s, long n)
 {
        long res;
 
-       might_fault();
-       __asm__ __volatile__(
-               "move\t$4, %1\n\t"
-               "move\t$5, %2\n\t"
-               __MODULE_JAL(__strnlen_user_nocheck_asm)
-               "move\t%0, $2"
-               : "=r" (res)
-               : "r" (s), "r" (n)
-               : "$2", "$4", "$5", __UA_t0, "$31");
+       if (segment_eq(get_fs(), get_ds())) {
+               __asm__ __volatile__(
+                       "move\t$4, %1\n\t"
+                       "move\t$5, %2\n\t"
+                       __MODULE_JAL(__strnlen_kernel_nocheck_asm)
+                       "move\t%0, $2"
+                       : "=r" (res)
+                       : "r" (s), "r" (n)
+                       : "$2", "$4", "$5", __UA_t0, "$31");
+       } else {
+               might_fault();
+               __asm__ __volatile__(
+                       "move\t$4, %1\n\t"
+                       "move\t$5, %2\n\t"
+                       __MODULE_JAL(__strnlen_user_nocheck_asm)
+                       "move\t%0, $2"
+                       : "=r" (res)
+                       : "r" (s), "r" (n)
+                       : "$2", "$4", "$5", __UA_t0, "$31");
+       }
 
        return res;
 }
@@ -1142,14 +1436,25 @@ static inline long strnlen_user(const char __user *s, long n)
        long res;
 
        might_fault();
-       __asm__ __volatile__(
-               "move\t$4, %1\n\t"
-               "move\t$5, %2\n\t"
-               __MODULE_JAL(__strnlen_user_asm)
-               "move\t%0, $2"
-               : "=r" (res)
-               : "r" (s), "r" (n)
-               : "$2", "$4", "$5", __UA_t0, "$31");
+       if (segment_eq(get_fs(), get_ds())) {
+               __asm__ __volatile__(
+                       "move\t$4, %1\n\t"
+                       "move\t$5, %2\n\t"
+                       __MODULE_JAL(__strnlen_kernel_asm)
+                       "move\t%0, $2"
+                       : "=r" (res)
+                       : "r" (s), "r" (n)
+                       : "$2", "$4", "$5", __UA_t0, "$31");
+       } else {
+               __asm__ __volatile__(
+                       "move\t$4, %1\n\t"
+                       "move\t$5, %2\n\t"
+                       __MODULE_JAL(__strnlen_user_asm)
+                       "move\t%0, $2"
+                       : "=r" (res)
+                       : "r" (s), "r" (n)
+                       : "$2", "$4", "$5", __UA_t0, "$31");
+       }
 
        return res;
 }
index f25181b19941db0544dc8e98187827343df99238..df6e775f3fef524e8d049c24433eff06dade0fd7 100644 (file)
@@ -8,6 +8,7 @@
  * Copyright (C) 1996, 2000 by Ralf Baechle
  * Copyright (C) 2006 by Thiemo Seufer
  * Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
+ * Copyright (C) 2014 Imagination Technologies Ltd.
  */
 #ifndef _UAPI_ASM_INST_H
 #define _UAPI_ASM_INST_H
@@ -73,10 +74,16 @@ enum spec2_op {
 enum spec3_op {
        ext_op, dextm_op, dextu_op, dext_op,
        ins_op, dinsm_op, dinsu_op, dins_op,
-       lx_op = 0x0a,
-       bshfl_op = 0x20,
-       dbshfl_op = 0x24,
-       rdhwr_op = 0x3b
+       lx_op     = 0x0a, lwle_op   = 0x19,
+       lwre_op   = 0x1a, cachee_op = 0x1b,
+       sbe_op    = 0x1c, she_op    = 0x1d,
+       sce_op    = 0x1e, swe_op    = 0x1f,
+       bshfl_op  = 0x20, swle_op   = 0x21,
+       swre_op   = 0x22, prefe_op  = 0x23,
+       dbshfl_op = 0x24, lbue_op   = 0x28,
+       lhue_op   = 0x29, lbe_op    = 0x2c,
+       lhe_op    = 0x2d, lle_op    = 0x2e,
+       lwe_op    = 0x2f, rdhwr_op  = 0x3b
 };
 
 /*
@@ -592,6 +599,15 @@ struct v_format {                          /* MDMX vector format */
        ;)))))))
 };
 
+struct spec3_format {   /* SPEC3 */
+       BITFIELD_FIELD(unsigned int opcode:6,
+       BITFIELD_FIELD(unsigned int rs:5,
+       BITFIELD_FIELD(unsigned int rt:5,
+       BITFIELD_FIELD(signed int simmediate:9,
+       BITFIELD_FIELD(unsigned int func:7,
+       ;)))))
+};
+
 /*
  * microMIPS instruction formats (32-bit length)
  *
@@ -863,6 +879,7 @@ union mips_instruction {
        struct b_format b_format;
        struct ps_format ps_format;
        struct v_format v_format;
+       struct spec3_format spec3_format;
        struct fb_format fb_format;
        struct fp0_format fp0_format;
        struct mm_fp0_format mm_fp0_format;
index 6c9906f59c6e69f4a6d778064204c8764653b12a..681c17603a48d41fa463028f3cee0731b166d556 100644 (file)
 #include <linux/types.h>
 #include <asm/sgidefs.h>
 
+/* Bits which may be set in sc_used_math */
+#define USEDMATH_FP    (1 << 0)
+#define USEDMATH_MSA   (1 << 1)
+
 #if _MIPS_SIM == _MIPS_SIM_ABI32
 
 /*
@@ -37,6 +41,8 @@ struct sigcontext {
        unsigned long           sc_lo2;
        unsigned long           sc_hi3;
        unsigned long           sc_lo3;
+       unsigned long long      sc_msaregs[32]; /* Most significant 64 bits */
+       unsigned long           sc_msa_csr;
 };
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
@@ -70,6 +76,8 @@ struct sigcontext {
        __u32   sc_used_math;
        __u32   sc_dsp;
        __u32   sc_reserved;
+       __u64   sc_msaregs[32];
+       __u32   sc_msa_csr;
 };
 
 
index 26c6175e137962545086bf4d11c3119b22c315f4..277dab301cea1e85cef71437029a51f5a56550db 100644 (file)
@@ -53,6 +53,8 @@ obj-$(CONFIG_MIPS_MT_FPAFF)   += mips-mt-fpaff.o
 obj-$(CONFIG_MIPS_MT_SMTC)     += smtc.o smtc-asm.o smtc-proc.o
 obj-$(CONFIG_MIPS_MT_SMP)      += smp-mt.o
 obj-$(CONFIG_MIPS_CMP)         += smp-cmp.o
+obj-$(CONFIG_MIPS_CPS)         += smp-cps.o cps-vec.o
+obj-$(CONFIG_MIPS_GIC_IPI)     += smp-gic.o
 obj-$(CONFIG_CPU_MIPSR2)       += spram.o
 
 obj-$(CONFIG_MIPS_VPE_LOADER)  += vpe.o
@@ -102,6 +104,9 @@ obj-$(CONFIG_HW_PERF_EVENTS)        += perf_event_mipsxx.o
 
 obj-$(CONFIG_JUMP_LABEL)       += jump_label.o
 
+obj-$(CONFIG_MIPS_CM)          += mips-cm.o
+obj-$(CONFIG_MIPS_CPC)         += mips-cpc.o
+
 #
 # DSP ASE supported for MIPS32 or MIPS64 Release 2 cores only. It is not
 # safe to unconditionnaly use the assembler -mdsp / -mdspr2 switches
index 0c2e853c3db46d72d81766e5f009ad627e41bac7..0ea75c244b487848c36c168b4f2f9c7dad4822a5 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/suspend.h>
 #include <asm/ptrace.h>
 #include <asm/processor.h>
+#include <asm/smp-cps.h>
 
 #include <linux/kvm_host.h>
 
@@ -168,6 +169,72 @@ void output_thread_fpu_defines(void)
        OFFSET(THREAD_FPR30, task_struct, thread.fpu.fpr[30]);
        OFFSET(THREAD_FPR31, task_struct, thread.fpu.fpr[31]);
 
+       /* the least significant 64 bits of each FP register */
+       OFFSET(THREAD_FPR0_LS64, task_struct,
+              thread.fpu.fpr[0].val64[FPR_IDX(64, 0)]);
+       OFFSET(THREAD_FPR1_LS64, task_struct,
+              thread.fpu.fpr[1].val64[FPR_IDX(64, 0)]);
+       OFFSET(THREAD_FPR2_LS64, task_struct,
+              thread.fpu.fpr[2].val64[FPR_IDX(64, 0)]);
+       OFFSET(THREAD_FPR3_LS64, task_struct,
+              thread.fpu.fpr[3].val64[FPR_IDX(64, 0)]);
+       OFFSET(THREAD_FPR4_LS64, task_struct,
+              thread.fpu.fpr[4].val64[FPR_IDX(64, 0)]);
+       OFFSET(THREAD_FPR5_LS64, task_struct,
+              thread.fpu.fpr[5].val64[FPR_IDX(64, 0)]);
+       OFFSET(THREAD_FPR6_LS64, task_struct,
+              thread.fpu.fpr[6].val64[FPR_IDX(64, 0)]);
+       OFFSET(THREAD_FPR7_LS64, task_struct,
+              thread.fpu.fpr[7].val64[FPR_IDX(64, 0)]);
+       OFFSET(THREAD_FPR8_LS64, task_struct,
+              thread.fpu.fpr[8].val64[FPR_IDX(64, 0)]);
+       OFFSET(THREAD_FPR9_LS64, task_struct,
+              thread.fpu.fpr[9].val64[FPR_IDX(64, 0)]);
+       OFFSET(THREAD_FPR10_LS64, task_struct,
+              thread.fpu.fpr[10].val64[FPR_IDX(64, 0)]);
+       OFFSET(THREAD_FPR11_LS64, task_struct,
+              thread.fpu.fpr[11].val64[FPR_IDX(64, 0)]);
+       OFFSET(THREAD_FPR12_LS64, task_struct,
+              thread.fpu.fpr[12].val64[FPR_IDX(64, 0)]);
+       OFFSET(THREAD_FPR13_LS64, task_struct,
+              thread.fpu.fpr[13].val64[FPR_IDX(64, 0)]);
+       OFFSET(THREAD_FPR14_LS64, task_struct,
+              thread.fpu.fpr[14].val64[FPR_IDX(64, 0)]);
+       OFFSET(THREAD_FPR15_LS64, task_struct,
+              thread.fpu.fpr[15].val64[FPR_IDX(64, 0)]);
+       OFFSET(THREAD_FPR16_LS64, task_struct,
+              thread.fpu.fpr[16].val64[FPR_IDX(64, 0)]);
+       OFFSET(THREAD_FPR17_LS64, task_struct,
+              thread.fpu.fpr[17].val64[FPR_IDX(64, 0)]);
+       OFFSET(THREAD_FPR18_LS64, task_struct,
+              thread.fpu.fpr[18].val64[FPR_IDX(64, 0)]);
+       OFFSET(THREAD_FPR19_LS64, task_struct,
+              thread.fpu.fpr[19].val64[FPR_IDX(64, 0)]);
+       OFFSET(THREAD_FPR20_LS64, task_struct,
+              thread.fpu.fpr[20].val64[FPR_IDX(64, 0)]);
+       OFFSET(THREAD_FPR21_LS64, task_struct,
+              thread.fpu.fpr[21].val64[FPR_IDX(64, 0)]);
+       OFFSET(THREAD_FPR22_LS64, task_struct,
+              thread.fpu.fpr[22].val64[FPR_IDX(64, 0)]);
+       OFFSET(THREAD_FPR23_LS64, task_struct,
+              thread.fpu.fpr[23].val64[FPR_IDX(64, 0)]);
+       OFFSET(THREAD_FPR24_LS64, task_struct,
+              thread.fpu.fpr[24].val64[FPR_IDX(64, 0)]);
+       OFFSET(THREAD_FPR25_LS64, task_struct,
+              thread.fpu.fpr[25].val64[FPR_IDX(64, 0)]);
+       OFFSET(THREAD_FPR26_LS64, task_struct,
+              thread.fpu.fpr[26].val64[FPR_IDX(64, 0)]);
+       OFFSET(THREAD_FPR27_LS64, task_struct,
+              thread.fpu.fpr[27].val64[FPR_IDX(64, 0)]);
+       OFFSET(THREAD_FPR28_LS64, task_struct,
+              thread.fpu.fpr[28].val64[FPR_IDX(64, 0)]);
+       OFFSET(THREAD_FPR29_LS64, task_struct,
+              thread.fpu.fpr[29].val64[FPR_IDX(64, 0)]);
+       OFFSET(THREAD_FPR30_LS64, task_struct,
+              thread.fpu.fpr[30].val64[FPR_IDX(64, 0)]);
+       OFFSET(THREAD_FPR31_LS64, task_struct,
+              thread.fpu.fpr[31].val64[FPR_IDX(64, 0)]);
+
        OFFSET(THREAD_FCR31, task_struct, thread.fpu.fcr31);
        BLANK();
 }
@@ -228,6 +295,7 @@ void output_sc_defines(void)
        OFFSET(SC_LO2, sigcontext, sc_lo2);
        OFFSET(SC_HI3, sigcontext, sc_hi3);
        OFFSET(SC_LO3, sigcontext, sc_lo3);
+       OFFSET(SC_MSAREGS, sigcontext, sc_msaregs);
        BLANK();
 }
 #endif
@@ -242,6 +310,7 @@ void output_sc_defines(void)
        OFFSET(SC_MDLO, sigcontext, sc_mdlo);
        OFFSET(SC_PC, sigcontext, sc_pc);
        OFFSET(SC_FPC_CSR, sigcontext, sc_fpc_csr);
+       OFFSET(SC_MSAREGS, sigcontext, sc_msaregs);
        BLANK();
 }
 #endif
@@ -253,6 +322,7 @@ void output_sc32_defines(void)
        OFFSET(SC32_FPREGS, sigcontext32, sc_fpregs);
        OFFSET(SC32_FPC_CSR, sigcontext32, sc_fpc_csr);
        OFFSET(SC32_FPC_EIR, sigcontext32, sc_fpc_eir);
+       OFFSET(SC32_MSAREGS, sigcontext32, sc_msaregs);
        BLANK();
 }
 #endif
@@ -397,3 +467,15 @@ void output_kvm_defines(void)
        OFFSET(COP0_STATUS, mips_coproc, reg[MIPS_CP0_STATUS][0]);
        BLANK();
 }
+
+#ifdef CONFIG_MIPS_CPS
+void output_cps_defines(void)
+{
+       COMMENT(" MIPS CPS offsets. ");
+       OFFSET(BOOTCFG_CORE, boot_config, core);
+       OFFSET(BOOTCFG_VPE, boot_config, vpe);
+       OFFSET(BOOTCFG_PC, boot_config, pc);
+       OFFSET(BOOTCFG_SP, boot_config, sp);
+       OFFSET(BOOTCFG_GP, boot_config, gp);
+}
+#endif
index a5bf73d22fcc378ae9dfa4648cb45ed9249f3c09..290c23b516789ba16193f7b72fa61eafbf7907b2 100644 (file)
@@ -122,7 +122,7 @@ NESTED(bmips_reset_nmi_vec, PT_SIZE, sp)
        jr      k0
 
        RESTORE_ALL
-       .set    mips3
+       .set    arch=r4000
        eret
 
 /***********************************************************************
diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S
new file mode 100644 (file)
index 0000000..f7a46db
--- /dev/null
@@ -0,0 +1,191 @@
+/*
+ * Copyright (C) 2013 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <asm/addrspace.h>
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/asmmacro.h>
+#include <asm/cacheops.h>
+#include <asm/mipsregs.h>
+
+#define GCR_CL_COHERENCE_OFS 0x2008
+
+.section .text.cps-vec
+.balign 0x1000
+.set noreorder
+
+LEAF(mips_cps_core_entry)
+       /*
+        * These first 8 bytes will be patched by cps_smp_setup to load the
+        * base address of the CM GCRs into register v1.
+        */
+       .quad   0
+
+       /* Check whether we're here due to an NMI */
+       mfc0    k0, CP0_STATUS
+       and     k0, k0, ST0_NMI
+       beqz    k0, not_nmi
+        nop
+
+       /* This is an NMI */
+       la      k0, nmi_handler
+       jr      k0
+        nop
+
+not_nmi:
+       /* Setup Cause */
+       li      t0, CAUSEF_IV
+       mtc0    t0, CP0_CAUSE
+
+       /* Setup Status */
+       li      t0, ST0_CU1 | ST0_CU0
+       mtc0    t0, CP0_STATUS
+
+       /*
+        * Clear the bits used to index the caches. Note that the architecture
+        * dictates that writing to any of TagLo or TagHi selects 0 or 2 should
+        * be valid for all MIPS32 CPUs, even those for which said writes are
+        * unnecessary.
+        */
+       mtc0    zero, CP0_TAGLO, 0
+       mtc0    zero, CP0_TAGHI, 0
+       mtc0    zero, CP0_TAGLO, 2
+       mtc0    zero, CP0_TAGHI, 2
+       ehb
+
+       /* Primary cache configuration is indicated by Config1 */
+       mfc0    v0, CP0_CONFIG, 1
+
+       /* Detect I-cache line size */
+       _EXT    t0, v0, MIPS_CONF1_IL_SHF, MIPS_CONF1_IL_SZ
+       beqz    t0, icache_done
+        li     t1, 2
+       sllv    t0, t1, t0
+
+       /* Detect I-cache size */
+       _EXT    t1, v0, MIPS_CONF1_IS_SHF, MIPS_CONF1_IS_SZ
+       xori    t2, t1, 0x7
+       beqz    t2, 1f
+        li     t3, 32
+       addi    t1, t1, 1
+       sllv    t1, t3, t1
+1:     /* At this point t1 == I-cache sets per way */
+       _EXT    t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ
+       addi    t2, t2, 1
+       mul     t1, t1, t0
+       mul     t1, t1, t2
+
+       li      a0, KSEG0
+       add     a1, a0, t1
+1:     cache   Index_Store_Tag_I, 0(a0)
+       add     a0, a0, t0
+       bne     a0, a1, 1b
+        nop
+icache_done:
+
+       /* Detect D-cache line size */
+       _EXT    t0, v0, MIPS_CONF1_DL_SHF, MIPS_CONF1_DL_SZ
+       beqz    t0, dcache_done
+        li     t1, 2
+       sllv    t0, t1, t0
+
+       /* Detect D-cache size */
+       _EXT    t1, v0, MIPS_CONF1_DS_SHF, MIPS_CONF1_DS_SZ
+       xori    t2, t1, 0x7
+       beqz    t2, 1f
+        li     t3, 32
+       addi    t1, t1, 1
+       sllv    t1, t3, t1
+1:     /* At this point t1 == D-cache sets per way */
+       _EXT    t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ
+       addi    t2, t2, 1
+       mul     t1, t1, t0
+       mul     t1, t1, t2
+
+       li      a0, KSEG0
+       addu    a1, a0, t1
+       subu    a1, a1, t0
+1:     cache   Index_Store_Tag_D, 0(a0)
+       bne     a0, a1, 1b
+        add    a0, a0, t0
+dcache_done:
+
+       /* Set Kseg0 cacheable, coherent, write-back, write-allocate */
+       mfc0    t0, CP0_CONFIG
+       ori     t0, 0x7
+       xori    t0, 0x2
+       mtc0    t0, CP0_CONFIG
+       ehb
+
+       /* Enter the coherent domain */
+       li      t0, 0xff
+       sw      t0, GCR_CL_COHERENCE_OFS(v1)
+       ehb
+
+       /* Jump to kseg0 */
+       la      t0, 1f
+       jr      t0
+        nop
+
+1:     /* We're up, cached & coherent */
+
+       /*
+        * TODO: We should check the VPE number we intended to boot here, and
+        *       if non-zero we should start that VPE and stop this one. For
+        *       the moment this doesn't matter since CPUs are brought up
+        *       sequentially and in order, but once hotplug is implemented
+        *       this will need revisiting.
+        */
+
+       /* Off we go! */
+       la      t0, mips_cps_bootcfg
+       lw      t1, BOOTCFG_PC(t0)
+       lw      gp, BOOTCFG_GP(t0)
+       lw      sp, BOOTCFG_SP(t0)
+       jr      t1
+        nop
+       END(mips_cps_core_entry)
+
+.org 0x200
+LEAF(excep_tlbfill)
+       b       .
+        nop
+       END(excep_tlbfill)
+
+.org 0x280
+LEAF(excep_xtlbfill)
+       b       .
+        nop
+       END(excep_xtlbfill)
+
+.org 0x300
+LEAF(excep_cache)
+       b       .
+        nop
+       END(excep_cache)
+
+.org 0x380
+LEAF(excep_genex)
+       b       .
+        nop
+       END(excep_genex)
+
+.org 0x400
+LEAF(excep_intex)
+       b       .
+        nop
+       END(excep_intex)
+
+.org 0x480
+LEAF(excep_ejtag)
+       la      k0, ejtag_debug_handler
+       jr      k0
+        nop
+       END(excep_ejtag)
index 530f832de02c20165c6d36a6a63f5db8b2603f99..6e8fb85ce7c3b076fd6bec6b2b770ed3b425c597 100644 (file)
@@ -23,6 +23,8 @@
 #include <asm/cpu-type.h>
 #include <asm/fpu.h>
 #include <asm/mipsregs.h>
+#include <asm/mipsmtregs.h>
+#include <asm/msa.h>
 #include <asm/watch.h>
 #include <asm/elf.h>
 #include <asm/spram.h>
@@ -126,6 +128,20 @@ static inline int __cpu_has_fpu(void)
        return ((cpu_get_fpu_id() & FPIR_IMP_MASK) != FPIR_IMP_NONE);
 }
 
+static inline unsigned long cpu_get_msa_id(void)
+{
+       unsigned long status, conf5, msa_id;
+
+       status = read_c0_status();
+       __enable_fpu(FPU_64BIT);
+       conf5 = read_c0_config5();
+       enable_msa();
+       msa_id = read_msa_ir();
+       write_c0_config5(conf5);
+       write_c0_status(status);
+       return msa_id;
+}
+
 static inline void cpu_probe_vmbits(struct cpuinfo_mips *c)
 {
 #ifdef __NEED_VMBITS_PROBE
@@ -166,11 +182,12 @@ static char unknown_isa[] = KERN_ERR \
 static void set_ftlb_enable(struct cpuinfo_mips *c, int enable)
 {
        unsigned int config6;
-       /*
-        * Config6 is implementation dependent and it's currently only
-        * used by proAptiv
-        */
-       if (c->cputype == CPU_PROAPTIV) {
+
+       /* It's implementation dependent how the FTLB can be enabled */
+       switch (c->cputype) {
+       case CPU_PROAPTIV:
+       case CPU_P5600:
+               /* proAptiv & related cores use Config6 to enable the FTLB */
                config6 = read_c0_config6();
                if (enable)
                        /* Enable FTLB */
@@ -179,6 +196,7 @@ static void set_ftlb_enable(struct cpuinfo_mips *c, int enable)
                        /* Disable FTLB */
                        write_c0_config6(config6 &  ~MIPS_CONF6_FTLBEN);
                back_to_back_c0_hazard();
+               break;
        }
 }
 
@@ -301,6 +319,8 @@ static inline unsigned int decode_config3(struct cpuinfo_mips *c)
                c->ases |= MIPS_ASE_VZ;
        if (config3 & MIPS_CONF3_SC)
                c->options |= MIPS_CPU_SEGMENTS;
+       if (config3 & MIPS_CONF3_MSA)
+               c->ases |= MIPS_ASE_MSA;
 
        return config3 & MIPS_CONF_M;
 }
@@ -367,6 +387,9 @@ static inline unsigned int decode_config5(struct cpuinfo_mips *c)
        config5 &= ~MIPS_CONF5_UFR;
        write_c0_config5(config5);
 
+       if (config5 & MIPS_CONF5_EVA)
+               c->options |= MIPS_CPU_EVA;
+
        return config5 & MIPS_CONF_M;
 }
 
@@ -398,8 +421,13 @@ static void decode_configs(struct cpuinfo_mips *c)
 
        mips_probe_watch_registers(c);
 
-       if (cpu_has_mips_r2)
+#ifndef CONFIG_MIPS_CPS
+       if (cpu_has_mips_r2) {
                c->core = read_c0_ebase() & 0x3ff;
+               if (cpu_has_mipsmt)
+                       c->core >>= fls(core_nvpes()) - 1;
+       }
+#endif
 }
 
 #define R4K_OPTS (MIPS_CPU_TLB | MIPS_CPU_4KEX | MIPS_CPU_4K_CACHE \
@@ -710,17 +738,23 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
                             MIPS_CPU_LLSC;
                c->tlbsize = 64;
                break;
-       case PRID_IMP_LOONGSON2:
-               c->cputype = CPU_LOONGSON2;
-               __cpu_name[cpu] = "ICT Loongson-2";
-
+       case PRID_IMP_LOONGSON_64:  /* Loongson-2/3 */
                switch (c->processor_id & PRID_REV_MASK) {
                case PRID_REV_LOONGSON2E:
+                       c->cputype = CPU_LOONGSON2;
+                       __cpu_name[cpu] = "ICT Loongson-2";
                        set_elf_platform(cpu, "loongson2e");
                        break;
                case PRID_REV_LOONGSON2F:
+                       c->cputype = CPU_LOONGSON2;
+                       __cpu_name[cpu] = "ICT Loongson-2";
                        set_elf_platform(cpu, "loongson2f");
                        break;
+               case PRID_REV_LOONGSON3A:
+                       c->cputype = CPU_LOONGSON3;
+                       __cpu_name[cpu] = "ICT Loongson-3";
+                       set_elf_platform(cpu, "loongson3a");
+                       break;
                }
 
                set_isa(c, MIPS_CPU_ISA_III);
@@ -729,7 +763,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
                             MIPS_CPU_32FPR;
                c->tlbsize = 64;
                break;
-       case PRID_IMP_LOONGSON1:
+       case PRID_IMP_LOONGSON_32:  /* Loongson-1 */
                decode_configs(c);
 
                c->cputype = CPU_LOONGSON1;
@@ -806,7 +840,7 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu)
                __cpu_name[cpu] = "MIPS 1004Kc";
                break;
        case PRID_IMP_1074K:
-               c->cputype = CPU_74K;
+               c->cputype = CPU_1074K;
                __cpu_name[cpu] = "MIPS 1074Kc";
                break;
        case PRID_IMP_INTERAPTIV_UP:
@@ -825,6 +859,14 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu)
                c->cputype = CPU_PROAPTIV;
                __cpu_name[cpu] = "MIPS proAptiv (multi)";
                break;
+       case PRID_IMP_P5600:
+               c->cputype = CPU_P5600;
+               __cpu_name[cpu] = "MIPS P5600";
+               break;
+       case PRID_IMP_M5150:
+               c->cputype = CPU_M5150;
+               __cpu_name[cpu] = "MIPS M5150";
+               break;
        }
 
        decode_configs(c);
@@ -1176,6 +1218,12 @@ void cpu_probe(void)
        else
                c->srsets = 1;
 
+       if (cpu_has_msa) {
+               c->msa_id = cpu_get_msa_id();
+               WARN(c->msa_id & MSA_IR_WRPF,
+                    "Vector register partitioning unimplemented!");
+       }
+
        cpu_probe_vmbits(c);
 
 #ifdef CONFIG_64BIT
@@ -1192,4 +1240,6 @@ void cpu_report(void)
                smp_processor_id(), c->processor_id, cpu_name_string());
        if (c->options & MIPS_CPU_FPU)
                printk(KERN_INFO "FPU revision is: %08x\n", c->fpu_id);
+       if (cpu_has_msa)
+               pr_info("MSA revision is: %08x\n", c->msa_id);
 }
index 374ed74cd516d91e27638ce12e4c3c731a191ef4..74fe73506d8f9ce8a1eb5f3536df282829f02c7e 100644 (file)
@@ -90,6 +90,7 @@ static inline void ftrace_dyn_arch_init_insns(void)
 static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
 {
        int faulted;
+       mm_segment_t old_fs;
 
        /* *(unsigned int *)ip = new_code; */
        safe_store_code(new_code, ip, faulted);
@@ -97,7 +98,10 @@ static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
        if (unlikely(faulted))
                return -EFAULT;
 
+       old_fs = get_fs();
+       set_fs(get_ds());
        flush_icache_range(ip, ip + 8);
+       set_fs(old_fs);
 
        return 0;
 }
index d84f6a5095023ea5ff1bd052822937b8f87ac5dc..a9ce3408be258ce612977127ad72c79542b3a985 100644 (file)
@@ -67,7 +67,7 @@ NESTED(except_vec3_generic, 0, sp)
  */
 NESTED(except_vec3_r4000, 0, sp)
        .set    push
-       .set    mips3
+       .set    arch=r4000
        .set    noat
        mfc0    k1, CP0_CAUSE
        li      k0, 31<<2
@@ -139,7 +139,7 @@ LEAF(__r4k_wait)
        nop
        nop
 #endif
-       .set    mips3
+       .set    arch=r4000
        wait
        /* end of rollback region (the region size must be power of two) */
 1:
@@ -475,8 +475,10 @@ NESTED(nmi_handler, PT_SIZE, sp)
        BUILD_HANDLER cpu cpu sti silent                /* #11 */
        BUILD_HANDLER ov ov sti silent                  /* #12 */
        BUILD_HANDLER tr tr sti silent                  /* #13 */
+       BUILD_HANDLER msa_fpe msa_fpe sti silent        /* #14 */
        BUILD_HANDLER fpe fpe fpe silent                /* #15 */
        BUILD_HANDLER ftlb ftlb none silent             /* #16 */
+       BUILD_HANDLER msa msa sti silent                /* #21 */
        BUILD_HANDLER mdmx mdmx sti silent              /* #22 */
 #ifdef CONFIG_HARDWARE_WATCHPOINTS
        /*
@@ -575,7 +577,7 @@ isrdhwr:
        ori     k1, _THREAD_MASK
        xori    k1, _THREAD_MASK
        LONG_L  v1, TI_TP_VALUE(k1)
-       .set    mips3
+       .set    arch=r4000
        eret
        .set    mips0
 #endif
index 7b6a5b3e3acf6af099f8fe30a980cc43ac120ace..e712dcf18b2de22bbc6c14d9fbf84f37e9542215 100644 (file)
@@ -175,8 +175,8 @@ NESTED(smp_bootstrap, 16, sp)
        DMT     10      # dmt t2 /* t0, t1 are used by CLI and setup_c0_status() */
        jal     mips_ihb
 #endif /* CONFIG_MIPS_MT_SMTC */
-       setup_c0_status_sec
        smp_slave_setup
+       setup_c0_status_sec
 #ifdef CONFIG_MIPS_MT_SMTC
        andi    t2, t2, VPECONTROL_TE
        beqz    t2, 2f
index 3553243bf9d660f0f7ec69941462990ac7ee55b7..837ff27950bcb7018f8130ab5700a7c3522fe7ab 100644 (file)
@@ -64,7 +64,7 @@ void r4k_wait_irqoff(void)
        if (!need_resched())
                __asm__(
                "       .set    push            \n"
-               "       .set    mips3           \n"
+               "       .set    arch=r4000      \n"
                "       wait                    \n"
                "       .set    pop             \n");
        local_irq_enable();
@@ -82,7 +82,7 @@ static void rm7k_wait_irqoff(void)
        if (!need_resched())
                __asm__(
                "       .set    push                                    \n"
-               "       .set    mips3                                   \n"
+               "       .set    arch=r4000                              \n"
                "       .set    noat                                    \n"
                "       mfc0    $1, $12                                 \n"
                "       sync                                            \n"
@@ -103,7 +103,7 @@ static void au1k_wait(void)
        unsigned long c0status = read_c0_status() | 1;  /* irqs on */
 
        __asm__(
-       "       .set    mips3                   \n"
+       "       .set    arch=r4000                      \n"
        "       cache   0x14, 0(%0)             \n"
        "       cache   0x14, 32(%0)            \n"
        "       sync                            \n"
@@ -184,8 +184,11 @@ void __init check_wait(void)
        case CPU_24K:
        case CPU_34K:
        case CPU_1004K:
+       case CPU_1074K:
        case CPU_INTERAPTIV:
        case CPU_PROAPTIV:
+       case CPU_P5600:
+       case CPU_M5150:
                cpu_wait = r4k_wait;
                if (read_c0_config7() & MIPS_CONF7_WII)
                        cpu_wait = r4k_wait_irqoff;
index 5b5ddb231f26927bd99ba469015b1657baea903a..8520dad6d4e3c069d19926c04963c4472d4cc9e9 100644 (file)
@@ -16,7 +16,6 @@
 #include <asm/gic.h>
 #include <asm/setup.h>
 #include <asm/traps.h>
-#include <asm/gcmpregs.h>
 #include <linux/hardirq.h>
 #include <asm-generic/bitops/find.h>
 
index fcaac2f132f08e850e1a4088788e6327c2ce4f64..7afcc2f22c0dbeb724090c6bccecc1bfff90c510 100644 (file)
@@ -32,6 +32,7 @@
 #include <asm/cacheflush.h>
 #include <asm/processor.h>
 #include <asm/sigcontext.h>
+#include <asm/uaccess.h>
 
 static struct hard_trap_info {
        unsigned char tt;       /* Trap type code for MIPS R3xxx and R4xxx */
@@ -208,7 +209,14 @@ void arch_kgdb_breakpoint(void)
 
 static void kgdb_call_nmi_hook(void *ignored)
 {
+       mm_segment_t old_fs;
+
+       old_fs = get_fs();
+       set_fs(get_ds());
+
        kgdb_nmicallback(raw_smp_processor_id(), NULL);
+
+       set_fs(old_fs);
 }
 
 void kgdb_roundup_cpus(unsigned long flags)
@@ -282,6 +290,7 @@ static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd,
        struct die_args *args = (struct die_args *)ptr;
        struct pt_regs *regs = args->regs;
        int trap = (regs->cp0_cause & 0x7c) >> 2;
+       mm_segment_t old_fs;
 
 #ifdef CONFIG_KPROBES
        /*
@@ -296,11 +305,17 @@ static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd,
        if (user_mode(regs))
                return NOTIFY_DONE;
 
+       /* Kernel mode. Set correct address limit */
+       old_fs = get_fs();
+       set_fs(get_ds());
+
        if (atomic_read(&kgdb_active) != -1)
                kgdb_nmicallback(smp_processor_id(), regs);
 
-       if (kgdb_handle_exception(trap, compute_signal(trap), cmd, regs))
+       if (kgdb_handle_exception(trap, compute_signal(trap), cmd, regs)) {
+               set_fs(old_fs);
                return NOTIFY_DONE;
+       }
 
        if (atomic_read(&kgdb_setting_breakpoint))
                if ((trap == 9) && (regs->cp0_epc == (unsigned long)breakinst))
@@ -310,6 +325,7 @@ static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd,
        local_irq_enable();
        __flush_cache_all();
 
+       set_fs(old_fs);
        return NOTIFY_STOP;
 }
 
diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c
new file mode 100644 (file)
index 0000000..f76f7a0
--- /dev/null
@@ -0,0 +1,121 @@
+/*
+ * Copyright (C) 2013 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/errno.h>
+
+#include <asm/mips-cm.h>
+#include <asm/mipsregs.h>
+
+void __iomem *mips_cm_base;
+void __iomem *mips_cm_l2sync_base;
+
+phys_t __mips_cm_phys_base(void)
+{
+       u32 config3 = read_c0_config3();
+       u32 cmgcr;
+
+       /* Check the CMGCRBase register is implemented */
+       if (!(config3 & MIPS_CONF3_CMGCR))
+               return 0;
+
+       /* Read the address from CMGCRBase */
+       cmgcr = read_c0_cmgcrbase();
+       return (cmgcr & MIPS_CMGCRF_BASE) << (36 - 32);
+}
+
+phys_t mips_cm_phys_base(void)
+       __attribute__((weak, alias("__mips_cm_phys_base")));
+
+phys_t __mips_cm_l2sync_phys_base(void)
+{
+       u32 base_reg;
+
+       /*
+        * If the L2-only sync region is already enabled then leave it at it's
+        * current location.
+        */
+       base_reg = read_gcr_l2_only_sync_base();
+       if (base_reg & CM_GCR_L2_ONLY_SYNC_BASE_SYNCEN_MSK)
+               return base_reg & CM_GCR_L2_ONLY_SYNC_BASE_SYNCBASE_MSK;
+
+       /* Default to following the CM */
+       return mips_cm_phys_base() + MIPS_CM_GCR_SIZE;
+}
+
+phys_t mips_cm_l2sync_phys_base(void)
+       __attribute__((weak, alias("__mips_cm_l2sync_phys_base")));
+
+static void mips_cm_probe_l2sync(void)
+{
+       unsigned major_rev;
+       phys_t addr;
+
+       /* L2-only sync was introduced with CM major revision 6 */
+       major_rev = (read_gcr_rev() & CM_GCR_REV_MAJOR_MSK) >>
+               CM_GCR_REV_MAJOR_SHF;
+       if (major_rev < 6)
+               return;
+
+       /* Find a location for the L2 sync region */
+       addr = mips_cm_l2sync_phys_base();
+       BUG_ON((addr & CM_GCR_L2_ONLY_SYNC_BASE_SYNCBASE_MSK) != addr);
+       if (!addr)
+               return;
+
+       /* Set the region base address & enable it */
+       write_gcr_l2_only_sync_base(addr | CM_GCR_L2_ONLY_SYNC_BASE_SYNCEN_MSK);
+
+       /* Map the region */
+       mips_cm_l2sync_base = ioremap_nocache(addr, MIPS_CM_L2SYNC_SIZE);
+}
+
+int mips_cm_probe(void)
+{
+       phys_t addr;
+       u32 base_reg;
+
+       addr = mips_cm_phys_base();
+       BUG_ON((addr & CM_GCR_BASE_GCRBASE_MSK) != addr);
+       if (!addr)
+               return -ENODEV;
+
+       mips_cm_base = ioremap_nocache(addr, MIPS_CM_GCR_SIZE);
+       if (!mips_cm_base)
+               return -ENXIO;
+
+       /* sanity check that we're looking at a CM */
+       base_reg = read_gcr_base();
+       if ((base_reg & CM_GCR_BASE_GCRBASE_MSK) != addr) {
+               pr_err("GCRs appear to have been moved (expected them at 0x%08lx)!\n",
+                      (unsigned long)addr);
+               mips_cm_base = NULL;
+               return -ENODEV;
+       }
+
+       /* set default target to memory */
+       base_reg &= ~CM_GCR_BASE_CMDEFTGT_MSK;
+       base_reg |= CM_GCR_BASE_CMDEFTGT_MEM;
+       write_gcr_base(base_reg);
+
+       /* disable CM regions */
+       write_gcr_reg0_base(CM_GCR_REGn_BASE_BASEADDR_MSK);
+       write_gcr_reg0_mask(CM_GCR_REGn_MASK_ADDRMASK_MSK);
+       write_gcr_reg1_base(CM_GCR_REGn_BASE_BASEADDR_MSK);
+       write_gcr_reg1_mask(CM_GCR_REGn_MASK_ADDRMASK_MSK);
+       write_gcr_reg2_base(CM_GCR_REGn_BASE_BASEADDR_MSK);
+       write_gcr_reg2_mask(CM_GCR_REGn_MASK_ADDRMASK_MSK);
+       write_gcr_reg3_base(CM_GCR_REGn_BASE_BASEADDR_MSK);
+       write_gcr_reg3_mask(CM_GCR_REGn_MASK_ADDRMASK_MSK);
+
+       /* probe for an L2-only sync region */
+       mips_cm_probe_l2sync();
+
+       return 0;
+}
diff --git a/arch/mips/kernel/mips-cpc.c b/arch/mips/kernel/mips-cpc.c
new file mode 100644 (file)
index 0000000..c9dc674
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2013 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/errno.h>
+
+#include <asm/mips-cm.h>
+#include <asm/mips-cpc.h>
+
+void __iomem *mips_cpc_base;
+
+phys_t __weak mips_cpc_phys_base(void)
+{
+       u32 cpc_base;
+
+       if (!mips_cm_present())
+               return 0;
+
+       if (!(read_gcr_cpc_status() & CM_GCR_CPC_STATUS_EX_MSK))
+               return 0;
+
+       /* If the CPC is already enabled, leave it so */
+       cpc_base = read_gcr_cpc_base();
+       if (cpc_base & CM_GCR_CPC_BASE_CPCEN_MSK)
+               return cpc_base & CM_GCR_CPC_BASE_CPCBASE_MSK;
+
+       /* Otherwise, give it the default address & enable it */
+       cpc_base = mips_cpc_default_phys_base();
+       write_gcr_cpc_base(cpc_base | CM_GCR_CPC_BASE_CPCEN_MSK);
+       return cpc_base;
+}
+
+int mips_cpc_probe(void)
+{
+       phys_t addr;
+
+       addr = mips_cpc_phys_base();
+       if (!addr)
+               return -ENODEV;
+
+       mips_cpc_base = ioremap_nocache(addr, 0x8000);
+       if (!mips_cpc_base)
+               return -ENXIO;
+
+       return 0;
+}
index 6e58e97fcd39bb09581d39030a38940cc9a4f848..2607c3a4ff7e9348f9c197884bd75bbe05fee76c 100644 (file)
 #include <asm/ftrace.h>
 
 extern void *__bzero(void *__s, size_t __count);
+extern long __strncpy_from_kernel_nocheck_asm(char *__to,
+                                             const char *__from, long __len);
+extern long __strncpy_from_kernel_asm(char *__to, const char *__from,
+                                     long __len);
 extern long __strncpy_from_user_nocheck_asm(char *__to,
                                            const char *__from, long __len);
 extern long __strncpy_from_user_asm(char *__to, const char *__from,
                                    long __len);
+extern long __strlen_kernel_nocheck_asm(const char *s);
+extern long __strlen_kernel_asm(const char *s);
 extern long __strlen_user_nocheck_asm(const char *s);
 extern long __strlen_user_asm(const char *s);
+extern long __strnlen_kernel_nocheck_asm(const char *s);
+extern long __strnlen_kernel_asm(const char *s);
 extern long __strnlen_user_nocheck_asm(const char *s);
 extern long __strnlen_user_asm(const char *s);
 
@@ -43,17 +51,31 @@ EXPORT_SYMBOL(copy_page);
  */
 EXPORT_SYMBOL(__copy_user);
 EXPORT_SYMBOL(__copy_user_inatomic);
+#ifdef CONFIG_EVA
+EXPORT_SYMBOL(__copy_from_user_eva);
+EXPORT_SYMBOL(__copy_in_user_eva);
+EXPORT_SYMBOL(__copy_to_user_eva);
+EXPORT_SYMBOL(__copy_user_inatomic_eva);
+#endif
 EXPORT_SYMBOL(__bzero);
+EXPORT_SYMBOL(__strncpy_from_kernel_nocheck_asm);
+EXPORT_SYMBOL(__strncpy_from_kernel_asm);
 EXPORT_SYMBOL(__strncpy_from_user_nocheck_asm);
 EXPORT_SYMBOL(__strncpy_from_user_asm);
+EXPORT_SYMBOL(__strlen_kernel_nocheck_asm);
+EXPORT_SYMBOL(__strlen_kernel_asm);
 EXPORT_SYMBOL(__strlen_user_nocheck_asm);
 EXPORT_SYMBOL(__strlen_user_asm);
+EXPORT_SYMBOL(__strnlen_kernel_nocheck_asm);
+EXPORT_SYMBOL(__strnlen_kernel_asm);
 EXPORT_SYMBOL(__strnlen_user_nocheck_asm);
 EXPORT_SYMBOL(__strnlen_user_asm);
 
 EXPORT_SYMBOL(csum_partial);
 EXPORT_SYMBOL(csum_partial_copy_nocheck);
-EXPORT_SYMBOL(__csum_partial_copy_user);
+EXPORT_SYMBOL(__csum_partial_copy_kernel);
+EXPORT_SYMBOL(__csum_partial_copy_to_user);
+EXPORT_SYMBOL(__csum_partial_copy_from_user);
 
 EXPORT_SYMBOL(invalid_pte_table);
 #ifdef CONFIG_FUNCTION_TRACER
index 24cdf64789c39dcf8be4a4c214b02540aa02370b..4f2d9dece7abf7917dc1fd0ded97e81bb57a0572 100644 (file)
@@ -805,7 +805,7 @@ static void reset_counters(void *arg)
        }
 }
 
-/* 24K/34K/1004K cores can share the same event map. */
+/* 24K/34K/1004K/interAptiv/loongson1 cores share the same event map. */
 static const struct mips_perf_event mipsxxcore_event_map
                                [PERF_COUNT_HW_MAX] = {
        [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
@@ -814,8 +814,8 @@ static const struct mips_perf_event mipsxxcore_event_map
        [PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
 };
 
-/* 74K core has different branch event code. */
-static const struct mips_perf_event mipsxx74Kcore_event_map
+/* 74K/proAptiv core has different branch event code. */
+static const struct mips_perf_event mipsxxcore_event_map2
                                [PERF_COUNT_HW_MAX] = {
        [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
        [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
@@ -849,7 +849,7 @@ static const struct mips_perf_event xlp_event_map[PERF_COUNT_HW_MAX] = {
        [PERF_COUNT_HW_BRANCH_MISSES] = { 0x1c, CNTR_ALL }, /* PAPI_BR_MSP */
 };
 
-/* 24K/34K/1004K cores can share the same cache event map. */
+/* 24K/34K/1004K/interAptiv/loongson1 cores share the same cache event map. */
 static const struct mips_perf_event mipsxxcore_cache_map
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
@@ -930,8 +930,8 @@ static const struct mips_perf_event mipsxxcore_cache_map
 },
 };
 
-/* 74K core has completely different cache event map. */
-static const struct mips_perf_event mipsxx74Kcore_cache_map
+/* 74K/proAptiv core has completely different cache event map. */
+static const struct mips_perf_event mipsxxcore_cache_map2
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
@@ -978,6 +978,11 @@ static const struct mips_perf_event mipsxx74Kcore_cache_map
                [C(RESULT_MISS)]        = { 0x1d, CNTR_EVEN, P },
        },
 },
+/*
+ * 74K core does not have specific DTLB events. proAptiv core has
+ * "speculative" DTLB events which are numbered 0x63 (even/odd) and
+ * not included here. One can use raw events if really needed.
+ */
 [C(ITLB)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)]      = { 0x04, CNTR_EVEN, T },
@@ -1378,6 +1383,10 @@ static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev)
 #define IS_BOTH_COUNTERS_74K_EVENT(b)                                  \
        ((b) == 0 || (b) == 1)
 
+/* proAptiv */
+#define IS_BOTH_COUNTERS_PROAPTIV_EVENT(b)                             \
+       ((b) == 0 || (b) == 1)
+
 /* 1004K */
 #define IS_BOTH_COUNTERS_1004K_EVENT(b)                                        \
        ((b) == 0 || (b) == 1 || (b) == 11)
@@ -1391,6 +1400,20 @@ static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev)
 #define IS_RANGE_V_1004K_EVENT(r)      ((r) == 47)
 #endif
 
+/* interAptiv */
+#define IS_BOTH_COUNTERS_INTERAPTIV_EVENT(b)                           \
+       ((b) == 0 || (b) == 1 || (b) == 11)
+#ifdef CONFIG_MIPS_MT_SMP
+/* The P/V/T info is not provided for "(b) == 38" in SUM, assume P. */
+#define IS_RANGE_P_INTERAPTIV_EVENT(r, b)                              \
+       ((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||             \
+        (b) == 25 || (b) == 36 || (b) == 38 || (b) == 39 ||            \
+        (r) == 44 || (r) == 174 || (r) == 176 || ((b) >= 50 &&         \
+        (b) <= 59) || (r) == 188 || (b) == 61 || (b) == 62 ||          \
+        ((b) >= 64 && (b) <= 67))
+#define IS_RANGE_V_INTERAPTIV_EVENT(r) ((r) == 47 || (r) == 175)
+#endif
+
 /* BMIPS5000 */
 #define IS_BOTH_COUNTERS_BMIPS5000_EVENT(b)                            \
        ((b) == 0 || (b) == 1)
@@ -1442,6 +1465,7 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
 #endif
                break;
        case CPU_74K:
+       case CPU_1074K:
                if (IS_BOTH_COUNTERS_74K_EVENT(base_id))
                        raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
                else
@@ -1449,6 +1473,16 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
                                raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
 #ifdef CONFIG_MIPS_MT_SMP
                raw_event.range = P;
+#endif
+               break;
+       case CPU_PROAPTIV:
+               if (IS_BOTH_COUNTERS_PROAPTIV_EVENT(base_id))
+                       raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
+               else
+                       raw_event.cntr_mask =
+                               raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
+#ifdef CONFIG_MIPS_MT_SMP
+               raw_event.range = P;
 #endif
                break;
        case CPU_1004K:
@@ -1464,6 +1498,21 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
                        raw_event.range = V;
                else
                        raw_event.range = T;
+#endif
+               break;
+       case CPU_INTERAPTIV:
+               if (IS_BOTH_COUNTERS_INTERAPTIV_EVENT(base_id))
+                       raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
+               else
+                       raw_event.cntr_mask =
+                               raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
+#ifdef CONFIG_MIPS_MT_SMP
+               if (IS_RANGE_P_INTERAPTIV_EVENT(raw_id, base_id))
+                       raw_event.range = P;
+               else if (unlikely(IS_RANGE_V_INTERAPTIV_EVENT(raw_id)))
+                       raw_event.range = V;
+               else
+                       raw_event.range = T;
 #endif
                break;
        case CPU_BMIPS5000:
@@ -1576,14 +1625,29 @@ init_hw_perf_events(void)
                break;
        case CPU_74K:
                mipspmu.name = "mips/74K";
-               mipspmu.general_event_map = &mipsxx74Kcore_event_map;
-               mipspmu.cache_event_map = &mipsxx74Kcore_cache_map;
+               mipspmu.general_event_map = &mipsxxcore_event_map2;
+               mipspmu.cache_event_map = &mipsxxcore_cache_map2;
+               break;
+       case CPU_PROAPTIV:
+               mipspmu.name = "mips/proAptiv";
+               mipspmu.general_event_map = &mipsxxcore_event_map2;
+               mipspmu.cache_event_map = &mipsxxcore_cache_map2;
                break;
        case CPU_1004K:
                mipspmu.name = "mips/1004K";
                mipspmu.general_event_map = &mipsxxcore_event_map;
                mipspmu.cache_event_map = &mipsxxcore_cache_map;
                break;
+       case CPU_1074K:
+               mipspmu.name = "mips/1074K";
+               mipspmu.general_event_map = &mipsxxcore_event_map;
+               mipspmu.cache_event_map = &mipsxxcore_cache_map;
+               break;
+       case CPU_INTERAPTIV:
+               mipspmu.name = "mips/interAptiv";
+               mipspmu.general_event_map = &mipsxxcore_event_map;
+               mipspmu.cache_event_map = &mipsxxcore_cache_map;
+               break;
        case CPU_LOONGSON1:
                mipspmu.name = "mips/loongson1";
                mipspmu.general_event_map = &mipsxxcore_event_map;
index 00d20974b3e7b2bde15969c78402dbea65793edc..e40971b51d2f0bf47e3eb217f43c652c732e7a09 100644 (file)
 
 unsigned int vced_count, vcei_count;
 
+/*
+ *  * No lock; only written during early bootup by CPU 0.
+ *   */
+static RAW_NOTIFIER_HEAD(proc_cpuinfo_chain);
+
+int __ref register_proc_cpuinfo_notifier(struct notifier_block *nb)
+{
+       return raw_notifier_chain_register(&proc_cpuinfo_chain, nb);
+}
+
+int proc_cpuinfo_notifier_call_chain(unsigned long val, void *v)
+{
+       return raw_notifier_call_chain(&proc_cpuinfo_chain, val, v);
+}
+
 static int show_cpuinfo(struct seq_file *m, void *v)
 {
+       struct proc_cpuinfo_notifier_args proc_cpuinfo_notifier_args;
        unsigned long n = (unsigned long) v - 1;
        unsigned int version = cpu_data[n].processor_id;
        unsigned int fp_vers = cpu_data[n].fpu_id;
@@ -95,6 +111,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
        if (cpu_has_mipsmt)     seq_printf(m, "%s", " mt");
        if (cpu_has_mmips)      seq_printf(m, "%s", " micromips");
        if (cpu_has_vz)         seq_printf(m, "%s", " vz");
+       if (cpu_has_msa)        seq_printf(m, "%s", " msa");
+       if (cpu_has_eva)        seq_printf(m, "%s", " eva");
        seq_printf(m, "\n");
 
        if (cpu_has_mmips) {
@@ -118,6 +136,13 @@ static int show_cpuinfo(struct seq_file *m, void *v)
                      cpu_has_vce ? "%u" : "not available");
        seq_printf(m, fmt, 'D', vced_count);
        seq_printf(m, fmt, 'I', vcei_count);
+
+       proc_cpuinfo_notifier_args.m = m;
+       proc_cpuinfo_notifier_args.n = n;
+
+       raw_notifier_call_chain(&proc_cpuinfo_chain, 0,
+                               &proc_cpuinfo_notifier_args);
+
        seq_printf(m, "\n");
 
        return 0;
index 6ae540e133b2aa592a029a131f541a6619aaaceb..60e39dc7f1eb1f5eb72b2e335aa909471684fe0b 100644 (file)
@@ -32,6 +32,7 @@
 #include <asm/cpu.h>
 #include <asm/dsp.h>
 #include <asm/fpu.h>
+#include <asm/msa.h>
 #include <asm/pgtable.h>
 #include <asm/mipsregs.h>
 #include <asm/processor.h>
@@ -65,6 +66,8 @@ void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
        clear_used_math();
        clear_fpu_owner();
        init_dsp();
+       clear_thread_flag(TIF_MSA_CTX_LIVE);
+       disable_msa();
        regs->cp0_epc = pc;
        regs->regs[29] = sp;
 }
@@ -89,7 +92,9 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 
        preempt_disable();
 
-       if (is_fpu_owner())
+       if (is_msa_enabled())
+               save_msa(p);
+       else if (is_fpu_owner())
                save_fp(p);
 
        if (cpu_has_dsp)
@@ -157,7 +162,13 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 /* Fill in the fpu structure for a core dump.. */
 int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r)
 {
-       memcpy(r, &current->thread.fpu, sizeof(current->thread.fpu));
+       int i;
+
+       for (i = 0; i < NUM_FPU_REGS; i++)
+               memcpy(&r[i], &current->thread.fpu.fpr[i], sizeof(*r));
+
+       memcpy(&r[NUM_FPU_REGS], &current->thread.fpu.fcr31,
+              sizeof(current->thread.fpu.fcr31));
 
        return 1;
 }
@@ -192,7 +203,13 @@ int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
 
 int dump_task_fpu(struct task_struct *t, elf_fpregset_t *fpr)
 {
-       memcpy(fpr, &t->thread.fpu, sizeof(current->thread.fpu));
+       int i;
+
+       for (i = 0; i < NUM_FPU_REGS; i++)
+               memcpy(&fpr[i], &t->thread.fpu.fpr[i], sizeof(*fpr));
+
+       memcpy(&fpr[NUM_FPU_REGS], &t->thread.fpu.fcr31,
+              sizeof(t->thread.fpu.fcr31));
 
        return 1;
 }
index 7da9b76db4d9719157d112044a81a23d44063261..7271e5a8308165c901a5c4e248f3b7c02225239f 100644 (file)
@@ -114,51 +114,30 @@ int ptrace_setregs(struct task_struct *child, __s64 __user *data)
 int ptrace_getfpregs(struct task_struct *child, __u32 __user *data)
 {
        int i;
-       unsigned int tmp;
 
        if (!access_ok(VERIFY_WRITE, data, 33 * 8))
                return -EIO;
 
        if (tsk_used_math(child)) {
-               fpureg_t *fregs = get_fpu_regs(child);
+               union fpureg *fregs = get_fpu_regs(child);
                for (i = 0; i < 32; i++)
-                       __put_user(fregs[i], i + (__u64 __user *) data);
+                       __put_user(get_fpr64(&fregs[i], 0),
+                                  i + (__u64 __user *)data);
        } else {
                for (i = 0; i < 32; i++)
                        __put_user((__u64) -1, i + (__u64 __user *) data);
        }
 
        __put_user(child->thread.fpu.fcr31, data + 64);
-
-       preempt_disable();
-       if (cpu_has_fpu) {
-               unsigned int flags;
-
-               if (cpu_has_mipsmt) {
-                       unsigned int vpflags = dvpe();
-                       flags = read_c0_status();
-                       __enable_fpu(FPU_AS_IS);
-                       __asm__ __volatile__("cfc1\t%0,$0" : "=r" (tmp));
-                       write_c0_status(flags);
-                       evpe(vpflags);
-               } else {
-                       flags = read_c0_status();
-                       __enable_fpu(FPU_AS_IS);
-                       __asm__ __volatile__("cfc1\t%0,$0" : "=r" (tmp));
-                       write_c0_status(flags);
-               }
-       } else {
-               tmp = 0;
-       }
-       preempt_enable();
-       __put_user(tmp, data + 65);
+       __put_user(current_cpu_data.fpu_id, data + 65);
 
        return 0;
 }
 
 int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
 {
-       fpureg_t *fregs;
+       union fpureg *fregs;
+       u64 fpr_val;
        int i;
 
        if (!access_ok(VERIFY_READ, data, 33 * 8))
@@ -166,8 +145,10 @@ int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
 
        fregs = get_fpu_regs(child);
 
-       for (i = 0; i < 32; i++)
-               __get_user(fregs[i], i + (__u64 __user *) data);
+       for (i = 0; i < 32; i++) {
+               __get_user(fpr_val, i + (__u64 __user *)data);
+               set_fpr64(&fregs[i], 0, fpr_val);
+       }
 
        __get_user(child->thread.fpu.fcr31, data + 64);
 
@@ -300,10 +281,27 @@ static int fpr_get(struct task_struct *target,
                   unsigned int pos, unsigned int count,
                   void *kbuf, void __user *ubuf)
 {
-       return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
-                                  &target->thread.fpu,
-                                  0, sizeof(elf_fpregset_t));
+       unsigned i;
+       int err;
+       u64 fpr_val;
+
        /* XXX fcr31  */
+
+       if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t))
+               return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+                                          &target->thread.fpu,
+                                          0, sizeof(elf_fpregset_t));
+
+       for (i = 0; i < NUM_FPU_REGS; i++) {
+               fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0);
+               err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+                                         &fpr_val, i * sizeof(elf_fpreg_t),
+                                         (i + 1) * sizeof(elf_fpreg_t));
+               if (err)
+                       return err;
+       }
+
+       return 0;
 }
 
 static int fpr_set(struct task_struct *target,
@@ -311,10 +309,27 @@ static int fpr_set(struct task_struct *target,
                   unsigned int pos, unsigned int count,
                   const void *kbuf, const void __user *ubuf)
 {
-       return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-                                 &target->thread.fpu,
-                                 0, sizeof(elf_fpregset_t));
+       unsigned i;
+       int err;
+       u64 fpr_val;
+
        /* XXX fcr31  */
+
+       if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t))
+               return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+                                         &target->thread.fpu,
+                                         0, sizeof(elf_fpregset_t));
+
+       for (i = 0; i < NUM_FPU_REGS; i++) {
+               err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+                                        &fpr_val, i * sizeof(elf_fpreg_t),
+                                        (i + 1) * sizeof(elf_fpreg_t));
+               if (err)
+                       return err;
+               set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
+       }
+
+       return 0;
 }
 
 enum mips_regset {
@@ -408,7 +423,7 @@ long arch_ptrace(struct task_struct *child, long request,
        /* Read the word at location addr in the USER area. */
        case PTRACE_PEEKUSR: {
                struct pt_regs *regs;
-               fpureg_t *fregs;
+               union fpureg *fregs;
                unsigned long tmp = 0;
 
                regs = task_pt_regs(child);
@@ -433,14 +448,12 @@ long arch_ptrace(struct task_struct *child, long request,
                                 * order bits of the values stored in the even
                                 * registers - unless we're using r2k_switch.S.
                                 */
-                               if (addr & 1)
-                                       tmp = fregs[(addr & ~1) - 32] >> 32;
-                               else
-                                       tmp = fregs[addr - 32];
+                               tmp = get_fpr32(&fregs[(addr & ~1) - FPR_BASE],
+                                               addr & 1);
                                break;
                        }
 #endif
-                       tmp = fregs[addr - FPR_BASE];
+                       tmp = get_fpr32(&fregs[addr - FPR_BASE], 0);
                        break;
                case PC:
                        tmp = regs->cp0_epc;
@@ -465,44 +478,10 @@ long arch_ptrace(struct task_struct *child, long request,
                case FPC_CSR:
                        tmp = child->thread.fpu.fcr31;
                        break;
-               case FPC_EIR: { /* implementation / version register */
-                       unsigned int flags;
-#ifdef CONFIG_MIPS_MT_SMTC
-                       unsigned long irqflags;
-                       unsigned int mtflags;
-#endif /* CONFIG_MIPS_MT_SMTC */
-
-                       preempt_disable();
-                       if (!cpu_has_fpu) {
-                               preempt_enable();
-                               break;
-                       }
-
-#ifdef CONFIG_MIPS_MT_SMTC
-                       /* Read-modify-write of Status must be atomic */
-                       local_irq_save(irqflags);
-                       mtflags = dmt();
-#endif /* CONFIG_MIPS_MT_SMTC */
-                       if (cpu_has_mipsmt) {
-                               unsigned int vpflags = dvpe();
-                               flags = read_c0_status();
-                               __enable_fpu(FPU_AS_IS);
-                               __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
-                               write_c0_status(flags);
-                               evpe(vpflags);
-                       } else {
-                               flags = read_c0_status();
-                               __enable_fpu(FPU_AS_IS);
-                               __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
-                               write_c0_status(flags);
-                       }
-#ifdef CONFIG_MIPS_MT_SMTC
-                       emt(mtflags);
-                       local_irq_restore(irqflags);
-#endif /* CONFIG_MIPS_MT_SMTC */
-                       preempt_enable();
+               case FPC_EIR:
+                       /* implementation / version register */
+                       tmp = current_cpu_data.fpu_id;
                        break;
-               }
                case DSP_BASE ... DSP_BASE + 5: {
                        dspreg_t *dregs;
 
@@ -548,7 +527,7 @@ long arch_ptrace(struct task_struct *child, long request,
                        regs->regs[addr] = data;
                        break;
                case FPR_BASE ... FPR_BASE + 31: {
-                       fpureg_t *fregs = get_fpu_regs(child);
+                       union fpureg *fregs = get_fpu_regs(child);
 
                        if (!tsk_used_math(child)) {
                                /* FP not yet used  */
@@ -563,19 +542,12 @@ long arch_ptrace(struct task_struct *child, long request,
                                 * order bits of the values stored in the even
                                 * registers - unless we're using r2k_switch.S.
                                 */
-                               if (addr & 1) {
-                                       fregs[(addr & ~1) - FPR_BASE] &=
-                                               0xffffffff;
-                                       fregs[(addr & ~1) - FPR_BASE] |=
-                                               ((u64)data) << 32;
-                               } else {
-                                       fregs[addr - FPR_BASE] &= ~0xffffffffLL;
-                                       fregs[addr - FPR_BASE] |= data;
-                               }
+                               set_fpr32(&fregs[(addr & ~1) - FPR_BASE],
+                                         addr & 1, data);
                                break;
                        }
 #endif
-                       fregs[addr - FPR_BASE] = data;
+                       set_fpr64(&fregs[addr - FPR_BASE], 0, data);
                        break;
                }
                case PC:
@@ -662,13 +634,13 @@ long arch_ptrace(struct task_struct *child, long request,
  * Notification of system call entry/exit
  * - triggered by current->work.syscall_trace
  */
-asmlinkage void syscall_trace_enter(struct pt_regs *regs)
+asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
 {
        long ret = 0;
        user_exit();
 
-       /* do the secure computing check first */
-       secure_computing_strict(regs->regs[2]);
+       if (secure_computing(syscall) == -1)
+               return -1;
 
        if (test_thread_flag(TIF_SYSCALL_TRACE) &&
            tracehook_report_syscall_entry(regs))
@@ -677,10 +649,11 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs)
        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
                trace_sys_enter(regs, regs->regs[2]);
 
-       audit_syscall_entry(__syscall_get_arch(),
-                           regs->regs[2],
+       audit_syscall_entry(syscall_get_arch(current, regs),
+                           syscall,
                            regs->regs[4], regs->regs[5],
                            regs->regs[6], regs->regs[7]);
+       return syscall;
 }
 
 /*
index b8aa2dd5b00bc13af9f2b88225718773d7b5732d..b40c3ca60ee55161c6641d6b650829f07259004b 100644 (file)
@@ -80,7 +80,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
        /* Read the word at location addr in the USER area. */
        case PTRACE_PEEKUSR: {
                struct pt_regs *regs;
-               fpureg_t *fregs;
+               union fpureg *fregs;
                unsigned int tmp;
 
                regs = task_pt_regs(child);
@@ -103,13 +103,11 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
                                 * order bits of the values stored in the even
                                 * registers - unless we're using r2k_switch.S.
                                 */
-                               if (addr & 1)
-                                       tmp = fregs[(addr & ~1) - 32] >> 32;
-                               else
-                                       tmp = fregs[addr - 32];
+                               tmp = get_fpr32(&fregs[(addr & ~1) - FPR_BASE],
+                                               addr & 1);
                                break;
                        }
-                       tmp = fregs[addr - FPR_BASE];
+                       tmp = get_fpr32(&fregs[addr - FPR_BASE], 0);
                        break;
                case PC:
                        tmp = regs->cp0_epc;
@@ -129,46 +127,10 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
                case FPC_CSR:
                        tmp = child->thread.fpu.fcr31;
                        break;
-               case FPC_EIR: { /* implementation / version register */
-                       unsigned int flags;
-#ifdef CONFIG_MIPS_MT_SMTC
-                       unsigned int irqflags;
-                       unsigned int mtflags;
-#endif /* CONFIG_MIPS_MT_SMTC */
-
-                       preempt_disable();
-                       if (!cpu_has_fpu) {
-                               preempt_enable();
-                               tmp = 0;
-                               break;
-                       }
-
-#ifdef CONFIG_MIPS_MT_SMTC
-                       /* Read-modify-write of Status must be atomic */
-                       local_irq_save(irqflags);
-                       mtflags = dmt();
-#endif /* CONFIG_MIPS_MT_SMTC */
-
-                       if (cpu_has_mipsmt) {
-                               unsigned int vpflags = dvpe();
-                               flags = read_c0_status();
-                               __enable_fpu(FPU_AS_IS);
-                               __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
-                               write_c0_status(flags);
-                               evpe(vpflags);
-                       } else {
-                               flags = read_c0_status();
-                               __enable_fpu(FPU_AS_IS);
-                               __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
-                               write_c0_status(flags);
-                       }
-#ifdef CONFIG_MIPS_MT_SMTC
-                       emt(mtflags);
-                       local_irq_restore(irqflags);
-#endif /* CONFIG_MIPS_MT_SMTC */
-                       preempt_enable();
+               case FPC_EIR:
+                       /* implementation / version register */
+                       tmp = current_cpu_data.fpu_id;
                        break;
-               }
                case DSP_BASE ... DSP_BASE + 5: {
                        dspreg_t *dregs;
 
@@ -233,7 +195,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
                        regs->regs[addr] = data;
                        break;
                case FPR_BASE ... FPR_BASE + 31: {
-                       fpureg_t *fregs = get_fpu_regs(child);
+                       union fpureg *fregs = get_fpu_regs(child);
 
                        if (!tsk_used_math(child)) {
                                /* FP not yet used  */
@@ -247,18 +209,11 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
                                 * order bits of the values stored in the even
                                 * registers - unless we're using r2k_switch.S.
                                 */
-                               if (addr & 1) {
-                                       fregs[(addr & ~1) - FPR_BASE] &=
-                                               0xffffffff;
-                                       fregs[(addr & ~1) - FPR_BASE] |=
-                                               ((u64)data) << 32;
-                               } else {
-                                       fregs[addr - FPR_BASE] &= ~0xffffffffLL;
-                                       fregs[addr - FPR_BASE] |= data;
-                               }
+                               set_fpr32(&fregs[(addr & ~1) - FPR_BASE],
+                                         addr & 1, data);
                                break;
                        }
-                       fregs[addr - FPR_BASE] = data;
+                       set_fpr64(&fregs[addr - FPR_BASE], 0, data);
                        break;
                }
                case PC:
index 73b0ddf910d41b08dd1bacd97f862e97f94f4c95..71814272d148e18e11d00a80392ddcdefd1cfe07 100644 (file)
@@ -13,6 +13,7 @@
  * Copyright (C) 1999, 2001 Silicon Graphics, Inc.
  */
 #include <asm/asm.h>
+#include <asm/asmmacro.h>
 #include <asm/errno.h>
 #include <asm/fpregdef.h>
 #include <asm/mipsregs.h>
@@ -30,7 +31,7 @@
        .endm
 
        .set    noreorder
-       .set    mips3
+       .set    arch=r4000
 
 LEAF(_save_fp_context)
        cfc1    t1, fcr31
@@ -245,6 +246,218 @@ LEAF(_restore_fp_context32)
        END(_restore_fp_context32)
 #endif
 
+#ifdef CONFIG_CPU_HAS_MSA
+
+       .macro  save_sc_msareg  wr, off, sc, tmp
+#ifdef CONFIG_64BIT
+       copy_u_d \tmp, \wr, 1
+       EX sd   \tmp, (\off+(\wr*8))(\sc)
+#elif defined(CONFIG_CPU_LITTLE_ENDIAN)
+       copy_u_w \tmp, \wr, 2
+       EX sw   \tmp, (\off+(\wr*8)+0)(\sc)
+       copy_u_w \tmp, \wr, 3
+       EX sw   \tmp, (\off+(\wr*8)+4)(\sc)
+#else /* CONFIG_CPU_BIG_ENDIAN */
+       copy_u_w \tmp, \wr, 2
+       EX sw   \tmp, (\off+(\wr*8)+4)(\sc)
+       copy_u_w \tmp, \wr, 3
+       EX sw   \tmp, (\off+(\wr*8)+0)(\sc)
+#endif
+       .endm
+
+/*
+ * int _save_msa_context(struct sigcontext *sc)
+ *
+ * Save the upper 64 bits of each vector register along with the MSA_CSR
+ * register into sc. Returns zero on success, else non-zero.
+ */
+LEAF(_save_msa_context)
+       save_sc_msareg  0, SC_MSAREGS, a0, t0
+       save_sc_msareg  1, SC_MSAREGS, a0, t0
+       save_sc_msareg  2, SC_MSAREGS, a0, t0
+       save_sc_msareg  3, SC_MSAREGS, a0, t0
+       save_sc_msareg  4, SC_MSAREGS, a0, t0
+       save_sc_msareg  5, SC_MSAREGS, a0, t0
+       save_sc_msareg  6, SC_MSAREGS, a0, t0
+       save_sc_msareg  7, SC_MSAREGS, a0, t0
+       save_sc_msareg  8, SC_MSAREGS, a0, t0
+       save_sc_msareg  9, SC_MSAREGS, a0, t0
+       save_sc_msareg  10, SC_MSAREGS, a0, t0
+       save_sc_msareg  11, SC_MSAREGS, a0, t0
+       save_sc_msareg  12, SC_MSAREGS, a0, t0
+       save_sc_msareg  13, SC_MSAREGS, a0, t0
+       save_sc_msareg  14, SC_MSAREGS, a0, t0
+       save_sc_msareg  15, SC_MSAREGS, a0, t0
+       save_sc_msareg  16, SC_MSAREGS, a0, t0
+       save_sc_msareg  17, SC_MSAREGS, a0, t0
+       save_sc_msareg  18, SC_MSAREGS, a0, t0
+       save_sc_msareg  19, SC_MSAREGS, a0, t0
+       save_sc_msareg  20, SC_MSAREGS, a0, t0
+       save_sc_msareg  21, SC_MSAREGS, a0, t0
+       save_sc_msareg  22, SC_MSAREGS, a0, t0
+       save_sc_msareg  23, SC_MSAREGS, a0, t0
+       save_sc_msareg  24, SC_MSAREGS, a0, t0
+       save_sc_msareg  25, SC_MSAREGS, a0, t0
+       save_sc_msareg  26, SC_MSAREGS, a0, t0
+       save_sc_msareg  27, SC_MSAREGS, a0, t0
+       save_sc_msareg  28, SC_MSAREGS, a0, t0
+       save_sc_msareg  29, SC_MSAREGS, a0, t0
+       save_sc_msareg  30, SC_MSAREGS, a0, t0
+       save_sc_msareg  31, SC_MSAREGS, a0, t0
+       jr      ra
+        li     v0, 0
+       END(_save_msa_context)
+
+#ifdef CONFIG_MIPS32_COMPAT
+
+/*
+ * int _save_msa_context32(struct sigcontext32 *sc)
+ *
+ * Save the upper 64 bits of each vector register along with the MSA_CSR
+ * register into sc. Returns zero on success, else non-zero.
+ */
+LEAF(_save_msa_context32)
+       save_sc_msareg  0, SC32_MSAREGS, a0, t0
+       save_sc_msareg  1, SC32_MSAREGS, a0, t0
+       save_sc_msareg  2, SC32_MSAREGS, a0, t0
+       save_sc_msareg  3, SC32_MSAREGS, a0, t0
+       save_sc_msareg  4, SC32_MSAREGS, a0, t0
+       save_sc_msareg  5, SC32_MSAREGS, a0, t0
+       save_sc_msareg  6, SC32_MSAREGS, a0, t0
+       save_sc_msareg  7, SC32_MSAREGS, a0, t0
+       save_sc_msareg  8, SC32_MSAREGS, a0, t0
+       save_sc_msareg  9, SC32_MSAREGS, a0, t0
+       save_sc_msareg  10, SC32_MSAREGS, a0, t0
+       save_sc_msareg  11, SC32_MSAREGS, a0, t0
+       save_sc_msareg  12, SC32_MSAREGS, a0, t0
+       save_sc_msareg  13, SC32_MSAREGS, a0, t0
+       save_sc_msareg  14, SC32_MSAREGS, a0, t0
+       save_sc_msareg  15, SC32_MSAREGS, a0, t0
+       save_sc_msareg  16, SC32_MSAREGS, a0, t0
+       save_sc_msareg  17, SC32_MSAREGS, a0, t0
+       save_sc_msareg  18, SC32_MSAREGS, a0, t0
+       save_sc_msareg  19, SC32_MSAREGS, a0, t0
+       save_sc_msareg  20, SC32_MSAREGS, a0, t0
+       save_sc_msareg  21, SC32_MSAREGS, a0, t0
+       save_sc_msareg  22, SC32_MSAREGS, a0, t0
+       save_sc_msareg  23, SC32_MSAREGS, a0, t0
+       save_sc_msareg  24, SC32_MSAREGS, a0, t0
+       save_sc_msareg  25, SC32_MSAREGS, a0, t0
+       save_sc_msareg  26, SC32_MSAREGS, a0, t0
+       save_sc_msareg  27, SC32_MSAREGS, a0, t0
+       save_sc_msareg  28, SC32_MSAREGS, a0, t0
+       save_sc_msareg  29, SC32_MSAREGS, a0, t0
+       save_sc_msareg  30, SC32_MSAREGS, a0, t0
+       save_sc_msareg  31, SC32_MSAREGS, a0, t0
+       jr      ra
+        li     v0, 0
+       END(_save_msa_context32)
+
+#endif /* CONFIG_MIPS32_COMPAT */
+
+       .macro restore_sc_msareg        wr, off, sc, tmp
+#ifdef CONFIG_64BIT
+       EX ld   \tmp, (\off+(\wr*8))(\sc)
+       insert_d \wr, 1, \tmp
+#elif defined(CONFIG_CPU_LITTLE_ENDIAN)
+       EX lw   \tmp, (\off+(\wr*8)+0)(\sc)
+       insert_w \wr, 2, \tmp
+       EX lw   \tmp, (\off+(\wr*8)+4)(\sc)
+       insert_w \wr, 3, \tmp
+#else /* CONFIG_CPU_BIG_ENDIAN */
+       EX lw   \tmp, (\off+(\wr*8)+4)(\sc)
+       insert_w \wr, 2, \tmp
+       EX lw   \tmp, (\off+(\wr*8)+0)(\sc)
+       insert_w \wr, 3, \tmp
+#endif
+       .endm
+
+/*
+ * int _restore_msa_context(struct sigcontext *sc)
+ */
+LEAF(_restore_msa_context)
+       restore_sc_msareg       0, SC_MSAREGS, a0, t0
+       restore_sc_msareg       1, SC_MSAREGS, a0, t0
+       restore_sc_msareg       2, SC_MSAREGS, a0, t0
+       restore_sc_msareg       3, SC_MSAREGS, a0, t0
+       restore_sc_msareg       4, SC_MSAREGS, a0, t0
+       restore_sc_msareg       5, SC_MSAREGS, a0, t0
+       restore_sc_msareg       6, SC_MSAREGS, a0, t0
+       restore_sc_msareg       7, SC_MSAREGS, a0, t0
+       restore_sc_msareg       8, SC_MSAREGS, a0, t0
+       restore_sc_msareg       9, SC_MSAREGS, a0, t0
+       restore_sc_msareg       10, SC_MSAREGS, a0, t0
+       restore_sc_msareg       11, SC_MSAREGS, a0, t0
+       restore_sc_msareg       12, SC_MSAREGS, a0, t0
+       restore_sc_msareg       13, SC_MSAREGS, a0, t0
+       restore_sc_msareg       14, SC_MSAREGS, a0, t0
+       restore_sc_msareg       15, SC_MSAREGS, a0, t0
+       restore_sc_msareg       16, SC_MSAREGS, a0, t0
+       restore_sc_msareg       17, SC_MSAREGS, a0, t0
+       restore_sc_msareg       18, SC_MSAREGS, a0, t0
+       restore_sc_msareg       19, SC_MSAREGS, a0, t0
+       restore_sc_msareg       20, SC_MSAREGS, a0, t0
+       restore_sc_msareg       21, SC_MSAREGS, a0, t0
+       restore_sc_msareg       22, SC_MSAREGS, a0, t0
+       restore_sc_msareg       23, SC_MSAREGS, a0, t0
+       restore_sc_msareg       24, SC_MSAREGS, a0, t0
+       restore_sc_msareg       25, SC_MSAREGS, a0, t0
+       restore_sc_msareg       26, SC_MSAREGS, a0, t0
+       restore_sc_msareg       27, SC_MSAREGS, a0, t0
+       restore_sc_msareg       28, SC_MSAREGS, a0, t0
+       restore_sc_msareg       29, SC_MSAREGS, a0, t0
+       restore_sc_msareg       30, SC_MSAREGS, a0, t0
+       restore_sc_msareg       31, SC_MSAREGS, a0, t0
+       jr      ra
+        li     v0, 0
+       END(_restore_msa_context)
+
+#ifdef CONFIG_MIPS32_COMPAT
+
+/*
+ * int _restore_msa_context32(struct sigcontext32 *sc)
+ */
+LEAF(_restore_msa_context32)
+       restore_sc_msareg       0, SC32_MSAREGS, a0, t0
+       restore_sc_msareg       1, SC32_MSAREGS, a0, t0
+       restore_sc_msareg       2, SC32_MSAREGS, a0, t0
+       restore_sc_msareg       3, SC32_MSAREGS, a0, t0
+       restore_sc_msareg       4, SC32_MSAREGS, a0, t0
+       restore_sc_msareg       5, SC32_MSAREGS, a0, t0
+       restore_sc_msareg       6, SC32_MSAREGS, a0, t0
+       restore_sc_msareg       7, SC32_MSAREGS, a0, t0
+       restore_sc_msareg       8, SC32_MSAREGS, a0, t0
+       restore_sc_msareg       9, SC32_MSAREGS, a0, t0
+       restore_sc_msareg       10, SC32_MSAREGS, a0, t0
+       restore_sc_msareg       11, SC32_MSAREGS, a0, t0
+       restore_sc_msareg       12, SC32_MSAREGS, a0, t0
+       restore_sc_msareg       13, SC32_MSAREGS, a0, t0
+       restore_sc_msareg       14, SC32_MSAREGS, a0, t0
+       restore_sc_msareg       15, SC32_MSAREGS, a0, t0
+       restore_sc_msareg       16, SC32_MSAREGS, a0, t0
+       restore_sc_msareg       17, SC32_MSAREGS, a0, t0
+       restore_sc_msareg       18, SC32_MSAREGS, a0, t0
+       restore_sc_msareg       19, SC32_MSAREGS, a0, t0
+       restore_sc_msareg       20, SC32_MSAREGS, a0, t0
+       restore_sc_msareg       21, SC32_MSAREGS, a0, t0
+       restore_sc_msareg       22, SC32_MSAREGS, a0, t0
+       restore_sc_msareg       23, SC32_MSAREGS, a0, t0
+       restore_sc_msareg       24, SC32_MSAREGS, a0, t0
+       restore_sc_msareg       25, SC32_MSAREGS, a0, t0
+       restore_sc_msareg       26, SC32_MSAREGS, a0, t0
+       restore_sc_msareg       27, SC32_MSAREGS, a0, t0
+       restore_sc_msareg       28, SC32_MSAREGS, a0, t0
+       restore_sc_msareg       29, SC32_MSAREGS, a0, t0
+       restore_sc_msareg       30, SC32_MSAREGS, a0, t0
+       restore_sc_msareg       31, SC32_MSAREGS, a0, t0
+       jr      ra
+        li     v0, 0
+       END(_restore_msa_context32)
+
+#endif /* CONFIG_MIPS32_COMPAT */
+
+#endif /* CONFIG_CPU_HAS_MSA */
+
        .set    reorder
 
        .type   fault@function
index cc78dd9a17c788412e2254f0e505aadb789be34b..abacac7c33ef3561e40ba17304a831d2402f6d99 100644 (file)
  */
 #define ST_OFF (_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS)
 
-/*
- * FPU context is saved iff the process has used it's FPU in the current
- * time slice as indicated by _TIF_USEDFPU.  In any case, the CU1 bit for user
- * space STATUS register should be 0, so that a process *always* starts its
- * userland with FPU disabled after each context switch.
- *
- * FPU will be enabled as soon as the process accesses FPU again, through
- * do_cpu() trap.
- */
-
 /*
  * task_struct *resume(task_struct *prev, task_struct *next,
- *                    struct thread_info *next_ti, int usedfpu)
+ *                    struct thread_info *next_ti, s32 fp_save)
  */
        .align  5
        LEAF(resume)
        LONG_S  ra, THREAD_REG31(a0)
 
        /*
-        * check if we need to save FPU registers
+        * Check whether we need to save any FP context. FP context is saved
+        * iff the process has used the context with the scalar FPU or the MSA
+        * ASE in the current time slice, as indicated by _TIF_USEDFPU and
+        * _TIF_USEDMSA respectively. switch_to will have set fp_save
+        * accordingly to an FP_SAVE_ enum value.
         */
+       beqz    a3, 2f
 
-       beqz    a3, 1f
-
-       PTR_L   t3, TASK_THREAD_INFO(a0)
        /*
-        * clear saved user stack CU1 bit
+        * We do. Clear the saved CU1 bit for prev, such that next time it is
+        * scheduled it will start in userland with the FPU disabled. If the
+        * task uses the FPU then it will be enabled again via the do_cpu trap.
+        * This allows us to lazily restore the FP context.
         */
+       PTR_L   t3, TASK_THREAD_INFO(a0)
        LONG_L  t0, ST_OFF(t3)
        li      t1, ~ST0_CU1
        and     t0, t0, t1
        LONG_S  t0, ST_OFF(t3)
 
+       /* Check whether we're saving scalar or vector context. */
+       bgtz    a3, 1f
+
+       /* Save 128b MSA vector context. */
+       msa_save_all    a0
+       b       2f
+
+1:     /* Save 32b/64b scalar FP context. */
        fpu_save_double a0 t0 t1                # c0_status passed in t0
                                                # clobbers t1
-1:
+2:
 
 #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
        PTR_LA  t8, __stack_chk_guard
@@ -141,6 +145,26 @@ LEAF(_restore_fp)
        jr      ra
        END(_restore_fp)
 
+#ifdef CONFIG_CPU_HAS_MSA
+
+/*
+ * Save a thread's MSA vector context.
+ */
+LEAF(_save_msa)
+       msa_save_all    a0
+       jr      ra
+       END(_save_msa)
+
+/*
+ * Restore a thread's MSA vector context.
+ */
+LEAF(_restore_msa)
+       msa_restore_all a0
+       jr      ra
+       END(_restore_msa)
+
+#endif
+
 /*
  * Load the FPU with signalling NANS.  This bit pattern we're using has
  * the property that no matter whether considered as single or as double
@@ -270,7 +294,7 @@ LEAF(_init_fpu)
 1:     .set    pop
 #endif /* CONFIG_CPU_MIPS32_R2 */
 #else
-       .set    mips3
+       .set    arch=r4000
        dmtc1   t1, $f0
        dmtc1   t1, $f2
        dmtc1   t1, $f4
index a5b14f48e1af805af4aaff1b4d6f17fff0beffe9..fdc70b40044265fb2107df186eb0e593a96b9571 100644 (file)
@@ -6,6 +6,7 @@
  * Copyright (C) 1995-99, 2000- 02, 06 Ralf Baechle <ralf@linux-mips.org>
  * Copyright (C) 2001 MIPS Technologies, Inc.
  * Copyright (C) 2004 Thiemo Seufer
+ * Copyright (C) 2014 Imagination Technologies Ltd.
  */
 #include <linux/errno.h>
 #include <asm/asm.h>
@@ -74,10 +75,10 @@ NESTED(handle_sys, PT_SIZE, sp)
        .set    noreorder
        .set    nomacro
 
-1:     lw      t5, 16(t0)              # argument #5 from usp
-4:     lw      t6, 20(t0)              # argument #6 from usp
-3:     lw      t7, 24(t0)              # argument #7 from usp
-2:     lw      t8, 28(t0)              # argument #8 from usp
+1:     user_lw(t5, 16(t0))             # argument #5 from usp
+4:     user_lw(t6, 20(t0))             # argument #6 from usp
+3:     user_lw(t7, 24(t0))             # argument #7 from usp
+2:     user_lw(t8, 28(t0))             # argument #8 from usp
 
        sw      t5, 16(sp)              # argument #5 to ksp
        sw      t6, 20(sp)              # argument #6 to ksp
@@ -118,7 +119,18 @@ syscall_trace_entry:
        SAVE_STATIC
        move    s0, t2
        move    a0, sp
-       jal     syscall_trace_enter
+
+       /*
+        * syscall number is in v0 unless we called syscall(__NR_###)
+        * where the real syscall number is in a0
+        */
+       addiu   a1, v0,  __NR_O32_Linux
+       bnez    v0, 1f /* __NR_syscall at offset 0 */
+       lw      a1, PT_R4(sp)
+
+1:     jal     syscall_trace_enter
+
+       bltz    v0, 2f                  # seccomp failed? Skip syscall
 
        move    t0, s0
        RESTORE_STATIC
@@ -138,7 +150,7 @@ syscall_trace_entry:
        sw      t1, PT_R0(sp)           # save it for syscall restarting
 1:     sw      v0, PT_R2(sp)           # result
 
-       j       syscall_exit
+2:     j       syscall_exit
 
 /* ------------------------------------------------------------------------ */
 
index b56e254beb15b6bb91078b43e1c13b4ea5b83bd3..dd99c3285aeae75f65ae982de46a755ae953f140 100644 (file)
@@ -80,8 +80,11 @@ syscall_trace_entry:
        SAVE_STATIC
        move    s0, t2
        move    a0, sp
+       daddiu  a1, v0, __NR_64_Linux
        jal     syscall_trace_enter
 
+       bltz    v0, 2f                  # seccomp failed? Skip syscall
+
        move    t0, s0
        RESTORE_STATIC
        ld      a0, PT_R4(sp)           # Restore argument registers
@@ -102,7 +105,7 @@ syscall_trace_entry:
        sd      t1, PT_R0(sp)           # save it for syscall restarting
 1:     sd      v0, PT_R2(sp)           # result
 
-       j       syscall_exit
+2:     j       syscall_exit
 
 illegal_syscall:
        /* This also isn't a 64-bit syscall, throw an error.  */
index f7e5b72cf481256103919641267714e6c9e59742..f68d2f4f009021de3ed784e9733aee5d3580d8e0 100644 (file)
@@ -72,8 +72,11 @@ n32_syscall_trace_entry:
        SAVE_STATIC
        move    s0, t2
        move    a0, sp
+       daddiu  a1, v0, __NR_N32_Linux
        jal     syscall_trace_enter
 
+       bltz    v0, 2f                  # seccomp failed? Skip syscall
+
        move    t0, s0
        RESTORE_STATIC
        ld      a0, PT_R4(sp)           # Restore argument registers
@@ -94,7 +97,7 @@ n32_syscall_trace_entry:
        sd      t1, PT_R0(sp)           # save it for syscall restarting
 1:     sd      v0, PT_R2(sp)           # result
 
-       j       syscall_exit
+2:     j       syscall_exit
 
 not_n32_scall:
        /* This is not an n32 compatibility syscall, pass it on to
index 6788727d91af1f79da1af76fbb0e7c3cc99ec600..70f6acecd928896c54b586b3470c27eb703721be 100644 (file)
@@ -112,7 +112,20 @@ trace_a_syscall:
 
        move    s0, t2                  # Save syscall pointer
        move    a0, sp
-       jal     syscall_trace_enter
+       /*
+        * syscall number is in v0 unless we called syscall(__NR_###)
+        * where the real syscall number is in a0
+        * note: NR_syscall is the first O32 syscall but the macro is
+        * only defined when compiling with -mabi=32 (CONFIG_32BIT)
+        * therefore __NR_O32_Linux is used (4000)
+        */
+       addiu   a1, v0,  __NR_O32_Linux
+       bnez    v0, 1f /* __NR_syscall at offset 0 */
+       lw      a1, PT_R4(sp)
+
+1:     jal     syscall_trace_enter
+
+       bltz    v0, 2f                  # seccomp failed? Skip syscall
 
        move    t0, s0
        RESTORE_STATIC
@@ -136,7 +149,7 @@ trace_a_syscall:
        sd      t1, PT_R0(sp)           # save it for syscall restarting
 1:     sd      v0, PT_R2(sp)           # result
 
-       j       syscall_exit
+2:     j       syscall_exit
 
 /* ------------------------------------------------------------------------ */
 
index 5199563c4403a5bb0cebbeaf85bb2aeca8b1b8c2..33133d3df3e5a5bdb5a62647522b0946c874ba6d 100644 (file)
@@ -6,6 +6,7 @@
  * Copyright (C) 1991, 1992  Linus Torvalds
  * Copyright (C) 1994 - 2000  Ralf Baechle
  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2014, Imagination Technologies Ltd.
  */
 #include <linux/cache.h>
 #include <linux/context_tracking.h>
@@ -30,6 +31,7 @@
 #include <linux/bitops.h>
 #include <asm/cacheflush.h>
 #include <asm/fpu.h>
+#include <asm/msa.h>
 #include <asm/sim.h>
 #include <asm/ucontext.h>
 #include <asm/cpu-features.h>
@@ -46,8 +48,8 @@ static int (*restore_fp_context)(struct sigcontext __user *sc);
 extern asmlinkage int _save_fp_context(struct sigcontext __user *sc);
 extern asmlinkage int _restore_fp_context(struct sigcontext __user *sc);
 
-extern asmlinkage int fpu_emulator_save_context(struct sigcontext __user *sc);
-extern asmlinkage int fpu_emulator_restore_context(struct sigcontext __user *sc);
+extern asmlinkage int _save_msa_context(struct sigcontext __user *sc);
+extern asmlinkage int _restore_msa_context(struct sigcontext __user *sc);
 
 struct sigframe {
        u32 sf_ass[4];          /* argument save space for o32 */
@@ -63,18 +65,96 @@ struct rt_sigframe {
        struct ucontext rs_uc;
 };
 
+/*
+ * Thread saved context copy to/from a signal context presumed to be on the
+ * user stack, and therefore accessed with appropriate macros from uaccess.h.
+ */
+static int copy_fp_to_sigcontext(struct sigcontext __user *sc)
+{
+       int i;
+       int err = 0;
+
+       for (i = 0; i < NUM_FPU_REGS; i++) {
+               err |=
+                   __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
+                              &sc->sc_fpregs[i]);
+       }
+       err |= __put_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr);
+
+       return err;
+}
+
+static int copy_fp_from_sigcontext(struct sigcontext __user *sc)
+{
+       int i;
+       int err = 0;
+       u64 fpr_val;
+
+       for (i = 0; i < NUM_FPU_REGS; i++) {
+               err |= __get_user(fpr_val, &sc->sc_fpregs[i]);
+               set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
+       }
+       err |= __get_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr);
+
+       return err;
+}
+
+/*
+ * These functions will save only the upper 64 bits of the vector registers,
+ * since the lower 64 bits have already been saved as the scalar FP context.
+ */
+static int copy_msa_to_sigcontext(struct sigcontext __user *sc)
+{
+       int i;
+       int err = 0;
+
+       for (i = 0; i < NUM_FPU_REGS; i++) {
+               err |=
+                   __put_user(get_fpr64(&current->thread.fpu.fpr[i], 1),
+                              &sc->sc_msaregs[i]);
+       }
+       err |= __put_user(current->thread.fpu.msacsr, &sc->sc_msa_csr);
+
+       return err;
+}
+
+static int copy_msa_from_sigcontext(struct sigcontext __user *sc)
+{
+       int i;
+       int err = 0;
+       u64 val;
+
+       for (i = 0; i < NUM_FPU_REGS; i++) {
+               err |= __get_user(val, &sc->sc_msaregs[i]);
+               set_fpr64(&current->thread.fpu.fpr[i], 1, val);
+       }
+       err |= __get_user(current->thread.fpu.msacsr, &sc->sc_msa_csr);
+
+       return err;
+}
+
 /*
  * Helper routines
  */
-static int protected_save_fp_context(struct sigcontext __user *sc)
+static int protected_save_fp_context(struct sigcontext __user *sc,
+                                    unsigned used_math)
 {
        int err;
+       bool save_msa = cpu_has_msa && (used_math & USEDMATH_MSA);
+#ifndef CONFIG_EVA
        while (1) {
                lock_fpu_owner();
-               err = own_fpu_inatomic(1);
-               if (!err)
-                       err = save_fp_context(sc); /* this might fail */
-               unlock_fpu_owner();
+               if (is_fpu_owner()) {
+                       err = save_fp_context(sc);
+                       if (save_msa && !err)
+                               err = _save_msa_context(sc);
+                       unlock_fpu_owner();
+               } else {
+                       unlock_fpu_owner();
+                       err = copy_fp_to_sigcontext(sc);
+                       if (save_msa && !err)
+                               err = copy_msa_to_sigcontext(sc);
+               }
                if (likely(!err))
                        break;
                /* touch the sigcontext and try again */
@@ -84,18 +164,44 @@ static int protected_save_fp_context(struct sigcontext __user *sc)
                if (err)
                        break;  /* really bad sigcontext */
        }
+#else
+       /*
+        * EVA does not have FPU EVA instructions so saving fpu context directly
+        * does not work.
+        */
+       disable_msa();
+       lose_fpu(1);
+       err = save_fp_context(sc); /* this might fail */
+       if (save_msa && !err)
+               err = copy_msa_to_sigcontext(sc);
+#endif
        return err;
 }
 
-static int protected_restore_fp_context(struct sigcontext __user *sc)
+static int protected_restore_fp_context(struct sigcontext __user *sc,
+                                       unsigned used_math)
 {
        int err, tmp __maybe_unused;
+       bool restore_msa = cpu_has_msa && (used_math & USEDMATH_MSA);
+#ifndef CONFIG_EVA
        while (1) {
                lock_fpu_owner();
-               err = own_fpu_inatomic(0);
-               if (!err)
-                       err = restore_fp_context(sc); /* this might fail */
-               unlock_fpu_owner();
+               if (is_fpu_owner()) {
+                       err = restore_fp_context(sc);
+                       if (restore_msa && !err) {
+                               enable_msa();
+                               err = _restore_msa_context(sc);
+                       } else {
+                               /* signal handler may have used MSA */
+                               disable_msa();
+                       }
+                       unlock_fpu_owner();
+               } else {
+                       unlock_fpu_owner();
+                       err = copy_fp_from_sigcontext(sc);
+                       if (!err && (used_math & USEDMATH_MSA))
+                               err = copy_msa_from_sigcontext(sc);
+               }
                if (likely(!err))
                        break;
                /* touch the sigcontext and try again */
@@ -105,6 +211,17 @@ static int protected_restore_fp_context(struct sigcontext __user *sc)
                if (err)
                        break;  /* really bad sigcontext */
        }
+#else
+       /*
+        * EVA does not have FPU EVA instructions so restoring fpu context
+        * directly does not work.
+        */
+       enable_msa();
+       lose_fpu(0);
+       err = restore_fp_context(sc); /* this might fail */
+       if (restore_msa && !err)
+               err = copy_msa_from_sigcontext(sc);
+#endif
        return err;
 }
 
@@ -135,7 +252,8 @@ int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
                err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
        }
 
-       used_math = !!used_math();
+       used_math = used_math() ? USEDMATH_FP : 0;
+       used_math |= thread_msa_context_live() ? USEDMATH_MSA : 0;
        err |= __put_user(used_math, &sc->sc_used_math);
 
        if (used_math) {
@@ -143,7 +261,7 @@ int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
                 * Save FPU state to signal context. Signal handler
                 * will "inherit" current FPU state.
                 */
-               err |= protected_save_fp_context(sc);
+               err |= protected_save_fp_context(sc, used_math);
        }
        return err;
 }
@@ -168,14 +286,14 @@ int fpcsr_pending(unsigned int __user *fpcsr)
 }
 
 static int
-check_and_restore_fp_context(struct sigcontext __user *sc)
+check_and_restore_fp_context(struct sigcontext __user *sc, unsigned used_math)
 {
        int err, sig;
 
        err = sig = fpcsr_pending(&sc->sc_fpc_csr);
        if (err > 0)
                err = 0;
-       err |= protected_restore_fp_context(sc);
+       err |= protected_restore_fp_context(sc, used_math);
        return err ?: sig;
 }
 
@@ -215,9 +333,10 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
        if (used_math) {
                /* restore fpu context if we have used it before */
                if (!err)
-                       err = check_and_restore_fp_context(sc);
+                       err = check_and_restore_fp_context(sc, used_math);
        } else {
-               /* signal handler may have used FPU.  Give it up. */
+               /* signal handler may have used FPU or MSA. Disable them. */
+               disable_msa();
                lose_fpu(0);
        }
 
@@ -591,23 +710,26 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
 }
 
 #ifdef CONFIG_SMP
+#ifndef CONFIG_EVA
 static int smp_save_fp_context(struct sigcontext __user *sc)
 {
        return raw_cpu_has_fpu
               ? _save_fp_context(sc)
-              : fpu_emulator_save_context(sc);
+              : copy_fp_to_sigcontext(sc);
 }
 
 static int smp_restore_fp_context(struct sigcontext __user *sc)
 {
        return raw_cpu_has_fpu
               ? _restore_fp_context(sc)
-              : fpu_emulator_restore_context(sc);
+              : copy_fp_from_sigcontext(sc);
 }
+#endif /* CONFIG_EVA */
 #endif
 
 static int signal_setup(void)
 {
+#ifndef CONFIG_EVA
 #ifdef CONFIG_SMP
        /* For now just do the cpu_has_fpu check when the functions are invoked */
        save_fp_context = smp_save_fp_context;
@@ -617,9 +739,13 @@ static int signal_setup(void)
                save_fp_context = _save_fp_context;
                restore_fp_context = _restore_fp_context;
        } else {
-               save_fp_context = fpu_emulator_save_context;
-               restore_fp_context = fpu_emulator_restore_context;
+               save_fp_context = copy_fp_from_sigcontext;
+               restore_fp_context = copy_fp_to_sigcontext;
        }
+#endif /* CONFIG_SMP */
+#else
+       save_fp_context = copy_fp_from_sigcontext;;
+       restore_fp_context = copy_fp_to_sigcontext;
 #endif
 
        return 0;
index 3d60f7750fa8d873c4651cd362239a0441e24c72..299f956e4db3cecc989b14994c3db67b22b50c16 100644 (file)
@@ -30,6 +30,7 @@
 #include <asm/sim.h>
 #include <asm/ucontext.h>
 #include <asm/fpu.h>
+#include <asm/msa.h>
 #include <asm/war.h>
 #include <asm/vdso.h>
 #include <asm/dsp.h>
@@ -42,8 +43,8 @@ static int (*restore_fp_context32)(struct sigcontext32 __user *sc);
 extern asmlinkage int _save_fp_context32(struct sigcontext32 __user *sc);
 extern asmlinkage int _restore_fp_context32(struct sigcontext32 __user *sc);
 
-extern asmlinkage int fpu_emulator_save_context32(struct sigcontext32 __user *sc);
-extern asmlinkage int fpu_emulator_restore_context32(struct sigcontext32 __user *sc);
+extern asmlinkage int _save_msa_context32(struct sigcontext32 __user *sc);
+extern asmlinkage int _restore_msa_context32(struct sigcontext32 __user *sc);
 
 /*
  * Including <asm/unistd.h> would give use the 64-bit syscall numbers ...
@@ -77,18 +78,97 @@ struct rt_sigframe32 {
        struct ucontext32 rs_uc;
 };
 
+/*
+ * Thread saved context copy to/from a signal context presumed to be on the
+ * user stack, and therefore accessed with appropriate macros from uaccess.h.
+ */
+static int copy_fp_to_sigcontext32(struct sigcontext32 __user *sc)
+{
+       int i;
+       int err = 0;
+       int inc = test_thread_flag(TIF_32BIT_FPREGS) ? 2 : 1;
+
+       for (i = 0; i < NUM_FPU_REGS; i += inc) {
+               err |=
+                   __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
+                              &sc->sc_fpregs[i]);
+       }
+       err |= __put_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr);
+
+       return err;
+}
+
+static int copy_fp_from_sigcontext32(struct sigcontext32 __user *sc)
+{
+       int i;
+       int err = 0;
+       int inc = test_thread_flag(TIF_32BIT_FPREGS) ? 2 : 1;
+       u64 fpr_val;
+
+       for (i = 0; i < NUM_FPU_REGS; i += inc) {
+               err |= __get_user(fpr_val, &sc->sc_fpregs[i]);
+               set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
+       }
+       err |= __get_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr);
+
+       return err;
+}
+
+/*
+ * These functions will save only the upper 64 bits of the vector registers,
+ * since the lower 64 bits have already been saved as the scalar FP context.
+ */
+static int copy_msa_to_sigcontext32(struct sigcontext32 __user *sc)
+{
+       int i;
+       int err = 0;
+
+       for (i = 0; i < NUM_FPU_REGS; i++) {
+               err |=
+                   __put_user(get_fpr64(&current->thread.fpu.fpr[i], 1),
+                              &sc->sc_msaregs[i]);
+       }
+       err |= __put_user(current->thread.fpu.msacsr, &sc->sc_msa_csr);
+
+       return err;
+}
+
+static int copy_msa_from_sigcontext32(struct sigcontext32 __user *sc)
+{
+       int i;
+       int err = 0;
+       u64 val;
+
+       for (i = 0; i < NUM_FPU_REGS; i++) {
+               err |= __get_user(val, &sc->sc_msaregs[i]);
+               set_fpr64(&current->thread.fpu.fpr[i], 1, val);
+       }
+       err |= __get_user(current->thread.fpu.msacsr, &sc->sc_msa_csr);
+
+       return err;
+}
+
 /*
  * sigcontext handlers
  */
-static int protected_save_fp_context32(struct sigcontext32 __user *sc)
+static int protected_save_fp_context32(struct sigcontext32 __user *sc,
+                                      unsigned used_math)
 {
        int err;
+       bool save_msa = cpu_has_msa && (used_math & USEDMATH_MSA);
        while (1) {
                lock_fpu_owner();
-               err = own_fpu_inatomic(1);
-               if (!err)
-                       err = save_fp_context32(sc); /* this might fail */
-               unlock_fpu_owner();
+               if (is_fpu_owner()) {
+                       err = save_fp_context32(sc);
+                       if (save_msa && !err)
+                               err = _save_msa_context32(sc);
+                       unlock_fpu_owner();
+               } else {
+                       unlock_fpu_owner();
+                       err = copy_fp_to_sigcontext32(sc);
+                       if (save_msa && !err)
+                               err = copy_msa_to_sigcontext32(sc);
+               }
                if (likely(!err))
                        break;
                /* touch the sigcontext and try again */
@@ -101,15 +181,29 @@ static int protected_save_fp_context32(struct sigcontext32 __user *sc)
        return err;
 }
 
-static int protected_restore_fp_context32(struct sigcontext32 __user *sc)
+static int protected_restore_fp_context32(struct sigcontext32 __user *sc,
+                                         unsigned used_math)
 {
        int err, tmp __maybe_unused;
+       bool restore_msa = cpu_has_msa && (used_math & USEDMATH_MSA);
        while (1) {
                lock_fpu_owner();
-               err = own_fpu_inatomic(0);
-               if (!err)
-                       err = restore_fp_context32(sc); /* this might fail */
-               unlock_fpu_owner();
+               if (is_fpu_owner()) {
+                       err = restore_fp_context32(sc);
+                       if (restore_msa && !err) {
+                               enable_msa();
+                               err = _restore_msa_context32(sc);
+                       } else {
+                               /* signal handler may have used MSA */
+                               disable_msa();
+                       }
+                       unlock_fpu_owner();
+               } else {
+                       unlock_fpu_owner();
+                       err = copy_fp_from_sigcontext32(sc);
+                       if (restore_msa && !err)
+                               err = copy_msa_from_sigcontext32(sc);
+               }
                if (likely(!err))
                        break;
                /* touch the sigcontext and try again */
@@ -147,7 +241,8 @@ static int setup_sigcontext32(struct pt_regs *regs,
                err |= __put_user(mflo3(), &sc->sc_lo3);
        }
 
-       used_math = !!used_math();
+       used_math = used_math() ? USEDMATH_FP : 0;
+       used_math |= thread_msa_context_live() ? USEDMATH_MSA : 0;
        err |= __put_user(used_math, &sc->sc_used_math);
 
        if (used_math) {
@@ -155,20 +250,21 @@ static int setup_sigcontext32(struct pt_regs *regs,
                 * Save FPU state to signal context.  Signal handler
                 * will "inherit" current FPU state.
                 */
-               err |= protected_save_fp_context32(sc);
+               err |= protected_save_fp_context32(sc, used_math);
        }
        return err;
 }
 
 static int
-check_and_restore_fp_context32(struct sigcontext32 __user *sc)
+check_and_restore_fp_context32(struct sigcontext32 __user *sc,
+                              unsigned used_math)
 {
        int err, sig;
 
        err = sig = fpcsr_pending(&sc->sc_fpc_csr);
        if (err > 0)
                err = 0;
-       err |= protected_restore_fp_context32(sc);
+       err |= protected_restore_fp_context32(sc, used_math);
        return err ?: sig;
 }
 
@@ -205,9 +301,10 @@ static int restore_sigcontext32(struct pt_regs *regs,
        if (used_math) {
                /* restore fpu context if we have used it before */
                if (!err)
-                       err = check_and_restore_fp_context32(sc);
+                       err = check_and_restore_fp_context32(sc, used_math);
        } else {
-               /* signal handler may have used FPU.  Give it up. */
+               /* signal handler may have used FPU or MSA. Disable them. */
+               disable_msa();
                lose_fpu(0);
        }
 
@@ -566,8 +663,8 @@ static int signal32_init(void)
                save_fp_context32 = _save_fp_context32;
                restore_fp_context32 = _restore_fp_context32;
        } else {
-               save_fp_context32 = fpu_emulator_save_context32;
-               restore_fp_context32 = fpu_emulator_restore_context32;
+               save_fp_context32 = copy_fp_to_sigcontext32;
+               restore_fp_context32 = copy_fp_from_sigcontext32;
        }
 
        return 0;
index 1b925d8a610cdce41f7e0f031943d8afeb678518..3ef55fb7ac036c7d52eb0766c3b7c5c4a687b02f 100644 (file)
 #include <asm/amon.h>
 #include <asm/gic.h>
 
-static void ipi_call_function(unsigned int cpu)
-{
-       pr_debug("CPU%d: %s cpu %d status %08x\n",
-                smp_processor_id(), __func__, cpu, read_c0_status());
-
-       gic_send_ipi(plat_ipi_call_int_xlate(cpu));
-}
-
-
-static void ipi_resched(unsigned int cpu)
-{
-       pr_debug("CPU%d: %s cpu %d status %08x\n",
-                smp_processor_id(), __func__, cpu, read_c0_status());
-
-       gic_send_ipi(plat_ipi_resched_int_xlate(cpu));
-}
-
-/*
- * FIXME: This isn't restricted to CMP
- * The SMVP kernel could use GIC interrupts if available
- */
-void cmp_send_ipi_single(int cpu, unsigned int action)
-{
-       unsigned long flags;
-
-       local_irq_save(flags);
-
-       switch (action) {
-       case SMP_CALL_FUNCTION:
-               ipi_call_function(cpu);
-               break;
-
-       case SMP_RESCHEDULE_YOURSELF:
-               ipi_resched(cpu);
-               break;
-       }
-
-       local_irq_restore(flags);
-}
-
-static void cmp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
-{
-       unsigned int i;
-
-       for_each_cpu(i, mask)
-               cmp_send_ipi_single(i, action);
-}
-
 static void cmp_init_secondary(void)
 {
-       struct cpuinfo_mips *c = &current_cpu_data;
+       struct cpuinfo_mips *c __maybe_unused = &current_cpu_data;
 
        /* Assume GIC is present */
        change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 | STATUSF_IP6 |
@@ -97,7 +49,6 @@ static void cmp_init_secondary(void)
 
        /* Enable per-cpu interrupts: platform specific */
 
-       c->core = (read_c0_ebase() >> 1) & 0x1ff;
 #if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
        if (cpu_has_mipsmt)
                c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) &
@@ -210,8 +161,8 @@ void __init cmp_prepare_cpus(unsigned int max_cpus)
 }
 
 struct plat_smp_ops cmp_smp_ops = {
-       .send_ipi_single        = cmp_send_ipi_single,
-       .send_ipi_mask          = cmp_send_ipi_mask,
+       .send_ipi_single        = gic_send_ipi_single,
+       .send_ipi_mask          = gic_send_ipi_mask,
        .init_secondary         = cmp_init_secondary,
        .smp_finish             = cmp_smp_finish,
        .cpus_done              = cmp_cpus_done,
diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
new file mode 100644 (file)
index 0000000..536eec0
--- /dev/null
@@ -0,0 +1,335 @@
+/*
+ * Copyright (C) 2013 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/io.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/types.h>
+
+#include <asm/cacheflush.h>
+#include <asm/gic.h>
+#include <asm/mips-cm.h>
+#include <asm/mips-cpc.h>
+#include <asm/mips_mt.h>
+#include <asm/mipsregs.h>
+#include <asm/smp-cps.h>
+#include <asm/time.h>
+#include <asm/uasm.h>
+
+static DECLARE_BITMAP(core_power, NR_CPUS);
+
+struct boot_config mips_cps_bootcfg;
+
+static void init_core(void)
+{
+       unsigned int nvpes, t;
+       u32 mvpconf0, vpeconf0, vpecontrol, tcstatus, tcbind, status;
+
+       if (!cpu_has_mipsmt)
+               return;
+
+       /* Enter VPE configuration state */
+       dvpe();
+       set_c0_mvpcontrol(MVPCONTROL_VPC);
+
+       /* Retrieve the count of VPEs in this core */
+       mvpconf0 = read_c0_mvpconf0();
+       nvpes = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
+       smp_num_siblings = nvpes;
+
+       for (t = 1; t < nvpes; t++) {
+               /* Use a 1:1 mapping of TC index to VPE index */
+               settc(t);
+
+               /* Bind 1 TC to this VPE */
+               tcbind = read_tc_c0_tcbind();
+               tcbind &= ~TCBIND_CURVPE;
+               tcbind |= t << TCBIND_CURVPE_SHIFT;
+               write_tc_c0_tcbind(tcbind);
+
+               /* Set exclusive TC, non-active, master */
+               vpeconf0 = read_vpe_c0_vpeconf0();
+               vpeconf0 &= ~(VPECONF0_XTC | VPECONF0_VPA);
+               vpeconf0 |= t << VPECONF0_XTC_SHIFT;
+               vpeconf0 |= VPECONF0_MVP;
+               write_vpe_c0_vpeconf0(vpeconf0);
+
+               /* Declare TC non-active, non-allocatable & interrupt exempt */
+               tcstatus = read_tc_c0_tcstatus();
+               tcstatus &= ~(TCSTATUS_A | TCSTATUS_DA);
+               tcstatus |= TCSTATUS_IXMT;
+               write_tc_c0_tcstatus(tcstatus);
+
+               /* Halt the TC */
+               write_tc_c0_tchalt(TCHALT_H);
+
+               /* Allow only 1 TC to execute */
+               vpecontrol = read_vpe_c0_vpecontrol();
+               vpecontrol &= ~VPECONTROL_TE;
+               write_vpe_c0_vpecontrol(vpecontrol);
+
+               /* Copy (most of) Status from VPE 0 */
+               status = read_c0_status();
+               status &= ~(ST0_IM | ST0_IE | ST0_KSU);
+               status |= ST0_CU0;
+               write_vpe_c0_status(status);
+
+               /* Copy Config from VPE 0 */
+               write_vpe_c0_config(read_c0_config());
+               write_vpe_c0_config7(read_c0_config7());
+
+               /* Ensure no software interrupts are pending */
+               write_vpe_c0_cause(0);
+
+               /* Sync Count */
+               write_vpe_c0_count(read_c0_count());
+       }
+
+       /* Leave VPE configuration state */
+       clear_c0_mvpcontrol(MVPCONTROL_VPC);
+}
+
+static void __init cps_smp_setup(void)
+{
+       unsigned int ncores, nvpes, core_vpes;
+       int c, v;
+       u32 core_cfg, *entry_code;
+
+       /* Detect & record VPE topology */
+       ncores = mips_cm_numcores();
+       pr_info("VPE topology ");
+       for (c = nvpes = 0; c < ncores; c++) {
+               if (cpu_has_mipsmt && config_enabled(CONFIG_MIPS_MT_SMP)) {
+                       write_gcr_cl_other(c << CM_GCR_Cx_OTHER_CORENUM_SHF);
+                       core_cfg = read_gcr_co_config();
+                       core_vpes = ((core_cfg & CM_GCR_Cx_CONFIG_PVPE_MSK) >>
+                                    CM_GCR_Cx_CONFIG_PVPE_SHF) + 1;
+               } else {
+                       core_vpes = 1;
+               }
+
+               pr_cont("%c%u", c ? ',' : '{', core_vpes);
+
+               for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) {
+                       cpu_data[nvpes + v].core = c;
+#ifdef CONFIG_MIPS_MT_SMP
+                       cpu_data[nvpes + v].vpe_id = v;
+#endif
+               }
+
+               nvpes += core_vpes;
+       }
+       pr_cont("} total %u\n", nvpes);
+
+       /* Indicate present CPUs (CPU being synonymous with VPE) */
+       for (v = 0; v < min_t(unsigned, nvpes, NR_CPUS); v++) {
+               set_cpu_possible(v, true);
+               set_cpu_present(v, true);
+               __cpu_number_map[v] = v;
+               __cpu_logical_map[v] = v;
+       }
+
+       /* Core 0 is powered up (we're running on it) */
+       bitmap_set(core_power, 0, 1);
+
+       /* Disable MT - we only want to run 1 TC per VPE */
+       if (cpu_has_mipsmt)
+               dmt();
+
+       /* Initialise core 0 */
+       init_core();
+
+       /* Patch the start of mips_cps_core_entry to provide the CM base */
+       entry_code = (u32 *)&mips_cps_core_entry;
+       UASM_i_LA(&entry_code, 3, (long)mips_cm_base);
+
+       /* Make core 0 coherent with everything */
+       write_gcr_cl_coherence(0xff);
+}
+
+static void __init cps_prepare_cpus(unsigned int max_cpus)
+{
+       mips_mt_set_cpuoptions();
+}
+
+static void boot_core(struct boot_config *cfg)
+{
+       u32 access;
+
+       /* Select the appropriate core */
+       write_gcr_cl_other(cfg->core << CM_GCR_Cx_OTHER_CORENUM_SHF);
+
+       /* Set its reset vector */
+       write_gcr_co_reset_base(CKSEG1ADDR((unsigned long)mips_cps_core_entry));
+
+       /* Ensure its coherency is disabled */
+       write_gcr_co_coherence(0);
+
+       /* Ensure the core can access the GCRs */
+       access = read_gcr_access();
+       access |= 1 << (CM_GCR_ACCESS_ACCESSEN_SHF + cfg->core);
+       write_gcr_access(access);
+
+       /* Copy cfg */
+       mips_cps_bootcfg = *cfg;
+
+       if (mips_cpc_present()) {
+               /* Select the appropriate core */
+               write_cpc_cl_other(cfg->core << CPC_Cx_OTHER_CORENUM_SHF);
+
+               /* Reset the core */
+               write_cpc_co_cmd(CPC_Cx_CMD_RESET);
+       } else {
+               /* Take the core out of reset */
+               write_gcr_co_reset_release(0);
+       }
+
+       /* The core is now powered up */
+       bitmap_set(core_power, cfg->core, 1);
+}
+
+static void boot_vpe(void *info)
+{
+       struct boot_config *cfg = info;
+       u32 tcstatus, vpeconf0;
+
+       /* Enter VPE configuration state */
+       dvpe();
+       set_c0_mvpcontrol(MVPCONTROL_VPC);
+
+       settc(cfg->vpe);
+
+       /* Set the TC restart PC */
+       write_tc_c0_tcrestart((unsigned long)&smp_bootstrap);
+
+       /* Activate the TC, allow interrupts */
+       tcstatus = read_tc_c0_tcstatus();
+       tcstatus &= ~TCSTATUS_IXMT;
+       tcstatus |= TCSTATUS_A;
+       write_tc_c0_tcstatus(tcstatus);
+
+       /* Clear the TC halt bit */
+       write_tc_c0_tchalt(0);
+
+       /* Activate the VPE */
+       vpeconf0 = read_vpe_c0_vpeconf0();
+       vpeconf0 |= VPECONF0_VPA;
+       write_vpe_c0_vpeconf0(vpeconf0);
+
+       /* Set the stack & global pointer registers */
+       write_tc_gpr_sp(cfg->sp);
+       write_tc_gpr_gp(cfg->gp);
+
+       /* Leave VPE configuration state */
+       clear_c0_mvpcontrol(MVPCONTROL_VPC);
+
+       /* Enable other VPEs to execute */
+       evpe(EVPE_ENABLE);
+}
+
+static void cps_boot_secondary(int cpu, struct task_struct *idle)
+{
+       struct boot_config cfg;
+       unsigned int remote;
+       int err;
+
+       cfg.core = cpu_data[cpu].core;
+       cfg.vpe = cpu_vpe_id(&cpu_data[cpu]);
+       cfg.pc = (unsigned long)&smp_bootstrap;
+       cfg.sp = __KSTK_TOS(idle);
+       cfg.gp = (unsigned long)task_thread_info(idle);
+
+       if (!test_bit(cfg.core, core_power)) {
+               /* Boot a VPE on a powered down core */
+               boot_core(&cfg);
+               return;
+       }
+
+       if (cfg.core != current_cpu_data.core) {
+               /* Boot a VPE on another powered up core */
+               for (remote = 0; remote < NR_CPUS; remote++) {
+                       if (cpu_data[remote].core != cfg.core)
+                               continue;
+                       if (cpu_online(remote))
+                               break;
+               }
+               BUG_ON(remote >= NR_CPUS);
+
+               err = smp_call_function_single(remote, boot_vpe, &cfg, 1);
+               if (err)
+                       panic("Failed to call remote CPU\n");
+               return;
+       }
+
+       BUG_ON(!cpu_has_mipsmt);
+
+       /* Boot a VPE on this core */
+       boot_vpe(&cfg);
+}
+
+static void cps_init_secondary(void)
+{
+       /* Disable MT - we only want to run 1 TC per VPE */
+       if (cpu_has_mipsmt)
+               dmt();
+
+       /* TODO: revisit this assumption once hotplug is implemented */
+       if (cpu_vpe_id(&current_cpu_data) == 0)
+               init_core();
+
+       change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 |
+                                STATUSF_IP6 | STATUSF_IP7);
+}
+
+static void cps_smp_finish(void)
+{
+       write_c0_compare(read_c0_count() + (8 * mips_hpt_frequency / HZ));
+
+#ifdef CONFIG_MIPS_MT_FPAFF
+       /* If we have an FPU, enroll ourselves in the FPU-full mask */
+       if (cpu_has_fpu)
+               cpu_set(smp_processor_id(), mt_fpu_cpumask);
+#endif /* CONFIG_MIPS_MT_FPAFF */
+
+       local_irq_enable();
+}
+
+static void cps_cpus_done(void)
+{
+}
+
+static struct plat_smp_ops cps_smp_ops = {
+       .smp_setup              = cps_smp_setup,
+       .prepare_cpus           = cps_prepare_cpus,
+       .boot_secondary         = cps_boot_secondary,
+       .init_secondary         = cps_init_secondary,
+       .smp_finish             = cps_smp_finish,
+       .send_ipi_single        = gic_send_ipi_single,
+       .send_ipi_mask          = gic_send_ipi_mask,
+       .cpus_done              = cps_cpus_done,
+};
+
+int register_cps_smp_ops(void)
+{
+       if (!mips_cm_present()) {
+               pr_warn("MIPS CPS SMP unable to proceed without a CM\n");
+               return -ENODEV;
+       }
+
+       /* check we have a GIC - we need one for IPIs */
+       if (!(read_gcr_gic_status() & CM_GCR_GIC_STATUS_EX_MSK)) {
+               pr_warn("MIPS CPS SMP unable to proceed without a GIC\n");
+               return -ENODEV;
+       }
+
+       register_smp_ops(&cps_smp_ops);
+       return 0;
+}
diff --git a/arch/mips/kernel/smp-gic.c b/arch/mips/kernel/smp-gic.c
new file mode 100644 (file)
index 0000000..3bb1f92
--- /dev/null
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2013 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * Based on smp-cmp.c:
+ *  Copyright (C) 2007 MIPS Technologies, Inc.
+ *  Author: Chris Dearman (chris@mips.com)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/printk.h>
+
+#include <asm/gic.h>
+#include <asm/smp-ops.h>
+
+void gic_send_ipi_single(int cpu, unsigned int action)
+{
+       unsigned long flags;
+       unsigned int intr;
+
+       pr_debug("CPU%d: %s cpu %d action %u status %08x\n",
+                smp_processor_id(), __func__, cpu, action, read_c0_status());
+
+       local_irq_save(flags);
+
+       switch (action) {
+       case SMP_CALL_FUNCTION:
+               intr = plat_ipi_call_int_xlate(cpu);
+               break;
+
+       case SMP_RESCHEDULE_YOURSELF:
+               intr = plat_ipi_resched_int_xlate(cpu);
+               break;
+
+       default:
+               BUG();
+       }
+
+       gic_send_ipi(intr);
+       local_irq_restore(flags);
+}
+
+void gic_send_ipi_mask(const struct cpumask *mask, unsigned int action)
+{
+       unsigned int i;
+
+       for_each_cpu(i, mask)
+               gic_send_ipi_single(i, action);
+}
index 0fb8cefc9114b299fd5ed3b73ef3da86abf4da8e..f8e13149604d74529dd746f889bf6a1345f4bf9e 100644 (file)
@@ -113,27 +113,6 @@ static void __init smvp_tc_init(unsigned int tc, unsigned int mvpconf0)
        write_tc_c0_tchalt(TCHALT_H);
 }
 
-#ifdef CONFIG_IRQ_GIC
-static void mp_send_ipi_single(int cpu, unsigned int action)
-{
-       unsigned long flags;
-
-       local_irq_save(flags);
-
-       switch (action) {
-       case SMP_CALL_FUNCTION:
-               gic_send_ipi(plat_ipi_call_int_xlate(cpu));
-               break;
-
-       case SMP_RESCHEDULE_YOURSELF:
-               gic_send_ipi(plat_ipi_resched_int_xlate(cpu));
-               break;
-       }
-
-       local_irq_restore(flags);
-}
-#endif
-
 static void vsmp_send_ipi_single(int cpu, unsigned int action)
 {
        int i;
@@ -142,7 +121,7 @@ static void vsmp_send_ipi_single(int cpu, unsigned int action)
 
 #ifdef CONFIG_IRQ_GIC
        if (gic_present) {
-               mp_send_ipi_single(cpu, action);
+               gic_send_ipi_single(cpu, action);
                return;
        }
 #endif
@@ -313,3 +292,25 @@ struct plat_smp_ops vsmp_smp_ops = {
        .smp_setup              = vsmp_smp_setup,
        .prepare_cpus           = vsmp_prepare_cpus,
 };
+
+static int proc_cpuinfo_chain_call(struct notifier_block *nfb,
+       unsigned long action_unused, void *data)
+{
+       struct proc_cpuinfo_notifier_args *pcn = data;
+       struct seq_file *m = pcn->m;
+       unsigned long n = pcn->n;
+
+       if (!cpu_has_mipsmt)
+               return NOTIFY_OK;
+
+       seq_printf(m, "VPE\t\t\t: %d\n", cpu_data[n].vpe_id);
+
+       return NOTIFY_OK;
+}
+
+static int __init proc_cpuinfo_notifier_init(void)
+{
+       return proc_cpuinfo_notifier(proc_cpuinfo_chain_call, 0);
+}
+
+subsys_initcall(proc_cpuinfo_notifier_init);
index c10aa84c9fa9d4d4d66cc6005ee8c82b7bce495e..38635a996cbfd6a2f883cacb68e8950e9a792b25 100644 (file)
@@ -77,3 +77,26 @@ void init_smtc_stats(void)
 
        proc_create("smtc", 0444, NULL, &smtc_proc_fops);
 }
+
+static int proc_cpuinfo_chain_call(struct notifier_block *nfb,
+       unsigned long action_unused, void *data)
+{
+       struct proc_cpuinfo_notifier_args *pcn = data;
+       struct seq_file *m = pcn->m;
+       unsigned long n = pcn->n;
+
+       if (!cpu_has_mipsmt)
+               return NOTIFY_OK;
+
+       seq_printf(m, "VPE\t\t\t: %d\n", cpu_data[n].vpe_id);
+       seq_printf(m, "TC\t\t\t: %d\n", cpu_data[n].tc_id);
+
+       return NOTIFY_OK;
+}
+
+static int __init proc_cpuinfo_notifier_init(void)
+{
+       return proc_cpuinfo_notifier(proc_cpuinfo_chain_call, 0);
+}
+
+subsys_initcall(proc_cpuinfo_notifier_init);
index b242e2c10ea0b6b5c1230579e4a6ad83463fed49..67f2495def1cd18615210c32e2cc111ea66c8fe0 100644 (file)
@@ -197,16 +197,17 @@ static void probe_spram(char *type,
 }
 void spram_config(void)
 {
-       struct cpuinfo_mips *c = &current_cpu_data;
        unsigned int config0;
 
-       switch (c->cputype) {
+       switch (current_cpu_type()) {
        case CPU_24K:
        case CPU_34K:
        case CPU_74K:
        case CPU_1004K:
+       case CPU_1074K:
        case CPU_INTERAPTIV:
        case CPU_PROAPTIV:
+       case CPU_P5600:
                config0 = read_c0_config();
                /* FIXME: addresses are Malta specific */
                if (config0 & (1<<24)) {
index b79d13f95bf01b5d666e18fa22caf0ff66e0188b..4a4f9dda5658af8297b66981532c01f288609306 100644 (file)
@@ -110,7 +110,7 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new)
 
        if (cpu_has_llsc && R10000_LLSC_WAR) {
                __asm__ __volatile__ (
-               "       .set    mips3                                   \n"
+               "       .set    arch=r4000                              \n"
                "       li      %[err], 0                               \n"
                "1:     ll      %[old], (%[addr])                       \n"
                "       move    %[tmp], %[new]                          \n"
@@ -135,7 +135,7 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new)
                : "memory");
        } else if (cpu_has_llsc) {
                __asm__ __volatile__ (
-               "       .set    mips3                                   \n"
+               "       .set    arch=r4000                              \n"
                "       li      %[err], 0                               \n"
                "1:     ll      %[old], (%[addr])                       \n"
                "       move    %[tmp], %[new]                          \n"
index e0b499694d180ae1513153b1acbcfbf031ab240a..074e857ced284eb70bab2d8ae31ae496a14984b5 100644 (file)
@@ -10,6 +10,7 @@
  * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
  * Copyright (C) 2002, 2003, 2004, 2005, 2007  Maciej W. Rozycki
  * Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc.  All rights reserved.
+ * Copyright (C) 2014, Imagination Technologies Ltd.
  */
 #include <linux/bug.h>
 #include <linux/compiler.h>
@@ -47,6 +48,7 @@
 #include <asm/mipsregs.h>
 #include <asm/mipsmtregs.h>
 #include <asm/module.h>
+#include <asm/msa.h>
 #include <asm/pgtable.h>
 #include <asm/ptrace.h>
 #include <asm/sections.h>
@@ -77,8 +79,10 @@ extern asmlinkage void handle_ri_rdhwr(void);
 extern asmlinkage void handle_cpu(void);
 extern asmlinkage void handle_ov(void);
 extern asmlinkage void handle_tr(void);
+extern asmlinkage void handle_msa_fpe(void);
 extern asmlinkage void handle_fpe(void);
 extern asmlinkage void handle_ftlb(void);
+extern asmlinkage void handle_msa(void);
 extern asmlinkage void handle_mdmx(void);
 extern asmlinkage void handle_watch(void);
 extern asmlinkage void handle_mt(void);
@@ -861,6 +865,11 @@ asmlinkage void do_bp(struct pt_regs *regs)
        enum ctx_state prev_state;
        unsigned long epc;
        u16 instr[2];
+       mm_segment_t seg;
+
+       seg = get_fs();
+       if (!user_mode(regs))
+               set_fs(KERNEL_DS);
 
        prev_state = exception_enter();
        if (get_isa16_mode(regs->cp0_epc)) {
@@ -870,17 +879,19 @@ asmlinkage void do_bp(struct pt_regs *regs)
                        if ((__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc)) ||
                            (__get_user(instr[1], (u16 __user *)msk_isa16_mode(epc + 2)))))
                                goto out_sigsegv;
-                   opcode = (instr[0] << 16) | instr[1];
+                       opcode = (instr[0] << 16) | instr[1];
                } else {
-                   /* MIPS16e mode */
-                   if (__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc)))
+                       /* MIPS16e mode */
+                       if (__get_user(instr[0],
+                                      (u16 __user *)msk_isa16_mode(epc)))
                                goto out_sigsegv;
-                   bcode = (instr[0] >> 6) & 0x3f;
-                   do_trap_or_bp(regs, bcode, "Break");
-                   goto out;
+                       bcode = (instr[0] >> 6) & 0x3f;
+                       do_trap_or_bp(regs, bcode, "Break");
+                       goto out;
                }
        } else {
-               if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
+               if (__get_user(opcode,
+                              (unsigned int __user *) exception_epc(regs)))
                        goto out_sigsegv;
        }
 
@@ -918,6 +929,7 @@ asmlinkage void do_bp(struct pt_regs *regs)
        do_trap_or_bp(regs, bcode, "Break");
 
 out:
+       set_fs(seg);
        exception_exit(prev_state);
        return;
 
@@ -931,8 +943,13 @@ asmlinkage void do_tr(struct pt_regs *regs)
        u32 opcode, tcode = 0;
        enum ctx_state prev_state;
        u16 instr[2];
+       mm_segment_t seg;
        unsigned long epc = msk_isa16_mode(exception_epc(regs));
 
+       seg = get_fs();
+       if (!user_mode(regs))
+               set_fs(get_ds());
+
        prev_state = exception_enter();
        if (get_isa16_mode(regs->cp0_epc)) {
                if (__get_user(instr[0], (u16 __user *)(epc + 0)) ||
@@ -953,6 +970,7 @@ asmlinkage void do_tr(struct pt_regs *regs)
        do_trap_or_bp(regs, tcode, "Trap");
 
 out:
+       set_fs(seg);
        exception_exit(prev_state);
        return;
 
@@ -1074,6 +1092,76 @@ static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
        return NOTIFY_OK;
 }
 
+static int enable_restore_fp_context(int msa)
+{
+       int err, was_fpu_owner;
+
+       if (!used_math()) {
+               /* First time FP context user. */
+               err = init_fpu();
+               if (msa && !err)
+                       enable_msa();
+               if (!err)
+                       set_used_math();
+               return err;
+       }
+
+       /*
+        * This task has formerly used the FP context.
+        *
+        * If this thread has no live MSA vector context then we can simply
+        * restore the scalar FP context. If it has live MSA vector context
+        * (that is, it has or may have used MSA since last performing a
+        * function call) then we'll need to restore the vector context. This
+        * applies even if we're currently only executing a scalar FP
+        * instruction. This is because if we were to later execute an MSA
+        * instruction then we'd either have to:
+        *
+        *  - Restore the vector context & clobber any registers modified by
+        *    scalar FP instructions between now & then.
+        *
+        * or
+        *
+        *  - Not restore the vector context & lose the most significant bits
+        *    of all vector registers.
+        *
+        * Neither of those options is acceptable. We cannot restore the least
+        * significant bits of the registers now & only restore the most
+        * significant bits later because the most significant bits of any
+        * vector registers whose aliased FP register is modified now will have
+        * been zeroed. We'd have no way to know that when restoring the vector
+        * context & thus may load an outdated value for the most significant
+        * bits of a vector register.
+        */
+       if (!msa && !thread_msa_context_live())
+               return own_fpu(1);
+
+       /*
+        * This task is using or has previously used MSA. Thus we require
+        * that Status.FR == 1.
+        */
+       was_fpu_owner = is_fpu_owner();
+       err = own_fpu(0);
+       if (err)
+               return err;
+
+       enable_msa();
+       write_msa_csr(current->thread.fpu.msacsr);
+       set_thread_flag(TIF_USEDMSA);
+
+       /*
+        * If this is the first time that the task is using MSA and it has
+        * previously used scalar FP in this time slice then we already have
+        * FP context which we shouldn't clobber.
+        */
+       if (!test_and_set_thread_flag(TIF_MSA_CTX_LIVE) && was_fpu_owner)
+               return 0;
+
+       /* We need to restore the vector context. */
+       restore_msa(current);
+       return 0;
+}
+
 asmlinkage void do_cpu(struct pt_regs *regs)
 {
        enum ctx_state prev_state;
@@ -1153,12 +1241,7 @@ asmlinkage void do_cpu(struct pt_regs *regs)
                /* Fall through.  */
 
        case 1:
-               if (used_math())        /* Using the FPU again.  */
-                       err = own_fpu(1);
-               else {                  /* First time FPU user.  */
-                       err = init_fpu();
-                       set_used_math();
-               }
+               err = enable_restore_fp_context(0);
 
                if (!raw_cpu_has_fpu || err) {
                        int sig;
@@ -1183,6 +1266,37 @@ out:
        exception_exit(prev_state);
 }
 
+asmlinkage void do_msa_fpe(struct pt_regs *regs)
+{
+       enum ctx_state prev_state;
+
+       prev_state = exception_enter();
+       die_if_kernel("do_msa_fpe invoked from kernel context!", regs);
+       force_sig(SIGFPE, current);
+       exception_exit(prev_state);
+}
+
+asmlinkage void do_msa(struct pt_regs *regs)
+{
+       enum ctx_state prev_state;
+       int err;
+
+       prev_state = exception_enter();
+
+       if (!cpu_has_msa || test_thread_flag(TIF_32BIT_FPREGS)) {
+               force_sig(SIGILL, current);
+               goto out;
+       }
+
+       die_if_kernel("do_msa invoked from kernel context!", regs);
+
+       err = enable_restore_fp_context(1);
+       if (err)
+               force_sig(SIGILL, current);
+out:
+       exception_exit(prev_state);
+}
+
 asmlinkage void do_mdmx(struct pt_regs *regs)
 {
        enum ctx_state prev_state;
@@ -1337,8 +1451,10 @@ static inline void parity_protection_init(void)
        case CPU_34K:
        case CPU_74K:
        case CPU_1004K:
+       case CPU_1074K:
        case CPU_INTERAPTIV:
        case CPU_PROAPTIV:
+       case CPU_P5600:
                {
 #define ERRCTL_PE      0x80000000
 #define ERRCTL_L2P     0x00800000
@@ -2017,6 +2133,7 @@ void __init trap_init(void)
        set_except_vector(11, handle_cpu);
        set_except_vector(12, handle_ov);
        set_except_vector(13, handle_tr);
+       set_except_vector(14, handle_msa_fpe);
 
        if (current_cpu_type() == CPU_R6000 ||
            current_cpu_type() == CPU_R6000A) {
@@ -2040,6 +2157,7 @@ void __init trap_init(void)
                set_except_vector(15, handle_fpe);
 
        set_except_vector(16, handle_ftlb);
+       set_except_vector(21, handle_msa);
        set_except_vector(22, handle_mdmx);
 
        if (cpu_has_mcheck)
index c369a5d355273c49e3d7e35601afa2f86fe4fd70..2b3517214d6d8cbdbe6a7bbc31187e1abcc893a5 100644 (file)
@@ -7,6 +7,7 @@
  *
  * Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle
  * Copyright (C) 1999 Silicon Graphics, Inc.
+ * Copyright (C) 2014 Imagination Technologies Ltd.
  *
  * This file contains exception handler for address error exception with the
  * special capability to execute faulting instructions in software.  The
@@ -110,8 +111,8 @@ extern void show_registers(struct pt_regs *regs);
 #ifdef __BIG_ENDIAN
 #define     LoadHW(addr, value, res)  \
                __asm__ __volatile__ (".set\tnoat\n"        \
-                       "1:\tlb\t%0, 0(%2)\n"               \
-                       "2:\tlbu\t$1, 1(%2)\n\t"            \
+                       "1:\t"user_lb("%0", "0(%2)")"\n"    \
+                       "2:\t"user_lbu("$1", "1(%2)")"\n\t" \
                        "sll\t%0, 0x8\n\t"                  \
                        "or\t%0, $1\n\t"                    \
                        "li\t%1, 0\n"                       \
@@ -130,8 +131,8 @@ extern void show_registers(struct pt_regs *regs);
 
 #define     LoadW(addr, value, res)   \
                __asm__ __volatile__ (                      \
-                       "1:\tlwl\t%0, (%2)\n"               \
-                       "2:\tlwr\t%0, 3(%2)\n\t"            \
+                       "1:\t"user_lwl("%0", "(%2)")"\n"    \
+                       "2:\t"user_lwr("%0", "3(%2)")"\n\t" \
                        "li\t%1, 0\n"                       \
                        "3:\n\t"                            \
                        ".insn\n\t"                         \
@@ -149,8 +150,8 @@ extern void show_registers(struct pt_regs *regs);
 #define     LoadHWU(addr, value, res) \
                __asm__ __volatile__ (                      \
                        ".set\tnoat\n"                      \
-                       "1:\tlbu\t%0, 0(%2)\n"              \
-                       "2:\tlbu\t$1, 1(%2)\n\t"            \
+                       "1:\t"user_lbu("%0", "0(%2)")"\n"   \
+                       "2:\t"user_lbu("$1", "1(%2)")"\n\t" \
                        "sll\t%0, 0x8\n\t"                  \
                        "or\t%0, $1\n\t"                    \
                        "li\t%1, 0\n"                       \
@@ -170,8 +171,8 @@ extern void show_registers(struct pt_regs *regs);
 
 #define     LoadWU(addr, value, res)  \
                __asm__ __volatile__ (                      \
-                       "1:\tlwl\t%0, (%2)\n"               \
-                       "2:\tlwr\t%0, 3(%2)\n\t"            \
+                       "1:\t"user_lwl("%0", "(%2)")"\n"    \
+                       "2:\t"user_lwr("%0", "3(%2)")"\n\t" \
                        "dsll\t%0, %0, 32\n\t"              \
                        "dsrl\t%0, %0, 32\n\t"              \
                        "li\t%1, 0\n"                       \
@@ -209,9 +210,9 @@ extern void show_registers(struct pt_regs *regs);
 #define     StoreHW(addr, value, res) \
                __asm__ __volatile__ (                      \
                        ".set\tnoat\n"                      \
-                       "1:\tsb\t%1, 1(%2)\n\t"             \
+                       "1:\t"user_sb("%1", "1(%2)")"\n"    \
                        "srl\t$1, %1, 0x8\n"                \
-                       "2:\tsb\t$1, 0(%2)\n\t"             \
+                       "2:\t"user_sb("$1", "0(%2)")"\n"    \
                        ".set\tat\n\t"                      \
                        "li\t%0, 0\n"                       \
                        "3:\n\t"                            \
@@ -229,8 +230,8 @@ extern void show_registers(struct pt_regs *regs);
 
 #define     StoreW(addr, value, res)  \
                __asm__ __volatile__ (                      \
-                       "1:\tswl\t%1,(%2)\n"                \
-                       "2:\tswr\t%1, 3(%2)\n\t"            \
+                       "1:\t"user_swl("%1", "(%2)")"\n"    \
+                       "2:\t"user_swr("%1", "3(%2)")"\n\t" \
                        "li\t%0, 0\n"                       \
                        "3:\n\t"                            \
                        ".insn\n\t"                         \
@@ -267,8 +268,8 @@ extern void show_registers(struct pt_regs *regs);
 #ifdef __LITTLE_ENDIAN
 #define     LoadHW(addr, value, res)  \
                __asm__ __volatile__ (".set\tnoat\n"        \
-                       "1:\tlb\t%0, 1(%2)\n"               \
-                       "2:\tlbu\t$1, 0(%2)\n\t"            \
+                       "1:\t"user_lb("%0", "1(%2)")"\n"    \
+                       "2:\t"user_lbu("$1", "0(%2)")"\n\t" \
                        "sll\t%0, 0x8\n\t"                  \
                        "or\t%0, $1\n\t"                    \
                        "li\t%1, 0\n"                       \
@@ -287,8 +288,8 @@ extern void show_registers(struct pt_regs *regs);
 
 #define     LoadW(addr, value, res)   \
                __asm__ __volatile__ (                      \
-                       "1:\tlwl\t%0, 3(%2)\n"              \
-                       "2:\tlwr\t%0, (%2)\n\t"             \
+                       "1:\t"user_lwl("%0", "3(%2)")"\n"   \
+                       "2:\t"user_lwr("%0", "(%2)")"\n\t"  \
                        "li\t%1, 0\n"                       \
                        "3:\n\t"                            \
                        ".insn\n\t"                         \
@@ -306,8 +307,8 @@ extern void show_registers(struct pt_regs *regs);
 #define     LoadHWU(addr, value, res) \
                __asm__ __volatile__ (                      \
                        ".set\tnoat\n"                      \
-                       "1:\tlbu\t%0, 1(%2)\n"              \
-                       "2:\tlbu\t$1, 0(%2)\n\t"            \
+                       "1:\t"user_lbu("%0", "1(%2)")"\n"   \
+                       "2:\t"user_lbu("$1", "0(%2)")"\n\t" \
                        "sll\t%0, 0x8\n\t"                  \
                        "or\t%0, $1\n\t"                    \
                        "li\t%1, 0\n"                       \
@@ -327,8 +328,8 @@ extern void show_registers(struct pt_regs *regs);
 
 #define     LoadWU(addr, value, res)  \
                __asm__ __volatile__ (                      \
-                       "1:\tlwl\t%0, 3(%2)\n"              \
-                       "2:\tlwr\t%0, (%2)\n\t"             \
+                       "1:\t"user_lwl("%0", "3(%2)")"\n"   \
+                       "2:\t"user_lwr("%0", "(%2)")"\n\t"  \
                        "dsll\t%0, %0, 32\n\t"              \
                        "dsrl\t%0, %0, 32\n\t"              \
                        "li\t%1, 0\n"                       \
@@ -366,9 +367,9 @@ extern void show_registers(struct pt_regs *regs);
 #define     StoreHW(addr, value, res) \
                __asm__ __volatile__ (                      \
                        ".set\tnoat\n"                      \
-                       "1:\tsb\t%1, 0(%2)\n\t"             \
+                       "1:\t"user_sb("%1", "0(%2)")"\n"    \
                        "srl\t$1,%1, 0x8\n"                 \
-                       "2:\tsb\t$1, 1(%2)\n\t"             \
+                       "2:\t"user_sb("$1", "1(%2)")"\n"    \
                        ".set\tat\n\t"                      \
                        "li\t%0, 0\n"                       \
                        "3:\n\t"                            \
@@ -386,8 +387,8 @@ extern void show_registers(struct pt_regs *regs);
 
 #define     StoreW(addr, value, res)  \
                __asm__ __volatile__ (                      \
-                       "1:\tswl\t%1, 3(%2)\n"              \
-                       "2:\tswr\t%1, (%2)\n\t"             \
+                       "1:\t"user_swl("%1", "3(%2)")"\n"   \
+                       "2:\t"user_swr("%1", "(%2)")"\n\t"  \
                        "li\t%0, 0\n"                       \
                        "3:\n\t"                            \
                        ".insn\n\t"                         \
@@ -430,7 +431,9 @@ static void emulate_load_store_insn(struct pt_regs *regs,
        unsigned long origpc;
        unsigned long orig31;
        void __user *fault_addr = NULL;
-
+#ifdef CONFIG_EVA
+       mm_segment_t seg;
+#endif
        origpc = (unsigned long)pc;
        orig31 = regs->regs[31];
 
@@ -475,6 +478,88 @@ static void emulate_load_store_insn(struct pt_regs *regs,
                 * The remaining opcodes are the ones that are really of
                 * interest.
                 */
+#ifdef CONFIG_EVA
+       case spec3_op:
+               /*
+                * we can land here only from kernel accessing user memory,
+                * so we need to "switch" the address limit to user space, so
+                * address check can work properly.
+                */
+               seg = get_fs();
+               set_fs(USER_DS);
+               switch (insn.spec3_format.func) {
+               case lhe_op:
+                       if (!access_ok(VERIFY_READ, addr, 2)) {
+                               set_fs(seg);
+                               goto sigbus;
+                       }
+                       LoadHW(addr, value, res);
+                       if (res) {
+                               set_fs(seg);
+                               goto fault;
+                       }
+                       compute_return_epc(regs);
+                       regs->regs[insn.spec3_format.rt] = value;
+                       break;
+               case lwe_op:
+                       if (!access_ok(VERIFY_READ, addr, 4)) {
+                               set_fs(seg);
+                               goto sigbus;
+                       }
+                               LoadW(addr, value, res);
+                       if (res) {
+                               set_fs(seg);
+                               goto fault;
+                       }
+                       compute_return_epc(regs);
+                       regs->regs[insn.spec3_format.rt] = value;
+                       break;
+               case lhue_op:
+                       if (!access_ok(VERIFY_READ, addr, 2)) {
+                               set_fs(seg);
+                               goto sigbus;
+                       }
+                       LoadHWU(addr, value, res);
+                       if (res) {
+                               set_fs(seg);
+                               goto fault;
+                       }
+                       compute_return_epc(regs);
+                       regs->regs[insn.spec3_format.rt] = value;
+                       break;
+               case she_op:
+                       if (!access_ok(VERIFY_WRITE, addr, 2)) {
+                               set_fs(seg);
+                               goto sigbus;
+                       }
+                       compute_return_epc(regs);
+                       value = regs->regs[insn.spec3_format.rt];
+                       StoreHW(addr, value, res);
+                       if (res) {
+                               set_fs(seg);
+                               goto fault;
+                       }
+                       break;
+               case swe_op:
+                       if (!access_ok(VERIFY_WRITE, addr, 4)) {
+                               set_fs(seg);
+                               goto sigbus;
+                       }
+                       compute_return_epc(regs);
+                       value = regs->regs[insn.spec3_format.rt];
+                       StoreW(addr, value, res);
+                       if (res) {
+                               set_fs(seg);
+                               goto fault;
+                       }
+                       break;
+               default:
+                       set_fs(seg);
+                       goto sigill;
+               }
+               set_fs(seg);
+               break;
+#endif
        case lh_op:
                if (!access_ok(VERIFY_READ, addr, 2))
                        goto sigbus;
index 638c5db122c924dca997725d58ee0fd34b36fe1b..2bcd8391bc93a0e9b0505c9e862038ecfcbb11d8 100644 (file)
@@ -175,7 +175,7 @@ static void pvc_proc_cleanup(void)
        remove_proc_entry("scroll", pvc_display_dir);
        remove_proc_entry(DISPLAY_DIR_NAME, NULL);
 
-       del_timer(&timer);
+       del_timer_sync(&timer);
 }
 
 static int __init pvc_proc_init(void)
index a6adffbb4e5f0a5ccc5ec268c18c718b69c5a8da..2e4825e483882b217046caf4a847a99c3db56afe 100644 (file)
@@ -8,6 +8,7 @@
  * Copyright (C) 1998, 1999 Ralf Baechle
  * Copyright (C) 1999 Silicon Graphics, Inc.
  * Copyright (C) 2007  Maciej W. Rozycki
+ * Copyright (C) 2014 Imagination Technologies Ltd.
  */
 #include <linux/errno.h>
 #include <asm/asm.h>
@@ -296,7 +297,7 @@ LEAF(csum_partial)
  * checksum and copy routines based on memcpy.S
  *
  *     csum_partial_copy_nocheck(src, dst, len, sum)
- *     __csum_partial_copy_user(src, dst, len, sum, errp)
+ *     __csum_partial_copy_kernel(src, dst, len, sum, errp)
  *
  * See "Spec" in memcpy.S for details. Unlike __copy_user, all
  * function in this file use the standard calling convention.
@@ -327,20 +328,58 @@ LEAF(csum_partial)
  * These handlers do not need to overwrite any data.
  */
 
-#define EXC(inst_reg,addr,handler)             \
-9:     inst_reg, addr;                         \
-       .section __ex_table,"a";                \
-       PTR     9b, handler;                    \
-       .previous
+/* Instruction type */
+#define LD_INSN 1
+#define ST_INSN 2
+#define LEGACY_MODE 1
+#define EVA_MODE    2
+#define USEROP   1
+#define KERNELOP 2
+
+/*
+ * Wrapper to add an entry in the exception table
+ * in case the insn causes a memory exception.
+ * Arguments:
+ * insn    : Load/store instruction
+ * type    : Instruction type
+ * reg     : Register
+ * addr    : Address
+ * handler : Exception handler
+ */
+#define EXC(insn, type, reg, addr, handler)    \
+       .if \mode == LEGACY_MODE;               \
+9:             insn reg, addr;                 \
+               .section __ex_table,"a";        \
+               PTR     9b, handler;            \
+               .previous;                      \
+       /* This is enabled in EVA mode */       \
+       .else;                                  \
+               /* If loading from user or storing to user */   \
+               .if ((\from == USEROP) && (type == LD_INSN)) || \
+                   ((\to == USEROP) && (type == ST_INSN));     \
+9:                     __BUILD_EVA_INSN(insn##e, reg, addr);   \
+                       .section __ex_table,"a";                \
+                       PTR     9b, handler;                    \
+                       .previous;                              \
+               .else;                                          \
+                       /* EVA without exception */             \
+                       insn reg, addr;                         \
+               .endif;                                         \
+       .endif
+
+#undef LOAD
 
 #ifdef USE_DOUBLE
 
-#define LOAD   ld
-#define LOADL  ldl
-#define LOADR  ldr
-#define STOREL sdl
-#define STORER sdr
-#define STORE  sd
+#define LOADK  ld /* No exception */
+#define LOAD(reg, addr, handler)       EXC(ld, LD_INSN, reg, addr, handler)
+#define LOADBU(reg, addr, handler)     EXC(lbu, LD_INSN, reg, addr, handler)
+#define LOADL(reg, addr, handler)      EXC(ldl, LD_INSN, reg, addr, handler)
+#define LOADR(reg, addr, handler)      EXC(ldr, LD_INSN, reg, addr, handler)
+#define STOREB(reg, addr, handler)     EXC(sb, ST_INSN, reg, addr, handler)
+#define STOREL(reg, addr, handler)     EXC(sdl, ST_INSN, reg, addr, handler)
+#define STORER(reg, addr, handler)     EXC(sdr, ST_INSN, reg, addr, handler)
+#define STORE(reg, addr, handler)      EXC(sd, ST_INSN, reg, addr, handler)
 #define ADD    daddu
 #define SUB    dsubu
 #define SRL    dsrl
@@ -352,12 +391,15 @@ LEAF(csum_partial)
 
 #else
 
-#define LOAD   lw
-#define LOADL  lwl
-#define LOADR  lwr
-#define STOREL swl
-#define STORER swr
-#define STORE  sw
+#define LOADK  lw /* No exception */
+#define LOAD(reg, addr, handler)       EXC(lw, LD_INSN, reg, addr, handler)
+#define LOADBU(reg, addr, handler)     EXC(lbu, LD_INSN, reg, addr, handler)
+#define LOADL(reg, addr, handler)      EXC(lwl, LD_INSN, reg, addr, handler)
+#define LOADR(reg, addr, handler)      EXC(lwr, LD_INSN, reg, addr, handler)
+#define STOREB(reg, addr, handler)     EXC(sb, ST_INSN, reg, addr, handler)
+#define STOREL(reg, addr, handler)     EXC(swl, ST_INSN, reg, addr, handler)
+#define STORER(reg, addr, handler)     EXC(swr, ST_INSN, reg, addr, handler)
+#define STORE(reg, addr, handler)      EXC(sw, ST_INSN, reg, addr, handler)
 #define ADD    addu
 #define SUB    subu
 #define SRL    srl
@@ -396,14 +438,20 @@ LEAF(csum_partial)
        .set    at=v1
 #endif
 
-LEAF(__csum_partial_copy_user)
+       .macro __BUILD_CSUM_PARTIAL_COPY_USER mode, from, to, __nocheck
+
        PTR_ADDU        AT, src, len    /* See (1) above. */
+       /* initialize __nocheck if this the first time we execute this
+        * macro
+        */
 #ifdef CONFIG_64BIT
        move    errptr, a4
 #else
        lw      errptr, 16(sp)
 #endif
-FEXPORT(csum_partial_copy_nocheck)
+       .if \__nocheck == 1
+       FEXPORT(csum_partial_copy_nocheck)
+       .endif
        move    sum, zero
        move    odd, zero
        /*
@@ -419,48 +467,48 @@ FEXPORT(csum_partial_copy_nocheck)
         */
        sltu    t2, len, NBYTES
        and     t1, dst, ADDRMASK
-       bnez    t2, .Lcopy_bytes_checklen
+       bnez    t2, .Lcopy_bytes_checklen\@
         and    t0, src, ADDRMASK
        andi    odd, dst, 0x1                   /* odd buffer? */
-       bnez    t1, .Ldst_unaligned
+       bnez    t1, .Ldst_unaligned\@
         nop
-       bnez    t0, .Lsrc_unaligned_dst_aligned
+       bnez    t0, .Lsrc_unaligned_dst_aligned\@
        /*
         * use delay slot for fall-through
         * src and dst are aligned; need to compute rem
         */
-.Lboth_aligned:
+.Lboth_aligned\@:
         SRL    t0, len, LOG_NBYTES+3    # +3 for 8 units/iter
-       beqz    t0, .Lcleanup_both_aligned # len < 8*NBYTES
+       beqz    t0, .Lcleanup_both_aligned\@ # len < 8*NBYTES
         nop
        SUB     len, 8*NBYTES           # subtract here for bgez loop
        .align  4
 1:
-EXC(   LOAD    t0, UNIT(0)(src),       .Ll_exc)
-EXC(   LOAD    t1, UNIT(1)(src),       .Ll_exc_copy)
-EXC(   LOAD    t2, UNIT(2)(src),       .Ll_exc_copy)
-EXC(   LOAD    t3, UNIT(3)(src),       .Ll_exc_copy)
-EXC(   LOAD    t4, UNIT(4)(src),       .Ll_exc_copy)
-EXC(   LOAD    t5, UNIT(5)(src),       .Ll_exc_copy)
-EXC(   LOAD    t6, UNIT(6)(src),       .Ll_exc_copy)
-EXC(   LOAD    t7, UNIT(7)(src),       .Ll_exc_copy)
+       LOAD(t0, UNIT(0)(src), .Ll_exc\@)
+       LOAD(t1, UNIT(1)(src), .Ll_exc_copy\@)
+       LOAD(t2, UNIT(2)(src), .Ll_exc_copy\@)
+       LOAD(t3, UNIT(3)(src), .Ll_exc_copy\@)
+       LOAD(t4, UNIT(4)(src), .Ll_exc_copy\@)
+       LOAD(t5, UNIT(5)(src), .Ll_exc_copy\@)
+       LOAD(t6, UNIT(6)(src), .Ll_exc_copy\@)
+       LOAD(t7, UNIT(7)(src), .Ll_exc_copy\@)
        SUB     len, len, 8*NBYTES
        ADD     src, src, 8*NBYTES
-EXC(   STORE   t0, UNIT(0)(dst),       .Ls_exc)
+       STORE(t0, UNIT(0)(dst), .Ls_exc\@)
        ADDC(sum, t0)
-EXC(   STORE   t1, UNIT(1)(dst),       .Ls_exc)
+       STORE(t1, UNIT(1)(dst), .Ls_exc\@)
        ADDC(sum, t1)
-EXC(   STORE   t2, UNIT(2)(dst),       .Ls_exc)
+       STORE(t2, UNIT(2)(dst), .Ls_exc\@)
        ADDC(sum, t2)
-EXC(   STORE   t3, UNIT(3)(dst),       .Ls_exc)
+       STORE(t3, UNIT(3)(dst), .Ls_exc\@)
        ADDC(sum, t3)
-EXC(   STORE   t4, UNIT(4)(dst),       .Ls_exc)
+       STORE(t4, UNIT(4)(dst), .Ls_exc\@)
        ADDC(sum, t4)
-EXC(   STORE   t5, UNIT(5)(dst),       .Ls_exc)
+       STORE(t5, UNIT(5)(dst), .Ls_exc\@)
        ADDC(sum, t5)
-EXC(   STORE   t6, UNIT(6)(dst),       .Ls_exc)
+       STORE(t6, UNIT(6)(dst), .Ls_exc\@)
        ADDC(sum, t6)
-EXC(   STORE   t7, UNIT(7)(dst),       .Ls_exc)
+       STORE(t7, UNIT(7)(dst), .Ls_exc\@)
        ADDC(sum, t7)
        .set    reorder                         /* DADDI_WAR */
        ADD     dst, dst, 8*NBYTES
@@ -471,44 +519,44 @@ EXC(      STORE   t7, UNIT(7)(dst),       .Ls_exc)
        /*
         * len == the number of bytes left to copy < 8*NBYTES
         */
-.Lcleanup_both_aligned:
+.Lcleanup_both_aligned\@:
 #define rem t7
-       beqz    len, .Ldone
+       beqz    len, .Ldone\@
         sltu   t0, len, 4*NBYTES
-       bnez    t0, .Lless_than_4units
+       bnez    t0, .Lless_than_4units\@
         and    rem, len, (NBYTES-1)    # rem = len % NBYTES
        /*
         * len >= 4*NBYTES
         */
-EXC(   LOAD    t0, UNIT(0)(src),       .Ll_exc)
-EXC(   LOAD    t1, UNIT(1)(src),       .Ll_exc_copy)
-EXC(   LOAD    t2, UNIT(2)(src),       .Ll_exc_copy)
-EXC(   LOAD    t3, UNIT(3)(src),       .Ll_exc_copy)
+       LOAD(t0, UNIT(0)(src), .Ll_exc\@)
+       LOAD(t1, UNIT(1)(src), .Ll_exc_copy\@)
+       LOAD(t2, UNIT(2)(src), .Ll_exc_copy\@)
+       LOAD(t3, UNIT(3)(src), .Ll_exc_copy\@)
        SUB     len, len, 4*NBYTES
        ADD     src, src, 4*NBYTES
-EXC(   STORE   t0, UNIT(0)(dst),       .Ls_exc)
+       STORE(t0, UNIT(0)(dst), .Ls_exc\@)
        ADDC(sum, t0)
-EXC(   STORE   t1, UNIT(1)(dst),       .Ls_exc)
+       STORE(t1, UNIT(1)(dst), .Ls_exc\@)
        ADDC(sum, t1)
-EXC(   STORE   t2, UNIT(2)(dst),       .Ls_exc)
+       STORE(t2, UNIT(2)(dst), .Ls_exc\@)
        ADDC(sum, t2)
-EXC(   STORE   t3, UNIT(3)(dst),       .Ls_exc)
+       STORE(t3, UNIT(3)(dst), .Ls_exc\@)
        ADDC(sum, t3)
        .set    reorder                         /* DADDI_WAR */
        ADD     dst, dst, 4*NBYTES
-       beqz    len, .Ldone
+       beqz    len, .Ldone\@
        .set    noreorder
-.Lless_than_4units:
+.Lless_than_4units\@:
        /*
         * rem = len % NBYTES
         */
-       beq     rem, len, .Lcopy_bytes
+       beq     rem, len, .Lcopy_bytes\@
         nop
 1:
-EXC(   LOAD    t0, 0(src),             .Ll_exc)
+       LOAD(t0, 0(src), .Ll_exc\@)
        ADD     src, src, NBYTES
        SUB     len, len, NBYTES
-EXC(   STORE   t0, 0(dst),             .Ls_exc)
+       STORE(t0, 0(dst), .Ls_exc\@)
        ADDC(sum, t0)
        .set    reorder                         /* DADDI_WAR */
        ADD     dst, dst, NBYTES
@@ -527,20 +575,20 @@ EXC(      STORE   t0, 0(dst),             .Ls_exc)
         * more instruction-level parallelism.
         */
 #define bits t2
-       beqz    len, .Ldone
+       beqz    len, .Ldone\@
         ADD    t1, dst, len    # t1 is just past last byte of dst
        li      bits, 8*NBYTES
        SLL     rem, len, 3     # rem = number of bits to keep
-EXC(   LOAD    t0, 0(src),             .Ll_exc)
+       LOAD(t0, 0(src), .Ll_exc\@)
        SUB     bits, bits, rem # bits = number of bits to discard
        SHIFT_DISCARD t0, t0, bits
-EXC(   STREST  t0, -1(t1),             .Ls_exc)
+       STREST(t0, -1(t1), .Ls_exc\@)
        SHIFT_DISCARD_REVERT t0, t0, bits
        .set reorder
        ADDC(sum, t0)
-       b       .Ldone
+       b       .Ldone\@
        .set noreorder
-.Ldst_unaligned:
+.Ldst_unaligned\@:
        /*
         * dst is unaligned
         * t0 = src & ADDRMASK
@@ -551,25 +599,25 @@ EXC(      STREST  t0, -1(t1),             .Ls_exc)
         * Set match = (src and dst have same alignment)
         */
 #define match rem
-EXC(   LDFIRST t3, FIRST(0)(src),      .Ll_exc)
+       LDFIRST(t3, FIRST(0)(src), .Ll_exc\@)
        ADD     t2, zero, NBYTES
-EXC(   LDREST  t3, REST(0)(src),       .Ll_exc_copy)
+       LDREST(t3, REST(0)(src), .Ll_exc_copy\@)
        SUB     t2, t2, t1      # t2 = number of bytes copied
        xor     match, t0, t1
-EXC(   STFIRST t3, FIRST(0)(dst),      .Ls_exc)
+       STFIRST(t3, FIRST(0)(dst), .Ls_exc\@)
        SLL     t4, t1, 3               # t4 = number of bits to discard
        SHIFT_DISCARD t3, t3, t4
        /* no SHIFT_DISCARD_REVERT to handle odd buffer properly */
        ADDC(sum, t3)
-       beq     len, t2, .Ldone
+       beq     len, t2, .Ldone\@
         SUB    len, len, t2
        ADD     dst, dst, t2
-       beqz    match, .Lboth_aligned
+       beqz    match, .Lboth_aligned\@
         ADD    src, src, t2
 
-.Lsrc_unaligned_dst_aligned:
+.Lsrc_unaligned_dst_aligned\@:
        SRL     t0, len, LOG_NBYTES+2    # +2 for 4 units/iter
-       beqz    t0, .Lcleanup_src_unaligned
+       beqz    t0, .Lcleanup_src_unaligned\@
         and    rem, len, (4*NBYTES-1)   # rem = len % 4*NBYTES
 1:
 /*
@@ -578,53 +626,53 @@ EXC(      STFIRST t3, FIRST(0)(dst),      .Ls_exc)
  * It's OK to load FIRST(N+1) before REST(N) because the two addresses
  * are to the same unit (unless src is aligned, but it's not).
  */
-EXC(   LDFIRST t0, FIRST(0)(src),      .Ll_exc)
-EXC(   LDFIRST t1, FIRST(1)(src),      .Ll_exc_copy)
+       LDFIRST(t0, FIRST(0)(src), .Ll_exc\@)
+       LDFIRST(t1, FIRST(1)(src), .Ll_exc_copy\@)
        SUB     len, len, 4*NBYTES
-EXC(   LDREST  t0, REST(0)(src),       .Ll_exc_copy)
-EXC(   LDREST  t1, REST(1)(src),       .Ll_exc_copy)
-EXC(   LDFIRST t2, FIRST(2)(src),      .Ll_exc_copy)
-EXC(   LDFIRST t3, FIRST(3)(src),      .Ll_exc_copy)
-EXC(   LDREST  t2, REST(2)(src),       .Ll_exc_copy)
-EXC(   LDREST  t3, REST(3)(src),       .Ll_exc_copy)
+       LDREST(t0, REST(0)(src), .Ll_exc_copy\@)
+       LDREST(t1, REST(1)(src), .Ll_exc_copy\@)
+       LDFIRST(t2, FIRST(2)(src), .Ll_exc_copy\@)
+       LDFIRST(t3, FIRST(3)(src), .Ll_exc_copy\@)
+       LDREST(t2, REST(2)(src), .Ll_exc_copy\@)
+       LDREST(t3, REST(3)(src), .Ll_exc_copy\@)
        ADD     src, src, 4*NBYTES
 #ifdef CONFIG_CPU_SB1
        nop                             # improves slotting
 #endif
-EXC(   STORE   t0, UNIT(0)(dst),       .Ls_exc)
+       STORE(t0, UNIT(0)(dst), .Ls_exc\@)
        ADDC(sum, t0)
-EXC(   STORE   t1, UNIT(1)(dst),       .Ls_exc)
+       STORE(t1, UNIT(1)(dst), .Ls_exc\@)
        ADDC(sum, t1)
-EXC(   STORE   t2, UNIT(2)(dst),       .Ls_exc)
+       STORE(t2, UNIT(2)(dst), .Ls_exc\@)
        ADDC(sum, t2)
-EXC(   STORE   t3, UNIT(3)(dst),       .Ls_exc)
+       STORE(t3, UNIT(3)(dst), .Ls_exc\@)
        ADDC(sum, t3)
        .set    reorder                         /* DADDI_WAR */
        ADD     dst, dst, 4*NBYTES
        bne     len, rem, 1b
        .set    noreorder
 
-.Lcleanup_src_unaligned:
-       beqz    len, .Ldone
+.Lcleanup_src_unaligned\@:
+       beqz    len, .Ldone\@
         and    rem, len, NBYTES-1  # rem = len % NBYTES
-       beq     rem, len, .Lcopy_bytes
+       beq     rem, len, .Lcopy_bytes\@
         nop
 1:
-EXC(   LDFIRST t0, FIRST(0)(src),      .Ll_exc)
-EXC(   LDREST  t0, REST(0)(src),       .Ll_exc_copy)
+       LDFIRST(t0, FIRST(0)(src), .Ll_exc\@)
+       LDREST(t0, REST(0)(src), .Ll_exc_copy\@)
        ADD     src, src, NBYTES
        SUB     len, len, NBYTES
-EXC(   STORE   t0, 0(dst),             .Ls_exc)
+       STORE(t0, 0(dst), .Ls_exc\@)
        ADDC(sum, t0)
        .set    reorder                         /* DADDI_WAR */
        ADD     dst, dst, NBYTES
        bne     len, rem, 1b
        .set    noreorder
 
-.Lcopy_bytes_checklen:
-       beqz    len, .Ldone
+.Lcopy_bytes_checklen\@:
+       beqz    len, .Ldone\@
         nop
-.Lcopy_bytes:
+.Lcopy_bytes\@:
        /* 0 < len < NBYTES  */
 #ifdef CONFIG_CPU_LITTLE_ENDIAN
 #define SHIFT_START 0
@@ -637,12 +685,12 @@ EXC(      STORE   t0, 0(dst),             .Ls_exc)
        li      t3, SHIFT_START # shift
 /* use .Ll_exc_copy here to return correct sum on fault */
 #define COPY_BYTE(N)                   \
-EXC(   lbu     t0, N(src), .Ll_exc_copy);      \
+       LOADBU(t0, N(src), .Ll_exc_copy\@);     \
        SUB     len, len, 1;            \
-EXC(   sb      t0, N(dst), .Ls_exc);   \
+       STOREB(t0, N(dst), .Ls_exc\@);  \
        SLLV    t0, t0, t3;             \
        addu    t3, SHIFT_INC;          \
-       beqz    len, .Lcopy_bytes_done; \
+       beqz    len, .Lcopy_bytes_done\@; \
         or     t2, t0
 
        COPY_BYTE(0)
@@ -653,14 +701,14 @@ EXC(      sb      t0, N(dst), .Ls_exc);   \
        COPY_BYTE(4)
        COPY_BYTE(5)
 #endif
-EXC(   lbu     t0, NBYTES-2(src), .Ll_exc_copy)
+       LOADBU(t0, NBYTES-2(src), .Ll_exc_copy\@)
        SUB     len, len, 1
-EXC(   sb      t0, NBYTES-2(dst), .Ls_exc)
+       STOREB(t0, NBYTES-2(dst), .Ls_exc\@)
        SLLV    t0, t0, t3
        or      t2, t0
-.Lcopy_bytes_done:
+.Lcopy_bytes_done\@:
        ADDC(sum, t2)
-.Ldone:
+.Ldone\@:
        /* fold checksum */
 #ifdef USE_DOUBLE
        dsll32  v1, sum, 0
@@ -689,7 +737,7 @@ EXC(        sb      t0, NBYTES-2(dst), .Ls_exc)
        jr      ra
        .set noreorder
 
-.Ll_exc_copy:
+.Ll_exc_copy\@:
        /*
         * Copy bytes from src until faulting load address (or until a
         * lb faults)
@@ -700,11 +748,11 @@ EXC(      sb      t0, NBYTES-2(dst), .Ls_exc)
         *
         * Assumes src < THREAD_BUADDR($28)
         */
-       LOAD    t0, TI_TASK($28)
+       LOADK   t0, TI_TASK($28)
         li     t2, SHIFT_START
-       LOAD    t0, THREAD_BUADDR(t0)
+       LOADK   t0, THREAD_BUADDR(t0)
 1:
-EXC(   lbu     t1, 0(src),     .Ll_exc)
+       LOADBU(t1, 0(src), .Ll_exc\@)
        ADD     src, src, 1
        sb      t1, 0(dst)      # can't fault -- we're copy_from_user
        SLLV    t1, t1, t2
@@ -714,10 +762,10 @@ EXC(      lbu     t1, 0(src),     .Ll_exc)
        ADD     dst, dst, 1
        bne     src, t0, 1b
        .set    noreorder
-.Ll_exc:
-       LOAD    t0, TI_TASK($28)
+.Ll_exc\@:
+       LOADK   t0, TI_TASK($28)
         nop
-       LOAD    t0, THREAD_BUADDR(t0)   # t0 is just past last good address
+       LOADK   t0, THREAD_BUADDR(t0)   # t0 is just past last good address
         nop
        SUB     len, AT, t0             # len number of uncopied bytes
        /*
@@ -733,7 +781,7 @@ EXC(        lbu     t1, 0(src),     .Ll_exc)
         */
        .set    reorder                         /* DADDI_WAR */
        SUB     src, len, 1
-       beqz    len, .Ldone
+       beqz    len, .Ldone\@
        .set    noreorder
 1:     sb      zero, 0(dst)
        ADD     dst, dst, 1
@@ -748,13 +796,31 @@ EXC(      lbu     t1, 0(src),     .Ll_exc)
         SUB    src, src, v1
 #endif
        li      v1, -EFAULT
-       b       .Ldone
+       b       .Ldone\@
         sw     v1, (errptr)
 
-.Ls_exc:
+.Ls_exc\@:
        li      v0, -1 /* invalid checksum */
        li      v1, -EFAULT
        jr      ra
         sw     v1, (errptr)
        .set    pop
-       END(__csum_partial_copy_user)
+       .endm
+
+LEAF(__csum_partial_copy_kernel)
+#ifndef CONFIG_EVA
+FEXPORT(__csum_partial_copy_to_user)
+FEXPORT(__csum_partial_copy_from_user)
+#endif
+__BUILD_CSUM_PARTIAL_COPY_USER LEGACY_MODE USEROP USEROP 1
+END(__csum_partial_copy_kernel)
+
+#ifdef CONFIG_EVA
+LEAF(__csum_partial_copy_to_user)
+__BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE KERNELOP USEROP 0
+END(__csum_partial_copy_to_user)
+
+LEAF(__csum_partial_copy_from_user)
+__BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE USEROP KERNELOP 0
+END(__csum_partial_copy_from_user)
+#endif
index c5c40dad0bbf6791d13da727b7262063b4511de0..c17ef80cf65ab0f67946b515c24dab351f684acc 100644 (file)
@@ -10,6 +10,7 @@
  * Copyright (C) 2002 Broadcom, Inc.
  *   memcpy/copy_user author: Mark Vandevoorde
  * Copyright (C) 2007  Maciej W. Rozycki
+ * Copyright (C) 2014 Imagination Technologies Ltd.
  *
  * Mnemonic names for arguments to memcpy/__copy_user
  */
  * they're not protected.
  */
 
-#define EXC(inst_reg,addr,handler)             \
-9:     inst_reg, addr;                         \
-       .section __ex_table,"a";                \
-       PTR     9b, handler;                    \
-       .previous
+/* Instruction type */
+#define LD_INSN 1
+#define ST_INSN 2
+/* Pretech type */
+#define SRC_PREFETCH 1
+#define DST_PREFETCH 2
+#define LEGACY_MODE 1
+#define EVA_MODE    2
+#define USEROP   1
+#define KERNELOP 2
+
+/*
+ * Wrapper to add an entry in the exception table
+ * in case the insn causes a memory exception.
+ * Arguments:
+ * insn    : Load/store instruction
+ * type    : Instruction type
+ * reg     : Register
+ * addr    : Address
+ * handler : Exception handler
+ */
+
+#define EXC(insn, type, reg, addr, handler)                    \
+       .if \mode == LEGACY_MODE;                               \
+9:             insn reg, addr;                                 \
+               .section __ex_table,"a";                        \
+               PTR     9b, handler;                            \
+               .previous;                                      \
+       /* This is assembled in EVA mode */                     \
+       .else;                                                  \
+               /* If loading from user or storing to user */   \
+               .if ((\from == USEROP) && (type == LD_INSN)) || \
+                   ((\to == USEROP) && (type == ST_INSN));     \
+9:                     __BUILD_EVA_INSN(insn##e, reg, addr);   \
+                       .section __ex_table,"a";                \
+                       PTR     9b, handler;                    \
+                       .previous;                              \
+               .else;                                          \
+                       /*                                      \
+                        *  Still in EVA, but no need for       \
+                        * exception handler or EVA insn        \
+                        */                                     \
+                       insn reg, addr;                         \
+               .endif;                                         \
+       .endif
 
 /*
  * Only on the 64-bit kernel we can made use of 64-bit registers.
 
 #ifdef USE_DOUBLE
 
-#define LOAD   ld
-#define LOADL  ldl
-#define LOADR  ldr
-#define STOREL sdl
-#define STORER sdr
-#define STORE  sd
+#define LOADK ld /* No exception */
+#define LOAD(reg, addr, handler)       EXC(ld, LD_INSN, reg, addr, handler)
+#define LOADL(reg, addr, handler)      EXC(ldl, LD_INSN, reg, addr, handler)
+#define LOADR(reg, addr, handler)      EXC(ldr, LD_INSN, reg, addr, handler)
+#define STOREL(reg, addr, handler)     EXC(sdl, ST_INSN, reg, addr, handler)
+#define STORER(reg, addr, handler)     EXC(sdr, ST_INSN, reg, addr, handler)
+#define STORE(reg, addr, handler)      EXC(sd, ST_INSN, reg, addr, handler)
 #define ADD    daddu
 #define SUB    dsubu
 #define SRL    dsrl
 
 #else
 
-#define LOAD   lw
-#define LOADL  lwl
-#define LOADR  lwr
-#define STOREL swl
-#define STORER swr
-#define STORE  sw
+#define LOADK lw /* No exception */
+#define LOAD(reg, addr, handler)       EXC(lw, LD_INSN, reg, addr, handler)
+#define LOADL(reg, addr, handler)      EXC(lwl, LD_INSN, reg, addr, handler)
+#define LOADR(reg, addr, handler)      EXC(lwr, LD_INSN, reg, addr, handler)
+#define STOREL(reg, addr, handler)     EXC(swl, ST_INSN, reg, addr, handler)
+#define STORER(reg, addr, handler)     EXC(swr, ST_INSN, reg, addr, handler)
+#define STORE(reg, addr, handler)      EXC(sw, ST_INSN, reg, addr, handler)
 #define ADD    addu
 #define SUB    subu
 #define SRL    srl
 
 #endif /* USE_DOUBLE */
 
+#define LOADB(reg, addr, handler)      EXC(lb, LD_INSN, reg, addr, handler)
+#define STOREB(reg, addr, handler)     EXC(sb, ST_INSN, reg, addr, handler)
+
+#define _PREF(hint, addr, type)                                                \
+       .if \mode == LEGACY_MODE;                                       \
+               PREF(hint, addr);                                       \
+       .else;                                                          \
+               .if ((\from == USEROP) && (type == SRC_PREFETCH)) ||    \
+                   ((\to == USEROP) && (type == DST_PREFETCH));        \
+                       /*                                              \
+                        * PREFE has only 9 bits for the offset         \
+                        * compared to PREF which has 16, so it may     \
+                        * need to use the $at register but this        \
+                        * register should remain intact because it's   \
+                        * used later on. Therefore use $v1.            \
+                        */                                             \
+                       .set at=v1;                                     \
+                       PREFE(hint, addr);                              \
+                       .set noat;                                      \
+               .else;                                                  \
+                       PREF(hint, addr);                               \
+               .endif;                                                 \
+       .endif
+
+#define PREFS(hint, addr) _PREF(hint, addr, SRC_PREFETCH)
+#define PREFD(hint, addr) _PREF(hint, addr, DST_PREFETCH)
+
 #ifdef CONFIG_CPU_LITTLE_ENDIAN
 #define LDFIRST LOADR
 #define LDREST LOADL
        .set    at=v1
 #endif
 
-/*
- * t6 is used as a flag to note inatomic mode.
- */
-LEAF(__copy_user_inatomic)
-       b       __copy_user_common
-        li     t6, 1
-       END(__copy_user_inatomic)
-
-/*
- * A combined memcpy/__copy_user
- * __copy_user sets len to 0 for success; else to an upper bound of
- * the number of uncopied bytes.
- * memcpy sets v0 to dst.
- */
        .align  5
-LEAF(memcpy)                                   /* a0=dst a1=src a2=len */
-       move    v0, dst                         /* return value */
-.L__memcpy:
-FEXPORT(__copy_user)
-       li      t6, 0   /* not inatomic */
-__copy_user_common:
+
+       /*
+        * Macro to build the __copy_user common code
+        * Arguements:
+        * mode : LEGACY_MODE or EVA_MODE
+        * from : Source operand. USEROP or KERNELOP
+        * to   : Destination operand. USEROP or KERNELOP
+        */
+       .macro __BUILD_COPY_USER mode, from, to
+
+       /* initialize __memcpy if this the first time we execute this macro */
+       .ifnotdef __memcpy
+       .set __memcpy, 1
+       .hidden __memcpy /* make sure it does not leak */
+       .endif
+
        /*
         * Note: dst & src may be unaligned, len may be 0
         * Temps
@@ -217,94 +283,94 @@ __copy_user_common:
         *
         * If len < NBYTES use byte operations.
         */
-       PREF  0, 0(src) )
-       PREF  1, 0(dst) )
+       PREFS(  0, 0(src) )
+       PREFD(  1, 0(dst) )
        sltu    t2, len, NBYTES
        and     t1, dst, ADDRMASK
-       PREF  0, 1*32(src) )
-       PREF  1, 1*32(dst) )
-       bnez    t2, .Lcopy_bytes_checklen
+       PREFS(  0, 1*32(src) )
+       PREFD(  1, 1*32(dst) )
+       bnez    t2, .Lcopy_bytes_checklen\@
         and    t0, src, ADDRMASK
-       PREF  0, 2*32(src) )
-       PREF  1, 2*32(dst) )
-       bnez    t1, .Ldst_unaligned
+       PREFS(  0, 2*32(src) )
+       PREFD(  1, 2*32(dst) )
+       bnez    t1, .Ldst_unaligned\@
         nop
-       bnez    t0, .Lsrc_unaligned_dst_aligned
+       bnez    t0, .Lsrc_unaligned_dst_aligned\@
        /*
         * use delay slot for fall-through
         * src and dst are aligned; need to compute rem
         */
-.Lboth_aligned:
+.Lboth_aligned\@:
         SRL    t0, len, LOG_NBYTES+3    # +3 for 8 units/iter
-       beqz    t0, .Lcleanup_both_aligned # len < 8*NBYTES
+       beqz    t0, .Lcleanup_both_aligned\@ # len < 8*NBYTES
         and    rem, len, (8*NBYTES-1)   # rem = len % (8*NBYTES)
-       PREF  0, 3*32(src) )
-       PREF  1, 3*32(dst) )
+       PREFS(  0, 3*32(src) )
+       PREFD(  1, 3*32(dst) )
        .align  4
 1:
        R10KCBARRIER(0(ra))
-EXC(   LOAD    t0, UNIT(0)(src),       .Ll_exc)
-EXC(   LOAD    t1, UNIT(1)(src),       .Ll_exc_copy)
-EXC(   LOAD    t2, UNIT(2)(src),       .Ll_exc_copy)
-EXC(   LOAD    t3, UNIT(3)(src),       .Ll_exc_copy)
+       LOAD(t0, UNIT(0)(src), .Ll_exc\@)
+       LOAD(t1, UNIT(1)(src), .Ll_exc_copy\@)
+       LOAD(t2, UNIT(2)(src), .Ll_exc_copy\@)
+       LOAD(t3, UNIT(3)(src), .Ll_exc_copy\@)
        SUB     len, len, 8*NBYTES
-EXC(   LOAD    t4, UNIT(4)(src),       .Ll_exc_copy)
-EXC(   LOAD    t7, UNIT(5)(src),       .Ll_exc_copy)
-EXC(   STORE   t0, UNIT(0)(dst),       .Ls_exc_p8u)
-EXC(   STORE   t1, UNIT(1)(dst),       .Ls_exc_p7u)
-EXC(   LOAD    t0, UNIT(6)(src),       .Ll_exc_copy)
-EXC(   LOAD    t1, UNIT(7)(src),       .Ll_exc_copy)
+       LOAD(t4, UNIT(4)(src), .Ll_exc_copy\@)
+       LOAD(t7, UNIT(5)(src), .Ll_exc_copy\@)
+       STORE(t0, UNIT(0)(dst), .Ls_exc_p8u\@)
+       STORE(t1, UNIT(1)(dst), .Ls_exc_p7u\@)
+       LOAD(t0, UNIT(6)(src), .Ll_exc_copy\@)
+       LOAD(t1, UNIT(7)(src), .Ll_exc_copy\@)
        ADD     src, src, 8*NBYTES
        ADD     dst, dst, 8*NBYTES
-EXC(   STORE   t2, UNIT(-6)(dst),      .Ls_exc_p6u)
-EXC(   STORE   t3, UNIT(-5)(dst),      .Ls_exc_p5u)
-EXC(   STORE   t4, UNIT(-4)(dst),      .Ls_exc_p4u)
-EXC(   STORE   t7, UNIT(-3)(dst),      .Ls_exc_p3u)
-EXC(   STORE   t0, UNIT(-2)(dst),      .Ls_exc_p2u)
-EXC(   STORE   t1, UNIT(-1)(dst),      .Ls_exc_p1u)
-       PREF  0, 8*32(src) )
-       PREF  1, 8*32(dst) )
+       STORE(t2, UNIT(-6)(dst), .Ls_exc_p6u\@)
+       STORE(t3, UNIT(-5)(dst), .Ls_exc_p5u\@)
+       STORE(t4, UNIT(-4)(dst), .Ls_exc_p4u\@)
+       STORE(t7, UNIT(-3)(dst), .Ls_exc_p3u\@)
+       STORE(t0, UNIT(-2)(dst), .Ls_exc_p2u\@)
+       STORE(t1, UNIT(-1)(dst), .Ls_exc_p1u\@)
+       PREFS(  0, 8*32(src) )
+       PREFD(  1, 8*32(dst) )
        bne     len, rem, 1b
         nop
 
        /*
         * len == rem == the number of bytes left to copy < 8*NBYTES
         */
-.Lcleanup_both_aligned:
-       beqz    len, .Ldone
+.Lcleanup_both_aligned\@:
+       beqz    len, .Ldone\@
         sltu   t0, len, 4*NBYTES
-       bnez    t0, .Lless_than_4units
+       bnez    t0, .Lless_than_4units\@
         and    rem, len, (NBYTES-1)    # rem = len % NBYTES
        /*
         * len >= 4*NBYTES
         */
-EXC(   LOAD    t0, UNIT(0)(src),       .Ll_exc)
-EXC(   LOAD    t1, UNIT(1)(src),       .Ll_exc_copy)
-EXC(   LOAD    t2, UNIT(2)(src),       .Ll_exc_copy)
-EXC(   LOAD    t3, UNIT(3)(src),       .Ll_exc_copy)
+       LOAD( t0, UNIT(0)(src), .Ll_exc\@)
+       LOAD( t1, UNIT(1)(src), .Ll_exc_copy\@)
+       LOAD( t2, UNIT(2)(src), .Ll_exc_copy\@)
+       LOAD( t3, UNIT(3)(src), .Ll_exc_copy\@)
        SUB     len, len, 4*NBYTES
        ADD     src, src, 4*NBYTES
        R10KCBARRIER(0(ra))
-EXC(   STORE   t0, UNIT(0)(dst),       .Ls_exc_p4u)
-EXC(   STORE   t1, UNIT(1)(dst),       .Ls_exc_p3u)
-EXC(   STORE   t2, UNIT(2)(dst),       .Ls_exc_p2u)
-EXC(   STORE   t3, UNIT(3)(dst),       .Ls_exc_p1u)
+       STORE(t0, UNIT(0)(dst), .Ls_exc_p4u\@)
+       STORE(t1, UNIT(1)(dst), .Ls_exc_p3u\@)
+       STORE(t2, UNIT(2)(dst), .Ls_exc_p2u\@)
+       STORE(t3, UNIT(3)(dst), .Ls_exc_p1u\@)
        .set    reorder                         /* DADDI_WAR */
        ADD     dst, dst, 4*NBYTES
-       beqz    len, .Ldone
+       beqz    len, .Ldone\@
        .set    noreorder
-.Lless_than_4units:
+.Lless_than_4units\@:
        /*
         * rem = len % NBYTES
         */
-       beq     rem, len, .Lcopy_bytes
+       beq     rem, len, .Lcopy_bytes\@
         nop
 1:
        R10KCBARRIER(0(ra))
-EXC(   LOAD    t0, 0(src),             .Ll_exc)
+       LOAD(t0, 0(src), .Ll_exc\@)
        ADD     src, src, NBYTES
        SUB     len, len, NBYTES
-EXC(   STORE   t0, 0(dst),             .Ls_exc_p1u)
+       STORE(t0, 0(dst), .Ls_exc_p1u\@)
        .set    reorder                         /* DADDI_WAR */
        ADD     dst, dst, NBYTES
        bne     rem, len, 1b
@@ -322,17 +388,17 @@ EXC(      STORE   t0, 0(dst),             .Ls_exc_p1u)
         * more instruction-level parallelism.
         */
 #define bits t2
-       beqz    len, .Ldone
+       beqz    len, .Ldone\@
         ADD    t1, dst, len    # t1 is just past last byte of dst
        li      bits, 8*NBYTES
        SLL     rem, len, 3     # rem = number of bits to keep
-EXC(   LOAD    t0, 0(src),             .Ll_exc)
+       LOAD(t0, 0(src), .Ll_exc\@)
        SUB     bits, bits, rem # bits = number of bits to discard
        SHIFT_DISCARD t0, t0, bits
-EXC(   STREST  t0, -1(t1),             .Ls_exc)
+       STREST(t0, -1(t1), .Ls_exc\@)
        jr      ra
         move   len, zero
-.Ldst_unaligned:
+.Ldst_unaligned\@:
        /*
         * dst is unaligned
         * t0 = src & ADDRMASK
@@ -343,25 +409,25 @@ EXC(      STREST  t0, -1(t1),             .Ls_exc)
         * Set match = (src and dst have same alignment)
         */
 #define match rem
-EXC(   LDFIRST t3, FIRST(0)(src),      .Ll_exc)
+       LDFIRST(t3, FIRST(0)(src), .Ll_exc\@)
        ADD     t2, zero, NBYTES
-EXC(   LDREST  t3, REST(0)(src),       .Ll_exc_copy)
+       LDREST(t3, REST(0)(src), .Ll_exc_copy\@)
        SUB     t2, t2, t1      # t2 = number of bytes copied
        xor     match, t0, t1
        R10KCBARRIER(0(ra))
-EXC(   STFIRST t3, FIRST(0)(dst),      .Ls_exc)
-       beq     len, t2, .Ldone
+       STFIRST(t3, FIRST(0)(dst), .Ls_exc\@)
+       beq     len, t2, .Ldone\@
         SUB    len, len, t2
        ADD     dst, dst, t2
-       beqz    match, .Lboth_aligned
+       beqz    match, .Lboth_aligned\@
         ADD    src, src, t2
 
-.Lsrc_unaligned_dst_aligned:
+.Lsrc_unaligned_dst_aligned\@:
        SRL     t0, len, LOG_NBYTES+2    # +2 for 4 units/iter
-       PREF  0, 3*32(src) )
-       beqz    t0, .Lcleanup_src_unaligned
+       PREFS(  0, 3*32(src) )
+       beqz    t0, .Lcleanup_src_unaligned\@
         and    rem, len, (4*NBYTES-1)   # rem = len % 4*NBYTES
-       PREF  1, 3*32(dst) )
+       PREFD(  1, 3*32(dst) )
 1:
 /*
  * Avoid consecutive LD*'s to the same register since some mips
@@ -370,58 +436,58 @@ EXC(      STFIRST t3, FIRST(0)(dst),      .Ls_exc)
  * are to the same unit (unless src is aligned, but it's not).
  */
        R10KCBARRIER(0(ra))
-EXC(   LDFIRST t0, FIRST(0)(src),      .Ll_exc)
-EXC(   LDFIRST t1, FIRST(1)(src),      .Ll_exc_copy)
+       LDFIRST(t0, FIRST(0)(src), .Ll_exc\@)
+       LDFIRST(t1, FIRST(1)(src), .Ll_exc_copy\@)
        SUB     len, len, 4*NBYTES
-EXC(   LDREST  t0, REST(0)(src),       .Ll_exc_copy)
-EXC(   LDREST  t1, REST(1)(src),       .Ll_exc_copy)
-EXC(   LDFIRST t2, FIRST(2)(src),      .Ll_exc_copy)
-EXC(   LDFIRST t3, FIRST(3)(src),      .Ll_exc_copy)
-EXC(   LDREST  t2, REST(2)(src),       .Ll_exc_copy)
-EXC(   LDREST  t3, REST(3)(src),       .Ll_exc_copy)
-       PREF  0, 9*32(src) )          # 0 is PREF_LOAD  (not streamed)
+       LDREST(t0, REST(0)(src), .Ll_exc_copy\@)
+       LDREST(t1, REST(1)(src), .Ll_exc_copy\@)
+       LDFIRST(t2, FIRST(2)(src), .Ll_exc_copy\@)
+       LDFIRST(t3, FIRST(3)(src), .Ll_exc_copy\@)
+       LDREST(t2, REST(2)(src), .Ll_exc_copy\@)
+       LDREST(t3, REST(3)(src), .Ll_exc_copy\@)
+       PREFS(  0, 9*32(src) )          # 0 is PREF_LOAD  (not streamed)
        ADD     src, src, 4*NBYTES
 #ifdef CONFIG_CPU_SB1
        nop                             # improves slotting
 #endif
-EXC(   STORE   t0, UNIT(0)(dst),       .Ls_exc_p4u)
-EXC(   STORE   t1, UNIT(1)(dst),       .Ls_exc_p3u)
-EXC(   STORE   t2, UNIT(2)(dst),       .Ls_exc_p2u)
-EXC(   STORE   t3, UNIT(3)(dst),       .Ls_exc_p1u)
-       PREF  1, 9*32(dst) )          # 1 is PREF_STORE (not streamed)
+       STORE(t0, UNIT(0)(dst), .Ls_exc_p4u\@)
+       STORE(t1, UNIT(1)(dst), .Ls_exc_p3u\@)
+       STORE(t2, UNIT(2)(dst), .Ls_exc_p2u\@)
+       STORE(t3, UNIT(3)(dst), .Ls_exc_p1u\@)
+       PREFD(  1, 9*32(dst) )          # 1 is PREF_STORE (not streamed)
        .set    reorder                         /* DADDI_WAR */
        ADD     dst, dst, 4*NBYTES
        bne     len, rem, 1b
        .set    noreorder
 
-.Lcleanup_src_unaligned:
-       beqz    len, .Ldone
+.Lcleanup_src_unaligned\@:
+       beqz    len, .Ldone\@
         and    rem, len, NBYTES-1  # rem = len % NBYTES
-       beq     rem, len, .Lcopy_bytes
+       beq     rem, len, .Lcopy_bytes\@
         nop
 1:
        R10KCBARRIER(0(ra))
-EXC(   LDFIRST t0, FIRST(0)(src),      .Ll_exc)
-EXC(   LDREST  t0, REST(0)(src),       .Ll_exc_copy)
+       LDFIRST(t0, FIRST(0)(src), .Ll_exc\@)
+       LDREST(t0, REST(0)(src), .Ll_exc_copy\@)
        ADD     src, src, NBYTES
        SUB     len, len, NBYTES
-EXC(   STORE   t0, 0(dst),             .Ls_exc_p1u)
+       STORE(t0, 0(dst), .Ls_exc_p1u\@)
        .set    reorder                         /* DADDI_WAR */
        ADD     dst, dst, NBYTES
        bne     len, rem, 1b
        .set    noreorder
 
-.Lcopy_bytes_checklen:
-       beqz    len, .Ldone
+.Lcopy_bytes_checklen\@:
+       beqz    len, .Ldone\@
         nop
-.Lcopy_bytes:
+.Lcopy_bytes\@:
        /* 0 < len < NBYTES  */
        R10KCBARRIER(0(ra))
 #define COPY_BYTE(N)                   \
-EXC(   lb      t0, N(src), .Ll_exc);   \
+       LOADB(t0, N(src), .Ll_exc\@);   \
        SUB     len, len, 1;            \
-       beqz    len, .Ldone;            \
-EXC(    sb     t0, N(dst), .Ls_exc_p1)
+       beqz    len, .Ldone\@;          \
+       STOREB(t0, N(dst), .Ls_exc_p1\@)
 
        COPY_BYTE(0)
        COPY_BYTE(1)
@@ -431,16 +497,19 @@ EXC(       sb     t0, N(dst), .Ls_exc_p1)
        COPY_BYTE(4)
        COPY_BYTE(5)
 #endif
-EXC(   lb      t0, NBYTES-2(src), .Ll_exc)
+       LOADB(t0, NBYTES-2(src), .Ll_exc\@)
        SUB     len, len, 1
        jr      ra
-EXC(    sb     t0, NBYTES-2(dst), .Ls_exc_p1)
-.Ldone:
+       STOREB(t0, NBYTES-2(dst), .Ls_exc_p1\@)
+.Ldone\@:
        jr      ra
-        nop
+       .if __memcpy == 1
        END(memcpy)
+       .set __memcpy, 0
+       .hidden __memcpy
+       .endif
 
-.Ll_exc_copy:
+.Ll_exc_copy\@:
        /*
         * Copy bytes from src until faulting load address (or until a
         * lb faults)
@@ -451,24 +520,24 @@ EXC(       sb     t0, NBYTES-2(dst), .Ls_exc_p1)
         *
         * Assumes src < THREAD_BUADDR($28)
         */
-       LOAD    t0, TI_TASK($28)
+       LOADK   t0, TI_TASK($28)
         nop
-       LOAD    t0, THREAD_BUADDR(t0)
+       LOADK   t0, THREAD_BUADDR(t0)
 1:
-EXC(   lb      t1, 0(src),     .Ll_exc)
+       LOADB(t1, 0(src), .Ll_exc\@)
        ADD     src, src, 1
        sb      t1, 0(dst)      # can't fault -- we're copy_from_user
        .set    reorder                         /* DADDI_WAR */
        ADD     dst, dst, 1
        bne     src, t0, 1b
        .set    noreorder
-.Ll_exc:
-       LOAD    t0, TI_TASK($28)
+.Ll_exc\@:
+       LOADK   t0, TI_TASK($28)
         nop
-       LOAD    t0, THREAD_BUADDR(t0)   # t0 is just past last good address
+       LOADK   t0, THREAD_BUADDR(t0)   # t0 is just past last good address
         nop
        SUB     len, AT, t0             # len number of uncopied bytes
-       bnez    t6, .Ldone      /* Skip the zeroing part if inatomic */
+       bnez    t6, .Ldone\@    /* Skip the zeroing part if inatomic */
        /*
         * Here's where we rely on src and dst being incremented in tandem,
         *   See (3) above.
@@ -482,7 +551,7 @@ EXC(        lb      t1, 0(src),     .Ll_exc)
         */
        .set    reorder                         /* DADDI_WAR */
        SUB     src, len, 1
-       beqz    len, .Ldone
+       beqz    len, .Ldone\@
        .set    noreorder
 1:     sb      zero, 0(dst)
        ADD     dst, dst, 1
@@ -503,7 +572,7 @@ EXC(        lb      t1, 0(src),     .Ll_exc)
 
 #define SEXC(n)                                                        \
        .set    reorder;                        /* DADDI_WAR */ \
-.Ls_exc_p ## n ## u:                                           \
+.Ls_exc_p ## n ## u\@:                                         \
        ADD     len, len, n*NBYTES;                             \
        jr      ra;                                             \
        .set    noreorder
@@ -517,14 +586,15 @@ SEXC(3)
 SEXC(2)
 SEXC(1)
 
-.Ls_exc_p1:
+.Ls_exc_p1\@:
        .set    reorder                         /* DADDI_WAR */
        ADD     len, len, 1
        jr      ra
        .set    noreorder
-.Ls_exc:
+.Ls_exc\@:
        jr      ra
         nop
+       .endm
 
        .align  5
 LEAF(memmove)
@@ -575,3 +645,71 @@ LEAF(__rmemcpy)                                    /* a0=dst a1=src a2=len */
        jr      ra
         move   a2, zero
        END(__rmemcpy)
+
+/*
+ * t6 is used as a flag to note inatomic mode.
+ */
+LEAF(__copy_user_inatomic)
+       b       __copy_user_common
+       li      t6, 1
+       END(__copy_user_inatomic)
+
+/*
+ * A combined memcpy/__copy_user
+ * __copy_user sets len to 0 for success; else to an upper bound of
+ * the number of uncopied bytes.
+ * memcpy sets v0 to dst.
+ */
+       .align  5
+LEAF(memcpy)                                   /* a0=dst a1=src a2=len */
+       move    v0, dst                         /* return value */
+.L__memcpy:
+FEXPORT(__copy_user)
+       li      t6, 0   /* not inatomic */
+__copy_user_common:
+       /* Legacy Mode, user <-> user */
+       __BUILD_COPY_USER LEGACY_MODE USEROP USEROP
+
+#ifdef CONFIG_EVA
+
+/*
+ * For EVA we need distinct symbols for reading and writing to user space.
+ * This is because we need to use specific EVA instructions to perform the
+ * virtual <-> physical translation when a virtual address is actually in user
+ * space
+ */
+
+LEAF(__copy_user_inatomic_eva)
+       b       __copy_from_user_common
+       li      t6, 1
+       END(__copy_user_inatomic_eva)
+
+/*
+ * __copy_from_user (EVA)
+ */
+
+LEAF(__copy_from_user_eva)
+       li      t6, 0   /* not inatomic */
+__copy_from_user_common:
+       __BUILD_COPY_USER EVA_MODE USEROP KERNELOP
+END(__copy_from_user_eva)
+
+
+
+/*
+ * __copy_to_user (EVA)
+ */
+
+LEAF(__copy_to_user_eva)
+__BUILD_COPY_USER EVA_MODE KERNELOP USEROP
+END(__copy_to_user_eva)
+
+/*
+ * __copy_in_user (EVA)
+ */
+
+LEAF(__copy_in_user_eva)
+__BUILD_COPY_USER EVA_MODE USEROP USEROP
+END(__copy_in_user_eva)
+
+#endif
index 0580194e7402aa6508c579e2ff925833fd421948..7b0e5462ca517ac2c8411ef35d06f35b7867d73d 100644 (file)
 #define FILLPTRG t0
 #endif
 
+#define LEGACY_MODE 1
+#define EVA_MODE    2
+
+/*
+ * No need to protect it with EVA #ifdefery. The generated block of code
+ * will never be assembled if EVA is not enabled.
+ */
+#define __EVAFY(insn, reg, addr) __BUILD_EVA_INSN(insn##e, reg, addr)
+#define ___BUILD_EVA_INSN(insn, reg, addr) __EVAFY(insn, reg, addr)
+
 #define EX(insn,reg,addr,handler)                      \
-9:     insn    reg, addr;                              \
+       .if \mode == LEGACY_MODE;                       \
+9:             insn    reg, addr;                      \
+       .else;                                          \
+9:             ___BUILD_EVA_INSN(insn, reg, addr);     \
+       .endif;                                         \
        .section __ex_table,"a";                        \
        PTR     9b, handler;                            \
        .previous
 
-       .macro  f_fill64 dst, offset, val, fixup
+       .macro  f_fill64 dst, offset, val, fixup, mode
        EX(LONG_S, \val, (\offset +  0 * STORSIZE)(\dst), \fixup)
        EX(LONG_S, \val, (\offset +  1 * STORSIZE)(\dst), \fixup)
        EX(LONG_S, \val, (\offset +  2 * STORSIZE)(\dst), \fixup)
 #endif
        .endm
 
-/*
- * memset(void *s, int c, size_t n)
- *
- * a0: start of area to clear
- * a1: char to fill with
- * a2: size of area to clear
- */
        .set    noreorder
        .align  5
-LEAF(memset)
-       beqz            a1, 1f
-        move           v0, a0                  /* result */
 
-       andi            a1, 0xff                /* spread fillword */
-       LONG_SLL                t1, a1, 8
-       or              a1, t1
-       LONG_SLL                t1, a1, 16
-#if LONGSIZE == 8
-       or              a1, t1
-       LONG_SLL                t1, a1, 32
-#endif
-       or              a1, t1
-1:
+       /*
+        * Macro to generate the __bzero{,_user} symbol
+        * Arguments:
+        * mode: LEGACY_MODE or EVA_MODE
+        */
+       .macro __BUILD_BZERO mode
+       /* Initialize __memset if this is the first time we call this macro */
+       .ifnotdef __memset
+       .set __memset, 1
+       .hidden __memset /* Make sure it does not leak */
+       .endif
 
-FEXPORT(__bzero)
        sltiu           t0, a2, STORSIZE        /* very small region? */
-       bnez            t0, .Lsmall_memset
-        andi           t0, a0, STORMASK        /* aligned? */
+       bnez            t0, .Lsmall_memset\@
+       andi            t0, a0, STORMASK        /* aligned? */
 
 #ifdef CONFIG_CPU_MICROMIPS
        move            t8, a1                  /* used by 'swp' instruction */
@@ -98,39 +102,39 @@ FEXPORT(__bzero)
 #endif
 #ifndef CONFIG_CPU_DADDI_WORKAROUNDS
        beqz            t0, 1f
-        PTR_SUBU       t0, STORSIZE            /* alignment in bytes */
+       PTR_SUBU        t0, STORSIZE            /* alignment in bytes */
 #else
        .set            noat
        li              AT, STORSIZE
        beqz            t0, 1f
-        PTR_SUBU       t0, AT                  /* alignment in bytes */
+       PTR_SUBU        t0, AT                  /* alignment in bytes */
        .set            at
 #endif
 
        R10KCBARRIER(0(ra))
 #ifdef __MIPSEB__
-       EX(LONG_S_L, a1, (a0), .Lfirst_fixup)   /* make word/dword aligned */
+       EX(LONG_S_L, a1, (a0), .Lfirst_fixup\@) /* make word/dword aligned */
 #endif
 #ifdef __MIPSEL__
-       EX(LONG_S_R, a1, (a0), .Lfirst_fixup)   /* make word/dword aligned */
+       EX(LONG_S_R, a1, (a0), .Lfirst_fixup\@) /* make word/dword aligned */
 #endif
        PTR_SUBU        a0, t0                  /* long align ptr */
        PTR_ADDU        a2, t0                  /* correct size */
 
 1:     ori             t1, a2, 0x3f            /* # of full blocks */
        xori            t1, 0x3f
-       beqz            t1, .Lmemset_partial    /* no block to fill */
-        andi           t0, a2, 0x40-STORSIZE
+       beqz            t1, .Lmemset_partial\@  /* no block to fill */
+       andi            t0, a2, 0x40-STORSIZE
 
        PTR_ADDU        t1, a0                  /* end address */
        .set            reorder
 1:     PTR_ADDIU       a0, 64
        R10KCBARRIER(0(ra))
-       f_fill64 a0, -64, FILL64RG, .Lfwd_fixup
+       f_fill64 a0, -64, FILL64RG, .Lfwd_fixup\@, \mode
        bne             t1, a0, 1b
        .set            noreorder
 
-.Lmemset_partial:
+.Lmemset_partial\@:
        R10KCBARRIER(0(ra))
        PTR_LA          t1, 2f                  /* where to start */
 #ifdef CONFIG_CPU_MICROMIPS
@@ -145,60 +149,100 @@ FEXPORT(__bzero)
        .set            at
 #endif
        jr              t1
-        PTR_ADDU       a0, t0                  /* dest ptr */
+       PTR_ADDU        a0, t0                  /* dest ptr */
 
        .set            push
        .set            noreorder
        .set            nomacro
-       f_fill64 a0, -64, FILL64RG, .Lpartial_fixup     /* ... but first do longs ... */
+       /* ... but first do longs ... */
+       f_fill64 a0, -64, FILL64RG, .Lpartial_fixup\@, \mode
 2:     .set            pop
        andi            a2, STORMASK            /* At most one long to go */
 
        beqz            a2, 1f
-        PTR_ADDU       a0, a2                  /* What's left */
+       PTR_ADDU        a0, a2                  /* What's left */
        R10KCBARRIER(0(ra))
 #ifdef __MIPSEB__
-       EX(LONG_S_R, a1, -1(a0), .Llast_fixup)
+       EX(LONG_S_R, a1, -1(a0), .Llast_fixup\@)
 #endif
 #ifdef __MIPSEL__
-       EX(LONG_S_L, a1, -1(a0), .Llast_fixup)
+       EX(LONG_S_L, a1, -1(a0), .Llast_fixup\@)
 #endif
 1:     jr              ra
-        move           a2, zero
+       move            a2, zero
 
-.Lsmall_memset:
+.Lsmall_memset\@:
        beqz            a2, 2f
-        PTR_ADDU       t1, a0, a2
+       PTR_ADDU        t1, a0, a2
 
 1:     PTR_ADDIU       a0, 1                   /* fill bytewise */
        R10KCBARRIER(0(ra))
        bne             t1, a0, 1b
-        sb             a1, -1(a0)
+       sb              a1, -1(a0)
 
 2:     jr              ra                      /* done */
-        move           a2, zero
+       move            a2, zero
+       .if __memset == 1
        END(memset)
+       .set __memset, 0
+       .hidden __memset
+       .endif
 
-.Lfirst_fixup:
+.Lfirst_fixup\@:
        jr      ra
-        nop
+       nop
 
-.Lfwd_fixup:
+.Lfwd_fixup\@:
        PTR_L           t0, TI_TASK($28)
        andi            a2, 0x3f
        LONG_L          t0, THREAD_BUADDR(t0)
        LONG_ADDU       a2, t1
        jr              ra
-        LONG_SUBU      a2, t0
+       LONG_SUBU       a2, t0
 
-.Lpartial_fixup:
+.Lpartial_fixup\@:
        PTR_L           t0, TI_TASK($28)
        andi            a2, STORMASK
        LONG_L          t0, THREAD_BUADDR(t0)
        LONG_ADDU       a2, t1
        jr              ra
-        LONG_SUBU      a2, t0
+       LONG_SUBU       a2, t0
 
-.Llast_fixup:
+.Llast_fixup\@:
        jr              ra
-        andi           v1, a2, STORMASK
+       andi            v1, a2, STORMASK
+
+       .endm
+
+/*
+ * memset(void *s, int c, size_t n)
+ *
+ * a0: start of area to clear
+ * a1: char to fill with
+ * a2: size of area to clear
+ */
+
+LEAF(memset)
+       beqz            a1, 1f
+       move            v0, a0                  /* result */
+
+       andi            a1, 0xff                /* spread fillword */
+       LONG_SLL                t1, a1, 8
+       or              a1, t1
+       LONG_SLL                t1, a1, 16
+#if LONGSIZE == 8
+       or              a1, t1
+       LONG_SLL                t1, a1, 32
+#endif
+       or              a1, t1
+1:
+#ifndef CONFIG_EVA
+FEXPORT(__bzero)
+#endif
+       __BUILD_BZERO LEGACY_MODE
+
+#ifdef CONFIG_EVA
+LEAF(__bzero)
+       __BUILD_BZERO EVA_MODE
+END(__bzero)
+#endif
index e362dcdc69d1617486ee5627f055ffc3075b5548..bef65c98df59cb95c0c4a56cfb4a73cbe7690237 100644 (file)
  *
  * Return 0 for error
  */
-LEAF(__strlen_user_asm)
+       .macro __BUILD_STRLEN_ASM func
+LEAF(__strlen_\func\()_asm)
        LONG_L          v0, TI_ADDR_LIMIT($28)  # pointer ok?
        and             v0, a0
-       bnez            v0, .Lfault
+       bnez            v0, .Lfault\@
 
-FEXPORT(__strlen_user_nocheck_asm)
+FEXPORT(__strlen_\func\()_nocheck_asm)
        move            v0, a0
-1:     EX(lbu, v1, (v0), .Lfault)
+.ifeqs "\func", "kernel"
+1:     EX(lbu, v1, (v0), .Lfault\@)
+.else
+1:     EX(lbue, v1, (v0), .Lfault\@)
+.endif
        PTR_ADDIU       v0, 1
        bnez            v1, 1b
        PTR_SUBU        v0, a0
        jr              ra
-       END(__strlen_user_asm)
+       END(__strlen_\func\()_asm)
 
-.Lfault:       move            v0, zero
+.Lfault\@:     move            v0, zero
        jr              ra
+       .endm
+
+#ifndef CONFIG_EVA
+       /* Set aliases */
+       .global __strlen_user_asm
+       .global __strlen_user_nocheck_asm
+       .set __strlen_user_asm, __strlen_kernel_asm
+       .set __strlen_user_nocheck_asm, __strlen_kernel_nocheck_asm
+#endif
+
+__BUILD_STRLEN_ASM kernel
+
+#ifdef CONFIG_EVA
+
+       .set push
+       .set eva
+__BUILD_STRLEN_ASM user
+       .set pop
+#endif
index 92870b6b53eaeee044a0424ef1cdd16870ec445a..d3301cd1e9a51b4c387a7f8c3d4a46e2632761e2 100644 (file)
  * it happens at most some bytes of the exceptions handlers will be copied.
  */
 
-LEAF(__strncpy_from_user_asm)
+       .macro __BUILD_STRNCPY_ASM func
+LEAF(__strncpy_from_\func\()_asm)
        LONG_L          v0, TI_ADDR_LIMIT($28)  # pointer ok?
        and             v0, a1
-       bnez            v0, .Lfault
+       bnez            v0, .Lfault\@
 
-FEXPORT(__strncpy_from_user_nocheck_asm)
+FEXPORT(__strncpy_from_\func\()_nocheck_asm)
        .set            noreorder
        move            t0, zero
        move            v1, a1
-1:     EX(lbu, v0, (v1), .Lfault)
+.ifeqs "\func","kernel"
+1:     EX(lbu, v0, (v1), .Lfault\@)
+.else
+1:     EX(lbue, v0, (v1), .Lfault\@)
+.endif
        PTR_ADDIU       v1, 1
        R10KCBARRIER(0(ra))
        beqz            v0, 2f
@@ -47,15 +52,34 @@ FEXPORT(__strncpy_from_user_nocheck_asm)
         PTR_ADDIU      a0, 1
 2:     PTR_ADDU        v0, a1, t0
        xor             v0, a1
-       bltz            v0, .Lfault
+       bltz            v0, .Lfault\@
         nop
        jr              ra                      # return n
         move           v0, t0
-       END(__strncpy_from_user_asm)
+       END(__strncpy_from_\func\()_asm)
 
-.Lfault: jr            ra
+.Lfault\@: jr          ra
          li            v0, -EFAULT
 
        .section        __ex_table,"a"
-       PTR             1b, .Lfault
+       PTR             1b, .Lfault\@
        .previous
+
+       .endm
+
+#ifndef CONFIG_EVA
+       /* Set aliases */
+       .global __strncpy_from_user_asm
+       .global __strncpy_from_user_nocheck_asm
+       .set __strncpy_from_user_asm, __strncpy_from_kernel_asm
+       .set __strncpy_from_user_nocheck_asm, __strncpy_from_kernel_nocheck_asm
+#endif
+
+__BUILD_STRNCPY_ASM kernel
+
+#ifdef CONFIG_EVA
+       .set push
+       .set eva
+__BUILD_STRNCPY_ASM user
+       .set pop
+#endif
index fcacea5e61f1e685891e8e4af7ec8f701c596fd2..f3af6995e2a6e9546175add0d38d0b555ecd1b61 100644 (file)
  *      bytes.  There's nothing secret there.  On 64-bit accessing beyond
  *      the maximum is a tad hairier ...
  */
-LEAF(__strnlen_user_asm)
+       .macro __BUILD_STRNLEN_ASM func
+LEAF(__strnlen_\func\()_asm)
        LONG_L          v0, TI_ADDR_LIMIT($28)  # pointer ok?
        and             v0, a0
-       bnez            v0, .Lfault
+       bnez            v0, .Lfault\@
 
-FEXPORT(__strnlen_user_nocheck_asm)
+FEXPORT(__strnlen_\func\()_nocheck_asm)
        move            v0, a0
        PTR_ADDU        a1, a0                  # stop pointer
 1:     beq             v0, a1, 1f              # limit reached?
-       EX(lb, t0, (v0), .Lfault)
+.ifeqs "\func", "kernel"
+       EX(lb, t0, (v0), .Lfault\@)
+.else
+       EX(lbe, t0, (v0), .Lfault\@)
+.endif
        PTR_ADDIU       v0, 1
        bnez            t0, 1b
 1:     PTR_SUBU        v0, a0
        jr              ra
-       END(__strnlen_user_asm)
+       END(__strnlen_\func\()_asm)
 
-.Lfault:
+.Lfault\@:
        move            v0, zero
        jr              ra
+       .endm
+
+#ifndef CONFIG_EVA
+       /* Set aliases */
+       .global __strnlen_user_asm
+       .global __strnlen_user_nocheck_asm
+       .set __strnlen_user_asm, __strnlen_kernel_asm
+       .set __strnlen_user_nocheck_asm, __strnlen_kernel_nocheck_asm
+#endif
+
+__BUILD_STRNLEN_ASM kernel
+
+#ifdef CONFIG_EVA
+
+       .set push
+       .set eva
+__BUILD_STRNLEN_ASM user
+       .set pop
+#endif
index 263beb9322a8218a79ffef69a9aba244feab9d03..7397be226a06a2a7d0481fac7b2dd476d4fe6d3a 100644 (file)
@@ -59,6 +59,36 @@ config LEMOTE_MACH2F
 
          These family machines include fuloong2f mini PC, yeeloong2f notebook,
          LingLoong allinone PC and so forth.
+
+config LEMOTE_MACH3A
+       bool "Lemote Loongson 3A family machines"
+       select ARCH_SPARSEMEM_ENABLE
+       select GENERIC_ISA_DMA_SUPPORT_BROKEN
+       select GENERIC_HARDIRQS_NO__DO_IRQ
+       select BOOT_ELF32
+       select BOARD_SCACHE
+       select CSRC_R4K
+       select CEVT_R4K
+       select CPU_HAS_WB
+       select HW_HAS_PCI
+       select ISA
+       select HT_PCI
+       select I8259
+       select IRQ_CPU
+       select NR_CPUS_DEFAULT_4
+       select SYS_HAS_CPU_LOONGSON3
+       select SYS_HAS_EARLY_PRINTK
+       select SYS_SUPPORTS_SMP
+       select SYS_SUPPORTS_HOTPLUG_CPU
+       select SYS_SUPPORTS_64BIT_KERNEL
+       select SYS_SUPPORTS_HIGHMEM
+       select SYS_SUPPORTS_LITTLE_ENDIAN
+       select LOONGSON_MC146818
+       select ZONE_DMA32
+       select LEFI_FIRMWARE_INTERFACE
+       help
+               Lemote Loongson 3A family machines utilize the 3A revision of
+               Loongson processor and RS780/SBX00 chipset.
 endchoice
 
 config CS5536
@@ -86,8 +116,25 @@ config LOONGSON_UART_BASE
        default y
        depends on EARLY_PRINTK || SERIAL_8250
 
+config IOMMU_HELPER
+       bool
+
+config NEED_SG_DMA_LENGTH
+       bool
+
+config SWIOTLB
+       bool "Soft IOMMU Support for All-Memory DMA"
+       default y
+       depends on CPU_LOONGSON3
+       select IOMMU_HELPER
+       select NEED_SG_DMA_LENGTH
+       select NEED_DMA_MAP_STATE
+
 config LOONGSON_MC146818
        bool
        default n
 
+config LEFI_FIRMWARE_INTERFACE
+       bool
+
 endif # MACH_LOONGSON
index 0dc0055754cd0d96d7f94782e72ed1660ebcef8e..7429994e76041c3b603d012067d03eb4d21cc972 100644 (file)
@@ -15,3 +15,9 @@ obj-$(CONFIG_LEMOTE_FULOONG2E)        += fuloong-2e/
 #
 
 obj-$(CONFIG_LEMOTE_MACH2F)  += lemote-2f/
+
+#
+# All Loongson-3 family machines
+#
+
+obj-$(CONFIG_CPU_LOONGSON3)  += loongson-3/
index 29692e5433b19d0d5f00d41bd61f0008242d46eb..6205372b6c2d7c0c4918c9f904c930c41eac9615 100644 (file)
@@ -30,3 +30,4 @@ platform-$(CONFIG_MACH_LOONGSON) += loongson/
 cflags-$(CONFIG_MACH_LOONGSON) += -I$(srctree)/arch/mips/include/asm/mach-loongson -mno-branch-likely
 load-$(CONFIG_LEMOTE_FULOONG2E) += 0xffffffff80100000
 load-$(CONFIG_LEMOTE_MACH2F) += 0xffffffff80200000
+load-$(CONFIG_CPU_LOONGSON3) += 0xffffffff80200000
index 9e4484ccbb036e91c0dc849421d24accc1d06729..0bb9cc9dc621f705dd77b139b0985c01213f9c8e 100644 (file)
@@ -26,3 +26,8 @@ obj-$(CONFIG_CS5536) += cs5536/
 #
 
 obj-$(CONFIG_LOONGSON_SUSPEND) += pm.o
+
+#
+# Big Memory (SWIOTLB) Support
+#
+obj-$(CONFIG_SWIOTLB) += dma-swiotlb.o
diff --git a/arch/mips/loongson/common/dma-swiotlb.c b/arch/mips/loongson/common/dma-swiotlb.c
new file mode 100644 (file)
index 0000000..c2be01f
--- /dev/null
@@ -0,0 +1,136 @@
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+#include <linux/swiotlb.h>
+#include <linux/bootmem.h>
+
+#include <asm/bootinfo.h>
+#include <boot_param.h>
+#include <dma-coherence.h>
+
+static void *loongson_dma_alloc_coherent(struct device *dev, size_t size,
+               dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
+{
+       void *ret;
+
+       if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
+               return ret;
+
+       /* ignore region specifiers */
+       gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
+
+#ifdef CONFIG_ISA
+       if (dev == NULL)
+               gfp |= __GFP_DMA;
+       else
+#endif
+#ifdef CONFIG_ZONE_DMA
+       if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
+               gfp |= __GFP_DMA;
+       else
+#endif
+#ifdef CONFIG_ZONE_DMA32
+       if (dev->coherent_dma_mask < DMA_BIT_MASK(40))
+               gfp |= __GFP_DMA32;
+       else
+#endif
+       ;
+       gfp |= __GFP_NORETRY;
+
+       ret = swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
+       mb();
+       return ret;
+}
+
+static void loongson_dma_free_coherent(struct device *dev, size_t size,
+               void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs)
+{
+       int order = get_order(size);
+
+       if (dma_release_from_coherent(dev, order, vaddr))
+               return;
+
+       swiotlb_free_coherent(dev, size, vaddr, dma_handle);
+}
+
+static dma_addr_t loongson_dma_map_page(struct device *dev, struct page *page,
+                               unsigned long offset, size_t size,
+                               enum dma_data_direction dir,
+                               struct dma_attrs *attrs)
+{
+       dma_addr_t daddr = swiotlb_map_page(dev, page, offset, size,
+                                       dir, attrs);
+       mb();
+       return daddr;
+}
+
+static int loongson_dma_map_sg(struct device *dev, struct scatterlist *sg,
+                               int nents, enum dma_data_direction dir,
+                               struct dma_attrs *attrs)
+{
+       int r = swiotlb_map_sg_attrs(dev, sg, nents, dir, NULL);
+       mb();
+
+       return r;
+}
+
+static void loongson_dma_sync_single_for_device(struct device *dev,
+                               dma_addr_t dma_handle, size_t size,
+                               enum dma_data_direction dir)
+{
+       swiotlb_sync_single_for_device(dev, dma_handle, size, dir);
+       mb();
+}
+
+static void loongson_dma_sync_sg_for_device(struct device *dev,
+                               struct scatterlist *sg, int nents,
+                               enum dma_data_direction dir)
+{
+       swiotlb_sync_sg_for_device(dev, sg, nents, dir);
+       mb();
+}
+
+static int loongson_dma_set_mask(struct device *dev, u64 mask)
+{
+       if (mask > DMA_BIT_MASK(loongson_sysconf.dma_mask_bits)) {
+               *dev->dma_mask = DMA_BIT_MASK(loongson_sysconf.dma_mask_bits);
+               return -EIO;
+       }
+
+       *dev->dma_mask = mask;
+
+       return 0;
+}
+
+dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+       return paddr;
+}
+
+phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
+{
+       return daddr;
+}
+
+static struct dma_map_ops loongson_dma_map_ops = {
+       .alloc = loongson_dma_alloc_coherent,
+       .free = loongson_dma_free_coherent,
+       .map_page = loongson_dma_map_page,
+       .unmap_page = swiotlb_unmap_page,
+       .map_sg = loongson_dma_map_sg,
+       .unmap_sg = swiotlb_unmap_sg_attrs,
+       .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
+       .sync_single_for_device = loongson_dma_sync_single_for_device,
+       .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
+       .sync_sg_for_device = loongson_dma_sync_sg_for_device,
+       .mapping_error = swiotlb_dma_mapping_error,
+       .dma_supported = swiotlb_dma_supported,
+       .set_dma_mask = loongson_dma_set_mask
+};
+
+void __init plat_swiotlb_setup(void)
+{
+       swiotlb_init(1);
+       mips_dma_map_ops = &loongson_dma_map_ops;
+}
index 0a18fcf2d3723973d87288877bae8785e6c1b2be..0c543eae49bfd059b3b6900062deede511c80bbc 100644 (file)
  * option) any later version.
  */
 #include <linux/module.h>
-
 #include <asm/bootinfo.h>
-
 #include <loongson.h>
+#include <boot_param.h>
 
-unsigned long cpu_clock_freq;
+u32 cpu_clock_freq;
 EXPORT_SYMBOL(cpu_clock_freq);
-unsigned long memsize, highmemsize;
+struct efi_memory_map_loongson *loongson_memmap;
+struct loongson_system_configuration loongson_sysconf;
 
 #define parse_even_earlier(res, option, p)                             \
 do {                                                                   \
        unsigned int tmp __maybe_unused;                                \
                                                                        \
        if (strncmp(option, (char *)p, strlen(option)) == 0)            \
-               tmp = strict_strtol((char *)p + strlen(option"="), 10, &res); \
+               tmp = kstrtou32((char *)p + strlen(option"="), 10, &res); \
 } while (0)
 
 void __init prom_init_env(void)
 {
        /* pmon passes arguments in 32bit pointers */
-       int *_prom_envp;
-       unsigned long bus_clock;
        unsigned int processor_id;
+
+#ifndef CONFIG_LEFI_FIRMWARE_INTERFACE
+       int *_prom_envp;
        long l;
 
        /* firmware arguments are initialized in head.S */
@@ -48,7 +49,6 @@ void __init prom_init_env(void)
 
        l = (long)*_prom_envp;
        while (l != 0) {
-               parse_even_earlier(bus_clock, "busclock", l);
                parse_even_earlier(cpu_clock_freq, "cpuclock", l);
                parse_even_earlier(memsize, "memsize", l);
                parse_even_earlier(highmemsize, "highmemsize", l);
@@ -57,8 +57,48 @@ void __init prom_init_env(void)
        }
        if (memsize == 0)
                memsize = 256;
-       if (bus_clock == 0)
-               bus_clock = 66000000;
+       pr_info("memsize=%u, highmemsize=%u\n", memsize, highmemsize);
+#else
+       struct boot_params *boot_p;
+       struct loongson_params *loongson_p;
+       struct efi_cpuinfo_loongson *ecpu;
+       struct irq_source_routing_table *eirq_source;
+
+       /* firmware arguments are initialized in head.S */
+       boot_p = (struct boot_params *)fw_arg2;
+       loongson_p = &(boot_p->efi.smbios.lp);
+
+       ecpu = (struct efi_cpuinfo_loongson *)
+               ((u64)loongson_p + loongson_p->cpu_offset);
+       eirq_source = (struct irq_source_routing_table *)
+               ((u64)loongson_p + loongson_p->irq_offset);
+       loongson_memmap = (struct efi_memory_map_loongson *)
+               ((u64)loongson_p + loongson_p->memory_offset);
+
+       cpu_clock_freq = ecpu->cpu_clock_freq;
+       loongson_sysconf.cputype = ecpu->cputype;
+       loongson_sysconf.nr_cpus = ecpu->nr_cpus;
+       if (ecpu->nr_cpus > NR_CPUS || ecpu->nr_cpus == 0)
+               loongson_sysconf.nr_cpus = NR_CPUS;
+
+       loongson_sysconf.pci_mem_start_addr = eirq_source->pci_mem_start_addr;
+       loongson_sysconf.pci_mem_end_addr = eirq_source->pci_mem_end_addr;
+       loongson_sysconf.pci_io_base = eirq_source->pci_io_start_addr;
+       loongson_sysconf.dma_mask_bits = eirq_source->dma_mask_bits;
+       if (loongson_sysconf.dma_mask_bits < 32 ||
+               loongson_sysconf.dma_mask_bits > 64)
+               loongson_sysconf.dma_mask_bits = 32;
+
+       loongson_sysconf.restart_addr = boot_p->reset_system.ResetWarm;
+       loongson_sysconf.poweroff_addr = boot_p->reset_system.Shutdown;
+       loongson_sysconf.suspend_addr = boot_p->reset_system.DoSuspend;
+
+       loongson_sysconf.ht_control_base = 0x90000EFDFB000000;
+       loongson_sysconf.vgabios_addr = boot_p->efi.smbios.vga_bios;
+       pr_debug("Shutdown Addr: %llx, Restart Addr: %llx, VBIOS Addr: %llx\n",
+               loongson_sysconf.poweroff_addr, loongson_sysconf.restart_addr,
+               loongson_sysconf.vgabios_addr);
+#endif
        if (cpu_clock_freq == 0) {
                processor_id = (&current_cpu_data)->processor_id;
                switch (processor_id & PRID_REV_MASK) {
@@ -68,12 +108,13 @@ void __init prom_init_env(void)
                case PRID_REV_LOONGSON2F:
                        cpu_clock_freq = 797000000;
                        break;
+               case PRID_REV_LOONGSON3A:
+                       cpu_clock_freq = 900000000;
+                       break;
                default:
                        cpu_clock_freq = 100000000;
                        break;
                }
        }
-
-       pr_info("busclock=%ld, cpuclock=%ld, memsize=%ld, highmemsize=%ld\n",
-               bus_clock, cpu_clock_freq, memsize, highmemsize);
+       pr_info("CpuClock = %u\n", cpu_clock_freq);
 }
index ae7af1fd5d59612b66d3fdff7dd11e6cf54058d6..f37fe5413b7305d17c38bcc057e66686948b53b5 100644 (file)
@@ -9,6 +9,7 @@
  */
 
 #include <linux/bootmem.h>
+#include <asm/smp-ops.h>
 
 #include <loongson.h>
 
@@ -17,10 +18,6 @@ unsigned long __maybe_unused _loongson_addrwincfg_base;
 
 void __init prom_init(void)
 {
-       /* init base address of io space */
-       set_io_port_base((unsigned long)
-               ioremap(LOONGSON_PCIIO_BASE, LOONGSON_PCIIO_SIZE));
-
 #ifdef CONFIG_CPU_SUPPORTS_ADDRWINCFG
        _loongson_addrwincfg_base = (unsigned long)
                ioremap(LOONGSON_ADDRWINCFG_BASE, LOONGSON_ADDRWINCFG_SIZE);
@@ -28,10 +25,16 @@ void __init prom_init(void)
 
        prom_init_cmdline();
        prom_init_env();
+
+       /* init base address of io space */
+       set_io_port_base((unsigned long)
+               ioremap(LOONGSON_PCIIO_BASE, LOONGSON_PCIIO_SIZE));
+
        prom_init_memory();
 
        /*init the uart base address */
        prom_init_uart_base();
+       register_smp_ops(&loongson3_smp_ops);
 }
 
 void __init prom_free_prom_memory(void)
index 4becd4f9ef2ea1a8d0e0c0301e5db0fcc40bacb7..1a4797984b8d3f0c1d03681d5c059095d9847125 100644 (file)
@@ -27,6 +27,10 @@ static const char *system_types[] = {
        [MACH_DEXXON_GDIUM2F10]         "dexxon-gdium-2f",
        [MACH_LEMOTE_NAS]               "lemote-nas-2f",
        [MACH_LEMOTE_LL2F]              "lemote-lynloong-2f",
+       [MACH_LEMOTE_A1004]             "lemote-3a-notebook-a1004",
+       [MACH_LEMOTE_A1101]             "lemote-3a-itx-a1101",
+       [MACH_LEMOTE_A1201]             "lemote-2gq-notebook-a1201",
+       [MACH_LEMOTE_A1205]             "lemote-2gq-aio-a1205",
        [MACH_LOONGSON_END]             NULL,
 };
 
index 8626a42f5b948ead7c7e9ffc9518179f0cc29006..b01d52473da81450d9486cf8da36f8b48cee082e 100644 (file)
 #include <asm/bootinfo.h>
 
 #include <loongson.h>
+#include <boot_param.h>
 #include <mem.h>
 #include <pci.h>
 
+#ifndef CONFIG_LEFI_FIRMWARE_INTERFACE
+
+u32 memsize, highmemsize;
+
 void __init prom_init_memory(void)
 {
        add_memory_region(0x0, (memsize << 20), BOOT_MEM_RAM);
@@ -49,6 +54,43 @@ void __init prom_init_memory(void)
 #endif /* !CONFIG_64BIT */
 }
 
+#else /* CONFIG_LEFI_FIRMWARE_INTERFACE */
+
+void __init prom_init_memory(void)
+{
+       int i;
+       u32 node_id;
+       u32 mem_type;
+
+       /* parse memory information */
+       for (i = 0; i < loongson_memmap->nr_map; i++) {
+               node_id = loongson_memmap->map[i].node_id;
+               mem_type = loongson_memmap->map[i].mem_type;
+
+               if (node_id == 0) {
+                       switch (mem_type) {
+                       case SYSTEM_RAM_LOW:
+                               add_memory_region(loongson_memmap->map[i].mem_start,
+                                       (u64)loongson_memmap->map[i].mem_size << 20,
+                                       BOOT_MEM_RAM);
+                               break;
+                       case SYSTEM_RAM_HIGH:
+                               add_memory_region(loongson_memmap->map[i].mem_start,
+                                       (u64)loongson_memmap->map[i].mem_size << 20,
+                                       BOOT_MEM_RAM);
+                               break;
+                       case MEM_RESERVED:
+                               add_memory_region(loongson_memmap->map[i].mem_start,
+                                       (u64)loongson_memmap->map[i].mem_size << 20,
+                                       BOOT_MEM_RESERVED);
+                               break;
+                       }
+               }
+       }
+}
+
+#endif /* CONFIG_LEFI_FIRMWARE_INTERFACE */
+
 /* override of arch/mips/mm/cache.c: __uncached_access */
 int __uncached_access(struct file *file, unsigned long addr)
 {
index fa77844597211519300b4f38150b3c6331414913..003ab4e618b38ebe4deaa9799a5b09d14b1f9ac9 100644 (file)
@@ -11,6 +11,7 @@
 
 #include <pci.h>
 #include <loongson.h>
+#include <boot_param.h>
 
 static struct resource loongson_pci_mem_resource = {
        .name   = "pci memory space",
@@ -82,7 +83,10 @@ static int __init pcibios_init(void)
        setup_pcimap();
 
        loongson_pci_controller.io_map_base = mips_io_port_base;
-
+#ifdef CONFIG_LEFI_FIRMWARE_INTERFACE
+       loongson_pci_mem_resource.start = loongson_sysconf.pci_mem_start_addr;
+       loongson_pci_mem_resource.end = loongson_sysconf.pci_mem_end_addr;
+#endif
        register_pci_controller(&loongson_pci_controller);
 
        return 0;
index 65bfbb5d06f442efbd6601600097f94124cc171c..a60715e11306b272bc0402a8f8351a2afec5fc48 100644 (file)
@@ -16,6 +16,7 @@
 #include <asm/reboot.h>
 
 #include <loongson.h>
+#include <boot_param.h>
 
 static inline void loongson_reboot(void)
 {
@@ -37,17 +38,37 @@ static inline void loongson_reboot(void)
 
 static void loongson_restart(char *command)
 {
+#ifndef CONFIG_LEFI_FIRMWARE_INTERFACE
        /* do preparation for reboot */
        mach_prepare_reboot();
 
        /* reboot via jumping to boot base address */
        loongson_reboot();
+#else
+       void (*fw_restart)(void) = (void *)loongson_sysconf.restart_addr;
+
+       fw_restart();
+       while (1) {
+               if (cpu_wait)
+                       cpu_wait();
+       }
+#endif
 }
 
 static void loongson_poweroff(void)
 {
+#ifndef CONFIG_LEFI_FIRMWARE_INTERFACE
        mach_prepare_shutdown();
        unreachable();
+#else
+       void (*fw_poweroff)(void) = (void *)loongson_sysconf.poweroff_addr;
+
+       fw_poweroff();
+       while (1) {
+               if (cpu_wait)
+                       cpu_wait();
+       }
+#endif
 }
 
 static void loongson_halt(void)
index 5f2b78ae97ccb574123827e7c24e999a09a741aa..bd2b7095b6dce30087c3a629a400b4108ff8404e 100644 (file)
 #include <loongson.h>
 #include <machine.h>
 
-#define PORT(int)                      \
+#define PORT(int, clk)                 \
 {                                                              \
        .irq            = int,                                  \
-       .uartclk        = 1843200,                              \
+       .uartclk        = clk,                                  \
        .iotype         = UPIO_PORT,                            \
        .flags          = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,    \
        .regshift       = 0,                                    \
 }
 
-#define PORT_M(int)                            \
+#define PORT_M(int, clk)                               \
 {                                                              \
        .irq            = MIPS_CPU_IRQ_BASE + (int),            \
-       .uartclk        = 3686400,                              \
+       .uartclk        = clk,                                  \
        .iotype         = UPIO_MEM,                             \
        .membase        = (void __iomem *)NULL,                 \
        .flags          = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,    \
 
 static struct plat_serial8250_port uart8250_data[][2] = {
        [MACH_LOONGSON_UNKNOWN]         {},
-       [MACH_LEMOTE_FL2E]              {PORT(4), {} },
-       [MACH_LEMOTE_FL2F]              {PORT(3), {} },
-       [MACH_LEMOTE_ML2F7]             {PORT_M(3), {} },
-       [MACH_LEMOTE_YL2F89]            {PORT_M(3), {} },
-       [MACH_DEXXON_GDIUM2F10]         {PORT_M(3), {} },
-       [MACH_LEMOTE_NAS]               {PORT_M(3), {} },
-       [MACH_LEMOTE_LL2F]              {PORT(3), {} },
+       [MACH_LEMOTE_FL2E]              {PORT(4, 1843200), {} },
+       [MACH_LEMOTE_FL2F]              {PORT(3, 1843200), {} },
+       [MACH_LEMOTE_ML2F7]             {PORT_M(3, 3686400), {} },
+       [MACH_LEMOTE_YL2F89]            {PORT_M(3, 3686400), {} },
+       [MACH_DEXXON_GDIUM2F10]         {PORT_M(3, 3686400), {} },
+       [MACH_LEMOTE_NAS]               {PORT_M(3, 3686400), {} },
+       [MACH_LEMOTE_LL2F]              {PORT(3, 1843200), {} },
+       [MACH_LEMOTE_A1004]             {PORT_M(2, 33177600), {} },
+       [MACH_LEMOTE_A1101]             {PORT_M(2, 25000000), {} },
+       [MACH_LEMOTE_A1201]             {PORT_M(2, 25000000), {} },
+       [MACH_LEMOTE_A1205]             {PORT_M(2, 25000000), {} },
        [MACH_LOONGSON_END]             {},
 };
 
index 8223f8acfd59b374a02e3b1cd36858f1b47c84b3..bb4ac922e47a900d91d6f3b8ba431b58ae4275cd 100644 (file)
@@ -18,9 +18,6 @@
 #include <linux/screen_info.h>
 #endif
 
-void (*__wbflush)(void);
-EXPORT_SYMBOL(__wbflush);
-
 static void wbflush_loongson(void)
 {
        asm(".set\tpush\n\t"
@@ -32,10 +29,11 @@ static void wbflush_loongson(void)
            ".set mips0\n\t");
 }
 
+void (*__wbflush)(void) = wbflush_loongson;
+EXPORT_SYMBOL(__wbflush);
+
 void __init plat_mem_setup(void)
 {
-       __wbflush = wbflush_loongson;
-
 #ifdef CONFIG_VT
 #if defined(CONFIG_VGA_CONSOLE)
        conswitchp = &vga_con;
index e192ad021edc56cba24b8a90e0be2796fedcce84..1e1eeea73fde7d79eb32e9cc983f63eec4053e49 100644 (file)
@@ -35,9 +35,16 @@ void prom_init_loongson_uart_base(void)
        case MACH_DEXXON_GDIUM2F10:
        case MACH_LEMOTE_NAS:
        default:
-               /* The CPU provided serial port */
+               /* The CPU provided serial port (LPC) */
                loongson_uart_base = LOONGSON_LIO1_BASE + 0x3f8;
                break;
+       case MACH_LEMOTE_A1004:
+       case MACH_LEMOTE_A1101:
+       case MACH_LEMOTE_A1201:
+       case MACH_LEMOTE_A1205:
+               /* The CPU provided serial port (CPU) */
+               loongson_uart_base = LOONGSON_REG_BASE + 0x1e0;
+               break;
        }
 
        _loongson_uart_base =
diff --git a/arch/mips/loongson/loongson-3/Makefile b/arch/mips/loongson/loongson-3/Makefile
new file mode 100644 (file)
index 0000000..70152b2
--- /dev/null
@@ -0,0 +1,6 @@
+#
+# Makefile for Loongson-3 family machines
+#
+obj-y                  += irq.o
+
+obj-$(CONFIG_SMP)      += smp.o
diff --git a/arch/mips/loongson/loongson-3/irq.c b/arch/mips/loongson/loongson-3/irq.c
new file mode 100644 (file)
index 0000000..f240828
--- /dev/null
@@ -0,0 +1,126 @@
+#include <loongson.h>
+#include <irq.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+
+#include <asm/irq_cpu.h>
+#include <asm/i8259.h>
+#include <asm/mipsregs.h>
+
+unsigned int ht_irq[] = {1, 3, 4, 5, 6, 7, 8, 12, 14, 15};
+
+static void ht_irqdispatch(void)
+{
+       unsigned int i, irq;
+
+       irq = LOONGSON_HT1_INT_VECTOR(0);
+       LOONGSON_HT1_INT_VECTOR(0) = irq; /* Acknowledge the IRQs */
+
+       for (i = 0; i < ARRAY_SIZE(ht_irq); i++) {
+               if (irq & (0x1 << ht_irq[i]))
+                       do_IRQ(ht_irq[i]);
+       }
+}
+
+void mach_irq_dispatch(unsigned int pending)
+{
+       if (pending & CAUSEF_IP7)
+               do_IRQ(LOONGSON_TIMER_IRQ);
+#if defined(CONFIG_SMP)
+       else if (pending & CAUSEF_IP6)
+               loongson3_ipi_interrupt(NULL);
+#endif
+       else if (pending & CAUSEF_IP3)
+               ht_irqdispatch();
+       else if (pending & CAUSEF_IP2)
+               do_IRQ(LOONGSON_UART_IRQ);
+       else {
+               pr_err("%s : spurious interrupt\n", __func__);
+               spurious_interrupt();
+       }
+}
+
+static struct irqaction cascade_irqaction = {
+       .handler = no_action,
+       .name = "cascade",
+};
+
+static inline void mask_loongson_irq(struct irq_data *d)
+{
+       clear_c0_status(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
+       irq_disable_hazard();
+
+       /* Workaround: UART IRQ may deliver to any core */
+       if (d->irq == LOONGSON_UART_IRQ) {
+               int cpu = smp_processor_id();
+
+               LOONGSON_INT_ROUTER_INTENCLR = 1 << 10;
+               LOONGSON_INT_ROUTER_LPC = 0x10 + (1<<cpu);
+       }
+}
+
+static inline void unmask_loongson_irq(struct irq_data *d)
+{
+       /* Workaround: UART IRQ may deliver to any core */
+       if (d->irq == LOONGSON_UART_IRQ) {
+               int cpu = smp_processor_id();
+
+               LOONGSON_INT_ROUTER_INTENSET = 1 << 10;
+               LOONGSON_INT_ROUTER_LPC = 0x10 + (1<<cpu);
+       }
+
+       set_c0_status(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
+       irq_enable_hazard();
+}
+
+ /* For MIPS IRQs which are shared by all cores */
+static struct irq_chip loongson_irq_chip = {
+       .name           = "Loongson",
+       .irq_ack        = mask_loongson_irq,
+       .irq_mask       = mask_loongson_irq,
+       .irq_mask_ack   = mask_loongson_irq,
+       .irq_unmask     = unmask_loongson_irq,
+       .irq_eoi        = unmask_loongson_irq,
+};
+
+void irq_router_init(void)
+{
+       int i;
+
+       /* route LPC int to cpu core0 int 0 */
+       LOONGSON_INT_ROUTER_LPC = LOONGSON_INT_CORE0_INT0;
+       /* route HT1 int0 ~ int7 to cpu core0 INT1*/
+       for (i = 0; i < 8; i++)
+               LOONGSON_INT_ROUTER_HT1(i) = LOONGSON_INT_CORE0_INT1;
+       /* enable HT1 interrupt */
+       LOONGSON_HT1_INTN_EN(0) = 0xffffffff;
+       /* enable router interrupt intenset */
+       LOONGSON_INT_ROUTER_INTENSET =
+               LOONGSON_INT_ROUTER_INTEN | (0xffff << 16) | 0x1 << 10;
+}
+
+void __init mach_init_irq(void)
+{
+       clear_c0_status(ST0_IM | ST0_BEV);
+
+       irq_router_init();
+       mips_cpu_irq_init();
+       init_i8259_irqs();
+       irq_set_chip_and_handler(LOONGSON_UART_IRQ,
+                       &loongson_irq_chip, handle_level_irq);
+
+       /* setup HT1 irq */
+       setup_irq(LOONGSON_HT1_IRQ, &cascade_irqaction);
+
+       set_c0_status(STATUSF_IP2 | STATUSF_IP6);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+void fixup_irqs(void)
+{
+       irq_cpu_offline();
+       clear_c0_status(ST0_IM);
+}
+
+#endif
diff --git a/arch/mips/loongson/loongson-3/smp.c b/arch/mips/loongson/loongson-3/smp.c
new file mode 100644 (file)
index 0000000..c665fe1
--- /dev/null
@@ -0,0 +1,443 @@
+/*
+ * Copyright (C) 2010, 2011, 2012, Lemote, Inc.
+ * Author: Chen Huacai, chenhc@lemote.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/cpu.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/cpufreq.h>
+#include <asm/processor.h>
+#include <asm/time.h>
+#include <asm/clock.h>
+#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
+#include <loongson.h>
+
+#include "smp.h"
+
+DEFINE_PER_CPU(int, cpu_state);
+DEFINE_PER_CPU(uint32_t, core0_c0count);
+
+/* read a 32bit value from ipi register */
+#define loongson3_ipi_read32(addr) readl(addr)
+/* read a 64bit value from ipi register */
+#define loongson3_ipi_read64(addr) readq(addr)
+/* write a 32bit value to ipi register */
+#define loongson3_ipi_write32(action, addr)    \
+       do {                                    \
+               writel(action, addr);           \
+               __wbflush();                    \
+       } while (0)
+/* write a 64bit value to ipi register */
+#define loongson3_ipi_write64(action, addr)    \
+       do {                                    \
+               writeq(action, addr);           \
+               __wbflush();                    \
+       } while (0)
+
+static void *ipi_set0_regs[] = {
+       (void *)(SMP_CORE_GROUP0_BASE + SMP_CORE0_OFFSET + SET0),
+       (void *)(SMP_CORE_GROUP0_BASE + SMP_CORE1_OFFSET + SET0),
+       (void *)(SMP_CORE_GROUP0_BASE + SMP_CORE2_OFFSET + SET0),
+       (void *)(SMP_CORE_GROUP0_BASE + SMP_CORE3_OFFSET + SET0),
+       (void *)(SMP_CORE_GROUP1_BASE + SMP_CORE0_OFFSET + SET0),
+       (void *)(SMP_CORE_GROUP1_BASE + SMP_CORE1_OFFSET + SET0),
+       (void *)(SMP_CORE_GROUP1_BASE + SMP_CORE2_OFFSET + SET0),
+       (void *)(SMP_CORE_GROUP1_BASE + SMP_CORE3_OFFSET + SET0),
+       (void *)(SMP_CORE_GROUP2_BASE + SMP_CORE0_OFFSET + SET0),
+       (void *)(SMP_CORE_GROUP2_BASE + SMP_CORE1_OFFSET + SET0),
+       (void *)(SMP_CORE_GROUP2_BASE + SMP_CORE2_OFFSET + SET0),
+       (void *)(SMP_CORE_GROUP2_BASE + SMP_CORE3_OFFSET + SET0),
+       (void *)(SMP_CORE_GROUP3_BASE + SMP_CORE0_OFFSET + SET0),
+       (void *)(SMP_CORE_GROUP3_BASE + SMP_CORE1_OFFSET + SET0),
+       (void *)(SMP_CORE_GROUP3_BASE + SMP_CORE2_OFFSET + SET0),
+       (void *)(SMP_CORE_GROUP3_BASE + SMP_CORE3_OFFSET + SET0),
+};
+
+static void *ipi_clear0_regs[] = {
+       (void *)(SMP_CORE_GROUP0_BASE + SMP_CORE0_OFFSET + CLEAR0),
+       (void *)(SMP_CORE_GROUP0_BASE + SMP_CORE1_OFFSET + CLEAR0),
+       (void *)(SMP_CORE_GROUP0_BASE + SMP_CORE2_OFFSET + CLEAR0),
+       (void *)(SMP_CORE_GROUP0_BASE + SMP_CORE3_OFFSET + CLEAR0),
+       (void *)(SMP_CORE_GROUP1_BASE + SMP_CORE0_OFFSET + CLEAR0),
+       (void *)(SMP_CORE_GROUP1_BASE + SMP_CORE1_OFFSET + CLEAR0),
+       (void *)(SMP_CORE_GROUP1_BASE + SMP_CORE2_OFFSET + CLEAR0),
+       (void *)(SMP_CORE_GROUP1_BASE + SMP_CORE3_OFFSET + CLEAR0),
+       (void *)(SMP_CORE_GROUP2_BASE + SMP_CORE0_OFFSET + CLEAR0),
+       (void *)(SMP_CORE_GROUP2_BASE + SMP_CORE1_OFFSET + CLEAR0),
+       (void *)(SMP_CORE_GROUP2_BASE + SMP_CORE2_OFFSET + CLEAR0),
+       (void *)(SMP_CORE_GROUP2_BASE + SMP_CORE3_OFFSET + CLEAR0),
+       (void *)(SMP_CORE_GROUP3_BASE + SMP_CORE0_OFFSET + CLEAR0),
+       (void *)(SMP_CORE_GROUP3_BASE + SMP_CORE1_OFFSET + CLEAR0),
+       (void *)(SMP_CORE_GROUP3_BASE + SMP_CORE2_OFFSET + CLEAR0),
+       (void *)(SMP_CORE_GROUP3_BASE + SMP_CORE3_OFFSET + CLEAR0),
+};
+
+static void *ipi_status0_regs[] = {
+       (void *)(SMP_CORE_GROUP0_BASE + SMP_CORE0_OFFSET + STATUS0),
+       (void *)(SMP_CORE_GROUP0_BASE + SMP_CORE1_OFFSET + STATUS0),
+       (void *)(SMP_CORE_GROUP0_BASE + SMP_CORE2_OFFSET + STATUS0),
+       (void *)(SMP_CORE_GROUP0_BASE + SMP_CORE3_OFFSET + STATUS0),
+       (void *)(SMP_CORE_GROUP1_BASE + SMP_CORE0_OFFSET + STATUS0),
+       (void *)(SMP_CORE_GROUP1_BASE + SMP_CORE1_OFFSET + STATUS0),
+       (void *)(SMP_CORE_GROUP1_BASE + SMP_CORE2_OFFSET + STATUS0),
+       (void *)(SMP_CORE_GROUP1_BASE + SMP_CORE3_OFFSET + STATUS0),
+       (void *)(SMP_CORE_GROUP2_BASE + SMP_CORE0_OFFSET + STATUS0),
+       (void *)(SMP_CORE_GROUP2_BASE + SMP_CORE1_OFFSET + STATUS0),
+       (void *)(SMP_CORE_GROUP2_BASE + SMP_CORE2_OFFSET + STATUS0),
+       (void *)(SMP_CORE_GROUP2_BASE + SMP_CORE3_OFFSET + STATUS0),
+       (void *)(SMP_CORE_GROUP3_BASE + SMP_CORE0_OFFSET + STATUS0),
+       (void *)(SMP_CORE_GROUP3_BASE + SMP_CORE1_OFFSET + STATUS0),
+       (void *)(SMP_CORE_GROUP3_BASE + SMP_CORE2_OFFSET + STATUS0),
+       (void *)(SMP_CORE_GROUP3_BASE + SMP_CORE3_OFFSET + STATUS0),
+};
+
+static void *ipi_en0_regs[] = {
+       (void *)(SMP_CORE_GROUP0_BASE + SMP_CORE0_OFFSET + EN0),
+       (void *)(SMP_CORE_GROUP0_BASE + SMP_CORE1_OFFSET + EN0),
+       (void *)(SMP_CORE_GROUP0_BASE + SMP_CORE2_OFFSET + EN0),
+       (void *)(SMP_CORE_GROUP0_BASE + SMP_CORE3_OFFSET + EN0),
+       (void *)(SMP_CORE_GROUP1_BASE + SMP_CORE0_OFFSET + EN0),
+       (void *)(SMP_CORE_GROUP1_BASE + SMP_CORE1_OFFSET + EN0),
+       (void *)(SMP_CORE_GROUP1_BASE + SMP_CORE2_OFFSET + EN0),
+       (void *)(SMP_CORE_GROUP1_BASE + SMP_CORE3_OFFSET + EN0),
+       (void *)(SMP_CORE_GROUP2_BASE + SMP_CORE0_OFFSET + EN0),
+       (void *)(SMP_CORE_GROUP2_BASE + SMP_CORE1_OFFSET + EN0),
+       (void *)(SMP_CORE_GROUP2_BASE + SMP_CORE2_OFFSET + EN0),
+       (void *)(SMP_CORE_GROUP2_BASE + SMP_CORE3_OFFSET + EN0),
+       (void *)(SMP_CORE_GROUP3_BASE + SMP_CORE0_OFFSET + EN0),
+       (void *)(SMP_CORE_GROUP3_BASE + SMP_CORE1_OFFSET + EN0),
+       (void *)(SMP_CORE_GROUP3_BASE + SMP_CORE2_OFFSET + EN0),
+       (void *)(SMP_CORE_GROUP3_BASE + SMP_CORE3_OFFSET + EN0),
+};
+
+static void *ipi_mailbox_buf[] = {
+       (void *)(SMP_CORE_GROUP0_BASE + SMP_CORE0_OFFSET + BUF),
+       (void *)(SMP_CORE_GROUP0_BASE + SMP_CORE1_OFFSET + BUF),
+       (void *)(SMP_CORE_GROUP0_BASE + SMP_CORE2_OFFSET + BUF),
+       (void *)(SMP_CORE_GROUP0_BASE + SMP_CORE3_OFFSET + BUF),
+       (void *)(SMP_CORE_GROUP1_BASE + SMP_CORE0_OFFSET + BUF),
+       (void *)(SMP_CORE_GROUP1_BASE + SMP_CORE1_OFFSET + BUF),
+       (void *)(SMP_CORE_GROUP1_BASE + SMP_CORE2_OFFSET + BUF),
+       (void *)(SMP_CORE_GROUP1_BASE + SMP_CORE3_OFFSET + BUF),
+       (void *)(SMP_CORE_GROUP2_BASE + SMP_CORE0_OFFSET + BUF),
+       (void *)(SMP_CORE_GROUP2_BASE + SMP_CORE1_OFFSET + BUF),
+       (void *)(SMP_CORE_GROUP2_BASE + SMP_CORE2_OFFSET + BUF),
+       (void *)(SMP_CORE_GROUP2_BASE + SMP_CORE3_OFFSET + BUF),
+       (void *)(SMP_CORE_GROUP3_BASE + SMP_CORE0_OFFSET + BUF),
+       (void *)(SMP_CORE_GROUP3_BASE + SMP_CORE1_OFFSET + BUF),
+       (void *)(SMP_CORE_GROUP3_BASE + SMP_CORE2_OFFSET + BUF),
+       (void *)(SMP_CORE_GROUP3_BASE + SMP_CORE3_OFFSET + BUF),
+};
+
+/*
+ * Simple enough, just poke the appropriate ipi register
+ */
+static void loongson3_send_ipi_single(int cpu, unsigned int action)
+{
+       loongson3_ipi_write32((u32)action, ipi_set0_regs[cpu]);
+}
+
+static void
+loongson3_send_ipi_mask(const struct cpumask *mask, unsigned int action)
+{
+       unsigned int i;
+
+       for_each_cpu(i, mask)
+               loongson3_ipi_write32((u32)action, ipi_set0_regs[i]);
+}
+
+void loongson3_ipi_interrupt(struct pt_regs *regs)
+{
+       int i, cpu = smp_processor_id();
+       unsigned int action, c0count;
+
+       /* Load the ipi register to figure out what we're supposed to do */
+       action = loongson3_ipi_read32(ipi_status0_regs[cpu]);
+
+       /* Clear the ipi register to clear the interrupt */
+       loongson3_ipi_write32((u32)action, ipi_clear0_regs[cpu]);
+
+       if (action & SMP_RESCHEDULE_YOURSELF)
+               scheduler_ipi();
+
+       if (action & SMP_CALL_FUNCTION)
+               smp_call_function_interrupt();
+
+       if (action & SMP_ASK_C0COUNT) {
+               BUG_ON(cpu != 0);
+               c0count = read_c0_count();
+               for (i = 1; i < loongson_sysconf.nr_cpus; i++)
+                       per_cpu(core0_c0count, i) = c0count;
+       }
+}
+
+#define MAX_LOOPS 1111
+/*
+ * SMP init and finish on secondary CPUs
+ */
+static void loongson3_init_secondary(void)
+{
+       int i;
+       uint32_t initcount;
+       unsigned int cpu = smp_processor_id();
+       unsigned int imask = STATUSF_IP7 | STATUSF_IP6 |
+                            STATUSF_IP3 | STATUSF_IP2;
+
+       /* Set interrupt mask, but don't enable */
+       change_c0_status(ST0_IM, imask);
+
+       for (i = 0; i < loongson_sysconf.nr_cpus; i++)
+               loongson3_ipi_write32(0xffffffff, ipi_en0_regs[i]);
+
+       per_cpu(cpu_state, cpu) = CPU_ONLINE;
+
+       i = 0;
+       __get_cpu_var(core0_c0count) = 0;
+       loongson3_send_ipi_single(0, SMP_ASK_C0COUNT);
+       while (!__get_cpu_var(core0_c0count)) {
+               i++;
+               cpu_relax();
+       }
+
+       if (i > MAX_LOOPS)
+               i = MAX_LOOPS;
+       initcount = __get_cpu_var(core0_c0count) + i;
+       write_c0_count(initcount);
+}
+
+static void loongson3_smp_finish(void)
+{
+       write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ);
+       local_irq_enable();
+       loongson3_ipi_write64(0,
+                       (void *)(ipi_mailbox_buf[smp_processor_id()]+0x0));
+       pr_info("CPU#%d finished, CP0_ST=%x\n",
+                       smp_processor_id(), read_c0_status());
+}
+
+static void __init loongson3_smp_setup(void)
+{
+       int i, num;
+
+       init_cpu_possible(cpu_none_mask);
+       set_cpu_possible(0, true);
+
+       __cpu_number_map[0] = 0;
+       __cpu_logical_map[0] = 0;
+
+       /* For unified kernel, NR_CPUS is the maximum possible value,
+        * loongson_sysconf.nr_cpus is the actually present value */
+       for (i = 1, num = 0; i < loongson_sysconf.nr_cpus; i++) {
+               set_cpu_possible(i, true);
+               __cpu_number_map[i] = ++num;
+               __cpu_logical_map[num] = i;
+       }
+       pr_info("Detected %i available secondary CPU(s)\n", num);
+}
+
+static void __init loongson3_prepare_cpus(unsigned int max_cpus)
+{
+       init_cpu_present(cpu_possible_mask);
+       per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
+}
+
+/*
+ * Setup the PC, SP, and GP of a secondary processor and start it running!
+ */
+static void loongson3_boot_secondary(int cpu, struct task_struct *idle)
+{
+       unsigned long startargs[4];
+
+       pr_info("Booting CPU#%d...\n", cpu);
+
+       /* startargs[] are initial PC, SP and GP for secondary CPU */
+       startargs[0] = (unsigned long)&smp_bootstrap;
+       startargs[1] = (unsigned long)__KSTK_TOS(idle);
+       startargs[2] = (unsigned long)task_thread_info(idle);
+       startargs[3] = 0;
+
+       pr_debug("CPU#%d, func_pc=%lx, sp=%lx, gp=%lx\n",
+                       cpu, startargs[0], startargs[1], startargs[2]);
+
+       loongson3_ipi_write64(startargs[3], (void *)(ipi_mailbox_buf[cpu]+0x18));
+       loongson3_ipi_write64(startargs[2], (void *)(ipi_mailbox_buf[cpu]+0x10));
+       loongson3_ipi_write64(startargs[1], (void *)(ipi_mailbox_buf[cpu]+0x8));
+       loongson3_ipi_write64(startargs[0], (void *)(ipi_mailbox_buf[cpu]+0x0));
+}
+
+/*
+ * Final cleanup after all secondaries booted
+ */
+static void __init loongson3_cpus_done(void)
+{
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+static int loongson3_cpu_disable(void)
+{
+       unsigned long flags;
+       unsigned int cpu = smp_processor_id();
+
+       if (cpu == 0)
+               return -EBUSY;
+
+       set_cpu_online(cpu, false);
+       cpu_clear(cpu, cpu_callin_map);
+       local_irq_save(flags);
+       fixup_irqs();
+       local_irq_restore(flags);
+       flush_cache_all();
+       local_flush_tlb_all();
+
+       return 0;
+}
+
+
+static void loongson3_cpu_die(unsigned int cpu)
+{
+       while (per_cpu(cpu_state, cpu) != CPU_DEAD)
+               cpu_relax();
+
+       mb();
+}
+
+/* To shutdown a core in Loongson 3, the target core should go to CKSEG1 and
+ * flush all L1 entries at first. Then, another core (usually Core 0) can
+ * safely disable the clock of the target core. loongson3_play_dead() is
+ * called via CKSEG1 (uncached and unmapped) */
+static void loongson3_play_dead(int *state_addr)
+{
+       register int val;
+       register long cpuid, core, node, count;
+       register void *addr, *base, *initfunc;
+
+       __asm__ __volatile__(
+               "   .set push                     \n"
+               "   .set noreorder                \n"
+               "   li %[addr], 0x80000000        \n" /* KSEG0 */
+               "1: cache 0, 0(%[addr])           \n" /* flush L1 ICache */
+               "   cache 0, 1(%[addr])           \n"
+               "   cache 0, 2(%[addr])           \n"
+               "   cache 0, 3(%[addr])           \n"
+               "   cache 1, 0(%[addr])           \n" /* flush L1 DCache */
+               "   cache 1, 1(%[addr])           \n"
+               "   cache 1, 2(%[addr])           \n"
+               "   cache 1, 3(%[addr])           \n"
+               "   addiu %[sets], %[sets], -1    \n"
+               "   bnez  %[sets], 1b             \n"
+               "   addiu %[addr], %[addr], 0x20  \n"
+               "   li    %[val], 0x7             \n" /* *state_addr = CPU_DEAD; */
+               "   sw    %[val], (%[state_addr]) \n"
+               "   sync                          \n"
+               "   cache 21, (%[state_addr])     \n" /* flush entry of *state_addr */
+               "   .set pop                      \n"
+               : [addr] "=&r" (addr), [val] "=&r" (val)
+               : [state_addr] "r" (state_addr),
+                 [sets] "r" (cpu_data[smp_processor_id()].dcache.sets));
+
+       __asm__ __volatile__(
+               "   .set push                         \n"
+               "   .set noreorder                    \n"
+               "   .set mips64                       \n"
+               "   mfc0  %[cpuid], $15, 1            \n"
+               "   andi  %[cpuid], 0x3ff             \n"
+               "   dli   %[base], 0x900000003ff01000 \n"
+               "   andi  %[core], %[cpuid], 0x3      \n"
+               "   sll   %[core], 8                  \n" /* get core id */
+               "   or    %[base], %[base], %[core]   \n"
+               "   andi  %[node], %[cpuid], 0xc      \n"
+               "   dsll  %[node], 42                 \n" /* get node id */
+               "   or    %[base], %[base], %[node]   \n"
+               "1: li    %[count], 0x100             \n" /* wait for init loop */
+               "2: bnez  %[count], 2b                \n" /* limit mailbox access */
+               "   addiu %[count], -1                \n"
+               "   ld    %[initfunc], 0x20(%[base])  \n" /* get PC via mailbox */
+               "   beqz  %[initfunc], 1b             \n"
+               "   nop                               \n"
+               "   ld    $sp, 0x28(%[base])          \n" /* get SP via mailbox */
+               "   ld    $gp, 0x30(%[base])          \n" /* get GP via mailbox */
+               "   ld    $a1, 0x38(%[base])          \n"
+               "   jr    %[initfunc]                 \n" /* jump to initial PC */
+               "   nop                               \n"
+               "   .set pop                          \n"
+               : [core] "=&r" (core), [node] "=&r" (node),
+                 [base] "=&r" (base), [cpuid] "=&r" (cpuid),
+                 [count] "=&r" (count), [initfunc] "=&r" (initfunc)
+               : /* No Input */
+               : "a1");
+}
+
+void play_dead(void)
+{
+       int *state_addr;
+       unsigned int cpu = smp_processor_id();
+       void (*play_dead_at_ckseg1)(int *);
+
+       idle_task_exit();
+       play_dead_at_ckseg1 =
+               (void *)CKSEG1ADDR((unsigned long)loongson3_play_dead);
+       state_addr = &per_cpu(cpu_state, cpu);
+       mb();
+       play_dead_at_ckseg1(state_addr);
+}
+
+#define CPU_POST_DEAD_FROZEN   (CPU_POST_DEAD | CPU_TASKS_FROZEN)
+static int loongson3_cpu_callback(struct notifier_block *nfb,
+       unsigned long action, void *hcpu)
+{
+       unsigned int cpu = (unsigned long)hcpu;
+
+       switch (action) {
+       case CPU_POST_DEAD:
+       case CPU_POST_DEAD_FROZEN:
+               pr_info("Disable clock for CPU#%d\n", cpu);
+               LOONGSON_CHIPCFG0 &= ~(1 << (12 + cpu));
+               break;
+       case CPU_UP_PREPARE:
+       case CPU_UP_PREPARE_FROZEN:
+               pr_info("Enable clock for CPU#%d\n", cpu);
+               LOONGSON_CHIPCFG0 |= 1 << (12 + cpu);
+               break;
+       }
+
+       return NOTIFY_OK;
+}
+
+static int register_loongson3_notifier(void)
+{
+       hotcpu_notifier(loongson3_cpu_callback, 0);
+       return 0;
+}
+early_initcall(register_loongson3_notifier);
+
+#endif
+
+struct plat_smp_ops loongson3_smp_ops = {
+       .send_ipi_single = loongson3_send_ipi_single,
+       .send_ipi_mask = loongson3_send_ipi_mask,
+       .init_secondary = loongson3_init_secondary,
+       .smp_finish = loongson3_smp_finish,
+       .cpus_done = loongson3_cpus_done,
+       .boot_secondary = loongson3_boot_secondary,
+       .smp_setup = loongson3_smp_setup,
+       .prepare_cpus = loongson3_prepare_cpus,
+#ifdef CONFIG_HOTPLUG_CPU
+       .cpu_disable = loongson3_cpu_disable,
+       .cpu_die = loongson3_cpu_die,
+#endif
+};
diff --git a/arch/mips/loongson/loongson-3/smp.h b/arch/mips/loongson/loongson-3/smp.h
new file mode 100644 (file)
index 0000000..3453e8c
--- /dev/null
@@ -0,0 +1,29 @@
+#ifndef __LOONGSON_SMP_H_
+#define __LOONGSON_SMP_H_
+
+/* for Loongson-3A smp support */
+
+/* up to 4 groups (nodes) in the NUMA case */
+#define  SMP_CORE_GROUP0_BASE    0x900000003ff01000
+#define  SMP_CORE_GROUP1_BASE    0x900010003ff01000
+#define  SMP_CORE_GROUP2_BASE    0x900020003ff01000
+#define  SMP_CORE_GROUP3_BASE    0x900030003ff01000
+
+/* 4 cores in each group (node) */
+#define  SMP_CORE0_OFFSET  0x000
+#define  SMP_CORE1_OFFSET  0x100
+#define  SMP_CORE2_OFFSET  0x200
+#define  SMP_CORE3_OFFSET  0x300
+
+/* ipi registers offsets */
+#define  STATUS0  0x00
+#define  EN0      0x04
+#define  SET0     0x08
+#define  CLEAR0   0x0c
+#define  STATUS1  0x10
+#define  MASK1    0x14
+#define  SET1     0x18
+#define  CLEAR1   0x1c
+#define  BUF      0x20
+
+#endif
index 0b4e2e38294bf174132fcb4036cc958a370cfbec..7b3c9acae6895120e751dc4efe19b7ee8a6572ea 100644 (file)
@@ -876,20 +876,43 @@ static inline int cop1_64bit(struct pt_regs *xcp)
 #endif
 }
 
-#define SIFROMREG(si, x) ((si) = cop1_64bit(xcp) || !(x & 1) ? \
-                       (int)ctx->fpr[x] : (int)(ctx->fpr[x & ~1] >> 32))
-
-#define SITOREG(si, x) (ctx->fpr[x & ~(cop1_64bit(xcp) == 0)] = \
-                       cop1_64bit(xcp) || !(x & 1) ? \
-                       ctx->fpr[x & ~1] >> 32 << 32 | (u32)(si) : \
-                       ctx->fpr[x & ~1] << 32 >> 32 | (u64)(si) << 32)
-
-#define SIFROMHREG(si, x)      ((si) = (int)(ctx->fpr[x] >> 32))
-#define SITOHREG(si, x)                (ctx->fpr[x] = \
-                               ctx->fpr[x] << 32 >> 32 | (u64)(si) << 32)
-
-#define DIFROMREG(di, x) ((di) = ctx->fpr[x & ~(cop1_64bit(xcp) == 0)])
-#define DITOREG(di, x) (ctx->fpr[x & ~(cop1_64bit(xcp) == 0)] = (di))
+#define SIFROMREG(si, x) do {                                          \
+       if (cop1_64bit(xcp))                                            \
+               (si) = get_fpr32(&ctx->fpr[x], 0);                      \
+       else                                                            \
+               (si) = get_fpr32(&ctx->fpr[(x) & ~1], (x) & 1);         \
+} while (0)
+
+#define SITOREG(si, x) do {                                            \
+       if (cop1_64bit(xcp)) {                                          \
+               unsigned i;                                             \
+               set_fpr32(&ctx->fpr[x], 0, si);                         \
+               for (i = 1; i < ARRAY_SIZE(ctx->fpr[x].val32); i++)     \
+                       set_fpr32(&ctx->fpr[x], i, 0);                  \
+       } else {                                                        \
+               set_fpr32(&ctx->fpr[(x) & ~1], (x) & 1, si);            \
+       }                                                               \
+} while (0)
+
+#define SIFROMHREG(si, x)      ((si) = get_fpr32(&ctx->fpr[x], 1))
+
+#define SITOHREG(si, x) do {                                           \
+       unsigned i;                                                     \
+       set_fpr32(&ctx->fpr[x], 1, si);                                 \
+       for (i = 2; i < ARRAY_SIZE(ctx->fpr[x].val32); i++)             \
+               set_fpr32(&ctx->fpr[x], i, 0);                          \
+} while (0)
+
+#define DIFROMREG(di, x) \
+       ((di) = get_fpr64(&ctx->fpr[(x) & ~(cop1_64bit(xcp) == 0)], 0))
+
+#define DITOREG(di, x) do {                                            \
+       unsigned fpr, i;                                                \
+       fpr = (x) & ~(cop1_64bit(xcp) == 0);                            \
+       set_fpr64(&ctx->fpr[fpr], 0, di);                               \
+       for (i = 1; i < ARRAY_SIZE(ctx->fpr[x].val64); i++)             \
+               set_fpr64(&ctx->fpr[fpr], i, 0);                        \
+} while (0)
 
 #define SPFROMREG(sp, x) SIFROMREG((sp).bits, x)
 #define SPTOREG(sp, x) SITOREG((sp).bits, x)
@@ -1960,15 +1983,18 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
 
 #if defined(__mips64)
        case l_fmt:{
+               u64 bits;
+               DIFROMREG(bits, MIPSInst_FS(ir));
+
                switch (MIPSInst_FUNC(ir)) {
                case fcvts_op:
                        /* convert long to single precision real */
-                       rv.s = ieee754sp_flong(ctx->fpr[MIPSInst_FS(ir)]);
+                       rv.s = ieee754sp_flong(bits);
                        rfmt = s_fmt;
                        goto copcsr;
                case fcvtd_op:
                        /* convert long to double precision real */
-                       rv.d = ieee754dp_flong(ctx->fpr[MIPSInst_FS(ir)]);
+                       rv.d = ieee754dp_flong(bits);
                        rfmt = d_fmt;
                        goto copcsr;
                default:
index 3aeae07ed5b8db9eef5f697b93ebcef3e6856cf3..eb58a85b3157584c267d583554670afa2b12cf01 100644 (file)
@@ -40,78 +40,6 @@ void fpu_emulator_init_fpu(void)
        }
 
        current->thread.fpu.fcr31 = 0;
-       for (i = 0; i < 32; i++) {
-               current->thread.fpu.fpr[i] = SIGNALLING_NAN;
-       }
-}
-
-
-/*
- * Emulator context save/restore to/from a signal context
- * presumed to be on the user stack, and therefore accessed
- * with appropriate macros from uaccess.h
- */
-
-int fpu_emulator_save_context(struct sigcontext __user *sc)
-{
-       int i;
-       int err = 0;
-
-       for (i = 0; i < 32; i++) {
-               err |=
-                   __put_user(current->thread.fpu.fpr[i], &sc->sc_fpregs[i]);
-       }
-       err |= __put_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr);
-
-       return err;
-}
-
-int fpu_emulator_restore_context(struct sigcontext __user *sc)
-{
-       int i;
-       int err = 0;
-
-       for (i = 0; i < 32; i++) {
-               err |=
-                   __get_user(current->thread.fpu.fpr[i], &sc->sc_fpregs[i]);
-       }
-       err |= __get_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr);
-
-       return err;
-}
-
-#ifdef CONFIG_64BIT
-/*
- * This is the o32 version
- */
-
-int fpu_emulator_save_context32(struct sigcontext32 __user *sc)
-{
-       int i;
-       int err = 0;
-       int inc = test_thread_flag(TIF_32BIT_FPREGS) ? 2 : 1;
-
-       for (i = 0; i < 32; i += inc) {
-               err |=
-                   __put_user(current->thread.fpu.fpr[i], &sc->sc_fpregs[i]);
-       }
-       err |= __put_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr);
-
-       return err;
-}
-
-int fpu_emulator_restore_context32(struct sigcontext32 __user *sc)
-{
-       int i;
-       int err = 0;
-       int inc = test_thread_flag(TIF_32BIT_FPREGS) ? 2 : 1;
-
-       for (i = 0; i < 32; i += inc) {
-               err |=
-                   __get_user(current->thread.fpu.fpr[i], &sc->sc_fpregs[i]);
-       }
-       err |= __get_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr);
-
-       return err;
+       for (i = 0; i < 32; i++)
+               set_fpr64(&current->thread.fpu.fpr[i], 0, SIGNALLING_NAN);
 }
-#endif
index c14259edd53f40539ce41cf73025ddce6cbcb277..1c74a6ad072a984be8005d7047ed7eb815ddb115 100644 (file)
@@ -57,7 +57,7 @@ static inline void r4k_on_each_cpu(void (*func) (void *info), void *info)
        preempt_enable();
 }
 
-#if defined(CONFIG_MIPS_CMP)
+#if defined(CONFIG_MIPS_CMP) || defined(CONFIG_MIPS_CPS)
 #define cpu_has_safe_index_cacheops 0
 #else
 #define cpu_has_safe_index_cacheops 1
@@ -123,6 +123,28 @@ static void r4k_blast_dcache_page_setup(void)
                r4k_blast_dcache_page = r4k_blast_dcache_page_dc64;
 }
 
+#ifndef CONFIG_EVA
+#define r4k_blast_dcache_user_page  r4k_blast_dcache_page
+#else
+
+static void (*r4k_blast_dcache_user_page)(unsigned long addr);
+
+static void r4k_blast_dcache_user_page_setup(void)
+{
+       unsigned long  dc_lsize = cpu_dcache_line_size();
+
+       if (dc_lsize == 0)
+               r4k_blast_dcache_user_page = (void *)cache_noop;
+       else if (dc_lsize == 16)
+               r4k_blast_dcache_user_page = blast_dcache16_user_page;
+       else if (dc_lsize == 32)
+               r4k_blast_dcache_user_page = blast_dcache32_user_page;
+       else if (dc_lsize == 64)
+               r4k_blast_dcache_user_page = blast_dcache64_user_page;
+}
+
+#endif
+
 static void (* r4k_blast_dcache_page_indexed)(unsigned long addr);
 
 static void r4k_blast_dcache_page_indexed_setup(void)
@@ -245,6 +267,27 @@ static void r4k_blast_icache_page_setup(void)
                r4k_blast_icache_page = blast_icache64_page;
 }
 
+#ifndef CONFIG_EVA
+#define r4k_blast_icache_user_page  r4k_blast_icache_page
+#else
+
+static void (*r4k_blast_icache_user_page)(unsigned long addr);
+
+static void __cpuinit r4k_blast_icache_user_page_setup(void)
+{
+       unsigned long ic_lsize = cpu_icache_line_size();
+
+       if (ic_lsize == 0)
+               r4k_blast_icache_user_page = (void *)cache_noop;
+       else if (ic_lsize == 16)
+               r4k_blast_icache_user_page = blast_icache16_user_page;
+       else if (ic_lsize == 32)
+               r4k_blast_icache_user_page = blast_icache32_user_page;
+       else if (ic_lsize == 64)
+               r4k_blast_icache_user_page = blast_icache64_user_page;
+}
+
+#endif
 
 static void (* r4k_blast_icache_page_indexed)(unsigned long addr);
 
@@ -355,6 +398,7 @@ static inline void local_r4k___flush_cache_all(void * args)
 {
        switch (current_cpu_type()) {
        case CPU_LOONGSON2:
+       case CPU_LOONGSON3:
        case CPU_R4000SC:
        case CPU_R4000MC:
        case CPU_R4400SC:
@@ -519,7 +563,8 @@ static inline void local_r4k_flush_cache_page(void *args)
        }
 
        if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
-               r4k_blast_dcache_page(addr);
+               vaddr ? r4k_blast_dcache_page(addr) :
+                       r4k_blast_dcache_user_page(addr);
                if (exec && !cpu_icache_snoops_remote_store)
                        r4k_blast_scache_page(addr);
        }
@@ -530,7 +575,8 @@ static inline void local_r4k_flush_cache_page(void *args)
                        if (cpu_context(cpu, mm) != 0)
                                drop_mmu_context(mm, cpu);
                } else
-                       r4k_blast_icache_page(addr);
+                       vaddr ? r4k_blast_icache_page(addr) :
+                               r4k_blast_icache_user_page(addr);
        }
 
        if (vaddr) {
@@ -595,6 +641,17 @@ static inline void local_r4k_flush_icache_range(unsigned long start, unsigned lo
                        break;
                }
        }
+#ifdef CONFIG_EVA
+       /*
+        * Due to all possible segment mappings, there might be cache aliases
+        * caused by the bootloader being in non-EVA mode, and the CPU switching
+        * to EVA during early kernel init. It's best to flush the scache
+        * to avoid having secondary cores fetch stale data, leading to
+        * kernel crashes.
+        */
+       bc_wback_inv(start, (end - start));
+       __sync();
+#endif
 }
 
 static inline void local_r4k_flush_icache_range_ipi(void *args)
@@ -617,7 +674,7 @@ static void r4k_flush_icache_range(unsigned long start, unsigned long end)
        instruction_hazard();
 }
 
-#ifdef CONFIG_DMA_NONCOHERENT
+#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)
 
 static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
 {
@@ -688,7 +745,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
        bc_inv(addr, size);
        __sync();
 }
-#endif /* CONFIG_DMA_NONCOHERENT */
+#endif /* CONFIG_DMA_NONCOHERENT || CONFIG_DMA_MAYBE_COHERENT */
 
 /*
  * While we're protected against bad userland addresses we don't care
@@ -1010,6 +1067,33 @@ static void probe_pcache(void)
                c->dcache.waybit = 0;
                break;
 
+       case CPU_LOONGSON3:
+               config1 = read_c0_config1();
+               lsize = (config1 >> 19) & 7;
+               if (lsize)
+                       c->icache.linesz = 2 << lsize;
+               else
+                       c->icache.linesz = 0;
+               c->icache.sets = 64 << ((config1 >> 22) & 7);
+               c->icache.ways = 1 + ((config1 >> 16) & 7);
+               icache_size = c->icache.sets *
+                                         c->icache.ways *
+                                         c->icache.linesz;
+               c->icache.waybit = 0;
+
+               lsize = (config1 >> 10) & 7;
+               if (lsize)
+                       c->dcache.linesz = 2 << lsize;
+               else
+                       c->dcache.linesz = 0;
+               c->dcache.sets = 64 << ((config1 >> 13) & 7);
+               c->dcache.ways = 1 + ((config1 >> 7) & 7);
+               dcache_size = c->dcache.sets *
+                                         c->dcache.ways *
+                                         c->dcache.linesz;
+               c->dcache.waybit = 0;
+               break;
+
        default:
                if (!(config & MIPS_CONF_M))
                        panic("Don't know how to probe P-caches on this cpu.");
@@ -1113,13 +1197,21 @@ static void probe_pcache(void)
        case CPU_34K:
        case CPU_74K:
        case CPU_1004K:
+       case CPU_1074K:
        case CPU_INTERAPTIV:
+       case CPU_P5600:
        case CPU_PROAPTIV:
-               if (current_cpu_type() == CPU_74K)
+       case CPU_M5150:
+               if ((c->cputype == CPU_74K) || (c->cputype == CPU_1074K))
                        alias_74k_erratum(c);
-               if ((read_c0_config7() & (1 << 16))) {
-                       /* effectively physically indexed dcache,
-                          thus no virtual aliases. */
+               if (!(read_c0_config7() & MIPS_CONF7_IAR) &&
+                   (c->icache.waysize > PAGE_SIZE))
+                       c->icache.flags |= MIPS_CACHE_ALIASES;
+               if (read_c0_config7() & MIPS_CONF7_AR) {
+                       /*
+                        * Effectively physically indexed dcache,
+                        * thus no virtual aliases.
+                       */
                        c->dcache.flags |= MIPS_CACHE_PINDEX;
                        break;
                }
@@ -1239,6 +1331,33 @@ static void __init loongson2_sc_init(void)
        c->options |= MIPS_CPU_INCLUSIVE_CACHES;
 }
 
+static void __init loongson3_sc_init(void)
+{
+       struct cpuinfo_mips *c = &current_cpu_data;
+       unsigned int config2, lsize;
+
+       config2 = read_c0_config2();
+       lsize = (config2 >> 4) & 15;
+       if (lsize)
+               c->scache.linesz = 2 << lsize;
+       else
+               c->scache.linesz = 0;
+       c->scache.sets = 64 << ((config2 >> 8) & 15);
+       c->scache.ways = 1 + (config2 & 15);
+
+       scache_size = c->scache.sets *
+                                 c->scache.ways *
+                                 c->scache.linesz;
+       /* Loongson-3 has 4 cores with 1MB scache each; the scaches are shared */
+       scache_size *= 4;
+       c->scache.waybit = 0;
+       pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
+              scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);
+       if (scache_size)
+               c->options |= MIPS_CPU_INCLUSIVE_CACHES;
+       return;
+}
+
 extern int r5k_sc_init(void);
 extern int rm7k_sc_init(void);
 extern int mips_sc_init(void);
@@ -1291,6 +1410,10 @@ static void setup_scache(void)
                loongson2_sc_init();
                return;
 
+       case CPU_LOONGSON3:
+               loongson3_sc_init();
+               return;
+
        case CPU_XLP:
                /* don't need to worry about L2, fully coherent */
                return;
@@ -1461,6 +1584,10 @@ void r4k_cache_init(void)
        r4k_blast_scache_page_setup();
        r4k_blast_scache_page_indexed_setup();
        r4k_blast_scache_setup();
+#ifdef CONFIG_EVA
+       r4k_blast_dcache_user_page_setup();
+       r4k_blast_icache_user_page_setup();
+#endif
 
        /*
         * Some MIPS32 and MIPS64 processors have physically indexed caches.
@@ -1492,7 +1619,7 @@ void r4k_cache_init(void)
        flush_icache_range      = r4k_flush_icache_range;
        local_flush_icache_range        = local_r4k_flush_icache_range;
 
-#if defined(CONFIG_DMA_NONCOHERENT)
+#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)
        if (coherentio) {
                _dma_cache_wback_inv    = (void *)cache_noop;
                _dma_cache_wback        = (void *)cache_noop;
index fde7e56d13fe3c0d6079d8ad76283f0e13a75a8e..e422b38d3113b0c38bd7d6c495ccc99849e0da71 100644 (file)
@@ -49,7 +49,7 @@ EXPORT_SYMBOL_GPL(local_flush_data_cache_page);
 EXPORT_SYMBOL(flush_data_cache_page);
 EXPORT_SYMBOL(flush_icache_all);
 
-#ifdef CONFIG_DMA_NONCOHERENT
+#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)
 
 /* DMA cache operations. */
 void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
@@ -58,7 +58,7 @@ void (*_dma_cache_inv)(unsigned long start, unsigned long size);
 
 EXPORT_SYMBOL(_dma_cache_wback_inv);
 
-#endif /* CONFIG_DMA_NONCOHERENT */
+#endif /* CONFIG_DMA_NONCOHERENT || CONFIG_DMA_MAYBE_COHERENT */
 
 /*
  * We could optimize the case where the cache argument is not BCACHE but
index 6b59617760c1933223337581dd0359feef084b37..4fc74c78265a9af156f260a6efd83e908aa8b000 100644 (file)
@@ -422,10 +422,20 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 }
 #endif
 
+void (*free_init_pages_eva)(void *begin, void *end) = NULL;
+
 void __init_refok free_initmem(void)
 {
        prom_free_prom_memory();
-       free_initmem_default(POISON_FREE_INITMEM);
+       /*
+        * Let the platform define a specific function to free the
+        * init section since EVA may have used any possible mapping
+        * between virtual and physical addresses.
+        */
+       if (free_init_pages_eva)
+               free_init_pages_eva((void *)&__init_begin, (void *)&__init_end);
+       else
+               free_initmem_default(POISON_FREE_INITMEM);
 }
 
 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT
index 7a56aee5fce70ebc5c7fb627a262f1e84b2176f2..99eb8fabab606afe28781620301f430e4b776fd4 100644 (file)
@@ -76,8 +76,10 @@ static inline int mips_sc_is_activated(struct cpuinfo_mips *c)
        case CPU_34K:
        case CPU_74K:
        case CPU_1004K:
+       case CPU_1074K:
        case CPU_INTERAPTIV:
        case CPU_PROAPTIV:
+       case CPU_P5600:
        case CPU_BMIPS5000:
                if (config2 & (1 << 12))
                        return 0;
index ae4ca24507072f8cd3a94b02a935442091aae76b..eeaf50f5df2b7fcb909252a655b435a780adfa23 100644 (file)
@@ -48,13 +48,14 @@ extern void build_tlb_refill_handler(void);
 #endif /* CONFIG_MIPS_MT_SMTC */
 
 /*
- * LOONGSON2 has a 4 entry itlb which is a subset of dtlb,
- * unfortrunately, itlb is not totally transparent to software.
+ * LOONGSON2/3 has a 4 entry itlb which is a subset of dtlb,
+ * unfortunately, itlb is not totally transparent to software.
  */
 static inline void flush_itlb(void)
 {
        switch (current_cpu_type()) {
        case CPU_LOONGSON2:
+       case CPU_LOONGSON3:
                write_c0_diag(4);
                break;
        default:
index b234b1b5ccada646b9286d8cc31c531d3b9f2a8a..ee88367ab3addcda0e4aba412f524493dddd079a 100644 (file)
@@ -509,7 +509,10 @@ static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
                switch (current_cpu_type()) {
                case CPU_M14KC:
                case CPU_74K:
+               case CPU_1074K:
                case CPU_PROAPTIV:
+               case CPU_P5600:
+               case CPU_M5150:
                        break;
 
                default:
@@ -579,6 +582,7 @@ static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
        case CPU_BMIPS4380:
        case CPU_BMIPS5000:
        case CPU_LOONGSON2:
+       case CPU_LOONGSON3:
        case CPU_R5500:
                if (m4kc_tlbp_war())
                        uasm_i_nop(p);
@@ -621,7 +625,7 @@ static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
 
        default:
                panic("No TLB refill handler yet (CPU type: %d)",
-                     current_cpu_data.cputype);
+                     current_cpu_type());
                break;
        }
 }
index fcebfced26d0c929aed514ca9b310677b3ba3247..4f9e44d358b7415b98a9bc8588163e88cb8015da 100644 (file)
@@ -20,7 +20,8 @@
 #include <asm/smp-ops.h>
 #include <asm/traps.h>
 #include <asm/fw/fw.h>
-#include <asm/gcmpregs.h>
+#include <asm/mips-cm.h>
+#include <asm/mips-cpc.h>
 #include <asm/mips-boards/generic.h>
 #include <asm/mips-boards/malta.h>
 
@@ -110,6 +111,11 @@ static void __init mips_ejtag_setup(void)
        flush_icache_range((unsigned long)base, (unsigned long)base + 0x80);
 }
 
+phys_t mips_cpc_default_phys_base(void)
+{
+       return CPC_BASE_ADDR;
+}
+
 extern struct plat_smp_ops msmtc_smp_ops;
 
 void __init prom_init(void)
@@ -238,10 +244,23 @@ mips_pci_controller:
                          MSC01_PCI_SWAP_BYTESWAP << MSC01_PCI_SWAP_MEM_SHF |
                          MSC01_PCI_SWAP_BYTESWAP << MSC01_PCI_SWAP_BAR0_SHF);
 #endif
+#ifndef CONFIG_EVA
                /* Fix up target memory mapping.  */
                MSC_READ(MSC01_PCI_BAR0, mask);
                MSC_WRITE(MSC01_PCI_P2SCMSKL, mask & MSC01_PCI_BAR0_SIZE_MSK);
+#else
+               /*
+                * Setup the Malta max (2GB) memory for PCI DMA in host bridge
+                * in transparent addressing mode, starting from 0x80000000.
+                */
+               mask = PHYS_OFFSET | (1<<3);
+               MSC_WRITE(MSC01_PCI_BAR0, mask);
 
+               mask = PHYS_OFFSET;
+               MSC_WRITE(MSC01_PCI_HEAD4, mask);
+               MSC_WRITE(MSC01_PCI_P2SCMSKL, mask);
+               MSC_WRITE(MSC01_PCI_P2SCMAPL, mask);
+#endif
                /* Don't handle target retries indefinitely.  */
                if ((data & MSC01_PCI_CFG_MAXRTRY_MSK) ==
                    MSC01_PCI_CFG_MAXRTRY_MSK)
@@ -276,10 +295,13 @@ mips_pci_controller:
        console_config();
 #endif
        /* Early detection of CMP support */
-       if (gcmp_probe(GCMP_BASE_ADDR, GCMP_ADDRSPACE_SZ))
-               if (!register_cmp_smp_ops())
-                       return;
+       mips_cm_probe();
+       mips_cpc_probe();
 
+       if (!register_cps_smp_ops())
+               return;
+       if (!register_cmp_smp_ops())
+               return;
        if (!register_vsmp_smp_ops())
                return;
 
index 2242181a62841e2c86cd87ab3859bee35ca91099..b71ee809191a2f3beaf8734b727decb634a39bdf 100644 (file)
@@ -26,6 +26,7 @@
 #include <asm/i8259.h>
 #include <asm/irq_cpu.h>
 #include <asm/irq_regs.h>
+#include <asm/mips-cm.h>
 #include <asm/mips-boards/malta.h>
 #include <asm/mips-boards/maltaint.h>
 #include <asm/gt64120.h>
 #include <asm/mips-boards/msc01_pci.h>
 #include <asm/msc01_ic.h>
 #include <asm/gic.h>
-#include <asm/gcmpregs.h>
 #include <asm/setup.h>
 #include <asm/rtlx.h>
 
-int gcmp_present = -1;
 static unsigned long _msc01_biu_base;
-static unsigned long _gcmp_base;
 static unsigned int ipi_map[NR_CPUS];
 
 static DEFINE_RAW_SPINLOCK(mips_irq_lock);
@@ -288,10 +286,6 @@ asmlinkage void plat_irq_dispatch(void)
 
 #ifdef CONFIG_MIPS_MT_SMP
 
-
-#define GIC_MIPS_CPU_IPI_RESCHED_IRQ   3
-#define GIC_MIPS_CPU_IPI_CALL_IRQ      4
-
 #define MIPS_CPU_IPI_RESCHED_IRQ 0     /* SW int 0 for resched */
 #define C_RESCHED C_SW0
 #define MIPS_CPU_IPI_CALL_IRQ 1                /* SW int 1 for resched */
@@ -308,6 +302,13 @@ static void ipi_call_dispatch(void)
        do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ);
 }
 
+#endif /* CONFIG_MIPS_MT_SMP */
+
+#ifdef CONFIG_MIPS_GIC_IPI
+
+#define GIC_MIPS_CPU_IPI_RESCHED_IRQ   3
+#define GIC_MIPS_CPU_IPI_CALL_IRQ      4
+
 static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
 {
 #ifdef CONFIG_MIPS_VPE_APSP_API_CMP
@@ -338,7 +339,7 @@ static struct irqaction irq_call = {
        .flags          = IRQF_PERCPU,
        .name           = "IPI_call"
 };
-#endif /* CONFIG_MIPS_MT_SMP */
+#endif /* CONFIG_MIPS_GIC_IPI */
 
 static int gic_resched_int_base;
 static int gic_call_int_base;
@@ -418,49 +419,7 @@ static struct gic_intr_map gic_intr_map[GIC_NUM_INTRS] = {
 };
 #undef X
 
-/*
- * GCMP needs to be detected before any SMP initialisation
- */
-int __init gcmp_probe(unsigned long addr, unsigned long size)
-{
-       if ((mips_revision_sconid != MIPS_REVISION_SCON_ROCIT)  &&
-           (mips_revision_sconid != MIPS_REVISION_SCON_GT64120)) {
-               gcmp_present = 0;
-               pr_debug("GCMP NOT present\n");
-               return gcmp_present;
-       }
-
-       if (gcmp_present >= 0)
-               return gcmp_present;
-
-       _gcmp_base = (unsigned long) ioremap_nocache(GCMP_BASE_ADDR,
-               GCMP_ADDRSPACE_SZ);
-       _msc01_biu_base = (unsigned long) ioremap_nocache(MSC01_BIU_REG_BASE,
-               MSC01_BIU_ADDRSPACE_SZ);
-       gcmp_present = ((GCMPGCB(GCMPB) & GCMP_GCB_GCMPB_GCMPBASE_MSK) ==
-               GCMP_BASE_ADDR);
-
-       if (gcmp_present)
-               pr_debug("GCMP present\n");
-       return gcmp_present;
-}
-
-/* Return the number of IOCU's present */
-int __init gcmp_niocu(void)
-{
-       return gcmp_present ? ((GCMPGCB(GC) & GCMP_GCB_GC_NUMIOCU_MSK) >>
-               GCMP_GCB_GC_NUMIOCU_SHF) : 0;
-}
-
-/* Set GCMP region attributes */
-void __init gcmp_setregion(int region, unsigned long base,
-                          unsigned long mask, int type)
-{
-       GCMPGCBn(CMxBASE, region) = base;
-       GCMPGCBn(CMxMASK, region) = mask | type;
-}
-
-#if defined(CONFIG_MIPS_MT_SMP)
+#ifdef CONFIG_MIPS_GIC_IPI
 static void __init fill_ipi_map1(int baseintr, int cpu, int cpupin)
 {
        int intr = baseintr + cpu;
@@ -496,8 +455,8 @@ void __init arch_init_irq(void)
        if (!cpu_has_veic)
                mips_cpu_irq_init();
 
-       if (gcmp_present)  {
-               GCMPGCB(GICBA) = GIC_BASE_ADDR | GCMP_GCB_GICBA_EN_MSK;
+       if (mips_cm_present()) {
+               write_gcr_gic_base(GIC_BASE_ADDR | CM_GCR_GIC_BASE_GICEN_MSK);
                gic_present = 1;
        } else {
                if (mips_revision_sconid == MIPS_REVISION_SCON_ROCIT) {
@@ -576,7 +535,7 @@ void __init arch_init_irq(void)
        if (gic_present) {
                /* FIXME */
                int i;
-#if defined(CONFIG_MIPS_MT_SMP)
+#if defined(CONFIG_MIPS_GIC_IPI)
                gic_call_int_base = GIC_NUM_INTRS -
                        (NR_CPUS - nr_cpu_ids) * 2 - nr_cpu_ids;
                gic_resched_int_base = gic_call_int_base - nr_cpu_ids;
@@ -584,14 +543,14 @@ void __init arch_init_irq(void)
 #endif
                gic_init(GIC_BASE_ADDR, GIC_ADDRSPACE_SZ, gic_intr_map,
                                ARRAY_SIZE(gic_intr_map), MIPS_GIC_IRQ_BASE);
-               if (!gcmp_present) {
+               if (!mips_cm_present()) {
                        /* Enable the GIC */
                        i = REG(_msc01_biu_base, MSC01_SC_CFG);
                        REG(_msc01_biu_base, MSC01_SC_CFG) =
                                (i | (0x1 << MSC01_SC_CFG_GICENA_SHF));
                        pr_debug("GIC Enabled\n");
                }
-#if defined(CONFIG_MIPS_MT_SMP)
+#if defined(CONFIG_MIPS_GIC_IPI)
                /* set up ipi interrupts */
                if (cpu_has_vint) {
                        set_vi_handler(MIPSCPU_INT_IPI0, malta_ipi_irqdispatch);
@@ -708,16 +667,16 @@ int malta_be_handler(struct pt_regs *regs, int is_fixup)
        /* This duplicates the handling in do_be which seems wrong */
        int retval = is_fixup ? MIPS_BE_FIXUP : MIPS_BE_FATAL;
 
-       if (gcmp_present) {
-               unsigned long cm_error = GCMPGCB(GCMEC);
-               unsigned long cm_addr = GCMPGCB(GCMEA);
-               unsigned long cm_other = GCMPGCB(GCMEO);
+       if (mips_cm_present()) {
+               unsigned long cm_error = read_gcr_error_cause();
+               unsigned long cm_addr = read_gcr_error_addr();
+               unsigned long cm_other = read_gcr_error_mult();
                unsigned long cause, ocause;
                char buf[256];
 
-               cause = (cm_error & GCMP_GCB_GMEC_ERROR_TYPE_MSK);
+               cause = cm_error & CM_GCR_ERROR_CAUSE_ERRTYPE_MSK;
                if (cause != 0) {
-                       cause >>= GCMP_GCB_GMEC_ERROR_TYPE_SHF;
+                       cause >>= CM_GCR_ERROR_CAUSE_ERRTYPE_SHF;
                        if (cause < 16) {
                                unsigned long cca_bits = (cm_error >> 15) & 7;
                                unsigned long tr_bits = (cm_error >> 12) & 7;
@@ -748,8 +707,8 @@ int malta_be_handler(struct pt_regs *regs, int is_fixup)
                                         mcmd[cmd_bits], sport_bits);
                        }
 
-                       ocause = (cm_other & GCMP_GCB_GMEO_ERROR_2ND_MSK) >>
-                                GCMP_GCB_GMEO_ERROR_2ND_SHF;
+                       ocause = (cm_other & CM_GCR_ERROR_MULT_ERR2ND_MSK) >>
+                                CM_GCR_ERROR_MULT_ERR2ND_SHF;
 
                        pr_err("CM_ERROR=%08lx %s <%s>\n", cm_error,
                               causes[cause], buf);
@@ -757,7 +716,7 @@ int malta_be_handler(struct pt_regs *regs, int is_fixup)
                        pr_err("CM_OTHER=%08lx %s\n", cm_other, causes[ocause]);
 
                        /* reprime cause register */
-                       GCMPGCB(GCMEC) = 0;
+                       write_gcr_error_cause(0);
                }
        }
 
index 1f73d63e92a765d3ab1d829244a19e57dab8bd8e..6d0f4ab3632d01fa4b8474f3e02e388d0a1f0f89 100644 (file)
@@ -24,22 +24,30 @@ static fw_memblock_t mdesc[FW_MAX_MEMBLOCKS];
 /* determined physical memory size, not overridden by command line args         */
 unsigned long physical_memsize = 0L;
 
-fw_memblock_t * __init fw_getmdesc(void)
+fw_memblock_t * __init fw_getmdesc(int eva)
 {
-       char *memsize_str, *ptr;
-       unsigned int memsize;
+       char *memsize_str, *ememsize_str __maybe_unused = NULL, *ptr;
+       unsigned long memsize, ememsize __maybe_unused = 0;
        static char cmdline[COMMAND_LINE_SIZE] __initdata;
-       long val;
        int tmp;
 
        /* otherwise look in the environment */
+
        memsize_str = fw_getenv("memsize");
-       if (!memsize_str) {
+       if (memsize_str)
+               tmp = kstrtol(memsize_str, 0, &memsize);
+       if (eva) {
+       /* Look for ememsize for EVA */
+               ememsize_str = fw_getenv("ememsize");
+               if (ememsize_str)
+                       tmp = kstrtol(ememsize_str, 0, &ememsize);
+       }
+       if (!memsize && !ememsize) {
                pr_warn("memsize not set in YAMON, set to default (32Mb)\n");
                physical_memsize = 0x02000000;
        } else {
-               tmp = kstrtol(memsize_str, 0, &val);
-               physical_memsize = (unsigned long)val;
+               /* If ememsize is set, then set physical_memsize to that */
+               physical_memsize = ememsize ? : memsize;
        }
 
 #ifdef CONFIG_CPU_BIG_ENDIAN
@@ -54,20 +62,30 @@ fw_memblock_t * __init fw_getmdesc(void)
        ptr = strstr(cmdline, "memsize=");
        if (ptr && (ptr != cmdline) && (*(ptr - 1) != ' '))
                ptr = strstr(ptr, " memsize=");
+       /* And now look for ememsize */
+       if (eva) {
+               ptr = strstr(cmdline, "ememsize=");
+               if (ptr && (ptr != cmdline) && (*(ptr - 1) != ' '))
+                       ptr = strstr(ptr, " ememsize=");
+       }
 
        if (ptr)
-               memsize = memparse(ptr + 8, &ptr);
+               memsize = memparse(ptr + 8 + (eva ? 1 : 0), &ptr);
        else
                memsize = physical_memsize;
 
+       /* Reserve the last 64K for HIGHMEM arithmetic */
+       if (memsize > 0x7fff0000)
+               memsize = 0x7fff0000;
+
        memset(mdesc, 0, sizeof(mdesc));
 
        mdesc[0].type = fw_dontuse;
-       mdesc[0].base = 0x00000000;
+       mdesc[0].base = PHYS_OFFSET;
        mdesc[0].size = 0x00001000;
 
        mdesc[1].type = fw_code;
-       mdesc[1].base = 0x00001000;
+       mdesc[1].base = mdesc[0].base + 0x00001000UL;
        mdesc[1].size = 0x000ef000;
 
        /*
@@ -78,21 +96,27 @@ fw_memblock_t * __init fw_getmdesc(void)
         * devices.
         */
        mdesc[2].type = fw_dontuse;
-       mdesc[2].base = 0x000f0000;
+       mdesc[2].base = mdesc[0].base + 0x000f0000UL;
        mdesc[2].size = 0x00010000;
 
        mdesc[3].type = fw_dontuse;
-       mdesc[3].base = 0x00100000;
+       mdesc[3].base = mdesc[0].base + 0x00100000UL;
        mdesc[3].size = CPHYSADDR(PFN_ALIGN((unsigned long)&_end)) -
-               mdesc[3].base;
+               0x00100000UL;
 
        mdesc[4].type = fw_free;
-       mdesc[4].base = CPHYSADDR(PFN_ALIGN(&_end));
-       mdesc[4].size = memsize - mdesc[4].base;
+       mdesc[4].base = mdesc[0].base + CPHYSADDR(PFN_ALIGN(&_end));
+       mdesc[4].size = memsize - CPHYSADDR(mdesc[4].base);
 
        return &mdesc[0];
 }
 
+static void free_init_pages_eva_malta(void *begin, void *end)
+{
+       free_init_pages("unused kernel", __pa_symbol((unsigned long *)begin),
+                       __pa_symbol((unsigned long *)end));
+}
+
 static int __init fw_memtype_classify(unsigned int type)
 {
        switch (type) {
@@ -109,7 +133,9 @@ void __init fw_meminit(void)
 {
        fw_memblock_t *p;
 
-       p = fw_getmdesc();
+       p = fw_getmdesc(config_enabled(CONFIG_EVA));
+       free_init_pages_eva = (config_enabled(CONFIG_EVA) ?
+                              free_init_pages_eva_malta : NULL);
 
        while (p->size) {
                long type;
index c72a069367819d1ca8c91532862dbfbd890de1b1..bf621516affff9583d4c3e4ed0bb3a28f936749a 100644 (file)
 #include <linux/time.h>
 
 #include <asm/fw/fw.h>
+#include <asm/mips-cm.h>
 #include <asm/mips-boards/generic.h>
 #include <asm/mips-boards/malta.h>
 #include <asm/mips-boards/maltaint.h>
 #include <asm/dma.h>
 #include <asm/traps.h>
-#include <asm/gcmpregs.h>
 #ifdef CONFIG_VT
 #include <linux/console.h>
 #endif
@@ -127,7 +127,7 @@ static int __init plat_enable_iocoherency(void)
                                 BONITO_PCIMEMBASECFG_MEMBASE1_CACHED);
                        pr_info("Enabled Bonito IOBC coherency\n");
                }
-       } else if (gcmp_niocu() != 0) {
+       } else if (mips_cm_numiocu() != 0) {
                /* Nothing special needs to be done to enable coherency */
                pr_info("CMP IOCU detected\n");
                if ((*(unsigned int *)0xbf403000 & 0x81) != 0x81) {
@@ -165,7 +165,6 @@ static void __init plat_setup_iocoherency(void)
 #endif
 }
 
-#ifdef CONFIG_BLK_DEV_IDE
 static void __init pci_clock_check(void)
 {
        unsigned int __iomem *jmpr_p =
@@ -175,18 +174,25 @@ static void __init pci_clock_check(void)
                33, 20, 25, 30, 12, 16, 37, 10
        };
        int pciclock = pciclocks[jmpr];
-       char *argptr = fw_getcmdline();
+       char *optptr, *argptr = fw_getcmdline();
 
-       if (pciclock != 33 && !strstr(argptr, "idebus=")) {
-               pr_warn("WARNING: PCI clock is %dMHz, setting idebus\n",
+       /*
+        * If user passed a pci_clock= option, don't tack on another one
+        */
+       optptr = strstr(argptr, "pci_clock=");
+       if (optptr && (optptr == argptr || optptr[-1] == ' '))
+               return;
+
+       if (pciclock != 33) {
+               pr_warn("WARNING: PCI clock is %dMHz, setting pci_clock\n",
                        pciclock);
                argptr += strlen(argptr);
-               sprintf(argptr, " idebus=%d", pciclock);
+               sprintf(argptr, " pci_clock=%d", pciclock);
                if (pciclock < 20 || pciclock > 66)
-                       pr_warn("WARNING: IDE timing calculations will be incorrect\n");
+                       pr_warn("WARNING: IDE timing calculations will be "
+                               "incorrect\n");
        }
 }
-#endif
 
 #if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE)
 static void __init screen_info_setup(void)
@@ -247,6 +253,10 @@ void __init plat_mem_setup(void)
 {
        unsigned int i;
 
+       if (config_enabled(CONFIG_EVA))
+               /* EVA has already been configured in mach-malta/kernel-init.h */
+               pr_info("Enhanced Virtual Addressing (EVA) activated\n");
+
        mips_pcibios_init();
 
        /* Request I/O space for devices used on the Malta board. */
@@ -268,9 +278,7 @@ void __init plat_mem_setup(void)
 
        plat_setup_iocoherency();
 
-#ifdef CONFIG_BLK_DEV_IDE
        pci_clock_check();
-#endif
 
 #ifdef CONFIG_BLK_DEV_FD
        fd_activate();
index ffa35f509789d67e60f517f91d761367266121cb..f9c890d726779447b8f6f757f816d0bd25da0dab 100644 (file)
@@ -50,5 +50,4 @@ static int __init sead3_mtd_init(void)
 
        return 0;
 }
-
-module_init(sead3_mtd_init)
+device_initcall(sead3_mtd_init);
index 2a86e38872a725a89e31f2a5ad1907eb84a3b854..e7473244947831bf7c46a54db40d33e886642929 100644 (file)
@@ -86,8 +86,11 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
        case CPU_34K:
        case CPU_1004K:
        case CPU_74K:
+       case CPU_1074K:
        case CPU_INTERAPTIV:
        case CPU_PROAPTIV:
+       case CPU_P5600:
+       case CPU_M5150:
        case CPU_LOONGSON1:
        case CPU_SB1:
        case CPU_SB1A:
index 4d94d75ec6f98f47351afd695202be986355ba24..42821ae2d77e586155eb2554232af87481423731 100644 (file)
@@ -372,6 +372,7 @@ static int __init mipsxx_init(void)
                op_model_mipsxx_ops.cpu_type = "mips/34K";
                break;
 
+       case CPU_1074K:
        case CPU_74K:
                op_model_mipsxx_ops.cpu_type = "mips/74K";
                break;
@@ -384,6 +385,14 @@ static int __init mipsxx_init(void)
                op_model_mipsxx_ops.cpu_type = "mips/proAptiv";
                break;
 
+       case CPU_P5600:
+               op_model_mipsxx_ops.cpu_type = "mips/P5600";
+               break;
+
+       case CPU_M5150:
+               op_model_mipsxx_ops.cpu_type = "mips/M5150";
+               break;
+
        case CPU_5KC:
                op_model_mipsxx_ops.cpu_type = "mips/5K";
                break;
index 137f2a6feb257a5e1164e40000c672adc98f695b..d61138a177cce7206550b4b4266ac56bee9b55b1 100644 (file)
@@ -29,6 +29,7 @@ obj-$(CONFIG_LASAT)           += pci-lasat.o
 obj-$(CONFIG_MIPS_COBALT)      += fixup-cobalt.o
 obj-$(CONFIG_LEMOTE_FULOONG2E) += fixup-fuloong2e.o ops-loongson2.o
 obj-$(CONFIG_LEMOTE_MACH2F)    += fixup-lemote2f.o ops-loongson2.o
+obj-$(CONFIG_LEMOTE_MACH3A)    += fixup-loongson3.o ops-loongson3.o
 obj-$(CONFIG_MIPS_MALTA)       += fixup-malta.o pci-malta.o
 obj-$(CONFIG_PMC_MSP7120_GW)   += fixup-pmcmsp.o ops-pmcmsp.o
 obj-$(CONFIG_PMC_MSP7120_EVAL) += fixup-pmcmsp.o ops-pmcmsp.o
diff --git a/arch/mips/pci/fixup-loongson3.c b/arch/mips/pci/fixup-loongson3.c
new file mode 100644 (file)
index 0000000..d708ae4
--- /dev/null
@@ -0,0 +1,66 @@
+/*
+ * fixup-loongson3.c
+ *
+ * Copyright (C) 2012 Lemote, Inc.
+ * Author: Xiang Yu, xiangy@lemote.com
+ *         Chen Huacai, chenhc@lemote.com
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
+ * WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ * NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
+ * USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/pci.h>
+#include <boot_param.h>
+
+static void print_fixup_info(const struct pci_dev *pdev)
+{
+       dev_info(&pdev->dev, "Device %x:%x, irq %d\n",
+                       pdev->vendor, pdev->device, pdev->irq);
+}
+
+int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+       print_fixup_info(dev);
+       return dev->irq;
+}
+
+static void pci_fixup_radeon(struct pci_dev *pdev)
+{
+       if (pdev->resource[PCI_ROM_RESOURCE].start)
+               return;
+
+       if (!loongson_sysconf.vgabios_addr)
+               return;
+
+       pdev->resource[PCI_ROM_RESOURCE].start =
+               loongson_sysconf.vgabios_addr;
+       pdev->resource[PCI_ROM_RESOURCE].end   =
+               loongson_sysconf.vgabios_addr + 256*1024 - 1;
+       pdev->resource[PCI_ROM_RESOURCE].flags |= IORESOURCE_ROM_COPY;
+
+       dev_info(&pdev->dev, "BAR %d: assigned %pR for Radeon ROM\n",
+                       PCI_ROM_RESOURCE, &pdev->resource[PCI_ROM_RESOURCE]);
+}
+
+DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
+                               PCI_CLASS_DISPLAY_VGA, 8, pci_fixup_radeon);
+
+/* Do platform specific device initialization at pci_enable_device() time */
+int pcibios_plat_dev_init(struct pci_dev *dev)
+{
+       return 0;
+}
index 7a0eda782e35cf8c51d894c1b832dd9692f41c15..2f9e52a1a7504e884ef84a407943c5d510b55a4b 100644 (file)
@@ -51,6 +51,19 @@ int pcibios_plat_dev_init(struct pci_dev *dev)
        return 0;
 }
 
+static void malta_piix_func3_base_fixup(struct pci_dev *dev)
+{
+       /* Set a sane PM I/O base address */
+       pci_write_config_word(dev, PIIX4_FUNC3_PMBA, 0x1000);
+
+       /* Enable access to the PM I/O region */
+       pci_write_config_byte(dev, PIIX4_FUNC3_PMREGMISC,
+                             PIIX4_FUNC3_PMREGMISC_EN);
+}
+
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3,
+                       malta_piix_func3_base_fixup);
+
 static void malta_piix_func0_fixup(struct pci_dev *pdev)
 {
        unsigned char reg_val;
diff --git a/arch/mips/pci/ops-loongson3.c b/arch/mips/pci/ops-loongson3.c
new file mode 100644 (file)
index 0000000..46ed541
--- /dev/null
@@ -0,0 +1,101 @@
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+
+#include <asm/mips-boards/bonito64.h>
+
+#include <loongson.h>
+
+#define PCI_ACCESS_READ  0
+#define PCI_ACCESS_WRITE 1
+
+#define HT1LO_PCICFG_BASE      0x1a000000
+#define HT1LO_PCICFG_BASE_TP1  0x1b000000
+
+static int loongson3_pci_config_access(unsigned char access_type,
+               struct pci_bus *bus, unsigned int devfn,
+               int where, u32 *data)
+{
+       unsigned char busnum = bus->number;
+       u_int64_t addr, type;
+       void *addrp;
+       int device = PCI_SLOT(devfn);
+       int function = PCI_FUNC(devfn);
+       int reg = where & ~3;
+
+       addr = (busnum << 16) | (device << 11) | (function << 8) | reg;
+       if (busnum == 0) {
+               if (device > 31)
+                       return PCIBIOS_DEVICE_NOT_FOUND;
+               addrp = (void *)(TO_UNCAC(HT1LO_PCICFG_BASE) | (addr & 0xffff));
+               type = 0;
+
+       } else {
+               addrp = (void *)(TO_UNCAC(HT1LO_PCICFG_BASE_TP1) | (addr));
+               type = 0x10000;
+       }
+
+       if (access_type == PCI_ACCESS_WRITE)
+               writel(*data, addrp);
+       else {
+               *data = readl(addrp);
+               if (*data == 0xffffffff) {
+                       *data = -1;
+                       return PCIBIOS_DEVICE_NOT_FOUND;
+               }
+       }
+       return PCIBIOS_SUCCESSFUL;
+}
+
+static int loongson3_pci_pcibios_read(struct pci_bus *bus, unsigned int devfn,
+                                int where, int size, u32 *val)
+{
+       u32 data = 0;
+       int ret = loongson3_pci_config_access(PCI_ACCESS_READ,
+                       bus, devfn, where, &data);
+
+       if (ret != PCIBIOS_SUCCESSFUL)
+               return ret;
+
+       if (size == 1)
+               *val = (data >> ((where & 3) << 3)) & 0xff;
+       else if (size == 2)
+               *val = (data >> ((where & 3) << 3)) & 0xffff;
+       else
+               *val = data;
+
+       return PCIBIOS_SUCCESSFUL;
+}
+
+static int loongson3_pci_pcibios_write(struct pci_bus *bus, unsigned int devfn,
+                                 int where, int size, u32 val)
+{
+       u32 data = 0;
+       int ret;
+
+       if (size == 4)
+               data = val;
+       else {
+               ret = loongson3_pci_config_access(PCI_ACCESS_READ,
+                               bus, devfn, where, &data);
+               if (ret != PCIBIOS_SUCCESSFUL)
+                       return ret;
+
+               if (size == 1)
+                       data = (data & ~(0xff << ((where & 3) << 3))) |
+                           (val << ((where & 3) << 3));
+               else if (size == 2)
+                       data = (data & ~(0xffff << ((where & 3) << 3))) |
+                           (val << ((where & 3) << 3));
+       }
+
+       ret = loongson3_pci_config_access(PCI_ACCESS_WRITE,
+                       bus, devfn, where, &data);
+
+       return ret;
+}
+
+struct pci_ops loongson_pci_ops = {
+       .read = loongson3_pci_pcibios_read,
+       .write = loongson3_pci_pcibios_write
+};
index d1faece21b6a78f4c79e20e3cfbaec448d281fad..563d1f61d6eeae50f79144f66f28ecfefea09775 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/syscore_ops.h>
 #include <linux/vmalloc.h>
 
+#include <asm/dma-coherence.h>
 #include <asm/mach-au1x00/au1000.h>
 #include <asm/tlbmisc.h>
 
@@ -411,17 +412,15 @@ static int alchemy_pci_probe(struct platform_device *pdev)
        }
        ctx->alchemy_pci_ctrl.io_map_base = (unsigned long)virt_io;
 
-#ifdef CONFIG_DMA_NONCOHERENT
        /* Au1500 revisions older than AD have borked coherent PCI */
        if ((alchemy_get_cputype() == ALCHEMY_CPU_AU1500) &&
-           (read_c0_prid() < 0x01030202)) {
+           (read_c0_prid() < 0x01030202) && !coherentio) {
                val = __raw_readl(ctx->regs + PCI_REG_CONFIG);
                val |= PCI_CONFIG_NC;
                __raw_writel(val, ctx->regs + PCI_REG_CONFIG);
                wmb();
                dev_info(&pdev->dev, "non-coherent PCI on Au1500 AA/AB/AC\n");
        }
-#endif
 
        if (pd->board_map_irq)
                ctx->board_map_irq = pd->board_map_irq;
index f1a73890dd4f104b87f3165ce1796d1be7bda92a..cfbbc3e3e914526e8086e156d426a82c043032a2 100644 (file)
@@ -27,7 +27,7 @@
 #include <linux/init.h>
 
 #include <asm/gt64120.h>
-#include <asm/gcmpregs.h>
+#include <asm/mips-cm.h>
 #include <asm/mips-boards/generic.h>
 #include <asm/mips-boards/bonito64.h>
 #include <asm/mips-boards/msc01_pci.h>
@@ -201,11 +201,11 @@ void __init mips_pcibios_init(void)
                msc_mem_resource.start = start & mask;
                msc_mem_resource.end = (start & mask) | ~mask;
                msc_controller.mem_offset = (start & mask) - (map & mask);
-#ifdef CONFIG_MIPS_CMP
-               if (gcmp_niocu())
-                       gcmp_setregion(0, start, mask,
-                               GCMP_GCB_GCMPB_CMDEFTGT_IOCU1);
-#endif
+               if (mips_cm_numiocu()) {
+                       write_gcr_reg0_base(start);
+                       write_gcr_reg0_mask(mask |
+                                           CM_GCR_REGn_MASK_CMTGT_IOCU0);
+               }
                MSC_READ(MSC01_PCI_SC2PIOBASL, start);
                MSC_READ(MSC01_PCI_SC2PIOMSKL, mask);
                MSC_READ(MSC01_PCI_SC2PIOMAPL, map);
@@ -213,11 +213,11 @@ void __init mips_pcibios_init(void)
                msc_io_resource.end = (map & mask) | ~mask;
                msc_controller.io_offset = 0;
                ioport_resource.end = ~mask;
-#ifdef CONFIG_MIPS_CMP
-               if (gcmp_niocu())
-                       gcmp_setregion(1, start, mask,
-                               GCMP_GCB_GCMPB_CMDEFTGT_IOCU1);
-#endif
+               if (mips_cm_numiocu()) {
+                       write_gcr_reg1_base(start);
+                       write_gcr_reg1_mask(mask |
+                                           CM_GCR_REGn_MASK_CMTGT_IOCU0);
+               }
                /* If ranges overlap I/O takes precedence.  */
                start = start & mask;
                end = start | ~mask;
index 396b2967ad856bb974da6ff8055d27037cd9b654..7e980767679c09fc2e5c4e7c63e87b71f7906d5e 100644 (file)
@@ -49,7 +49,7 @@ void msp7120_reset(void)
        /* Cache the reset code of this function */
        __asm__ __volatile__ (
                "       .set    push                            \n"
-               "       .set    mips3                           \n"
+               "       .set    arch=r4000                      \n"
                "       la      %0,startpoint                   \n"
                "       la      %1,endpoint                     \n"
                "       .set    pop                             \n"
index 93412d6b3af1703ccfd220aa759fbf8d553eeec7..4bd10f94f0683a3c14e44f0fb8e5f41bb92ab83e 100644 (file)
@@ -5,12 +5,16 @@
  *
  * Copyright (C) 2009 Wind River Systems,
  *   written by Ralf Baechle <ralf@linux-mips.org>
+ *
+ * Copyright (c) 2013 by Cisco Systems, Inc.
+ * All rights reserved.
  */
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/io.h>
 #include <linux/edac.h>
+#include <linux/ctype.h>
 
 #include <asm/octeon/octeon.h>
 #include <asm/octeon/cvmx-lmcx-defs.h>
 
 #define OCTEON_MAX_MC 4
 
+#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
+
+struct octeon_lmc_pvt {
+       unsigned long inject;
+       unsigned long error_type;
+       unsigned long dimm;
+       unsigned long rank;
+       unsigned long bank;
+       unsigned long row;
+       unsigned long col;
+};
+
 static void octeon_lmc_edac_poll(struct mem_ctl_info *mci)
 {
        union cvmx_lmcx_mem_cfg0 cfg0;
@@ -55,14 +71,31 @@ static void octeon_lmc_edac_poll(struct mem_ctl_info *mci)
 
 static void octeon_lmc_edac_poll_o2(struct mem_ctl_info *mci)
 {
+       struct octeon_lmc_pvt *pvt = mci->pvt_info;
        union cvmx_lmcx_int int_reg;
        bool do_clear = false;
        char msg[64];
 
-       int_reg.u64 = cvmx_read_csr(CVMX_LMCX_INT(mci->mc_idx));
+       if (!pvt->inject)
+               int_reg.u64 = cvmx_read_csr(CVMX_LMCX_INT(mci->mc_idx));
+       else {
+               if (pvt->error_type == 1)
+                       int_reg.s.sec_err = 1;
+               if (pvt->error_type == 2)
+                       int_reg.s.ded_err = 1;
+       }
+
        if (int_reg.s.sec_err || int_reg.s.ded_err) {
                union cvmx_lmcx_fadr fadr;
-               fadr.u64 = cvmx_read_csr(CVMX_LMCX_FADR(mci->mc_idx));
+               if (likely(!pvt->inject))
+                       fadr.u64 = cvmx_read_csr(CVMX_LMCX_FADR(mci->mc_idx));
+               else {
+                       fadr.cn61xx.fdimm = pvt->dimm;
+                       fadr.cn61xx.fbunk = pvt->rank;
+                       fadr.cn61xx.fbank = pvt->bank;
+                       fadr.cn61xx.frow = pvt->row;
+                       fadr.cn61xx.fcol = pvt->col;
+               }
                snprintf(msg, sizeof(msg),
                         "DIMM %d rank %d bank %d row %d col %d",
                         fadr.cn61xx.fdimm, fadr.cn61xx.fbunk,
@@ -82,8 +115,128 @@ static void octeon_lmc_edac_poll_o2(struct mem_ctl_info *mci)
                int_reg.s.ded_err = -1; /* Done, re-arm */
                do_clear = true;
        }
-       if (do_clear)
-               cvmx_write_csr(CVMX_LMCX_INT(mci->mc_idx), int_reg.u64);
+
+       if (do_clear) {
+               if (likely(!pvt->inject))
+                       cvmx_write_csr(CVMX_LMCX_INT(mci->mc_idx), int_reg.u64);
+               else
+                       pvt->inject = 0;
+       }
+}
+
+/************************ MC SYSFS parts ***********************************/
+
+/* Only a couple naming differences per template, so very similar */
+#define TEMPLATE_SHOW(reg)                                             \
+static ssize_t octeon_mc_inject_##reg##_show(struct device *dev,       \
+                              struct device_attribute *attr,           \
+                              char *data)                              \
+{                                                                      \
+       struct mem_ctl_info *mci = to_mci(dev);                         \
+       struct octeon_lmc_pvt *pvt = mci->pvt_info;                     \
+       return sprintf(data, "%016llu\n", (u64)pvt->reg);               \
+}
+
+#define TEMPLATE_STORE(reg)                                            \
+static ssize_t octeon_mc_inject_##reg##_store(struct device *dev,      \
+                              struct device_attribute *attr,           \
+                              const char *data, size_t count)          \
+{                                                                      \
+       struct mem_ctl_info *mci = to_mci(dev);                         \
+       struct octeon_lmc_pvt *pvt = mci->pvt_info;                     \
+       if (isdigit(*data)) {                                           \
+               if (!kstrtoul(data, 0, &pvt->reg))                      \
+                       return count;                                   \
+       }                                                               \
+       return 0;                                                       \
+}
+
+TEMPLATE_SHOW(inject);
+TEMPLATE_STORE(inject);
+TEMPLATE_SHOW(dimm);
+TEMPLATE_STORE(dimm);
+TEMPLATE_SHOW(bank);
+TEMPLATE_STORE(bank);
+TEMPLATE_SHOW(rank);
+TEMPLATE_STORE(rank);
+TEMPLATE_SHOW(row);
+TEMPLATE_STORE(row);
+TEMPLATE_SHOW(col);
+TEMPLATE_STORE(col);
+
+static ssize_t octeon_mc_inject_error_type_store(struct device *dev,
+                                         struct device_attribute *attr,
+                                         const char *data,
+                                         size_t count)
+{
+       struct mem_ctl_info *mci = to_mci(dev);
+       struct octeon_lmc_pvt *pvt = mci->pvt_info;
+
+       if (!strncmp(data, "single", 6))
+               pvt->error_type = 1;
+       else if (!strncmp(data, "double", 6))
+               pvt->error_type = 2;
+
+       return count;
+}
+
+static ssize_t octeon_mc_inject_error_type_show(struct device *dev,
+                                        struct device_attribute *attr,
+                                        char *data)
+{
+       struct mem_ctl_info *mci = to_mci(dev);
+       struct octeon_lmc_pvt *pvt = mci->pvt_info;
+       if (pvt->error_type == 1)
+               return sprintf(data, "single");
+       else if (pvt->error_type == 2)
+               return sprintf(data, "double");
+
+       return 0;
+}
+
+static DEVICE_ATTR(inject, S_IRUGO | S_IWUSR,
+                  octeon_mc_inject_inject_show, octeon_mc_inject_inject_store);
+static DEVICE_ATTR(error_type, S_IRUGO | S_IWUSR,
+                  octeon_mc_inject_error_type_show, octeon_mc_inject_error_type_store);
+static DEVICE_ATTR(dimm, S_IRUGO | S_IWUSR,
+                  octeon_mc_inject_dimm_show, octeon_mc_inject_dimm_store);
+static DEVICE_ATTR(rank, S_IRUGO | S_IWUSR,
+                  octeon_mc_inject_rank_show, octeon_mc_inject_rank_store);
+static DEVICE_ATTR(bank, S_IRUGO | S_IWUSR,
+                  octeon_mc_inject_bank_show, octeon_mc_inject_bank_store);
+static DEVICE_ATTR(row, S_IRUGO | S_IWUSR,
+                  octeon_mc_inject_row_show, octeon_mc_inject_row_store);
+static DEVICE_ATTR(col, S_IRUGO | S_IWUSR,
+                  octeon_mc_inject_col_show, octeon_mc_inject_col_store);
+
+
+static int octeon_set_mc_sysfs_attributes(struct mem_ctl_info *mci)
+{
+       int rc;
+
+       rc = device_create_file(&mci->dev, &dev_attr_inject);
+       if (rc < 0)
+               return rc;
+       rc = device_create_file(&mci->dev, &dev_attr_error_type);
+       if (rc < 0)
+               return rc;
+       rc = device_create_file(&mci->dev, &dev_attr_dimm);
+       if (rc < 0)
+               return rc;
+       rc = device_create_file(&mci->dev, &dev_attr_rank);
+       if (rc < 0)
+               return rc;
+       rc = device_create_file(&mci->dev, &dev_attr_bank);
+       if (rc < 0)
+               return rc;
+       rc = device_create_file(&mci->dev, &dev_attr_row);
+       if (rc < 0)
+               return rc;
+       rc = device_create_file(&mci->dev, &dev_attr_col);
+       if (rc < 0)
+               return rc;
+
+       return 0;
 }
 
 static int octeon_lmc_edac_probe(struct platform_device *pdev)
@@ -92,6 +245,8 @@ static int octeon_lmc_edac_probe(struct platform_device *pdev)
        struct edac_mc_layer layers[1];
        int mc = pdev->id;
 
+       opstate_init();
+
        layers[0].type = EDAC_MC_LAYER_CHANNEL;
        layers[0].size = 1;
        layers[0].is_virt_csrow = false;
@@ -105,7 +260,7 @@ static int octeon_lmc_edac_probe(struct platform_device *pdev)
                        return 0;
                }
 
-               mci = edac_mc_alloc(mc, ARRAY_SIZE(layers), layers, 0);
+               mci = edac_mc_alloc(mc, ARRAY_SIZE(layers), layers, sizeof(struct octeon_lmc_pvt));
                if (!mci)
                        return -ENXIO;
 
@@ -122,6 +277,12 @@ static int octeon_lmc_edac_probe(struct platform_device *pdev)
                        return -ENXIO;
                }
 
+               if (octeon_set_mc_sysfs_attributes(mci)) {
+                       dev_err(&pdev->dev, "octeon_set_mc_sysfs_attributes() failed\n");
+                       return -ENXIO;
+               }
+
+
                cfg0.u64 = cvmx_read_csr(CVMX_LMCX_MEM_CFG0(mc));
                cfg0.s.intr_ded_ena = 0;        /* We poll */
                cfg0.s.intr_sec_ena = 0;
@@ -137,7 +298,7 @@ static int octeon_lmc_edac_probe(struct platform_device *pdev)
                        return 0;
                }
 
-               mci = edac_mc_alloc(mc, ARRAY_SIZE(layers), layers, 0);
+               mci = edac_mc_alloc(mc, ARRAY_SIZE(layers), layers, sizeof(struct octeon_lmc_pvt));
                if (!mci)
                        return -ENXIO;
 
@@ -154,6 +315,12 @@ static int octeon_lmc_edac_probe(struct platform_device *pdev)
                        return -ENXIO;
                }
 
+               if (octeon_set_mc_sysfs_attributes(mci)) {
+                       dev_err(&pdev->dev, "octeon_set_mc_sysfs_attributes() failed\n");
+                       return -ENXIO;
+               }
+
+
                en.u64 = cvmx_read_csr(CVMX_LMCX_MEM_CFG0(mc));
                en.s.intr_ded_ena = 0;  /* We poll */
                en.s.intr_sec_ena = 0;
index 9902732a382d5064e6fcd99aab31618da21f9a2d..66cbcc108e62f0d032b556fd4ffe2b8ac3e2aadf 100644 (file)
@@ -81,6 +81,7 @@ static DEFINE_SPINLOCK(giu_lock);
 static unsigned long giu_flags;
 
 static void __iomem *giu_base;
+static struct gpio_chip vr41xx_gpio_chip;
 
 #define giu_read(offset)               readw(giu_base + (offset))
 #define giu_write(offset, value)       writew((value), giu_base + (offset))
@@ -135,12 +136,31 @@ static void unmask_giuint_low(struct irq_data *d)
        giu_set(GIUINTENL, 1 << GPIO_PIN_OF_IRQ(d->irq));
 }
 
+static unsigned int startup_giuint(struct irq_data *data)
+{
+       if (gpio_lock_as_irq(&vr41xx_gpio_chip, data->hwirq))
+               dev_err(vr41xx_gpio_chip.dev,
+                       "unable to lock HW IRQ %lu for IRQ\n",
+                       data->hwirq);
+       /* Satisfy the .enable semantics by unmasking the line */
+       unmask_giuint_low(data);
+       return 0;
+}
+
+static void shutdown_giuint(struct irq_data *data)
+{
+       mask_giuint_low(data);
+       gpio_unlock_as_irq(&vr41xx_gpio_chip, data->hwirq);
+}
+
 static struct irq_chip giuint_low_irq_chip = {
        .name           = "GIUINTL",
        .irq_ack        = ack_giuint_low,
        .irq_mask       = mask_giuint_low,
        .irq_mask_ack   = mask_ack_giuint_low,
        .irq_unmask     = unmask_giuint_low,
+       .irq_startup    = startup_giuint,
+       .irq_shutdown   = shutdown_giuint,
 };
 
 static void ack_giuint_high(struct irq_data *d)
index c4141c92bcff5734c4f228e94c5ce38a665c0d94..2ca4ee24755b180bad801d991a63302f3fd73093 100644 (file)
@@ -999,6 +999,15 @@ static int __init au1550_spi_init(void)
         * create memory device with 8 bits dev_devwidth
         * needed for proper byte ordering to spi fifo
         */
+       switch (alchemy_get_cputype()) {
+       case ALCHEMY_CPU_AU1550:
+       case ALCHEMY_CPU_AU1200:
+       case ALCHEMY_CPU_AU1300:
+               break;
+       default:
+               return -ENODEV;
+       }
+
        if (usedma) {
                ddma_memid = au1xxx_ddma_add_device(&au1550_spi_mem_dbdev);
                if (!ddma_memid)
This page took 0.401226 seconds and 5 git commands to generate.