[PATCH] powerpc: Eliminate "exceeds stub group size" linker warning
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index e16eb2a33173142c828cfba102d248b8e982c7e8..e720729f3e5536c6d267b68677a3c1a2ba514432 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -33,6 +33,7 @@
 #include <asm/hvcall.h>
 #include <asm/iseries/lpar_map.h>
 #include <asm/thread_info.h>
+#include <asm/firmware.h>
 
 #ifdef CONFIG_PPC_ISERIES
 #define DO_SOFT_DISABLE
@@ -132,7 +133,7 @@ _GLOBAL(__secondary_hold)
        bne     100b
 
 #if defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
-       LOAD_REG_IMMEDIATE(r4, .pSeries_secondary_smp_init)
+       LOAD_REG_IMMEDIATE(r4, .generic_secondary_smp_init)
        mtctr   r4
        mr      r3,r24
        bctr
@@ -191,6 +192,37 @@ exception_marker:
        ori     reg,reg,(label)@l;      /* virt addr of handler ... */
 #endif
 
+/*
+ * Equal to EXCEPTION_PROLOG_PSERIES, except that it forces 64bit mode.
+ * The firmware calls the registered system_reset_fwnmi and
+ * machine_check_fwnmi handlers in 32bit mode if the cpu happens to run
+ * a 32bit application at the time of the event.
+ * This firmware bug is present on POWER4 and JS20.
+ */
+#define EXCEPTION_PROLOG_PSERIES_FORCE_64BIT(area, label)              \
+       mfspr   r13,SPRN_SPRG3;         /* get paca address into r13 */ \
+       std     r9,area+EX_R9(r13);     /* save r9 - r12 */             \
+       std     r10,area+EX_R10(r13);                                   \
+       std     r11,area+EX_R11(r13);                                   \
+       std     r12,area+EX_R12(r13);                                   \
+       mfspr   r9,SPRN_SPRG1;                                          \
+       std     r9,area+EX_R13(r13);                                    \
+       mfcr    r9;                                                     \
+       clrrdi  r12,r13,32;             /* get high part of &label */   \
+       mfmsr   r10;                                                    \
+       /* force 64bit mode */                                          \
+       li      r11,5;                  /* MSR_SF_LG|MSR_ISF_LG */      \
+       rldimi  r10,r11,61,0;           /* insert into top 3 bits */    \
+       /* done 64bit mode */                                           \
+       mfspr   r11,SPRN_SRR0;          /* save SRR0 */                 \
+       LOAD_HANDLER(r12,label)                                         \
+       ori     r10,r10,MSR_IR|MSR_DR|MSR_RI;                           \
+       mtspr   SPRN_SRR0,r12;                                          \
+       mfspr   r12,SPRN_SRR1;          /* and SRR1 */                  \
+       mtspr   SPRN_SRR1,r10;                                          \
+       rfid;                                                           \
+       b       .       /* prevent speculative execution */
+
 #define EXCEPTION_PROLOG_PSERIES(area, label)                          \
        mfspr   r13,SPRN_SPRG3;         /* get paca address into r13 */ \
        std     r9,area+EX_R9(r13);     /* save r9 - r12 */             \
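(Annotation, not part of the patch: the "force 64bit mode" sequence in the macro added above, li r11,5 followed by rldimi r10,r11,61,0, overwrites the top three bits of the MSR value in r10 with 0b101, i.e. it sets MSR_SF (bit 63) and MSR_ISF (bit 61), so the rfid into the common handler runs in 64bit mode even if the firmware delivered the NMI while the CPU was executing 32bit code. A minimal C sketch of that bit manipulation, assuming the usual bit positions from asm/reg.h:)

        #include <stdint.h>

        /* illustrative only: what "li r11,5; rldimi r10,r11,61,0" does to the MSR */
        static inline uint64_t msr_force_64bit(uint64_t msr)
        {
                uint64_t top3 = 5ULL << 61;             /* MSR_SF (bit 63) | MSR_ISF (bit 61) */
                return (msr & ~(7ULL << 61)) | top3;    /* replace bits 63..61 with 0b101     */
        }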
@@ -334,19 +366,28 @@ label##_iSeries:                                                  \
 
 #ifdef DO_SOFT_DISABLE
 #define DISABLE_INTS                           \
+BEGIN_FW_FTR_SECTION;                          \
        lbz     r10,PACAPROCENABLED(r13);       \
        li      r11,0;                          \
        std     r10,SOFTE(r1);                  \
        mfmsr   r10;                            \
        stb     r11,PACAPROCENABLED(r13);       \
        ori     r10,r10,MSR_EE;                 \
-       mtmsrd  r10,1
+       mtmsrd  r10,1;                          \
+END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
 
 #define ENABLE_INTS                            \
+BEGIN_FW_FTR_SECTION;                          \
        lbz     r10,PACAPROCENABLED(r13);       \
        mfmsr   r11;                            \
        std     r10,SOFTE(r1);                  \
        ori     r11,r11,MSR_EE;                 \
+END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES);  \
+BEGIN_FW_FTR_SECTION;                          \
+       ld      r12,_MSR(r1);                   \
+       mfmsr   r11;                            \
+       rlwimi  r11,r12,0,MSR_EE;               \
+END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES);  \
        mtmsrd  r11,1
 
 #else  /* hard enable/disable interrupts */
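(Annotation, not part of the patch: with the firmware-feature sections above, DISABLE_INTS and the first half of ENABLE_INTS, the PACAPROCENABLED/SOFTE bookkeeping plus an unconditional MSR_EE, are patched in only when FW_FEATURE_ISERIES is set; on every other platform ENABLE_INTS just copies the EE bit of the interrupted context's saved MSR into the current MSR. A small C sketch of that non-iSeries path, illustrative only, with MSR_EE assumed to be bit 15:)

        #define MSR_EE  0x8000UL        /* external-interrupt enable, bit 15 */

        /* illustrative: the new MSR computed by
         * "ld r12,_MSR(r1); mfmsr r11; rlwimi r11,r12,0,MSR_EE; mtmsrd r11,1" */
        static unsigned long enable_ints_msr(unsigned long cur_msr, unsigned long saved_msr)
        {
                return (cur_msr & ~MSR_EE) | (saved_msr & MSR_EE);
        }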
@@ -446,7 +487,7 @@ BEGIN_FTR_SECTION
        rlwimi  r13,r12,16,0x20
        mfcr    r12
        cmpwi   r13,0x2c
-       beq     .do_stab_bolted_pSeries
+       beq     do_stab_bolted_pSeries
        mtcrf   0x80,r12
        mfspr   r12,SPRN_SPRG2
 END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
@@ -559,7 +600,7 @@ system_call_pSeries:
        STD_EXCEPTION_PSERIES(., performance_monitor)
 
        .align  7
-_GLOBAL(do_stab_bolted_pSeries)
+do_stab_bolted_pSeries:
        mtcrf   0x80,r12
        mfspr   r12,SPRN_SPRG2
        EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)
@@ -604,14 +645,14 @@ slb_miss_user_pseries:
 system_reset_fwnmi:
        HMT_MEDIUM
        mtspr   SPRN_SPRG1,r13          /* save r13 */
-       EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)
+       EXCEPTION_PROLOG_PSERIES_FORCE_64BIT(PACA_EXGEN, system_reset_common)
 
        .globl machine_check_fwnmi
       .align 7
 machine_check_fwnmi:
        HMT_MEDIUM
        mtspr   SPRN_SPRG1,r13          /* save r13 */
-       EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
+       EXCEPTION_PROLOG_PSERIES_FORCE_64BIT(PACA_EXMC, machine_check_common)
 
 #ifdef CONFIG_PPC_ISERIES
 /***  ISeries-LPAR interrupt handlers ***/
@@ -1005,7 +1046,7 @@ slb_miss_fault:
        li      r5,0
        std     r4,_DAR(r1)
        std     r5,_DSISR(r1)
-       b       .handle_page_fault
+       b       handle_page_fault
 
 unrecov_user_slb:
        EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
@@ -1040,8 +1081,10 @@ _GLOBAL(slb_miss_realmode)
        ld      r3,PACA_EXSLB+EX_R3(r13)
        lwz     r9,PACA_EXSLB+EX_CCR(r13)       /* get saved CR */
 #ifdef CONFIG_PPC_ISERIES
+BEGIN_FW_FTR_SECTION
        ld      r11,PACALPPACAPTR(r13)
        ld      r11,LPPACASRR0(r11)             /* get SRR0 value */
+END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
 #endif /* CONFIG_PPC_ISERIES */
 
        mtlr    r10
@@ -1056,8 +1099,10 @@ _GLOBAL(slb_miss_realmode)
 .machine       pop
 
 #ifdef CONFIG_PPC_ISERIES
+BEGIN_FW_FTR_SECTION
        mtspr   SPRN_SRR0,r11
        mtspr   SPRN_SRR1,r12
+END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
 #endif /* CONFIG_PPC_ISERIES */
        ld      r9,PACA_EXSLB+EX_R9(r13)
        ld      r10,PACA_EXSLB+EX_R10(r13)
@@ -1129,12 +1174,13 @@ program_check_common:
        .globl fp_unavailable_common
 fp_unavailable_common:
        EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
-       bne     .load_up_fpu            /* if from user, just load it up */
+       bne     1f                      /* if from user, just load it up */
        bl      .save_nvgprs
        addi    r3,r1,STACK_FRAME_OVERHEAD
        ENABLE_INTS
        bl      .kernel_fp_unavailable_exception
        BUG_OPCODE
+1:     b       .load_up_fpu
 
        .align  7
        .globl altivec_unavailable_common
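(Annotation, not part of the patch: the 1f/1: b .load_up_fpu detour above, like the 13: b .ret_from_except_lite trampoline added further down, is what the patch title refers to. A conditional branch encodes only a 16-bit displacement (+/- 32 kB), so pointing it straight at a distant global symbol makes the linker insert branch stubs, which in head_64.S shows up as the "exceeds stub group size" warning; bouncing through a nearby local label whose unconditional b has a 26-bit displacement (+/- 32 MB) needs no stub. Illustrative pattern only, .far_away_handler is a made-up symbol:)

        cmpdi   r3,0
        beq     1f                      /* short conditional hop, always in range      */
        blr
1:      b       .far_away_handler       /* unconditional branch reaches the real target */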
@@ -1234,10 +1280,10 @@ _GLOBAL(do_hash_page)
        std     r4,_DSISR(r1)
 
        andis.  r0,r4,0xa450            /* weird error? */
-       bne-    .handle_page_fault      /* if not, try to insert a HPTE */
+       bne-    handle_page_fault       /* if not, try to insert a HPTE */
 BEGIN_FTR_SECTION
        andis.  r0,r4,0x0020            /* Is it a segment table fault? */
-       bne-    .do_ste_alloc           /* If so handle it */
+       bne-    do_ste_alloc            /* If so handle it */
 END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
 
        /*
@@ -1270,6 +1316,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
        cmpdi   r3,0                    /* see if hash_page succeeded */
 
 #ifdef DO_SOFT_DISABLE
+BEGIN_FW_FTR_SECTION
        /*
         * If we had interrupts soft-enabled at the point where the
         * DSI/ISI occurred, and an interrupt came in during hash_page,
@@ -1278,7 +1325,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
         * because ret_from_except_lite will check for and handle pending
         * interrupts if necessary.
         */
-       beq     .ret_from_except_lite
+       beq     13f
        /* For a hash failure, we don't bother re-enabling interrupts */
        ble-    12f
 
@@ -1290,22 +1337,24 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
        ld      r3,SOFTE(r1)
        bl      .local_irq_restore
        b       11f
-#else
+END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
+#endif
+BEGIN_FW_FTR_SECTION
        beq     fast_exception_return   /* Return from exception on success */
        ble-    12f                     /* Failure return from hash_page */
 
        /* fall through */
-#endif
+END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
 
 /* Here we have a page fault that hash_page can't handle. */
-_GLOBAL(handle_page_fault)
+handle_page_fault:
        ENABLE_INTS
 11:    ld      r4,_DAR(r1)
        ld      r5,_DSISR(r1)
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      .do_page_fault
        cmpdi   r3,0
-       beq+    .ret_from_except_lite
+       beq+    13f
        bl      .save_nvgprs
        mr      r5,r3
        addi    r3,r1,STACK_FRAME_OVERHEAD
@@ -1322,12 +1371,14 @@ _GLOBAL(handle_page_fault)
        bl      .low_hash_fault
        b       .ret_from_except
 
+13:    b       .ret_from_except_lite
+
        /* here we have a segment miss */
-_GLOBAL(do_ste_alloc)
+do_ste_alloc:
        bl      .ste_allocate           /* try to insert stab entry */
        cmpdi   r3,0
-       beq+    fast_exception_return
-       b       .handle_page_fault
+       bne-    handle_page_fault
+       b       fast_exception_return
 
 /*
  * r13 points to the PACA, r9 contains the saved CR,
@@ -1453,19 +1504,17 @@ fwnmi_data_area:
         . = 0x8000
 
 /*
- * On pSeries, secondary processors spin in the following code.
+ * On pSeries and most other platforms, secondary processors spin
+ * in the following code.
  * At entry, r3 = this processor's number (physical cpu id)
  */
-_GLOBAL(pSeries_secondary_smp_init)
+_GLOBAL(generic_secondary_smp_init)
        mr      r24,r3
        
        /* turn on 64-bit mode */
        bl      .enable_64b_mode
        isync
 
-       /* Copy some CPU settings from CPU 0 */
-       bl      .__restore_cpu_setup
-
        /* Set up a paca value for this processor. Since we have the
         * physical cpu id in r24, we need to search the pacas to find
         * which logical id maps to our physical one.
@@ -1491,15 +1540,28 @@ _GLOBAL(pSeries_secondary_smp_init)
                                        /* start.                        */
        sync
 
-       /* Create a temp kernel stack for use before relocation is on.  */
+#ifndef CONFIG_SMP
+       b       3b                      /* Never go on non-SMP           */
+#else
+       cmpwi   0,r23,0
+       beq     3b                      /* Loop until told to go         */
+
+       /* See if we need to call a cpu state restore handler */
+       LOAD_REG_IMMEDIATE(r23, cur_cpu_spec)
+       ld      r23,0(r23)
+       ld      r23,CPU_SPEC_RESTORE(r23)
+       cmpdi   0,r23,0
+       beq     4f
+       ld      r23,0(r23)
+       mtctr   r23
+       bctrl
+
+4:     /* Create a temp kernel stack for use before relocation is on.  */
        ld      r1,PACAEMERGSP(r13)
        subi    r1,r1,STACK_FRAME_OVERHEAD
 
-       cmpwi   0,r23,0
-#ifdef CONFIG_SMP
-       bne     .__secondary_start
+       b       .__secondary_start
 #endif
-       b       3b                      /* Loop until told to go         */
 
 #ifdef CONFIG_PPC_ISERIES
 _STATIC(__start_initialization_iSeries)
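(Annotation, not part of the patch: instead of unconditionally calling __restore_cpu_setup, generic_secondary_smp_init now consults cur_cpu_spec for an optional restore handler; the extra ld through r23 dereferences the ppc64 function descriptor before mtctr/bctrl. Roughly the following in C, sketch only, with cpu_restore assumed to be the field behind the CPU_SPEC_RESTORE offset:)

        /* sketch of the CPU_SPEC_RESTORE lookup in generic_secondary_smp_init */
        struct cpu_spec {
                /* ...other cputable fields... */
                void (*cpu_restore)(void);      /* assumed name of the field at CPU_SPEC_RESTORE */
        };
        extern struct cpu_spec *cur_cpu_spec;

        static void call_cpu_restore_if_any(void)
        {
                if (cur_cpu_spec->cpu_restore)  /* NULL means no handler registered */
                        cur_cpu_spec->cpu_restore();
        }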
@@ -1521,11 +1583,6 @@ _STATIC(__start_initialization_iSeries)
        li      r0,0
        stdu    r0,-STACK_FRAME_OVERHEAD(r1)
 
-       LOAD_REG_IMMEDIATE(r3,cpu_specs)
-       LOAD_REG_IMMEDIATE(r4,cur_cpu_spec)
-       li      r5,0
-       bl      .identify_cpu
-
        LOAD_REG_IMMEDIATE(r2,__toc_start)
        addi    r2,r2,0x4000
        addi    r2,r2,0x4000
@@ -1580,7 +1637,18 @@ _GLOBAL(__start_initialization_multiplatform)
        bl      .enable_64b_mode
 
        /* Setup some critical 970 SPRs before switching MMU off */
-       bl      .__970_cpu_preinit
+       mfspr   r0,SPRN_PVR
+       srwi    r0,r0,16
+       cmpwi   r0,0x39         /* 970 */
+       beq     1f
+       cmpwi   r0,0x3c         /* 970FX */
+       beq     1f
+       cmpwi   r0,0x44         /* 970MP */
+       beq     1f
+       cmpwi   r0,0x45         /* 970GX */
+       bne     2f
+1:     bl      .__cpu_preinit_ppc970
+2:
 
        /* Switch off MMU if not already */
        LOAD_REG_IMMEDIATE(r4, .__after_prom_start - KERNELBASE)
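(Annotation, not part of the patch: the open-coded PVR test replaces the unconditional bl .__970_cpu_preinit so the PPC970-specific SPR setup only runs on the 970 family. A C rendering of the same check, sketch only, using mfspr/SPRN_PVR as in asm/reg.h and the version values from the asm above:)

        /* run the 970-specific preinit only on PPC970 (0x39), 970FX (0x3c),
         * 970MP (0x44) or 970GX (0x45); the upper 16 PVR bits hold the version */
        unsigned int pvr_ver = mfspr(SPRN_PVR) >> 16;

        if (pvr_ver == 0x39 || pvr_ver == 0x3c ||
            pvr_ver == 0x44 || pvr_ver == 0x45)
                __cpu_preinit_ppc970();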
@@ -1697,7 +1765,7 @@ _STATIC(__after_prom_start)
 _GLOBAL(copy_and_flush)
        addi    r5,r5,-8
        addi    r6,r6,-8
-4:     li      r0,16                   /* Use the least common         */
+4:     li      r0,8                    /* Use the smallest common      */
                                        /* denominator cache line       */
                                        /* size.  This results in       */
                                        /* extra cache line flushes     */
@@ -1751,7 +1819,7 @@ _GLOBAL(pmac_secondary_start)
        isync
 
        /* Copy some CPU settings from CPU 0 */
-       bl      .__restore_cpu_setup
+       bl      .__restore_cpu_ppc970
 
        /* pSeries do that early though I don't think we really need it */
        mfmsr   r3
@@ -1810,7 +1878,9 @@ _GLOBAL(__secondary_start)
        LOAD_REG_ADDR(r3, .start_secondary_prolog)
        LOAD_REG_IMMEDIATE(r4, MSR_KERNEL)
 #ifdef DO_SOFT_DISABLE
+BEGIN_FW_FTR_SECTION
        ori     r4,r4,MSR_EE
+END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
 #endif
        mtspr   SPRN_SRR0,r3
        mtspr   SPRN_SRR1,r4
@@ -1894,19 +1964,6 @@ _STATIC(start_here_multiplatform)
        addi    r2,r2,0x4000
        add     r2,r2,r26
 
-       LOAD_REG_IMMEDIATE(r3, cpu_specs)
-       add     r3,r3,r26
-       LOAD_REG_IMMEDIATE(r4,cur_cpu_spec)
-       add     r4,r4,r26
-       mr      r5,r26
-       bl      .identify_cpu
-
-       /* Save some low level config HIDs of CPU0 to be copied to
-        * other CPUs later on, or used for suspend/resume
-        */
-       bl      .__save_cpu_setup
-       sync
-
        /* Do very early kernel initializations, including initial hash table,
         * stab and slb setup before we turn on relocation.     */
 
@@ -1936,12 +1993,6 @@ _STATIC(start_here_common)
        li      r0,0
        stdu    r0,-STACK_FRAME_OVERHEAD(r1)
 
-       /* Apply the CPUs-specific fixups (nop out sections not relevant
-        * to this CPU
-        */
-       li      r3,0
-       bl      .do_cpu_ftr_fixups
-
        /* ptr to current */
        LOAD_REG_IMMEDIATE(r4, init_task)
        std     r4,PACACURRENT(r13)
@@ -1955,11 +2006,13 @@ _STATIC(start_here_common)
        /* Load up the kernel context */
 5:
 #ifdef DO_SOFT_DISABLE
+BEGIN_FW_FTR_SECTION
        li      r5,0
        stb     r5,PACAPROCENABLED(r13) /* Soft Disabled */
        mfmsr   r5
        ori     r5,r5,MSR_EE            /* Hard Enabled */
        mtmsrd  r5
+END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
 #endif
 
        bl .start_kernel