powerpc: Provide a way to defer perf counter work until interrupts are enabled
author	Paul Mackerras <paulus@samba.org>
Fri, 9 Jan 2009 05:52:19 +0000 (16:52 +1100)
committer	Paul Mackerras <paulus@samba.org>
Fri, 9 Jan 2009 08:48:17 +0000 (19:48 +1100)
Because 64-bit powerpc uses lazy (soft) interrupt disabling, it is
possible for a performance monitor exception to come in when the
kernel thinks interrupts are disabled (i.e. when they are
soft-disabled but hard-enabled).  In such a situation the performance
monitor exception handler might have some processing to do (such as
process wakeups) which can't be done in what is effectively an NMI
handler.

This provides a way to defer that work until interrupts get enabled,
either in raw_local_irq_restore() or by returning from an interrupt
handler to code that had interrupts enabled.  A per-processor flag
indicates that there is pending work to do when interrupts
subsequently get re-enabled.  This flag is checked in the interrupt
return path and in raw_local_irq_restore(), and if it is set,
perf_counter_do_pending() is called to do the pending work.
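
For illustration only, here is a stand-alone user-space sketch of the
same pattern (all names and the simulated interrupt state are invented
for the example; the real implementation is in the diff below):

    /* Simulates "defer the work until interrupts are enabled". */
    #include <stdio.h>
    #include <stdbool.h>

    static bool soft_enabled;   /* stand-in for paca->soft_enabled */
    static bool perf_pending;   /* stand-in for paca->perf_counter_pending */

    /* Stand-in for perf_counter_do_pending(): do the deferred work. */
    static void do_pending_work(void)
    {
            perf_pending = false;
            printf("doing deferred perf counter work\n");
    }

    /* The (simulated) PM exception: may run while soft-disabled, so it
     * only records that work is pending instead of doing it. */
    static void pm_exception(void)
    {
            if (!soft_enabled) {
                    perf_pending = true;    /* effectively in NMI context */
                    return;
            }
            do_pending_work();
    }

    /* Stand-in for raw_local_irq_restore(): re-enabling interrupts is
     * where any deferred work gets picked up. */
    static void local_irq_restore(bool en)
    {
            soft_enabled = en;
            if (en && perf_pending)
                    do_pending_work();
    }

    int main(void)
    {
            local_irq_restore(false);       /* soft-disable interrupts */
            pm_exception();                 /* exception: work is deferred */
            local_irq_restore(true);        /* deferred work runs here */
            return 0;
    }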

Signed-off-by: Paul Mackerras <paulus@samba.org>
arch/powerpc/include/asm/hw_irq.h
arch/powerpc/include/asm/paca.h
arch/powerpc/kernel/asm-offsets.c
arch/powerpc/kernel/entry_64.S
arch/powerpc/kernel/irq.c

index f75a5fc64d2e64c8eb117c3faacb0c35f804b919..e10f151c3db647b3e4f0c76835dba8879c942707 100644 (file)
@@ -131,5 +131,36 @@ static inline int irqs_disabled_flags(unsigned long flags)
  */
 struct hw_interrupt_type;
 
+#ifdef CONFIG_PERF_COUNTERS
+static inline unsigned long get_perf_counter_pending(void)
+{
+       unsigned long x;
+
+       asm volatile("lbz %0,%1(13)"
+               : "=r" (x)
+               : "i" (offsetof(struct paca_struct, perf_counter_pending)));
+       return x;
+}
+
+static inline void set_perf_counter_pending(int x)
+{
+       asm volatile("stb %0,%1(13)" : :
+               "r" (x),
+               "i" (offsetof(struct paca_struct, perf_counter_pending)));
+}
+
+extern void perf_counter_do_pending(void);
+
+#else
+
+static inline unsigned long get_perf_counter_pending(void)
+{
+       return 0;
+}
+
+static inline void set_perf_counter_pending(int x) {}
+static inline void perf_counter_do_pending(void) {}
+#endif /* CONFIG_PERF_COUNTERS */
+
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_HW_IRQ_H */
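
The accessors above read and write paca->perf_counter_pending through
r13, which always points to the current CPU's paca on 64-bit powerpc,
so they are a single byte load or store and safe to use from the
exception itself.  A hedged sketch of how a PMU exception handler might
use them -- the handler name and its body are hypothetical; only the
accessors and perf_counter_do_pending() come from this patch:

    #include <asm/hw_irq.h>             /* set_perf_counter_pending() */
    #include <asm/ptrace.h>             /* struct pt_regs */

    /* Hypothetical handler name, illustrative only. */
    void hypothetical_pmu_exception(struct pt_regs *regs)
    {
            /* ... read the counters and record the sample here ... */

            /*
             * Wakeups are not safe at this point: interrupts may only be
             * soft-disabled, so this runs as an effective NMI.  Mark the
             * work pending; raw_local_irq_restore() or the interrupt
             * return path calls perf_counter_do_pending() once interrupts
             * are really enabled.
             */
            set_perf_counter_pending(1);
    }
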
index 082b3aedf145b4fe3d368f39829cc87911e9b62a..6ef055723019f3048cb14866b8eedddce179ba04 100644 (file)
@@ -99,6 +99,7 @@ struct paca_struct {
        u8 soft_enabled;                /* irq soft-enable flag */
        u8 hard_enabled;                /* set if irqs are enabled in MSR */
        u8 io_sync;                     /* writel() needs spin_unlock sync */
+       u8 perf_counter_pending;        /* PM interrupt while soft-disabled */
 
        /* Stuff for accurate time accounting */
        u64 user_time;                  /* accumulated usermode TB ticks */
index 661d07d2146bcbead9cb44e2a43aff1b5151e562..cea462900119f04779dc32fbdbfff1dec23a62e2 100644 (file)
@@ -127,6 +127,7 @@ int main(void)
        DEFINE(PACAKMSR, offsetof(struct paca_struct, kernel_msr));
        DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled));
        DEFINE(PACAHARDIRQEN, offsetof(struct paca_struct, hard_enabled));
+       DEFINE(PACAPERFPEND, offsetof(struct paca_struct, perf_counter_pending));
        DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
        DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
        DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
index 383ed6eb00850d2a7ab89b3b54b4677a3b9b1478..f30b4e553c53ba8276a313381240dead62e9cb34 100644 (file)
@@ -526,6 +526,15 @@ ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES)
 2:
        TRACE_AND_RESTORE_IRQ(r5);
 
+#ifdef CONFIG_PERF_COUNTERS
+       /* check paca->perf_counter_pending if we're enabling ints */
+       lbz     r3,PACAPERFPEND(r13)
+       and.    r3,r3,r5
+       beq     27f
+       bl      .perf_counter_do_pending
+27:
+#endif /* CONFIG_PERF_COUNTERS */
+
        /* extract EE bit and use it to restore paca->hard_enabled */
        ld      r3,_MSR(r1)
        rldicl  r4,r3,49,63             /* r4 = (r3 >> 15) & 1 */
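
In rough C terms, the instructions added to the interrupt return path
do the following (at this point r5 holds the soft-enable value being
restored; the variable name below is invented for illustration):

    /* illustrative C equivalent of the added assembly */
    if (get_perf_counter_pending() && restored_soft_enabled)
            perf_counter_do_pending();
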
index ac222d0ab12e86406bcbc6247adb3b8150ed49ee..4efb886ea4393e2f9aa90c24abc64813e7cfafbe 100644 (file)
@@ -104,6 +104,13 @@ static inline notrace void set_soft_enabled(unsigned long enable)
        : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
 }
 
+#ifdef CONFIG_PERF_COUNTERS
+notrace void __weak perf_counter_do_pending(void)
+{
+       set_perf_counter_pending(0);
+}
+#endif
+
 notrace void raw_local_irq_restore(unsigned long en)
 {
        /*
@@ -135,6 +142,9 @@ notrace void raw_local_irq_restore(unsigned long en)
                        iseries_handle_interrupts();
        }
 
+       if (get_perf_counter_pending())
+               perf_counter_do_pending();
+
        /*
         * if (get_paca()->hard_enabled) return;
         * But again we need to take care that gcc gets hard_enabled directly
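
The perf_counter_do_pending() stub above is marked __weak so that this
patch can stand alone: once the powerpc perf counter code provides a
real (non-weak) definition, the linker uses that one and the stub falls
away.  A minimal sketch of the shape such an override takes -- the body
here is invented for illustration, not the actual perf counter code:

    /* A strong definition elsewhere overrides the __weak stub. */
    void perf_counter_do_pending(void)
    {
            set_perf_counter_pending(0);    /* clear the flag first */
            /* ... then perform the wakeups deferred from the PM exception ... */
    }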