Merge branch 'perf/urgent' into perf/core
authorIngo Molnar <mingo@elte.hu>
Wed, 17 Mar 2010 10:31:45 +0000 (11:31 +0100)
committerIngo Molnar <mingo@elte.hu>
Wed, 17 Mar 2010 10:31:48 +0000 (11:31 +0100)
Merge reason: We'll be queueing dependent changes.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
36 files changed:
Documentation/kprobes.txt
arch/Kconfig
arch/x86/Kconfig
arch/x86/include/asm/insn.h
arch/x86/include/asm/kprobes.h
arch/x86/include/asm/perf_event.h
arch/x86/include/asm/perf_event_p4.h [new file with mode: 0644]
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/cpu/perf_event_amd.c
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/cpu/perf_event_intel_ds.c [new file with mode: 0644]
arch/x86/kernel/cpu/perf_event_intel_lbr.c [new file with mode: 0644]
arch/x86/kernel/cpu/perf_event_p4.c [new file with mode: 0644]
arch/x86/kernel/cpu/perf_event_p6.c
arch/x86/lib/Makefile
include/linux/perf_event.h
kernel/perf_event.c
tools/perf/Makefile
tools/perf/builtin-annotate.c
tools/perf/builtin-record.c
tools/perf/builtin-report.c
tools/perf/builtin-top.c
tools/perf/perf.c
tools/perf/perf.h
tools/perf/util/cache.h
tools/perf/util/color.c
tools/perf/util/debug.c
tools/perf/util/debug.h
tools/perf/util/hist.c
tools/perf/util/hist.h
tools/perf/util/include/linux/kernel.h
tools/perf/util/newt.c [new file with mode: 0644]
tools/perf/util/parse-events.c
tools/perf/util/session.h
tools/perf/util/symbol.c
tools/perf/util/symbol.h

index 2f9115c0ae627f044f4a22266cd7037c9ba10fd8..61c291cddf1873f1a17ac7d65e4404d6612b7b0d 100644 (file)
@@ -165,8 +165,8 @@ the user entry_handler invocation is also skipped.
 
 1.4 How Does Jump Optimization Work?
 
-If you configured your kernel with CONFIG_OPTPROBES=y (currently
-this option is supported on x86/x86-64, non-preemptive kernel) and
+If your kernel is built with CONFIG_OPTPROBES=y (currently this flag
+is automatically set to 'y' on x86/x86-64 with a non-preemptive kernel) and
 the "debug.kprobes_optimization" kernel parameter is set to 1 (see
 sysctl(8)), Kprobes tries to reduce probe-hit overhead by using a jump
 instruction instead of a breakpoint instruction at each probepoint.
@@ -271,8 +271,6 @@ tweak the kernel's execution path, you need to suppress optimization,
 using one of the following techniques:
 - Specify an empty function for the kprobe's post_handler or break_handler.
  or
-- Config CONFIG_OPTPROBES=n.
- or
 - Execute 'sysctl -w debug.kprobes_optimization=n'
 
 2. Architectures Supported
@@ -307,10 +305,6 @@ it useful to "Compile the kernel with debug info" (CONFIG_DEBUG_INFO),
 so you can use "objdump -d -l vmlinux" to see the source-to-object
 code mapping.
 
-If you want to reduce probing overhead, set "Kprobes jump optimization
-support" (CONFIG_OPTPROBES) to "y". You can find this option under the
-"Kprobes" line.
-
 4. API Reference
 
 The Kprobes API includes a "register" function and an "unregister"
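As a concrete illustration of the optimization-suppression technique above, here
is a minimal sketch (editorial, not part of this patch; the probed symbol and
module boilerplate are illustrative) of a module that registers a kprobe with an
empty post_handler so the probe stays unoptimized:

#include <linux/module.h>
#include <linux/kprobes.h>

/* An empty post_handler is enough to keep this probe unoptimized. */
static void dummy_post_handler(struct kprobe *p, struct pt_regs *regs,
			       unsigned long flags)
{
}

static struct kprobe kp = {
	.symbol_name	= "do_fork",		/* illustrative probepoint */
	.post_handler	= dummy_post_handler,
};

static int __init probe_init(void)
{
	return register_kprobe(&kp);
}

static void __exit probe_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(probe_init);
module_exit(probe_exit);
MODULE_LICENSE("GPL");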
index e5eb1337a5377f5b131b2ba70efd8263b8057404..f06010fb48381d7a0ecfdfdc3c9a833a0da5cc0c 100644 (file)
@@ -42,15 +42,10 @@ config KPROBES
          If in doubt, say "N".
 
 config OPTPROBES
-       bool "Kprobes jump optimization support (EXPERIMENTAL)"
-       default y
-       depends on KPROBES
+       def_bool y
+       depends on KPROBES && HAVE_OPTPROBES
        depends on !PREEMPT
-       depends on HAVE_OPTPROBES
        select KALLSYMS_ALL
-       help
-         This option will allow kprobes to optimize breakpoint to
-         a jump for reducing its overhead.
 
 config HAVE_EFFICIENT_UNALIGNED_ACCESS
        bool
index e98440371525a968c1afa04cc69350401c497ec0..e1240f652a9b89f4dd8a77b700928acf1a74d1ea 100644 (file)
@@ -58,6 +58,9 @@ config X86
        select HAVE_ARCH_KMEMCHECK
        select HAVE_USER_RETURN_NOTIFIER
 
+config INSTRUCTION_DECODER
+       def_bool (KPROBES || PERF_EVENTS)
+
 config OUTPUT_FORMAT
        string
        default "elf32-i386" if X86_32
index 96c2e0ad04ca04f94d2a7974007dd647a4142549..88c765e16410beecf7be2bfdad19b0cf4918ec46 100644 (file)
@@ -68,6 +68,8 @@ struct insn {
        const insn_byte_t *next_byte;
 };
 
+#define MAX_INSN_SIZE  16
+
 #define X86_MODRM_MOD(modrm) (((modrm) & 0xc0) >> 6)
 #define X86_MODRM_REG(modrm) (((modrm) & 0x38) >> 3)
 #define X86_MODRM_RM(modrm) ((modrm) & 0x07)
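For orientation (an editorial example, not part of the patch): given a ModRM
byte of 0xd1, the helpers above decode as follows.

u8 modrm = 0xd1;			/* binary 11 010 001 */

int mod = X86_MODRM_MOD(modrm);		/* (0xd1 & 0xc0) >> 6 == 3 */
int reg = X86_MODRM_REG(modrm);		/* (0xd1 & 0x38) >> 3 == 2 */
int rm  = X86_MODRM_RM(modrm);		/* 0xd1 & 0x07 == 1        */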
index 4ffa345a8ccbd7d2b7db8debd7caa090aceaf8a5..54788253915739a2a07faa87f8186ab261e0a065 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/types.h>
 #include <linux/ptrace.h>
 #include <linux/percpu.h>
+#include <asm/insn.h>
 
 #define  __ARCH_WANT_KPROBES_INSN_SLOT
 
@@ -36,7 +37,6 @@ typedef u8 kprobe_opcode_t;
 #define RELATIVEJUMP_SIZE 5
 #define RELATIVECALL_OPCODE 0xe8
 #define RELATIVE_ADDR_SIZE 4
-#define MAX_INSN_SIZE 16
 #define MAX_STACK_SIZE 64
 #define MIN_STACK_SIZE(ADDR)                                          \
        (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
index db6109a885a76a6ecd116c59baae0088007ffc45..124dddd598f3ed7b7d1063b97a5a193733b2cdc3 100644 (file)
@@ -5,7 +5,7 @@
  * Performance event hw details:
  */
 
-#define X86_PMC_MAX_GENERIC                                    8
+#define X86_PMC_MAX_GENERIC                                   32
 #define X86_PMC_MAX_FIXED                                      3
 
 #define X86_PMC_IDX_GENERIC                                    0
@@ -136,6 +136,25 @@ extern void perf_events_lapic_init(void);
 
 #define PERF_EVENT_INDEX_OFFSET                        0
 
+/*
+ * Abuse bit 3 of the cpu eflags register to indicate proper PEBS IP fixups.
+ * This flag is otherwise unused and ABI specified to be 0, so nobody should
+ * care what we do with it.
+ */
+#define PERF_EFLAGS_EXACT      (1UL << 3)
+
+#define perf_misc_flags(regs)                          \
+({     int misc = 0;                                   \
+       if (user_mode(regs))                            \
+               misc |= PERF_RECORD_MISC_USER;          \
+       else                                            \
+               misc |= PERF_RECORD_MISC_KERNEL;        \
+       if (regs->flags & PERF_EFLAGS_EXACT)            \
+               misc |= PERF_RECORD_MISC_EXACT;         \
+       misc; })
+
+#define perf_instruction_pointer(regs) ((regs)->ip)
+
 #else
 static inline void init_hw_perf_events(void)           { }
 static inline void perf_events_lapic_init(void)        { }
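On the consumer side, the EXACT bit ends up in the misc field of each sample's
record header. A hedged sketch (assuming the PERF_RECORD_MISC_EXACT definition
this series adds to include/linux/perf_event.h):

#include <linux/perf_event.h>

/* Returns nonzero when PEBS could fix the IP up to the exact instruction. */
static int sample_ip_is_exact(const struct perf_event_header *hdr)
{
	return !!(hdr->misc & PERF_RECORD_MISC_EXACT);
}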
diff --git a/arch/x86/include/asm/perf_event_p4.h b/arch/x86/include/asm/perf_event_p4.h
new file mode 100644 (file)
index 0000000..b842b32
--- /dev/null
@@ -0,0 +1,708 @@
+/*
+ * NetBurst Performance Events (P4, old Xeon)
+ */
+
+#ifndef PERF_EVENT_P4_H
+#define PERF_EVENT_P4_H
+
+#include <linux/cpu.h>
+#include <linux/bitops.h>
+
+/*
+ * NetBurst has performance MSRs shared between
+ * threads if HT is turned on, i.e. for both logical
+ * processors (note: on Atom with HT support the
+ * perf-MSRs are not shared and every thread has its
+ * own set of perf-MSRs)
+ */
+#define ARCH_P4_TOTAL_ESCR             (46)
+#define ARCH_P4_RESERVED_ESCR          (2) /* IQ_ESCR(0,1) not always present */
+#define ARCH_P4_MAX_ESCR               (ARCH_P4_TOTAL_ESCR - ARCH_P4_RESERVED_ESCR)
+#define ARCH_P4_MAX_CCCR               (18)
+#define ARCH_P4_MAX_COUNTER            (ARCH_P4_MAX_CCCR / 2)
+
+#define P4_EVNTSEL_EVENT_MASK          0x7e000000U
+#define P4_EVNTSEL_EVENT_SHIFT         25
+#define P4_EVNTSEL_EVENTMASK_MASK      0x01fffe00U
+#define P4_EVNTSEL_EVENTMASK_SHIFT     9
+#define P4_EVNTSEL_TAG_MASK            0x000001e0U
+#define P4_EVNTSEL_TAG_SHIFT           5
+#define P4_EVNTSEL_TAG_ENABLE          0x00000010U
+#define P4_EVNTSEL_T0_OS               0x00000008U
+#define P4_EVNTSEL_T0_USR              0x00000004U
+#define P4_EVNTSEL_T1_OS               0x00000002U
+#define P4_EVNTSEL_T1_USR              0x00000001U
+
+/* Non HT mask */
+#define P4_EVNTSEL_MASK                                \
+       (P4_EVNTSEL_EVENT_MASK          |       \
+       P4_EVNTSEL_EVENTMASK_MASK       |       \
+       P4_EVNTSEL_TAG_MASK             |       \
+       P4_EVNTSEL_TAG_ENABLE           |       \
+       P4_EVNTSEL_T0_OS                |       \
+       P4_EVNTSEL_T0_USR)
+
+/* HT mask */
+#define P4_EVNTSEL_MASK_HT                     \
+       (P4_EVNTSEL_MASK                |       \
+       P4_EVNTSEL_T1_OS                |       \
+       P4_EVNTSEL_T1_USR)
+
+#define P4_CCCR_OVF                    0x80000000U
+#define P4_CCCR_CASCADE                        0x40000000U
+#define P4_CCCR_OVF_PMI_T0             0x04000000U
+#define P4_CCCR_OVF_PMI_T1             0x08000000U
+#define P4_CCCR_FORCE_OVF              0x02000000U
+#define P4_CCCR_EDGE                   0x01000000U
+#define P4_CCCR_THRESHOLD_MASK         0x00f00000U
+#define P4_CCCR_THRESHOLD_SHIFT                20
+#define P4_CCCR_THRESHOLD(v)           ((v) << P4_CCCR_THRESHOLD_SHIFT)
+#define P4_CCCR_COMPLEMENT             0x00080000U
+#define P4_CCCR_COMPARE                        0x00040000U
+#define P4_CCCR_ESCR_SELECT_MASK       0x0000e000U
+#define P4_CCCR_ESCR_SELECT_SHIFT      13
+#define P4_CCCR_ENABLE                 0x00001000U
+#define P4_CCCR_THREAD_SINGLE          0x00010000U
+#define P4_CCCR_THREAD_BOTH            0x00020000U
+#define P4_CCCR_THREAD_ANY             0x00030000U
+
+/* Non HT mask */
+#define P4_CCCR_MASK                           \
+       (P4_CCCR_OVF                    |       \
+       P4_CCCR_CASCADE                 |       \
+       P4_CCCR_OVF_PMI_T0              |       \
+       P4_CCCR_FORCE_OVF               |       \
+       P4_CCCR_EDGE                    |       \
+       P4_CCCR_THRESHOLD_MASK          |       \
+       P4_CCCR_COMPLEMENT              |       \
+       P4_CCCR_COMPARE                 |       \
+       P4_CCCR_ESCR_SELECT_MASK        |       \
+       P4_CCCR_ENABLE)
+
+/* HT mask */
+#define P4_CCCR_MASK_HT                                \
+       (P4_CCCR_MASK                   |       \
+       P4_CCCR_THREAD_ANY)
+
+/*
+ * format is 32 bit: ee ss aa aa
+ * where
+ *     ee - 8 bit event
+ *     ss - 8 bit selector
+ *     aa aa - 16 bits reserved for tags/attributes
+ */
+#define P4_EVENT_PACK(event, selector)         (((event) << 24) | ((selector) << 16))
+#define P4_EVENT_UNPACK_EVENT(packed)          (((packed) >> 24) & 0xff)
+#define P4_EVENT_UNPACK_SELECTOR(packed)       (((packed) >> 16) & 0xff)
+#define P4_EVENT_PACK_ATTR(attr)               ((attr))
+#define P4_EVENT_UNPACK_ATTR(packed)           ((packed) & 0xffff)
+#define P4_MAKE_EVENT_ATTR(class, name, bit)   class##_##name = (1 << bit)
+#define P4_EVENT_ATTR(class, name)             class##_##name
+#define P4_EVENT_ATTR_STR(class, name)         __stringify(class##_##name)
+
+/*
+ * the config field is 64 bits wide and consists of
+ * HT << 63 | ESCR << 32 | CCCR
+ * where HT is the HyperThreading bit (since ESCR
+ * has it reserved we may use it for our own purposes)
+ *
+ * note that these are NOT the addresses of the respective
+ * ESCR and CCCR registers but rather a packed value, which
+ * should be unpacked and written to the proper addresses
+ *
+ * the basic idea is to pack as much info as
+ * possible
+ */
+#define p4_config_pack_escr(v)         (((u64)(v)) << 32)
+#define p4_config_pack_cccr(v)         (((u64)(v)) & 0xffffffffULL)
+#define p4_config_unpack_escr(v)       (((u64)(v)) >> 32)
+#define p4_config_unpack_cccr(v)       (((u64)(v)) & 0xffffffffULL)
+
+#define p4_config_unpack_emask(v)                      \
+       ({                                              \
+               u32 t = p4_config_unpack_escr((v));     \
+               t  &= P4_EVNTSEL_EVENTMASK_MASK;        \
+               t >>= P4_EVNTSEL_EVENTMASK_SHIFT;       \
+               t;                                      \
+       })
+
+#define P4_CONFIG_HT_SHIFT             63
+#define P4_CONFIG_HT                   (1ULL << P4_CONFIG_HT_SHIFT)
+
+static inline u32 p4_config_unpack_opcode(u64 config)
+{
+       u32 e, s;
+
+       /*
+        * we don't care about HT presence here since
+        * the event opcode doesn't depend on it
+        */
+       e = (p4_config_unpack_escr(config) & P4_EVNTSEL_EVENT_MASK) >> P4_EVNTSEL_EVENT_SHIFT;
+       s = (p4_config_unpack_cccr(config) & P4_CCCR_ESCR_SELECT_MASK) >> P4_CCCR_ESCR_SELECT_SHIFT;
+
+       return P4_EVENT_PACK(e, s);
+}
+
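A short sketch (illustrative values, editorial rather than from the patch) of
how the pack/unpack helpers above compose and round-trip:

u32 escr = P4_EVNTSEL_T0_OS | P4_EVNTSEL_T0_USR;
u32 cccr = P4_CCCR_ENABLE | P4_CCCR_THRESHOLD(1);

u64 config = p4_config_pack_escr(escr) | p4_config_pack_cccr(cccr);

/* Both halves survive the round-trip: */
BUG_ON(p4_config_unpack_escr(config) != escr);
BUG_ON(p4_config_unpack_cccr(config) != cccr);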
+static inline bool p4_is_event_cascaded(u64 config)
+{
+       u32 cccr = p4_config_unpack_cccr(config);
+       return !!(cccr & P4_CCCR_CASCADE);
+}
+
+static inline int p4_ht_config_thread(u64 config)
+{
+       return !!(config & P4_CONFIG_HT);
+}
+
+static inline u64 p4_set_ht_bit(u64 config)
+{
+       return config | P4_CONFIG_HT;
+}
+
+static inline u64 p4_clear_ht_bit(u64 config)
+{
+       return config & ~P4_CONFIG_HT;
+}
+
+static inline int p4_ht_active(void)
+{
+#ifdef CONFIG_SMP
+       return smp_num_siblings > 1;
+#endif
+       return 0;
+}
+
+static inline int p4_ht_thread(int cpu)
+{
+#ifdef CONFIG_SMP
+       if (smp_num_siblings == 2)
+               return cpu != cpumask_first(__get_cpu_var(cpu_sibling_map));
+#endif
+       return 0;
+}
+
+static inline int p4_should_swap_ts(u64 config, int cpu)
+{
+       return p4_ht_config_thread(config) ^ p4_ht_thread(cpu);
+}
+
+static inline u32 p4_default_cccr_conf(int cpu)
+{
+       /*
+        * Note that P4_CCCR_THREAD_ANY is "required" on
+        * non-HT machines (on HT machines we count TS events
+        * regardless of the state of the second logical processor)
+        */
+       u32 cccr = P4_CCCR_THREAD_ANY;
+
+       if (!p4_ht_thread(cpu))
+               cccr |= P4_CCCR_OVF_PMI_T0;
+       else
+               cccr |= P4_CCCR_OVF_PMI_T1;
+
+       return cccr;
+}
+
+static inline u32 p4_default_escr_conf(int cpu, int exclude_os, int exclude_usr)
+{
+       u32 escr = 0;
+
+       if (!p4_ht_thread(cpu)) {
+               if (!exclude_os)
+                       escr |= P4_EVNTSEL_T0_OS;
+               if (!exclude_usr)
+                       escr |= P4_EVNTSEL_T0_USR;
+       } else {
+               if (!exclude_os)
+                       escr |= P4_EVNTSEL_T1_OS;
+               if (!exclude_usr)
+                       escr |= P4_EVNTSEL_T1_USR;
+       }
+
+       return escr;
+}
+
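Taken together, the two helpers above yield a baseline per-CPU config; a hedged
sketch (the function name is hypothetical):

static u64 p4_default_config(int cpu)
{
	u32 escr = p4_default_escr_conf(cpu, 0, 0);	/* count OS and USR */
	u32 cccr = p4_default_cccr_conf(cpu);		/* PMI routing, THREAD_ANY */

	return p4_config_pack_escr(escr) | p4_config_pack_cccr(cccr);
}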
+/*
+ * The comments below each event describe the ESCR restriction
+ * for that event and the counter indexes per ESCR
+ *
+ * MSR_P4_IQ_ESCR0 and MSR_P4_IQ_ESCR1 are available only on early
+ * processor builds (family 0FH, models 01H-02H). These MSRs
+ * are not available on later versions, so we don't use
+ * them at all
+ *
+ * Also note that CCCR1 does not have the P4_CCCR_ENABLE bit
+ * working properly, so we should not use that CCCR or its
+ * respective counter as a result
+ */
+#define P4_TC_DELIVER_MODE             P4_EVENT_PACK(0x01, 0x01)
+       /*
+        * MSR_P4_TC_ESCR0:     4, 5
+        * MSR_P4_TC_ESCR1:     6, 7
+        */
+
+#define P4_BPU_FETCH_REQUEST           P4_EVENT_PACK(0x03, 0x00)
+       /*
+        * MSR_P4_BPU_ESCR0:    0, 1
+        * MSR_P4_BPU_ESCR1:    2, 3
+        */
+
+#define P4_ITLB_REFERENCE              P4_EVENT_PACK(0x18, 0x03)
+       /*
+        * MSR_P4_ITLB_ESCR0:   0, 1
+        * MSR_P4_ITLB_ESCR1:   2, 3
+        */
+
+#define P4_MEMORY_CANCEL               P4_EVENT_PACK(0x02, 0x05)
+       /*
+        * MSR_P4_DAC_ESCR0:    8, 9
+        * MSR_P4_DAC_ESCR1:    10, 11
+        */
+
+#define P4_MEMORY_COMPLETE             P4_EVENT_PACK(0x08, 0x02)
+       /*
+        * MSR_P4_SAAT_ESCR0:   8, 9
+        * MSR_P4_SAAT_ESCR1:   10, 11
+        */
+
+#define P4_LOAD_PORT_REPLAY            P4_EVENT_PACK(0x04, 0x02)
+       /*
+        * MSR_P4_SAAT_ESCR0:   8, 9
+        * MSR_P4_SAAT_ESCR1:   10, 11
+        */
+
+#define P4_STORE_PORT_REPLAY           P4_EVENT_PACK(0x05, 0x02)
+       /*
+        * MSR_P4_SAAT_ESCR0:   8, 9
+        * MSR_P4_SAAT_ESCR1:   10, 11
+        */
+
+#define P4_MOB_LOAD_REPLAY             P4_EVENT_PACK(0x03, 0x02)
+       /*
+        * MSR_P4_MOB_ESCR0:    0, 1
+        * MSR_P4_MOB_ESCR1:    2, 3
+        */
+
+#define P4_PAGE_WALK_TYPE              P4_EVENT_PACK(0x01, 0x04)
+       /*
+        * MSR_P4_PMH_ESCR0:    0, 1
+        * MSR_P4_PMH_ESCR1:    2, 3
+        */
+
+#define P4_BSQ_CACHE_REFERENCE         P4_EVENT_PACK(0x0c, 0x07)
+       /*
+        * MSR_P4_BSU_ESCR0:    0, 1
+        * MSR_P4_BSU_ESCR1:    2, 3
+        */
+
+#define P4_IOQ_ALLOCATION              P4_EVENT_PACK(0x03, 0x06)
+       /*
+        * MSR_P4_FSB_ESCR0:    0, 1
+        * MSR_P4_FSB_ESCR1:    2, 3
+        */
+
+#define P4_IOQ_ACTIVE_ENTRIES          P4_EVENT_PACK(0x1a, 0x06)
+       /*
+        * MSR_P4_FSB_ESCR1:    2, 3
+        */
+
+#define P4_FSB_DATA_ACTIVITY           P4_EVENT_PACK(0x17, 0x06)
+       /*
+        * MSR_P4_FSB_ESCR0:    0, 1
+        * MSR_P4_FSB_ESCR1:    2, 3
+        */
+
+#define P4_BSQ_ALLOCATION              P4_EVENT_PACK(0x05, 0x07)
+       /*
+        * MSR_P4_BSU_ESCR0:    0, 1
+        */
+
+#define P4_BSQ_ACTIVE_ENTRIES          P4_EVENT_PACK(0x06, 0x07)
+       /*
+        * NOTE: no ESCR name in docs, it's guessed
+        * MSR_P4_BSU_ESCR1:    2, 3
+        */
+
+#define P4_SSE_INPUT_ASSIST            P4_EVENT_PACK(0x34, 0x01)
+       /*
+        * MSR_P4_FIRM_ESCR0:   8, 9
+        * MSR_P4_FIRM_ESCR1:   10, 11
+        */
+
+#define P4_PACKED_SP_UOP               P4_EVENT_PACK(0x08, 0x01)
+       /*
+        * MSR_P4_FIRM_ESCR0:   8, 9
+        * MSR_P4_FIRM_ESCR1:   10, 11
+        */
+
+#define P4_PACKED_DP_UOP               P4_EVENT_PACK(0x0c, 0x01)
+       /*
+        * MSR_P4_FIRM_ESCR0:   8, 9
+        * MSR_P4_FIRM_ESCR1:   10, 11
+        */
+
+#define P4_SCALAR_SP_UOP               P4_EVENT_PACK(0x0a, 0x01)
+       /*
+        * MSR_P4_FIRM_ESCR0:   8, 9
+        * MSR_P4_FIRM_ESCR1:   10, 11
+        */
+
+#define P4_SCALAR_DP_UOP               P4_EVENT_PACK(0x0e, 0x01)
+       /*
+        * MSR_P4_FIRM_ESCR0:   8, 9
+        * MSR_P4_FIRM_ESCR1:   10, 11
+        */
+
+#define P4_64BIT_MMX_UOP               P4_EVENT_PACK(0x02, 0x01)
+       /*
+        * MSR_P4_FIRM_ESCR0:   8, 9
+        * MSR_P4_FIRM_ESCR1:   10, 11
+        */
+
+#define P4_128BIT_MMX_UOP              P4_EVENT_PACK(0x1a, 0x01)
+       /*
+        * MSR_P4_FIRM_ESCR0:   8, 9
+        * MSR_P4_FIRM_ESCR1:   10, 11
+        */
+
+#define P4_X87_FP_UOP                  P4_EVENT_PACK(0x04, 0x01)
+       /*
+        * MSR_P4_FIRM_ESCR0:   8, 9
+        * MSR_P4_FIRM_ESCR1:   10, 11
+        */
+
+#define P4_TC_MISC                     P4_EVENT_PACK(0x06, 0x01)
+       /*
+        * MSR_P4_TC_ESCR0:     4, 5
+        * MSR_P4_TC_ESCR1:     6, 7
+        */
+
+#define P4_GLOBAL_POWER_EVENTS         P4_EVENT_PACK(0x13, 0x06)
+       /*
+        * MSR_P4_FSB_ESCR0:    0, 1
+        * MSR_P4_FSB_ESCR1:    2, 3
+        */
+
+#define P4_TC_MS_XFER                  P4_EVENT_PACK(0x05, 0x00)
+       /*
+        * MSR_P4_MS_ESCR0:     4, 5
+        * MSR_P4_MS_ESCR1:     6, 7
+        */
+
+#define P4_UOP_QUEUE_WRITES            P4_EVENT_PACK(0x09, 0x00)
+       /*
+        * MSR_P4_MS_ESCR0:     4, 5
+        * MSR_P4_MS_ESCR1:     6, 7
+        */
+
+#define P4_RETIRED_MISPRED_BRANCH_TYPE P4_EVENT_PACK(0x05, 0x02)
+       /*
+        * MSR_P4_TBPU_ESCR0:   4, 5
+        * MSR_P4_TBPU_ESCR1:   6, 7
+        */
+
+#define P4_RETIRED_BRANCH_TYPE         P4_EVENT_PACK(0x04, 0x02)
+       /*
+        * MSR_P4_TBPU_ESCR0:   4, 5
+        * MSR_P4_TBPU_ESCR1:   6, 7
+        */
+
+#define P4_RESOURCE_STALL              P4_EVENT_PACK(0x01, 0x01)
+       /*
+        * MSR_P4_ALF_ESCR0:    12, 13, 16
+        * MSR_P4_ALF_ESCR1:    14, 15, 17
+        */
+
+#define P4_WC_BUFFER                   P4_EVENT_PACK(0x05, 0x05)
+       /*
+        * MSR_P4_DAC_ESCR0:    8, 9
+        * MSR_P4_DAC_ESCR1:    10, 11
+        */
+
+#define P4_B2B_CYCLES                  P4_EVENT_PACK(0x16, 0x03)
+       /*
+        * MSR_P4_FSB_ESCR0:    0, 1
+        * MSR_P4_FSB_ESCR1:    2, 3
+        */
+
+#define P4_BNR                         P4_EVENT_PACK(0x08, 0x03)
+       /*
+        * MSR_P4_FSB_ESCR0:    0, 1
+        * MSR_P4_FSB_ESCR1:    2, 3
+        */
+
+#define P4_SNOOP                       P4_EVENT_PACK(0x06, 0x03)
+       /*
+        * MSR_P4_FSB_ESCR0:    0, 1
+        * MSR_P4_FSB_ESCR1:    2, 3
+        */
+
+#define P4_RESPONSE                    P4_EVENT_PACK(0x04, 0x03)
+       /*
+        * MSR_P4_FSB_ESCR0:    0, 1
+        * MSR_P4_FSB_ESCR1:    2, 3
+        */
+
+#define P4_FRONT_END_EVENT             P4_EVENT_PACK(0x08, 0x05)
+       /*
+        * MSR_P4_CRU_ESCR2:    12, 13, 16
+        * MSR_P4_CRU_ESCR3:    14, 15, 17
+        */
+
+#define P4_EXECUTION_EVENT             P4_EVENT_PACK(0x0c, 0x05)
+       /*
+        * MSR_P4_CRU_ESCR2:    12, 13, 16
+        * MSR_P4_CRU_ESCR3:    14, 15, 17
+        */
+
+#define P4_REPLAY_EVENT                        P4_EVENT_PACK(0x09, 0x05)
+       /*
+        * MSR_P4_CRU_ESCR2:    12, 13, 16
+        * MSR_P4_CRU_ESCR3:    14, 15, 17
+        */
+
+#define P4_INSTR_RETIRED               P4_EVENT_PACK(0x02, 0x04)
+       /*
+        * MSR_P4_CRU_ESCR0:    12, 13, 16
+        * MSR_P4_CRU_ESCR1:    14, 15, 17
+        */
+
+#define P4_UOPS_RETIRED                        P4_EVENT_PACK(0x01, 0x04)
+       /*
+        * MSR_P4_CRU_ESCR0:    12, 13, 16
+        * MSR_P4_CRU_ESCR1:    14, 15, 17
+        */
+
+#define P4_UOP_TYPE                    P4_EVENT_PACK(0x02, 0x02)
+       /*
+        * MSR_P4_RAT_ESCR0:    12, 13, 16
+        * MSR_P4_RAT_ESCR1:    14, 15, 17
+        */
+
+#define P4_BRANCH_RETIRED              P4_EVENT_PACK(0x06, 0x05)
+       /*
+        * MSR_P4_CRU_ESCR2:    12, 13, 16
+        * MSR_P4_CRU_ESCR3:    14, 15, 17
+        */
+
+#define P4_MISPRED_BRANCH_RETIRED      P4_EVENT_PACK(0x03, 0x04)
+       /*
+        * MSR_P4_CRU_ESCR0:    12, 13, 16
+        * MSR_P4_CRU_ESCR1:    14, 15, 17
+        */
+
+#define P4_X87_ASSIST                  P4_EVENT_PACK(0x03, 0x05)
+       /*
+        * MSR_P4_CRU_ESCR2:    12, 13, 16
+        * MSR_P4_CRU_ESCR3:    14, 15, 17
+        */
+
+#define P4_MACHINE_CLEAR               P4_EVENT_PACK(0x02, 0x05)
+       /*
+        * MSR_P4_CRU_ESCR2:    12, 13, 16
+        * MSR_P4_CRU_ESCR3:    14, 15, 17
+        */
+
+#define P4_INSTR_COMPLETED             P4_EVENT_PACK(0x07, 0x04)
+       /*
+        * MSR_P4_CRU_ESCR0:    12, 13, 16
+        * MSR_P4_CRU_ESCR1:    14, 15, 17
+        */
+
+/*
+ * a caller should use the P4_EVENT_ATTR helper to
+ * pick the attribute needed, for example
+ *
+ *     P4_EVENT_ATTR(P4_TC_DELIVER_MODE, DD)
+ */
+enum P4_EVENTS_ATTR {
+       P4_MAKE_EVENT_ATTR(P4_TC_DELIVER_MODE, DD, 0),
+       P4_MAKE_EVENT_ATTR(P4_TC_DELIVER_MODE, DB, 1),
+       P4_MAKE_EVENT_ATTR(P4_TC_DELIVER_MODE, DI, 2),
+       P4_MAKE_EVENT_ATTR(P4_TC_DELIVER_MODE, BD, 3),
+       P4_MAKE_EVENT_ATTR(P4_TC_DELIVER_MODE, BB, 4),
+       P4_MAKE_EVENT_ATTR(P4_TC_DELIVER_MODE, BI, 5),
+       P4_MAKE_EVENT_ATTR(P4_TC_DELIVER_MODE, ID, 6),
+
+       P4_MAKE_EVENT_ATTR(P4_BPU_FETCH_REQUEST, TCMISS, 0),
+
+       P4_MAKE_EVENT_ATTR(P4_ITLB_REFERENCE, HIT, 0),
+       P4_MAKE_EVENT_ATTR(P4_ITLB_REFERENCE, MISS, 1),
+       P4_MAKE_EVENT_ATTR(P4_ITLB_REFERENCE, HIT_UK, 2),
+
+       P4_MAKE_EVENT_ATTR(P4_MEMORY_CANCEL, ST_RB_FULL, 2),
+       P4_MAKE_EVENT_ATTR(P4_MEMORY_CANCEL, 64K_CONF, 3),
+
+       P4_MAKE_EVENT_ATTR(P4_MEMORY_COMPLETE, LSC, 0),
+       P4_MAKE_EVENT_ATTR(P4_MEMORY_COMPLETE, SSC, 1),
+
+       P4_MAKE_EVENT_ATTR(P4_LOAD_PORT_REPLAY, SPLIT_LD, 1),
+
+       P4_MAKE_EVENT_ATTR(P4_STORE_PORT_REPLAY, SPLIT_ST, 1),
+
+       P4_MAKE_EVENT_ATTR(P4_MOB_LOAD_REPLAY, NO_STA, 1),
+       P4_MAKE_EVENT_ATTR(P4_MOB_LOAD_REPLAY, NO_STD, 3),
+       P4_MAKE_EVENT_ATTR(P4_MOB_LOAD_REPLAY, PARTIAL_DATA, 4),
+       P4_MAKE_EVENT_ATTR(P4_MOB_LOAD_REPLAY, UNALGN_ADDR, 5),
+
+       P4_MAKE_EVENT_ATTR(P4_PAGE_WALK_TYPE, DTMISS, 0),
+       P4_MAKE_EVENT_ATTR(P4_PAGE_WALK_TYPE, ITMISS, 1),
+
+       P4_MAKE_EVENT_ATTR(P4_BSQ_CACHE_REFERENCE, RD_2ndL_HITS, 0),
+       P4_MAKE_EVENT_ATTR(P4_BSQ_CACHE_REFERENCE, RD_2ndL_HITE, 1),
+       P4_MAKE_EVENT_ATTR(P4_BSQ_CACHE_REFERENCE, RD_2ndL_HITM, 2),
+       P4_MAKE_EVENT_ATTR(P4_BSQ_CACHE_REFERENCE, RD_3rdL_HITS, 3),
+       P4_MAKE_EVENT_ATTR(P4_BSQ_CACHE_REFERENCE, RD_3rdL_HITE, 4),
+       P4_MAKE_EVENT_ATTR(P4_BSQ_CACHE_REFERENCE, RD_3rdL_HITM, 5),
+       P4_MAKE_EVENT_ATTR(P4_BSQ_CACHE_REFERENCE, RD_2ndL_MISS, 8),
+       P4_MAKE_EVENT_ATTR(P4_BSQ_CACHE_REFERENCE, RD_3rdL_MISS, 9),
+       P4_MAKE_EVENT_ATTR(P4_BSQ_CACHE_REFERENCE, WR_2ndL_MISS, 10),
+
+       P4_MAKE_EVENT_ATTR(P4_IOQ_ALLOCATION, DEFAULT, 0),
+       P4_MAKE_EVENT_ATTR(P4_IOQ_ALLOCATION, ALL_READ, 5),
+       P4_MAKE_EVENT_ATTR(P4_IOQ_ALLOCATION, ALL_WRITE, 6),
+       P4_MAKE_EVENT_ATTR(P4_IOQ_ALLOCATION, MEM_UC, 7),
+       P4_MAKE_EVENT_ATTR(P4_IOQ_ALLOCATION, MEM_WC, 8),
+       P4_MAKE_EVENT_ATTR(P4_IOQ_ALLOCATION, MEM_WT, 9),
+       P4_MAKE_EVENT_ATTR(P4_IOQ_ALLOCATION, MEM_WP, 10),
+       P4_MAKE_EVENT_ATTR(P4_IOQ_ALLOCATION, MEM_WB, 11),
+       P4_MAKE_EVENT_ATTR(P4_IOQ_ALLOCATION, OWN, 13),
+       P4_MAKE_EVENT_ATTR(P4_IOQ_ALLOCATION, OTHER, 14),
+       P4_MAKE_EVENT_ATTR(P4_IOQ_ALLOCATION, PREFETCH, 15),
+
+       P4_MAKE_EVENT_ATTR(P4_IOQ_ACTIVE_ENTRIES, DEFAULT, 0),
+       P4_MAKE_EVENT_ATTR(P4_IOQ_ACTIVE_ENTRIES, ALL_READ, 5),
+       P4_MAKE_EVENT_ATTR(P4_IOQ_ACTIVE_ENTRIES, ALL_WRITE, 6),
+       P4_MAKE_EVENT_ATTR(P4_IOQ_ACTIVE_ENTRIES, MEM_UC, 7),
+       P4_MAKE_EVENT_ATTR(P4_IOQ_ACTIVE_ENTRIES, MEM_WC, 8),
+       P4_MAKE_EVENT_ATTR(P4_IOQ_ACTIVE_ENTRIES, MEM_WT, 9),
+       P4_MAKE_EVENT_ATTR(P4_IOQ_ACTIVE_ENTRIES, MEM_WP, 10),
+       P4_MAKE_EVENT_ATTR(P4_IOQ_ACTIVE_ENTRIES, MEM_WB, 11),
+       P4_MAKE_EVENT_ATTR(P4_IOQ_ACTIVE_ENTRIES, OWN, 13),
+       P4_MAKE_EVENT_ATTR(P4_IOQ_ACTIVE_ENTRIES, OTHER, 14),
+       P4_MAKE_EVENT_ATTR(P4_IOQ_ACTIVE_ENTRIES, PREFETCH, 15),
+
+       P4_MAKE_EVENT_ATTR(P4_FSB_DATA_ACTIVITY, DRDY_DRV, 0),
+       P4_MAKE_EVENT_ATTR(P4_FSB_DATA_ACTIVITY, DRDY_OWN, 1),
+       P4_MAKE_EVENT_ATTR(P4_FSB_DATA_ACTIVITY, DRDY_OTHER, 2),
+       P4_MAKE_EVENT_ATTR(P4_FSB_DATA_ACTIVITY, DBSY_DRV, 3),
+       P4_MAKE_EVENT_ATTR(P4_FSB_DATA_ACTIVITY, DBSY_OWN, 4),
+       P4_MAKE_EVENT_ATTR(P4_FSB_DATA_ACTIVITY, DBSY_OTHER, 5),
+
+       P4_MAKE_EVENT_ATTR(P4_BSQ_ALLOCATION, REQ_TYPE0, 0),
+       P4_MAKE_EVENT_ATTR(P4_BSQ_ALLOCATION, REQ_TYPE1, 1),
+       P4_MAKE_EVENT_ATTR(P4_BSQ_ALLOCATION, REQ_LEN0, 2),
+       P4_MAKE_EVENT_ATTR(P4_BSQ_ALLOCATION, REQ_LEN1, 3),
+       P4_MAKE_EVENT_ATTR(P4_BSQ_ALLOCATION, REQ_IO_TYPE, 5),
+       P4_MAKE_EVENT_ATTR(P4_BSQ_ALLOCATION, REQ_LOCK_TYPE, 6),
+       P4_MAKE_EVENT_ATTR(P4_BSQ_ALLOCATION, REQ_CACHE_TYPE, 7),
+       P4_MAKE_EVENT_ATTR(P4_BSQ_ALLOCATION, REQ_SPLIT_TYPE, 8),
+       P4_MAKE_EVENT_ATTR(P4_BSQ_ALLOCATION, REQ_DEM_TYPE, 9),
+       P4_MAKE_EVENT_ATTR(P4_BSQ_ALLOCATION, REQ_ORD_TYPE, 10),
+       P4_MAKE_EVENT_ATTR(P4_BSQ_ALLOCATION, MEM_TYPE0, 11),
+       P4_MAKE_EVENT_ATTR(P4_BSQ_ALLOCATION, MEM_TYPE1, 12),
+       P4_MAKE_EVENT_ATTR(P4_BSQ_ALLOCATION, MEM_TYPE2, 13),
+
+       P4_MAKE_EVENT_ATTR(P4_BSQ_ACTIVE_ENTRIES, REQ_TYPE0, 0),
+       P4_MAKE_EVENT_ATTR(P4_BSQ_ACTIVE_ENTRIES, REQ_TYPE1, 1),
+       P4_MAKE_EVENT_ATTR(P4_BSQ_ACTIVE_ENTRIES, REQ_LEN0, 2),
+       P4_MAKE_EVENT_ATTR(P4_BSQ_ACTIVE_ENTRIES, REQ_LEN1, 3),
+       P4_MAKE_EVENT_ATTR(P4_BSQ_ACTIVE_ENTRIES, REQ_IO_TYPE, 5),
+       P4_MAKE_EVENT_ATTR(P4_BSQ_ACTIVE_ENTRIES, REQ_LOCK_TYPE, 6),
+       P4_MAKE_EVENT_ATTR(P4_BSQ_ACTIVE_ENTRIES, REQ_CACHE_TYPE, 7),
+       P4_MAKE_EVENT_ATTR(P4_BSQ_ACTIVE_ENTRIES, REQ_SPLIT_TYPE, 8),
+       P4_MAKE_EVENT_ATTR(P4_BSQ_ACTIVE_ENTRIES, REQ_DEM_TYPE, 9),
+       P4_MAKE_EVENT_ATTR(P4_BSQ_ACTIVE_ENTRIES, REQ_ORD_TYPE, 10),
+       P4_MAKE_EVENT_ATTR(P4_BSQ_ACTIVE_ENTRIES, MEM_TYPE0, 11),
+       P4_MAKE_EVENT_ATTR(P4_BSQ_ACTIVE_ENTRIES, MEM_TYPE1, 12),
+       P4_MAKE_EVENT_ATTR(P4_BSQ_ACTIVE_ENTRIES, MEM_TYPE2, 13),
+
+       P4_MAKE_EVENT_ATTR(P4_SSE_INPUT_ASSIST, ALL, 15),
+
+       P4_MAKE_EVENT_ATTR(P4_PACKED_SP_UOP, ALL, 15),
+
+       P4_MAKE_EVENT_ATTR(P4_PACKED_DP_UOP, ALL, 15),
+
+       P4_MAKE_EVENT_ATTR(P4_SCALAR_SP_UOP, ALL, 15),
+
+       P4_MAKE_EVENT_ATTR(P4_SCALAR_DP_UOP, ALL, 15),
+
+       P4_MAKE_EVENT_ATTR(P4_64BIT_MMX_UOP, ALL, 15),
+
+       P4_MAKE_EVENT_ATTR(P4_128BIT_MMX_UOP, ALL, 15),
+
+       P4_MAKE_EVENT_ATTR(P4_X87_FP_UOP, ALL, 15),
+
+       P4_MAKE_EVENT_ATTR(P4_TC_MISC, FLUSH, 4),
+
+       P4_MAKE_EVENT_ATTR(P4_GLOBAL_POWER_EVENTS, RUNNING, 0),
+
+       P4_MAKE_EVENT_ATTR(P4_TC_MS_XFER, CISC, 0),
+
+       P4_MAKE_EVENT_ATTR(P4_UOP_QUEUE_WRITES, FROM_TC_BUILD, 0),
+       P4_MAKE_EVENT_ATTR(P4_UOP_QUEUE_WRITES, FROM_TC_DELIVER, 1),
+       P4_MAKE_EVENT_ATTR(P4_UOP_QUEUE_WRITES, FROM_ROM, 2),
+
+       P4_MAKE_EVENT_ATTR(P4_RETIRED_MISPRED_BRANCH_TYPE, CONDITIONAL, 1),
+       P4_MAKE_EVENT_ATTR(P4_RETIRED_MISPRED_BRANCH_TYPE, CALL, 2),
+       P4_MAKE_EVENT_ATTR(P4_RETIRED_MISPRED_BRANCH_TYPE, RETURN, 3),
+       P4_MAKE_EVENT_ATTR(P4_RETIRED_MISPRED_BRANCH_TYPE, INDIRECT, 4),
+
+       P4_MAKE_EVENT_ATTR(P4_RETIRED_BRANCH_TYPE, CONDITIONAL, 1),
+       P4_MAKE_EVENT_ATTR(P4_RETIRED_BRANCH_TYPE, CALL, 2),
+       P4_MAKE_EVENT_ATTR(P4_RETIRED_BRANCH_TYPE, RETURN, 3),
+       P4_MAKE_EVENT_ATTR(P4_RETIRED_BRANCH_TYPE, INDIRECT, 4),
+
+       P4_MAKE_EVENT_ATTR(P4_RESOURCE_STALL, SBFULL, 5),
+
+       P4_MAKE_EVENT_ATTR(P4_WC_BUFFER, WCB_EVICTS, 0),
+       P4_MAKE_EVENT_ATTR(P4_WC_BUFFER, WCB_FULL_EVICTS, 1),
+
+       P4_MAKE_EVENT_ATTR(P4_FRONT_END_EVENT, NBOGUS, 0),
+       P4_MAKE_EVENT_ATTR(P4_FRONT_END_EVENT, BOGUS, 1),
+
+       P4_MAKE_EVENT_ATTR(P4_EXECUTION_EVENT, NBOGUS0, 0),
+       P4_MAKE_EVENT_ATTR(P4_EXECUTION_EVENT, NBOGUS1, 1),
+       P4_MAKE_EVENT_ATTR(P4_EXECUTION_EVENT, NBOGUS2, 2),
+       P4_MAKE_EVENT_ATTR(P4_EXECUTION_EVENT, NBOGUS3, 3),
+       P4_MAKE_EVENT_ATTR(P4_EXECUTION_EVENT, BOGUS0, 4),
+       P4_MAKE_EVENT_ATTR(P4_EXECUTION_EVENT, BOGUS1, 5),
+       P4_MAKE_EVENT_ATTR(P4_EXECUTION_EVENT, BOGUS2, 6),
+       P4_MAKE_EVENT_ATTR(P4_EXECUTION_EVENT, BOGUS3, 7),
+
+       P4_MAKE_EVENT_ATTR(P4_REPLAY_EVENT, NBOGUS, 0),
+       P4_MAKE_EVENT_ATTR(P4_REPLAY_EVENT, BOGUS, 1),
+
+       P4_MAKE_EVENT_ATTR(P4_INSTR_RETIRED, NBOGUSNTAG, 0),
+       P4_MAKE_EVENT_ATTR(P4_INSTR_RETIRED, NBOGUSTAG, 1),
+       P4_MAKE_EVENT_ATTR(P4_INSTR_RETIRED, BOGUSNTAG, 2),
+       P4_MAKE_EVENT_ATTR(P4_INSTR_RETIRED, BOGUSTAG, 3),
+
+       P4_MAKE_EVENT_ATTR(P4_UOPS_RETIRED, NBOGUS, 0),
+       P4_MAKE_EVENT_ATTR(P4_UOPS_RETIRED, BOGUS, 1),
+
+       P4_MAKE_EVENT_ATTR(P4_UOP_TYPE, TAGLOADS, 1),
+       P4_MAKE_EVENT_ATTR(P4_UOP_TYPE, TAGSTORES, 2),
+
+       P4_MAKE_EVENT_ATTR(P4_BRANCH_RETIRED, MMNP, 0),
+       P4_MAKE_EVENT_ATTR(P4_BRANCH_RETIRED, MMNM, 1),
+       P4_MAKE_EVENT_ATTR(P4_BRANCH_RETIRED, MMTP, 2),
+       P4_MAKE_EVENT_ATTR(P4_BRANCH_RETIRED, MMTM, 3),
+
+       P4_MAKE_EVENT_ATTR(P4_MISPRED_BRANCH_RETIRED, NBOGUS, 0),
+
+       P4_MAKE_EVENT_ATTR(P4_X87_ASSIST, FPSU, 0),
+       P4_MAKE_EVENT_ATTR(P4_X87_ASSIST, FPSO, 1),
+       P4_MAKE_EVENT_ATTR(P4_X87_ASSIST, POAO, 2),
+       P4_MAKE_EVENT_ATTR(P4_X87_ASSIST, POAU, 3),
+       P4_MAKE_EVENT_ATTR(P4_X87_ASSIST, PREA, 4),
+
+       P4_MAKE_EVENT_ATTR(P4_MACHINE_CLEAR, CLEAR, 0),
+       P4_MAKE_EVENT_ATTR(P4_MACHINE_CLEAR, MOCLEAR, 1),
+       P4_MAKE_EVENT_ATTR(P4_MACHINE_CLEAR, SMCLEAR, 2),
+
+       P4_MAKE_EVENT_ATTR(P4_INSTR_COMPLETED, NBOGUS, 0),
+       P4_MAKE_EVENT_ATTR(P4_INSTR_COMPLETED, BOGUS, 1),
+};
+
+#endif /* PERF_EVENT_P4_H */
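To make the P4_EVENT_ATTR usage above concrete, a hedged sketch (editorial, not
from the patch) of building an ESCR value that counts the DD and DB deliver
modes of P4_TC_DELIVER_MODE:

u32 emask = P4_EVENT_ATTR(P4_TC_DELIVER_MODE, DD) |
	    P4_EVENT_ATTR(P4_TC_DELIVER_MODE, DB);

u32 escr = (P4_EVENT_UNPACK_EVENT(P4_TC_DELIVER_MODE) <<
		P4_EVNTSEL_EVENT_SHIFT) |
	   (emask << P4_EVNTSEL_EVENTMASK_SHIFT);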
index 60398a0d947c855fbc25687694bb6a2b41100a3d..5dacf63f913e74d9e3d39ddb3bdebefc8c9165d9 100644 (file)
 #include <asm/stacktrace.h>
 #include <asm/nmi.h>
 
-static u64 perf_event_mask __read_mostly;
+#if 0
+#undef wrmsrl
+#define wrmsrl(msr, val)                                       \
+do {                                                           \
+       trace_printk("wrmsrl(%lx, %lx)\n", (unsigned long)(msr),\
+                       (unsigned long)(val));                  \
+       native_write_msr((msr), (u32)((u64)(val)),              \
+                       (u32)((u64)(val) >> 32));               \
+} while (0)
+#endif
 
-/* The maximal number of PEBS events: */
-#define MAX_PEBS_EVENTS        4
+/*
+ * best effort, GUP based copy_from_user() that assumes IRQ or NMI context
+ */
+static unsigned long
+copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
+{
+       unsigned long offset, addr = (unsigned long)from;
+       int type = in_nmi() ? KM_NMI : KM_IRQ0;
+       unsigned long size, len = 0;
+       struct page *page;
+       void *map;
+       int ret;
 
-/* The size of a BTS record in bytes: */
-#define BTS_RECORD_SIZE                24
+       do {
+               ret = __get_user_pages_fast(addr, 1, 0, &page);
+               if (!ret)
+                       break;
 
-/* The size of a per-cpu BTS buffer in bytes: */
-#define BTS_BUFFER_SIZE                (BTS_RECORD_SIZE * 2048)
+               offset = addr & (PAGE_SIZE - 1);
+               size = min(PAGE_SIZE - offset, n - len);
 
-/* The BTS overflow threshold in bytes from the end of the buffer: */
-#define BTS_OVFL_TH            (BTS_RECORD_SIZE * 128)
+               map = kmap_atomic(page, type);
+               memcpy(to, map+offset, size);
+               kunmap_atomic(map, type);
+               put_page(page);
 
+               len  += size;
+               to   += size;
+               addr += size;
 
-/*
- * Bits in the debugctlmsr controlling branch tracing.
- */
-#define X86_DEBUGCTL_TR                        (1 << 6)
-#define X86_DEBUGCTL_BTS               (1 << 7)
-#define X86_DEBUGCTL_BTINT             (1 << 8)
-#define X86_DEBUGCTL_BTS_OFF_OS                (1 << 9)
-#define X86_DEBUGCTL_BTS_OFF_USR       (1 << 10)
+       } while (len < n);
 
-/*
- * A debug store configuration.
- *
- * We only support architectures that use 64bit fields.
- */
-struct debug_store {
-       u64     bts_buffer_base;
-       u64     bts_index;
-       u64     bts_absolute_maximum;
-       u64     bts_interrupt_threshold;
-       u64     pebs_buffer_base;
-       u64     pebs_index;
-       u64     pebs_absolute_maximum;
-       u64     pebs_interrupt_threshold;
-       u64     pebs_event_reset[MAX_PEBS_EVENTS];
-};
+       return len;
+}
+
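Callers treat a short return value as a partial copy. A hedged usage sketch
(mirroring how the stack-frame copy later in this file uses it; fp is assumed
to be a user frame pointer):

struct stack_frame frame;
unsigned long bytes;

bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
if (bytes != sizeof(frame))
	return 0;	/* faulted or truncated: stop unwinding */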
+static u64 perf_event_mask __read_mostly;
 
 struct event_constraint {
        union {
@@ -87,18 +94,40 @@ struct amd_nb {
        struct event_constraint event_constraints[X86_PMC_IDX_MAX];
 };
 
+#define MAX_LBR_ENTRIES                16
+
 struct cpu_hw_events {
+       /*
+        * Generic x86 PMC bits
+        */
        struct perf_event       *events[X86_PMC_IDX_MAX]; /* in counter order */
        unsigned long           active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
        unsigned long           interrupts;
        int                     enabled;
-       struct debug_store      *ds;
 
        int                     n_events;
        int                     n_added;
        int                     assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
        u64                     tags[X86_PMC_IDX_MAX];
        struct perf_event       *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
+
+       /*
+        * Intel DebugStore bits
+        */
+       struct debug_store      *ds;
+       u64                     pebs_enabled;
+
+       /*
+        * Intel LBR bits
+        */
+       int                             lbr_users;
+       void                            *lbr_context;
+       struct perf_branch_stack        lbr_stack;
+       struct perf_branch_entry        lbr_entries[MAX_LBR_ENTRIES];
+
+       /*
+        * AMD specific bits
+        */
        struct amd_nb           *amd_nb;
 };
 
@@ -112,22 +141,48 @@ struct cpu_hw_events {
 #define EVENT_CONSTRAINT(c, n, m)      \
        __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n))
 
+/*
+ * Constraint on the Event code.
+ */
 #define INTEL_EVENT_CONSTRAINT(c, n)   \
        EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVTSEL_MASK)
 
+/*
+ * Constraint on the Event code + UMask + fixed-mask
+ */
 #define FIXED_EVENT_CONSTRAINT(c, n)   \
        EVENT_CONSTRAINT(c, (1ULL << (32+n)), INTEL_ARCH_FIXED_MASK)
 
+/*
+ * Constraint on the Event code + UMask
+ */
+#define PEBS_EVENT_CONSTRAINT(c, n)    \
+       EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)
+
 #define EVENT_CONSTRAINT_END           \
        EVENT_CONSTRAINT(0, 0, 0)
 
 #define for_each_event_constraint(e, c)        \
        for ((e) = (c); (e)->cmask; (e)++)
 
+union perf_capabilities {
+       struct {
+               u64     lbr_format    : 6;
+               u64     pebs_trap     : 1;
+               u64     pebs_arch_reg : 1;
+               u64     pebs_format   : 4;
+               u64     smm_freeze    : 1;
+       };
+       u64     capabilities;
+};
+
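The union is laid out to mirror a single capabilities MSR, so one read fills
every field at once; a hedged sketch (MSR name as defined in msr-index.h):

u64 capabilities;

rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
x86_pmu.intel_cap.capabilities = capabilities;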
 /*
  * struct x86_pmu - generic x86 pmu
  */
 struct x86_pmu {
+       /*
+        * Generic x86 PMC bits
+        */
        const char      *name;
        int             version;
        int             (*handle_irq)(struct pt_regs *);
@@ -135,6 +190,8 @@ struct x86_pmu {
        void            (*enable_all)(void);
        void            (*enable)(struct perf_event *);
        void            (*disable)(struct perf_event *);
+       int             (*hw_config)(struct perf_event_attr *attr, struct hw_perf_event *hwc);
+       int             (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
        unsigned        eventsel;
        unsigned        perfctr;
        u64             (*event_map)(int);
@@ -146,10 +203,6 @@ struct x86_pmu {
        u64             event_mask;
        int             apic;
        u64             max_period;
-       u64             intel_ctrl;
-       void            (*enable_bts)(u64 config);
-       void            (*disable_bts)(void);
-
        struct event_constraint *
                        (*get_event_constraints)(struct cpu_hw_events *cpuc,
                                                 struct perf_event *event);
@@ -157,11 +210,32 @@ struct x86_pmu {
        void            (*put_event_constraints)(struct cpu_hw_events *cpuc,
                                                 struct perf_event *event);
        struct event_constraint *event_constraints;
+       void            (*quirks)(void);
 
        void            (*cpu_prepare)(int cpu);
        void            (*cpu_starting)(int cpu);
        void            (*cpu_dying)(int cpu);
        void            (*cpu_dead)(int cpu);
+
+       /*
+        * Intel Arch Perfmon v2+
+        */
+       u64                     intel_ctrl;
+       union perf_capabilities intel_cap;
+
+       /*
+        * Intel DebugStore bits
+        */
+       int             bts, pebs;
+       int             pebs_record_size;
+       void            (*drain_pebs)(struct pt_regs *regs);
+       struct event_constraint *pebs_constraints;
+
+       /*
+        * Intel LBR
+        */
+       unsigned long   lbr_tos, lbr_from, lbr_to; /* MSR base regs       */
+       int             lbr_nr;                    /* hardware stack size */
 };
 
 static struct x86_pmu x86_pmu __read_mostly;
@@ -293,110 +367,14 @@ static void release_pmc_hardware(void)
 #endif
 }
 
-static inline bool bts_available(void)
-{
-       return x86_pmu.enable_bts != NULL;
-}
-
-static void init_debug_store_on_cpu(int cpu)
-{
-       struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
-
-       if (!ds)
-               return;
-
-       wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
-                    (u32)((u64)(unsigned long)ds),
-                    (u32)((u64)(unsigned long)ds >> 32));
-}
-
-static void fini_debug_store_on_cpu(int cpu)
-{
-       if (!per_cpu(cpu_hw_events, cpu).ds)
-               return;
-
-       wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
-}
-
-static void release_bts_hardware(void)
-{
-       int cpu;
-
-       if (!bts_available())
-               return;
-
-       get_online_cpus();
-
-       for_each_online_cpu(cpu)
-               fini_debug_store_on_cpu(cpu);
-
-       for_each_possible_cpu(cpu) {
-               struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
-
-               if (!ds)
-                       continue;
-
-               per_cpu(cpu_hw_events, cpu).ds = NULL;
-
-               kfree((void *)(unsigned long)ds->bts_buffer_base);
-               kfree(ds);
-       }
-
-       put_online_cpus();
-}
-
-static int reserve_bts_hardware(void)
-{
-       int cpu, err = 0;
-
-       if (!bts_available())
-               return 0;
-
-       get_online_cpus();
-
-       for_each_possible_cpu(cpu) {
-               struct debug_store *ds;
-               void *buffer;
-
-               err = -ENOMEM;
-               buffer = kzalloc(BTS_BUFFER_SIZE, GFP_KERNEL);
-               if (unlikely(!buffer))
-                       break;
-
-               ds = kzalloc(sizeof(*ds), GFP_KERNEL);
-               if (unlikely(!ds)) {
-                       kfree(buffer);
-                       break;
-               }
-
-               ds->bts_buffer_base = (u64)(unsigned long)buffer;
-               ds->bts_index = ds->bts_buffer_base;
-               ds->bts_absolute_maximum =
-                       ds->bts_buffer_base + BTS_BUFFER_SIZE;
-               ds->bts_interrupt_threshold =
-                       ds->bts_absolute_maximum - BTS_OVFL_TH;
-
-               per_cpu(cpu_hw_events, cpu).ds = ds;
-               err = 0;
-       }
-
-       if (err)
-               release_bts_hardware();
-       else {
-               for_each_online_cpu(cpu)
-                       init_debug_store_on_cpu(cpu);
-       }
-
-       put_online_cpus();
-
-       return err;
-}
+static int reserve_ds_buffers(void);
+static void release_ds_buffers(void);
 
 static void hw_perf_event_destroy(struct perf_event *event)
 {
        if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
                release_pmc_hardware();
-               release_bts_hardware();
+               release_ds_buffers();
                mutex_unlock(&pmc_reserve_mutex);
        }
 }
@@ -439,6 +417,25 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
        return 0;
 }
 
+static int x86_hw_config(struct perf_event_attr *attr, struct hw_perf_event *hwc)
+{
+       /*
+        * Generate PMC IRQs:
+        * (keep 'enabled' bit clear for now)
+        */
+       hwc->config = ARCH_PERFMON_EVENTSEL_INT;
+
+       /*
+        * Count user and OS events unless requested not to
+        */
+       if (!attr->exclude_user)
+               hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
+       if (!attr->exclude_kernel)
+               hwc->config |= ARCH_PERFMON_EVENTSEL_OS;
+
+       return 0;
+}
+
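Vendor PMUs can layer extra checks on top of the generic helper; a hypothetical
sketch (name and restriction are illustrative) of a hw_config override:

static int example_hw_config(struct perf_event_attr *attr,
			     struct hw_perf_event *hwc)
{
	int ret = x86_hw_config(attr, hwc);	/* generic bits first */

	if (ret)
		return ret;

	/* hypothetical restriction: this PMU cannot do precise sampling */
	if (attr->precise)
		return -EOPNOTSUPP;

	return 0;
}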
 /*
  * Setup the hardware configuration for a given attr_type
  */
@@ -459,7 +456,7 @@ static int __hw_perf_event_init(struct perf_event *event)
                        if (!reserve_pmc_hardware())
                                err = -EBUSY;
                        else
-                               err = reserve_bts_hardware();
+                               err = reserve_ds_buffers();
                }
                if (!err)
                        atomic_inc(&active_events);
@@ -470,23 +467,14 @@ static int __hw_perf_event_init(struct perf_event *event)
 
        event->destroy = hw_perf_event_destroy;
 
-       /*
-        * Generate PMC IRQs:
-        * (keep 'enabled' bit clear for now)
-        */
-       hwc->config = ARCH_PERFMON_EVENTSEL_INT;
-
        hwc->idx = -1;
        hwc->last_cpu = -1;
        hwc->last_tag = ~0ULL;
 
-       /*
-        * Count user and OS events unless requested not to.
-        */
-       if (!attr->exclude_user)
-               hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
-       if (!attr->exclude_kernel)
-               hwc->config |= ARCH_PERFMON_EVENTSEL_OS;
+       /* Processor specifics */
+       err = x86_pmu.hw_config(attr, hwc);
+       if (err)
+               return err;
 
        if (!hwc->sample_period) {
                hwc->sample_period = x86_pmu.max_period;
@@ -537,11 +525,11 @@ static int __hw_perf_event_init(struct perf_event *event)
        if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
            (hwc->sample_period == 1)) {
                /* BTS is not supported by this architecture. */
-               if (!bts_available())
+               if (!x86_pmu.bts)
                        return -EOPNOTSUPP;
 
                /* BTS is currently only allowed for user-mode. */
-               if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
+               if (!attr->exclude_kernel)
                        return -EOPNOTSUPP;
        }
 
@@ -850,14 +838,15 @@ void hw_perf_enable(void)
 
 static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc)
 {
-       (void)checking_wrmsrl(hwc->config_base + hwc->idx,
+       wrmsrl(hwc->config_base + hwc->idx,
                              hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE);
 }
 
 static inline void x86_pmu_disable_event(struct perf_event *event)
 {
        struct hw_perf_event *hwc = &event->hw;
-       (void)checking_wrmsrl(hwc->config_base + hwc->idx, hwc->config);
+
+       wrmsrl(hwc->config_base + hwc->idx, hwc->config);
 }
 
 static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
@@ -872,7 +861,7 @@ x86_perf_event_set_period(struct perf_event *event)
        struct hw_perf_event *hwc = &event->hw;
        s64 left = atomic64_read(&hwc->period_left);
        s64 period = hwc->sample_period;
-       int err, ret = 0, idx = hwc->idx;
+       int ret = 0, idx = hwc->idx;
 
        if (idx == X86_PMC_IDX_FIXED_BTS)
                return 0;
@@ -910,8 +899,8 @@ x86_perf_event_set_period(struct perf_event *event)
         */
        atomic64_set(&hwc->prev_count, (u64)-left);
 
-       err = checking_wrmsrl(hwc->event_base + idx,
-                            (u64)(-left) & x86_pmu.event_mask);
+       wrmsrl(hwc->event_base + idx,
+                       (u64)(-left) & x86_pmu.event_mask);
 
        perf_event_update_userpage(event);
 
@@ -948,7 +937,7 @@ static int x86_pmu_enable(struct perf_event *event)
        if (n < 0)
                return n;
 
-       ret = x86_schedule_events(cpuc, n, assign);
+       ret = x86_pmu.schedule_events(cpuc, n, assign);
        if (ret)
                return ret;
        /*
@@ -989,6 +978,7 @@ static void x86_pmu_unthrottle(struct perf_event *event)
 void perf_event_print_debug(void)
 {
        u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
+       u64 pebs;
        struct cpu_hw_events *cpuc;
        unsigned long flags;
        int cpu, idx;
@@ -1006,14 +996,16 @@ void perf_event_print_debug(void)
                rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
                rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
                rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
+               rdmsrl(MSR_IA32_PEBS_ENABLE, pebs);
 
                pr_info("\n");
                pr_info("CPU#%d: ctrl:       %016llx\n", cpu, ctrl);
                pr_info("CPU#%d: status:     %016llx\n", cpu, status);
                pr_info("CPU#%d: overflow:   %016llx\n", cpu, overflow);
                pr_info("CPU#%d: fixed:      %016llx\n", cpu, fixed);
+               pr_info("CPU#%d: pebs:       %016llx\n", cpu, pebs);
        }
-       pr_info("CPU#%d: active:       %016llx\n", cpu, *(u64 *)cpuc->active_mask);
+       pr_info("CPU#%d: active:     %016llx\n", cpu, *(u64 *)cpuc->active_mask);
 
        for (idx = 0; idx < x86_pmu.num_events; idx++) {
                rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
@@ -1272,12 +1264,15 @@ int hw_perf_group_sched_in(struct perf_event *leader,
        int assign[X86_PMC_IDX_MAX];
        int n0, n1, ret;
 
+       if (!x86_pmu_initialized())
+               return 0;
+
        /* n0 = total number of events */
        n0 = collect_events(cpuc, leader, true);
        if (n0 < 0)
                return n0;
 
-       ret = x86_schedule_events(cpuc, n0, assign);
+       ret = x86_pmu.schedule_events(cpuc, n0, assign);
        if (ret)
                return ret;
 
@@ -1327,6 +1322,9 @@ undo:
 
 #include "perf_event_amd.c"
 #include "perf_event_p6.c"
+#include "perf_event_p4.c"
+#include "perf_event_intel_lbr.c"
+#include "perf_event_intel_ds.c"
 #include "perf_event_intel.c"
 
 static int __cpuinit
@@ -1398,6 +1396,9 @@ void __init init_hw_perf_events(void)
 
        pr_cont("%s PMU driver.\n", x86_pmu.name);
 
+       if (x86_pmu.quirks)
+               x86_pmu.quirks();
+
        if (x86_pmu.num_events > X86_PMC_MAX_GENERIC) {
                WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
                     x86_pmu.num_events, X86_PMC_MAX_GENERIC);
@@ -1458,6 +1459,32 @@ static const struct pmu pmu = {
        .unthrottle     = x86_pmu_unthrottle,
 };
 
+/*
+ * validate that we can schedule this event
+ */
+static int validate_event(struct perf_event *event)
+{
+       struct cpu_hw_events *fake_cpuc;
+       struct event_constraint *c;
+       int ret = 0;
+
+       fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO);
+       if (!fake_cpuc)
+               return -ENOMEM;
+
+       c = x86_pmu.get_event_constraints(fake_cpuc, event);
+
+       if (!c || !c->weight)
+               ret = -ENOSPC;
+
+       if (x86_pmu.put_event_constraints)
+               x86_pmu.put_event_constraints(fake_cpuc, event);
+
+       kfree(fake_cpuc);
+
+       return ret;
+}
+
 /*
  * validate a single event group
  *
@@ -1498,7 +1525,7 @@ static int validate_group(struct perf_event *event)
 
        fake_cpuc->n_events = n;
 
-       ret = x86_schedule_events(fake_cpuc, n, NULL);
+       ret = x86_pmu.schedule_events(fake_cpuc, n, NULL);
 
 out_free:
        kfree(fake_cpuc);
@@ -1523,6 +1550,8 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
 
                if (event->group_leader != event)
                        err = validate_group(event);
+               else
+                       err = validate_event(event);
 
                event->pmu = tmp;
        }
@@ -1593,41 +1622,6 @@ perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
        dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry);
 }
 
-/*
- * best effort, GUP based copy_from_user() that assumes IRQ or NMI context
- */
-static unsigned long
-copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
-{
-       unsigned long offset, addr = (unsigned long)from;
-       int type = in_nmi() ? KM_NMI : KM_IRQ0;
-       unsigned long size, len = 0;
-       struct page *page;
-       void *map;
-       int ret;
-
-       do {
-               ret = __get_user_pages_fast(addr, 1, 0, &page);
-               if (!ret)
-                       break;
-
-               offset = addr & (PAGE_SIZE - 1);
-               size = min(PAGE_SIZE - offset, n - len);
-
-               map = kmap_atomic(page, type);
-               memcpy(to, map+offset, size);
-               kunmap_atomic(map, type);
-               put_page(page);
-
-               len  += size;
-               to   += size;
-               addr += size;
-
-       } while (len < n);
-
-       return len;
-}
-
 static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
 {
        unsigned long bytes;
index 573458f1caf23c91ab39930fc0613cf78d27d55f..358a8e3d05f8aca50f6f7195e9be841ef6b3adba 100644 (file)
@@ -363,6 +363,8 @@ static __initconst struct x86_pmu amd_pmu = {
        .enable_all             = x86_pmu_enable_all,
        .enable                 = x86_pmu_enable_event,
        .disable                = x86_pmu_disable_event,
+       .hw_config              = x86_hw_config,
+       .schedule_events        = x86_schedule_events,
        .eventsel               = MSR_K7_EVNTSEL0,
        .perfctr                = MSR_K7_PERFCTR0,
        .event_map              = amd_pmu_event_map,
index 84bfde64a337909cfbb5549202804b7fd3c11085..044b8436b19d53263e9b3c5e804ee4079028312e 100644 (file)
@@ -470,42 +470,6 @@ static u64 intel_pmu_raw_event(u64 hw_event)
        return hw_event & CORE_EVNTSEL_MASK;
 }
 
-static void intel_pmu_enable_bts(u64 config)
-{
-       unsigned long debugctlmsr;
-
-       debugctlmsr = get_debugctlmsr();
-
-       debugctlmsr |= X86_DEBUGCTL_TR;
-       debugctlmsr |= X86_DEBUGCTL_BTS;
-       debugctlmsr |= X86_DEBUGCTL_BTINT;
-
-       if (!(config & ARCH_PERFMON_EVENTSEL_OS))
-               debugctlmsr |= X86_DEBUGCTL_BTS_OFF_OS;
-
-       if (!(config & ARCH_PERFMON_EVENTSEL_USR))
-               debugctlmsr |= X86_DEBUGCTL_BTS_OFF_USR;
-
-       update_debugctlmsr(debugctlmsr);
-}
-
-static void intel_pmu_disable_bts(void)
-{
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-       unsigned long debugctlmsr;
-
-       if (!cpuc->ds)
-               return;
-
-       debugctlmsr = get_debugctlmsr();
-
-       debugctlmsr &=
-               ~(X86_DEBUGCTL_TR | X86_DEBUGCTL_BTS | X86_DEBUGCTL_BTINT |
-                 X86_DEBUGCTL_BTS_OFF_OS | X86_DEBUGCTL_BTS_OFF_USR);
-
-       update_debugctlmsr(debugctlmsr);
-}
-
 static void intel_pmu_disable_all(void)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
@@ -514,12 +478,17 @@ static void intel_pmu_disable_all(void)
 
        if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
                intel_pmu_disable_bts();
+
+       intel_pmu_pebs_disable_all();
+       intel_pmu_lbr_disable_all();
 }
 
 static void intel_pmu_enable_all(void)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
+       intel_pmu_pebs_enable_all();
+       intel_pmu_lbr_enable_all();
        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
 
        if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
@@ -547,8 +516,7 @@ static inline void intel_pmu_ack_status(u64 ack)
        wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
 }
 
-static inline void
-intel_pmu_disable_fixed(struct hw_perf_event *hwc)
+static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
 {
        int idx = hwc->idx - X86_PMC_IDX_FIXED;
        u64 ctrl_val, mask;
@@ -557,71 +525,10 @@ intel_pmu_disable_fixed(struct hw_perf_event *hwc)
 
        rdmsrl(hwc->config_base, ctrl_val);
        ctrl_val &= ~mask;
-       (void)checking_wrmsrl(hwc->config_base, ctrl_val);
+       wrmsrl(hwc->config_base, ctrl_val);
 }
 
-static void intel_pmu_drain_bts_buffer(void)
-{
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-       struct debug_store *ds = cpuc->ds;
-       struct bts_record {
-               u64     from;
-               u64     to;
-               u64     flags;
-       };
-       struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
-       struct bts_record *at, *top;
-       struct perf_output_handle handle;
-       struct perf_event_header header;
-       struct perf_sample_data data;
-       struct pt_regs regs;
-
-       if (!event)
-               return;
-
-       if (!ds)
-               return;
-
-       at  = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
-       top = (struct bts_record *)(unsigned long)ds->bts_index;
-
-       if (top <= at)
-               return;
-
-       ds->bts_index = ds->bts_buffer_base;
-
-       perf_sample_data_init(&data, 0);
-
-       data.period     = event->hw.last_period;
-       regs.ip         = 0;
-
-       /*
-        * Prepare a generic sample, i.e. fill in the invariant fields.
-        * We will overwrite the from and to address before we output
-        * the sample.
-        */
-       perf_prepare_sample(&header, &data, event, &regs);
-
-       if (perf_output_begin(&handle, event,
-                             header.size * (top - at), 1, 1))
-               return;
-
-       for (; at < top; at++) {
-               data.ip         = at->from;
-               data.addr       = at->to;
-
-               perf_output_sample(&handle, &header, &data, event);
-       }
-
-       perf_output_end(&handle);
-
-       /* There's new data available. */
-       event->hw.interrupts++;
-       event->pending_kill = POLL_IN;
-}
-
-static inline void
-intel_pmu_disable_event(struct perf_event *event)
+static void intel_pmu_disable_event(struct perf_event *event)
 {
        struct hw_perf_event *hwc = &event->hw;
 
@@ -637,14 +544,15 @@ intel_pmu_disable_event(struct perf_event *event)
        }
 
        x86_pmu_disable_event(event);
+
+       if (unlikely(event->attr.precise))
+               intel_pmu_pebs_disable(event);
 }
 
-static inline void
-intel_pmu_enable_fixed(struct hw_perf_event *hwc)
+static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
 {
        int idx = hwc->idx - X86_PMC_IDX_FIXED;
        u64 ctrl_val, bits, mask;
-       int err;
 
        /*
         * Enable IRQ generation (0x8),
@@ -669,7 +577,7 @@ intel_pmu_enable_fixed(struct hw_perf_event *hwc)
        rdmsrl(hwc->config_base, ctrl_val);
        ctrl_val &= ~mask;
        ctrl_val |= bits;
-       err = checking_wrmsrl(hwc->config_base, ctrl_val);
+       wrmsrl(hwc->config_base, ctrl_val);
 }
 
 static void intel_pmu_enable_event(struct perf_event *event)
@@ -689,6 +597,9 @@ static void intel_pmu_enable_event(struct perf_event *event)
                return;
        }
 
+       if (unlikely(event->attr.precise))
+               intel_pmu_pebs_enable(event);
+
        __x86_pmu_enable_event(hwc);
 }
 
@@ -762,6 +673,15 @@ again:
 
        inc_irq_stat(apic_perf_irqs);
        ack = status;
+
+       intel_pmu_lbr_read();
+
+       /*
+        * PEBS overflow sets bit 62 in the global status register
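+        * (clear it here so the counter loop below doesn't treat it
+        * as a regular counter bit)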
+        */
+       if (__test_and_clear_bit(62, (unsigned long *)&status))
+               x86_pmu.drain_pebs(regs);
+
        for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
                struct perf_event *event = cpuc->events[bit];
 
@@ -791,22 +711,18 @@ done:
        return 1;
 }
 
-static struct event_constraint bts_constraint =
-       EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0);
-
 static struct event_constraint *
-intel_special_constraints(struct perf_event *event)
+intel_bts_constraints(struct perf_event *event)
 {
-       unsigned int hw_event;
-
-       hw_event = event->hw.config & INTEL_ARCH_EVENT_MASK;
+       struct hw_perf_event *hwc = &event->hw;
+       unsigned int hw_event, bts_event;
 
-       if (unlikely((hw_event ==
-                     x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) &&
-                    (event->hw.sample_period == 1))) {
+       hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
+       bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
 
+       if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
                return &bts_constraint;
-       }
+
        return NULL;
 }
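
For illustration (not part of this change): the constraint above matches a branch-instructions event with a sample period of exactly 1, so an attribute setup roughly along these lines should land on the BTS fixed slot. The SAMPLE_IP/SAMPLE_ADDR choice mirrors the BTS drain path, which reports the branch source in data.ip and the target in data.addr:

	struct perf_event_attr attr = {
		.type          = PERF_TYPE_HARDWARE,
		.config        = PERF_COUNT_HW_BRANCH_INSTRUCTIONS,
		.sample_period = 1,	/* a period of exactly 1 selects BTS */
		.sample_type   = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR,
	};
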
 
@@ -815,7 +731,11 @@ intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event
 {
        struct event_constraint *c;
 
-       c = intel_special_constraints(event);
+       c = intel_bts_constraints(event);
+       if (c)
+               return c;
+
+       c = intel_pebs_constraints(event);
        if (c)
                return c;
 
@@ -829,6 +749,8 @@ static __initconst struct x86_pmu core_pmu = {
        .enable_all             = x86_pmu_enable_all,
        .enable                 = x86_pmu_enable_event,
        .disable                = x86_pmu_disable_event,
+       .hw_config              = x86_hw_config,
+       .schedule_events        = x86_schedule_events,
        .eventsel               = MSR_ARCH_PERFMON_EVENTSEL0,
        .perfctr                = MSR_ARCH_PERFMON_PERFCTR0,
        .event_map              = intel_pmu_event_map,
@@ -845,6 +767,20 @@ static __initconst struct x86_pmu core_pmu = {
        .event_constraints      = intel_core_event_constraints,
 };
 
+static void intel_pmu_cpu_starting(int cpu)
+{
+       init_debug_store_on_cpu(cpu);
+       /*
+        * Deal with CPUs that don't clear their LBRs on power-up.
+        */
+       intel_pmu_lbr_reset();
+}
+
+static void intel_pmu_cpu_dying(int cpu)
+{
+       fini_debug_store_on_cpu(cpu);
+}
+
 static __initconst struct x86_pmu intel_pmu = {
        .name                   = "Intel",
        .handle_irq             = intel_pmu_handle_irq,
@@ -852,6 +788,8 @@ static __initconst struct x86_pmu intel_pmu = {
        .enable_all             = intel_pmu_enable_all,
        .enable                 = intel_pmu_enable_event,
        .disable                = intel_pmu_disable_event,
+       .hw_config              = x86_hw_config,
+       .schedule_events        = x86_schedule_events,
        .eventsel               = MSR_ARCH_PERFMON_EVENTSEL0,
        .perfctr                = MSR_ARCH_PERFMON_PERFCTR0,
        .event_map              = intel_pmu_event_map,
@@ -864,14 +802,38 @@ static __initconst struct x86_pmu intel_pmu = {
         * the generic event period:
         */
        .max_period             = (1ULL << 31) - 1,
-       .enable_bts             = intel_pmu_enable_bts,
-       .disable_bts            = intel_pmu_disable_bts,
        .get_event_constraints  = intel_get_event_constraints,
 
-       .cpu_starting           = init_debug_store_on_cpu,
-       .cpu_dying              = fini_debug_store_on_cpu,
+       .cpu_starting           = intel_pmu_cpu_starting,
+       .cpu_dying              = intel_pmu_cpu_dying,
 };
 
+static void intel_clovertown_quirks(void)
+{
+       /*
+        * PEBS is unreliable due to:
+        *
+        *   AJ67  - PEBS may experience CPL leaks
+        *   AJ68  - PEBS PMI may be delayed by one event
+        *   AJ69  - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12] is set
+        *   AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
+        *
+        * AJ67 could be worked around by restricting the OS/USR flags.
+        * AJ69 could be worked around by setting PMU_FREEZE_ON_PMI.
+        *
+        * AJ106 could possibly be worked around by not allowing LBR
+        *       usage from PEBS, including the fixup.
+        * AJ68  could possibly be worked around by always programming
+        *       a pebs_event_reset[0] value and coping with the lost events.
+        *
+        * But taken together it might just make sense to not enable PEBS on
+        * these chips.
+        */
+       printk(KERN_WARNING "PEBS disabled due to CPU errata.\n");
+       x86_pmu.pebs = 0;
+       x86_pmu.pebs_constraints = NULL;
+}
+
 static __init int intel_pmu_init(void)
 {
        union cpuid10_edx edx;
@@ -881,12 +843,13 @@ static __init int intel_pmu_init(void)
        int version;
 
        if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
-               /* check for P6 processor family */
-          if (boot_cpu_data.x86 == 6) {
-               return p6_pmu_init();
-          } else {
+               switch (boot_cpu_data.x86) {
+               case 0x6:
+                       return p6_pmu_init();
+               case 0xf:
+                       return p4_pmu_init();
+               }
                return -ENODEV;
-          }
        }
 
        /*
@@ -915,6 +878,18 @@ static __init int intel_pmu_init(void)
        if (version > 1)
                x86_pmu.num_events_fixed = max((int)edx.split.num_events_fixed, 3);
 
+       /*
+        * v2 and above have a perf capabilities MSR
+        */
+       if (version > 1) {
+               u64 capabilities;
+
+               rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
+               x86_pmu.intel_cap.capabilities = capabilities;
+       }
+
+       intel_ds_init();
+
        /*
         * Install the hw-cache-events table:
         */
@@ -924,12 +899,15 @@ static __init int intel_pmu_init(void)
                break;
 
        case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
+               x86_pmu.quirks = intel_clovertown_quirks;
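+               /* fall through: model 15 shares the Core2 setup below */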
        case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
        case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
        case 29: /* six-core 45 nm xeon "Dunnington" */
                memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));
 
+               intel_pmu_lbr_init_core();
+
                x86_pmu.event_constraints = intel_core2_event_constraints;
                pr_cont("Core2 events, ");
                break;
@@ -939,13 +917,18 @@ static __init int intel_pmu_init(void)
                memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));
 
+               intel_pmu_lbr_init_nhm();
+
                x86_pmu.event_constraints = intel_nehalem_event_constraints;
                pr_cont("Nehalem/Corei7 events, ");
                break;
+
        case 28: /* Atom */
                memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));
 
+               intel_pmu_lbr_init_atom();
+
                x86_pmu.event_constraints = intel_gen_event_constraints;
                pr_cont("Atom events, ");
                break;
@@ -955,6 +938,8 @@ static __init int intel_pmu_init(void)
                memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));
 
+               intel_pmu_lbr_init_nhm();
+
                x86_pmu.event_constraints = intel_westmere_event_constraints;
                pr_cont("Westmere events, ");
                break;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
new file mode 100644 (file)
index 0000000..c59678a
--- /dev/null
@@ -0,0 +1,673 @@
+#ifdef CONFIG_CPU_SUP_INTEL
+
+/* The maximal number of PEBS events: */
+#define MAX_PEBS_EVENTS                4
+
+/* The size of a BTS record in bytes: */
+#define BTS_RECORD_SIZE                24
+
+#define BTS_BUFFER_SIZE                (PAGE_SIZE << 4)
+#define PEBS_BUFFER_SIZE       PAGE_SIZE
+
+/*
+ * pebs_record_32 for p4 and core is not supported
+
+struct pebs_record_32 {
+       u32 flags, ip;
+       u32 ax, bc, cx, dx;
+       u32 si, di, bp, sp;
+};
+
+ */
+
+struct pebs_record_core {
+       u64 flags, ip;
+       u64 ax, bx, cx, dx;
+       u64 si, di, bp, sp;
+       u64 r8,  r9,  r10, r11;
+       u64 r12, r13, r14, r15;
+};
+
+struct pebs_record_nhm {
+       u64 flags, ip;
+       u64 ax, bx, cx, dx;
+       u64 si, di, bp, sp;
+       u64 r8,  r9,  r10, r11;
+       u64 r12, r13, r14, r15;
+       u64 status, dla, dse, lat;
+};
+
+/*
+ * Bits in the debugctlmsr controlling branch tracing.
+ */
+#define X86_DEBUGCTL_TR                        (1 << 6)
+#define X86_DEBUGCTL_BTS               (1 << 7)
+#define X86_DEBUGCTL_BTINT             (1 << 8)
+#define X86_DEBUGCTL_BTS_OFF_OS                (1 << 9)
+#define X86_DEBUGCTL_BTS_OFF_USR       (1 << 10)
+
+/*
+ * A debug store configuration.
+ *
+ * We only support architectures that use 64bit fields.
+ */
+struct debug_store {
+       u64     bts_buffer_base;
+       u64     bts_index;
+       u64     bts_absolute_maximum;
+       u64     bts_interrupt_threshold;
+       u64     pebs_buffer_base;
+       u64     pebs_index;
+       u64     pebs_absolute_maximum;
+       u64     pebs_interrupt_threshold;
+       u64     pebs_event_reset[MAX_PEBS_EVENTS];
+};
+
+static void init_debug_store_on_cpu(int cpu)
+{
+       struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
+
+       if (!ds)
+               return;
+
+       wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
+                    (u32)((u64)(unsigned long)ds),
+                    (u32)((u64)(unsigned long)ds >> 32));
+}
+
+static void fini_debug_store_on_cpu(int cpu)
+{
+       if (!per_cpu(cpu_hw_events, cpu).ds)
+               return;
+
+       wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
+}
+
+static void release_ds_buffers(void)
+{
+       int cpu;
+
+       if (!x86_pmu.bts && !x86_pmu.pebs)
+               return;
+
+       get_online_cpus();
+
+       for_each_online_cpu(cpu)
+               fini_debug_store_on_cpu(cpu);
+
+       for_each_possible_cpu(cpu) {
+               struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
+
+               if (!ds)
+                       continue;
+
+               per_cpu(cpu_hw_events, cpu).ds = NULL;
+
+               kfree((void *)(unsigned long)ds->pebs_buffer_base);
+               kfree((void *)(unsigned long)ds->bts_buffer_base);
+               kfree(ds);
+       }
+
+       put_online_cpus();
+}
+
+static int reserve_ds_buffers(void)
+{
+       int cpu, err = 0;
+
+       if (!x86_pmu.bts && !x86_pmu.pebs)
+               return 0;
+
+       get_online_cpus();
+
+       for_each_possible_cpu(cpu) {
+               struct debug_store *ds;
+               void *buffer;
+               int max, thresh;
+
+               err = -ENOMEM;
+               ds = kzalloc(sizeof(*ds), GFP_KERNEL);
+               if (unlikely(!ds))
+                       break;
+               per_cpu(cpu_hw_events, cpu).ds = ds;
+
+               if (x86_pmu.bts) {
+                       buffer = kzalloc(BTS_BUFFER_SIZE, GFP_KERNEL);
+                       if (unlikely(!buffer))
+                               break;
+
+                       max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
+                       thresh = max / 16;
+
+                       ds->bts_buffer_base = (u64)(unsigned long)buffer;
+                       ds->bts_index = ds->bts_buffer_base;
+                       ds->bts_absolute_maximum = ds->bts_buffer_base +
+                               max * BTS_RECORD_SIZE;
+                       ds->bts_interrupt_threshold = ds->bts_absolute_maximum -
+                               thresh * BTS_RECORD_SIZE;
+               }
+
+               if (x86_pmu.pebs) {
+                       buffer = kzalloc(PEBS_BUFFER_SIZE, GFP_KERNEL);
+                       if (unlikely(!buffer))
+                               break;
+
+                       max = PEBS_BUFFER_SIZE / x86_pmu.pebs_record_size;
+
+                       ds->pebs_buffer_base = (u64)(unsigned long)buffer;
+                       ds->pebs_index = ds->pebs_buffer_base;
+                       ds->pebs_absolute_maximum = ds->pebs_buffer_base +
+                               max * x86_pmu.pebs_record_size;
+                       /*
+                        * Always use single record PEBS
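+                        * (a threshold of one record means each record
+                        * raises a PMI)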
+                        */
+                       ds->pebs_interrupt_threshold = ds->pebs_buffer_base +
+                               x86_pmu.pebs_record_size;
+               }
+
+               err = 0;
+       }
+
+       if (err)
+               release_ds_buffers();
+       else {
+               for_each_online_cpu(cpu)
+                       init_debug_store_on_cpu(cpu);
+       }
+
+       put_online_cpus();
+
+       return err;
+}
+
+/*
+ * BTS
+ */
+
+static struct event_constraint bts_constraint =
+       EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0);
+
+static void intel_pmu_enable_bts(u64 config)
+{
+       unsigned long debugctlmsr;
+
+       debugctlmsr = get_debugctlmsr();
+
+       debugctlmsr |= X86_DEBUGCTL_TR;
+       debugctlmsr |= X86_DEBUGCTL_BTS;
+       debugctlmsr |= X86_DEBUGCTL_BTINT;
+
+       if (!(config & ARCH_PERFMON_EVENTSEL_OS))
+               debugctlmsr |= X86_DEBUGCTL_BTS_OFF_OS;
+
+       if (!(config & ARCH_PERFMON_EVENTSEL_USR))
+               debugctlmsr |= X86_DEBUGCTL_BTS_OFF_USR;
+
+       update_debugctlmsr(debugctlmsr);
+}
+
+static void intel_pmu_disable_bts(void)
+{
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       unsigned long debugctlmsr;
+
+       if (!cpuc->ds)
+               return;
+
+       debugctlmsr = get_debugctlmsr();
+
+       debugctlmsr &=
+               ~(X86_DEBUGCTL_TR | X86_DEBUGCTL_BTS | X86_DEBUGCTL_BTINT |
+                 X86_DEBUGCTL_BTS_OFF_OS | X86_DEBUGCTL_BTS_OFF_USR);
+
+       update_debugctlmsr(debugctlmsr);
+}
+
+static void intel_pmu_drain_bts_buffer(void)
+{
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       struct debug_store *ds = cpuc->ds;
+       struct bts_record {
+               u64     from;
+               u64     to;
+               u64     flags;
+       };
+       struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
+       struct bts_record *at, *top;
+       struct perf_output_handle handle;
+       struct perf_event_header header;
+       struct perf_sample_data data;
+       struct pt_regs regs;
+
+       if (!event)
+               return;
+
+       if (!ds)
+               return;
+
+       at  = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
+       top = (struct bts_record *)(unsigned long)ds->bts_index;
+
+       if (top <= at)
+               return;
+
+       ds->bts_index = ds->bts_buffer_base;
+
+       perf_sample_data_init(&data, 0);
+       data.period = event->hw.last_period;
+       regs.ip     = 0;
+
+       /*
+        * Prepare a generic sample, i.e. fill in the invariant fields.
+        * We will overwrite the from and to address before we output
+        * the sample.
+        */
+       perf_prepare_sample(&header, &data, event, &regs);
+
+       if (perf_output_begin(&handle, event, header.size * (top - at), 1, 1))
+               return;
+
+       for (; at < top; at++) {
+               data.ip         = at->from;
+               data.addr       = at->to;
+
+               perf_output_sample(&handle, &header, &data, event);
+       }
+
+       perf_output_end(&handle);
+
+       /* There's new data available. */
+       event->hw.interrupts++;
+       event->pending_kill = POLL_IN;
+}
+
+/*
+ * PEBS
+ */
+
+static struct event_constraint intel_core_pebs_events[] = {
+       PEBS_EVENT_CONSTRAINT(0x00c0, 0x1), /* INSTR_RETIRED.ANY */
+       PEBS_EVENT_CONSTRAINT(0xfec1, 0x1), /* X87_OPS_RETIRED.ANY */
+       PEBS_EVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */
+       PEBS_EVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETIRED.ANY */
+       PEBS_EVENT_CONSTRAINT(0x01cb, 0x1), /* MEM_LOAD_RETIRED.L1D_MISS */
+       PEBS_EVENT_CONSTRAINT(0x02cb, 0x1), /* MEM_LOAD_RETIRED.L1D_LINE_MISS */
+       PEBS_EVENT_CONSTRAINT(0x04cb, 0x1), /* MEM_LOAD_RETIRED.L2_MISS */
+       PEBS_EVENT_CONSTRAINT(0x08cb, 0x1), /* MEM_LOAD_RETIRED.L2_LINE_MISS */
+       PEBS_EVENT_CONSTRAINT(0x10cb, 0x1), /* MEM_LOAD_RETIRED.DTLB_MISS */
+       EVENT_CONSTRAINT_END
+};
+
+static struct event_constraint intel_nehalem_pebs_events[] = {
+       PEBS_EVENT_CONSTRAINT(0x00c0, 0xf), /* INSTR_RETIRED.ANY */
+       PEBS_EVENT_CONSTRAINT(0xfec1, 0xf), /* X87_OPS_RETIRED.ANY */
+       PEBS_EVENT_CONSTRAINT(0x00c5, 0xf), /* BR_INST_RETIRED.MISPRED */
+       PEBS_EVENT_CONSTRAINT(0x1fc7, 0xf), /* SIMD_INST_RETIRED.ANY */
+       PEBS_EVENT_CONSTRAINT(0x01cb, 0xf), /* MEM_LOAD_RETIRED.L1D_MISS */
+       PEBS_EVENT_CONSTRAINT(0x02cb, 0xf), /* MEM_LOAD_RETIRED.L1D_LINE_MISS */
+       PEBS_EVENT_CONSTRAINT(0x04cb, 0xf), /* MEM_LOAD_RETIRED.L2_MISS */
+       PEBS_EVENT_CONSTRAINT(0x08cb, 0xf), /* MEM_LOAD_RETIRED.L2_LINE_MISS */
+       PEBS_EVENT_CONSTRAINT(0x10cb, 0xf), /* MEM_LOAD_RETIRED.DTLB_MISS */
+       EVENT_CONSTRAINT_END
+};
+
+static struct event_constraint *
+intel_pebs_constraints(struct perf_event *event)
+{
+       struct event_constraint *c;
+
+       if (!event->attr.precise)
+               return NULL;
+
+       if (x86_pmu.pebs_constraints) {
+               for_each_event_constraint(c, x86_pmu.pebs_constraints) {
+                       if ((event->hw.config & c->cmask) == c->code)
+                               return c;
+               }
+       }
+
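+       /*
+        * 'precise' was requested but no PEBS constraint matched: return
+        * the empty constraint so the event cannot be scheduled at all.
+        */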
+       return &emptyconstraint;
+}
+
+static void intel_pmu_pebs_enable(struct perf_event *event)
+{
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       struct hw_perf_event *hwc = &event->hw;
+
+       hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT;
+
+       cpuc->pebs_enabled |= 1ULL << hwc->idx;
+       WARN_ON_ONCE(cpuc->enabled);
+
+       if (x86_pmu.intel_cap.pebs_trap)
+               intel_pmu_lbr_enable(event);
+}
+
+static void intel_pmu_pebs_disable(struct perf_event *event)
+{
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       struct hw_perf_event *hwc = &event->hw;
+
+       cpuc->pebs_enabled &= ~(1ULL << hwc->idx);
+       if (cpuc->enabled)
+               wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
+
+       hwc->config |= ARCH_PERFMON_EVENTSEL_INT;
+
+       if (x86_pmu.intel_cap.pebs_trap)
+               intel_pmu_lbr_disable(event);
+}
+
+static void intel_pmu_pebs_enable_all(void)
+{
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+       if (cpuc->pebs_enabled)
+               wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
+}
+
+static void intel_pmu_pebs_disable_all(void)
+{
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+       if (cpuc->pebs_enabled)
+               wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
+}
+
+#include <asm/insn.h>
+
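+/*
+ * On 64-bit, kernel addresses have the sign bit set; on 32-bit they
+ * live above PAGE_OFFSET.
+ */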
+static inline bool kernel_ip(unsigned long ip)
+{
+#ifdef CONFIG_X86_32
+       return ip > PAGE_OFFSET;
+#else
+       return (long)ip < 0;
+#endif
+}
+
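+/*
+ * A trap-like PEBS assist records the address of the instruction
+ * following the one that caused the event; decode forward from the
+ * last LBR branch target until we reach that address, then report
+ * the preceding instruction instead.
+ */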
+static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
+{
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       unsigned long from = cpuc->lbr_entries[0].from;
+       unsigned long old_to, to = cpuc->lbr_entries[0].to;
+       unsigned long ip = regs->ip;
+
+       /*
+        * We don't need to fix up if the PEBS assist is fault-like
+        */
+       if (!x86_pmu.intel_cap.pebs_trap)
+               return 1;
+
+       /*
+        * No LBR entry, no basic block, no rewinding
+        */
+       if (!cpuc->lbr_stack.nr || !from || !to)
+               return 0;
+
+       /*
+        * Basic blocks should never cross user/kernel boundaries
+        */
+       if (kernel_ip(ip) != kernel_ip(to))
+               return 0;
+
+       /*
+        * unsigned math, either ip is before the start (impossible) or
+        * the basic block is larger than 1 page (sanity)
+        */
+       if ((ip - to) > PAGE_SIZE)
+               return 0;
+
+       /*
+        * We sampled a branch insn, rewind using the LBR stack
+        */
+       if (ip == to) {
+               regs->ip = from;
+               return 1;
+       }
+
+       do {
+               struct insn insn;
+               u8 buf[MAX_INSN_SIZE];
+               void *kaddr;
+
+               old_to = to;
+               if (!kernel_ip(ip)) {
+                       int bytes, size = MAX_INSN_SIZE;
+
+                       bytes = copy_from_user_nmi(buf, (void __user *)to, size);
+                       if (bytes != size)
+                               return 0;
+
+                       kaddr = buf;
+               } else
+                       kaddr = (void *)to;
+
+               kernel_insn_init(&insn, kaddr);
+               insn_get_length(&insn);
+               to += insn.length;
+       } while (to < ip);
+
+       if (to == ip) {
+               regs->ip = old_to;
+               return 1;
+       }
+
+       /*
+        * Even though we decoded the basic block, the instruction stream
+        * never matched the given IP, either the TO or the IP got corrupted.
+        */
+       return 0;
+}
+
+static int intel_pmu_save_and_restart(struct perf_event *event);
+
+static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
+{
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       struct debug_store *ds = cpuc->ds;
+       struct perf_event *event = cpuc->events[0]; /* PMC0 only */
+       struct pebs_record_core *at, *top;
+       struct perf_sample_data data;
+       struct perf_raw_record raw;
+       struct pt_regs regs;
+       int n;
+
+       if (!ds || !x86_pmu.pebs)
+               return;
+
+       at  = (struct pebs_record_core *)(unsigned long)ds->pebs_buffer_base;
+       top = (struct pebs_record_core *)(unsigned long)ds->pebs_index;
+
+       /*
+        * Whatever else happens, drain the thing
+        */
+       ds->pebs_index = ds->pebs_buffer_base;
+
+       if (!test_bit(0, cpuc->active_mask))
+               return;
+
+       WARN_ON_ONCE(!event);
+
+       if (!event->attr.precise)
+               return;
+
+       n = top - at;
+       if (n <= 0)
+               return;
+
+       if (!intel_pmu_save_and_restart(event))
+               return;
+
+       /*
+        * Should not happen, we program the threshold at 1 and do not
+        * set a reset value.
+        */
+       WARN_ON_ONCE(n > 1);
+       at += n - 1;
+
+       perf_sample_data_init(&data, 0);
+       data.period = event->hw.last_period;
+
+       if (event->attr.sample_type & PERF_SAMPLE_RAW) {
+               raw.size = x86_pmu.pebs_record_size;
+               raw.data = at;
+               data.raw = &raw;
+       }
+
+       /*
+        * We use the interrupt regs as a base because the PEBS record
+        * does not contain a full regs set, specifically it seems to
+        * lack segment descriptors, which get used by things like
+        * user_mode().
+        *
+        * In the simple case fix up only the IP and BP,SP regs, for
+        * PERF_SAMPLE_IP and PERF_SAMPLE_CALLCHAIN to function properly.
+        * A possible PERF_SAMPLE_REGS will have to transfer all regs.
+        */
+       regs = *iregs;
+       regs.ip = at->ip;
+       regs.bp = at->bp;
+       regs.sp = at->sp;
+
+       if (intel_pmu_pebs_fixup_ip(&regs))
+               regs.flags |= PERF_EFLAGS_EXACT;
+       else
+               regs.flags &= ~PERF_EFLAGS_EXACT;
+
+       if (perf_event_overflow(event, 1, &data, &regs))
+               x86_pmu_stop(event);
+}
+
+static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
+{
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       struct debug_store *ds = cpuc->ds;
+       struct pebs_record_nhm *at, *top;
+       struct perf_sample_data data;
+       struct perf_event *event = NULL;
+       struct perf_raw_record raw;
+       struct pt_regs regs;
+       u64 status = 0;
+       int bit, n;
+
+       if (!ds || !x86_pmu.pebs)
+               return;
+
+       at  = (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base;
+       top = (struct pebs_record_nhm *)(unsigned long)ds->pebs_index;
+
+       ds->pebs_index = ds->pebs_buffer_base;
+
+       n = top - at;
+       if (n <= 0)
+               return;
+
+       /*
+        * Should not happen, we program the threshold at 1 and do not
+        * set a reset value.
+        */
+       WARN_ON_ONCE(n > MAX_PEBS_EVENTS);
+
+       for ( ; at < top; at++) {
+               for_each_set_bit(bit, (unsigned long *)&at->status, MAX_PEBS_EVENTS) {
+                       event = cpuc->events[bit];
+                       if (!test_bit(bit, cpuc->active_mask))
+                               continue;
+
+                       WARN_ON_ONCE(!event);
+
+                       if (!event->attr.precise)
+                               continue;
+
+                       if (__test_and_set_bit(bit, (unsigned long *)&status))
+                               continue;
+
+                       break;
+               }
+
+               if (!event || bit >= MAX_PEBS_EVENTS)
+                       continue;
+
+               if (!intel_pmu_save_and_restart(event))
+                       continue;
+
+               perf_sample_data_init(&data, 0);
+               data.period = event->hw.last_period;
+
+               if (event->attr.sample_type & PERF_SAMPLE_RAW) {
+                       raw.size = x86_pmu.pebs_record_size;
+                       raw.data = at;
+                       data.raw = &raw;
+               }
+
+               /*
+                * See the comment in intel_pmu_drain_pebs_core()
+                */
+               regs = *iregs;
+               regs.ip = at->ip;
+               regs.bp = at->bp;
+               regs.sp = at->sp;
+
+               if (intel_pmu_pebs_fixup_ip(&regs))
+                       regs.flags |= PERF_EFLAGS_EXACT;
+               else
+                       regs.flags &= ~PERF_EFLAGS_EXACT;
+
+               if (perf_event_overflow(event, 1, &data, &regs))
+                       x86_pmu_stop(event);
+       }
+}
+
+/*
+ * BTS, PEBS probe and setup
+ */
+
+static void intel_ds_init(void)
+{
+       /*
+        * No support for 32bit formats
+        */
+       if (!boot_cpu_has(X86_FEATURE_DTES64))
+               return;
+
+       x86_pmu.bts  = boot_cpu_has(X86_FEATURE_BTS);
+       x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS);
+       if (x86_pmu.pebs) {
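+               /* '+' means the PEBS assist is trap-like, '-' fault-like */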
+               char pebs_type = x86_pmu.intel_cap.pebs_trap ?  '+' : '-';
+               int format = x86_pmu.intel_cap.pebs_format;
+
+               switch (format) {
+               case 0:
+                       printk(KERN_CONT "PEBS fmt0%c, ", pebs_type);
+                       x86_pmu.pebs_record_size = sizeof(struct pebs_record_core);
+                       x86_pmu.drain_pebs = intel_pmu_drain_pebs_core;
+                       x86_pmu.pebs_constraints = intel_core_pebs_events;
+                       break;
+
+               case 1:
+                       printk(KERN_CONT "PEBS fmt1%c, ", pebs_type);
+                       x86_pmu.pebs_record_size = sizeof(struct pebs_record_nhm);
+                       x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
+                       x86_pmu.pebs_constraints = intel_nehalem_pebs_events;
+                       break;
+
+               default:
+                       printk(KERN_CONT "no PEBS fmt%d%c, ", format, pebs_type);
+                       x86_pmu.pebs = 0;
+                       break;
+               }
+       }
+}
+
+#else /* CONFIG_CPU_SUP_INTEL */
+
+static int reserve_ds_buffers(void)
+{
+       return 0;
+}
+
+static void release_ds_buffers(void)
+{
+}
+
+#endif /* CONFIG_CPU_SUP_INTEL */
diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
new file mode 100644 (file)
index 0000000..df4c98e
--- /dev/null
@@ -0,0 +1,221 @@
+#ifdef CONFIG_CPU_SUP_INTEL
+
+enum {
+       LBR_FORMAT_32           = 0x00,
+       LBR_FORMAT_LIP          = 0x01,
+       LBR_FORMAT_EIP          = 0x02,
+       LBR_FORMAT_EIP_FLAGS    = 0x03,
+};
+
+/*
+ * We only support LBR implementations that have FREEZE_LBRS_ON_PMI;
+ * otherwise it becomes nearly impossible to get a reliable stack.
+ */
+
+#define X86_DEBUGCTL_LBR                               (1 << 0)
+#define X86_DEBUGCTL_FREEZE_LBRS_ON_PMI                (1 << 11)
+
+static void __intel_pmu_lbr_enable(void)
+{
+       u64 debugctl;
+
+       rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
+       debugctl |= (X86_DEBUGCTL_LBR | X86_DEBUGCTL_FREEZE_LBRS_ON_PMI);
+       wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
+}
+
+static void __intel_pmu_lbr_disable(void)
+{
+       u64 debugctl;
+
+       rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
+       debugctl &= ~(X86_DEBUGCTL_LBR | X86_DEBUGCTL_FREEZE_LBRS_ON_PMI);
+       wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
+}
+
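+/*
+ * The 32-bit LBR format packs FROM and TO into a single MSR, so only
+ * the lbr_from range needs clearing.
+ */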
+static void intel_pmu_lbr_reset_32(void)
+{
+       int i;
+
+       for (i = 0; i < x86_pmu.lbr_nr; i++)
+               wrmsrl(x86_pmu.lbr_from + i, 0);
+}
+
+static void intel_pmu_lbr_reset_64(void)
+{
+       int i;
+
+       for (i = 0; i < x86_pmu.lbr_nr; i++) {
+               wrmsrl(x86_pmu.lbr_from + i, 0);
+               wrmsrl(x86_pmu.lbr_to   + i, 0);
+       }
+}
+
+static void intel_pmu_lbr_reset(void)
+{
+       if (!x86_pmu.lbr_nr)
+               return;
+
+       if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
+               intel_pmu_lbr_reset_32();
+       else
+               intel_pmu_lbr_reset_64();
+}
+
+static void intel_pmu_lbr_enable(struct perf_event *event)
+{
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+       if (!x86_pmu.lbr_nr)
+               return;
+
+       WARN_ON_ONCE(cpuc->enabled);
+
+       /*
+        * Reset the LBR stack if we changed task context to
+        * avoid data leaks.
+        */
+
+       if (event->ctx->task && cpuc->lbr_context != event->ctx) {
+               intel_pmu_lbr_reset();
+               cpuc->lbr_context = event->ctx;
+       }
+
+       cpuc->lbr_users++;
+}
+
+static void intel_pmu_lbr_disable(struct perf_event *event)
+{
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+       if (!x86_pmu.lbr_nr)
+               return;
+
+       cpuc->lbr_users--;
+       WARN_ON_ONCE(cpuc->lbr_users < 0);
+
+       if (cpuc->enabled && !cpuc->lbr_users)
+               __intel_pmu_lbr_disable();
+}
+
+static void intel_pmu_lbr_enable_all(void)
+{
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+       if (cpuc->lbr_users)
+               __intel_pmu_lbr_enable();
+}
+
+static void intel_pmu_lbr_disable_all(void)
+{
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+       if (cpuc->lbr_users)
+               __intel_pmu_lbr_disable();
+}
+
+static inline u64 intel_pmu_lbr_tos(void)
+{
+       u64 tos;
+
+       rdmsrl(x86_pmu.lbr_tos, tos);
+
+       return tos;
+}
+
+static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
+{
+       unsigned long mask = x86_pmu.lbr_nr - 1;
+       u64 tos = intel_pmu_lbr_tos();
+       int i;
+
+       for (i = 0; i < x86_pmu.lbr_nr; i++) {
+               unsigned long lbr_idx = (tos - i) & mask;
+               union {
+                       struct {
+                               u32 from;
+                               u32 to;
+                       };
+                       u64     lbr;
+               } msr_lastbranch;
+
+               rdmsrl(x86_pmu.lbr_from + lbr_idx, msr_lastbranch.lbr);
+
+               cpuc->lbr_entries[i].from  = msr_lastbranch.from;
+               cpuc->lbr_entries[i].to    = msr_lastbranch.to;
+               cpuc->lbr_entries[i].flags = 0;
+       }
+       cpuc->lbr_stack.nr = i;
+}
+
+#define LBR_FROM_FLAG_MISPRED  (1ULL << 63)
+
+/*
+ * Due to lack of segmentation in Linux the effective address (offset)
+ * is the same as the linear address, allowing us to merge the LIP and EIP
+ * LBR formats.
+ */
+static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
+{
+       unsigned long mask = x86_pmu.lbr_nr - 1;
+       int lbr_format = x86_pmu.intel_cap.lbr_format;
+       u64 tos = intel_pmu_lbr_tos();
+       int i;
+
+       for (i = 0; i < x86_pmu.lbr_nr; i++) {
+               unsigned long lbr_idx = (tos - i) & mask;
+               u64 from, to, flags = 0;
+
+               rdmsrl(x86_pmu.lbr_from + lbr_idx, from);
+               rdmsrl(x86_pmu.lbr_to   + lbr_idx, to);
+
+               if (lbr_format == LBR_FORMAT_EIP_FLAGS) {
+                       flags = !!(from & LBR_FROM_FLAG_MISPRED);
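+                       /* shift out the flag bit, sign-extending
+                        * bit 62 back into bit 63 */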
+                       from = (u64)((((s64)from) << 1) >> 1);
+               }
+
+               cpuc->lbr_entries[i].from  = from;
+               cpuc->lbr_entries[i].to    = to;
+               cpuc->lbr_entries[i].flags = flags;
+       }
+       cpuc->lbr_stack.nr = i;
+}
+
+static void intel_pmu_lbr_read(void)
+{
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+       if (!cpuc->lbr_users)
+               return;
+
+       if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
+               intel_pmu_lbr_read_32(cpuc);
+       else
+               intel_pmu_lbr_read_64(cpuc);
+}
+
+static void intel_pmu_lbr_init_core(void)
+{
+       x86_pmu.lbr_nr     = 4;
+       x86_pmu.lbr_tos    = 0x01c9;
+       x86_pmu.lbr_from   = 0x40;
+       x86_pmu.lbr_to     = 0x60;
+}
+
+static void intel_pmu_lbr_init_nhm(void)
+{
+       x86_pmu.lbr_nr     = 16;
+       x86_pmu.lbr_tos    = 0x01c9;
+       x86_pmu.lbr_from   = 0x680;
+       x86_pmu.lbr_to     = 0x6c0;
+}
+
+static void intel_pmu_lbr_init_atom(void)
+{
+       x86_pmu.lbr_nr     = 8;
+       x86_pmu.lbr_tos    = 0x01c9;
+       x86_pmu.lbr_from   = 0x40;
+       x86_pmu.lbr_to     = 0x60;
+}
+
+#endif /* CONFIG_CPU_SUP_INTEL */
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
new file mode 100644 (file)
index 0000000..a11ce73
--- /dev/null
@@ -0,0 +1,607 @@
+/*
+ * Netburst Performance Events (P4, old Xeon)
+ *
+ *  Copyright (C) 2010 Parallels, Inc., Cyrill Gorcunov <gorcunov@openvz.org>
+ *  Copyright (C) 2010 Intel Corporation, Lin Ming <ming.m.lin@intel.com>
+ *
+ *  For licencing details see kernel-base/COPYING
+ */
+
+#ifdef CONFIG_CPU_SUP_INTEL
+
+#include <asm/perf_event_p4.h>
+
+/*
+ * array indices: 0,1 - HT threads, used on HT-enabled CPUs
+ */
+struct p4_event_template {
+       u32 opcode;                     /* ESCR event + CCCR selector */
+       u64 config;                     /* packed predefined bits */
+       int dep;                        /* upstream dependency event index */
+       unsigned int emask;             /* ESCR EventMask */
+       unsigned int escr_msr[2];       /* ESCR MSR for this event */
+       unsigned int cntr[2];           /* counter index (offset) */
+};
+
+struct p4_pmu_res {
+       /* maps hw_conf::idx to its template, so the right ESCR can be found */
+       struct p4_event_template *tpl[ARCH_P4_MAX_CCCR];
+};
+
+static DEFINE_PER_CPU(struct p4_pmu_res, p4_pmu_config);
+
+/*
+ * WARN: CCCR1 doesn't have a working enable bit so try not to
+ * use it if possible
+ *
+ * Also, once we start to support raw events we will need to
+ * append _all_ P4_EVENT_PACK'ed events here
+ */
+struct p4_event_template p4_templates[] = {
+       [0] = {
+               .opcode = P4_UOP_TYPE,
+               .config = 0,
+               .dep    = -1,
+               .emask  =
+                       P4_EVENT_ATTR(P4_UOP_TYPE, TAGLOADS)    |
+                       P4_EVENT_ATTR(P4_UOP_TYPE, TAGSTORES),
+               .escr_msr       = { MSR_P4_RAT_ESCR0, MSR_P4_RAT_ESCR1 },
+               .cntr           = { 16, 17 },
+       },
+       [1] = {
+               .opcode = P4_GLOBAL_POWER_EVENTS,
+               .config = 0,
+               .dep    = -1,
+               .emask  =
+                       P4_EVENT_ATTR(P4_GLOBAL_POWER_EVENTS, RUNNING),
+               .escr_msr       = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
+               .cntr           = { 0, 2 },
+       },
+       [2] = {
+               .opcode = P4_INSTR_RETIRED,
+               .config = 0,
+               .dep    = -1, /* needs front-end tagging */
+               .emask  =
+                       P4_EVENT_ATTR(P4_INSTR_RETIRED, NBOGUSNTAG)     |
+                       P4_EVENT_ATTR(P4_INSTR_RETIRED, BOGUSNTAG),
+               .escr_msr       = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
+               .cntr           = { 12, 14 },
+       },
+       [3] = {
+               .opcode = P4_BSQ_CACHE_REFERENCE,
+               .config = 0,
+               .dep    = -1,
+               .emask  =
+                       P4_EVENT_ATTR(P4_BSQ_CACHE_REFERENCE, RD_2ndL_HITS)     |
+                       P4_EVENT_ATTR(P4_BSQ_CACHE_REFERENCE, RD_2ndL_HITE)     |
+                       P4_EVENT_ATTR(P4_BSQ_CACHE_REFERENCE, RD_2ndL_HITM)     |
+                       P4_EVENT_ATTR(P4_BSQ_CACHE_REFERENCE, RD_3rdL_HITS)     |
+                       P4_EVENT_ATTR(P4_BSQ_CACHE_REFERENCE, RD_3rdL_HITE)     |
+                       P4_EVENT_ATTR(P4_BSQ_CACHE_REFERENCE, RD_3rdL_HITM),
+               .escr_msr       = { MSR_P4_BSU_ESCR0, MSR_P4_BSU_ESCR1 },
+               .cntr           = { 0, 2 },
+       },
+       [4] = {
+               .opcode = P4_BSQ_CACHE_REFERENCE,
+               .config = 0,
+               .dep    = -1,
+               .emask  =
+                       P4_EVENT_ATTR(P4_BSQ_CACHE_REFERENCE, RD_2ndL_MISS)     |
+                       P4_EVENT_ATTR(P4_BSQ_CACHE_REFERENCE, RD_3rdL_MISS)     |
+                       P4_EVENT_ATTR(P4_BSQ_CACHE_REFERENCE, WR_2ndL_MISS),
+               .escr_msr       = { MSR_P4_BSU_ESCR0, MSR_P4_BSU_ESCR1 },
+               .cntr           = { 0, 3 },
+       },
+       [5] = {
+               .opcode = P4_RETIRED_BRANCH_TYPE,
+               .config = 0,
+               .dep    = -1,
+               .emask  =
+                       P4_EVENT_ATTR(P4_RETIRED_BRANCH_TYPE, CONDITIONAL)      |
+                       P4_EVENT_ATTR(P4_RETIRED_BRANCH_TYPE, CALL)             |
+                       P4_EVENT_ATTR(P4_RETIRED_BRANCH_TYPE, RETURN)           |
+                       P4_EVENT_ATTR(P4_RETIRED_BRANCH_TYPE, INDIRECT),
+               .escr_msr       = { MSR_P4_TBPU_ESCR0, MSR_P4_TBPU_ESCR1 },
+               .cntr           = { 4, 6 },
+       },
+       [6] = {
+               .opcode = P4_MISPRED_BRANCH_RETIRED,
+               .config = 0,
+               .dep    = -1,
+               .emask  =
+                       P4_EVENT_ATTR(P4_MISPRED_BRANCH_RETIRED, NBOGUS),
+               .escr_msr       = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
+               .cntr           = { 12, 14 },
+       },
+       [7] = {
+               .opcode = P4_FSB_DATA_ACTIVITY,
+               .config = p4_config_pack_cccr(P4_CCCR_EDGE | P4_CCCR_COMPARE),
+               .dep    = -1,
+               .emask  =
+                       P4_EVENT_ATTR(P4_FSB_DATA_ACTIVITY, DRDY_DRV)   |
+                       P4_EVENT_ATTR(P4_FSB_DATA_ACTIVITY, DRDY_OWN),
+               .escr_msr       = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
+               .cntr           = { 0, 2 },
+       },
+};
+
+static struct p4_event_template *p4_event_map[PERF_COUNT_HW_MAX] = {
+       /* non-halted CPU clocks */
+       [PERF_COUNT_HW_CPU_CYCLES]              = &p4_templates[1],
+
+       /* retired instructions: dep on tagging the FSB */
+       [PERF_COUNT_HW_INSTRUCTIONS]            = &p4_templates[2],
+
+       /* cache hits */
+       [PERF_COUNT_HW_CACHE_REFERENCES]        = &p4_templates[3],
+
+       /* cache misses */
+       [PERF_COUNT_HW_CACHE_MISSES]            = &p4_templates[4],
+
+       /* branch instructions retired */
+       [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = &p4_templates[5],
+
+       /* mispredicted branches retired */
+       [PERF_COUNT_HW_BRANCH_MISSES]           = &p4_templates[6],
+
+       /* bus ready clocks (cpu is driving #DRDY_DRV/#DRDY_OWN):  */
+       [PERF_COUNT_HW_BUS_CYCLES]              = &p4_templates[7],
+};
+
+static u64 p4_pmu_event_map(int hw_event)
+{
+       struct p4_event_template *tpl;
+       u64 config;
+
+       if (hw_event >= ARRAY_SIZE(p4_event_map)) {
+               printk_once(KERN_ERR "PMU: Incorrect event index\n");
+               return 0;
+       }
+       tpl = p4_event_map[hw_event];
+
+       /*
+        * fill config up according to
+        * a predefined event template
+        */
+       config  = tpl->config;
+       config |= p4_config_pack_escr(P4_EVENT_UNPACK_EVENT(tpl->opcode) << P4_EVNTSEL_EVENT_SHIFT);
+       config |= p4_config_pack_escr(tpl->emask << P4_EVNTSEL_EVENTMASK_SHIFT);
+       config |= p4_config_pack_cccr(P4_EVENT_UNPACK_SELECTOR(tpl->opcode) << P4_CCCR_ESCR_SELECT_SHIFT);
+
+       /* on HT machine we need a special bit */
+       if (p4_ht_active() && p4_ht_thread(raw_smp_processor_id()))
+               config = p4_set_ht_bit(config);
+
+       return config;
+}
+
+/*
+ * Note that we still have 5 events (from global events SDM list)
+ * intersected in opcode+emask bits so we will need another
+ * scheme there to distinguish templates.
+ */
+static inline int p4_pmu_emask_match(unsigned int dst, unsigned int src)
+{
+       return dst & src;
+}
+
+static struct p4_event_template *p4_pmu_template_lookup(u64 config)
+{
+       u32 opcode = p4_config_unpack_opcode(config);
+       unsigned int emask = p4_config_unpack_emask(config);
+       unsigned int i;
+
+       for (i = 0; i < ARRAY_SIZE(p4_templates); i++) {
+               if (opcode == p4_templates[i].opcode &&
+                       p4_pmu_emask_match(emask, p4_templates[i].emask))
+                       return &p4_templates[i];
+       }
+
+       return NULL;
+}
+
+/*
+ * We don't control raw events so it's up to the caller
+ * to pass sane values (and we don't count the thread number
+ * on an HT machine but allow HT-compatible specifics to be
+ * passed on)
+ */
+static u64 p4_pmu_raw_event(u64 hw_event)
+{
+       return hw_event &
+               (p4_config_pack_escr(P4_EVNTSEL_MASK_HT) |
+                p4_config_pack_cccr(P4_CCCR_MASK_HT));
+}
+
+static int p4_hw_config(struct perf_event_attr *attr, struct hw_perf_event *hwc)
+{
+       int cpu = raw_smp_processor_id();
+
+       /*
+        * the reason we use the cpu number this early is that if we get
+        * scheduled on the same cpu for the first time, we will not need
+        * to swap the thread-specific flags in the config (and will save
+        * some cpu cycles)
+        */
+
+       /* CCCR by default */
+       hwc->config = p4_config_pack_cccr(p4_default_cccr_conf(cpu));
+
+       /* Count user and OS events unless they were explicitly excluded */
+       hwc->config |= p4_config_pack_escr(p4_default_escr_conf(cpu, attr->exclude_kernel,
+                                                               attr->exclude_user));
+       return 0;
+}
+
+static inline void p4_pmu_clear_cccr_ovf(struct hw_perf_event *hwc)
+{
+       unsigned long dummy;
+
+       rdmsrl(hwc->config_base + hwc->idx, dummy);
+       if (dummy & P4_CCCR_OVF) {
+               (void)checking_wrmsrl(hwc->config_base + hwc->idx,
+                       ((u64)dummy) & ~P4_CCCR_OVF);
+       }
+}
+
+static inline void p4_pmu_disable_event(struct perf_event *event)
+{
+       struct hw_perf_event *hwc = &event->hw;
+
+       /*
+        * If the event gets disabled while the counter is in an
+        * overflowed state we need to clear P4_CCCR_OVF, otherwise
+        * the interrupt gets asserted again and again
+        */
+       (void)checking_wrmsrl(hwc->config_base + hwc->idx,
+               (u64)(p4_config_unpack_cccr(hwc->config)) &
+                       ~P4_CCCR_ENABLE & ~P4_CCCR_OVF);
+}
+
+static void p4_pmu_disable_all(void)
+{
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       int idx;
+
+       for (idx = 0; idx < x86_pmu.num_events; idx++) {
+               struct perf_event *event = cpuc->events[idx];
+               if (!test_bit(idx, cpuc->active_mask))
+                       continue;
+               p4_pmu_disable_event(event);
+       }
+}
+
+static void p4_pmu_enable_event(struct perf_event *event)
+{
+       struct hw_perf_event *hwc = &event->hw;
+       int thread = p4_ht_config_thread(hwc->config);
+       u64 escr_conf = p4_config_unpack_escr(p4_clear_ht_bit(hwc->config));
+       u64 escr_base;
+       struct p4_event_template *tpl;
+       struct p4_pmu_res *c;
+
+       /*
+        * some preparation work from per-cpu private fields
+        * since we need to find out which ESCR to use
+        */
+       c = &__get_cpu_var(p4_pmu_config);
+       tpl = c->tpl[hwc->idx];
+       if (!tpl) {
+               pr_crit("%s: Wrong index: %d\n", __func__, hwc->idx);
+               return;
+       }
+       escr_base = (u64)tpl->escr_msr[thread];
+
+       /*
+        * - we don't support cascaded counters yet
+        * - and counter 1 is broken (erratum)
+        */
+       WARN_ON_ONCE(p4_is_event_cascaded(hwc->config));
+       WARN_ON_ONCE(hwc->idx == 1);
+
+       (void)checking_wrmsrl(escr_base, escr_conf);
+       (void)checking_wrmsrl(hwc->config_base + hwc->idx,
+               (u64)(p4_config_unpack_cccr(hwc->config)) | P4_CCCR_ENABLE);
+}
+
+static void p4_pmu_enable_all(void)
+{
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       int idx;
+
+       for (idx = 0; idx < x86_pmu.num_events; idx++) {
+               struct perf_event *event = cpuc->events[idx];
+               if (!test_bit(idx, cpuc->active_mask))
+                       continue;
+               p4_pmu_enable_event(event);
+       }
+}
+
+static int p4_pmu_handle_irq(struct pt_regs *regs)
+{
+       struct perf_sample_data data;
+       struct cpu_hw_events *cpuc;
+       struct perf_event *event;
+       struct hw_perf_event *hwc;
+       int idx, handled = 0;
+       u64 val;
+
+       data.addr = 0;
+       data.raw = NULL;
+
+       cpuc = &__get_cpu_var(cpu_hw_events);
+
+       for (idx = 0; idx < x86_pmu.num_events; idx++) {
+
+               if (!test_bit(idx, cpuc->active_mask))
+                       continue;
+
+               event = cpuc->events[idx];
+               hwc = &event->hw;
+
+               WARN_ON_ONCE(hwc->idx != idx);
+
+               /*
+                * FIXME: Redundant call, actually not needed
+                * but just to check if we're screwed
+                */
+               p4_pmu_clear_cccr_ovf(hwc);
+
+               val = x86_perf_event_update(event);
+               if (val & (1ULL << (x86_pmu.event_bits - 1)))
+                       continue;
+
+               /*
+                * event overflow
+                */
+               handled         = 1;
+               data.period     = event->hw.last_period;
+
+               if (!x86_perf_event_set_period(event))
+                       continue;
+               if (perf_event_overflow(event, 1, &data, regs))
+                       p4_pmu_disable_event(event);
+       }
+
+       if (handled) {
+#ifdef CONFIG_X86_LOCAL_APIC
+               /* p4 quirk: unmask it again */
+               apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED);
+#endif
+               inc_irq_stat(apic_perf_irqs);
+       }
+
+       return handled;
+}
+
+/*
+ * swap thread-specific fields according to the thread
+ * we are going to run on
+ */
+static void p4_pmu_swap_config_ts(struct hw_perf_event *hwc, int cpu)
+{
+       u32 escr, cccr;
+
+       /*
+        * either we got lucky and stay on the same cpu, or there is no HT support
+        */
+       if (!p4_should_swap_ts(hwc->config, cpu))
+               return;
+
+       /*
+        * the event was migrated from another logical
+        * cpu, so we need to swap the thread-specific flags
+        */
+
+       escr = p4_config_unpack_escr(hwc->config);
+       cccr = p4_config_unpack_cccr(hwc->config);
+
+       if (p4_ht_thread(cpu)) {
+               cccr &= ~P4_CCCR_OVF_PMI_T0;
+               cccr |= P4_CCCR_OVF_PMI_T1;
+               if (escr & P4_EVNTSEL_T0_OS) {
+                       escr &= ~P4_EVNTSEL_T0_OS;
+                       escr |= P4_EVNTSEL_T1_OS;
+               }
+               if (escr & P4_EVNTSEL_T0_USR) {
+                       escr &= ~P4_EVNTSEL_T0_USR;
+                       escr |= P4_EVNTSEL_T1_USR;
+               }
+               hwc->config  = p4_config_pack_escr(escr);
+               hwc->config |= p4_config_pack_cccr(cccr);
+               hwc->config |= P4_CONFIG_HT;
+       } else {
+               cccr &= ~P4_CCCR_OVF_PMI_T1;
+               cccr |= P4_CCCR_OVF_PMI_T0;
+               if (escr & P4_EVNTSEL_T1_OS) {
+                       escr &= ~P4_EVNTSEL_T1_OS;
+                       escr |= P4_EVNTSEL_T0_OS;
+               }
+               if (escr & P4_EVNTSEL_T1_USR) {
+                       escr &= ~P4_EVNTSEL_T1_USR;
+                       escr |= P4_EVNTSEL_T0_USR;
+               }
+               hwc->config  = p4_config_pack_escr(escr);
+               hwc->config |= p4_config_pack_cccr(cccr);
+               hwc->config &= ~P4_CONFIG_HT;
+       }
+}
+
+/* ESCRs are not sequential in memory so we need a map */
+static unsigned int p4_escr_map[ARCH_P4_TOTAL_ESCR] = {
+       MSR_P4_ALF_ESCR0,       /*  0 */
+       MSR_P4_ALF_ESCR1,       /*  1 */
+       MSR_P4_BPU_ESCR0,       /*  2 */
+       MSR_P4_BPU_ESCR1,       /*  3 */
+       MSR_P4_BSU_ESCR0,       /*  4 */
+       MSR_P4_BSU_ESCR1,       /*  5 */
+       MSR_P4_CRU_ESCR0,       /*  6 */
+       MSR_P4_CRU_ESCR1,       /*  7 */
+       MSR_P4_CRU_ESCR2,       /*  8 */
+       MSR_P4_CRU_ESCR3,       /*  9 */
+       MSR_P4_CRU_ESCR4,       /* 10 */
+       MSR_P4_CRU_ESCR5,       /* 11 */
+       MSR_P4_DAC_ESCR0,       /* 12 */
+       MSR_P4_DAC_ESCR1,       /* 13 */
+       MSR_P4_FIRM_ESCR0,      /* 14 */
+       MSR_P4_FIRM_ESCR1,      /* 15 */
+       MSR_P4_FLAME_ESCR0,     /* 16 */
+       MSR_P4_FLAME_ESCR1,     /* 17 */
+       MSR_P4_FSB_ESCR0,       /* 18 */
+       MSR_P4_FSB_ESCR1,       /* 19 */
+       MSR_P4_IQ_ESCR0,        /* 20 */
+       MSR_P4_IQ_ESCR1,        /* 21 */
+       MSR_P4_IS_ESCR0,        /* 22 */
+       MSR_P4_IS_ESCR1,        /* 23 */
+       MSR_P4_ITLB_ESCR0,      /* 24 */
+       MSR_P4_ITLB_ESCR1,      /* 25 */
+       MSR_P4_IX_ESCR0,        /* 26 */
+       MSR_P4_IX_ESCR1,        /* 27 */
+       MSR_P4_MOB_ESCR0,       /* 28 */
+       MSR_P4_MOB_ESCR1,       /* 29 */
+       MSR_P4_MS_ESCR0,        /* 30 */
+       MSR_P4_MS_ESCR1,        /* 31 */
+       MSR_P4_PMH_ESCR0,       /* 32 */
+       MSR_P4_PMH_ESCR1,       /* 33 */
+       MSR_P4_RAT_ESCR0,       /* 34 */
+       MSR_P4_RAT_ESCR1,       /* 35 */
+       MSR_P4_SAAT_ESCR0,      /* 36 */
+       MSR_P4_SAAT_ESCR1,      /* 37 */
+       MSR_P4_SSU_ESCR0,       /* 38 */
+       MSR_P4_SSU_ESCR1,       /* 39 */
+       MSR_P4_TBPU_ESCR0,      /* 40 */
+       MSR_P4_TBPU_ESCR1,      /* 41 */
+       MSR_P4_TC_ESCR0,        /* 42 */
+       MSR_P4_TC_ESCR1,        /* 43 */
+       MSR_P4_U2L_ESCR0,       /* 44 */
+       MSR_P4_U2L_ESCR1,       /* 45 */
+};
+
+static int p4_get_escr_idx(unsigned int addr)
+{
+       unsigned int i;
+
+       for (i = 0; i < ARRAY_SIZE(p4_escr_map); i++) {
+               if (addr == p4_escr_map[i])
+                       return i;
+       }
+
+       return -1;
+}
+
+static int p4_pmu_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
+{
+       unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
+       unsigned long escr_mask[BITS_TO_LONGS(ARCH_P4_TOTAL_ESCR)];
+
+       struct hw_perf_event *hwc;
+       struct p4_event_template *tpl;
+       struct p4_pmu_res *c;
+       int cpu = raw_smp_processor_id();
+       int escr_idx, thread, i, num;
+
+       bitmap_zero(used_mask, X86_PMC_IDX_MAX);
+       bitmap_zero(escr_mask, ARCH_P4_TOTAL_ESCR);
+
+       c = &__get_cpu_var(p4_pmu_config);
+       /*
+        * First find out which resources the events are going
+        * to use; if an ESCR+CCCR tuple is already borrowed
+        * then get out of here (num still counts the unassigned
+        * events and turns into -ENOSPC below)
+        */
+       for (i = 0, num = n; i < n; i++, num--) {
+               hwc = &cpuc->event_list[i]->hw;
+               tpl = p4_pmu_template_lookup(hwc->config);
+               if (!tpl)
+                       goto done;
+               thread = p4_ht_thread(cpu);
+               escr_idx = p4_get_escr_idx(tpl->escr_msr[thread]);
+               if (escr_idx == -1)
+                       goto done;
+
+               /* already allocated and remains on the same cpu */
+               if (hwc->idx != -1 && !p4_should_swap_ts(hwc->config, cpu)) {
+                       if (assign)
+                               assign[i] = hwc->idx;
+                       /* upstream dependent event */
+                       if (unlikely(tpl->dep != -1))
+                               printk_once(KERN_WARNING "PMU: Dep events are "
+                                       "not implemented yet\n");
+                       goto reserve;
+               }
+
+               /* it may be already borrowed */
+               if (test_bit(tpl->cntr[thread], used_mask) ||
+                       test_bit(escr_idx, escr_mask))
+                       goto done;
+
+               /*
+                * The ESCR+CCCR+counter tuple is available: swap the
+                * thread-specific bits, push the assigned counter
+                * back and save the template into the per-cpu
+                * area (which will allow us to find out the ESCR
+                * to use when the event is enabled via the real MSR)
+                */
+               p4_pmu_swap_config_ts(hwc, cpu);
+               if (assign) {
+                       assign[i] = tpl->cntr[thread];
+                       c->tpl[assign[i]] = tpl;
+               }
+reserve:
+               set_bit(tpl->cntr[thread], used_mask);
+               set_bit(escr_idx, escr_mask);
+       }
+
+done:
+       return num ? -ENOSPC : 0;
+}
+
+static __initconst struct x86_pmu p4_pmu = {
+       .name                   = "Netburst P4/Xeon",
+       .handle_irq             = p4_pmu_handle_irq,
+       .disable_all            = p4_pmu_disable_all,
+       .enable_all             = p4_pmu_enable_all,
+       .enable                 = p4_pmu_enable_event,
+       .disable                = p4_pmu_disable_event,
+       .eventsel               = MSR_P4_BPU_CCCR0,
+       .perfctr                = MSR_P4_BPU_PERFCTR0,
+       .event_map              = p4_pmu_event_map,
+       .raw_event              = p4_pmu_raw_event,
+       .max_events             = ARRAY_SIZE(p4_event_map),
+       .get_event_constraints  = x86_get_event_constraints,
+       /*
+        * If HT is disabled we may need to use all
+        * ARCH_P4_MAX_CCCR counters simultaneously,
+        * though leave it restricted for the moment,
+        * assuming HT is on
+        */
+       .num_events             = ARCH_P4_MAX_CCCR,
+       .apic                   = 1,
+       .event_bits             = 40,
+       .event_mask             = (1ULL << 40) - 1,
+       .max_period             = (1ULL << 39) - 1,
+       .hw_config              = p4_hw_config,
+       .schedule_events        = p4_pmu_schedule_events,
+};
+
+static __init int p4_pmu_init(void)
+{
+       unsigned int low, high;
+
+       /* If we get stripped -- indexing fails */
+       BUILD_BUG_ON(ARCH_P4_MAX_CCCR > X86_PMC_MAX_GENERIC);
+
+       rdmsr(MSR_IA32_MISC_ENABLE, low, high);
+       if (!(low & (1 << 7))) {
+               pr_cont("unsupported Netburst CPU model %d ",
+                       boot_cpu_data.x86_model);
+               return -ENODEV;
+       }
+
+       pr_cont("Netburst events, ");
+
+       x86_pmu = p4_pmu;
+
+       return 0;
+}
+
+#endif /* CONFIG_CPU_SUP_INTEL */
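
The scheduler above pairs every event with both a counter (CCCR) and an
ESCR, tracked in two bitmaps, and gives up on the whole schedule as soon
as either resource is already taken. A minimal stand-alone sketch of that
first-fit, all-or-nothing idea (all names below are illustrative, not the
kernel API):

    #include <stdbool.h>

    #define MAX_CNTR 18
    #define MAX_ESCR 46

    struct slot { int cntr, escr; };

    /* Set the bit; report whether it was already taken ("borrowed"). */
    static bool test_and_set(unsigned char *mask, int bit)
    {
            bool old = mask[bit / 8] & (1 << (bit % 8));

            mask[bit / 8] |= 1 << (bit % 8);
            return old;
    }

    /* Returns 0 on success, or how many events were left unplaced,
     * mirroring the all-or-nothing style of p4_pmu_schedule_events(). */
    static int schedule_slots(const struct slot *want, int n, int *assign)
    {
            unsigned char cntr_mask[(MAX_CNTR + 7) / 8] = {0};
            unsigned char escr_mask[(MAX_ESCR + 7) / 8] = {0};
            int i;

            for (i = 0; i < n; i++) {
                    if (test_and_set(cntr_mask, want[i].cntr) ||
                        test_and_set(escr_mask, want[i].escr))
                            return n - i;
                    assign[i] = want[i].cntr;
            }
            return 0;
    }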
index a330485d14da24dbfe5a1d21244512494bb1d914..6ff4d01d880f96a2ea2d583aeb79baf69c303cfa 100644 (file)
@@ -109,6 +109,8 @@ static __initconst struct x86_pmu p6_pmu = {
        .enable_all             = p6_pmu_enable_all,
        .enable                 = p6_pmu_enable_event,
        .disable                = p6_pmu_disable_event,
+       .hw_config              = x86_hw_config,
+       .schedule_events        = x86_schedule_events,
        .eventsel               = MSR_P6_EVNTSEL0,
        .perfctr                = MSR_P6_PERFCTR0,
        .event_map              = p6_pmu_event_map,
index 419386c24b8205c3a22c993b3da48f448bf96283..cbaf8f2b83df34907df786d28892a61e32662148 100644 (file)
@@ -20,7 +20,7 @@ lib-y := delay.o
 lib-y += thunk_$(BITS).o
 lib-y += usercopy_$(BITS).o getuser.o putuser.o
 lib-y += memcpy_$(BITS).o
-lib-$(CONFIG_KPROBES) += insn.o inat.o
+lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o
 
 obj-y += msr.o msr-reg.o msr-reg-export.o
 
index 95477038a72ad479b2c1b5d19138e9ff675ee62c..2bccb7b9da2d70d82b2ca178432e6d03a0d0e3c6 100644 (file)
@@ -203,8 +203,9 @@ struct perf_event_attr {
                                enable_on_exec :  1, /* next exec enables     */
                                task           :  1, /* trace fork/exit       */
                                watermark      :  1, /* wakeup_watermark      */
+                               precise        :  1, /* OoO invariant counter */
 
-                               __reserved_1   : 49;
+                               __reserved_1   : 48;
 
        union {
                __u32           wakeup_events;    /* wakeup every n events */
@@ -293,6 +294,12 @@ struct perf_event_mmap_page {
 #define PERF_RECORD_MISC_USER                  (2 << 0)
 #define PERF_RECORD_MISC_HYPERVISOR            (3 << 0)
 
+#define PERF_RECORD_MISC_EXACT                 (1 << 14)
+/*
+ * Reserve the last bit to indicate some extended misc field
+ */
+#define PERF_RECORD_MISC_EXT_RESERVED          (1 << 15)
+
 struct perf_event_header {
        __u32   type;
        __u16   misc;
@@ -468,6 +475,17 @@ struct perf_raw_record {
        void                            *data;
 };
 
+struct perf_branch_entry {
+       __u64                           from;
+       __u64                           to;
+       __u64                           flags;
+};
+
+struct perf_branch_stack {
+       __u64                           nr;
+       struct perf_branch_entry        entries[0];
+};
+
 struct task_struct;
 
 /**
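
The new 'precise' attribute bit asks for a counter whose sampled IP is not
skewed by out-of-order execution (PEBS on Intel hardware), and a sample
that really is exact comes back with PERF_RECORD_MISC_EXACT set in its
header. A hedged sketch of the consumer side, assuming hdr points at a
record already read out of the perf mmap ring buffer:

    #include <linux/perf_event.h>

    /* Sketch only: returns nonzero if the kernel marked this sample's
     * instruction pointer as exact. */
    static int sample_is_exact(const struct perf_event_header *hdr)
    {
            return (hdr->misc & PERF_RECORD_MISC_EXACT) != 0;
    }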
index 574ee58a3046a4bff950aa9051e73a2d5dfd7d8b..455393e71cab43c91244e01f82d93cdcfa4addc2 100644 (file)
@@ -1368,6 +1368,8 @@ void perf_event_task_sched_in(struct task_struct *task)
        if (cpuctx->task_ctx == ctx)
                return;
 
+       perf_disable();
+
        /*
         * We want to keep the following priority order:
         * cpu pinned (that don't need to move), task pinned,
@@ -1380,6 +1382,8 @@ void perf_event_task_sched_in(struct task_struct *task)
        ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE);
 
        cpuctx->task_ctx = ctx;
+
+       perf_enable();
 }
 
 #define MAX_INTERRUPTS (~0ULL)
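
Wrapping the switch-in path in perf_disable()/perf_enable() keeps the PMU
quiet while first pinned and then flexible events are rescheduled, so no
counter fires half-programmed. The general shape, as a self-contained
sketch with illustrative helpers (not the kernel API):

    #include <stdio.h>

    struct counter_cfg { unsigned long long config; };

    static void pmu_disable_all(void) { puts("pmu: off"); }
    static void pmu_enable_all(void)  { puts("pmu: on"); }

    static void pmu_write_counter(int idx, const struct counter_cfg *cfg)
    {
            printf("counter %d <- %#llx\n", idx, cfg->config);
    }

    /* Reprogram a whole batch inside one global quiesce window instead
     * of toggling the PMU once per counter. */
    static void pmu_reprogram_batch(const struct counter_cfg *cfg, int n)
    {
            int i;

            pmu_disable_all();
            for (i = 0; i < n; i++)
                    pmu_write_counter(i, &cfg[i]);
            pmu_enable_all();
    }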
index 8a8f52db7e385c28b2092f255ea07fb532fa5607..0abd25ee595f8f7350ca053d3f74c05820a19564 100644 (file)
@@ -513,6 +513,14 @@ else
        LIB_OBJS += util/probe-finder.o
 endif
 
+ifneq ($(shell sh -c "(echo '\#include <newt.h>'; echo 'int main(void) { newtInit(); newtCls(); return newtFinished(); }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -lnewt -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y)
+       msg := $(warning newt not found, disabling TUI support. Please install newt-devel or libnewt-dev);
+       BASIC_CFLAGS += -DNO_NEWT_SUPPORT
+else
+       EXTLIBS += -lnewt
+       LIB_OBJS += util/newt.o
+endif
+
 ifndef NO_LIBPERL
 PERL_EMBED_LDOPTS = `perl -MExtUtils::Embed -e ldopts 2>/dev/null`
 PERL_EMBED_CCOPTS = `perl -MExtUtils::Embed -e ccopts 2>/dev/null`
index 6ad7148451c5bcc32995c08e75a920d0898e4c7e..45d14660d53db2c1c9830284d83ae07e1fe91307 100644 (file)
@@ -452,6 +452,16 @@ static void annotate_sym(struct hist_entry *he)
        if (!filename)
                return;
 
+       if (dso->origin == DSO__ORIG_KERNEL) {
+               if (dso->annotate_warned)
+                       return;
+               dso->annotate_warned = 1;
+               pr_err("Can't annotate %s: No vmlinux file was found in the "
+                      "path:\n", sym->name);
+               vmlinux_path__fprintf(stderr);
+               return;
+       }
+
        pr_debug("%s: filename=%s, sym=%s, start=%#Lx, end=%#Lx\n", __func__,
                 filename, sym->name, map->unmap_ip(map, sym->start),
                 map->unmap_ip(map, sym->end));
index 3b8b6387c47ca4de49873990b2c47216c8309aaf..962cdbf44ae972946b3864c4b281a05bd8b99e03 100644 (file)
@@ -225,7 +225,7 @@ static struct perf_header_attr *get_header_attr(struct perf_event_attr *a, int n
        return h_attr;
 }
 
-static void create_counter(int counter, int cpu, pid_t pid)
+static void create_counter(int counter, int cpu, pid_t pid, bool forks)
 {
        char *filter = filters[counter];
        struct perf_event_attr *attr = attrs + counter;
@@ -277,6 +277,9 @@ static void create_counter(int counter, int cpu, pid_t pid)
        attr->inherit           = inherit;
        attr->disabled          = 1;
 
+       if (forks)
+               attr->enable_on_exec = 1;
+
 try_again:
        fd[nr_cpu][counter] = sys_perf_event_open(attr, pid, cpu, group_fd, 0);
 
@@ -284,7 +287,8 @@ try_again:
                int err = errno;
 
                if (err == EPERM || err == EACCES)
-                       die("Permission error - are you root?\n");
+                       die("Permission error - are you root?\n"
+                           "\t Consider tweaking /proc/sys/kernel/perf_event_paranoid.\n");
                else if (err ==  ENODEV && profile_cpu != -1)
                        die("No such device - did you specify an out-of-range profile CPU?\n");
 
@@ -380,13 +384,13 @@ try_again:
        ioctl(fd[nr_cpu][counter], PERF_EVENT_IOC_ENABLE);
 }
 
-static void open_counters(int cpu, pid_t pid)
+static void open_counters(int cpu, pid_t pid, bool forks)
 {
        int counter;
 
        group_fd = -1;
        for (counter = 0; counter < nr_counters; counter++)
-               create_counter(counter, cpu, pid);
+               create_counter(counter, cpu, pid, forks);
 
        nr_cpu++;
 }
@@ -546,11 +550,11 @@ static int __cmd_record(int argc, const char **argv)
 
 
        if ((!system_wide && !inherit) || profile_cpu != -1) {
-               open_counters(profile_cpu, target_pid);
+               open_counters(profile_cpu, target_pid, forks);
        } else {
                nr_cpus = read_cpu_map();
                for (i = 0; i < nr_cpus; i++)
-                       open_counters(cpumap[i], target_pid);
+                       open_counters(cpumap[i], target_pid, forks);
        }
 
        if (file_new) {
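
Opening the counters disabled and setting enable_on_exec means a forked
workload is only measured from its exec() onward, so perf's own fork/exec
path no longer pollutes the counts. A minimal stand-alone sketch of that
attribute setup (raw syscall, since libc provides no wrapper):

    #define _GNU_SOURCE
    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/perf_event.h>

    static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
                               int cpu, int group_fd, unsigned long flags)
    {
            return syscall(__NR_perf_event_open, attr, pid, cpu,
                           group_fd, flags);
    }

    /* Count cycles in 'child', starting only once it calls exec(). */
    static int counter_for_exec(pid_t child)
    {
            struct perf_event_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = PERF_TYPE_HARDWARE;
            attr.config = PERF_COUNT_HW_CPU_CYCLES;
            attr.disabled = 1;        /* stay off through fork()... */
            attr.enable_on_exec = 1;  /* ...arm automatically at exec() */

            return perf_event_open(&attr, child, -1, -1, 0);
    }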
index f815de25d0fc5b76eef680411930589654e793e8..1f9f8695f055a3327fbff0658d4e2df6864dc0db 100644 (file)
@@ -267,6 +267,7 @@ static int __cmd_report(void)
        int ret = -EINVAL;
        struct perf_session *session;
        struct rb_node *next;
+       const char *help = "For a higher level overview, try: perf report --sort comm,dso";
 
        session = perf_session__new(input_name, O_RDONLY, force);
        if (session == NULL)
@@ -301,30 +302,38 @@ static int __cmd_report(void)
                stats = rb_entry(next, struct event_stat_id, rb_node);
                perf_session__collapse_resort(&stats->hists);
                perf_session__output_resort(&stats->hists, stats->stats.total);
-               if (rb_first(&session->stats_by_id) ==
-                   rb_last(&session->stats_by_id))
-                       fprintf(stdout, "# Samples: %Ld\n#\n",
-                               stats->stats.total);
-               else
-                       fprintf(stdout, "# Samples: %Ld %s\n#\n",
-                               stats->stats.total,
-                               __event_name(stats->type, stats->config));
 
-               perf_session__fprintf_hists(&stats->hists, NULL, false, stdout,
+               if (use_browser)
+                       perf_session__browse_hists(&stats->hists,
+                                                  stats->stats.total, help);
+               else {
+                       if (rb_first(&session->stats_by_id) ==
+                           rb_last(&session->stats_by_id))
+                               fprintf(stdout, "# Samples: %Ld\n#\n",
+                                       stats->stats.total);
+                       else
+                               fprintf(stdout, "# Samples: %Ld %s\n#\n",
+                                       stats->stats.total,
+                                       __event_name(stats->type, stats->config));
+
+                       perf_session__fprintf_hists(&stats->hists, NULL, false, stdout,
                                            stats->stats.total);
-               fprintf(stdout, "\n\n");
+                       fprintf(stdout, "\n\n");
+               }
+
                next = rb_next(&stats->rb_node);
        }
 
-       if (sort_order == default_sort_order &&
-           parent_pattern == default_parent_pattern)
-               fprintf(stdout, "#\n# (For a higher level overview, try: perf report --sort comm,dso)\n#\n");
+       if (!use_browser && sort_order == default_sort_order &&
+           parent_pattern == default_parent_pattern) {
+               fprintf(stdout, "#\n# (%s)\n#\n", help);
 
-       if (show_threads) {
-               bool raw_printing_style = !strcmp(pretty_printing_style, "raw");
-               perf_read_values_display(stdout, &show_threads_values,
-                                        raw_printing_style);
-               perf_read_values_destroy(&show_threads_values);
+               if (show_threads) {
+                       bool style = !strcmp(pretty_printing_style, "raw");
+                       perf_read_values_display(stdout, &show_threads_values,
+                                                style);
+                       perf_read_values_destroy(&show_threads_values);
+               }
        }
 out_delete:
        perf_session__delete(session);
@@ -447,7 +456,7 @@ int cmd_report(int argc, const char **argv, const char *prefix __used)
 {
        argc = parse_options(argc, argv, options, report_usage, 0);
 
-       setup_pager();
+       setup_browser();
 
        if (symbol__init() < 0)
                return -1;
index 1f529321607eb2c2c852bb481982f1287fc86ef9..887ebbf5d1ff0d9f301091ffa3f7694247674ea5 100644 (file)
@@ -133,7 +133,7 @@ static inline struct symbol *sym_entry__symbol(struct sym_entry *self)
        return ((void *)self) + symbol_conf.priv_size;
 }
 
-static void get_term_dimensions(struct winsize *ws)
+void get_term_dimensions(struct winsize *ws)
 {
        char *s = getenv("LINES");
 
@@ -169,7 +169,7 @@ static void sig_winch_handler(int sig __used)
        update_print_entries(&winsize);
 }
 
-static void parse_source(struct sym_entry *syme)
+static int parse_source(struct sym_entry *syme)
 {
        struct symbol *sym;
        struct sym_entry_source *source;
@@ -180,12 +180,21 @@ static void parse_source(struct sym_entry *syme)
        u64 len;
 
        if (!syme)
-               return;
+               return -1;
+
+       sym = sym_entry__symbol(syme);
+       map = syme->map;
+
+       /*
+        * We can't annotate with just /proc/kallsyms
+        */
+       if (map->dso->origin == DSO__ORIG_KERNEL)
+               return -1;
 
        if (syme->src == NULL) {
                syme->src = zalloc(sizeof(*source));
                if (syme->src == NULL)
-                       return;
+                       return -1;
                pthread_mutex_init(&syme->src->lock, NULL);
        }
 
@@ -195,9 +204,6 @@ static void parse_source(struct sym_entry *syme)
                pthread_mutex_lock(&source->lock);
                goto out_assign;
        }
-
-       sym = sym_entry__symbol(syme);
-       map = syme->map;
        path = map->dso->long_name;
 
        len = sym->end - sym->start;
@@ -209,7 +215,7 @@ static void parse_source(struct sym_entry *syme)
 
        file = popen(command, "r");
        if (!file)
-               return;
+               return -1;
 
        pthread_mutex_lock(&source->lock);
        source->lines_tail = &source->lines;
@@ -245,6 +251,7 @@ static void parse_source(struct sym_entry *syme)
 out_assign:
        sym_filter_entry = syme;
        pthread_mutex_unlock(&source->lock);
+       return 0;
 }
 
 static void __zero_source_counters(struct sym_entry *syme)
@@ -411,6 +418,7 @@ static double sym_weight(const struct sym_entry *sym)
 
 static long                    samples;
 static long                    userspace_samples;
+static long                    exact_samples;
 static const char              CONSOLE_CLEAR[] = "\e[H\e[2J";
 
 static void __list_insert_active_sym(struct sym_entry *syme)
@@ -451,6 +459,7 @@ static void print_sym_table(void)
        int counter, snap = !display_weighted ? sym_counter : 0;
        float samples_per_sec = samples/delay_secs;
        float ksamples_per_sec = (samples-userspace_samples)/delay_secs;
+       float esamples_percent = (100.0*exact_samples)/samples;
        float sum_ksamples = 0.0;
        struct sym_entry *syme, *n;
        struct rb_root tmp = RB_ROOT;
@@ -458,7 +467,7 @@ static void print_sym_table(void)
        int sym_width = 0, dso_width = 0, dso_short_width = 0;
        const int win_width = winsize.ws_col - 1;
 
-       samples = userspace_samples = 0;
+       samples = userspace_samples = exact_samples = 0;
 
        /* Sort the active symbols */
        pthread_mutex_lock(&active_symbols_lock);
@@ -489,9 +498,10 @@ static void print_sym_table(void)
        puts(CONSOLE_CLEAR);
 
        printf("%-*.*s\n", win_width, win_width, graph_dotted_line);
-       printf( "   PerfTop:%8.0f irqs/sec  kernel:%4.1f%% [",
+       printf( "   PerfTop:%8.0f irqs/sec  kernel:%4.1f%%  exact: %4.1f%% [",
                samples_per_sec,
-               100.0 - (100.0*((samples_per_sec-ksamples_per_sec)/samples_per_sec)));
+               100.0 - (100.0*((samples_per_sec-ksamples_per_sec)/samples_per_sec)),
+               esamples_percent);
 
        if (nr_counters == 1 || !display_weighted) {
                printf("%Ld", (u64)attrs[0].sample_period);
@@ -960,6 +970,9 @@ static void event__process_sample(const event_t *self,
                return;
        }
 
+       if (self->header.misc & PERF_RECORD_MISC_EXACT)
+               exact_samples++;
+
        if (event__preprocess_sample(self, session, &al, symbol_filter) < 0 ||
            al.filtered)
                return;
@@ -990,7 +1003,17 @@ static void event__process_sample(const event_t *self,
        if (sym_filter_entry_sched) {
                sym_filter_entry = sym_filter_entry_sched;
                sym_filter_entry_sched = NULL;
-               parse_source(sym_filter_entry);
+               if (parse_source(sym_filter_entry) < 0) {
+                       struct symbol *sym = sym_entry__symbol(sym_filter_entry);
+
+                       pr_err("Can't annotate %s", sym->name);
+                       if (sym_filter_entry->map->dso->origin == DSO__ORIG_KERNEL) {
+                               pr_err(": No vmlinux file was found in the path:\n");
+                               vmlinux_path__fprintf(stderr);
+                       } else
+                               pr_err(".\n");
+                       exit(1);
+               }
        }
 
        syme = symbol__priv(al.sym);
index 57cb107c1f13291a7c94272d473bfea5862f6281..0d4b9edfab1246f5add6bec7d3bb46d16f0bc7e3 100644 (file)
@@ -16,6 +16,8 @@
 #include "util/string.h"
 #include "util/debugfs.h"
 
+bool use_browser;
+
 const char perf_usage_string[] =
        "perf [--version] [--help] COMMAND [ARGS]";
 
@@ -265,6 +267,8 @@ static int run_builtin(struct cmd_struct *p, int argc, const char **argv)
        if (status)
                return status & 0xff;
 
+       exit_browser();
+
        /* Somebody closed stdout? */
        if (fstat(fileno(stdout), &st))
                return 0;
index 6fb379bc1d1fec0c4c9384fdf2e301d4445a8a01..aa786158b66814c66a1a86baa4edea6b1097aef2 100644 (file)
@@ -1,6 +1,10 @@
 #ifndef _PERF_PERF_H
 #define _PERF_PERF_H
 
+struct winsize;
+
+void get_term_dimensions(struct winsize *ws);
+
 #if defined(__i386__)
 #include "../../arch/x86/include/asm/unistd.h"
 #define rmb()          asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
index 918eb376abe3943375cf1ea1843d4f724ac0f621..47b12a3d11bf92a8d48fa6b9944f1c64067a93a6 100644 (file)
@@ -1,6 +1,7 @@
 #ifndef __PERF_CACHE_H
 #define __PERF_CACHE_H
 
+#include <stdbool.h>
 #include "util.h"
 #include "strbuf.h"
 #include "../perf.h"
@@ -69,6 +70,19 @@ extern const char *pager_program;
 extern int pager_in_use(void);
 extern int pager_use_color;
 
+extern bool use_browser;
+
+#ifdef NO_NEWT_SUPPORT
+static inline void setup_browser(void)
+{
+       setup_pager();
+}
+static inline void exit_browser(void) {}
+#else
+void setup_browser(void);
+void exit_browser(void);
+#endif
+
 extern const char *editor_program;
 extern const char *excludes_file;
 
index e88bca55a5993c82212c6307d35bab9824d9358f..9da01914e0af644461e303650c7021c6fc383013 100644 (file)
@@ -203,7 +203,10 @@ int color_fprintf(FILE *fp, const char *color, const char *fmt, ...)
        int r;
 
        va_start(args, fmt);
-       r = color_vfprintf(fp, color, fmt, args);
+       if (use_browser)
+               r = vfprintf(fp, fmt, args);
+       else
+               r = color_vfprintf(fp, color, fmt, args);
        va_end(args);
        return r;
 }
index 0905600c3851b51ec6b52f716e33f797c1da1032..033d66db863a14990b308c66be3f62e1e863d0d6 100644 (file)
@@ -6,6 +6,7 @@
 #include <stdarg.h>
 #include <stdio.h>
 
+#include "cache.h"
 #include "color.h"
 #include "event.h"
 #include "debug.h"
@@ -21,7 +22,10 @@ int eprintf(int level, const char *fmt, ...)
 
        if (verbose >= level) {
                va_start(args, fmt);
-               ret = vfprintf(stderr, fmt, args);
+               if (use_browser)
+                       ret = browser__show_help(fmt, args);
+               else
+                       ret = vfprintf(stderr, fmt, args);
                va_end(args);
        }
 
index c6c24c522deaf0cbbef6af3ffd95de706525412d..0172edf3f1533bf4f33991dedda6fcf435edba14 100644 (file)
@@ -7,9 +7,16 @@
 extern int verbose;
 extern int dump_trace;
 
-int eprintf(int level,
-           const char *fmt, ...) __attribute__((format(printf, 2, 3)));
 int dump_printf(const char *fmt, ...) __attribute__((format(printf, 1, 2)));
 void trace_event(event_t *event);
 
+#ifdef NO_NEWT_SUPPORT
+static inline int browser__show_help(const char *format __used, va_list ap __used)
+{
+       return 0;
+}
+#else
+int browser__show_help(const char *format, va_list ap);
+#endif
+
 #endif /* __PERF_DEBUG_H */
index bdcfd6190b217d88c2d8530fb8400f442c62d96d..1a4e8376d843ea244163ef507d3cca91f4946eae 100644 (file)
@@ -455,11 +455,11 @@ static size_t hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self,
        return ret;
 }
 
-static size_t hist_entry__fprintf(struct hist_entry *self,
-                                 struct perf_session *pair_session,
-                                 bool show_displacement,
-                                 long displacement, FILE *fp,
-                                 u64 session_total)
+size_t hist_entry__fprintf(struct hist_entry *self,
+                          struct perf_session *pair_session,
+                          bool show_displacement,
+                          long displacement, FILE *fp,
+                          u64 session_total)
 {
        struct sort_entry *se;
        u64 count, total;
@@ -485,9 +485,9 @@ static size_t hist_entry__fprintf(struct hist_entry *self,
 
        if (symbol_conf.show_nr_samples) {
                if (sep)
-                       fprintf(fp, "%c%lld", *sep, count);
+                       ret += fprintf(fp, "%c%lld", *sep, count);
                else
-                       fprintf(fp, "%11lld", count);
+                       ret += fprintf(fp, "%11lld", count);
        }
 
        if (pair_session) {
@@ -518,9 +518,9 @@ static size_t hist_entry__fprintf(struct hist_entry *self,
                                snprintf(bf, sizeof(bf), " ");
 
                        if (sep)
-                               fprintf(fp, "%c%s", *sep, bf);
+                               ret += fprintf(fp, "%c%s", *sep, bf);
                        else
-                               fprintf(fp, "%6.6s", bf);
+                               ret += fprintf(fp, "%6.6s", bf);
                }
        }
 
@@ -528,27 +528,27 @@ static size_t hist_entry__fprintf(struct hist_entry *self,
                if (se->elide)
                        continue;
 
-               fprintf(fp, "%s", sep ?: "  ");
+               ret += fprintf(fp, "%s", sep ?: "  ");
                ret += se->print(fp, self, se->width ? *se->width : 0);
        }
 
-       ret += fprintf(fp, "\n");
-
-       if (symbol_conf.use_callchain) {
-               int left_margin = 0;
+       return ret + fprintf(fp, "\n");
+}
 
-               if (sort__first_dimension == SORT_COMM) {
-                       se = list_first_entry(&hist_entry__sort_list, typeof(*se),
-                                               list);
-                       left_margin = se->width ? *se->width : 0;
-                       left_margin -= thread__comm_len(self->thread);
-               }
+static size_t hist_entry__fprintf_callchain(struct hist_entry *self, FILE *fp,
+                                           u64 session_total)
+{
+       int left_margin = 0;
 
-               hist_entry_callchain__fprintf(fp, self, session_total,
-                                             left_margin);
+       if (sort__first_dimension == SORT_COMM) {
+               struct sort_entry *se = list_first_entry(&hist_entry__sort_list,
+                                                        typeof(*se), list);
+               left_margin = se->width ? *se->width : 0;
+               left_margin -= thread__comm_len(self->thread);
        }
 
-       return ret;
+       return hist_entry_callchain__fprintf(fp, self, session_total,
+                                            left_margin);
 }
 
 size_t perf_session__fprintf_hists(struct rb_root *hists,
@@ -655,6 +655,10 @@ print_entries:
                }
                ret += hist_entry__fprintf(h, pair, show_displacement,
                                           displacement, fp, session_total);
+
+               if (symbol_conf.use_callchain)
+                       ret += hist_entry__fprintf_callchain(h, fp, session_total);
+
                if (h->map == NULL && verbose > 1) {
                        __map_groups__fprintf_maps(&h->thread->mg,
                                                   MAP__FUNCTION, fp);
index 16f360cce5bfff0e56c7096e418bc38949652d58..fe366ce5db453ba2a153374b82de4a055d98ff64 100644 (file)
@@ -18,6 +18,11 @@ struct hist_entry *__perf_session__add_hist_entry(struct rb_root *hists,
                                                  u64 count, bool *hit);
 extern int64_t hist_entry__cmp(struct hist_entry *, struct hist_entry *);
 extern int64_t hist_entry__collapse(struct hist_entry *, struct hist_entry *);
+size_t hist_entry__fprintf(struct hist_entry *self,
+                          struct perf_session *pair_session,
+                          bool show_displacement,
+                          long displacement, FILE *fp,
+                          u64 session_total);
 void hist_entry__free(struct hist_entry *);
 
 void perf_session__output_resort(struct rb_root *hists, u64 total_samples);
index f2611655ab5176b09117aef19eba27f1f940e2be..388ab1bfd1141bbd29fb4540c65828c69ca44b52 100644 (file)
@@ -85,16 +85,19 @@ simple_strtoul(const char *nptr, char **endptr, int base)
        return strtoul(nptr, endptr, base);
 }
 
+int eprintf(int level,
+           const char *fmt, ...) __attribute__((format(printf, 2, 3)));
+
 #ifndef pr_fmt
 #define pr_fmt(fmt) fmt
 #endif
 
 #define pr_err(fmt, ...) \
-       do { fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__); } while (0)
+       eprintf(0, pr_fmt(fmt), ##__VA_ARGS__)
 #define pr_warning(fmt, ...) \
-       do { fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__); } while (0)
+       eprintf(0, pr_fmt(fmt), ##__VA_ARGS__)
 #define pr_info(fmt, ...) \
-       do { fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__); } while (0)
+       eprintf(0, pr_fmt(fmt), ##__VA_ARGS__)
 #define pr_debug(fmt, ...) \
        eprintf(1, pr_fmt(fmt), ##__VA_ARGS__)
 #define pr_debugN(n, fmt, ...) \
diff --git a/tools/perf/util/newt.c b/tools/perf/util/newt.c
new file mode 100644 (file)
index 0000000..2d19e7a
--- /dev/null
@@ -0,0 +1,207 @@
+#define _GNU_SOURCE
+#include <stdio.h>
+#undef _GNU_SOURCE
+
+#include <ctype.h>
+#include <stdlib.h>
+#include <newt.h>
+#include <sys/ttydefaults.h>
+
+#include "cache.h"
+#include "hist.h"
+#include "session.h"
+#include "sort.h"
+#include "symbol.h"
+
+static void newt_form__set_exit_keys(newtComponent self)
+{
+       newtFormAddHotKey(self, NEWT_KEY_ESCAPE);
+       newtFormAddHotKey(self, 'Q');
+       newtFormAddHotKey(self, 'q');
+       newtFormAddHotKey(self, CTRL('c'));
+}
+
+static newtComponent newt_form__new(void)
+{
+       newtComponent self = newtForm(NULL, NULL, 0);
+       if (self)
+               newt_form__set_exit_keys(self);
+       return self;
+}
+
+static size_t hist_entry__append_browser(struct hist_entry *self,
+                                        newtComponent listbox, u64 total)
+{
+       char bf[1024];
+       size_t len;
+       FILE *fp;
+
+       if (symbol_conf.exclude_other && !self->parent)
+               return 0;
+
+       fp = fmemopen(bf, sizeof(bf), "w");
+       if (fp == NULL)
+               return 0;
+
+       len = hist_entry__fprintf(self, NULL, false, 0, fp, total);
+
+       fclose(fp);
+       newtListboxAppendEntry(listbox, bf, self);
+       return len;
+}
+
+static void hist_entry__annotate_browser(struct hist_entry *self)
+{
+       FILE *fp;
+       int cols, rows;
+       newtComponent form, listbox;
+       struct newtExitStruct es;
+       char *str;
+       size_t line_len = 0, max_line_len = 0;
+       size_t max_usable_width;
+       char *line = NULL;
+
+       if (self->sym == NULL)
+               return;
+
+       if (asprintf(&str, "perf annotate %s 2>&1 | expand", self->sym->name) < 0)
+               return;
+
+       fp = popen(str, "r");
+       if (fp == NULL)
+               goto out_free_str;
+
+       newtPushHelpLine("Press ESC to exit");
+       newtGetScreenSize(&cols, &rows);
+       listbox = newtListbox(0, 0, rows - 5, NEWT_FLAG_SCROLL);
+
+       while (!feof(fp)) {
+               ssize_t len = getline(&line, &line_len, fp);
+
+               if (len < 0)
+                       break;
+               /*
+                * getline() returns the string length; line_len is only
+                * the allocated buffer size, so trim using len.
+                */
+               while (len != 0 && isspace(line[len - 1]))
+                       line[--len] = '\0';
+
+               if ((size_t)len > max_line_len)
+                       max_line_len = len;
+               newtListboxAppendEntry(listbox, line, NULL);
+       }
+       pclose(fp);
+       free(line);
+
+       max_usable_width = cols - 22;
+       if (max_line_len > max_usable_width)
+               max_line_len = max_usable_width;
+
+       newtListboxSetWidth(listbox, max_line_len);
+
+       newtCenteredWindow(max_line_len + 2, rows - 5, self->sym->name);
+       form = newt_form__new();
+       newtFormAddComponents(form, listbox, NULL);
+
+       newtFormRun(form, &es);
+       newtFormDestroy(form);
+       newtPopWindow();
+       newtPopHelpLine();
+out_free_str:
+       free(str);
+}
+
+void perf_session__browse_hists(struct rb_root *hists, u64 session_total,
+                               const char *helpline)
+{
+       struct sort_entry *se;
+       struct rb_node *nd;
+       unsigned int width;
+       char *col_width = symbol_conf.col_width_list_str;
+       int rows;
+       size_t max_len = 0;
+       char str[1024];
+       newtComponent form, listbox;
+       struct newtExitStruct es;
+
+       snprintf(str, sizeof(str), "Samples: %Ld", session_total);
+       newtDrawRootText(0, 0, str);
+       newtPushHelpLine(helpline);
+
+       newtGetScreenSize(NULL, &rows);
+
+       form = newt_form__new();
+
+       listbox = newtListbox(1, 1, rows - 2, (NEWT_FLAG_SCROLL |
+                                              NEWT_FLAG_BORDER |
+                                              NEWT_FLAG_RETURNEXIT));
+
+       list_for_each_entry(se, &hist_entry__sort_list, list) {
+               if (se->elide)
+                       continue;
+               width = strlen(se->header);
+               if (se->width) {
+                       if (symbol_conf.col_width_list_str) {
+                               if (col_width) {
+                                       *se->width = atoi(col_width);
+                                       col_width = strchr(col_width, ',');
+                                       if (col_width)
+                                               ++col_width;
+                               }
+                       }
+                       *se->width = max(*se->width, width);
+               }
+       }
+
+       for (nd = rb_first(hists); nd; nd = rb_next(nd)) {
+               struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
+               size_t len = hist_entry__append_browser(h, listbox, session_total);
+               if (len > max_len)
+                       max_len = len;
+       }
+
+       newtListboxSetWidth(listbox, max_len);
+       newtFormAddComponents(form, listbox, NULL);
+
+       while (1) {
+               struct hist_entry *selection;
+
+               newtFormRun(form, &es);
+               if (es.reason == NEWT_EXIT_HOTKEY)
+                       break;
+               selection = newtListboxGetCurrent(listbox);
+               hist_entry__annotate_browser(selection);
+       }
+
+       newtFormDestroy(form);
+}
+
+int browser__show_help(const char *format, va_list ap)
+{
+       int ret;
+       static int backlog;
+       static char msg[1024];
+
+       ret = vsnprintf(msg + backlog, sizeof(msg) - backlog, format, ap);
+       backlog += ret;
+
+       if (msg[backlog - 1] == '\n') {
+               newtPopHelpLine();
+               newtPushHelpLine(msg);
+               newtRefresh();
+               backlog = 0;
+       }
+
+       return ret;
+}
+
+void setup_browser(void)
+{
+       if (!isatty(1))
+               return;
+
+       use_browser = true;
+       newtInit();
+       newtCls();
+       newtPushHelpLine(" ");
+}
+
+void exit_browser(void)
+{
+       if (use_browser)
+               newtFinished();
+}
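
hist_entry__append_browser() above reuses the stdio-based
hist_entry__fprintf() by pointing it at an in-memory FILE via fmemopen(3),
so the TUI listbox shows exactly the text the stdout path would have
printed. The trick in isolation, as a stand-alone sketch:

    #include <stdio.h>

    /* Any legacy formatter that only knows how to write to a FILE *. */
    static size_t legacy_fprintf(FILE *fp)
    {
            return fprintf(fp, "%8.2f%%  %s", 42.13, "some_symbol");
    }

    int main(void)
    {
            char bf[1024];
            FILE *fp = fmemopen(bf, sizeof(bf), "w");

            if (fp == NULL)
                    return 1;
            legacy_fprintf(fp);
            fclose(fp);        /* flushes and NUL-terminates bf */

            /* bf now holds the formatted line, ready for a widget API. */
            puts(bf);
            return 0;
    }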
index 05d0c5c2030cbd5d8bfac67c00fc341404419465..a2014459125aad2ead6782da1983f78ea746d700 100644 (file)
@@ -656,6 +656,10 @@ parse_raw_event(const char **strp, struct perf_event_attr *attr)
                return EVT_FAILED;
        n = hex2u64(str + 1, &config);
        if (n > 0) {
+               if (str[n+1] == 'p') {
+                       attr->precise = 1;
+                       n++;
+               }
                *strp = str + n + 1;
                attr->type = PERF_TYPE_RAW;
                attr->config = config;
index 5c33417eebb396599d77caa15798e5bce5293063..34d73395baacacbb48d0cfe73218c91f84f57dfa 100644 (file)
@@ -86,4 +86,13 @@ static inline struct map *
 {
        return map_groups__new_module(&self->kmaps, start, filename);
 }
+
+#ifdef NO_NEWT_SUPPORT
+static inline void perf_session__browse_hists(struct rb_root *hists __used,
+                                             u64 session_total __used,
+                                             const char *helpline __used) {}
+#else
+void perf_session__browse_hists(struct rb_root *hists, u64 session_total,
+                               const char *helpline);
+#endif
 #endif /* __PERF_SESSION_H */
index c458c4a371d11ea11715f3937111843234739462..3eb9de4baef32b816eaa4e5285dacf17f93f2d08 100644 (file)
 #define NT_GNU_BUILD_ID 3
 #endif
 
-enum dso_origin {
-       DSO__ORIG_KERNEL = 0,
-       DSO__ORIG_JAVA_JIT,
-       DSO__ORIG_BUILD_ID_CACHE,
-       DSO__ORIG_FEDORA,
-       DSO__ORIG_UBUNTU,
-       DSO__ORIG_BUILDID,
-       DSO__ORIG_DSO,
-       DSO__ORIG_KMODULE,
-       DSO__ORIG_NOT_FOUND,
-};
-
 static void dsos__add(struct list_head *head, struct dso *dso);
 static struct map *map__new2(u64 start, struct dso *dso, enum map_type type);
 static int dso__load_kernel_sym(struct dso *self, struct map *map,
@@ -870,8 +858,8 @@ out_close:
        if (err == 0)
                return nr;
 out:
-       pr_warning("%s: problems reading %s PLT info.\n",
-                  __func__, self->long_name);
+       pr_debug("%s: problems reading %s PLT info.\n",
+                __func__, self->long_name);
        return 0;
 }
 
@@ -1025,7 +1013,7 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name,
                                }
                                curr_map->map_ip = identity__map_ip;
                                curr_map->unmap_ip = identity__map_ip;
-                               curr_dso->origin = DSO__ORIG_KERNEL;
+                               curr_dso->origin = self->origin;
                                map_groups__insert(kmap->kmaps, curr_map);
                                dsos__add(&dsos__kernel, curr_dso);
                                dso__set_loaded(curr_dso, map->type);
@@ -1895,6 +1883,17 @@ out_fail:
        return -1;
 }
 
+size_t vmlinux_path__fprintf(FILE *fp)
+{
+       int i;
+       size_t printed = 0;
+
+       for (i = 0; i < vmlinux_path__nr_entries; ++i)
+               printed += fprintf(fp, "[%d] %s\n", i, vmlinux_path[i]);
+
+       return printed;
+}
+
 static int setup_list(struct strlist **list, const char *list_str,
                      const char *list_name)
 {
index f30a37428919b6d959eba0811cf0489ed4d8e5b2..0da2455d5b903046f328df419558309775b2dca9 100644 (file)
@@ -106,6 +106,7 @@ struct dso {
        u8               has_build_id:1;
        u8               kernel:1;
        u8               hit:1;
+       u8               annotate_warned:1;
        unsigned char    origin;
        u8               sorted_by_name;
        u8               loaded;
@@ -150,6 +151,19 @@ size_t dsos__fprintf_buildid(FILE *fp, bool with_hits);
 
 size_t dso__fprintf_buildid(struct dso *self, FILE *fp);
 size_t dso__fprintf(struct dso *self, enum map_type type, FILE *fp);
+
+enum dso_origin {
+       DSO__ORIG_KERNEL = 0,
+       DSO__ORIG_JAVA_JIT,
+       DSO__ORIG_BUILD_ID_CACHE,
+       DSO__ORIG_FEDORA,
+       DSO__ORIG_UBUNTU,
+       DSO__ORIG_BUILDID,
+       DSO__ORIG_DSO,
+       DSO__ORIG_KMODULE,
+       DSO__ORIG_NOT_FOUND,
+};
+
 char dso__symtab_origin(const struct dso *self);
 void dso__set_long_name(struct dso *self, char *name);
 void dso__set_build_id(struct dso *self, void *build_id);
@@ -169,4 +183,6 @@ int kallsyms__parse(const char *filename, void *arg,
 int symbol__init(void);
 bool symbol_type__is_a(char symbol_type, enum map_type map_type);
 
+size_t vmlinux_path__fprintf(FILE *fp);
+
 #endif /* __PERF_SYMBOL */