s390/perf: add support for the CPU-Measurement Sampling Facility
arch/s390/kernel/perf_event.c
/*
 * Performance event support for s390x
 *
 * Copyright IBM Corp. 2012
 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 */
#define KMSG_COMPONENT	"perf"
#define pr_fmt(fmt)	KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/kvm_host.h>
#include <linux/percpu.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/sysfs.h>
#include <asm/irq.h>
#include <asm/cpu_mf.h>
#include <asm/lowcore.h>
#include <asm/processor.h>
const char *perf_pmu_name(void)
{
	if (cpum_cf_avail() || cpum_sf_avail())
		return "CPU-measurement facilities (CPUMF)";
	return "pmu";
}
EXPORT_SYMBOL(perf_pmu_name);

int perf_num_counters(void)
{
	int num = 0;

	if (cpum_cf_avail())
		num += PERF_CPUM_CF_MAX_CTR;
	if (cpum_sf_avail())
		num += PERF_CPUM_SF_MAX_CTR;

	return num;
}
EXPORT_SYMBOL(perf_num_counters);

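/*
 * Guest samples: when a measurement interrupt hits while the CPU is
 * running a KVM guest under SIE, the sample is reported with the PSW
 * pointing at the sie_exit label.  In that case the guest PSW is read
 * from the SIE control block, whose address the SIE entry code is
 * expected to keep in the (otherwise empty) slot of its stack frame;
 * see sie_block() and is_in_guest() below.
 */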
static struct kvm_s390_sie_block *sie_block(struct pt_regs *regs)
{
	struct stack_frame *stack = (struct stack_frame *) regs->gprs[15];

	if (!stack)
		return NULL;

	return (struct kvm_s390_sie_block *) stack->empty1[0];
}

static bool is_in_guest(struct pt_regs *regs)
{
	if (user_mode(regs))
		return false;
#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
	return instruction_pointer(regs) == (unsigned long) &sie_exit;
#else
	return false;
#endif
}

static unsigned long guest_is_user_mode(struct pt_regs *regs)
{
	return sie_block(regs)->gpsw.mask & PSW_MASK_PSTATE;
}

static unsigned long instruction_pointer_guest(struct pt_regs *regs)
{
	return sie_block(regs)->gpsw.addr & PSW_ADDR_INSN;
}

unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
	return is_in_guest(regs) ? instruction_pointer_guest(regs)
				 : instruction_pointer(regs);
}

static unsigned long perf_misc_guest_flags(struct pt_regs *regs)
{
	return guest_is_user_mode(regs) ? PERF_RECORD_MISC_GUEST_USER
					: PERF_RECORD_MISC_GUEST_KERNEL;
}

unsigned long perf_misc_flags(struct pt_regs *regs)
{
	if (is_in_guest(regs))
		return perf_misc_guest_flags(regs);

	return user_mode(regs) ? PERF_RECORD_MISC_USER
			       : PERF_RECORD_MISC_KERNEL;
}

void print_debug_cf(void)
{
	struct cpumf_ctr_info cf_info;
	int cpu = smp_processor_id();

	memset(&cf_info, 0, sizeof(cf_info));
	if (!qctri(&cf_info))
		pr_info("CPU[%i] CPUM_CF: ver=%u.%u A=%04x E=%04x C=%04x\n",
			cpu, cf_info.cfvn, cf_info.csvn,
			cf_info.auth_ctl, cf_info.enable_ctl, cf_info.act_ctl);
}

static void print_debug_sf(void)
{
	struct hws_qsi_info_block si;
	int cpu = smp_processor_id();

	memset(&si, 0, sizeof(si));
	if (qsi(&si)) {
		pr_err("CPU[%i]: CPUM_SF: qsi failed\n", cpu);
		return;
	}

	pr_info("CPU[%i]: CPUM_SF: as=%i es=%i cs=%i bsdes=%i dsdes=%i"
		" min=%i max=%i cpu_speed=%i tear=%p dear=%p\n",
		cpu, si.as, si.es, si.cs, si.bsdes, si.dsdes,
		si.min_sampl_rate, si.max_sampl_rate, si.cpu_speed,
		si.tear, si.dear);
}

void perf_event_print_debug(void)
{
	unsigned long flags;

	local_irq_save(flags);
	if (cpum_cf_avail())
		print_debug_cf();
	if (cpum_sf_avail())
		print_debug_sf();
	local_irq_restore(flags);
}

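/*
 * Walk the kernel stack between <low> and <high> and record the saved
 * return addresses (r14 in each back-chained stack frame).  A zero
 * back-chain is taken as the end of a stack; the frame is then assumed
 * to be followed by a struct pt_regs (interrupt frame), and the walk
 * continues on the stack referenced by its saved r15.
 */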
/* See also arch/s390/kernel/traps.c */
static unsigned long __store_trace(struct perf_callchain_entry *entry,
				   unsigned long sp,
				   unsigned long low, unsigned long high)
{
	struct stack_frame *sf;
	struct pt_regs *regs;

	while (1) {
		sp = sp & PSW_ADDR_INSN;
		if (sp < low || sp > high - sizeof(*sf))
			return sp;
		sf = (struct stack_frame *) sp;
		perf_callchain_store(entry, sf->gprs[8] & PSW_ADDR_INSN);
		/* Follow the backchain. */
		while (1) {
			low = sp;
			sp = sf->back_chain & PSW_ADDR_INSN;
			if (!sp)
				break;
			if (sp <= low || sp > high - sizeof(*sf))
				return sp;
			sf = (struct stack_frame *) sp;
			perf_callchain_store(entry,
					     sf->gprs[8] & PSW_ADDR_INSN);
		}
		/* Zero backchain detected, check for interrupt frame. */
		sp = (unsigned long) (sf + 1);
		if (sp <= low || sp > high - sizeof(*regs))
			return sp;
		regs = (struct pt_regs *) sp;
		perf_callchain_store(entry, sf->gprs[8] & PSW_ADDR_INSN);
		low = sp;
		sp = regs->gprs[15];
	}
}

void perf_callchain_kernel(struct perf_callchain_entry *entry,
			   struct pt_regs *regs)
{
	unsigned long head;
	struct stack_frame *head_sf;

	if (user_mode(regs))
		return;

	head = regs->gprs[15];
	head_sf = (struct stack_frame *) head;

	if (!head_sf || !head_sf->back_chain)
		return;

	head = head_sf->back_chain;
	head = __store_trace(entry, head, S390_lowcore.async_stack - ASYNC_SIZE,
			     S390_lowcore.async_stack);

	__store_trace(entry, head, S390_lowcore.thread_info,
		      S390_lowcore.thread_info + THREAD_SIZE);
}

/* Perf definitions for PMU event attributes in sysfs */
ssize_t cpumf_events_sysfs_show(struct device *dev,
				struct device_attribute *attr, char *page)
{
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
	return sprintf(page, "event=0x%04llx,name=%s\n",
		       pmu_attr->id, attr->attr.name);
}
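
/*
 * Illustrative sketch only (not part of this file): a CPUMF PMU driver
 * could publish an event attribute through the show function above by
 * using the generic PMU_EVENT_ATTR() helper from <linux/perf_event.h>.
 * The event name and raw event code below are made-up examples.
 *
 *	PMU_EVENT_ATTR(cpu_cycles, cpumf_event_cpu_cycles, 0x0000,
 *		       cpumf_events_sysfs_show);
 *
 * Reading the resulting sysfs attribute then yields a line such as
 * "event=0x0000,name=cpu_cycles", formatted by cpumf_events_sysfs_show().
 */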