perf/x86: Detect number of instances of uncore CBox
arch/x86/kernel/cpu/perf_event_intel_uncore.h
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/perf_event.h>
#include "perf_event.h"

#define UNCORE_PMU_NAME_LEN		32
#define UNCORE_BOX_HASH_SIZE		8

#define UNCORE_PMU_HRTIMER_INTERVAL	(60 * NSEC_PER_SEC)

#define UNCORE_FIXED_EVENT		0xff
#define UNCORE_PMC_IDX_MAX_GENERIC	8
#define UNCORE_PMC_IDX_FIXED		UNCORE_PMC_IDX_MAX_GENERIC
#define UNCORE_PMC_IDX_MAX		(UNCORE_PMC_IDX_FIXED + 1)

#define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff)

/* SNB event control */
#define SNB_UNC_CTL_EV_SEL_MASK			0x000000ff
#define SNB_UNC_CTL_UMASK_MASK			0x0000ff00
#define SNB_UNC_CTL_EDGE_DET			(1 << 18)
#define SNB_UNC_CTL_EN				(1 << 22)
#define SNB_UNC_CTL_INVERT			(1 << 23)
#define SNB_UNC_CTL_CMASK_MASK			0x1f000000
#define NHM_UNC_CTL_CMASK_MASK			0xff000000
#define NHM_UNC_FIXED_CTR_CTL_EN		(1 << 0)

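/*
 * Bits of a raw event (perf_event_attr::config) accepted by the SNB/NHM
 * uncore: event select in bits 0-7, unit mask in bits 8-15, edge detect,
 * invert, and a counter mask in the top byte (5 bits wide on SNB, 8 on NHM).
 */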
#define SNB_UNC_RAW_EVENT_MASK			(SNB_UNC_CTL_EV_SEL_MASK | \
						 SNB_UNC_CTL_UMASK_MASK | \
						 SNB_UNC_CTL_EDGE_DET | \
						 SNB_UNC_CTL_INVERT | \
						 SNB_UNC_CTL_CMASK_MASK)

#define NHM_UNC_RAW_EVENT_MASK			(SNB_UNC_CTL_EV_SEL_MASK | \
						 SNB_UNC_CTL_UMASK_MASK | \
						 SNB_UNC_CTL_EDGE_DET | \
						 SNB_UNC_CTL_INVERT | \
						 NHM_UNC_CTL_CMASK_MASK)

/* SNB global control register */
#define SNB_UNC_PERF_GLOBAL_CTL			0x391
#define SNB_UNC_FIXED_CTR_CTRL			0x394
#define SNB_UNC_FIXED_CTR			0x395

/* SNB uncore global control */
#define SNB_UNC_GLOBAL_CTL_CORE_ALL		((1 << 4) - 1)
#define SNB_UNC_GLOBAL_CTL_EN			(1 << 29)

/* SNB Cbo register */
#define SNB_UNC_CBO_0_PERFEVTSEL0		0x700
#define SNB_UNC_CBO_0_PER_CTR0			0x706
#define SNB_UNC_CBO_MSR_OFFSET			0x10

/* NHM global control register */
#define NHM_UNC_PERF_GLOBAL_CTL			0x391
#define NHM_UNC_FIXED_CTR			0x394
#define NHM_UNC_FIXED_CTR_CTRL			0x395

/* NHM uncore global control */
#define NHM_UNC_GLOBAL_CTL_EN_PC_ALL		((1ULL << 8) - 1)
#define NHM_UNC_GLOBAL_CTL_EN_FC		(1ULL << 32)

/* NHM uncore register */
#define NHM_UNC_PERFEVTSEL0			0x3c0
#define NHM_UNC_UNCORE_PMC0			0x3b0

/* SNB-EP Box level control */
#define SNBEP_PMON_BOX_CTL_RST_CTRL		(1 << 0)
#define SNBEP_PMON_BOX_CTL_RST_CTRS		(1 << 1)
#define SNBEP_PMON_BOX_CTL_FRZ			(1 << 8)
#define SNBEP_PMON_BOX_CTL_FRZ_EN		(1 << 16)
#define SNBEP_PMON_BOX_CTL_INT			(SNBEP_PMON_BOX_CTL_RST_CTRL | \
						 SNBEP_PMON_BOX_CTL_RST_CTRS | \
						 SNBEP_PMON_BOX_CTL_FRZ_EN)
/* SNB-EP event control */
#define SNBEP_PMON_CTL_EV_SEL_MASK		0x000000ff
#define SNBEP_PMON_CTL_UMASK_MASK		0x0000ff00
#define SNBEP_PMON_CTL_RST			(1 << 17)
#define SNBEP_PMON_CTL_EDGE_DET			(1 << 18)
#define SNBEP_PMON_CTL_EV_SEL_EXT		(1 << 21)	/* only for QPI */
#define SNBEP_PMON_CTL_EN			(1 << 22)
#define SNBEP_PMON_CTL_INVERT			(1 << 23)
#define SNBEP_PMON_CTL_TRESH_MASK		0xff000000
#define SNBEP_PMON_RAW_EVENT_MASK		(SNBEP_PMON_CTL_EV_SEL_MASK | \
						 SNBEP_PMON_CTL_UMASK_MASK | \
						 SNBEP_PMON_CTL_EDGE_DET | \
						 SNBEP_PMON_CTL_INVERT | \
						 SNBEP_PMON_CTL_TRESH_MASK)

/* SNB-EP Ubox event control */
#define SNBEP_U_MSR_PMON_CTL_TRESH_MASK		0x1f000000
#define SNBEP_U_MSR_PMON_RAW_EVENT_MASK		\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PMON_CTL_UMASK_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PMON_CTL_INVERT | \
				 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)

/* SNB-EP PCU event control */
#define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK	0x0000c000
#define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK	0x1f000000
#define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT	(1 << 30)
#define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET	(1 << 31)
#define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PMON_CTL_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)

/* SNB-EP pci control register */
#define SNBEP_PCI_PMON_BOX_CTL			0xf4
#define SNBEP_PCI_PMON_CTL0			0xd8
/* SNB-EP pci counter register */
#define SNBEP_PCI_PMON_CTR0			0xa0

/* SNB-EP home agent register */
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0	0x40
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1	0x44
#define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH	0x48
/* SNB-EP memory controller register */
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL		0xf0
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR		0xd0
/* SNB-EP QPI register */
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0		0x228
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1		0x22c
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK0		0x238
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK1		0x23c

/* SNB-EP Ubox register */
#define SNBEP_U_MSR_PMON_CTR0			0xc16
#define SNBEP_U_MSR_PMON_CTL0			0xc10

#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL		0xc08
#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR		0xc09

/* SNB-EP Cbo register */
#define SNBEP_C0_MSR_PMON_CTR0			0xd16
#define SNBEP_C0_MSR_PMON_CTL0			0xd10
#define SNBEP_C0_MSR_PMON_BOX_FILTER		0xd14
#define SNBEP_C0_MSR_PMON_BOX_CTL		0xd04
#define SNBEP_CBO_MSR_OFFSET			0x20

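/*
 * Each further Cbox repeats the C0 layout above at a stride of
 * SNBEP_CBO_MSR_OFFSET; Cbox 2's box control register, for example, is
 * 0xd04 + 2 * 0x20 = 0xd44.
 */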
/* SNB-EP PCU register */
#define SNBEP_PCU_MSR_PMON_CTR0			0xc36
#define SNBEP_PCU_MSR_PMON_CTL0			0xc30
#define SNBEP_PCU_MSR_PMON_BOX_FILTER		0xc34
#define SNBEP_PCU_MSR_PMON_BOX_CTL		0xc24
#define SNBEP_PCU_MSR_CORE_C3_CTR		0x3fc
#define SNBEP_PCU_MSR_CORE_C6_CTR		0x3fd

struct intel_uncore_ops;
struct intel_uncore_pmu;
struct intel_uncore_box;
struct uncore_event_desc;

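/*
 * An intel_uncore_type describes one kind of uncore PMU (Cbox, PCU, Ubox,
 * home agent, ...): how many boxes of that kind exist, their counter widths
 * and register layout, the per-box MSR stride, event constraints and the
 * shared access methods (ops).
 */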
struct intel_uncore_type {
	const char *name;
	int num_counters;
	int num_boxes;
	int perf_ctr_bits;
	int fixed_ctr_bits;
	int single_fixed;
	unsigned perf_ctr;
	unsigned event_ctl;
	unsigned event_mask;
	unsigned fixed_ctr;
	unsigned fixed_ctl;
	unsigned box_ctl;
	unsigned msr_offset;
	struct event_constraint unconstrainted;
	struct event_constraint *constraints;
	struct intel_uncore_pmu *pmus;
	struct intel_uncore_ops *ops;
	struct uncore_event_desc *event_descs;
	const struct attribute_group *attr_groups[3];
};

#define format_group attr_groups[0]

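/* Low-level box accessors, supplied per uncore type (MSR- or PCI-based). */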
struct intel_uncore_ops {
	void (*init_box)(struct intel_uncore_box *);
	void (*disable_box)(struct intel_uncore_box *);
	void (*enable_box)(struct intel_uncore_box *);
	void (*disable_event)(struct intel_uncore_box *, struct perf_event *);
	void (*enable_event)(struct intel_uncore_box *, struct perf_event *);
	u64 (*read_counter)(struct intel_uncore_box *, struct perf_event *);
};

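/*
 * One intel_uncore_pmu is registered with the perf core for each box
 * instance of a type; pmu_idx is the instance number and box is a per-cpu
 * pointer to the box serving that cpu's package.
 */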
struct intel_uncore_pmu {
	struct pmu pmu;
	char name[UNCORE_PMU_NAME_LEN];
	int pmu_idx;
	int func_id;
	struct intel_uncore_type *type;
	struct intel_uncore_box ** __percpu box;
	struct list_head box_list;
};

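/*
 * Run-time state of a single box: the events scheduled on its counters, the
 * cpu that collects them, a reference count for sharing the box among the
 * cpus of a package, and the hrtimer used to poll the counters (uncore
 * counters do not raise a per-event PMI).
 */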
struct intel_uncore_box {
	int phys_id;
	int n_active;	/* number of active events */
	int n_events;
	int cpu;	/* cpu to collect events */
	unsigned long flags;
	atomic_t refcnt;
	struct perf_event *events[UNCORE_PMC_IDX_MAX];
	struct perf_event *event_list[UNCORE_PMC_IDX_MAX];
	unsigned long active_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
	u64 tags[UNCORE_PMC_IDX_MAX];
	struct pci_dev *pci_dev;
	struct intel_uncore_pmu *pmu;
	struct hrtimer hrtimer;
	struct list_head list;
};

#define UNCORE_BOX_FLAG_INITIATED	0

struct uncore_event_desc {
	struct kobj_attribute attr;
	const char *config;
};

#define INTEL_UNCORE_EVENT_DESC(_name, _config)			\
{								\
	.attr	= __ATTR(_name, 0444, uncore_event_show, NULL),	\
	.config	= _config,					\
}

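/*
 * DEFINE_UNCORE_FORMAT_ATTR emits a read-only sysfs attribute describing a
 * config field's bit layout (e.g. "config:0-7"); uncore_event_show does the
 * same for the named events in a type's event_descs[] table.
 */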
#define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format)			\
static ssize_t __uncore_##_var##_show(struct kobject *kobj,		\
				struct kobj_attribute *attr,		\
				char *page)				\
{									\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
	return sprintf(page, _format "\n");				\
}									\
static struct kobj_attribute format_attr_##_var =			\
	__ATTR(_name, 0444, __uncore_##_var##_show, NULL)


static ssize_t uncore_event_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf)
{
	struct uncore_event_desc *event =
		container_of(attr, struct uncore_event_desc, attr);
	return sprintf(buf, "%s", event->config);
}

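/*
 * PCI-based boxes are programmed through PCI config space: control
 * registers sit at a 4-byte stride from event_ctl, counters at an 8-byte
 * stride from perf_ctr.
 */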
static inline unsigned uncore_pci_box_ctl(struct intel_uncore_box *box)
{
	return box->pmu->type->box_ctl;
}

static inline unsigned uncore_pci_fixed_ctl(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctl;
}

static inline unsigned uncore_pci_fixed_ctr(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr;
}

static inline
unsigned uncore_pci_event_ctl(struct intel_uncore_box *box, int idx)
{
	return idx * 4 + box->pmu->type->event_ctl;
}

static inline
unsigned uncore_pci_perf_ctr(struct intel_uncore_box *box, int idx)
{
	return idx * 8 + box->pmu->type->perf_ctr;
}

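/*
 * MSR-based boxes share one layout per type; instance N of a type is
 * reached by adding N * msr_offset to the base address.  For the SNB client
 * Cbox, e.g., counter control 0 of Cbox 1 is 0x700 + 1 * 0x10 = 0x710.
 */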
static inline
unsigned uncore_msr_box_ctl(struct intel_uncore_box *box)
{
	if (!box->pmu->type->box_ctl)
		return 0;
	return box->pmu->type->box_ctl +
		box->pmu->type->msr_offset * box->pmu->pmu_idx;
}

static inline
unsigned uncore_msr_fixed_ctl(struct intel_uncore_box *box)
{
	if (!box->pmu->type->fixed_ctl)
		return 0;
	return box->pmu->type->fixed_ctl +
		box->pmu->type->msr_offset * box->pmu->pmu_idx;
}

static inline
unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr +
		box->pmu->type->msr_offset * box->pmu->pmu_idx;
}

static inline
unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx)
{
	return idx + box->pmu->type->event_ctl +
		box->pmu->type->msr_offset * box->pmu->pmu_idx;
}

static inline
unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx)
{
	return idx + box->pmu->type->perf_ctr +
		box->pmu->type->msr_offset * box->pmu->pmu_idx;
}

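/*
 * Generic accessors: a box that hangs off a PCI device uses the PCI
 * variants, everything else goes through MSRs.
 */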
static inline
unsigned uncore_fixed_ctl(struct intel_uncore_box *box)
{
	if (box->pci_dev)
		return uncore_pci_fixed_ctl(box);
	else
		return uncore_msr_fixed_ctl(box);
}

static inline
unsigned uncore_fixed_ctr(struct intel_uncore_box *box)
{
	if (box->pci_dev)
		return uncore_pci_fixed_ctr(box);
	else
		return uncore_msr_fixed_ctr(box);
}

static inline
unsigned uncore_event_ctl(struct intel_uncore_box *box, int idx)
{
	if (box->pci_dev)
		return uncore_pci_event_ctl(box, idx);
	else
		return uncore_msr_event_ctl(box, idx);
}

static inline
unsigned uncore_perf_ctr(struct intel_uncore_box *box, int idx)
{
	if (box->pci_dev)
		return uncore_pci_perf_ctr(box, idx);
	else
		return uncore_msr_perf_ctr(box, idx);
}

static inline int uncore_perf_ctr_bits(struct intel_uncore_box *box)
{
	return box->pmu->type->perf_ctr_bits;
}

static inline int uncore_fixed_ctr_bits(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr_bits;
}

static inline int uncore_num_counters(struct intel_uncore_box *box)
{
	return box->pmu->type->num_counters;
}

static inline void uncore_disable_box(struct intel_uncore_box *box)
{
	if (box->pmu->type->ops->disable_box)
		box->pmu->type->ops->disable_box(box);
}

static inline void uncore_enable_box(struct intel_uncore_box *box)
{
	if (box->pmu->type->ops->enable_box)
		box->pmu->type->ops->enable_box(box);
}

static inline void uncore_disable_event(struct intel_uncore_box *box,
				struct perf_event *event)
{
	box->pmu->type->ops->disable_event(box, event);
}

static inline void uncore_enable_event(struct intel_uncore_box *box,
				struct perf_event *event)
{
	box->pmu->type->ops->enable_event(box, event);
}

static inline u64 uncore_read_counter(struct intel_uncore_box *box,
				struct perf_event *event)
{
	return box->pmu->type->ops->read_counter(box, event);
}

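/*
 * init_box runs at most once per box; the INITIATED flag keeps a box that
 * is shared by the cpus of a package from being re-initialized.
 */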
static inline void uncore_box_init(struct intel_uncore_box *box)
{
	if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
		if (box->pmu->type->ops->init_box)
			box->pmu->type->ops->init_box(box);
	}
}