#include "perf_event_intel_uncore.h"

static struct intel_uncore_type *empty_uncore[] = { NULL, };
static struct intel_uncore_type **msr_uncores = empty_uncore;
static struct intel_uncore_type **pci_uncores = empty_uncore;
/* pci bus to socket mapping */
static int pcibus_to_physid[256] = { [0 ... 255] = -1, };

static struct pci_dev *extra_pci_dev[UNCORE_SOCKET_MAX][UNCORE_EXTRA_PCI_DEV_MAX];

static DEFINE_RAW_SPINLOCK(uncore_box_lock);

/* mask of cpus that collect uncore events */
static cpumask_t uncore_cpu_mask;

/* constraint for the fixed counter */
static struct event_constraint constraint_fixed =
        EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL);
static struct event_constraint constraint_empty =
        EVENT_CONSTRAINT(0, 0, 0);

#define __BITS_VALUE(x, i, n)  ((typeof(x))(((x) >> ((i) * (n))) & \
                                ((1ULL << (n)) - 1)))
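/*
 * Example (illustrative, added for exposition): with n = 6 bit wide
 * sub-fields, __BITS_VALUE() pulls one packed value out of a word:
 *
 *      __BITS_VALUE(0x0c1, 0, 6) == 0x01       (bits 0-5)
 *      __BITS_VALUE(0x0c1, 1, 6) == 0x03       (bits 6-11)
 *
 * The Cbox and PCU code below relies on this to keep several small
 * reference counts packed inside a single atomic_t (er->ref).
 */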
DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");
static void uncore_pmu_start_hrtimer(struct intel_uncore_box *box);
static void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box);
static void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event);
static void uncore_pmu_event_read(struct perf_event *event);

static struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
{
        return container_of(event->pmu, struct intel_uncore_pmu, pmu);
}

static struct intel_uncore_box *
uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
{
        struct intel_uncore_box *box;

        box = *per_cpu_ptr(pmu->box, cpu);
        if (box)
                return box;

        raw_spin_lock(&uncore_box_lock);
        list_for_each_entry(box, &pmu->box_list, list) {
                if (box->phys_id == topology_physical_package_id(cpu)) {
                        atomic_inc(&box->refcnt);
                        *per_cpu_ptr(pmu->box, cpu) = box;
                        break;
                }
        }
        raw_spin_unlock(&uncore_box_lock);

        return *per_cpu_ptr(pmu->box, cpu);
}

static struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
{
        /*
         * perf core schedules event on the basis of cpu, uncore events are
         * collected by one of the cpus inside a physical package.
         */
        return uncore_pmu_to_box(uncore_event_to_pmu(event), smp_processor_id());
}

static u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
        u64 count;

        rdmsrl(event->hw.event_base, count);

        return count;
}
/*
 * generic get constraint function for shared match/mask registers.
 */
static struct event_constraint *
uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
        struct intel_uncore_extra_reg *er;
        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
        struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
        unsigned long flags;
        bool ok = false;

        /*
         * reg->alloc can be set due to existing state, so for fake box we
         * need to ignore this, otherwise we might fail to allocate proper
         * fake state for this extra reg constraint.
         */
        if (reg1->idx == EXTRA_REG_NONE ||
            (!uncore_box_is_fake(box) && reg1->alloc))
                return NULL;

        er = &box->shared_regs[reg1->idx];
        raw_spin_lock_irqsave(&er->lock, flags);
        if (!atomic_read(&er->ref) ||
            (er->config1 == reg1->config && er->config2 == reg2->config)) {
                atomic_inc(&er->ref);
                er->config1 = reg1->config;
                er->config2 = reg2->config;
                ok = true;
        }
        raw_spin_unlock_irqrestore(&er->lock, flags);

        if (ok) {
                if (!uncore_box_is_fake(box))
                        reg1->alloc = 1;
                return NULL;
        }

        return &constraint_empty;
}
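/*
 * Example (illustrative): two events may share one match/mask register
 * pair only if they program identical values.  The first event latches
 * its config1/config2 into the shared reg and takes a reference; a second
 * event with the same values just bumps er->ref, while one asking for
 * different values gets constraint_empty and stays off the hardware until
 * the register is released by uncore_put_constraint().
 */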
static void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
        struct intel_uncore_extra_reg *er;
        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;

        /*
         * Only put constraint if extra reg was actually allocated. Also
         * takes care of event which do not use an extra shared reg.
         *
         * Also, if this is a fake box we shouldn't touch any event state
         * (reg->alloc) and we don't care about leaving inconsistent box
         * state either since it will be thrown out.
         */
        if (uncore_box_is_fake(box) || !reg1->alloc)
                return;

        er = &box->shared_regs[reg1->idx];
        atomic_dec(&er->ref);
        reg1->alloc = 0;
}

static u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx)
{
        struct intel_uncore_extra_reg *er;
        unsigned long flags;
        u64 config;

        er = &box->shared_regs[idx];

        raw_spin_lock_irqsave(&er->lock, flags);
        config = er->config;
        raw_spin_unlock_irqrestore(&er->lock, flags);

        return config;
}

/* Sandy Bridge-EP uncore support */
static struct intel_uncore_type snbep_uncore_cbox;
static struct intel_uncore_type snbep_uncore_pcu;
static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
{
        struct pci_dev *pdev = box->pci_dev;
        int box_ctl = uncore_pci_box_ctl(box);
        u32 config = 0;

        if (!pci_read_config_dword(pdev, box_ctl, &config)) {
                config |= SNBEP_PMON_BOX_CTL_FRZ;
                pci_write_config_dword(pdev, box_ctl, config);
        }
}

static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
{
        struct pci_dev *pdev = box->pci_dev;
        int box_ctl = uncore_pci_box_ctl(box);
        u32 config = 0;

        if (!pci_read_config_dword(pdev, box_ctl, &config)) {
                config &= ~SNBEP_PMON_BOX_CTL_FRZ;
                pci_write_config_dword(pdev, box_ctl, config);
        }
}

static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
        struct pci_dev *pdev = box->pci_dev;
        struct hw_perf_event *hwc = &event->hw;

        pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
        struct pci_dev *pdev = box->pci_dev;
        struct hw_perf_event *hwc = &event->hw;

        pci_write_config_dword(pdev, hwc->config_base, hwc->config);
}

static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
        struct pci_dev *pdev = box->pci_dev;
        struct hw_perf_event *hwc = &event->hw;
        u64 count = 0;

        pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
        pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);

        return count;
}
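/*
 * Note (illustrative): PCI config space only allows 32-bit accesses, so
 * the wide counter is assembled from two dword reads.  Writing through
 * (u32 *)&count and (u32 *)&count + 1 fills the low and high halves,
 * which matches the little-endian layout x86 guarantees.
 */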
static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
{
        struct pci_dev *pdev = box->pci_dev;

        pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, SNBEP_PMON_BOX_CTL_INT);
}

static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
{
        u64 config;
        unsigned msr;

        msr = uncore_msr_box_ctl(box);
        if (msr) {
                rdmsrl(msr, config);
                config |= SNBEP_PMON_BOX_CTL_FRZ;
                wrmsrl(msr, config);
        }
}

static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
{
        u64 config;
        unsigned msr;

        msr = uncore_msr_box_ctl(box);
        if (msr) {
                rdmsrl(msr, config);
                config &= ~SNBEP_PMON_BOX_CTL_FRZ;
                wrmsrl(msr, config);
        }
}

static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

        if (reg1->idx != EXTRA_REG_NONE)
                wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));

        wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
                                           struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        wrmsrl(hwc->config_base, hwc->config);
}

static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
{
        unsigned msr = uncore_msr_box_ctl(box);

        if (msr)
                wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
}
static struct attribute *snbep_uncore_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_thresh8.attr,
        NULL,
};

static struct attribute *snbep_uncore_ubox_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_thresh5.attr,
        NULL,
};

static struct attribute *snbep_uncore_cbox_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_tid_en.attr,
        &format_attr_inv.attr,
        &format_attr_thresh8.attr,
        &format_attr_filter_tid.attr,
        &format_attr_filter_nid.attr,
        &format_attr_filter_state.attr,
        &format_attr_filter_opc.attr,
        NULL,
};

static struct attribute *snbep_uncore_pcu_formats_attr[] = {
        &format_attr_event_ext.attr,
        &format_attr_occ_sel.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_thresh5.attr,
        &format_attr_occ_invert.attr,
        &format_attr_occ_edge.attr,
        &format_attr_filter_band0.attr,
        &format_attr_filter_band1.attr,
        &format_attr_filter_band2.attr,
        &format_attr_filter_band3.attr,
        NULL,
};

static struct attribute *snbep_uncore_qpi_formats_attr[] = {
        &format_attr_event_ext.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_thresh8.attr,
        &format_attr_match_rds.attr,
        &format_attr_match_rnid30.attr,
        &format_attr_match_rnid4.attr,
        &format_attr_match_dnid.attr,
        &format_attr_match_mc.attr,
        &format_attr_match_opc.attr,
        &format_attr_match_vnw.attr,
        &format_attr_match0.attr,
        &format_attr_match1.attr,
        &format_attr_mask_rds.attr,
        &format_attr_mask_rnid30.attr,
        &format_attr_mask_rnid4.attr,
        &format_attr_mask_dnid.attr,
        &format_attr_mask_mc.attr,
        &format_attr_mask_opc.attr,
        &format_attr_mask_vnw.attr,
        &format_attr_mask0.attr,
        &format_attr_mask1.attr,
        NULL,
};
static struct uncore_event_desc snbep_uncore_imc_events[] = {
        INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0xff,umask=0x00"),
        INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
        INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
        { /* end: all zeroes */ },
};

static struct uncore_event_desc snbep_uncore_qpi_events[] = {
        INTEL_UNCORE_EVENT_DESC(clockticks,       "event=0x14"),
        INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
        INTEL_UNCORE_EVENT_DESC(drs_data,         "event=0x102,umask=0x08"),
        INTEL_UNCORE_EVENT_DESC(ncb_data,         "event=0x103,umask=0x04"),
        { /* end: all zeroes */ },
};

static struct attribute_group snbep_uncore_format_group = {
        .name = "format",
        .attrs = snbep_uncore_formats_attr,
};

static struct attribute_group snbep_uncore_ubox_format_group = {
        .name = "format",
        .attrs = snbep_uncore_ubox_formats_attr,
};

static struct attribute_group snbep_uncore_cbox_format_group = {
        .name = "format",
        .attrs = snbep_uncore_cbox_formats_attr,
};

static struct attribute_group snbep_uncore_pcu_format_group = {
        .name = "format",
        .attrs = snbep_uncore_pcu_formats_attr,
};

static struct attribute_group snbep_uncore_qpi_format_group = {
        .name = "format",
        .attrs = snbep_uncore_qpi_formats_attr,
};
#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT()                      \
        .init_box       = snbep_uncore_msr_init_box,            \
        .disable_box    = snbep_uncore_msr_disable_box,         \
        .enable_box     = snbep_uncore_msr_enable_box,          \
        .disable_event  = snbep_uncore_msr_disable_event,       \
        .enable_event   = snbep_uncore_msr_enable_event,        \
        .read_counter   = uncore_msr_read_counter

static struct intel_uncore_ops snbep_uncore_msr_ops = {
        SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};

#define SNBEP_UNCORE_PCI_OPS_COMMON_INIT()                      \
        .init_box       = snbep_uncore_pci_init_box,            \
        .disable_box    = snbep_uncore_pci_disable_box,         \
        .enable_box     = snbep_uncore_pci_enable_box,          \
        .disable_event  = snbep_uncore_pci_disable_event,       \
        .read_counter   = snbep_uncore_pci_read_counter

static struct intel_uncore_ops snbep_uncore_pci_ops = {
        SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
        .enable_event   = snbep_uncore_pci_enable_event,
};
static struct event_constraint snbep_uncore_cbox_constraints[] = {
        UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
        UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
        UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
        UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
        EVENT_CONSTRAINT_OVERLAP(0x1f, 0xe, 0xff),
        UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
        EVENT_CONSTRAINT_END
};

static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
        UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
        EVENT_CONSTRAINT_END
};

static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
        UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
        EVENT_CONSTRAINT_END
};
static struct intel_uncore_type snbep_uncore_ubox = {
        .name           = "ubox",
        .num_counters   = 2,
        .num_boxes      = 1,
        .perf_ctr_bits  = 44,
        .fixed_ctr_bits = 48,
        .perf_ctr       = SNBEP_U_MSR_PMON_CTR0,
        .event_ctl      = SNBEP_U_MSR_PMON_CTL0,
        .event_mask     = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
        .fixed_ctr      = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
        .fixed_ctl      = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
        .ops            = &snbep_uncore_msr_ops,
        .format_group   = &snbep_uncore_ubox_format_group,
};
static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
        SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
                                  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xc),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xc),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xc),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xc),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
        EVENT_EXTRA_END
};
static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
        struct intel_uncore_extra_reg *er = &box->shared_regs[0];
        int i;

        if (uncore_box_is_fake(box))
                return;

        for (i = 0; i < 5; i++) {
                if (reg1->alloc & (0x1 << i))
                        atomic_sub(1 << (i * 6), &er->ref);
        }
        reg1->alloc = 0;
}
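/*
 * Example (illustrative): the Cbox filter register packs up to five
 * independent fields (tid/nid/state/opc, plus link on IvyTown), and
 * er->ref holds one 6-bit use count per field.  An event that had
 * allocated field 0 and field 4 drops them with atomic_sub(1 << 0)
 * and atomic_sub(1 << 24) respectively.
 */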
static struct event_constraint *
__snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
                            u64 (*cbox_filter_mask)(int fields))
{
        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
        struct intel_uncore_extra_reg *er = &box->shared_regs[0];
        int i, alloc = 0;
        unsigned long flags;
        u64 mask;

        if (reg1->idx == EXTRA_REG_NONE)
                return NULL;

        raw_spin_lock_irqsave(&er->lock, flags);
        for (i = 0; i < 5; i++) {
                if (!(reg1->idx & (0x1 << i)))
                        continue;
                if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
                        continue;

                mask = cbox_filter_mask(0x1 << i);
                if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
                    !((reg1->config ^ er->config) & mask)) {
                        atomic_add(1 << (i * 6), &er->ref);
                        er->config &= ~mask;
                        er->config |= reg1->config & mask;
                        alloc |= (0x1 << i);
                } else {
                        break;
                }
        }
        raw_spin_unlock_irqrestore(&er->lock, flags);
        if (i < 5)
                goto fail;

        if (!uncore_box_is_fake(box))
                reg1->alloc |= alloc;

        return NULL;
fail:
        for (; i >= 0; i--) {
                if (alloc & (0x1 << i))
                        atomic_sub(1 << (i * 6), &er->ref);
        }
        return &constraint_empty;
}

static u64 snbep_cbox_filter_mask(int fields)
{
        u64 mask = 0;

        if (fields & 0x1)
                mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
        if (fields & 0x2)
                mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
        if (fields & 0x4)
                mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
        if (fields & 0x8)
                mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;

        return mask;
}

static struct event_constraint *
snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
        return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
}
static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
        struct extra_reg *er;
        int idx = 0;

        for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
                if (er->event != (event->hw.config & er->config_mask))
                        continue;
                idx |= er->idx;
        }

        if (idx) {
                reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
                        SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
                reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
                reg1->idx = idx;
        }
        return 0;
}

static struct intel_uncore_ops snbep_uncore_cbox_ops = {
        SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
        .hw_config              = snbep_cbox_hw_config,
        .get_constraint         = snbep_cbox_get_constraint,
        .put_constraint         = snbep_cbox_put_constraint,
};
static struct intel_uncore_type snbep_uncore_cbox = {
        .name                   = "cbox",
        .num_counters           = 4,
        .num_boxes              = 8,
        .perf_ctr_bits          = 44,
        .event_ctl              = SNBEP_C0_MSR_PMON_CTL0,
        .perf_ctr               = SNBEP_C0_MSR_PMON_CTR0,
        .event_mask             = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
        .box_ctl                = SNBEP_C0_MSR_PMON_BOX_CTL,
        .msr_offset             = SNBEP_CBO_MSR_OFFSET,
        .num_shared_regs        = 1,
        .constraints            = snbep_uncore_cbox_constraints,
        .ops                    = &snbep_uncore_cbox_ops,
        .format_group           = &snbep_uncore_cbox_format_group,
};
static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
{
        struct hw_perf_event *hwc = &event->hw;
        struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
        u64 config = reg1->config;

        if (new_idx > reg1->idx)
                config <<= 8 * (new_idx - reg1->idx);
        else
                config >>= 8 * (reg1->idx - new_idx);

        if (modify) {
                hwc->config += new_idx - reg1->idx;
                reg1->config = config;
                reg1->idx = new_idx;
        }
        return config;
}
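/*
 * Example (illustrative): each PCU occupancy band filter occupies one
 * 8-bit lane of config1 and the band index is encoded in the event
 * select.  Moving an event from band 1 to band 3 shifts its filter value
 * left by 8 * (3 - 1) = 16 bits, and with modify=true also bumps
 * hwc->config by two event codes so the hardware samples the new band.
 */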
static struct event_constraint *
snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
        struct intel_uncore_extra_reg *er = &box->shared_regs[0];
        unsigned long flags;
        int idx = reg1->idx;
        u64 mask, config1 = reg1->config;
        bool ok = false;

        if (reg1->idx == EXTRA_REG_NONE ||
            (!uncore_box_is_fake(box) && reg1->alloc))
                return NULL;
again:
        mask = 0xffULL << (idx * 8);
        raw_spin_lock_irqsave(&er->lock, flags);
        if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
            !((config1 ^ er->config) & mask)) {
                atomic_add(1 << (idx * 8), &er->ref);
                er->config &= ~mask;
                er->config |= config1 & mask;
                ok = true;
        }
        raw_spin_unlock_irqrestore(&er->lock, flags);

        if (!ok) {
                idx = (idx + 1) % 4;
                if (idx != reg1->idx) {
                        config1 = snbep_pcu_alter_er(event, idx, false);
                        goto again;
                }
                return &constraint_empty;
        }

        if (!uncore_box_is_fake(box)) {
                if (idx != reg1->idx)
                        snbep_pcu_alter_er(event, idx, true);
                reg1->alloc = 1;
        }
        return NULL;
}

static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
        struct intel_uncore_extra_reg *er = &box->shared_regs[0];

        if (uncore_box_is_fake(box) || !reg1->alloc)
                return;

        atomic_sub(1 << (reg1->idx * 8), &er->ref);
        reg1->alloc = 0;
}
static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
        int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;

        if (ev_sel >= 0xb && ev_sel <= 0xe) {
                reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
                reg1->idx = ev_sel - 0xb;
                reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
        }
        return 0;
}
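/*
 * Note (illustrative): event selects 0xb..0xe are the four PCU band
 * occupancy events, so ev_sel - 0xb yields band index 0..3 and the
 * matching 8-bit slice of config1 (filter_band0..filter_band3 in the
 * format attributes above) becomes that band's threshold.
 */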
static struct intel_uncore_ops snbep_uncore_pcu_ops = {
        SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
        .hw_config              = snbep_pcu_hw_config,
        .get_constraint         = snbep_pcu_get_constraint,
        .put_constraint         = snbep_pcu_put_constraint,
};

static struct intel_uncore_type snbep_uncore_pcu = {
        .name                   = "pcu",
        .num_counters           = 4,
        .num_boxes              = 1,
        .perf_ctr_bits          = 48,
        .perf_ctr               = SNBEP_PCU_MSR_PMON_CTR0,
        .event_ctl              = SNBEP_PCU_MSR_PMON_CTL0,
        .event_mask             = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
        .box_ctl                = SNBEP_PCU_MSR_PMON_BOX_CTL,
        .num_shared_regs        = 1,
        .ops                    = &snbep_uncore_pcu_ops,
        .format_group           = &snbep_uncore_pcu_format_group,
};

static struct intel_uncore_type *snbep_msr_uncores[] = {
        &snbep_uncore_ubox,
        &snbep_uncore_cbox,
        &snbep_uncore_pcu,
        NULL,
};

enum {
        SNBEP_PCI_QPI_PORT0_FILTER,
        SNBEP_PCI_QPI_PORT1_FILTER,
};
static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
        struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

        if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
                reg1->idx = 0;
                reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
                reg1->config = event->attr.config1;
                reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
                reg2->config = event->attr.config2;
        }
        return 0;
}
static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
        struct pci_dev *pdev = box->pci_dev;
        struct hw_perf_event *hwc = &event->hw;
        struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
        struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

        if (reg1->idx != EXTRA_REG_NONE) {
                int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
                struct pci_dev *filter_pdev = extra_pci_dev[box->phys_id][idx];
                WARN_ON_ONCE(!filter_pdev);
                if (filter_pdev) {
                        pci_write_config_dword(filter_pdev, reg1->reg,
                                                (u32)reg1->config);
                        pci_write_config_dword(filter_pdev, reg1->reg + 4,
                                                (u32)(reg1->config >> 32));
                        pci_write_config_dword(filter_pdev, reg2->reg,
                                                (u32)reg2->config);
                        pci_write_config_dword(filter_pdev, reg2->reg + 4,
                                                (u32)(reg2->config >> 32));
                }
        }

        pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static struct intel_uncore_ops snbep_uncore_qpi_ops = {
        SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
        .enable_event           = snbep_qpi_enable_event,
        .hw_config              = snbep_qpi_hw_config,
        .get_constraint         = uncore_get_constraint,
        .put_constraint         = uncore_put_constraint,
};
#define SNBEP_UNCORE_PCI_COMMON_INIT()                          \
        .perf_ctr       = SNBEP_PCI_PMON_CTR0,                  \
        .event_ctl      = SNBEP_PCI_PMON_CTL0,                  \
        .event_mask     = SNBEP_PMON_RAW_EVENT_MASK,            \
        .box_ctl        = SNBEP_PCI_PMON_BOX_CTL,               \
        .ops            = &snbep_uncore_pci_ops,                \
        .format_group   = &snbep_uncore_format_group

static struct intel_uncore_type snbep_uncore_ha = {
        .name           = "ha",
        .num_counters   = 4,
        .num_boxes      = 1,
        .perf_ctr_bits  = 48,
        SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_imc = {
        .name           = "imc",
        .num_counters   = 4,
        .num_boxes      = 4,
        .perf_ctr_bits  = 48,
        .fixed_ctr_bits = 48,
        .fixed_ctr      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
        .fixed_ctl      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
        .event_descs    = snbep_uncore_imc_events,
        SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_qpi = {
        .name                   = "qpi",
        .num_counters           = 4,
        .num_boxes              = 2,
        .perf_ctr_bits          = 48,
        .perf_ctr               = SNBEP_PCI_PMON_CTR0,
        .event_ctl              = SNBEP_PCI_PMON_CTL0,
        .event_mask             = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
        .box_ctl                = SNBEP_PCI_PMON_BOX_CTL,
        .num_shared_regs        = 1,
        .ops                    = &snbep_uncore_qpi_ops,
        .event_descs            = snbep_uncore_qpi_events,
        .format_group           = &snbep_uncore_qpi_format_group,
};

static struct intel_uncore_type snbep_uncore_r2pcie = {
        .name           = "r2pcie",
        .num_counters   = 4,
        .num_boxes      = 1,
        .perf_ctr_bits  = 44,
        .constraints    = snbep_uncore_r2pcie_constraints,
        SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_r3qpi = {
        .name           = "r3qpi",
        .num_counters   = 3,
        .num_boxes      = 2,
        .perf_ctr_bits  = 44,
        .constraints    = snbep_uncore_r3qpi_constraints,
        SNBEP_UNCORE_PCI_COMMON_INIT(),
};
enum {
        SNBEP_PCI_UNCORE_HA,
        SNBEP_PCI_UNCORE_IMC,
        SNBEP_PCI_UNCORE_QPI,
        SNBEP_PCI_UNCORE_R2PCIE,
        SNBEP_PCI_UNCORE_R3QPI,
};

static struct intel_uncore_type *snbep_pci_uncores[] = {
        [SNBEP_PCI_UNCORE_HA]           = &snbep_uncore_ha,
        [SNBEP_PCI_UNCORE_IMC]          = &snbep_uncore_imc,
        [SNBEP_PCI_UNCORE_QPI]          = &snbep_uncore_qpi,
        [SNBEP_PCI_UNCORE_R2PCIE]       = &snbep_uncore_r2pcie,
        [SNBEP_PCI_UNCORE_R3QPI]        = &snbep_uncore_r3qpi,
        NULL,
};
static DEFINE_PCI_DEVICE_TABLE(snbep_uncore_pci_ids) = {
        { /* Home Agent */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
                .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
        },
        { /* MC Channel 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
                .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
        },
        { /* MC Channel 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
                .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
        },
        { /* MC Channel 2 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
                .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
        },
        { /* MC Channel 3 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
                .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
        },
        { /* QPI Port 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
                .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
        },
        { /* QPI Port 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
                .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
        },
        { /* R2PCIe */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
                .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
        },
        { /* R3QPI Link 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
                .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
        },
        { /* R3QPI Link 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
                .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
        },
        { /* QPI Port 0 filter */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
                .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
                                                   SNBEP_PCI_QPI_PORT0_FILTER),
        },
        { /* QPI Port 1 filter */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
                .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
                                                   SNBEP_PCI_QPI_PORT1_FILTER),
        },
        { /* end: all zeroes */ }
};

static struct pci_driver snbep_uncore_pci_driver = {
        .name           = "snbep_uncore",
        .id_table       = snbep_uncore_pci_ids,
};
/*
 * build pci bus to socket mapping
 */
static int snbep_pci2phy_map_init(int devid)
{
        struct pci_dev *ubox_dev = NULL;
        int i, bus, nodeid;
        int err = 0;
        u32 config = 0;

        while (1) {
                /* find the UBOX device */
                ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
                if (!ubox_dev)
                        break;
                bus = ubox_dev->bus->number;
                /* get the Node ID of the local register */
                err = pci_read_config_dword(ubox_dev, 0x40, &config);
                if (err)
                        break;
                nodeid = config;
                /* get the Node ID mapping */
                err = pci_read_config_dword(ubox_dev, 0x54, &config);
                if (err)
                        break;
                /*
                 * every three bits in the Node ID mapping register maps
                 * to a particular node.
                 */
                for (i = 0; i < 8; i++) {
                        if (nodeid == ((config >> (3 * i)) & 0x7)) {
                                pcibus_to_physid[bus] = i;
                                break;
                        }
                }
        }

        if (!err) {
                /*
                 * For PCI bus with no UBOX device, find the next bus
                 * that has UBOX device and use its mapping.
                 */
                i = -1;
                for (bus = 255; bus >= 0; bus--) {
                        if (pcibus_to_physid[bus] >= 0)
                                i = pcibus_to_physid[bus];
                        else
                                pcibus_to_physid[bus] = i;
                }
        }

        if (ubox_dev)
                pci_dev_put(ubox_dev);

        return err ? pcibios_err_to_errno(err) : 0;
}
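/*
 * Example (illustrative): if the mapping register at offset 0x54 reads
 * 0x688, the 3-bit groups decode as node 0 in group 0, node 1 in group 1,
 * node 2 in group 2 and node 3 in group 3.  A UBOX whose local Node ID
 * register reads 2 therefore maps its bus to physical package id 2.
 */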
/* end of Sandy Bridge-EP uncore support */

/* IvyTown uncore support */
static void ivt_uncore_msr_init_box(struct intel_uncore_box *box)
{
        unsigned msr = uncore_msr_box_ctl(box);

        if (msr)
                wrmsrl(msr, IVT_PMON_BOX_CTL_INT);
}

static void ivt_uncore_pci_init_box(struct intel_uncore_box *box)
{
        struct pci_dev *pdev = box->pci_dev;

        pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVT_PMON_BOX_CTL_INT);
}

#define IVT_UNCORE_MSR_OPS_COMMON_INIT()                        \
        .init_box       = ivt_uncore_msr_init_box,              \
        .disable_box    = snbep_uncore_msr_disable_box,         \
        .enable_box     = snbep_uncore_msr_enable_box,          \
        .disable_event  = snbep_uncore_msr_disable_event,       \
        .enable_event   = snbep_uncore_msr_enable_event,        \
        .read_counter   = uncore_msr_read_counter

static struct intel_uncore_ops ivt_uncore_msr_ops = {
        IVT_UNCORE_MSR_OPS_COMMON_INIT(),
};

static struct intel_uncore_ops ivt_uncore_pci_ops = {
        .init_box       = ivt_uncore_pci_init_box,
        .disable_box    = snbep_uncore_pci_disable_box,
        .enable_box     = snbep_uncore_pci_enable_box,
        .disable_event  = snbep_uncore_pci_disable_event,
        .enable_event   = snbep_uncore_pci_enable_event,
        .read_counter   = snbep_uncore_pci_read_counter,
};

#define IVT_UNCORE_PCI_COMMON_INIT()                            \
        .perf_ctr       = SNBEP_PCI_PMON_CTR0,                  \
        .event_ctl      = SNBEP_PCI_PMON_CTL0,                  \
        .event_mask     = IVT_PMON_RAW_EVENT_MASK,              \
        .box_ctl        = SNBEP_PCI_PMON_BOX_CTL,               \
        .ops            = &ivt_uncore_pci_ops,                  \
        .format_group   = &ivt_uncore_format_group
static struct attribute *ivt_uncore_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_thresh8.attr,
        NULL,
};

static struct attribute *ivt_uncore_ubox_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_thresh5.attr,
        NULL,
};

static struct attribute *ivt_uncore_cbox_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_tid_en.attr,
        &format_attr_thresh8.attr,
        &format_attr_filter_tid.attr,
        &format_attr_filter_link.attr,
        &format_attr_filter_state2.attr,
        &format_attr_filter_nid2.attr,
        &format_attr_filter_opc2.attr,
        NULL,
};

static struct attribute *ivt_uncore_pcu_formats_attr[] = {
        &format_attr_event_ext.attr,
        &format_attr_occ_sel.attr,
        &format_attr_edge.attr,
        &format_attr_thresh5.attr,
        &format_attr_occ_invert.attr,
        &format_attr_occ_edge.attr,
        &format_attr_filter_band0.attr,
        &format_attr_filter_band1.attr,
        &format_attr_filter_band2.attr,
        &format_attr_filter_band3.attr,
        NULL,
};

static struct attribute *ivt_uncore_qpi_formats_attr[] = {
        &format_attr_event_ext.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_thresh8.attr,
        &format_attr_match_rds.attr,
        &format_attr_match_rnid30.attr,
        &format_attr_match_rnid4.attr,
        &format_attr_match_dnid.attr,
        &format_attr_match_mc.attr,
        &format_attr_match_opc.attr,
        &format_attr_match_vnw.attr,
        &format_attr_match0.attr,
        &format_attr_match1.attr,
        &format_attr_mask_rds.attr,
        &format_attr_mask_rnid30.attr,
        &format_attr_mask_rnid4.attr,
        &format_attr_mask_dnid.attr,
        &format_attr_mask_mc.attr,
        &format_attr_mask_opc.attr,
        &format_attr_mask_vnw.attr,
        &format_attr_mask0.attr,
        &format_attr_mask1.attr,
        NULL,
};
static struct attribute_group ivt_uncore_format_group = {
        .name = "format",
        .attrs = ivt_uncore_formats_attr,
};

static struct attribute_group ivt_uncore_ubox_format_group = {
        .name = "format",
        .attrs = ivt_uncore_ubox_formats_attr,
};

static struct attribute_group ivt_uncore_cbox_format_group = {
        .name = "format",
        .attrs = ivt_uncore_cbox_formats_attr,
};

static struct attribute_group ivt_uncore_pcu_format_group = {
        .name = "format",
        .attrs = ivt_uncore_pcu_formats_attr,
};

static struct attribute_group ivt_uncore_qpi_format_group = {
        .name = "format",
        .attrs = ivt_uncore_qpi_formats_attr,
};
static struct intel_uncore_type ivt_uncore_ubox = {
        .name           = "ubox",
        .num_counters   = 2,
        .num_boxes      = 1,
        .perf_ctr_bits  = 44,
        .fixed_ctr_bits = 48,
        .perf_ctr       = SNBEP_U_MSR_PMON_CTR0,
        .event_ctl      = SNBEP_U_MSR_PMON_CTL0,
        .event_mask     = IVT_U_MSR_PMON_RAW_EVENT_MASK,
        .fixed_ctr      = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
        .fixed_ctl      = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
        .ops            = &ivt_uncore_msr_ops,
        .format_group   = &ivt_uncore_ubox_format_group,
};
static struct extra_reg ivt_uncore_cbox_extra_regs[] = {
        SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
                                  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
        SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
        SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
        EVENT_EXTRA_END
};
static u64 ivt_cbox_filter_mask(int fields)
{
        u64 mask = 0;

        if (fields & 0x1)
                mask |= IVT_CB0_MSR_PMON_BOX_FILTER_TID;
        if (fields & 0x2)
                mask |= IVT_CB0_MSR_PMON_BOX_FILTER_LINK;
        if (fields & 0x4)
                mask |= IVT_CB0_MSR_PMON_BOX_FILTER_STATE;
        if (fields & 0x8)
                mask |= IVT_CB0_MSR_PMON_BOX_FILTER_NID;
        if (fields & 0x10)
                mask |= IVT_CB0_MSR_PMON_BOX_FILTER_OPC;

        return mask;
}

static struct event_constraint *
ivt_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
        return __snbep_cbox_get_constraint(box, event, ivt_cbox_filter_mask);
}

static int ivt_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
        struct extra_reg *er;
        int idx = 0;

        for (er = ivt_uncore_cbox_extra_regs; er->msr; er++) {
                if (er->event != (event->hw.config & er->config_mask))
                        continue;
                idx |= er->idx;
        }

        if (idx) {
                reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
                        SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
                reg1->config = event->attr.config1 & ivt_cbox_filter_mask(idx);
                reg1->idx = idx;
        }
        return 0;
}
static void ivt_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

        if (reg1->idx != EXTRA_REG_NONE) {
                u64 filter = uncore_shared_reg_config(box, 0);
                wrmsrl(reg1->reg, filter & 0xffffffff);
                wrmsrl(reg1->reg + 6, filter >> 32);
        }

        wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static struct intel_uncore_ops ivt_uncore_cbox_ops = {
        .init_box               = ivt_uncore_msr_init_box,
        .disable_box            = snbep_uncore_msr_disable_box,
        .enable_box             = snbep_uncore_msr_enable_box,
        .disable_event          = snbep_uncore_msr_disable_event,
        .enable_event           = ivt_cbox_enable_event,
        .read_counter           = uncore_msr_read_counter,
        .hw_config              = ivt_cbox_hw_config,
        .get_constraint         = ivt_cbox_get_constraint,
        .put_constraint         = snbep_cbox_put_constraint,
};
static struct intel_uncore_type ivt_uncore_cbox = {
        .name                   = "cbox",
        .num_counters           = 4,
        .num_boxes              = 15,
        .perf_ctr_bits          = 44,
        .event_ctl              = SNBEP_C0_MSR_PMON_CTL0,
        .perf_ctr               = SNBEP_C0_MSR_PMON_CTR0,
        .event_mask             = IVT_CBO_MSR_PMON_RAW_EVENT_MASK,
        .box_ctl                = SNBEP_C0_MSR_PMON_BOX_CTL,
        .msr_offset             = SNBEP_CBO_MSR_OFFSET,
        .num_shared_regs        = 1,
        .constraints            = snbep_uncore_cbox_constraints,
        .ops                    = &ivt_uncore_cbox_ops,
        .format_group           = &ivt_uncore_cbox_format_group,
};

static struct intel_uncore_ops ivt_uncore_pcu_ops = {
        IVT_UNCORE_MSR_OPS_COMMON_INIT(),
        .hw_config              = snbep_pcu_hw_config,
        .get_constraint         = snbep_pcu_get_constraint,
        .put_constraint         = snbep_pcu_put_constraint,
};

static struct intel_uncore_type ivt_uncore_pcu = {
        .name                   = "pcu",
        .num_counters           = 4,
        .num_boxes              = 1,
        .perf_ctr_bits          = 48,
        .perf_ctr               = SNBEP_PCU_MSR_PMON_CTR0,
        .event_ctl              = SNBEP_PCU_MSR_PMON_CTL0,
        .event_mask             = IVT_PCU_MSR_PMON_RAW_EVENT_MASK,
        .box_ctl                = SNBEP_PCU_MSR_PMON_BOX_CTL,
        .num_shared_regs        = 1,
        .ops                    = &ivt_uncore_pcu_ops,
        .format_group           = &ivt_uncore_pcu_format_group,
};
static struct intel_uncore_type *ivt_msr_uncores[] = {
        &ivt_uncore_ubox,
        &ivt_uncore_cbox,
        &ivt_uncore_pcu,
        NULL,
};

static struct intel_uncore_type ivt_uncore_ha = {
        .name           = "ha",
        .num_counters   = 4,
        .num_boxes      = 2,
        .perf_ctr_bits  = 48,
        IVT_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type ivt_uncore_imc = {
        .name           = "imc",
        .num_counters   = 4,
        .num_boxes      = 8,
        .perf_ctr_bits  = 48,
        .fixed_ctr_bits = 48,
        .fixed_ctr      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
        .fixed_ctl      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
        IVT_UNCORE_PCI_COMMON_INIT(),
};

/* registers in IRP boxes are not properly aligned */
static unsigned ivt_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4};
static unsigned ivt_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0};
static void ivt_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
        struct pci_dev *pdev = box->pci_dev;
        struct hw_perf_event *hwc = &event->hw;

        pci_write_config_dword(pdev, ivt_uncore_irp_ctls[hwc->idx],
                               hwc->config | SNBEP_PMON_CTL_EN);
}

static void ivt_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
        struct pci_dev *pdev = box->pci_dev;
        struct hw_perf_event *hwc = &event->hw;

        pci_write_config_dword(pdev, ivt_uncore_irp_ctls[hwc->idx], hwc->config);
}

static u64 ivt_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
        struct pci_dev *pdev = box->pci_dev;
        struct hw_perf_event *hwc = &event->hw;
        u64 count = 0;

        pci_read_config_dword(pdev, ivt_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
        pci_read_config_dword(pdev, ivt_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);

        return count;
}
static struct intel_uncore_ops ivt_uncore_irp_ops = {
        .init_box       = ivt_uncore_pci_init_box,
        .disable_box    = snbep_uncore_pci_disable_box,
        .enable_box     = snbep_uncore_pci_enable_box,
        .disable_event  = ivt_uncore_irp_disable_event,
        .enable_event   = ivt_uncore_irp_enable_event,
        .read_counter   = ivt_uncore_irp_read_counter,
};

static struct intel_uncore_type ivt_uncore_irp = {
        .name                   = "irp",
        .num_counters           = 4,
        .num_boxes              = 1,
        .perf_ctr_bits          = 48,
        .event_mask             = IVT_PMON_RAW_EVENT_MASK,
        .box_ctl                = SNBEP_PCI_PMON_BOX_CTL,
        .ops                    = &ivt_uncore_irp_ops,
        .format_group           = &ivt_uncore_format_group,
};
static struct intel_uncore_ops ivt_uncore_qpi_ops = {
        .init_box       = ivt_uncore_pci_init_box,
        .disable_box    = snbep_uncore_pci_disable_box,
        .enable_box     = snbep_uncore_pci_enable_box,
        .disable_event  = snbep_uncore_pci_disable_event,
        .enable_event   = snbep_qpi_enable_event,
        .read_counter   = snbep_uncore_pci_read_counter,
        .hw_config      = snbep_qpi_hw_config,
        .get_constraint = uncore_get_constraint,
        .put_constraint = uncore_put_constraint,
};

static struct intel_uncore_type ivt_uncore_qpi = {
        .name                   = "qpi",
        .num_counters           = 4,
        .num_boxes              = 3,
        .perf_ctr_bits          = 48,
        .perf_ctr               = SNBEP_PCI_PMON_CTR0,
        .event_ctl              = SNBEP_PCI_PMON_CTL0,
        .event_mask             = IVT_QPI_PCI_PMON_RAW_EVENT_MASK,
        .box_ctl                = SNBEP_PCI_PMON_BOX_CTL,
        .num_shared_regs        = 1,
        .ops                    = &ivt_uncore_qpi_ops,
        .format_group           = &ivt_uncore_qpi_format_group,
};

static struct intel_uncore_type ivt_uncore_r2pcie = {
        .name           = "r2pcie",
        .num_counters   = 4,
        .num_boxes      = 1,
        .perf_ctr_bits  = 44,
        .constraints    = snbep_uncore_r2pcie_constraints,
        IVT_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type ivt_uncore_r3qpi = {
        .name           = "r3qpi",
        .num_counters   = 3,
        .num_boxes      = 2,
        .perf_ctr_bits  = 44,
        .constraints    = snbep_uncore_r3qpi_constraints,
        IVT_UNCORE_PCI_COMMON_INIT(),
};
enum {
        IVT_PCI_UNCORE_HA,
        IVT_PCI_UNCORE_IMC,
        IVT_PCI_UNCORE_IRP,
        IVT_PCI_UNCORE_QPI,
        IVT_PCI_UNCORE_R2PCIE,
        IVT_PCI_UNCORE_R3QPI,
};

static struct intel_uncore_type *ivt_pci_uncores[] = {
        [IVT_PCI_UNCORE_HA]     = &ivt_uncore_ha,
        [IVT_PCI_UNCORE_IMC]    = &ivt_uncore_imc,
        [IVT_PCI_UNCORE_IRP]    = &ivt_uncore_irp,
        [IVT_PCI_UNCORE_QPI]    = &ivt_uncore_qpi,
        [IVT_PCI_UNCORE_R2PCIE] = &ivt_uncore_r2pcie,
        [IVT_PCI_UNCORE_R3QPI]  = &ivt_uncore_r3qpi,
        NULL,
};
static DEFINE_PCI_DEVICE_TABLE(ivt_uncore_pci_ids) = {
        { /* Home Agent 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
                .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_HA, 0),
        },
        { /* Home Agent 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
                .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_HA, 1),
        },
        { /* MC0 Channel 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
                .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 0),
        },
        { /* MC0 Channel 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
                .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 1),
        },
        { /* MC0 Channel 3 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
                .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 2),
        },
        { /* MC0 Channel 4 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
                .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 3),
        },
        { /* MC1 Channel 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
                .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 4),
        },
        { /* MC1 Channel 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
                .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 5),
        },
        { /* MC1 Channel 3 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
                .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 6),
        },
        { /* MC1 Channel 4 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
                .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 7),
        },
        { /* IRP */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39),
                .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IRP, 0),
        },
        { /* QPI0 Port 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
                .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 0),
        },
        { /* QPI0 Port 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
                .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 1),
        },
        { /* QPI1 Port 2 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
                .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 2),
        },
        { /* R2PCIe */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
                .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R2PCIE, 0),
        },
        { /* R3QPI0 Link 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
                .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 0),
        },
        { /* R3QPI0 Link 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
                .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 1),
        },
        { /* R3QPI1 Link 2 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
                .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 2),
        },
        { /* QPI Port 0 filter */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86),
                .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
                                                   SNBEP_PCI_QPI_PORT0_FILTER),
        },
        { /* QPI Port 1 filter */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96),
                .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
                                                   SNBEP_PCI_QPI_PORT1_FILTER),
        },
        { /* end: all zeroes */ }
};

static struct pci_driver ivt_uncore_pci_driver = {
        .name           = "ivt_uncore",
        .id_table       = ivt_uncore_pci_ids,
};
/* end of IvyTown uncore support */
/* Sandy Bridge uncore support */
static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        if (hwc->idx < UNCORE_PMC_IDX_FIXED)
                wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
        else
                wrmsrl(hwc->config_base, SNB_UNC_CTL_EN);
}

static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
        wrmsrl(event->hw.config_base, 0);
}

static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
{
        if (box->pmu->pmu_idx == 0) {
                wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
                        SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
        }
}
static struct uncore_event_desc snb_uncore_events[] = {
        INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
        { /* end: all zeroes */ },
};

static struct attribute *snb_uncore_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_cmask5.attr,
        NULL,
};

static struct attribute_group snb_uncore_format_group = {
        .name           = "format",
        .attrs          = snb_uncore_formats_attr,
};

static struct intel_uncore_ops snb_uncore_msr_ops = {
        .init_box       = snb_uncore_msr_init_box,
        .disable_event  = snb_uncore_msr_disable_event,
        .enable_event   = snb_uncore_msr_enable_event,
        .read_counter   = uncore_msr_read_counter,
};

static struct event_constraint snb_uncore_cbox_constraints[] = {
        UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
        EVENT_CONSTRAINT_END
};
static struct intel_uncore_type snb_uncore_cbox = {
        .name           = "cbox",
        .num_counters   = 2,
        .num_boxes      = 4,
        .perf_ctr_bits  = 44,
        .fixed_ctr_bits = 48,
        .perf_ctr       = SNB_UNC_CBO_0_PER_CTR0,
        .event_ctl      = SNB_UNC_CBO_0_PERFEVTSEL0,
        .fixed_ctr      = SNB_UNC_FIXED_CTR,
        .fixed_ctl      = SNB_UNC_FIXED_CTR_CTRL,
        .single_fixed   = 1,
        .event_mask     = SNB_UNC_RAW_EVENT_MASK,
        .msr_offset     = SNB_UNC_CBO_MSR_OFFSET,
        .constraints    = snb_uncore_cbox_constraints,
        .ops            = &snb_uncore_msr_ops,
        .format_group   = &snb_uncore_format_group,
        .event_descs    = snb_uncore_events,
};

static struct intel_uncore_type *snb_msr_uncores[] = {
        &snb_uncore_cbox,
        NULL,
};

enum {
        SNB_PCI_UNCORE_IMC,
};
static struct uncore_event_desc snb_uncore_imc_events[] = {
        INTEL_UNCORE_EVENT_DESC(data_reads,  "event=0x01"),
        INTEL_UNCORE_EVENT_DESC(data_reads.scale, "6.103515625e-5"),
        INTEL_UNCORE_EVENT_DESC(data_reads.unit, "MiB"),

        INTEL_UNCORE_EVENT_DESC(data_writes, "event=0x02"),
        INTEL_UNCORE_EVENT_DESC(data_writes.scale, "6.103515625e-5"),
        INTEL_UNCORE_EVENT_DESC(data_writes.unit, "MiB"),

        { /* end: all zeroes */ },
};

#define SNB_UNCORE_PCI_IMC_EVENT_MASK           0xff
#define SNB_UNCORE_PCI_IMC_BAR_OFFSET           0x48

/* page size multiple covering all config regs */
#define SNB_UNCORE_PCI_IMC_MAP_SIZE             0x6000

#define SNB_UNCORE_PCI_IMC_DATA_READS           0x1
#define SNB_UNCORE_PCI_IMC_DATA_READS_BASE      0x5050
#define SNB_UNCORE_PCI_IMC_DATA_WRITES          0x2
#define SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE     0x5054
#define SNB_UNCORE_PCI_IMC_CTR_BASE             SNB_UNCORE_PCI_IMC_DATA_READS_BASE
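/*
 * Note (illustrative): these free-running counters count 64-byte cache
 * lines, which is where the 6.103515625e-5 scale in the event
 * descriptions above comes from: 64 / (1024 * 1024) = 6.103515625e-5,
 * converting raw counts directly to MiB.
 */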
static struct attribute *snb_uncore_imc_formats_attr[] = {
        &format_attr_event.attr,
        NULL,
};

static struct attribute_group snb_uncore_imc_format_group = {
        .name = "format",
        .attrs = snb_uncore_imc_formats_attr,
};

static void snb_uncore_imc_init_box(struct intel_uncore_box *box)
{
        struct pci_dev *pdev = box->pci_dev;
        int where = SNB_UNCORE_PCI_IMC_BAR_OFFSET;
        resource_size_t addr;
        u32 pci_dword;

        pci_read_config_dword(pdev, where, &pci_dword);
        addr = pci_dword;

#ifdef CONFIG_PHYS_ADDR_T_64BIT
        pci_read_config_dword(pdev, where + 4, &pci_dword);
        addr |= ((resource_size_t)pci_dword << 32);
#endif

        addr &= ~(PAGE_SIZE - 1);

        box->io_addr = ioremap(addr, SNB_UNCORE_PCI_IMC_MAP_SIZE);
        box->hrtimer_duration = UNCORE_SNB_IMC_HRTIMER_INTERVAL;
}
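/*
 * Note (illustrative): unlike the other uncore PMUs, the client IMC
 * counters sit behind a memory BAR rather than in config space.  Init
 * reads the (possibly 64-bit) BAR at config offset 0x48, page-aligns it
 * and ioremap()s SNB_UNCORE_PCI_IMC_MAP_SIZE bytes, so that reading a
 * counter below is a single MMIO load.
 */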
static void snb_uncore_imc_enable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_disable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{}

static void snb_uncore_imc_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{}

static u64 snb_uncore_imc_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        return (u64)*(unsigned int *)(box->io_addr + hwc->event_base);
}
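/*
 * Note (illustrative): the counters are free-running and only 32 bits
 * wide (.fixed_ctr_bits = 32 below), so there is no per-event enable.
 * The hrtimer armed in snb_uncore_imc_event_start() re-reads them often
 * enough that wrap-around is absorbed by the generic delta logic in
 * uncore_perf_event_update().
 */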
/*
 * custom event_init() function because we define our own fixed, free
 * running counters, so we do not want to conflict with generic uncore
 * logic. Also simplifies processing
 */
static int snb_uncore_imc_event_init(struct perf_event *event)
{
        struct intel_uncore_pmu *pmu;
        struct intel_uncore_box *box;
        struct hw_perf_event *hwc = &event->hw;
        u64 cfg = event->attr.config & SNB_UNCORE_PCI_IMC_EVENT_MASK;
        int idx, base;

        if (event->attr.type != event->pmu->type)
                return -ENOENT;

        pmu = uncore_event_to_pmu(event);
        /* no device found for this pmu */
        if (pmu->func_id < 0)
                return -ENOENT;

        /* Sampling not supported yet */
        if (hwc->sample_period)
                return -EINVAL;

        /* unsupported modes and filters */
        if (event->attr.exclude_user   ||
            event->attr.exclude_kernel ||
            event->attr.exclude_hv     ||
            event->attr.exclude_idle   ||
            event->attr.exclude_host   ||
            event->attr.exclude_guest  ||
            event->attr.sample_period) /* no sampling */
                return -EINVAL;

        /*
         * Place all uncore events for a particular physical package
         * onto a single cpu
         */
        if (event->cpu < 0)
                return -EINVAL;

        /* check only supported bits are set */
        if (event->attr.config & ~SNB_UNCORE_PCI_IMC_EVENT_MASK)
                return -EINVAL;

        box = uncore_pmu_to_box(pmu, event->cpu);
        if (!box || box->cpu < 0)
                return -EINVAL;

        event->cpu = box->cpu;

        event->hw.idx = -1;
        event->hw.last_tag = ~0ULL;
        event->hw.extra_reg.idx = EXTRA_REG_NONE;
        event->hw.branch_reg.idx = EXTRA_REG_NONE;

        /*
         * check event is known (whitelist, determines counter)
         */
        switch (cfg) {
        case SNB_UNCORE_PCI_IMC_DATA_READS:
                base = SNB_UNCORE_PCI_IMC_DATA_READS_BASE;
                idx = UNCORE_PMC_IDX_FIXED;
                break;
        case SNB_UNCORE_PCI_IMC_DATA_WRITES:
                base = SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE;
                idx = UNCORE_PMC_IDX_FIXED + 1;
                break;
        default:
                return -EINVAL;
        }

        /* must be done before validate_group */
        event->hw.event_base = base;
        event->hw.config = cfg;
        event->hw.idx = idx;

        /* no group validation needed, we have free running counters */

        return 0;
}

static int snb_uncore_imc_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
        return 0;
}
static void snb_uncore_imc_event_start(struct perf_event *event, int flags)
{
        struct intel_uncore_box *box = uncore_event_to_box(event);
        u64 count;

        if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
                return;

        event->hw.state = 0;
        box->n_active++;

        list_add_tail(&event->active_entry, &box->active_list);

        count = snb_uncore_imc_read_counter(box, event);
        local64_set(&event->hw.prev_count, count);

        if (box->n_active == 1)
                uncore_pmu_start_hrtimer(box);
}

static void snb_uncore_imc_event_stop(struct perf_event *event, int flags)
{
        struct intel_uncore_box *box = uncore_event_to_box(event);
        struct hw_perf_event *hwc = &event->hw;

        if (!(hwc->state & PERF_HES_STOPPED)) {
                box->n_active--;

                WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
                hwc->state |= PERF_HES_STOPPED;

                list_del(&event->active_entry);

                if (box->n_active == 0)
                        uncore_pmu_cancel_hrtimer(box);
        }

        if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
                /*
                 * Drain the remaining delta count out of a event
                 * that we are disabling:
                 */
                uncore_perf_event_update(box, event);
                hwc->state |= PERF_HES_UPTODATE;
        }
}
static int snb_uncore_imc_event_add(struct perf_event *event, int flags)
{
        struct intel_uncore_box *box = uncore_event_to_box(event);
        struct hw_perf_event *hwc = &event->hw;

        if (!box)
                return -ENODEV;

        hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
        if (!(flags & PERF_EF_START))
                hwc->state |= PERF_HES_ARCH;

        snb_uncore_imc_event_start(event, 0);

        box->n_events++;

        return 0;
}

static void snb_uncore_imc_event_del(struct perf_event *event, int flags)
{
        struct intel_uncore_box *box = uncore_event_to_box(event);
        int i;

        snb_uncore_imc_event_stop(event, PERF_EF_UPDATE);

        for (i = 0; i < box->n_events; i++) {
                if (event == box->event_list[i]) {
                        --box->n_events;
                        break;
                }
        }
}
static int snb_pci2phy_map_init(int devid)
{
        struct pci_dev *dev = NULL;
        int bus;

        dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, dev);
        if (!dev)
                return -ENOTTY;

        bus = dev->bus->number;

        pcibus_to_physid[bus] = 0;

        pci_dev_put(dev);

        return 0;
}
static struct pmu snb_uncore_imc_pmu = {
        .task_ctx_nr    = perf_invalid_context,
        .event_init     = snb_uncore_imc_event_init,
        .add            = snb_uncore_imc_event_add,
        .del            = snb_uncore_imc_event_del,
        .start          = snb_uncore_imc_event_start,
        .stop           = snb_uncore_imc_event_stop,
        .read           = uncore_pmu_event_read,
};

static struct intel_uncore_ops snb_uncore_imc_ops = {
        .init_box       = snb_uncore_imc_init_box,
        .enable_box     = snb_uncore_imc_enable_box,
        .disable_box    = snb_uncore_imc_disable_box,
        .disable_event  = snb_uncore_imc_disable_event,
        .enable_event   = snb_uncore_imc_enable_event,
        .hw_config      = snb_uncore_imc_hw_config,
        .read_counter   = snb_uncore_imc_read_counter,
};
static struct intel_uncore_type snb_uncore_imc = {
        .name           = "imc",
        .num_counters   = 2,
        .num_boxes      = 1,
        .fixed_ctr_bits = 32,
        .fixed_ctr      = SNB_UNCORE_PCI_IMC_CTR_BASE,
        .event_descs    = snb_uncore_imc_events,
        .format_group   = &snb_uncore_imc_format_group,
        .perf_ctr       = SNB_UNCORE_PCI_IMC_DATA_READS_BASE,
        .event_mask     = SNB_UNCORE_PCI_IMC_EVENT_MASK,
        .ops            = &snb_uncore_imc_ops,
        .pmu            = &snb_uncore_imc_pmu,
};

static struct intel_uncore_type *snb_pci_uncores[] = {
        [SNB_PCI_UNCORE_IMC]    = &snb_uncore_imc,
        NULL,
};
1987 static DEFINE_PCI_DEVICE_TABLE(snb_uncore_pci_ids
) = {
1989 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_SNB_IMC
),
1990 .driver_data
= UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC
, 0),
1992 { /* end: all zeroes */ },
1995 static DEFINE_PCI_DEVICE_TABLE(ivb_uncore_pci_ids
) = {
1997 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_IVB_IMC
),
1998 .driver_data
= UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC
, 0),
2000 { /* end: all zeroes */ },
2003 static DEFINE_PCI_DEVICE_TABLE(hsw_uncore_pci_ids
) = {
2005 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_HSW_IMC
),
2006 .driver_data
= UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC
, 0),
2008 { /* end: all zeroes */ },
2011 static struct pci_driver snb_uncore_pci_driver
= {
2012 .name
= "snb_uncore",
2013 .id_table
= snb_uncore_pci_ids
,
2016 static struct pci_driver ivb_uncore_pci_driver
= {
2017 .name
= "ivb_uncore",
2018 .id_table
= ivb_uncore_pci_ids
,
2021 static struct pci_driver hsw_uncore_pci_driver
= {
2022 .name
= "hsw_uncore",
2023 .id_table
= hsw_uncore_pci_ids
,
2026 /* end of Sandy Bridge uncore support */
2028 /* Nehalem uncore support */
2029 static void nhm_uncore_msr_disable_box(struct intel_uncore_box
*box
)
2031 wrmsrl(NHM_UNC_PERF_GLOBAL_CTL
, 0);
2034 static void nhm_uncore_msr_enable_box(struct intel_uncore_box
*box
)
2036 wrmsrl(NHM_UNC_PERF_GLOBAL_CTL
, NHM_UNC_GLOBAL_CTL_EN_PC_ALL
| NHM_UNC_GLOBAL_CTL_EN_FC
);
2039 static void nhm_uncore_msr_enable_event(struct intel_uncore_box
*box
, struct perf_event
*event
)
2041 struct hw_perf_event
*hwc
= &event
->hw
;
2043 if (hwc
->idx
< UNCORE_PMC_IDX_FIXED
)
2044 wrmsrl(hwc
->config_base
, hwc
->config
| SNB_UNC_CTL_EN
);
2046 wrmsrl(hwc
->config_base
, NHM_UNC_FIXED_CTR_CTL_EN
);
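/*
 * Note: for the fixed counter (idx >= UNCORE_PMC_IDX_FIXED) only the
 * enable bit is written; the fixed counter counts uncore clockticks
 * (the magic event=0xff encoding above), so there is no event select
 * field to program.
 */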
static struct attribute *nhm_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask8.attr,
	NULL,
};

static struct attribute_group nhm_uncore_format_group = {
	.name = "format",
	.attrs = nhm_uncore_formats_attr,
};

static struct uncore_event_desc nhm_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,                "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any,       "event=0x2f,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any,      "event=0x2c,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads,     "event=0x20,umask=0x01"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes,    "event=0x20,umask=0x02"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads,  "event=0x20,umask=0x04"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads,   "event=0x20,umask=0x10"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes,  "event=0x20,umask=0x20"),
	{ /* end: all zeroes */ },
};

static struct intel_uncore_ops nhm_uncore_msr_ops = {
	.disable_box	= nhm_uncore_msr_disable_box,
	.enable_box	= nhm_uncore_msr_enable_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= nhm_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

static struct intel_uncore_type nhm_uncore = {
	.name		= "",
	.num_counters	= 8,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.event_ctl	= NHM_UNC_PERFEVTSEL0,
	.perf_ctr	= NHM_UNC_UNCORE_PMC0,
	.fixed_ctr	= NHM_UNC_FIXED_CTR,
	.fixed_ctl	= NHM_UNC_FIXED_CTR_CTRL,
	.event_mask	= NHM_UNC_RAW_EVENT_MASK,
	.event_descs	= nhm_uncore_events,
	.ops		= &nhm_uncore_msr_ops,
	.format_group	= &nhm_uncore_format_group,
};

static struct intel_uncore_type *nhm_msr_uncores[] = {
	&nhm_uncore,
	NULL,
};
/* end of Nehalem uncore support */

/* Nehalem-EX uncore support */
DEFINE_UNCORE_FORMAT_ATTR(event5, event, "config:1-5");
DEFINE_UNCORE_FORMAT_ATTR(counter, counter, "config:6-7");
DEFINE_UNCORE_FORMAT_ATTR(match, match, "config1:0-63");
DEFINE_UNCORE_FORMAT_ATTR(mask, mask, "config2:0-63");

static void nhmex_uncore_msr_init_box(struct intel_uncore_box *box)
{
	wrmsrl(NHMEX_U_MSR_PMON_GLOBAL_CTL, NHMEX_U_PMON_GLOBAL_EN_ALL);
}

static void nhmex_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);
	u64 config;

	if (msr) {
		rdmsrl(msr, config);
		config &= ~((1ULL << uncore_num_counters(box)) - 1);
		/* WBox has a fixed counter */
		if (uncore_msr_fixed_ctl(box))
			config &= ~NHMEX_W_PMON_GLOBAL_FIXED_EN;
		wrmsrl(msr, config);
	}
}

static void nhmex_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);
	u64 config;

	if (msr) {
		rdmsrl(msr, config);
		config |= (1ULL << uncore_num_counters(box)) - 1;
		/* WBox has a fixed counter */
		if (uncore_msr_fixed_ctl(box))
			config |= NHMEX_W_PMON_GLOBAL_FIXED_EN;
		wrmsrl(msr, config);
	}
}

static void nhmex_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	wrmsrl(event->hw.config_base, 0);
}

static void nhmex_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->idx >= UNCORE_PMC_IDX_FIXED)
		wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0);
	else if (box->pmu->type->event_mask & NHMEX_PMON_CTL_EN_BIT0)
		wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
	else
		wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
}
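/*
 * Different NHM-EX boxes place the enable bit at different positions in
 * the control register: if bit 0 is claimed by the box's event mask it
 * carries event-select state, so bit 22 serves as the enable bit there
 * instead.
 */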
#define NHMEX_UNCORE_OPS_COMMON_INIT()				\
	.init_box	= nhmex_uncore_msr_init_box,		\
	.disable_box	= nhmex_uncore_msr_disable_box,		\
	.enable_box	= nhmex_uncore_msr_enable_box,		\
	.disable_event	= nhmex_uncore_msr_disable_event,	\
	.read_counter	= uncore_msr_read_counter

static struct intel_uncore_ops nhmex_uncore_ops = {
	NHMEX_UNCORE_OPS_COMMON_INIT(),
	.enable_event	= nhmex_uncore_msr_enable_event,
};

static struct attribute *nhmex_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_edge.attr,
	NULL,
};

static struct attribute_group nhmex_uncore_ubox_format_group = {
	.name	= "format",
	.attrs	= nhmex_uncore_ubox_formats_attr,
};

static struct intel_uncore_type nhmex_uncore_ubox = {
	.name		= "ubox",
	.num_counters	= 1,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.event_ctl	= NHMEX_U_MSR_PMON_EV_SEL,
	.perf_ctr	= NHMEX_U_MSR_PMON_CTR,
	.event_mask	= NHMEX_U_PMON_RAW_EVENT_MASK,
	.box_ctl	= NHMEX_U_MSR_PMON_GLOBAL_CTL,
	.ops		= &nhmex_uncore_ops,
	.format_group	= &nhmex_uncore_ubox_format_group
};

static struct attribute *nhmex_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static struct attribute_group nhmex_uncore_cbox_format_group = {
	.name = "format",
	.attrs = nhmex_uncore_cbox_formats_attr,
};

/* msr offset for each instance of cbox */
static unsigned nhmex_cbox_msr_offsets[] = {
	0x0, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x240, 0x2c0,
};

static struct intel_uncore_type nhmex_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 6,
	.num_boxes		= 10,
	.perf_ctr_bits		= 48,
	.event_ctl		= NHMEX_C0_MSR_PMON_EV_SEL0,
	.perf_ctr		= NHMEX_C0_MSR_PMON_CTR0,
	.event_mask		= NHMEX_PMON_RAW_EVENT_MASK,
	.box_ctl		= NHMEX_C0_MSR_PMON_GLOBAL_CTL,
	.msr_offsets		= nhmex_cbox_msr_offsets,
	.pair_ctr_ctl		= 1,
	.ops			= &nhmex_uncore_ops,
	.format_group		= &nhmex_uncore_cbox_format_group
};

static struct uncore_event_desc nhmex_uncore_wbox_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0"),
	{ /* end: all zeroes */ },
};

static struct intel_uncore_type nhmex_uncore_wbox = {
	.name			= "wbox",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_ctl		= NHMEX_W_MSR_PMON_CNT0,
	.perf_ctr		= NHMEX_W_MSR_PMON_EVT_SEL0,
	.fixed_ctr		= NHMEX_W_MSR_PMON_FIXED_CTR,
	.fixed_ctl		= NHMEX_W_MSR_PMON_FIXED_CTL,
	.event_mask		= NHMEX_PMON_RAW_EVENT_MASK,
	.box_ctl		= NHMEX_W_MSR_GLOBAL_CTL,
	.pair_ctr_ctl		= 1,
	.event_descs		= nhmex_uncore_wbox_events,
	.ops			= &nhmex_uncore_ops,
	.format_group		= &nhmex_uncore_cbox_format_group
};
static int nhmex_bbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
	int ctr, ev_sel;

	ctr = (hwc->config & NHMEX_B_PMON_CTR_MASK) >>
		NHMEX_B_PMON_CTR_SHIFT;
	ev_sel = (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK) >>
		  NHMEX_B_PMON_CTL_EV_SEL_SHIFT;

	/* events that do not use the match/mask registers */
	if ((ctr == 0 && ev_sel > 0x3) || (ctr == 1 && ev_sel > 0x6) ||
	    (ctr == 2 && ev_sel != 0x4) || ctr == 3)
		return 0;

	if (box->pmu->pmu_idx == 0)
		reg1->reg = NHMEX_B0_MSR_MATCH;
	else
		reg1->reg = NHMEX_B1_MSR_MATCH;
	reg1->idx = 0;
	reg1->config = event->attr.config1;
	reg2->config = event->attr.config2;
	return 0;
}

static void nhmex_bbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		wrmsrl(reg1->reg, reg1->config);
		wrmsrl(reg1->reg + 1, reg2->config);
	}

	wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
		(hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK));
}

/*
 * The Bbox has 4 counters, but each counter monitors different events.
 * Use bits 6-7 in the event config to select counter.
 */
static struct event_constraint nhmex_uncore_bbox_constraints[] = {
	EVENT_CONSTRAINT(0   , 1, 0xc0),
	EVENT_CONSTRAINT(0x40, 2, 0xc0),
	EVENT_CONSTRAINT(0x80, 4, 0xc0),
	EVENT_CONSTRAINT(0xc0, 8, 0xc0),
	EVENT_CONSTRAINT_END,
};
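/*
 * Example: EVENT_CONSTRAINT(0x40, 2, 0xc0) matches events whose config
 * has bits 6-7 equal to 01 (0x40 under cmask 0xc0) and constrains them
 * to counter index mask 0x2, i.e. counter 1.  The "counter" format
 * attribute (config:6-7) exposes this selector to userspace.
 */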
static struct attribute *nhmex_uncore_bbox_formats_attr[] = {
	&format_attr_event5.attr,
	&format_attr_counter.attr,
	&format_attr_match.attr,
	&format_attr_mask.attr,
	NULL,
};

static struct attribute_group nhmex_uncore_bbox_format_group = {
	.name = "format",
	.attrs = nhmex_uncore_bbox_formats_attr,
};

static struct intel_uncore_ops nhmex_uncore_bbox_ops = {
	NHMEX_UNCORE_OPS_COMMON_INIT(),
	.enable_event		= nhmex_bbox_msr_enable_event,
	.hw_config		= nhmex_bbox_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};

static struct intel_uncore_type nhmex_uncore_bbox = {
	.name			= "bbox",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.event_ctl		= NHMEX_B0_MSR_PMON_CTL0,
	.perf_ctr		= NHMEX_B0_MSR_PMON_CTR0,
	.event_mask		= NHMEX_B_PMON_RAW_EVENT_MASK,
	.box_ctl		= NHMEX_B0_MSR_PMON_GLOBAL_CTL,
	.msr_offset		= NHMEX_B_MSR_OFFSET,
	.pair_ctr_ctl		= 1,
	.num_shared_regs	= 1,
	.constraints		= nhmex_uncore_bbox_constraints,
	.ops			= &nhmex_uncore_bbox_ops,
	.format_group		= &nhmex_uncore_bbox_format_group
};
static int nhmex_sbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	/* only TO_R_PROG_EV event uses the match/mask register */
	if ((hwc->config & NHMEX_PMON_CTL_EV_SEL_MASK) !=
	    NHMEX_S_EVENT_TO_R_PROG_EV)
		return 0;

	if (box->pmu->pmu_idx == 0)
		reg1->reg = NHMEX_S0_MSR_MM_CFG;
	else
		reg1->reg = NHMEX_S1_MSR_MM_CFG;
	reg1->idx = 0;
	reg1->config = event->attr.config1;
	reg2->config = event->attr.config2;
	return 0;
}

static void nhmex_sbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		wrmsrl(reg1->reg, 0);
		wrmsrl(reg1->reg + 1, reg1->config);
		wrmsrl(reg1->reg + 2, reg2->config);
		wrmsrl(reg1->reg, NHMEX_S_PMON_MM_CFG_EN);
	}

	wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
}

static struct attribute *nhmex_uncore_sbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_match.attr,
	&format_attr_mask.attr,
	NULL,
};

static struct attribute_group nhmex_uncore_sbox_format_group = {
	.name = "format",
	.attrs = nhmex_uncore_sbox_formats_attr,
};

static struct intel_uncore_ops nhmex_uncore_sbox_ops = {
	NHMEX_UNCORE_OPS_COMMON_INIT(),
	.enable_event		= nhmex_sbox_msr_enable_event,
	.hw_config		= nhmex_sbox_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};

static struct intel_uncore_type nhmex_uncore_sbox = {
	.name			= "sbox",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.event_ctl		= NHMEX_S0_MSR_PMON_CTL0,
	.perf_ctr		= NHMEX_S0_MSR_PMON_CTR0,
	.event_mask		= NHMEX_PMON_RAW_EVENT_MASK,
	.box_ctl		= NHMEX_S0_MSR_PMON_GLOBAL_CTL,
	.msr_offset		= NHMEX_S_MSR_OFFSET,
	.pair_ctr_ctl		= 1,
	.num_shared_regs	= 1,
	.ops			= &nhmex_uncore_sbox_ops,
	.format_group		= &nhmex_uncore_sbox_format_group
};
enum {
	EXTRA_REG_NHMEX_M_FILTER,
	EXTRA_REG_NHMEX_M_DSP,
	EXTRA_REG_NHMEX_M_ISS,
	EXTRA_REG_NHMEX_M_MAP,
	EXTRA_REG_NHMEX_M_MSC_THR,
	EXTRA_REG_NHMEX_M_PGT,
	EXTRA_REG_NHMEX_M_PLD,
	EXTRA_REG_NHMEX_M_ZDP_CTL_FVC,
};

static struct extra_reg nhmex_uncore_mbox_extra_regs[] = {
	MBOX_INC_SEL_EXTAR_REG(0x0, DSP),
	MBOX_INC_SEL_EXTAR_REG(0x4, MSC_THR),
	MBOX_INC_SEL_EXTAR_REG(0x5, MSC_THR),
	MBOX_INC_SEL_EXTAR_REG(0x9, ISS),
	/* event 0xa uses two extra registers */
	MBOX_INC_SEL_EXTAR_REG(0xa, ISS),
	MBOX_INC_SEL_EXTAR_REG(0xa, PLD),
	MBOX_INC_SEL_EXTAR_REG(0xb, PLD),
	/* events 0xd ~ 0x10 use the same extra register */
	MBOX_INC_SEL_EXTAR_REG(0xd, ZDP_CTL_FVC),
	MBOX_INC_SEL_EXTAR_REG(0xe, ZDP_CTL_FVC),
	MBOX_INC_SEL_EXTAR_REG(0xf, ZDP_CTL_FVC),
	MBOX_INC_SEL_EXTAR_REG(0x10, ZDP_CTL_FVC),
	MBOX_INC_SEL_EXTAR_REG(0x16, PGT),
	MBOX_SET_FLAG_SEL_EXTRA_REG(0x0, DSP),
	MBOX_SET_FLAG_SEL_EXTRA_REG(0x1, ISS),
	MBOX_SET_FLAG_SEL_EXTRA_REG(0x5, PGT),
	MBOX_SET_FLAG_SEL_EXTRA_REG(0x6, MAP),
	EVENT_EXTRA_END
};

/* Nehalem-EX or Westmere-EX ? */
static bool uncore_nhmex;
static bool nhmex_mbox_get_shared_reg(struct intel_uncore_box *box, int idx, u64 config)
{
	struct intel_uncore_extra_reg *er;
	unsigned long flags;
	bool ret = false;
	u64 mask;

	if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
		er = &box->shared_regs[idx];
		raw_spin_lock_irqsave(&er->lock, flags);
		if (!atomic_read(&er->ref) || er->config == config) {
			atomic_inc(&er->ref);
			er->config = config;
			ret = true;
		}
		raw_spin_unlock_irqrestore(&er->lock, flags);

		return ret;
	}
	/*
	 * The ZDP_CTL_FVC MSR has 4 fields which are used to control
	 * events 0xd ~ 0x10. Besides these 4 fields, there are additional
	 * fields which are shared.
	 */
	idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
	if (WARN_ON_ONCE(idx >= 4))
		return false;

	/* mask of the shared fields */
	if (uncore_nhmex)
		mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK;
	else
		mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK;
	er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];

	raw_spin_lock_irqsave(&er->lock, flags);
	/* add mask of the non-shared field if it's in use */
	if (__BITS_VALUE(atomic_read(&er->ref), idx, 8)) {
		if (uncore_nhmex)
			mask |= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
		else
			mask |= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
	}

	if (!atomic_read(&er->ref) || !((er->config ^ config) & mask)) {
		atomic_add(1 << (idx * 8), &er->ref);
		if (uncore_nhmex)
			mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK |
				NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
		else
			mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK |
				WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
		er->config &= ~mask;
		er->config |= (config & mask);
		ret = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	return ret;
}
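/*
 * er->ref is used here as four packed 8-bit reference counts, one per
 * ZDP_CTL_FVC event field: atomic_add(1 << (idx * 8), &er->ref) bumps
 * the count for field idx, and __BITS_VALUE(ref, idx, 8) reads it back.
 */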
static void nhmex_mbox_put_shared_reg(struct intel_uncore_box *box, int idx)
{
	struct intel_uncore_extra_reg *er;

	if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
		er = &box->shared_regs[idx];
		atomic_dec(&er->ref);
		return;
	}

	idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
	er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
	atomic_sub(1 << (idx * 8), &er->ref);
}

static u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	u64 idx, orig_idx = __BITS_VALUE(reg1->idx, 0, 8);
	u64 config = reg1->config;

	/* get the non-shared control bits and shift them */
	idx = orig_idx - EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
	if (uncore_nhmex)
		config &= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
	else
		config &= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
	if (new_idx > orig_idx) {
		idx = new_idx - orig_idx;
		config <<= 3 * idx;
	} else {
		idx = orig_idx - new_idx;
		config >>= 3 * idx;
	}

	/* add the shared control bits back */
	if (uncore_nhmex)
		config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
	else
		config |= WSMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
	if (modify) {
		/* adjust the main event selector */
		if (new_idx > orig_idx)
			hwc->config += idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT;
		else
			hwc->config -= idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT;
		reg1->config = config;
		reg1->idx = ~0xff | new_idx;
	}
	return config;
}
static struct event_constraint *
nhmex_mbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
	int i, idx[2], alloc = 0;
	u64 config1 = reg1->config;

	idx[0] = __BITS_VALUE(reg1->idx, 0, 8);
	idx[1] = __BITS_VALUE(reg1->idx, 1, 8);
again:
	for (i = 0; i < 2; i++) {
		if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
			idx[i] = 0xff;

		if (idx[i] == 0xff)
			continue;

		if (!nhmex_mbox_get_shared_reg(box, idx[i],
				__BITS_VALUE(config1, i, 32)))
			goto fail;
		alloc |= (0x1 << i);
	}

	/* for the match/mask registers */
	if (reg2->idx != EXTRA_REG_NONE &&
	    (uncore_box_is_fake(box) || !reg2->alloc) &&
	    !nhmex_mbox_get_shared_reg(box, reg2->idx, reg2->config))
		goto fail;

	/*
	 * If it's a fake box -- as per validate_{group,event}() we
	 * shouldn't touch event state and we can avoid doing so
	 * since both will only call get_event_constraints() once
	 * on each event, this avoids the need for reg->alloc.
	 */
	if (!uncore_box_is_fake(box)) {
		if (idx[0] != 0xff && idx[0] != __BITS_VALUE(reg1->idx, 0, 8))
			nhmex_mbox_alter_er(event, idx[0], true);
		reg1->alloc |= alloc;
		if (reg2->idx != EXTRA_REG_NONE)
			reg2->alloc = 1;
	}
	return NULL;
fail:
	if (idx[0] != 0xff && !(alloc & 0x1) &&
	    idx[0] >= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
		/*
		 * events 0xd ~ 0x10 are functionally identical, but are
		 * controlled by different fields in the ZDP_CTL_FVC
		 * register. If we failed to take one field, try the
		 * alternative ones.
		 */
		BUG_ON(__BITS_VALUE(reg1->idx, 1, 8) != 0xff);
		idx[0] -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
		idx[0] = (idx[0] + 1) % 4;
		idx[0] += EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
		if (idx[0] != __BITS_VALUE(reg1->idx, 0, 8)) {
			config1 = nhmex_mbox_alter_er(event, idx[0], false);
			goto again;
		}
	}

	if (alloc & 0x1)
		nhmex_mbox_put_shared_reg(box, idx[0]);
	if (alloc & 0x2)
		nhmex_mbox_put_shared_reg(box, idx[1]);
	return &constraint_empty;
}

static void nhmex_mbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;

	if (uncore_box_is_fake(box))
		return;

	if (reg1->alloc & 0x1)
		nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 0, 8));
	if (reg1->alloc & 0x2)
		nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 1, 8));
	reg1->alloc = 0;

	if (reg2->alloc) {
		nhmex_mbox_put_shared_reg(box, reg2->idx);
		reg2->alloc = 0;
	}
}
static int nhmex_mbox_extra_reg_idx(struct extra_reg *er)
{
	if (er->idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC)
		return er->idx;
	return er->idx + (er->event >> NHMEX_M_PMON_CTL_INC_SEL_SHIFT) - 0xd;
}

static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_type *type = box->pmu->type;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
	struct extra_reg *er;
	unsigned msr;
	int reg_idx = 0;
	/*
	 * The mbox events may require 2 extra MSRs at the most. But only
	 * the lower 32 bits in these MSRs are significant, so we can use
	 * config1 to pass two MSRs' config.
	 */
	for (er = nhmex_uncore_mbox_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		if (event->attr.config1 & ~er->valid_mask)
			return -EINVAL;

		msr = er->msr + type->msr_offset * box->pmu->pmu_idx;
		if (WARN_ON_ONCE(msr >= 0xffff || er->idx >= 0xff))
			return -EINVAL;

		/* always use the 32~63 bits to pass the PLD config */
		if (er->idx == EXTRA_REG_NHMEX_M_PLD)
			reg_idx = 1;
		else if (WARN_ON_ONCE(reg_idx > 0))
			return -EINVAL;

		reg1->idx &= ~(0xff << (reg_idx * 8));
		reg1->reg &= ~(0xffff << (reg_idx * 16));
		reg1->idx |= nhmex_mbox_extra_reg_idx(er) << (reg_idx * 8);
		reg1->reg |= msr << (reg_idx * 16);
		reg1->config = event->attr.config1;
		reg_idx++;
	}
	/*
	 * The mbox only provides the ability to perform address matching
	 * for the PLD events.
	 */
	if (reg_idx == 2) {
		reg2->idx = EXTRA_REG_NHMEX_M_FILTER;
		if (event->attr.config2 & NHMEX_M_PMON_MM_CFG_EN)
			reg2->config = event->attr.config2;
		else
			reg2->config = ~0ULL;
		if (box->pmu->pmu_idx == 0)
			reg2->reg = NHMEX_M0_MSR_PMU_MM_CFG;
		else
			reg2->reg = NHMEX_M1_MSR_PMU_MM_CFG;
	}
	return 0;
}
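/*
 * reg1 is thus used as a small packed array: reg1->reg carries up to
 * two MSR addresses (16 bits each), reg1->idx the matching
 * extra-register indices (8 bits each), and config1 the two 32-bit MSR
 * values.  The PLD config always travels in the upper half of config1.
 */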
static u64 nhmex_mbox_shared_reg_config(struct intel_uncore_box *box, int idx)
{
	struct intel_uncore_extra_reg *er;
	unsigned long flags;
	u64 config;

	if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC)
		return box->shared_regs[idx].config;

	er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
	raw_spin_lock_irqsave(&er->lock, flags);
	config = er->config;
	raw_spin_unlock_irqrestore(&er->lock, flags);
	return config;
}

static void nhmex_mbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
	int idx;

	idx = __BITS_VALUE(reg1->idx, 0, 8);
	if (idx != 0xff)
		wrmsrl(__BITS_VALUE(reg1->reg, 0, 16),
			nhmex_mbox_shared_reg_config(box, idx));
	idx = __BITS_VALUE(reg1->idx, 1, 8);
	if (idx != 0xff)
		wrmsrl(__BITS_VALUE(reg1->reg, 1, 16),
			nhmex_mbox_shared_reg_config(box, idx));

	if (reg2->idx != EXTRA_REG_NONE) {
		wrmsrl(reg2->reg, 0);
		if (reg2->config != ~0ULL) {
			wrmsrl(reg2->reg + 1,
				reg2->config & NHMEX_M_PMON_ADDR_MATCH_MASK);
			wrmsrl(reg2->reg + 2, NHMEX_M_PMON_ADDR_MASK_MASK &
				(reg2->config >> NHMEX_M_PMON_ADDR_MASK_SHIFT));
			wrmsrl(reg2->reg, NHMEX_M_PMON_MM_CFG_EN);
		}
	}

	wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
}
DEFINE_UNCORE_FORMAT_ATTR(count_mode, count_mode, "config:2-3");
DEFINE_UNCORE_FORMAT_ATTR(storage_mode, storage_mode, "config:4-5");
DEFINE_UNCORE_FORMAT_ATTR(wrap_mode, wrap_mode, "config:6");
DEFINE_UNCORE_FORMAT_ATTR(flag_mode, flag_mode, "config:7");
DEFINE_UNCORE_FORMAT_ATTR(inc_sel, inc_sel, "config:9-13");
DEFINE_UNCORE_FORMAT_ATTR(set_flag_sel, set_flag_sel, "config:19-21");
DEFINE_UNCORE_FORMAT_ATTR(filter_cfg_en, filter_cfg_en, "config2:63");
DEFINE_UNCORE_FORMAT_ATTR(filter_match, filter_match, "config2:0-33");
DEFINE_UNCORE_FORMAT_ATTR(filter_mask, filter_mask, "config2:34-61");
DEFINE_UNCORE_FORMAT_ATTR(dsp, dsp, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(thr, thr, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(fvc, fvc, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(pgt, pgt, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(map, map, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(iss, iss, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(pld, pld, "config1:32-63");

static struct attribute *nhmex_uncore_mbox_formats_attr[] = {
	&format_attr_count_mode.attr,
	&format_attr_storage_mode.attr,
	&format_attr_wrap_mode.attr,
	&format_attr_flag_mode.attr,
	&format_attr_inc_sel.attr,
	&format_attr_set_flag_sel.attr,
	&format_attr_filter_cfg_en.attr,
	&format_attr_filter_match.attr,
	&format_attr_filter_mask.attr,
	&format_attr_dsp.attr,
	&format_attr_thr.attr,
	&format_attr_fvc.attr,
	&format_attr_pgt.attr,
	&format_attr_map.attr,
	&format_attr_iss.attr,
	&format_attr_pld.attr,
	NULL,
};

static struct attribute_group nhmex_uncore_mbox_format_group = {
	.name		= "format",
	.attrs		= nhmex_uncore_mbox_formats_attr,
};

static struct uncore_event_desc nhmex_uncore_mbox_events[] = {
	INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x2800"),
	INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x2820"),
	{ /* end: all zeroes */ },
};

static struct uncore_event_desc wsmex_uncore_mbox_events[] = {
	INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x5000"),
	INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x5040"),
	{ /* end: all zeroes */ },
};

static struct intel_uncore_ops nhmex_uncore_mbox_ops = {
	NHMEX_UNCORE_OPS_COMMON_INIT(),
	.enable_event	= nhmex_mbox_msr_enable_event,
	.hw_config	= nhmex_mbox_hw_config,
	.get_constraint	= nhmex_mbox_get_constraint,
	.put_constraint	= nhmex_mbox_put_constraint,
};

static struct intel_uncore_type nhmex_uncore_mbox = {
	.name			= "mbox",
	.num_counters		= 6,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.event_ctl		= NHMEX_M0_MSR_PMU_CTL0,
	.perf_ctr		= NHMEX_M0_MSR_PMU_CNT0,
	.event_mask		= NHMEX_M_PMON_RAW_EVENT_MASK,
	.box_ctl		= NHMEX_M0_MSR_GLOBAL_CTL,
	.msr_offset		= NHMEX_M_MSR_OFFSET,
	.pair_ctr_ctl		= 1,
	.num_shared_regs	= 8,
	.event_descs		= nhmex_uncore_mbox_events,
	.ops			= &nhmex_uncore_mbox_ops,
	.format_group		= &nhmex_uncore_mbox_format_group,
};
static void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	/* adjust the main event selector and extra register index */
	if (reg1->idx % 2) {
		reg1->idx--;
		hwc->config -= 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
	} else {
		reg1->idx++;
		hwc->config += 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
	}

	/* adjust extra register config */
	switch (reg1->idx % 6) {
	case 2:
		/* shift the 8~15 bits to the 0~7 bits */
		reg1->config >>= 8;
		break;
	case 3:
		/* shift the 0~7 bits to the 8~15 bits */
		reg1->config <<= 8;
		break;
	}
}

/*
 * Each rbox has 4 event sets which monitor PQI port 0~3 or 4~7.
 * An event set consists of 6 events, the 3rd and 4th events in
 * an event set use the same extra register. So an event set uses
 * 5 extra registers.
 */
static struct event_constraint *
nhmex_rbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
	struct intel_uncore_extra_reg *er;
	unsigned long flags;
	int idx, er_idx;
	u64 config1;
	bool ok = false;

	if (!uncore_box_is_fake(box) && reg1->alloc)
		return NULL;

	idx = reg1->idx % 6;
	config1 = reg1->config;
again:
	er_idx = idx;
	/* the 3rd and 4th events use the same extra register */
	if (er_idx > 2)
		er_idx--;
	er_idx += (reg1->idx / 6) * 5;

	er = &box->shared_regs[er_idx];
	raw_spin_lock_irqsave(&er->lock, flags);
	if (idx < 2) {
		if (!atomic_read(&er->ref) || er->config == reg1->config) {
			atomic_inc(&er->ref);
			er->config = reg1->config;
			ok = true;
		}
	} else if (idx == 2 || idx == 3) {
		/*
		 * these two events use different fields in an extra register,
		 * the 0~7 bits and the 8~15 bits respectively.
		 */
		u64 mask = 0xff << ((idx - 2) * 8);
		if (!__BITS_VALUE(atomic_read(&er->ref), idx - 2, 8) ||
		    !((er->config ^ config1) & mask)) {
			atomic_add(1 << ((idx - 2) * 8), &er->ref);
			er->config &= ~mask;
			er->config |= config1 & mask;
			ok = true;
		}
	} else {
		if (!atomic_read(&er->ref) ||
		    (er->config == (hwc->config >> 32) &&
		     er->config1 == reg1->config &&
		     er->config2 == reg2->config)) {
			atomic_inc(&er->ref);
			er->config = (hwc->config >> 32);
			er->config1 = reg1->config;
			er->config2 = reg2->config;
			ok = true;
		}
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (!ok) {
		/*
		 * The Rbox events are always in pairs. The paired
		 * events are functionally identical, but use different
		 * extra registers. If we failed to take an extra
		 * register, try the alternative.
		 */
		idx ^= 1;
		if (idx != reg1->idx % 6) {
			if (idx == 2)
				config1 >>= 8;
			else if (idx == 3)
				config1 <<= 8;
			goto again;
		}
	} else {
		if (!uncore_box_is_fake(box)) {
			if (idx != reg1->idx % 6)
				nhmex_rbox_alter_er(box, event);
			reg1->alloc = 1;
		}
		return NULL;
	}
	return &constraint_empty;
}
static void nhmex_rbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_extra_reg *er;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	int idx, er_idx;

	if (uncore_box_is_fake(box) || !reg1->alloc)
		return;

	idx = reg1->idx % 6;
	er_idx = idx;
	if (er_idx > 2)
		er_idx--;
	er_idx += (reg1->idx / 6) * 5;

	er = &box->shared_regs[er_idx];
	if (idx == 2 || idx == 3)
		atomic_sub(1 << ((idx - 2) * 8), &er->ref);
	else
		atomic_dec(&er->ref);

	reg1->alloc = 0;
}

static int nhmex_rbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
	int idx;

	idx = (event->hw.config & NHMEX_R_PMON_CTL_EV_SEL_MASK) >>
		NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
	if (idx >= 0x18)
		return -EINVAL;

	reg1->idx = idx;
	reg1->config = event->attr.config1;

	switch (idx % 6) {
	case 4:
	case 5:
		hwc->config |= event->attr.config & (~0ULL << 32);
		reg2->config = event->attr.config2;
		break;
	}
	return 0;
}

static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
	int idx, port;

	idx = reg1->idx;
	port = idx / 6 + box->pmu->pmu_idx * 4;

	switch (idx % 6) {
	case 0:
		wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG0(port), reg1->config);
		break;
	case 1:
		wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG1(port), reg1->config);
		break;
	case 2:
	case 3:
		wrmsrl(NHMEX_R_MSR_PORTN_QLX_CFG(port),
			uncore_shared_reg_config(box, 2 + (idx / 6) * 5));
		break;
	case 4:
		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port),
			hwc->config >> 32);
		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(port), reg1->config);
		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MASK(port), reg2->config);
		break;
	case 5:
		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port),
			hwc->config >> 32);
		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(port), reg1->config);
		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MASK(port), reg2->config);
		break;
	}

	wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
		(hwc->config & NHMEX_R_PMON_CTL_EV_SEL_MASK));
}
DEFINE_UNCORE_FORMAT_ATTR(xbr_mm_cfg, xbr_mm_cfg, "config:32-63");
DEFINE_UNCORE_FORMAT_ATTR(xbr_match, xbr_match, "config1:0-63");
DEFINE_UNCORE_FORMAT_ATTR(xbr_mask, xbr_mask, "config2:0-63");
DEFINE_UNCORE_FORMAT_ATTR(qlx_cfg, qlx_cfg, "config1:0-15");
DEFINE_UNCORE_FORMAT_ATTR(iperf_cfg, iperf_cfg, "config1:0-31");

static struct attribute *nhmex_uncore_rbox_formats_attr[] = {
	&format_attr_event5.attr,
	&format_attr_xbr_mm_cfg.attr,
	&format_attr_xbr_match.attr,
	&format_attr_xbr_mask.attr,
	&format_attr_qlx_cfg.attr,
	&format_attr_iperf_cfg.attr,
	NULL,
};

static struct attribute_group nhmex_uncore_rbox_format_group = {
	.name = "format",
	.attrs = nhmex_uncore_rbox_formats_attr,
};

static struct uncore_event_desc nhmex_uncore_rbox_events[] = {
	INTEL_UNCORE_EVENT_DESC(qpi0_flit_send,     "event=0x0,iperf_cfg=0x80000000"),
	INTEL_UNCORE_EVENT_DESC(qpi1_filt_send,     "event=0x6,iperf_cfg=0x80000000"),
	INTEL_UNCORE_EVENT_DESC(qpi0_idle_filt,     "event=0x0,iperf_cfg=0x40000000"),
	INTEL_UNCORE_EVENT_DESC(qpi1_idle_filt,     "event=0x6,iperf_cfg=0x40000000"),
	INTEL_UNCORE_EVENT_DESC(qpi0_date_response, "event=0x0,iperf_cfg=0xc4"),
	INTEL_UNCORE_EVENT_DESC(qpi1_date_response, "event=0x6,iperf_cfg=0xc4"),
	{ /* end: all zeroes */ },
};

static struct intel_uncore_ops nhmex_uncore_rbox_ops = {
	NHMEX_UNCORE_OPS_COMMON_INIT(),
	.enable_event		= nhmex_rbox_msr_enable_event,
	.hw_config		= nhmex_rbox_hw_config,
	.get_constraint		= nhmex_rbox_get_constraint,
	.put_constraint		= nhmex_rbox_put_constraint,
};

static struct intel_uncore_type nhmex_uncore_rbox = {
	.name			= "rbox",
	.num_counters		= 8,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.event_ctl		= NHMEX_R_MSR_PMON_CTL0,
	.perf_ctr		= NHMEX_R_MSR_PMON_CNT0,
	.event_mask		= NHMEX_R_PMON_RAW_EVENT_MASK,
	.box_ctl		= NHMEX_R_MSR_GLOBAL_CTL,
	.msr_offset		= NHMEX_R_MSR_OFFSET,
	.pair_ctr_ctl		= 1,
	.num_shared_regs	= 20,
	.event_descs		= nhmex_uncore_rbox_events,
	.ops			= &nhmex_uncore_rbox_ops,
	.format_group		= &nhmex_uncore_rbox_format_group
};

static struct intel_uncore_type *nhmex_msr_uncores[] = {
	&nhmex_uncore_ubox,
	&nhmex_uncore_cbox,
	&nhmex_uncore_bbox,
	&nhmex_uncore_sbox,
	&nhmex_uncore_mbox,
	&nhmex_uncore_rbox,
	&nhmex_uncore_wbox,
	NULL,
};
/* end of Nehalem-EX uncore support */

static void uncore_assign_hw_event(struct intel_uncore_box *box, struct perf_event *event, int idx)
{
	struct hw_perf_event *hwc = &event->hw;

	hwc->idx = idx;
	hwc->last_tag = ++box->tags[idx];

	if (hwc->idx == UNCORE_PMC_IDX_FIXED) {
		hwc->event_base = uncore_fixed_ctr(box);
		hwc->config_base = uncore_fixed_ctl(box);
		return;
	}

	hwc->config_base = uncore_event_ctl(box, hwc->idx);
	hwc->event_base  = uncore_perf_ctr(box, hwc->idx);
}

static void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event)
{
	u64 prev_count, new_count, delta;
	int shift;

	if (event->hw.idx >= UNCORE_PMC_IDX_FIXED)
		shift = 64 - uncore_fixed_ctr_bits(box);
	else
		shift = 64 - uncore_perf_ctr_bits(box);

	/* the hrtimer might modify the previous event value */
again:
	prev_count = local64_read(&event->hw.prev_count);
	new_count = uncore_read_counter(box, event);
	if (local64_xchg(&event->hw.prev_count, new_count) != prev_count)
		goto again;

	delta = (new_count << shift) - (prev_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
}
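/*
 * The shift trick truncates both values to the counter width before
 * subtracting, so wraparound is handled for free.  For example, with
 * 48-bit counters shift = 16; if the counter wrapped from
 * 0xffffffffffff to 0x5, then (0x5 << 16) - (0xffffffffffff << 16)
 * equals 0x60000 in 64-bit arithmetic, and delta >>= 16 yields the
 * true increment, 6.
 */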
/*
 * The overflow interrupt is unavailable for SandyBridge-EP, is broken
 * for SandyBridge. So we use hrtimer to periodically poll the counter
 * to avoid overflow.
 */
static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
{
	struct intel_uncore_box *box;
	struct perf_event *event;
	unsigned long flags;
	int bit;

	box = container_of(hrtimer, struct intel_uncore_box, hrtimer);
	if (!box->n_active || box->cpu != smp_processor_id())
		return HRTIMER_NORESTART;
	/*
	 * disable local interrupts to prevent uncore_pmu_event_start/stop
	 * from interrupting the update process
	 */
	local_irq_save(flags);

	/*
	 * handle boxes with an active event list as opposed to active
	 * counters
	 */
	list_for_each_entry(event, &box->active_list, active_entry) {
		uncore_perf_event_update(box, event);
	}

	for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX)
		uncore_perf_event_update(box, box->events[bit]);

	local_irq_restore(flags);

	hrtimer_forward_now(hrtimer, ns_to_ktime(box->hrtimer_duration));
	return HRTIMER_RESTART;
}

static void uncore_pmu_start_hrtimer(struct intel_uncore_box *box)
{
	__hrtimer_start_range_ns(&box->hrtimer,
			ns_to_ktime(box->hrtimer_duration), 0,
			HRTIMER_MODE_REL_PINNED, 0);
}

static void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_cancel(&box->hrtimer);
}

static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	box->hrtimer.function = uncore_pmu_hrtimer;
}

static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, int node)
{
	struct intel_uncore_box *box;
	int i, size;

	size = sizeof(*box) + type->num_shared_regs * sizeof(struct intel_uncore_extra_reg);

	box = kzalloc_node(size, GFP_KERNEL, node);
	if (!box)
		return NULL;

	for (i = 0; i < type->num_shared_regs; i++)
		raw_spin_lock_init(&box->shared_regs[i].lock);

	uncore_pmu_init_hrtimer(box);
	atomic_set(&box->refcnt, 1);
	box->cpu = -1;
	box->phys_id = -1;

	/* set default hrtimer timeout */
	box->hrtimer_duration = UNCORE_PMU_HRTIMER_INTERVAL;

	INIT_LIST_HEAD(&box->active_list);

	return box;
}
static int
uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, bool dogrp)
{
	struct perf_event *event;
	int n, max_count;

	max_count = box->pmu->type->num_counters;
	if (box->pmu->type->fixed_ctl)
		max_count++;

	if (box->n_events >= max_count)
		return -EINVAL;

	n = box->n_events;
	box->event_list[n] = leader;
	n++;
	if (!dogrp)
		return n;

	list_for_each_entry(event, &leader->sibling_list, group_entry) {
		if (event->state <= PERF_EVENT_STATE_OFF)
			continue;

		if (n >= max_count)
			return -EINVAL;

		box->event_list[n] = event;
		n++;
	}
	return n;
}

static struct event_constraint *
uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_type *type = box->pmu->type;
	struct event_constraint *c;

	if (type->ops->get_constraint) {
		c = type->ops->get_constraint(box, event);
		if (c)
			return c;
	}

	if (event->attr.config == UNCORE_FIXED_EVENT)
		return &constraint_fixed;

	if (type->constraints) {
		for_each_event_constraint(c, type->constraints) {
			if ((event->hw.config & c->cmask) == c->code)
				return c;
		}
	}

	return &type->unconstrainted;
}
static void uncore_put_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	if (box->pmu->type->ops->put_constraint)
		box->pmu->type->ops->put_constraint(box, event);
}

static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int n)
{
	unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
	struct event_constraint *c;
	int i, wmin, wmax, ret = 0;
	struct hw_perf_event *hwc;

	bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX);

	for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) {
		hwc = &box->event_list[i]->hw;
		c = uncore_get_event_constraint(box, box->event_list[i]);
		hwc->constraint = c;
		wmin = min(wmin, c->weight);
		wmax = max(wmax, c->weight);
	}

	/* fastpath, try to reuse previous register */
	for (i = 0; i < n; i++) {
		hwc = &box->event_list[i]->hw;
		c = hwc->constraint;

		/* never assigned */
		if (hwc->idx == -1)
			break;

		/* constraint still honored */
		if (!test_bit(hwc->idx, c->idxmsk))
			break;

		/* not already used */
		if (test_bit(hwc->idx, used_mask))
			break;

		__set_bit(hwc->idx, used_mask);
		if (assign)
			assign[i] = hwc->idx;
	}
	/* slow path */
	if (i != n)
		ret = perf_assign_events(box->event_list, n,
					 wmin, wmax, assign);

	if (!assign || ret) {
		for (i = 0; i < n; i++)
			uncore_put_event_constraint(box, box->event_list[i]);
	}
	return ret ? -EINVAL : 0;
}
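/*
 * If the fastpath above cannot keep every event on its previous
 * counter, perf_assign_events() solves the general constraint problem,
 * placing the most constrained events (lowest weight, wmin) before the
 * least constrained ones (wmax).
 */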
static void uncore_pmu_event_start(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	int idx = event->hw.idx;

	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
		return;

	event->hw.state = 0;
	box->events[idx] = event;
	box->n_active++;

	__set_bit(idx, box->active_mask);

	local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
	uncore_enable_event(box, event);

	if (box->n_active == 1) {
		uncore_enable_box(box);
		uncore_pmu_start_hrtimer(box);
	}
}

static void uncore_pmu_event_stop(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;

	if (__test_and_clear_bit(hwc->idx, box->active_mask)) {
		uncore_disable_event(box, event);
		box->n_active--;
		box->events[hwc->idx] = NULL;
		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;

		if (box->n_active == 0) {
			uncore_disable_box(box);
			uncore_pmu_cancel_hrtimer(box);
		}
	}

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		/*
		 * Drain the remaining delta count out of an event
		 * that we are disabling:
		 */
		uncore_perf_event_update(box, event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}
static int uncore_pmu_event_add(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;
	int assign[UNCORE_PMC_IDX_MAX];
	int i, n, ret;

	if (!box)
		return -ENODEV;

	ret = n = uncore_collect_events(box, event, false);
	if (ret < 0)
		return ret;

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (!(flags & PERF_EF_START))
		hwc->state |= PERF_HES_ARCH;

	ret = uncore_assign_events(box, assign, n);
	if (ret)
		return ret;

	/* save events moving to new counters */
	for (i = 0; i < box->n_events; i++) {
		event = box->event_list[i];
		hwc = &event->hw;

		if (hwc->idx == assign[i] &&
			hwc->last_tag == box->tags[assign[i]])
			continue;
		/*
		 * Ensure we don't accidentally enable a stopped
		 * counter simply because we rescheduled.
		 */
		if (hwc->state & PERF_HES_STOPPED)
			hwc->state |= PERF_HES_ARCH;

		uncore_pmu_event_stop(event, PERF_EF_UPDATE);
	}

	/* reprogram moved events into new counters */
	for (i = 0; i < n; i++) {
		event = box->event_list[i];
		hwc = &event->hw;

		if (hwc->idx != assign[i] ||
			hwc->last_tag != box->tags[assign[i]])
			uncore_assign_hw_event(box, event, assign[i]);
		else if (i < box->n_events)
			continue;

		if (hwc->state & PERF_HES_ARCH)
			continue;

		uncore_pmu_event_start(event, 0);
	}
	box->n_events = n;

	return 0;
}

static void uncore_pmu_event_del(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	int i;

	uncore_pmu_event_stop(event, PERF_EF_UPDATE);

	for (i = 0; i < box->n_events; i++) {
		if (event == box->event_list[i]) {
			uncore_put_event_constraint(box, event);

			while (++i < box->n_events)
				box->event_list[i - 1] = box->event_list[i];

			--box->n_events;
			break;
		}
	}

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
}
static void uncore_pmu_event_read(struct perf_event *event)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	uncore_perf_event_update(box, event);
}

/*
 * validation ensures the group can be loaded onto the
 * PMU if it was the only group available.
 */
static int uncore_validate_group(struct intel_uncore_pmu *pmu,
				struct perf_event *event)
{
	struct perf_event *leader = event->group_leader;
	struct intel_uncore_box *fake_box;
	int ret = -EINVAL, n;

	fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE);
	if (!fake_box)
		return -ENOMEM;

	fake_box->pmu = pmu;
	/*
	 * the event is not yet connected with its
	 * siblings therefore we must first collect
	 * existing siblings, then add the new event
	 * before we can simulate the scheduling
	 */
	n = uncore_collect_events(fake_box, leader, true);
	if (n < 0)
		goto out;

	fake_box->n_events = n;
	n = uncore_collect_events(fake_box, event, false);
	if (n < 0)
		goto out;

	fake_box->n_events = n;

	ret = uncore_assign_events(fake_box, NULL, n);
out:
	kfree(fake_box);
	return ret;
}

static int uncore_pmu_event_init(struct perf_event *event)
{
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	struct hw_perf_event *hwc = &event->hw;
	int ret;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	pmu = uncore_event_to_pmu(event);
	/* no device found for this pmu */
	if (pmu->func_id < 0)
		return -ENOENT;

	/*
	 * The uncore PMU measures at all privilege levels all the time.
	 * So it doesn't make sense to specify any exclude bits.
	 */
	if (event->attr.exclude_user || event->attr.exclude_kernel ||
			event->attr.exclude_hv || event->attr.exclude_idle)
		return -EINVAL;

	/* Sampling not supported yet */
	if (hwc->sample_period)
		return -EINVAL;

	/*
	 * Place all uncore events for a particular physical package
	 * onto a single cpu
	 */
	if (event->cpu < 0)
		return -EINVAL;
	box = uncore_pmu_to_box(pmu, event->cpu);
	if (!box || box->cpu < 0)
		return -EINVAL;
	event->cpu = box->cpu;

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
	event->hw.extra_reg.idx = EXTRA_REG_NONE;
	event->hw.branch_reg.idx = EXTRA_REG_NONE;

	if (event->attr.config == UNCORE_FIXED_EVENT) {
		/* no fixed counter */
		if (!pmu->type->fixed_ctl)
			return -EINVAL;
		/*
		 * if there is only one fixed counter, only the first pmu
		 * can access the fixed counter
		 */
		if (pmu->type->single_fixed && pmu->pmu_idx > 0)
			return -EINVAL;

		/* fixed counters have event field hardcoded to zero */
		hwc->config = 0ULL;
	} else {
		hwc->config = event->attr.config & pmu->type->event_mask;
		if (pmu->type->ops->hw_config) {
			ret = pmu->type->ops->hw_config(box, event);
			if (ret)
				return ret;
		}
	}

	ret = 0;
	if (event->group_leader != event)
		ret = uncore_validate_group(pmu, event);

	return ret;
}
static ssize_t uncore_get_attr_cpumask(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	int n = cpulist_scnprintf(buf, PAGE_SIZE - 2, &uncore_cpu_mask);

	buf[n++] = '\n';
	buf[n] = '\0';
	return n;
}

static DEVICE_ATTR(cpumask, S_IRUGO, uncore_get_attr_cpumask, NULL);

static struct attribute *uncore_pmu_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group uncore_pmu_attr_group = {
	.attrs = uncore_pmu_attrs,
};

static int __init uncore_pmu_register(struct intel_uncore_pmu *pmu)
{
	int ret;

	if (!pmu->type->pmu) {
		pmu->pmu = (struct pmu) {
			.attr_groups	= pmu->type->attr_groups,
			.task_ctx_nr	= perf_invalid_context,
			.event_init	= uncore_pmu_event_init,
			.add		= uncore_pmu_event_add,
			.del		= uncore_pmu_event_del,
			.start		= uncore_pmu_event_start,
			.stop		= uncore_pmu_event_stop,
			.read		= uncore_pmu_event_read,
		};
	} else {
		pmu->pmu = *pmu->type->pmu;
		pmu->pmu.attr_groups = pmu->type->attr_groups;
	}

	if (pmu->type->num_boxes == 1) {
		if (strlen(pmu->type->name) > 0)
			sprintf(pmu->name, "uncore_%s", pmu->type->name);
		else
			sprintf(pmu->name, "uncore");
	} else {
		sprintf(pmu->name, "uncore_%s_%d", pmu->type->name,
			pmu->pmu_idx);
	}

	ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
	return ret;
}
static void __init uncore_type_exit(struct intel_uncore_type *type)
{
	int i;

	for (i = 0; i < type->num_boxes; i++)
		free_percpu(type->pmus[i].box);
	kfree(type->pmus);
	type->pmus = NULL;
	kfree(type->events_group);
	type->events_group = NULL;
}

static void __init uncore_types_exit(struct intel_uncore_type **types)
{
	int i;
	for (i = 0; types[i]; i++)
		uncore_type_exit(types[i]);
}

static int __init uncore_type_init(struct intel_uncore_type *type)
{
	struct intel_uncore_pmu *pmus;
	struct attribute_group *attr_group;
	struct attribute **attrs;
	int i, j;

	pmus = kzalloc(sizeof(*pmus) * type->num_boxes, GFP_KERNEL);
	if (!pmus)
		return -ENOMEM;

	type->pmus = pmus;

	type->unconstrainted = (struct event_constraint)
		__EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
				0, type->num_counters, 0, 0);

	for (i = 0; i < type->num_boxes; i++) {
		pmus[i].func_id = -1;
		pmus[i].pmu_idx = i;
		pmus[i].type = type;
		INIT_LIST_HEAD(&pmus[i].box_list);
		pmus[i].box = alloc_percpu(struct intel_uncore_box *);
		if (!pmus[i].box)
			goto fail;
	}

	if (type->event_descs) {
		i = 0;
		while (type->event_descs[i].attr.attr.name)
			i++;

		attr_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
					sizeof(*attr_group), GFP_KERNEL);
		if (!attr_group)
			goto fail;

		attrs = (struct attribute **)(attr_group + 1);
		attr_group->name = "events";
		attr_group->attrs = attrs;

		for (j = 0; j < i; j++)
			attrs[j] = &type->event_descs[j].attr.attr;

		type->events_group = attr_group;
	}

	type->pmu_group = &uncore_pmu_attr_group;
	return 0;
fail:
	uncore_type_exit(type);
	return -ENOMEM;
}

static int __init uncore_types_init(struct intel_uncore_type **types)
{
	int i, ret;

	for (i = 0; types[i]; i++) {
		ret = uncore_type_init(types[i]);
		if (ret)
			goto fail;
	}
	return 0;
fail:
	while (--i >= 0)
		uncore_type_exit(types[i]);
	return ret;
}
static struct pci_driver *uncore_pci_driver;
static bool pcidrv_registered;

/*
 * add a pci uncore device
 */
static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	struct intel_uncore_type *type;
	int phys_id;

	phys_id = pcibus_to_physid[pdev->bus->number];
	if (phys_id < 0)
		return -ENODEV;

	if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) {
		extra_pci_dev[phys_id][UNCORE_PCI_DEV_IDX(id->driver_data)] = pdev;
		pci_set_drvdata(pdev, NULL);
		return 0;
	}

	type = pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];
	box = uncore_alloc_box(type, NUMA_NO_NODE);
	if (!box)
		return -ENOMEM;

	/*
	 * for performance monitoring unit with multiple boxes,
	 * each box has a different function id.
	 */
	pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)];
	if (pmu->func_id < 0)
		pmu->func_id = pdev->devfn;
	else
		WARN_ON_ONCE(pmu->func_id != pdev->devfn);

	box->phys_id = phys_id;
	box->pci_dev = pdev;
	box->pmu = pmu;
	uncore_box_init(box);
	pci_set_drvdata(pdev, box);

	raw_spin_lock(&uncore_box_lock);
	list_add_tail(&box->list, &pmu->box_list);
	raw_spin_unlock(&uncore_box_lock);

	return 0;
}
static void uncore_pci_remove(struct pci_dev *pdev)
{
	struct intel_uncore_box *box;
	struct intel_uncore_pmu *pmu;
	int i, cpu, phys_id = pcibus_to_physid[pdev->bus->number];

	box = pci_get_drvdata(pdev);
	if (!box) {
		for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) {
			if (extra_pci_dev[phys_id][i] == pdev) {
				extra_pci_dev[phys_id][i] = NULL;
				break;
			}
		}
		WARN_ON_ONCE(i >= UNCORE_EXTRA_PCI_DEV_MAX);
		return;
	}

	pmu = box->pmu;
	if (WARN_ON_ONCE(phys_id != box->phys_id))
		return;

	pci_set_drvdata(pdev, NULL);

	raw_spin_lock(&uncore_box_lock);
	list_del(&box->list);
	raw_spin_unlock(&uncore_box_lock);

	for_each_possible_cpu(cpu) {
		if (*per_cpu_ptr(pmu->box, cpu) == box) {
			*per_cpu_ptr(pmu->box, cpu) = NULL;
			atomic_dec(&box->refcnt);
		}
	}

	WARN_ON_ONCE(atomic_read(&box->refcnt) != 1);
	kfree(box);
}

static int __init uncore_pci_init(void)
{
	int ret;

	switch (boot_cpu_data.x86_model) {
	case 45: /* Sandy Bridge-EP */
		ret = snbep_pci2phy_map_init(0x3ce0);
		if (ret)
			return ret;
		pci_uncores = snbep_pci_uncores;
		uncore_pci_driver = &snbep_uncore_pci_driver;
		break;
	case 62: /* IvyTown */
		ret = snbep_pci2phy_map_init(0x0e1e);
		if (ret)
			return ret;
		pci_uncores = ivt_pci_uncores;
		uncore_pci_driver = &ivt_uncore_pci_driver;
		break;
	case 42: /* Sandy Bridge */
		ret = snb_pci2phy_map_init(PCI_DEVICE_ID_INTEL_SNB_IMC);
		if (ret)
			return ret;
		pci_uncores = snb_pci_uncores;
		uncore_pci_driver = &snb_uncore_pci_driver;
		break;
	case 58: /* Ivy Bridge */
		ret = snb_pci2phy_map_init(PCI_DEVICE_ID_INTEL_IVB_IMC);
		if (ret)
			return ret;
		pci_uncores = snb_pci_uncores;
		uncore_pci_driver = &ivb_uncore_pci_driver;
		break;
	case 60: /* Haswell */
	case 69: /* Haswell Celeron */
		ret = snb_pci2phy_map_init(PCI_DEVICE_ID_INTEL_HSW_IMC);
		if (ret)
			return ret;
		pci_uncores = snb_pci_uncores;
		uncore_pci_driver = &hsw_uncore_pci_driver;
		break;
	default:
		return 0;
	}

	ret = uncore_types_init(pci_uncores);
	if (ret)
		return ret;

	uncore_pci_driver->probe = uncore_pci_probe;
	uncore_pci_driver->remove = uncore_pci_remove;

	ret = pci_register_driver(uncore_pci_driver);
	if (ret == 0)
		pcidrv_registered = true;
	else
		uncore_types_exit(pci_uncores);

	return ret;
}

static void __init uncore_pci_exit(void)
{
	if (pcidrv_registered) {
		pcidrv_registered = false;
		pci_unregister_driver(uncore_pci_driver);
		uncore_types_exit(pci_uncores);
	}
}
/* CPU hot plug/unplug are serialized by cpu_add_remove_lock mutex */
static LIST_HEAD(boxes_to_free);

static void uncore_kfree_boxes(void)
{
	struct intel_uncore_box *box;

	while (!list_empty(&boxes_to_free)) {
		box = list_entry(boxes_to_free.next,
				 struct intel_uncore_box, list);
		list_del(&box->list);
		kfree(box);
	}
}

static void uncore_cpu_dying(int cpu)
{
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i, j;

	for (i = 0; msr_uncores[i]; i++) {
		type = msr_uncores[i];
		for (j = 0; j < type->num_boxes; j++) {
			pmu = &type->pmus[j];
			box = *per_cpu_ptr(pmu->box, cpu);
			*per_cpu_ptr(pmu->box, cpu) = NULL;
			if (box && atomic_dec_and_test(&box->refcnt))
				list_add(&box->list, &boxes_to_free);
		}
	}
}
static int uncore_cpu_starting(int cpu)
{
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box, *exist;
	int i, j, k, phys_id;

	phys_id = topology_physical_package_id(cpu);

	for (i = 0; msr_uncores[i]; i++) {
		type = msr_uncores[i];
		for (j = 0; j < type->num_boxes; j++) {
			pmu = &type->pmus[j];
			box = *per_cpu_ptr(pmu->box, cpu);
			/* called by uncore_cpu_init? */
			if (box && box->phys_id >= 0) {
				uncore_box_init(box);
				continue;
			}

			for_each_online_cpu(k) {
				exist = *per_cpu_ptr(pmu->box, k);
				if (exist && exist->phys_id == phys_id) {
					atomic_inc(&exist->refcnt);
					*per_cpu_ptr(pmu->box, cpu) = exist;
					if (box) {
						list_add(&box->list,
							 &boxes_to_free);
						box = NULL;
					}
					break;
				}
			}

			if (box) {
				box->phys_id = phys_id;
				uncore_box_init(box);
			}
		}
	}
	return 0;
}

static int uncore_cpu_prepare(int cpu, int phys_id)
{
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i, j;

	for (i = 0; msr_uncores[i]; i++) {
		type = msr_uncores[i];
		for (j = 0; j < type->num_boxes; j++) {
			pmu = &type->pmus[j];
			if (pmu->func_id < 0)
				pmu->func_id = j;

			box = uncore_alloc_box(type, cpu_to_node(cpu));
			if (!box)
				return -ENOMEM;

			box->pmu = pmu;
			box->phys_id = phys_id;
			*per_cpu_ptr(pmu->box, cpu) = box;
		}
	}
	return 0;
}
static void
uncore_change_context(struct intel_uncore_type **uncores, int old_cpu, int new_cpu)
{
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i, j;

	for (i = 0; uncores[i]; i++) {
		type = uncores[i];
		for (j = 0; j < type->num_boxes; j++) {
			pmu = &type->pmus[j];
			if (old_cpu < 0)
				box = uncore_pmu_to_box(pmu, new_cpu);
			else
				box = uncore_pmu_to_box(pmu, old_cpu);
			if (!box)
				continue;

			if (old_cpu < 0) {
				WARN_ON_ONCE(box->cpu != -1);
				box->cpu = new_cpu;
				continue;
			}

			WARN_ON_ONCE(box->cpu != old_cpu);
			if (new_cpu >= 0) {
				uncore_pmu_cancel_hrtimer(box);
				perf_pmu_migrate_context(&pmu->pmu,
						old_cpu, new_cpu);
				box->cpu = new_cpu;
			} else {
				box->cpu = -1;
			}
		}
	}
}
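/*
 * uncore_change_context() above rebinds boxes on hotplug: with
 * old_cpu < 0 a box is bound to new_cpu; otherwise active events are
 * migrated from old_cpu to new_cpu, or the box is orphaned when
 * new_cpu < 0 (the whole package went offline).  The two helpers below
 * pick the CPU that collects uncore events for each package.
 */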
static void uncore_event_exit_cpu(int cpu)
{
	int i, phys_id, target;

	/* if exiting cpu is used for collecting uncore events */
	if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
		return;

	/* find a new cpu to collect uncore events */
	phys_id = topology_physical_package_id(cpu);
	target = -1;
	for_each_online_cpu(i) {
		if (i == cpu)
			continue;
		if (phys_id == topology_physical_package_id(i)) {
			target = i;
			break;
		}
	}

	/* migrate uncore events to the new cpu */
	if (target >= 0)
		cpumask_set_cpu(target, &uncore_cpu_mask);

	uncore_change_context(msr_uncores, cpu, target);
	uncore_change_context(pci_uncores, cpu, target);
}

static void uncore_event_init_cpu(int cpu)
{
	int i, phys_id;

	phys_id = topology_physical_package_id(cpu);
	for_each_cpu(i, &uncore_cpu_mask) {
		if (phys_id == topology_physical_package_id(i))
			return;
	}

	cpumask_set_cpu(cpu, &uncore_cpu_mask);

	uncore_change_context(msr_uncores, -1, cpu);
	uncore_change_context(pci_uncores, -1, cpu);
}

static int uncore_cpu_notifier(struct notifier_block *self,
			       unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	/* allocate/free data structure for uncore box */
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		uncore_cpu_prepare(cpu, -1);
		break;
	case CPU_STARTING:
		uncore_cpu_starting(cpu);
		break;
	case CPU_UP_CANCELED:
	case CPU_DYING:
		uncore_cpu_dying(cpu);
		break;
	case CPU_ONLINE:
	case CPU_DEAD:
		uncore_kfree_boxes();
		break;
	default:
		break;
	}

	/* select the cpu that collects uncore events */
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DOWN_FAILED:
	case CPU_STARTING:
		uncore_event_init_cpu(cpu);
		break;
	case CPU_DOWN_PREPARE:
		uncore_event_exit_cpu(cpu);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block uncore_cpu_nb = {
	.notifier_call	= uncore_cpu_notifier,
	/*
	 * to migrate uncore events, our notifier should be executed
	 * before perf core's notifier.
	 */
	.priority	= CPU_PRI_PERF + 1,
};
static void __init uncore_cpu_setup(void *dummy)
{
	uncore_cpu_starting(smp_processor_id());
}

static int __init uncore_cpu_init(void)
{
	int ret, max_cores;

	max_cores = boot_cpu_data.x86_max_cores;
	switch (boot_cpu_data.x86_model) {
	case 26: /* Nehalem */
	case 30:
	case 37: /* Westmere */
	case 44:
		msr_uncores = nhm_msr_uncores;
		break;
	case 42: /* Sandy Bridge */
	case 58: /* Ivy Bridge */
		if (snb_uncore_cbox.num_boxes > max_cores)
			snb_uncore_cbox.num_boxes = max_cores;
		msr_uncores = snb_msr_uncores;
		break;
	case 45: /* Sandy Bridge-EP */
		if (snbep_uncore_cbox.num_boxes > max_cores)
			snbep_uncore_cbox.num_boxes = max_cores;
		msr_uncores = snbep_msr_uncores;
		break;
	case 46: /* Nehalem-EX */
		uncore_nhmex = true;
	case 47: /* Westmere-EX aka. Xeon E7 */
		if (!uncore_nhmex)
			nhmex_uncore_mbox.event_descs = wsmex_uncore_mbox_events;
		if (nhmex_uncore_cbox.num_boxes > max_cores)
			nhmex_uncore_cbox.num_boxes = max_cores;
		msr_uncores = nhmex_msr_uncores;
		break;
	case 62: /* IvyTown */
		if (ivt_uncore_cbox.num_boxes > max_cores)
			ivt_uncore_cbox.num_boxes = max_cores;
		msr_uncores = ivt_msr_uncores;
		break;
	default:
		return 0;
	}

	ret = uncore_types_init(msr_uncores);
	if (ret)
		return ret;

	return 0;
}

static int __init uncore_pmus_register(void)
{
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_type *type;
	int i, j;

	for (i = 0; msr_uncores[i]; i++) {
		type = msr_uncores[i];
		for (j = 0; j < type->num_boxes; j++) {
			pmu = &type->pmus[j];
			uncore_pmu_register(pmu);
		}
	}

	for (i = 0; pci_uncores[i]; i++) {
		type = pci_uncores[i];
		for (j = 0; j < type->num_boxes; j++) {
			pmu = &type->pmus[j];
			uncore_pmu_register(pmu);
		}
	}

	return 0;
}

static void __init uncore_cpumask_init(void)
{
	int cpu;

	/*
	 * only invoke once from msr or pci init code
	 */
	if (!cpumask_empty(&uncore_cpu_mask))
		return;

	cpu_notifier_register_begin();

	for_each_online_cpu(cpu) {
		int i, phys_id = topology_physical_package_id(cpu);

		for_each_cpu(i, &uncore_cpu_mask) {
			if (phys_id == topology_physical_package_id(i)) {
				phys_id = -1;
				break;
			}
		}
		if (phys_id < 0)
			continue;

		uncore_cpu_prepare(cpu, phys_id);
		uncore_event_init_cpu(cpu);
	}
	on_each_cpu(uncore_cpu_setup, NULL, 1);

	__register_cpu_notifier(&uncore_cpu_nb);

	cpu_notifier_register_done();
}

static int __init intel_uncore_init(void)
{
	int ret;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return -ENODEV;

	if (cpu_has_hypervisor)
		return -ENODEV;

	ret = uncore_pci_init();
	if (ret)
		goto fail;
	ret = uncore_cpu_init();
	if (ret) {
		uncore_pci_exit();
		goto fail;
	}
	uncore_cpumask_init();

	uncore_pmus_register();
	return 0;
fail:
	return ret;
}
device_initcall(intel_uncore_init);