perf/x86: Make bitfield unsigned
arch/x86/kernel/cpu/perf_event_intel_uncore.h

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/perf_event.h>
#include "perf_event.h"

#define UNCORE_PMU_NAME_LEN 32
#define UNCORE_PMU_HRTIMER_INTERVAL (60 * NSEC_PER_SEC)

#define UNCORE_FIXED_EVENT 0xff
#define UNCORE_PMC_IDX_MAX_GENERIC 8
#define UNCORE_PMC_IDX_FIXED UNCORE_PMC_IDX_MAX_GENERIC
#define UNCORE_PMC_IDX_MAX (UNCORE_PMC_IDX_FIXED + 1)

#define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff)

/* SNB event control */
#define SNB_UNC_CTL_EV_SEL_MASK 0x000000ff
#define SNB_UNC_CTL_UMASK_MASK 0x0000ff00
#define SNB_UNC_CTL_EDGE_DET (1 << 18)
#define SNB_UNC_CTL_EN (1 << 22)
#define SNB_UNC_CTL_INVERT (1 << 23)
#define SNB_UNC_CTL_CMASK_MASK 0x1f000000
#define NHM_UNC_CTL_CMASK_MASK 0xff000000
#define NHM_UNC_FIXED_CTR_CTL_EN (1 << 0)

#define SNB_UNC_RAW_EVENT_MASK (SNB_UNC_CTL_EV_SEL_MASK | \
                                SNB_UNC_CTL_UMASK_MASK | \
                                SNB_UNC_CTL_EDGE_DET | \
                                SNB_UNC_CTL_INVERT | \
                                SNB_UNC_CTL_CMASK_MASK)

#define NHM_UNC_RAW_EVENT_MASK (SNB_UNC_CTL_EV_SEL_MASK | \
                                SNB_UNC_CTL_UMASK_MASK | \
                                SNB_UNC_CTL_EDGE_DET | \
                                SNB_UNC_CTL_INVERT | \
                                NHM_UNC_CTL_CMASK_MASK)
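
/*
 * Illustrative example (added comment, not in the original file): a raw SNB
 * uncore event selecting event 0x05 with umask 0x01 and edge detect would be
 * encoded as 0x05 | (0x01 << 8) | SNB_UNC_CTL_EDGE_DET, i.e. config 0x40105,
 * which lies entirely within SNB_UNC_RAW_EVENT_MASK.
 */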

/* SNB global control register */
#define SNB_UNC_PERF_GLOBAL_CTL 0x391
#define SNB_UNC_FIXED_CTR_CTRL 0x394
#define SNB_UNC_FIXED_CTR 0x395

/* SNB uncore global control */
#define SNB_UNC_GLOBAL_CTL_CORE_ALL ((1 << 4) - 1)
#define SNB_UNC_GLOBAL_CTL_EN (1 << 29)

/* SNB Cbo register */
#define SNB_UNC_CBO_0_PERFEVTSEL0 0x700
#define SNB_UNC_CBO_0_PER_CTR0 0x706
#define SNB_UNC_CBO_MSR_OFFSET 0x10

/* NHM global control register */
#define NHM_UNC_PERF_GLOBAL_CTL 0x391
#define NHM_UNC_FIXED_CTR 0x394
#define NHM_UNC_FIXED_CTR_CTRL 0x395

/* NHM uncore global control */
#define NHM_UNC_GLOBAL_CTL_EN_PC_ALL ((1ULL << 8) - 1)
#define NHM_UNC_GLOBAL_CTL_EN_FC (1ULL << 32)

/* NHM uncore register */
#define NHM_UNC_PERFEVTSEL0 0x3c0
#define NHM_UNC_UNCORE_PMC0 0x3b0

/* SNB-EP Box level control */
#define SNBEP_PMON_BOX_CTL_RST_CTRL (1 << 0)
#define SNBEP_PMON_BOX_CTL_RST_CTRS (1 << 1)
#define SNBEP_PMON_BOX_CTL_FRZ (1 << 8)
#define SNBEP_PMON_BOX_CTL_FRZ_EN (1 << 16)
#define SNBEP_PMON_BOX_CTL_INT (SNBEP_PMON_BOX_CTL_RST_CTRL | \
                                SNBEP_PMON_BOX_CTL_RST_CTRS | \
                                SNBEP_PMON_BOX_CTL_FRZ_EN)
/* SNB-EP event control */
#define SNBEP_PMON_CTL_EV_SEL_MASK 0x000000ff
#define SNBEP_PMON_CTL_UMASK_MASK 0x0000ff00
#define SNBEP_PMON_CTL_RST (1 << 17)
#define SNBEP_PMON_CTL_EDGE_DET (1 << 18)
#define SNBEP_PMON_CTL_EV_SEL_EXT (1 << 21) /* only for QPI */
#define SNBEP_PMON_CTL_EN (1 << 22)
#define SNBEP_PMON_CTL_INVERT (1 << 23)
#define SNBEP_PMON_CTL_TRESH_MASK 0xff000000
#define SNBEP_PMON_RAW_EVENT_MASK (SNBEP_PMON_CTL_EV_SEL_MASK | \
                                   SNBEP_PMON_CTL_UMASK_MASK | \
                                   SNBEP_PMON_CTL_EDGE_DET | \
                                   SNBEP_PMON_CTL_INVERT | \
                                   SNBEP_PMON_CTL_TRESH_MASK)

/* SNB-EP Ubox event control */
#define SNBEP_U_MSR_PMON_CTL_TRESH_MASK 0x1f000000
#define SNBEP_U_MSR_PMON_RAW_EVENT_MASK \
                                (SNBEP_PMON_CTL_EV_SEL_MASK | \
                                 SNBEP_PMON_CTL_UMASK_MASK | \
                                 SNBEP_PMON_CTL_EDGE_DET | \
                                 SNBEP_PMON_CTL_INVERT | \
                                 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)

#define SNBEP_CBO_PMON_CTL_TID_EN (1 << 19)
#define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK (SNBEP_PMON_RAW_EVENT_MASK | \
                                           SNBEP_CBO_PMON_CTL_TID_EN)

/* SNB-EP PCU event control */
#define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK 0x0000c000
#define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK 0x1f000000
#define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT (1 << 30)
#define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET (1 << 31)
#define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK \
                                (SNBEP_PMON_CTL_EV_SEL_MASK | \
                                 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
                                 SNBEP_PMON_CTL_EDGE_DET | \
                                 SNBEP_PMON_CTL_INVERT | \
                                 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
                                 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
                                 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)

/* SNB-EP pci control register */
#define SNBEP_PCI_PMON_BOX_CTL 0xf4
#define SNBEP_PCI_PMON_CTL0 0xd8
/* SNB-EP pci counter register */
#define SNBEP_PCI_PMON_CTR0 0xa0

/* SNB-EP home agent register */
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0 0x40
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1 0x44
#define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH 0x48
/* SNB-EP memory controller register */
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL 0xf0
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR 0xd0
/* SNB-EP QPI register */
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0 0x228
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1 0x22c
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK0 0x238
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK1 0x23c

/* SNB-EP Ubox register */
#define SNBEP_U_MSR_PMON_CTR0 0xc16
#define SNBEP_U_MSR_PMON_CTL0 0xc10

#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL 0xc08
#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR 0xc09

/* SNB-EP Cbo register */
#define SNBEP_C0_MSR_PMON_CTR0 0xd16
#define SNBEP_C0_MSR_PMON_CTL0 0xd10
#define SNBEP_C0_MSR_PMON_BOX_CTL 0xd04
#define SNBEP_C0_MSR_PMON_BOX_FILTER 0xd14
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_MASK 0xfffffc1f
#define SNBEP_CBO_MSR_OFFSET 0x20

/* SNB-EP PCU register */
#define SNBEP_PCU_MSR_PMON_CTR0 0xc36
#define SNBEP_PCU_MSR_PMON_CTL0 0xc30
#define SNBEP_PCU_MSR_PMON_BOX_CTL 0xc24
#define SNBEP_PCU_MSR_PMON_BOX_FILTER 0xc34
#define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK 0xffffffff
#define SNBEP_PCU_MSR_CORE_C3_CTR 0x3fc
#define SNBEP_PCU_MSR_CORE_C6_CTR 0x3fd

/* NHM-EX event control */
#define NHMEX_PMON_CTL_EV_SEL_MASK 0x000000ff
#define NHMEX_PMON_CTL_UMASK_MASK 0x0000ff00
#define NHMEX_PMON_CTL_EN_BIT0 (1 << 0)
#define NHMEX_PMON_CTL_EDGE_DET (1 << 18)
#define NHMEX_PMON_CTL_PMI_EN (1 << 20)
#define NHMEX_PMON_CTL_EN_BIT22 (1 << 22)
#define NHMEX_PMON_CTL_INVERT (1 << 23)
#define NHMEX_PMON_CTL_TRESH_MASK 0xff000000
#define NHMEX_PMON_RAW_EVENT_MASK (NHMEX_PMON_CTL_EV_SEL_MASK | \
                                   NHMEX_PMON_CTL_UMASK_MASK | \
                                   NHMEX_PMON_CTL_EDGE_DET | \
                                   NHMEX_PMON_CTL_INVERT | \
                                   NHMEX_PMON_CTL_TRESH_MASK)

/* NHM-EX Ubox */
#define NHMEX_U_MSR_PMON_GLOBAL_CTL 0xc00
#define NHMEX_U_MSR_PMON_CTR 0xc11
#define NHMEX_U_MSR_PMON_EV_SEL 0xc10

#define NHMEX_U_PMON_GLOBAL_EN (1 << 0)
#define NHMEX_U_PMON_GLOBAL_PMI_CORE_SEL 0x0000001e
#define NHMEX_U_PMON_GLOBAL_EN_ALL (1 << 28)
#define NHMEX_U_PMON_GLOBAL_RST_ALL (1 << 29)
#define NHMEX_U_PMON_GLOBAL_FRZ_ALL (1 << 31)

#define NHMEX_U_PMON_RAW_EVENT_MASK \
                (NHMEX_PMON_CTL_EV_SEL_MASK | \
                 NHMEX_PMON_CTL_EDGE_DET)

/* NHM-EX Cbox */
#define NHMEX_C0_MSR_PMON_GLOBAL_CTL 0xd00
#define NHMEX_C0_MSR_PMON_CTR0 0xd11
#define NHMEX_C0_MSR_PMON_EV_SEL0 0xd10
#define NHMEX_C_MSR_OFFSET 0x20

/* NHM-EX Bbox */
#define NHMEX_B0_MSR_PMON_GLOBAL_CTL 0xc20
#define NHMEX_B0_MSR_PMON_CTR0 0xc31
#define NHMEX_B0_MSR_PMON_CTL0 0xc30
#define NHMEX_B_MSR_OFFSET 0x40
#define NHMEX_B0_MSR_MATCH 0xe45
#define NHMEX_B0_MSR_MASK 0xe46
#define NHMEX_B1_MSR_MATCH 0xe4d
#define NHMEX_B1_MSR_MASK 0xe4e

#define NHMEX_B_PMON_CTL_EN (1 << 0)
#define NHMEX_B_PMON_CTL_EV_SEL_SHIFT 1
#define NHMEX_B_PMON_CTL_EV_SEL_MASK \
                (0x1f << NHMEX_B_PMON_CTL_EV_SEL_SHIFT)
#define NHMEX_B_PMON_CTR_SHIFT 6
#define NHMEX_B_PMON_CTR_MASK \
                (0x3 << NHMEX_B_PMON_CTR_SHIFT)
#define NHMEX_B_PMON_RAW_EVENT_MASK \
                (NHMEX_B_PMON_CTL_EV_SEL_MASK | \
                 NHMEX_B_PMON_CTR_MASK)

/* NHM-EX Sbox */
#define NHMEX_S0_MSR_PMON_GLOBAL_CTL 0xc40
#define NHMEX_S0_MSR_PMON_CTR0 0xc51
#define NHMEX_S0_MSR_PMON_CTL0 0xc50
#define NHMEX_S_MSR_OFFSET 0x80
#define NHMEX_S0_MSR_MM_CFG 0xe48
#define NHMEX_S0_MSR_MATCH 0xe49
#define NHMEX_S0_MSR_MASK 0xe4a
#define NHMEX_S1_MSR_MM_CFG 0xe58
#define NHMEX_S1_MSR_MATCH 0xe59
#define NHMEX_S1_MSR_MASK 0xe5a

#define NHMEX_S_PMON_MM_CFG_EN (0x1ULL << 63)

/* NHM-EX Mbox */
#define NHMEX_M0_MSR_GLOBAL_CTL 0xca0
#define NHMEX_M0_MSR_PMU_DSP 0xca5
#define NHMEX_M0_MSR_PMU_ISS 0xca6
#define NHMEX_M0_MSR_PMU_MAP 0xca7
#define NHMEX_M0_MSR_PMU_MSC_THR 0xca8
#define NHMEX_M0_MSR_PMU_PGT 0xca9
#define NHMEX_M0_MSR_PMU_PLD 0xcaa
#define NHMEX_M0_MSR_PMU_ZDP_CTL_FVC 0xcab
#define NHMEX_M0_MSR_PMU_CTL0 0xcb0
#define NHMEX_M0_MSR_PMU_CNT0 0xcb1
#define NHMEX_M_MSR_OFFSET 0x40
#define NHMEX_M0_MSR_PMU_MM_CFG 0xe54
#define NHMEX_M1_MSR_PMU_MM_CFG 0xe5c

#define NHMEX_M_PMON_MM_CFG_EN (1ULL << 63)
#define NHMEX_M_PMON_ADDR_MATCH_MASK 0x3ffffffffULL
#define NHMEX_M_PMON_ADDR_MASK_MASK 0x7ffffffULL
#define NHMEX_M_PMON_ADDR_MASK_SHIFT 34

#define NHMEX_M_PMON_CTL_EN (1 << 0)
#define NHMEX_M_PMON_CTL_PMI_EN (1 << 1)
#define NHMEX_M_PMON_CTL_COUNT_MODE_SHIFT 2
#define NHMEX_M_PMON_CTL_COUNT_MODE_MASK \
                (0x3 << NHMEX_M_PMON_CTL_COUNT_MODE_SHIFT)
#define NHMEX_M_PMON_CTL_STORAGE_MODE_SHIFT 4
#define NHMEX_M_PMON_CTL_STORAGE_MODE_MASK \
                (0x3 << NHMEX_M_PMON_CTL_STORAGE_MODE_SHIFT)
#define NHMEX_M_PMON_CTL_WRAP_MODE (1 << 6)
#define NHMEX_M_PMON_CTL_FLAG_MODE (1 << 7)
#define NHMEX_M_PMON_CTL_INC_SEL_SHIFT 9
#define NHMEX_M_PMON_CTL_INC_SEL_MASK \
                (0x1f << NHMEX_M_PMON_CTL_INC_SEL_SHIFT)
#define NHMEX_M_PMON_CTL_SET_FLAG_SEL_SHIFT 19
#define NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK \
                (0x7 << NHMEX_M_PMON_CTL_SET_FLAG_SEL_SHIFT)
#define NHMEX_M_PMON_RAW_EVENT_MASK \
                (NHMEX_M_PMON_CTL_COUNT_MODE_MASK | \
                 NHMEX_M_PMON_CTL_STORAGE_MODE_MASK | \
                 NHMEX_M_PMON_CTL_WRAP_MODE | \
                 NHMEX_M_PMON_CTL_FLAG_MODE | \
                 NHMEX_M_PMON_CTL_INC_SEL_MASK | \
                 NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK)


#define NHMEX_M_PMON_ZDP_CTL_FVC_FVID_MASK 0x1f
#define NHMEX_M_PMON_ZDP_CTL_FVC_BCMD_MASK (0x7 << 5)
#define NHMEX_M_PMON_ZDP_CTL_FVC_RSP_MASK (0x7 << 8)
#define NHMEX_M_PMON_ZDP_CTL_FVC_PBOX_INIT_ERR (1 << 23)
#define NHMEX_M_PMON_ZDP_CTL_FVC_MASK \
                (NHMEX_M_PMON_ZDP_CTL_FVC_FVID_MASK | \
                 NHMEX_M_PMON_ZDP_CTL_FVC_BCMD_MASK | \
                 NHMEX_M_PMON_ZDP_CTL_FVC_RSP_MASK | \
                 NHMEX_M_PMON_ZDP_CTL_FVC_PBOX_INIT_ERR)
#define NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(n) (0x7 << (11 + 3 * (n)))

/*
 * Use bits 9~13 to select the event if bit 7 is not set;
 * otherwise use bits 19~21 to select the event.
 */
#define MBOX_INC_SEL(x) ((x) << NHMEX_M_PMON_CTL_INC_SEL_SHIFT)
#define MBOX_SET_FLAG_SEL(x) (((x) << NHMEX_M_PMON_CTL_SET_FLAG_SEL_SHIFT) | \
                                NHMEX_M_PMON_CTL_FLAG_MODE)
#define MBOX_INC_SEL_MASK (NHMEX_M_PMON_CTL_INC_SEL_MASK | \
                           NHMEX_M_PMON_CTL_FLAG_MODE)
#define MBOX_SET_FLAG_SEL_MASK (NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK | \
                                NHMEX_M_PMON_CTL_FLAG_MODE)
#define MBOX_INC_SEL_EXTAR_REG(c, r) \
                EVENT_EXTRA_REG(MBOX_INC_SEL(c), NHMEX_M0_MSR_PMU_##r, \
                                MBOX_INC_SEL_MASK, (u64)-1, NHMEX_M_##r)
#define MBOX_SET_FLAG_SEL_EXTRA_REG(c, r) \
                EVENT_EXTRA_REG(MBOX_SET_FLAG_SEL(c), NHMEX_M0_MSR_PMU_##r, \
                                MBOX_SET_FLAG_SEL_MASK, \
                                (u64)-1, NHMEX_M_##r)
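
/*
 * Hypothetical usage (added for illustration, not taken from the original
 * file): MBOX_INC_SEL_EXTAR_REG(0x3, DSP) expands to an EVENT_EXTRA_REG
 * entry whose event match is MBOX_INC_SEL(0x3) (inc_sel == 0x3 with the
 * flag-mode bit clear), whose register is NHMEX_M0_MSR_PMU_DSP and whose
 * index token is NHMEX_M_DSP.
 */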

/* NHM-EX Rbox */
#define NHMEX_R_MSR_GLOBAL_CTL 0xe00
#define NHMEX_R_MSR_PMON_CTL0 0xe10
#define NHMEX_R_MSR_PMON_CNT0 0xe11
#define NHMEX_R_MSR_OFFSET 0x20

#define NHMEX_R_MSR_PORTN_QLX_CFG(n) \
                ((n) < 4 ? (0xe0c + (n)) : (0xe2c + (n) - 4))
#define NHMEX_R_MSR_PORTN_IPERF_CFG0(n) (0xe04 + (n))
#define NHMEX_R_MSR_PORTN_IPERF_CFG1(n) (0xe24 + (n))
#define NHMEX_R_MSR_PORTN_XBR_OFFSET(n) \
                (((n) < 4 ? 0 : 0x10) + (n) * 4)
#define NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(n) \
                (0xe60 + NHMEX_R_MSR_PORTN_XBR_OFFSET(n))
#define NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(n) \
                (NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(n) + 1)
#define NHMEX_R_MSR_PORTN_XBR_SET1_MASK(n) \
                (NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(n) + 2)
#define NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(n) \
                (0xe70 + NHMEX_R_MSR_PORTN_XBR_OFFSET(n))
#define NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(n) \
                (NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(n) + 1)
#define NHMEX_R_MSR_PORTN_XBR_SET2_MASK(n) \
                (NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(n) + 2)

#define NHMEX_R_PMON_CTL_EN (1 << 0)
#define NHMEX_R_PMON_CTL_EV_SEL_SHIFT 1
#define NHMEX_R_PMON_CTL_EV_SEL_MASK \
                (0x1f << NHMEX_R_PMON_CTL_EV_SEL_SHIFT)
#define NHMEX_R_PMON_CTL_PMI_EN (1 << 6)
#define NHMEX_R_PMON_RAW_EVENT_MASK NHMEX_R_PMON_CTL_EV_SEL_MASK

/* NHM-EX Wbox */
#define NHMEX_W_MSR_GLOBAL_CTL 0xc80
#define NHMEX_W_MSR_PMON_CNT0 0xc90
#define NHMEX_W_MSR_PMON_EVT_SEL0 0xc91
#define NHMEX_W_MSR_PMON_FIXED_CTR 0x394
#define NHMEX_W_MSR_PMON_FIXED_CTL 0x395

#define NHMEX_W_PMON_GLOBAL_FIXED_EN (1ULL << 31)

struct intel_uncore_ops;
struct intel_uncore_pmu;
struct intel_uncore_box;
struct uncore_event_desc;

struct intel_uncore_type {
        const char *name;
        int num_counters;
        int num_boxes;
        int perf_ctr_bits;
        int fixed_ctr_bits;
        unsigned perf_ctr;
        unsigned event_ctl;
        unsigned event_mask;
        unsigned fixed_ctr;
        unsigned fixed_ctl;
        unsigned box_ctl;
        unsigned msr_offset;
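        /*
         * Comment added for clarity: the flags below are unsigned bitfields;
         * a signed one-bit field would read back as 0 or -1, so keeping them
         * unsigned ensures they read back as 0 or 1 (cf. the commit subject
         * "perf/x86: Make bitfield unsigned").
         */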
        unsigned num_shared_regs:8;
        unsigned single_fixed:1;
        unsigned pair_ctr_ctl:1;
        struct event_constraint unconstrainted;
        struct event_constraint *constraints;
        struct intel_uncore_pmu *pmus;
        struct intel_uncore_ops *ops;
        struct uncore_event_desc *event_descs;
        const struct attribute_group *attr_groups[3];
};

#define format_group attr_groups[0]

struct intel_uncore_ops {
        void (*init_box)(struct intel_uncore_box *);
        void (*disable_box)(struct intel_uncore_box *);
        void (*enable_box)(struct intel_uncore_box *);
        void (*disable_event)(struct intel_uncore_box *, struct perf_event *);
        void (*enable_event)(struct intel_uncore_box *, struct perf_event *);
        u64 (*read_counter)(struct intel_uncore_box *, struct perf_event *);
        int (*hw_config)(struct intel_uncore_box *, struct perf_event *);
        struct event_constraint *(*get_constraint)(struct intel_uncore_box *,
                                                    struct perf_event *);
        void (*put_constraint)(struct intel_uncore_box *, struct perf_event *);
};

struct intel_uncore_pmu {
        struct pmu pmu;
        char name[UNCORE_PMU_NAME_LEN];
        int pmu_idx;
        int func_id;
        struct intel_uncore_type *type;
        struct intel_uncore_box ** __percpu box;
        struct list_head box_list;
};

struct intel_uncore_extra_reg {
        raw_spinlock_t lock;
        u64 config, config1, config2;
        atomic_t ref;
};

struct intel_uncore_box {
        int phys_id;
        int n_active;   /* number of active events */
        int n_events;
        int cpu;        /* cpu to collect events */
        unsigned long flags;
        atomic_t refcnt;
        struct perf_event *events[UNCORE_PMC_IDX_MAX];
        struct perf_event *event_list[UNCORE_PMC_IDX_MAX];
        unsigned long active_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
        u64 tags[UNCORE_PMC_IDX_MAX];
        struct pci_dev *pci_dev;
        struct intel_uncore_pmu *pmu;
        struct hrtimer hrtimer;
        struct list_head list;
        struct intel_uncore_extra_reg shared_regs[0];
};

#define UNCORE_BOX_FLAG_INITIATED 0

struct uncore_event_desc {
        struct kobj_attribute attr;
        const char *config;
};

#define INTEL_UNCORE_EVENT_DESC(_name, _config) \
{ \
        .attr = __ATTR(_name, 0444, uncore_event_show, NULL), \
        .config = _config, \
}
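
/*
 * Hypothetical usage (added for illustration, not taken from the original
 * file): an entry such as
 *      INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff"),
 * in a driver's event_descs[] array exposes a read-only "clockticks"
 * attribute whose show routine prints the string "event=0xff".
 */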

#define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format) \
static ssize_t __uncore_##_var##_show(struct kobject *kobj, \
                                struct kobj_attribute *attr, \
                                char *page) \
{ \
        BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \
        return sprintf(page, _format "\n"); \
} \
static struct kobj_attribute format_attr_##_var = \
        __ATTR(_name, 0444, __uncore_##_var##_show, NULL)
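
/*
 * Hypothetical usage (added for illustration, not taken from the original
 * file): DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7") defines
 * format_attr_event, a 0444 kobj_attribute named "event" whose show routine
 * prints "config:0-7".
 */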


static ssize_t uncore_event_show(struct kobject *kobj,
                                struct kobj_attribute *attr, char *buf)
{
        struct uncore_event_desc *event =
                container_of(attr, struct uncore_event_desc, attr);
        return sprintf(buf, "%s", event->config);
}

static inline unsigned uncore_pci_box_ctl(struct intel_uncore_box *box)
{
        return box->pmu->type->box_ctl;
}

static inline unsigned uncore_pci_fixed_ctl(struct intel_uncore_box *box)
{
        return box->pmu->type->fixed_ctl;
}

static inline unsigned uncore_pci_fixed_ctr(struct intel_uncore_box *box)
{
        return box->pmu->type->fixed_ctr;
}

static inline
unsigned uncore_pci_event_ctl(struct intel_uncore_box *box, int idx)
{
        return idx * 4 + box->pmu->type->event_ctl;
}

static inline
unsigned uncore_pci_perf_ctr(struct intel_uncore_box *box, int idx)
{
        return idx * 8 + box->pmu->type->perf_ctr;
}
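
/*
 * Comment added for clarity: for PCI-based uncore boxes the event control
 * registers are laid out 4 bytes apart and the counter registers 8 bytes
 * apart, hence the idx * 4 and idx * 8 offsets above.
 */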

static inline
unsigned uncore_msr_box_ctl(struct intel_uncore_box *box)
{
        if (!box->pmu->type->box_ctl)
                return 0;
        return box->pmu->type->box_ctl +
                box->pmu->type->msr_offset * box->pmu->pmu_idx;
}

static inline
unsigned uncore_msr_fixed_ctl(struct intel_uncore_box *box)
{
        if (!box->pmu->type->fixed_ctl)
                return 0;
        return box->pmu->type->fixed_ctl +
                box->pmu->type->msr_offset * box->pmu->pmu_idx;
}

static inline
unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box)
{
        return box->pmu->type->fixed_ctr +
                box->pmu->type->msr_offset * box->pmu->pmu_idx;
}

static inline
unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx)
{
        return box->pmu->type->event_ctl +
                (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
                box->pmu->type->msr_offset * box->pmu->pmu_idx;
}

static inline
unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx)
{
        return box->pmu->type->perf_ctr +
                (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
                box->pmu->type->msr_offset * box->pmu->pmu_idx;
}
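
/*
 * Comment added for clarity: when pair_ctr_ctl is set, the box interleaves
 * each counter's control and counter MSRs, so the stride per counter index
 * is 2; otherwise consecutive indices map to consecutive MSRs. In both
 * cases the box instance is selected by msr_offset * pmu_idx.
 */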

static inline
unsigned uncore_fixed_ctl(struct intel_uncore_box *box)
{
        if (box->pci_dev)
                return uncore_pci_fixed_ctl(box);
        else
                return uncore_msr_fixed_ctl(box);
}

static inline
unsigned uncore_fixed_ctr(struct intel_uncore_box *box)
{
        if (box->pci_dev)
                return uncore_pci_fixed_ctr(box);
        else
                return uncore_msr_fixed_ctr(box);
}

static inline
unsigned uncore_event_ctl(struct intel_uncore_box *box, int idx)
{
        if (box->pci_dev)
                return uncore_pci_event_ctl(box, idx);
        else
                return uncore_msr_event_ctl(box, idx);
}

static inline
unsigned uncore_perf_ctr(struct intel_uncore_box *box, int idx)
{
        if (box->pci_dev)
                return uncore_pci_perf_ctr(box, idx);
        else
                return uncore_msr_perf_ctr(box, idx);
}

static inline int uncore_perf_ctr_bits(struct intel_uncore_box *box)
{
        return box->pmu->type->perf_ctr_bits;
}

static inline int uncore_fixed_ctr_bits(struct intel_uncore_box *box)
{
        return box->pmu->type->fixed_ctr_bits;
}

static inline int uncore_num_counters(struct intel_uncore_box *box)
{
        return box->pmu->type->num_counters;
}

static inline void uncore_disable_box(struct intel_uncore_box *box)
{
        if (box->pmu->type->ops->disable_box)
                box->pmu->type->ops->disable_box(box);
}

static inline void uncore_enable_box(struct intel_uncore_box *box)
{
        if (box->pmu->type->ops->enable_box)
                box->pmu->type->ops->enable_box(box);
}

static inline void uncore_disable_event(struct intel_uncore_box *box,
                                struct perf_event *event)
{
        box->pmu->type->ops->disable_event(box, event);
}

static inline void uncore_enable_event(struct intel_uncore_box *box,
                                struct perf_event *event)
{
        box->pmu->type->ops->enable_event(box, event);
}

static inline u64 uncore_read_counter(struct intel_uncore_box *box,
                                struct perf_event *event)
{
        return box->pmu->type->ops->read_counter(box, event);
}

static inline void uncore_box_init(struct intel_uncore_box *box)
{
        if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
                if (box->pmu->type->ops->init_box)
                        box->pmu->type->ops->init_box(box);
        }
}
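
/*
 * Comment added for clarity: test_and_set_bit() makes the check-and-set of
 * UNCORE_BOX_FLAG_INITIATED atomic, so the type's init_box() callback runs
 * at most once per box even if uncore_box_init() is called concurrently.
 */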

static inline bool uncore_box_is_fake(struct intel_uncore_box *box)
{
        return (box->phys_id < 0);
}