/* SandyBridge-EP/IvyTown uncore support */

/* SNB-EP Box level control */
#define SNBEP_PMON_BOX_CTL_RST_CTRL	(1 << 0)	/* reset control registers */
#define SNBEP_PMON_BOX_CTL_RST_CTRS	(1 << 1)	/* reset counter registers */
#define SNBEP_PMON_BOX_CTL_FRZ		(1 << 8)	/* freeze all counters in the box */
#define SNBEP_PMON_BOX_CTL_FRZ_EN	(1 << 16)	/* enable the freeze capability */
/* initial box-control value: reset everything and allow freezing */
#define SNBEP_PMON_BOX_CTL_INT		(SNBEP_PMON_BOX_CTL_RST_CTRL | \
					 SNBEP_PMON_BOX_CTL_RST_CTRS | \
					 SNBEP_PMON_BOX_CTL_FRZ_EN)
/* SNB-EP event control */
#define SNBEP_PMON_CTL_EV_SEL_MASK	0x000000ff	/* event select */
#define SNBEP_PMON_CTL_UMASK_MASK	0x0000ff00	/* unit mask */
#define SNBEP_PMON_CTL_RST		(1 << 17)
#define SNBEP_PMON_CTL_EDGE_DET		(1 << 18)
#define SNBEP_PMON_CTL_EV_SEL_EXT	(1 << 21)	/* extended event select bit (QPI) */
#define SNBEP_PMON_CTL_EN		(1 << 22)
#define SNBEP_PMON_CTL_INVERT		(1 << 23)
#define SNBEP_PMON_CTL_TRESH_MASK	0xff000000	/* threshold */
/* bits a user may set via the raw event encoding (perf "config") */
#define SNBEP_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_INVERT | \
					 SNBEP_PMON_CTL_TRESH_MASK)
/* SNB-EP Ubox event control: the Ubox threshold field is only 5 bits wide */
#define SNBEP_U_MSR_PMON_CTL_TRESH_MASK		0x1f000000
#define SNBEP_U_MSR_PMON_RAW_EVENT_MASK		\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PMON_CTL_UMASK_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PMON_CTL_INVERT | \
				 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)

/* Cbo boxes additionally support per-thread (TID) filtering */
#define SNBEP_CBO_PMON_CTL_TID_EN		(1 << 19)
#define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK	(SNBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

/* SNB-EP PCU event control: occupancy sub-counter select/invert/edge fields */
#define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK	0x0000c000
#define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK	0x1f000000
#define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT	(1 << 30)
#define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET	(1 << 31)
#define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PMON_CTL_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)

/* QPI boxes use the extended event-select bit on top of the common mask */
#define SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_RAW_EVENT_MASK | \
				 SNBEP_PMON_CTL_EV_SEL_EXT)
/* SNB-EP pci control register (PCI config-space offsets) */
#define SNBEP_PCI_PMON_BOX_CTL			0xf4
#define SNBEP_PCI_PMON_CTL0			0xd8
/* SNB-EP pci counter register */
#define SNBEP_PCI_PMON_CTR0			0xa0

/* SNB-EP home agent register: address/opcode match filters */
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0	0x40
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1	0x44
#define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH	0x48
/* SNB-EP memory controller register: per-channel fixed (DCLK) counter */
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL		0xf0
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR		0xd0
/* SNB-EP QPI register: packet match/mask pairs (64 bits each, split in two dwords) */
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0		0x228
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1		0x22c
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK0		0x238
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK1		0x23c

/* SNB-EP Ubox register (MSR addresses) */
#define SNBEP_U_MSR_PMON_CTR0			0xc16
#define SNBEP_U_MSR_PMON_CTL0			0xc10

#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL		0xc08
#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR		0xc09

/* SNB-EP Cbo register: Cbo n's registers are at base + n * SNBEP_CBO_MSR_OFFSET */
#define SNBEP_C0_MSR_PMON_CTR0			0xd16
#define SNBEP_C0_MSR_PMON_CTL0			0xd10
#define SNBEP_C0_MSR_PMON_BOX_CTL		0xd04
#define SNBEP_C0_MSR_PMON_BOX_FILTER		0xd14
#define SNBEP_CBO_MSR_OFFSET			0x20

/* fields of the Cbo filter register */
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID	0x1f
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID	0x3fc00
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE	0x7c0000
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC	0xff800000
/*
 * Describe one Cbo extra (filter) register binding: events matching
 * (config & m) == e need filter field(s) i of the Cbo filter MSR.
 * Used to build snbep_uncore_cbox_extra_regs[].
 */
#define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) {	\
	.event = (e),				\
	.msr = SNBEP_C0_MSR_PMON_BOX_FILTER,	\
	.config_mask = (m),			\
	.idx = (i)				\
}
/* SNB-EP PCU register (MSR addresses) */
#define SNBEP_PCU_MSR_PMON_CTR0			0xc36
#define SNBEP_PCU_MSR_PMON_CTL0			0xc30
#define SNBEP_PCU_MSR_PMON_BOX_CTL		0xc24
#define SNBEP_PCU_MSR_PMON_BOX_FILTER		0xc34
#define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK	0xffffffff	/* four 8-bit band filters */
#define SNBEP_PCU_MSR_CORE_C3_CTR		0x3fc
#define SNBEP_PCU_MSR_CORE_C6_CTR		0x3fd
/* IVBEP event control: IvyTown has no per-box FRZ_EN; freeze is global */
#define IVBEP_PMON_BOX_CTL_INT		(SNBEP_PMON_BOX_CTL_RST_CTRL | \
					 SNBEP_PMON_BOX_CTL_RST_CTRS)
#define IVBEP_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_TRESH_MASK)
/* IVBEP Ubox global control: freeze/unfreeze every uncore box at once */
#define IVBEP_U_MSR_PMON_GLOBAL_CTL		0xc00
#define IVBEP_U_PMON_GLOBAL_FRZ_ALL		(1 << 31)
#define IVBEP_U_PMON_GLOBAL_UNFRZ_ALL		(1 << 29)

/* IVBEP Ubox event control */
#define IVBEP_U_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PMON_CTL_UMASK_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)

#define IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK	(IVBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

/* IVBEP Cbo filter register layout (64-bit, wider fields than SNB-EP) */
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_TID	(0x1fULL << 0)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 5)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE	(0x3fULL << 17)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NID	(0xffffULL << 32)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC	(0x1ffULL << 52)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_C6	(0x1ULL << 61)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NC	(0x1ULL << 62)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)

/* IVBEP home agent */
#define IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST		(1 << 16)
#define IVBEP_HA_PCI_PMON_RAW_EVENT_MASK	\
				(IVBEP_PMON_RAW_EVENT_MASK | \
				 IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST)
/* IVBEP PCU */
#define IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
/* IVBEP QPI */
#define IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK	\
				(IVBEP_PMON_RAW_EVENT_MASK | \
				 SNBEP_PMON_CTL_EV_SEL_EXT)

/* extract the i-th n-bit-wide field of x, keeping x's type */
#define __BITS_VALUE(x, i, n)  ((typeof(x))(((x) >> ((i) * (n))) & \
				((1ULL << (n)) - 1)))
/* Haswell-EP Ubox (MSR addresses) */
#define HSWEP_U_MSR_PMON_CTR0			0x709
#define HSWEP_U_MSR_PMON_CTL0			0x705
#define HSWEP_U_MSR_PMON_FILTER			0x707

#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTL		0x703
#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTR		0x704

/* Haswell-EP Ubox filter: thread id + core id */
#define HSWEP_U_MSR_PMON_BOX_FILTER_TID		(0x1 << 0)
#define HSWEP_U_MSR_PMON_BOX_FILTER_CID		(0x1fULL << 1)
#define HSWEP_U_MSR_PMON_BOX_FILTER_MASK \
					(HSWEP_U_MSR_PMON_BOX_FILTER_TID | \
					 HSWEP_U_MSR_PMON_BOX_FILTER_CID)

/* Haswell-EP Cbo */
#define HSWEP_C0_MSR_PMON_CTR0			0xe08
#define HSWEP_C0_MSR_PMON_CTL0			0xe01
#define HSWEP_C0_MSR_PMON_BOX_CTL		0xe00
#define HSWEP_C0_MSR_PMON_BOX_FILTER0		0xe05
#define HSWEP_CBO_MSR_OFFSET			0x10

/* Haswell-EP Cbo filter register layout */
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_TID	(0x3fULL << 0)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 6)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE	(0x7fULL << 17)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NID	(0xffffULL << 32)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC	(0x1ffULL << 52)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_C6	(0x1ULL << 61)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NC	(0x1ULL << 62)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)

/* Haswell-EP Sbox */
#define HSWEP_S0_MSR_PMON_CTR0			0x726
#define HSWEP_S0_MSR_PMON_CTL0			0x721
#define HSWEP_S0_MSR_PMON_BOX_CTL		0x720
#define HSWEP_SBOX_MSR_OFFSET			0xa
#define HSWEP_S_MSR_PMON_RAW_EVENT_MASK		(SNBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

/* Haswell-EP PCU */
#define HSWEP_PCU_MSR_PMON_CTR0			0x717
#define HSWEP_PCU_MSR_PMON_CTL0			0x711
#define HSWEP_PCU_MSR_PMON_BOX_CTL		0x710
#define HSWEP_PCU_MSR_PMON_BOX_FILTER		0x715

/* KNL Ubox */
#define KNL_U_MSR_PMON_RAW_EVENT_MASK \
					(SNBEP_U_MSR_PMON_RAW_EVENT_MASK | \
						SNBEP_CBO_PMON_CTL_TID_EN)
/* KNL CHA */
#define KNL_CHA_MSR_OFFSET			0xc
#define KNL_CHA_MSR_PMON_CTL_QOR		(1 << 16)
#define KNL_CHA_MSR_PMON_RAW_EVENT_MASK \
					(SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK | \
					 KNL_CHA_MSR_PMON_CTL_QOR)
#define KNL_CHA_MSR_PMON_BOX_FILTER_TID		0x1ff
#define KNL_CHA_MSR_PMON_BOX_FILTER_STATE	(7 << 18)
#define KNL_CHA_MSR_PMON_BOX_FILTER_OP		(0xfffffe2aULL << 32)
#define KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE	(0x1ULL << 32)
#define KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE	(0x1ULL << 33)
#define KNL_CHA_MSR_PMON_BOX_FILTER_NNC		(0x1ULL << 37)

/* KNL EDC/MC UCLK */
#define KNL_UCLK_MSR_PMON_CTR0_LOW		0x400
#define KNL_UCLK_MSR_PMON_CTL0			0x420
#define KNL_UCLK_MSR_PMON_BOX_CTL		0x430
#define KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW	0x44c
#define KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL	0x454
#define KNL_PMON_FIXED_CTL_EN			0x1

/* KNL EDC */
#define KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW		0xa00
#define KNL_EDC0_ECLK_MSR_PMON_CTL0		0xa20
#define KNL_EDC0_ECLK_MSR_PMON_BOX_CTL		0xa30
#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW	0xa3c
#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL	0xa44

/* KNL MC */
#define KNL_MC0_CH0_MSR_PMON_CTR0_LOW		0xb00
#define KNL_MC0_CH0_MSR_PMON_CTL0		0xb20
#define KNL_MC0_CH0_MSR_PMON_BOX_CTL		0xb30
#define KNL_MC0_CH0_MSR_PMON_FIXED_LOW		0xb3c
#define KNL_MC0_CH0_MSR_PMON_FIXED_CTL		0xb44

/* KNL IRP */
#define KNL_IRP_PCI_PMON_BOX_CTL		0xf0
#define KNL_IRP_PCI_PMON_RAW_EVENT_MASK		(SNBEP_PMON_RAW_EVENT_MASK | \
						 KNL_CHA_MSR_PMON_CTL_QOR)
/* KNL PCU: 7-bit event select, 6-bit threshold */
#define KNL_PCU_PMON_CTL_EV_SEL_MASK		0x0000007f
#define KNL_PCU_PMON_CTL_USE_OCC_CTR		(1 << 7)
#define KNL_PCU_MSR_PMON_CTL_TRESH_MASK		0x3f000000
#define KNL_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(KNL_PCU_PMON_CTL_EV_SEL_MASK | \
				 KNL_PCU_PMON_CTL_USE_OCC_CTR | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_CBO_PMON_CTL_TID_EN | \
				 SNBEP_PMON_CTL_INVERT | \
				 KNL_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
/*
 * sysfs format attributes: DEFINE_UNCORE_FORMAT_ATTR(var, name, field)
 * maps a named event-format field to a bit range of the perf_event_attr
 * config/config1/config2 words.
 */
DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(event2, event, "config:0-6");
DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
DEFINE_UNCORE_FORMAT_ATTR(use_occ_ctr, use_occ_ctr, "config:7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(thresh6, thresh, "config:24-29");
DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge_det, occ_edge_det, "config:31");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid2, filter_tid, "config1:0");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid3, filter_tid, "config1:0-5");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid4, filter_tid, "config1:0-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_cid, filter_cid, "config1:5");
DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link3, filter_link, "config1:12");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state3, filter_state, "config1:17-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_state4, filter_state, "config1:18-20");
DEFINE_UNCORE_FORMAT_ATTR(filter_local, filter_local, "config1:33");
DEFINE_UNCORE_FORMAT_ATTR(filter_all_op, filter_all_op, "config1:35");
DEFINE_UNCORE_FORMAT_ATTR(filter_nnm, filter_nnm, "config1:37");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc3, filter_opc, "config1:41-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_nc, filter_nc, "config1:62");
DEFINE_UNCORE_FORMAT_ATTR(filter_c6, filter_c6, "config1:61");
DEFINE_UNCORE_FORMAT_ATTR(filter_isoc, filter_isoc, "config1:63");
DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");
329 static void snbep_uncore_pci_disable_box(struct intel_uncore_box
*box
)
331 struct pci_dev
*pdev
= box
->pci_dev
;
332 int box_ctl
= uncore_pci_box_ctl(box
);
335 if (!pci_read_config_dword(pdev
, box_ctl
, &config
)) {
336 config
|= SNBEP_PMON_BOX_CTL_FRZ
;
337 pci_write_config_dword(pdev
, box_ctl
, config
);
341 static void snbep_uncore_pci_enable_box(struct intel_uncore_box
*box
)
343 struct pci_dev
*pdev
= box
->pci_dev
;
344 int box_ctl
= uncore_pci_box_ctl(box
);
347 if (!pci_read_config_dword(pdev
, box_ctl
, &config
)) {
348 config
&= ~SNBEP_PMON_BOX_CTL_FRZ
;
349 pci_write_config_dword(pdev
, box_ctl
, config
);
353 static void snbep_uncore_pci_enable_event(struct intel_uncore_box
*box
, struct perf_event
*event
)
355 struct pci_dev
*pdev
= box
->pci_dev
;
356 struct hw_perf_event
*hwc
= &event
->hw
;
358 pci_write_config_dword(pdev
, hwc
->config_base
, hwc
->config
| SNBEP_PMON_CTL_EN
);
361 static void snbep_uncore_pci_disable_event(struct intel_uncore_box
*box
, struct perf_event
*event
)
363 struct pci_dev
*pdev
= box
->pci_dev
;
364 struct hw_perf_event
*hwc
= &event
->hw
;
366 pci_write_config_dword(pdev
, hwc
->config_base
, hwc
->config
);
369 static u64
snbep_uncore_pci_read_counter(struct intel_uncore_box
*box
, struct perf_event
*event
)
371 struct pci_dev
*pdev
= box
->pci_dev
;
372 struct hw_perf_event
*hwc
= &event
->hw
;
375 pci_read_config_dword(pdev
, hwc
->event_base
, (u32
*)&count
);
376 pci_read_config_dword(pdev
, hwc
->event_base
+ 4, (u32
*)&count
+ 1);
381 static void snbep_uncore_pci_init_box(struct intel_uncore_box
*box
)
383 struct pci_dev
*pdev
= box
->pci_dev
;
384 int box_ctl
= uncore_pci_box_ctl(box
);
386 pci_write_config_dword(pdev
, box_ctl
, SNBEP_PMON_BOX_CTL_INT
);
389 static void snbep_uncore_msr_disable_box(struct intel_uncore_box
*box
)
394 msr
= uncore_msr_box_ctl(box
);
397 config
|= SNBEP_PMON_BOX_CTL_FRZ
;
402 static void snbep_uncore_msr_enable_box(struct intel_uncore_box
*box
)
407 msr
= uncore_msr_box_ctl(box
);
410 config
&= ~SNBEP_PMON_BOX_CTL_FRZ
;
415 static void snbep_uncore_msr_enable_event(struct intel_uncore_box
*box
, struct perf_event
*event
)
417 struct hw_perf_event
*hwc
= &event
->hw
;
418 struct hw_perf_event_extra
*reg1
= &hwc
->extra_reg
;
420 if (reg1
->idx
!= EXTRA_REG_NONE
)
421 wrmsrl(reg1
->reg
, uncore_shared_reg_config(box
, 0));
423 wrmsrl(hwc
->config_base
, hwc
->config
| SNBEP_PMON_CTL_EN
);
426 static void snbep_uncore_msr_disable_event(struct intel_uncore_box
*box
,
427 struct perf_event
*event
)
429 struct hw_perf_event
*hwc
= &event
->hw
;
431 wrmsrl(hwc
->config_base
, hwc
->config
);
434 static void snbep_uncore_msr_init_box(struct intel_uncore_box
*box
)
436 unsigned msr
= uncore_msr_box_ctl(box
);
439 wrmsrl(msr
, SNBEP_PMON_BOX_CTL_INT
);
442 static struct attribute
*snbep_uncore_formats_attr
[] = {
443 &format_attr_event
.attr
,
444 &format_attr_umask
.attr
,
445 &format_attr_edge
.attr
,
446 &format_attr_inv
.attr
,
447 &format_attr_thresh8
.attr
,
451 static struct attribute
*snbep_uncore_ubox_formats_attr
[] = {
452 &format_attr_event
.attr
,
453 &format_attr_umask
.attr
,
454 &format_attr_edge
.attr
,
455 &format_attr_inv
.attr
,
456 &format_attr_thresh5
.attr
,
460 static struct attribute
*snbep_uncore_cbox_formats_attr
[] = {
461 &format_attr_event
.attr
,
462 &format_attr_umask
.attr
,
463 &format_attr_edge
.attr
,
464 &format_attr_tid_en
.attr
,
465 &format_attr_inv
.attr
,
466 &format_attr_thresh8
.attr
,
467 &format_attr_filter_tid
.attr
,
468 &format_attr_filter_nid
.attr
,
469 &format_attr_filter_state
.attr
,
470 &format_attr_filter_opc
.attr
,
474 static struct attribute
*snbep_uncore_pcu_formats_attr
[] = {
475 &format_attr_event
.attr
,
476 &format_attr_occ_sel
.attr
,
477 &format_attr_edge
.attr
,
478 &format_attr_inv
.attr
,
479 &format_attr_thresh5
.attr
,
480 &format_attr_occ_invert
.attr
,
481 &format_attr_occ_edge
.attr
,
482 &format_attr_filter_band0
.attr
,
483 &format_attr_filter_band1
.attr
,
484 &format_attr_filter_band2
.attr
,
485 &format_attr_filter_band3
.attr
,
489 static struct attribute
*snbep_uncore_qpi_formats_attr
[] = {
490 &format_attr_event_ext
.attr
,
491 &format_attr_umask
.attr
,
492 &format_attr_edge
.attr
,
493 &format_attr_inv
.attr
,
494 &format_attr_thresh8
.attr
,
495 &format_attr_match_rds
.attr
,
496 &format_attr_match_rnid30
.attr
,
497 &format_attr_match_rnid4
.attr
,
498 &format_attr_match_dnid
.attr
,
499 &format_attr_match_mc
.attr
,
500 &format_attr_match_opc
.attr
,
501 &format_attr_match_vnw
.attr
,
502 &format_attr_match0
.attr
,
503 &format_attr_match1
.attr
,
504 &format_attr_mask_rds
.attr
,
505 &format_attr_mask_rnid30
.attr
,
506 &format_attr_mask_rnid4
.attr
,
507 &format_attr_mask_dnid
.attr
,
508 &format_attr_mask_mc
.attr
,
509 &format_attr_mask_opc
.attr
,
510 &format_attr_mask_vnw
.attr
,
511 &format_attr_mask0
.attr
,
512 &format_attr_mask1
.attr
,
516 static struct uncore_event_desc snbep_uncore_imc_events
[] = {
517 INTEL_UNCORE_EVENT_DESC(clockticks
, "event=0xff,umask=0x00"),
518 INTEL_UNCORE_EVENT_DESC(cas_count_read
, "event=0x04,umask=0x03"),
519 INTEL_UNCORE_EVENT_DESC(cas_count_read
.scale
, "6.103515625e-5"),
520 INTEL_UNCORE_EVENT_DESC(cas_count_read
.unit
, "MiB"),
521 INTEL_UNCORE_EVENT_DESC(cas_count_write
, "event=0x04,umask=0x0c"),
522 INTEL_UNCORE_EVENT_DESC(cas_count_write
.scale
, "6.103515625e-5"),
523 INTEL_UNCORE_EVENT_DESC(cas_count_write
.unit
, "MiB"),
524 { /* end: all zeroes */ },
527 static struct uncore_event_desc snbep_uncore_qpi_events
[] = {
528 INTEL_UNCORE_EVENT_DESC(clockticks
, "event=0x14"),
529 INTEL_UNCORE_EVENT_DESC(txl_flits_active
, "event=0x00,umask=0x06"),
530 INTEL_UNCORE_EVENT_DESC(drs_data
, "event=0x102,umask=0x08"),
531 INTEL_UNCORE_EVENT_DESC(ncb_data
, "event=0x103,umask=0x04"),
532 { /* end: all zeroes */ },
535 static struct attribute_group snbep_uncore_format_group
= {
537 .attrs
= snbep_uncore_formats_attr
,
540 static struct attribute_group snbep_uncore_ubox_format_group
= {
542 .attrs
= snbep_uncore_ubox_formats_attr
,
545 static struct attribute_group snbep_uncore_cbox_format_group
= {
547 .attrs
= snbep_uncore_cbox_formats_attr
,
550 static struct attribute_group snbep_uncore_pcu_format_group
= {
552 .attrs
= snbep_uncore_pcu_formats_attr
,
555 static struct attribute_group snbep_uncore_qpi_format_group
= {
557 .attrs
= snbep_uncore_qpi_formats_attr
,
560 #define __SNBEP_UNCORE_MSR_OPS_COMMON_INIT() \
561 .disable_box = snbep_uncore_msr_disable_box, \
562 .enable_box = snbep_uncore_msr_enable_box, \
563 .disable_event = snbep_uncore_msr_disable_event, \
564 .enable_event = snbep_uncore_msr_enable_event, \
565 .read_counter = uncore_msr_read_counter
567 #define SNBEP_UNCORE_MSR_OPS_COMMON_INIT() \
568 __SNBEP_UNCORE_MSR_OPS_COMMON_INIT(), \
569 .init_box = snbep_uncore_msr_init_box \
571 static struct intel_uncore_ops snbep_uncore_msr_ops = {
572 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
575 #define SNBEP_UNCORE_PCI_OPS_COMMON_INIT() \
576 .init_box = snbep_uncore_pci_init_box, \
577 .disable_box = snbep_uncore_pci_disable_box, \
578 .enable_box = snbep_uncore_pci_enable_box, \
579 .disable_event = snbep_uncore_pci_disable_event, \
580 .read_counter = snbep_uncore_pci_read_counter
582 static struct intel_uncore_ops snbep_uncore_pci_ops
= {
583 SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
584 .enable_event
= snbep_uncore_pci_enable_event
, \
587 static struct event_constraint snbep_uncore_cbox_constraints
[] = {
588 UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
589 UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
590 UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
591 UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
592 UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
593 UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
594 UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
595 UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
596 UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
597 UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
598 UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
599 UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
600 UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
601 EVENT_CONSTRAINT_OVERLAP(0x1f, 0xe, 0xff),
602 UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
603 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
604 UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
605 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
606 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
607 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
608 UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
609 UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
610 UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
611 UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
612 UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
613 UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
617 static struct event_constraint snbep_uncore_r2pcie_constraints
[] = {
618 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
619 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
620 UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
621 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
622 UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
623 UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
624 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
625 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
626 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
627 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
631 static struct event_constraint snbep_uncore_r3qpi_constraints
[] = {
632 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
633 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
634 UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
635 UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
636 UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
637 UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
638 UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
639 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
640 UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
641 UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
642 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
643 UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
644 UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
645 UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
646 UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
647 UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
648 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
649 UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
650 UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
651 UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
652 UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
653 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
654 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
655 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
656 UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
657 UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
658 UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
659 UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
663 static struct intel_uncore_type snbep_uncore_ubox
= {
668 .fixed_ctr_bits
= 48,
669 .perf_ctr
= SNBEP_U_MSR_PMON_CTR0
,
670 .event_ctl
= SNBEP_U_MSR_PMON_CTL0
,
671 .event_mask
= SNBEP_U_MSR_PMON_RAW_EVENT_MASK
,
672 .fixed_ctr
= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR
,
673 .fixed_ctl
= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL
,
674 .ops
= &snbep_uncore_msr_ops
,
675 .format_group
= &snbep_uncore_ubox_format_group
,
678 static struct extra_reg snbep_uncore_cbox_extra_regs
[] = {
679 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN
,
680 SNBEP_CBO_PMON_CTL_TID_EN
, 0x1),
681 SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
682 SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
683 SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
684 SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
685 SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
686 SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
687 SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
688 SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
689 SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
690 SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
691 SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
692 SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
693 SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
694 SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
695 SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
696 SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
697 SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
698 SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
699 SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
700 SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
701 SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
702 SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
703 SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
707 static void snbep_cbox_put_constraint(struct intel_uncore_box
*box
, struct perf_event
*event
)
709 struct hw_perf_event_extra
*reg1
= &event
->hw
.extra_reg
;
710 struct intel_uncore_extra_reg
*er
= &box
->shared_regs
[0];
713 if (uncore_box_is_fake(box
))
716 for (i
= 0; i
< 5; i
++) {
717 if (reg1
->alloc
& (0x1 << i
))
718 atomic_sub(1 << (i
* 6), &er
->ref
);
723 static struct event_constraint
*
724 __snbep_cbox_get_constraint(struct intel_uncore_box
*box
, struct perf_event
*event
,
725 u64 (*cbox_filter_mask
)(int fields
))
727 struct hw_perf_event_extra
*reg1
= &event
->hw
.extra_reg
;
728 struct intel_uncore_extra_reg
*er
= &box
->shared_regs
[0];
733 if (reg1
->idx
== EXTRA_REG_NONE
)
736 raw_spin_lock_irqsave(&er
->lock
, flags
);
737 for (i
= 0; i
< 5; i
++) {
738 if (!(reg1
->idx
& (0x1 << i
)))
740 if (!uncore_box_is_fake(box
) && (reg1
->alloc
& (0x1 << i
)))
743 mask
= cbox_filter_mask(0x1 << i
);
744 if (!__BITS_VALUE(atomic_read(&er
->ref
), i
, 6) ||
745 !((reg1
->config
^ er
->config
) & mask
)) {
746 atomic_add(1 << (i
* 6), &er
->ref
);
748 er
->config
|= reg1
->config
& mask
;
754 raw_spin_unlock_irqrestore(&er
->lock
, flags
);
758 if (!uncore_box_is_fake(box
))
759 reg1
->alloc
|= alloc
;
763 for (; i
>= 0; i
--) {
764 if (alloc
& (0x1 << i
))
765 atomic_sub(1 << (i
* 6), &er
->ref
);
767 return &uncore_constraint_empty
;
770 static u64
snbep_cbox_filter_mask(int fields
)
775 mask
|= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID
;
777 mask
|= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID
;
779 mask
|= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE
;
781 mask
|= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC
;
786 static struct event_constraint
*
787 snbep_cbox_get_constraint(struct intel_uncore_box
*box
, struct perf_event
*event
)
789 return __snbep_cbox_get_constraint(box
, event
, snbep_cbox_filter_mask
);
792 static int snbep_cbox_hw_config(struct intel_uncore_box
*box
, struct perf_event
*event
)
794 struct hw_perf_event_extra
*reg1
= &event
->hw
.extra_reg
;
795 struct extra_reg
*er
;
798 for (er
= snbep_uncore_cbox_extra_regs
; er
->msr
; er
++) {
799 if (er
->event
!= (event
->hw
.config
& er
->config_mask
))
805 reg1
->reg
= SNBEP_C0_MSR_PMON_BOX_FILTER
+
806 SNBEP_CBO_MSR_OFFSET
* box
->pmu
->pmu_idx
;
807 reg1
->config
= event
->attr
.config1
& snbep_cbox_filter_mask(idx
);
813 static struct intel_uncore_ops snbep_uncore_cbox_ops
= {
814 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
815 .hw_config
= snbep_cbox_hw_config
,
816 .get_constraint
= snbep_cbox_get_constraint
,
817 .put_constraint
= snbep_cbox_put_constraint
,
820 static struct intel_uncore_type snbep_uncore_cbox
= {
825 .event_ctl
= SNBEP_C0_MSR_PMON_CTL0
,
826 .perf_ctr
= SNBEP_C0_MSR_PMON_CTR0
,
827 .event_mask
= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK
,
828 .box_ctl
= SNBEP_C0_MSR_PMON_BOX_CTL
,
829 .msr_offset
= SNBEP_CBO_MSR_OFFSET
,
830 .num_shared_regs
= 1,
831 .constraints
= snbep_uncore_cbox_constraints
,
832 .ops
= &snbep_uncore_cbox_ops
,
833 .format_group
= &snbep_uncore_cbox_format_group
,
836 static u64
snbep_pcu_alter_er(struct perf_event
*event
, int new_idx
, bool modify
)
838 struct hw_perf_event
*hwc
= &event
->hw
;
839 struct hw_perf_event_extra
*reg1
= &hwc
->extra_reg
;
840 u64 config
= reg1
->config
;
842 if (new_idx
> reg1
->idx
)
843 config
<<= 8 * (new_idx
- reg1
->idx
);
845 config
>>= 8 * (reg1
->idx
- new_idx
);
848 hwc
->config
+= new_idx
- reg1
->idx
;
849 reg1
->config
= config
;
855 static struct event_constraint
*
856 snbep_pcu_get_constraint(struct intel_uncore_box
*box
, struct perf_event
*event
)
858 struct hw_perf_event_extra
*reg1
= &event
->hw
.extra_reg
;
859 struct intel_uncore_extra_reg
*er
= &box
->shared_regs
[0];
862 u64 mask
, config1
= reg1
->config
;
865 if (reg1
->idx
== EXTRA_REG_NONE
||
866 (!uncore_box_is_fake(box
) && reg1
->alloc
))
869 mask
= 0xffULL
<< (idx
* 8);
870 raw_spin_lock_irqsave(&er
->lock
, flags
);
871 if (!__BITS_VALUE(atomic_read(&er
->ref
), idx
, 8) ||
872 !((config1
^ er
->config
) & mask
)) {
873 atomic_add(1 << (idx
* 8), &er
->ref
);
875 er
->config
|= config1
& mask
;
878 raw_spin_unlock_irqrestore(&er
->lock
, flags
);
882 if (idx
!= reg1
->idx
) {
883 config1
= snbep_pcu_alter_er(event
, idx
, false);
886 return &uncore_constraint_empty
;
889 if (!uncore_box_is_fake(box
)) {
890 if (idx
!= reg1
->idx
)
891 snbep_pcu_alter_er(event
, idx
, true);
897 static void snbep_pcu_put_constraint(struct intel_uncore_box
*box
, struct perf_event
*event
)
899 struct hw_perf_event_extra
*reg1
= &event
->hw
.extra_reg
;
900 struct intel_uncore_extra_reg
*er
= &box
->shared_regs
[0];
902 if (uncore_box_is_fake(box
) || !reg1
->alloc
)
905 atomic_sub(1 << (reg1
->idx
* 8), &er
->ref
);
909 static int snbep_pcu_hw_config(struct intel_uncore_box
*box
, struct perf_event
*event
)
911 struct hw_perf_event
*hwc
= &event
->hw
;
912 struct hw_perf_event_extra
*reg1
= &hwc
->extra_reg
;
913 int ev_sel
= hwc
->config
& SNBEP_PMON_CTL_EV_SEL_MASK
;
915 if (ev_sel
>= 0xb && ev_sel
<= 0xe) {
916 reg1
->reg
= SNBEP_PCU_MSR_PMON_BOX_FILTER
;
917 reg1
->idx
= ev_sel
- 0xb;
918 reg1
->config
= event
->attr
.config1
& (0xff << (reg1
->idx
* 8));
923 static struct intel_uncore_ops snbep_uncore_pcu_ops
= {
924 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
925 .hw_config
= snbep_pcu_hw_config
,
926 .get_constraint
= snbep_pcu_get_constraint
,
927 .put_constraint
= snbep_pcu_put_constraint
,
930 static struct intel_uncore_type snbep_uncore_pcu
= {
935 .perf_ctr
= SNBEP_PCU_MSR_PMON_CTR0
,
936 .event_ctl
= SNBEP_PCU_MSR_PMON_CTL0
,
937 .event_mask
= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK
,
938 .box_ctl
= SNBEP_PCU_MSR_PMON_BOX_CTL
,
939 .num_shared_regs
= 1,
940 .ops
= &snbep_uncore_pcu_ops
,
941 .format_group
= &snbep_uncore_pcu_format_group
,
944 static struct intel_uncore_type
*snbep_msr_uncores
[] = {
951 void snbep_uncore_cpu_init(void)
953 if (snbep_uncore_cbox
.num_boxes
> boot_cpu_data
.x86_max_cores
)
954 snbep_uncore_cbox
.num_boxes
= boot_cpu_data
.x86_max_cores
;
955 uncore_msr_uncores
= snbep_msr_uncores
;
/*
 * Indices into uncore_extra_pci_dev[].dev[] for the QPI port-filter
 * PCI devices (one per QPI port).
 */
enum {
	SNBEP_PCI_QPI_PORT0_FILTER,
	SNBEP_PCI_QPI_PORT1_FILTER,
};
964 static int snbep_qpi_hw_config(struct intel_uncore_box
*box
, struct perf_event
*event
)
966 struct hw_perf_event
*hwc
= &event
->hw
;
967 struct hw_perf_event_extra
*reg1
= &hwc
->extra_reg
;
968 struct hw_perf_event_extra
*reg2
= &hwc
->branch_reg
;
970 if ((hwc
->config
& SNBEP_PMON_CTL_EV_SEL_MASK
) == 0x38) {
972 reg1
->reg
= SNBEP_Q_Py_PCI_PMON_PKT_MATCH0
;
973 reg1
->config
= event
->attr
.config1
;
974 reg2
->reg
= SNBEP_Q_Py_PCI_PMON_PKT_MASK0
;
975 reg2
->config
= event
->attr
.config2
;
980 static void snbep_qpi_enable_event(struct intel_uncore_box
*box
, struct perf_event
*event
)
982 struct pci_dev
*pdev
= box
->pci_dev
;
983 struct hw_perf_event
*hwc
= &event
->hw
;
984 struct hw_perf_event_extra
*reg1
= &hwc
->extra_reg
;
985 struct hw_perf_event_extra
*reg2
= &hwc
->branch_reg
;
987 if (reg1
->idx
!= EXTRA_REG_NONE
) {
988 int idx
= box
->pmu
->pmu_idx
+ SNBEP_PCI_QPI_PORT0_FILTER
;
989 int pkg
= topology_phys_to_logical_pkg(box
->pci_phys_id
);
990 struct pci_dev
*filter_pdev
= uncore_extra_pci_dev
[pkg
].dev
[idx
];
993 pci_write_config_dword(filter_pdev
, reg1
->reg
,
995 pci_write_config_dword(filter_pdev
, reg1
->reg
+ 4,
996 (u32
)(reg1
->config
>> 32));
997 pci_write_config_dword(filter_pdev
, reg2
->reg
,
999 pci_write_config_dword(filter_pdev
, reg2
->reg
+ 4,
1000 (u32
)(reg2
->config
>> 32));
1004 pci_write_config_dword(pdev
, hwc
->config_base
, hwc
->config
| SNBEP_PMON_CTL_EN
);
1007 static struct intel_uncore_ops snbep_uncore_qpi_ops
= {
1008 SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
1009 .enable_event
= snbep_qpi_enable_event
,
1010 .hw_config
= snbep_qpi_hw_config
,
1011 .get_constraint
= uncore_get_constraint
,
1012 .put_constraint
= uncore_put_constraint
,
/* Register layout shared by all plain SNB-EP PCI-based uncore boxes. */
#define SNBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &snbep_uncore_pci_ops,		\
	.format_group	= &snbep_uncore_format_group
1023 static struct intel_uncore_type snbep_uncore_ha
= {
1027 .perf_ctr_bits
= 48,
1028 SNBEP_UNCORE_PCI_COMMON_INIT(),
1031 static struct intel_uncore_type snbep_uncore_imc
= {
1035 .perf_ctr_bits
= 48,
1036 .fixed_ctr_bits
= 48,
1037 .fixed_ctr
= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR
,
1038 .fixed_ctl
= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL
,
1039 .event_descs
= snbep_uncore_imc_events
,
1040 SNBEP_UNCORE_PCI_COMMON_INIT(),
1043 static struct intel_uncore_type snbep_uncore_qpi
= {
1047 .perf_ctr_bits
= 48,
1048 .perf_ctr
= SNBEP_PCI_PMON_CTR0
,
1049 .event_ctl
= SNBEP_PCI_PMON_CTL0
,
1050 .event_mask
= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK
,
1051 .box_ctl
= SNBEP_PCI_PMON_BOX_CTL
,
1052 .num_shared_regs
= 1,
1053 .ops
= &snbep_uncore_qpi_ops
,
1054 .event_descs
= snbep_uncore_qpi_events
,
1055 .format_group
= &snbep_uncore_qpi_format_group
,
1059 static struct intel_uncore_type snbep_uncore_r2pcie
= {
1063 .perf_ctr_bits
= 44,
1064 .constraints
= snbep_uncore_r2pcie_constraints
,
1065 SNBEP_UNCORE_PCI_COMMON_INIT(),
1068 static struct intel_uncore_type snbep_uncore_r3qpi
= {
1072 .perf_ctr_bits
= 44,
1073 .constraints
= snbep_uncore_r3qpi_constraints
,
1074 SNBEP_UNCORE_PCI_COMMON_INIT(),
1078 SNBEP_PCI_UNCORE_HA
,
1079 SNBEP_PCI_UNCORE_IMC
,
1080 SNBEP_PCI_UNCORE_QPI
,
1081 SNBEP_PCI_UNCORE_R2PCIE
,
1082 SNBEP_PCI_UNCORE_R3QPI
,
1085 static struct intel_uncore_type
*snbep_pci_uncores
[] = {
1086 [SNBEP_PCI_UNCORE_HA
] = &snbep_uncore_ha
,
1087 [SNBEP_PCI_UNCORE_IMC
] = &snbep_uncore_imc
,
1088 [SNBEP_PCI_UNCORE_QPI
] = &snbep_uncore_qpi
,
1089 [SNBEP_PCI_UNCORE_R2PCIE
] = &snbep_uncore_r2pcie
,
1090 [SNBEP_PCI_UNCORE_R3QPI
] = &snbep_uncore_r3qpi
,
1094 static const struct pci_device_id snbep_uncore_pci_ids
[] = {
1096 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_UNC_HA
),
1097 .driver_data
= UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA
, 0),
1099 { /* MC Channel 0 */
1100 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_UNC_IMC0
),
1101 .driver_data
= UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC
, 0),
1103 { /* MC Channel 1 */
1104 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_UNC_IMC1
),
1105 .driver_data
= UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC
, 1),
1107 { /* MC Channel 2 */
1108 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_UNC_IMC2
),
1109 .driver_data
= UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC
, 2),
1111 { /* MC Channel 3 */
1112 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_UNC_IMC3
),
1113 .driver_data
= UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC
, 3),
1116 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_UNC_QPI0
),
1117 .driver_data
= UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI
, 0),
1120 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_UNC_QPI1
),
1121 .driver_data
= UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI
, 1),
1124 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_UNC_R2PCIE
),
1125 .driver_data
= UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE
, 0),
1127 { /* R3QPI Link 0 */
1128 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_UNC_R3QPI0
),
1129 .driver_data
= UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI
, 0),
1131 { /* R3QPI Link 1 */
1132 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_UNC_R3QPI1
),
1133 .driver_data
= UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI
, 1),
1135 { /* QPI Port 0 filter */
1136 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x3c86),
1137 .driver_data
= UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV
,
1138 SNBEP_PCI_QPI_PORT0_FILTER
),
1140 { /* QPI Port 0 filter */
1141 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x3c96),
1142 .driver_data
= UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV
,
1143 SNBEP_PCI_QPI_PORT1_FILTER
),
1145 { /* end: all zeroes */ }
1148 static struct pci_driver snbep_uncore_pci_driver
= {
1149 .name
= "snbep_uncore",
1150 .id_table
= snbep_uncore_pci_ids
,
1154 * build pci bus to socket mapping
1156 static int snbep_pci2phy_map_init(int devid
)
1158 struct pci_dev
*ubox_dev
= NULL
;
1159 int i
, bus
, nodeid
, segment
;
1160 struct pci2phy_map
*map
;
1165 /* find the UBOX device */
1166 ubox_dev
= pci_get_device(PCI_VENDOR_ID_INTEL
, devid
, ubox_dev
);
1169 bus
= ubox_dev
->bus
->number
;
1170 /* get the Node ID of the local register */
1171 err
= pci_read_config_dword(ubox_dev
, 0x40, &config
);
1175 /* get the Node ID mapping */
1176 err
= pci_read_config_dword(ubox_dev
, 0x54, &config
);
1180 segment
= pci_domain_nr(ubox_dev
->bus
);
1181 raw_spin_lock(&pci2phy_map_lock
);
1182 map
= __find_pci2phy_map(segment
);
1184 raw_spin_unlock(&pci2phy_map_lock
);
1190 * every three bits in the Node ID mapping register maps
1191 * to a particular node.
1193 for (i
= 0; i
< 8; i
++) {
1194 if (nodeid
== ((config
>> (3 * i
)) & 0x7)) {
1195 map
->pbus_to_physid
[bus
] = i
;
1199 raw_spin_unlock(&pci2phy_map_lock
);
1204 * For PCI bus with no UBOX device, find the next bus
1205 * that has UBOX device and use its mapping.
1207 raw_spin_lock(&pci2phy_map_lock
);
1208 list_for_each_entry(map
, &pci2phy_map_head
, list
) {
1210 for (bus
= 255; bus
>= 0; bus
--) {
1211 if (map
->pbus_to_physid
[bus
] >= 0)
1212 i
= map
->pbus_to_physid
[bus
];
1214 map
->pbus_to_physid
[bus
] = i
;
1217 raw_spin_unlock(&pci2phy_map_lock
);
1220 pci_dev_put(ubox_dev
);
1222 return err
? pcibios_err_to_errno(err
) : 0;
1225 int snbep_uncore_pci_init(void)
1227 int ret
= snbep_pci2phy_map_init(0x3ce0);
1230 uncore_pci_uncores
= snbep_pci_uncores
;
1231 uncore_pci_driver
= &snbep_uncore_pci_driver
;
1234 /* end of Sandy Bridge-EP uncore support */
1236 /* IvyTown uncore support */
1237 static void ivbep_uncore_msr_init_box(struct intel_uncore_box
*box
)
1239 unsigned msr
= uncore_msr_box_ctl(box
);
1241 wrmsrl(msr
, IVBEP_PMON_BOX_CTL_INT
);
1244 static void ivbep_uncore_pci_init_box(struct intel_uncore_box
*box
)
1246 struct pci_dev
*pdev
= box
->pci_dev
;
1248 pci_write_config_dword(pdev
, SNBEP_PCI_PMON_BOX_CTL
, IVBEP_PMON_BOX_CTL_INT
);
1251 #define IVBEP_UNCORE_MSR_OPS_COMMON_INIT() \
1252 .init_box = ivbep_uncore_msr_init_box, \
1253 .disable_box = snbep_uncore_msr_disable_box, \
1254 .enable_box = snbep_uncore_msr_enable_box, \
1255 .disable_event = snbep_uncore_msr_disable_event, \
1256 .enable_event = snbep_uncore_msr_enable_event, \
1257 .read_counter = uncore_msr_read_counter
1259 static struct intel_uncore_ops ivbep_uncore_msr_ops
= {
1260 IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
1263 static struct intel_uncore_ops ivbep_uncore_pci_ops
= {
1264 .init_box
= ivbep_uncore_pci_init_box
,
1265 .disable_box
= snbep_uncore_pci_disable_box
,
1266 .enable_box
= snbep_uncore_pci_enable_box
,
1267 .disable_event
= snbep_uncore_pci_disable_event
,
1268 .enable_event
= snbep_uncore_pci_enable_event
,
1269 .read_counter
= snbep_uncore_pci_read_counter
,
/* Register layout shared by all plain IvyTown PCI-based uncore boxes. */
#define IVBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= IVBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &ivbep_uncore_pci_ops,		\
	.format_group	= &ivbep_uncore_format_group
1280 static struct attribute
*ivbep_uncore_formats_attr
[] = {
1281 &format_attr_event
.attr
,
1282 &format_attr_umask
.attr
,
1283 &format_attr_edge
.attr
,
1284 &format_attr_inv
.attr
,
1285 &format_attr_thresh8
.attr
,
1289 static struct attribute
*ivbep_uncore_ubox_formats_attr
[] = {
1290 &format_attr_event
.attr
,
1291 &format_attr_umask
.attr
,
1292 &format_attr_edge
.attr
,
1293 &format_attr_inv
.attr
,
1294 &format_attr_thresh5
.attr
,
1298 static struct attribute
*ivbep_uncore_cbox_formats_attr
[] = {
1299 &format_attr_event
.attr
,
1300 &format_attr_umask
.attr
,
1301 &format_attr_edge
.attr
,
1302 &format_attr_tid_en
.attr
,
1303 &format_attr_thresh8
.attr
,
1304 &format_attr_filter_tid
.attr
,
1305 &format_attr_filter_link
.attr
,
1306 &format_attr_filter_state2
.attr
,
1307 &format_attr_filter_nid2
.attr
,
1308 &format_attr_filter_opc2
.attr
,
1309 &format_attr_filter_nc
.attr
,
1310 &format_attr_filter_c6
.attr
,
1311 &format_attr_filter_isoc
.attr
,
1315 static struct attribute
*ivbep_uncore_pcu_formats_attr
[] = {
1316 &format_attr_event
.attr
,
1317 &format_attr_occ_sel
.attr
,
1318 &format_attr_edge
.attr
,
1319 &format_attr_thresh5
.attr
,
1320 &format_attr_occ_invert
.attr
,
1321 &format_attr_occ_edge
.attr
,
1322 &format_attr_filter_band0
.attr
,
1323 &format_attr_filter_band1
.attr
,
1324 &format_attr_filter_band2
.attr
,
1325 &format_attr_filter_band3
.attr
,
1329 static struct attribute
*ivbep_uncore_qpi_formats_attr
[] = {
1330 &format_attr_event_ext
.attr
,
1331 &format_attr_umask
.attr
,
1332 &format_attr_edge
.attr
,
1333 &format_attr_thresh8
.attr
,
1334 &format_attr_match_rds
.attr
,
1335 &format_attr_match_rnid30
.attr
,
1336 &format_attr_match_rnid4
.attr
,
1337 &format_attr_match_dnid
.attr
,
1338 &format_attr_match_mc
.attr
,
1339 &format_attr_match_opc
.attr
,
1340 &format_attr_match_vnw
.attr
,
1341 &format_attr_match0
.attr
,
1342 &format_attr_match1
.attr
,
1343 &format_attr_mask_rds
.attr
,
1344 &format_attr_mask_rnid30
.attr
,
1345 &format_attr_mask_rnid4
.attr
,
1346 &format_attr_mask_dnid
.attr
,
1347 &format_attr_mask_mc
.attr
,
1348 &format_attr_mask_opc
.attr
,
1349 &format_attr_mask_vnw
.attr
,
1350 &format_attr_mask0
.attr
,
1351 &format_attr_mask1
.attr
,
1355 static struct attribute_group ivbep_uncore_format_group
= {
1357 .attrs
= ivbep_uncore_formats_attr
,
1360 static struct attribute_group ivbep_uncore_ubox_format_group
= {
1362 .attrs
= ivbep_uncore_ubox_formats_attr
,
1365 static struct attribute_group ivbep_uncore_cbox_format_group
= {
1367 .attrs
= ivbep_uncore_cbox_formats_attr
,
1370 static struct attribute_group ivbep_uncore_pcu_format_group
= {
1372 .attrs
= ivbep_uncore_pcu_formats_attr
,
1375 static struct attribute_group ivbep_uncore_qpi_format_group
= {
1377 .attrs
= ivbep_uncore_qpi_formats_attr
,
1380 static struct intel_uncore_type ivbep_uncore_ubox
= {
1384 .perf_ctr_bits
= 44,
1385 .fixed_ctr_bits
= 48,
1386 .perf_ctr
= SNBEP_U_MSR_PMON_CTR0
,
1387 .event_ctl
= SNBEP_U_MSR_PMON_CTL0
,
1388 .event_mask
= IVBEP_U_MSR_PMON_RAW_EVENT_MASK
,
1389 .fixed_ctr
= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR
,
1390 .fixed_ctl
= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL
,
1391 .ops
= &ivbep_uncore_msr_ops
,
1392 .format_group
= &ivbep_uncore_ubox_format_group
,
1395 static struct extra_reg ivbep_uncore_cbox_extra_regs
[] = {
1396 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN
,
1397 SNBEP_CBO_PMON_CTL_TID_EN
, 0x1),
1398 SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
1399 SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
1400 SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
1401 SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
1402 SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
1403 SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
1404 SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
1405 SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
1406 SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
1407 SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
1408 SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
1409 SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
1410 SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
1411 SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
1412 SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
1413 SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
1414 SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
1415 SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
1416 SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
1417 SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
1418 SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
1419 SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
1420 SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
1421 SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
1422 SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
1423 SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
1424 SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
1425 SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
1426 SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
1427 SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
1428 SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
1429 SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
1430 SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
1431 SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
1432 SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
1436 static u64
ivbep_cbox_filter_mask(int fields
)
1441 mask
|= IVBEP_CB0_MSR_PMON_BOX_FILTER_TID
;
1443 mask
|= IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK
;
1445 mask
|= IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE
;
1447 mask
|= IVBEP_CB0_MSR_PMON_BOX_FILTER_NID
;
1448 if (fields
& 0x10) {
1449 mask
|= IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC
;
1450 mask
|= IVBEP_CB0_MSR_PMON_BOX_FILTER_NC
;
1451 mask
|= IVBEP_CB0_MSR_PMON_BOX_FILTER_C6
;
1452 mask
|= IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC
;
1458 static struct event_constraint
*
1459 ivbep_cbox_get_constraint(struct intel_uncore_box
*box
, struct perf_event
*event
)
1461 return __snbep_cbox_get_constraint(box
, event
, ivbep_cbox_filter_mask
);
1464 static int ivbep_cbox_hw_config(struct intel_uncore_box
*box
, struct perf_event
*event
)
1466 struct hw_perf_event_extra
*reg1
= &event
->hw
.extra_reg
;
1467 struct extra_reg
*er
;
1470 for (er
= ivbep_uncore_cbox_extra_regs
; er
->msr
; er
++) {
1471 if (er
->event
!= (event
->hw
.config
& er
->config_mask
))
1477 reg1
->reg
= SNBEP_C0_MSR_PMON_BOX_FILTER
+
1478 SNBEP_CBO_MSR_OFFSET
* box
->pmu
->pmu_idx
;
1479 reg1
->config
= event
->attr
.config1
& ivbep_cbox_filter_mask(idx
);
1485 static void ivbep_cbox_enable_event(struct intel_uncore_box
*box
, struct perf_event
*event
)
1487 struct hw_perf_event
*hwc
= &event
->hw
;
1488 struct hw_perf_event_extra
*reg1
= &hwc
->extra_reg
;
1490 if (reg1
->idx
!= EXTRA_REG_NONE
) {
1491 u64 filter
= uncore_shared_reg_config(box
, 0);
1492 wrmsrl(reg1
->reg
, filter
& 0xffffffff);
1493 wrmsrl(reg1
->reg
+ 6, filter
>> 32);
1496 wrmsrl(hwc
->config_base
, hwc
->config
| SNBEP_PMON_CTL_EN
);
1499 static struct intel_uncore_ops ivbep_uncore_cbox_ops
= {
1500 .init_box
= ivbep_uncore_msr_init_box
,
1501 .disable_box
= snbep_uncore_msr_disable_box
,
1502 .enable_box
= snbep_uncore_msr_enable_box
,
1503 .disable_event
= snbep_uncore_msr_disable_event
,
1504 .enable_event
= ivbep_cbox_enable_event
,
1505 .read_counter
= uncore_msr_read_counter
,
1506 .hw_config
= ivbep_cbox_hw_config
,
1507 .get_constraint
= ivbep_cbox_get_constraint
,
1508 .put_constraint
= snbep_cbox_put_constraint
,
1511 static struct intel_uncore_type ivbep_uncore_cbox
= {
1515 .perf_ctr_bits
= 44,
1516 .event_ctl
= SNBEP_C0_MSR_PMON_CTL0
,
1517 .perf_ctr
= SNBEP_C0_MSR_PMON_CTR0
,
1518 .event_mask
= IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK
,
1519 .box_ctl
= SNBEP_C0_MSR_PMON_BOX_CTL
,
1520 .msr_offset
= SNBEP_CBO_MSR_OFFSET
,
1521 .num_shared_regs
= 1,
1522 .constraints
= snbep_uncore_cbox_constraints
,
1523 .ops
= &ivbep_uncore_cbox_ops
,
1524 .format_group
= &ivbep_uncore_cbox_format_group
,
1527 static struct intel_uncore_ops ivbep_uncore_pcu_ops
= {
1528 IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
1529 .hw_config
= snbep_pcu_hw_config
,
1530 .get_constraint
= snbep_pcu_get_constraint
,
1531 .put_constraint
= snbep_pcu_put_constraint
,
1534 static struct intel_uncore_type ivbep_uncore_pcu
= {
1538 .perf_ctr_bits
= 48,
1539 .perf_ctr
= SNBEP_PCU_MSR_PMON_CTR0
,
1540 .event_ctl
= SNBEP_PCU_MSR_PMON_CTL0
,
1541 .event_mask
= IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK
,
1542 .box_ctl
= SNBEP_PCU_MSR_PMON_BOX_CTL
,
1543 .num_shared_regs
= 1,
1544 .ops
= &ivbep_uncore_pcu_ops
,
1545 .format_group
= &ivbep_uncore_pcu_format_group
,
1548 static struct intel_uncore_type
*ivbep_msr_uncores
[] = {
1555 void ivbep_uncore_cpu_init(void)
1557 if (ivbep_uncore_cbox
.num_boxes
> boot_cpu_data
.x86_max_cores
)
1558 ivbep_uncore_cbox
.num_boxes
= boot_cpu_data
.x86_max_cores
;
1559 uncore_msr_uncores
= ivbep_msr_uncores
;
1562 static struct intel_uncore_type ivbep_uncore_ha
= {
1566 .perf_ctr_bits
= 48,
1567 IVBEP_UNCORE_PCI_COMMON_INIT(),
1570 static struct intel_uncore_type ivbep_uncore_imc
= {
1574 .perf_ctr_bits
= 48,
1575 .fixed_ctr_bits
= 48,
1576 .fixed_ctr
= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR
,
1577 .fixed_ctl
= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL
,
1578 .event_descs
= snbep_uncore_imc_events
,
1579 IVBEP_UNCORE_PCI_COMMON_INIT(),
/* registers in IRP boxes are not properly aligned */
/* per-counter control and counter offsets in PCI config space */
static unsigned ivbep_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4};
static unsigned ivbep_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0};
1586 static void ivbep_uncore_irp_enable_event(struct intel_uncore_box
*box
, struct perf_event
*event
)
1588 struct pci_dev
*pdev
= box
->pci_dev
;
1589 struct hw_perf_event
*hwc
= &event
->hw
;
1591 pci_write_config_dword(pdev
, ivbep_uncore_irp_ctls
[hwc
->idx
],
1592 hwc
->config
| SNBEP_PMON_CTL_EN
);
1595 static void ivbep_uncore_irp_disable_event(struct intel_uncore_box
*box
, struct perf_event
*event
)
1597 struct pci_dev
*pdev
= box
->pci_dev
;
1598 struct hw_perf_event
*hwc
= &event
->hw
;
1600 pci_write_config_dword(pdev
, ivbep_uncore_irp_ctls
[hwc
->idx
], hwc
->config
);
1603 static u64
ivbep_uncore_irp_read_counter(struct intel_uncore_box
*box
, struct perf_event
*event
)
1605 struct pci_dev
*pdev
= box
->pci_dev
;
1606 struct hw_perf_event
*hwc
= &event
->hw
;
1609 pci_read_config_dword(pdev
, ivbep_uncore_irp_ctrs
[hwc
->idx
], (u32
*)&count
);
1610 pci_read_config_dword(pdev
, ivbep_uncore_irp_ctrs
[hwc
->idx
] + 4, (u32
*)&count
+ 1);
1615 static struct intel_uncore_ops ivbep_uncore_irp_ops
= {
1616 .init_box
= ivbep_uncore_pci_init_box
,
1617 .disable_box
= snbep_uncore_pci_disable_box
,
1618 .enable_box
= snbep_uncore_pci_enable_box
,
1619 .disable_event
= ivbep_uncore_irp_disable_event
,
1620 .enable_event
= ivbep_uncore_irp_enable_event
,
1621 .read_counter
= ivbep_uncore_irp_read_counter
,
1624 static struct intel_uncore_type ivbep_uncore_irp
= {
1628 .perf_ctr_bits
= 48,
1629 .event_mask
= IVBEP_PMON_RAW_EVENT_MASK
,
1630 .box_ctl
= SNBEP_PCI_PMON_BOX_CTL
,
1631 .ops
= &ivbep_uncore_irp_ops
,
1632 .format_group
= &ivbep_uncore_format_group
,
1635 static struct intel_uncore_ops ivbep_uncore_qpi_ops
= {
1636 .init_box
= ivbep_uncore_pci_init_box
,
1637 .disable_box
= snbep_uncore_pci_disable_box
,
1638 .enable_box
= snbep_uncore_pci_enable_box
,
1639 .disable_event
= snbep_uncore_pci_disable_event
,
1640 .enable_event
= snbep_qpi_enable_event
,
1641 .read_counter
= snbep_uncore_pci_read_counter
,
1642 .hw_config
= snbep_qpi_hw_config
,
1643 .get_constraint
= uncore_get_constraint
,
1644 .put_constraint
= uncore_put_constraint
,
1647 static struct intel_uncore_type ivbep_uncore_qpi
= {
1651 .perf_ctr_bits
= 48,
1652 .perf_ctr
= SNBEP_PCI_PMON_CTR0
,
1653 .event_ctl
= SNBEP_PCI_PMON_CTL0
,
1654 .event_mask
= IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK
,
1655 .box_ctl
= SNBEP_PCI_PMON_BOX_CTL
,
1656 .num_shared_regs
= 1,
1657 .ops
= &ivbep_uncore_qpi_ops
,
1658 .format_group
= &ivbep_uncore_qpi_format_group
,
1661 static struct intel_uncore_type ivbep_uncore_r2pcie
= {
1665 .perf_ctr_bits
= 44,
1666 .constraints
= snbep_uncore_r2pcie_constraints
,
1667 IVBEP_UNCORE_PCI_COMMON_INIT(),
1670 static struct intel_uncore_type ivbep_uncore_r3qpi
= {
1674 .perf_ctr_bits
= 44,
1675 .constraints
= snbep_uncore_r3qpi_constraints
,
1676 IVBEP_UNCORE_PCI_COMMON_INIT(),
1680 IVBEP_PCI_UNCORE_HA
,
1681 IVBEP_PCI_UNCORE_IMC
,
1682 IVBEP_PCI_UNCORE_IRP
,
1683 IVBEP_PCI_UNCORE_QPI
,
1684 IVBEP_PCI_UNCORE_R2PCIE
,
1685 IVBEP_PCI_UNCORE_R3QPI
,
1688 static struct intel_uncore_type
*ivbep_pci_uncores
[] = {
1689 [IVBEP_PCI_UNCORE_HA
] = &ivbep_uncore_ha
,
1690 [IVBEP_PCI_UNCORE_IMC
] = &ivbep_uncore_imc
,
1691 [IVBEP_PCI_UNCORE_IRP
] = &ivbep_uncore_irp
,
1692 [IVBEP_PCI_UNCORE_QPI
] = &ivbep_uncore_qpi
,
1693 [IVBEP_PCI_UNCORE_R2PCIE
] = &ivbep_uncore_r2pcie
,
1694 [IVBEP_PCI_UNCORE_R3QPI
] = &ivbep_uncore_r3qpi
,
1698 static const struct pci_device_id ivbep_uncore_pci_ids
[] = {
1699 { /* Home Agent 0 */
1700 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0xe30),
1701 .driver_data
= UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA
, 0),
1703 { /* Home Agent 1 */
1704 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0xe38),
1705 .driver_data
= UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA
, 1),
1707 { /* MC0 Channel 0 */
1708 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0xeb4),
1709 .driver_data
= UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC
, 0),
1711 { /* MC0 Channel 1 */
1712 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0xeb5),
1713 .driver_data
= UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC
, 1),
1715 { /* MC0 Channel 3 */
1716 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0xeb0),
1717 .driver_data
= UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC
, 2),
1719 { /* MC0 Channel 4 */
1720 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0xeb1),
1721 .driver_data
= UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC
, 3),
1723 { /* MC1 Channel 0 */
1724 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0xef4),
1725 .driver_data
= UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC
, 4),
1727 { /* MC1 Channel 1 */
1728 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0xef5),
1729 .driver_data
= UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC
, 5),
1731 { /* MC1 Channel 3 */
1732 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0xef0),
1733 .driver_data
= UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC
, 6),
1735 { /* MC1 Channel 4 */
1736 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0xef1),
1737 .driver_data
= UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC
, 7),
1740 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0xe39),
1741 .driver_data
= UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IRP
, 0),
1744 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0xe32),
1745 .driver_data
= UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI
, 0),
1748 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0xe33),
1749 .driver_data
= UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI
, 1),
1752 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0xe3a),
1753 .driver_data
= UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI
, 2),
1756 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0xe34),
1757 .driver_data
= UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R2PCIE
, 0),
1759 { /* R3QPI0 Link 0 */
1760 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0xe36),
1761 .driver_data
= UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI
, 0),
1763 { /* R3QPI0 Link 1 */
1764 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0xe37),
1765 .driver_data
= UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI
, 1),
1767 { /* R3QPI1 Link 2 */
1768 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0xe3e),
1769 .driver_data
= UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI
, 2),
1771 { /* QPI Port 0 filter */
1772 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0xe86),
1773 .driver_data
= UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV
,
1774 SNBEP_PCI_QPI_PORT0_FILTER
),
1776 { /* QPI Port 0 filter */
1777 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0xe96),
1778 .driver_data
= UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV
,
1779 SNBEP_PCI_QPI_PORT1_FILTER
),
1781 { /* end: all zeroes */ }
1784 static struct pci_driver ivbep_uncore_pci_driver
= {
1785 .name
= "ivbep_uncore",
1786 .id_table
= ivbep_uncore_pci_ids
,
1789 int ivbep_uncore_pci_init(void)
1791 int ret
= snbep_pci2phy_map_init(0x0e1e);
1794 uncore_pci_uncores
= ivbep_pci_uncores
;
1795 uncore_pci_driver
= &ivbep_uncore_pci_driver
;
1798 /* end of IvyTown uncore support */
1800 /* KNL uncore support */
1801 static struct attribute
*knl_uncore_ubox_formats_attr
[] = {
1802 &format_attr_event
.attr
,
1803 &format_attr_umask
.attr
,
1804 &format_attr_edge
.attr
,
1805 &format_attr_tid_en
.attr
,
1806 &format_attr_inv
.attr
,
1807 &format_attr_thresh5
.attr
,
1811 static struct attribute_group knl_uncore_ubox_format_group
= {
1813 .attrs
= knl_uncore_ubox_formats_attr
,
1816 static struct intel_uncore_type knl_uncore_ubox
= {
1820 .perf_ctr_bits
= 48,
1821 .fixed_ctr_bits
= 48,
1822 .perf_ctr
= HSWEP_U_MSR_PMON_CTR0
,
1823 .event_ctl
= HSWEP_U_MSR_PMON_CTL0
,
1824 .event_mask
= KNL_U_MSR_PMON_RAW_EVENT_MASK
,
1825 .fixed_ctr
= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR
,
1826 .fixed_ctl
= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL
,
1827 .ops
= &snbep_uncore_msr_ops
,
1828 .format_group
= &knl_uncore_ubox_format_group
,
1831 static struct attribute
*knl_uncore_cha_formats_attr
[] = {
1832 &format_attr_event
.attr
,
1833 &format_attr_umask
.attr
,
1834 &format_attr_qor
.attr
,
1835 &format_attr_edge
.attr
,
1836 &format_attr_tid_en
.attr
,
1837 &format_attr_inv
.attr
,
1838 &format_attr_thresh8
.attr
,
1839 &format_attr_filter_tid4
.attr
,
1840 &format_attr_filter_link3
.attr
,
1841 &format_attr_filter_state4
.attr
,
1842 &format_attr_filter_local
.attr
,
1843 &format_attr_filter_all_op
.attr
,
1844 &format_attr_filter_nnm
.attr
,
1845 &format_attr_filter_opc3
.attr
,
1846 &format_attr_filter_nc
.attr
,
1847 &format_attr_filter_isoc
.attr
,
1851 static struct attribute_group knl_uncore_cha_format_group
= {
1853 .attrs
= knl_uncore_cha_formats_attr
,
1856 static struct event_constraint knl_uncore_cha_constraints
[] = {
1857 UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
1858 UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
1859 UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
1860 EVENT_CONSTRAINT_END
1863 static struct extra_reg knl_uncore_cha_extra_regs
[] = {
1864 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN
,
1865 SNBEP_CBO_PMON_CTL_TID_EN
, 0x1),
1866 SNBEP_CBO_EVENT_EXTRA_REG(0x3d, 0xff, 0x2),
1867 SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x4),
1868 SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x4),
1872 static u64
knl_cha_filter_mask(int fields
)
1877 mask
|= KNL_CHA_MSR_PMON_BOX_FILTER_TID
;
1879 mask
|= KNL_CHA_MSR_PMON_BOX_FILTER_STATE
;
1881 mask
|= KNL_CHA_MSR_PMON_BOX_FILTER_OP
;
1885 static struct event_constraint
*
1886 knl_cha_get_constraint(struct intel_uncore_box
*box
, struct perf_event
*event
)
1888 return __snbep_cbox_get_constraint(box
, event
, knl_cha_filter_mask
);
1891 static int knl_cha_hw_config(struct intel_uncore_box
*box
,
1892 struct perf_event
*event
)
1894 struct hw_perf_event_extra
*reg1
= &event
->hw
.extra_reg
;
1895 struct extra_reg
*er
;
1898 for (er
= knl_uncore_cha_extra_regs
; er
->msr
; er
++) {
1899 if (er
->event
!= (event
->hw
.config
& er
->config_mask
))
1905 reg1
->reg
= HSWEP_C0_MSR_PMON_BOX_FILTER0
+
1906 KNL_CHA_MSR_OFFSET
* box
->pmu
->pmu_idx
;
1907 reg1
->config
= event
->attr
.config1
& knl_cha_filter_mask(idx
);
1909 reg1
->config
|= KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE
;
1910 reg1
->config
|= KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE
;
1911 reg1
->config
|= KNL_CHA_MSR_PMON_BOX_FILTER_NNC
;
/* Defined in the Haswell-EP section below; the KNL CHA reuses it. */
static void hswep_cbox_enable_event(struct intel_uncore_box *box,
				    struct perf_event *event);
1920 static struct intel_uncore_ops knl_uncore_cha_ops
= {
1921 .init_box
= snbep_uncore_msr_init_box
,
1922 .disable_box
= snbep_uncore_msr_disable_box
,
1923 .enable_box
= snbep_uncore_msr_enable_box
,
1924 .disable_event
= snbep_uncore_msr_disable_event
,
1925 .enable_event
= hswep_cbox_enable_event
,
1926 .read_counter
= uncore_msr_read_counter
,
1927 .hw_config
= knl_cha_hw_config
,
1928 .get_constraint
= knl_cha_get_constraint
,
1929 .put_constraint
= snbep_cbox_put_constraint
,
1932 static struct intel_uncore_type knl_uncore_cha
= {
1936 .perf_ctr_bits
= 48,
1937 .event_ctl
= HSWEP_C0_MSR_PMON_CTL0
,
1938 .perf_ctr
= HSWEP_C0_MSR_PMON_CTR0
,
1939 .event_mask
= KNL_CHA_MSR_PMON_RAW_EVENT_MASK
,
1940 .box_ctl
= HSWEP_C0_MSR_PMON_BOX_CTL
,
1941 .msr_offset
= KNL_CHA_MSR_OFFSET
,
1942 .num_shared_regs
= 1,
1943 .constraints
= knl_uncore_cha_constraints
,
1944 .ops
= &knl_uncore_cha_ops
,
1945 .format_group
= &knl_uncore_cha_format_group
,
1948 static struct attribute
*knl_uncore_pcu_formats_attr
[] = {
1949 &format_attr_event2
.attr
,
1950 &format_attr_use_occ_ctr
.attr
,
1951 &format_attr_occ_sel
.attr
,
1952 &format_attr_edge
.attr
,
1953 &format_attr_tid_en
.attr
,
1954 &format_attr_inv
.attr
,
1955 &format_attr_thresh6
.attr
,
1956 &format_attr_occ_invert
.attr
,
1957 &format_attr_occ_edge_det
.attr
,
1961 static struct attribute_group knl_uncore_pcu_format_group
= {
1963 .attrs
= knl_uncore_pcu_formats_attr
,
1966 static struct intel_uncore_type knl_uncore_pcu
= {
1970 .perf_ctr_bits
= 48,
1971 .perf_ctr
= HSWEP_PCU_MSR_PMON_CTR0
,
1972 .event_ctl
= HSWEP_PCU_MSR_PMON_CTL0
,
1973 .event_mask
= KNL_PCU_MSR_PMON_RAW_EVENT_MASK
,
1974 .box_ctl
= HSWEP_PCU_MSR_PMON_BOX_CTL
,
1975 .ops
= &snbep_uncore_msr_ops
,
1976 .format_group
= &knl_uncore_pcu_format_group
,
1979 static struct intel_uncore_type
*knl_msr_uncores
[] = {
1986 void knl_uncore_cpu_init(void)
1988 uncore_msr_uncores
= knl_msr_uncores
;
1991 static void knl_uncore_imc_enable_box(struct intel_uncore_box
*box
)
1993 struct pci_dev
*pdev
= box
->pci_dev
;
1994 int box_ctl
= uncore_pci_box_ctl(box
);
1996 pci_write_config_dword(pdev
, box_ctl
, 0);
1999 static void knl_uncore_imc_enable_event(struct intel_uncore_box
*box
,
2000 struct perf_event
*event
)
2002 struct pci_dev
*pdev
= box
->pci_dev
;
2003 struct hw_perf_event
*hwc
= &event
->hw
;
2005 if ((event
->attr
.config
& SNBEP_PMON_CTL_EV_SEL_MASK
)
2006 == UNCORE_FIXED_EVENT
)
2007 pci_write_config_dword(pdev
, hwc
->config_base
,
2008 hwc
->config
| KNL_PMON_FIXED_CTL_EN
);
2010 pci_write_config_dword(pdev
, hwc
->config_base
,
2011 hwc
->config
| SNBEP_PMON_CTL_EN
);
2014 static struct intel_uncore_ops knl_uncore_imc_ops
= {
2015 .init_box
= snbep_uncore_pci_init_box
,
2016 .disable_box
= snbep_uncore_pci_disable_box
,
2017 .enable_box
= knl_uncore_imc_enable_box
,
2018 .read_counter
= snbep_uncore_pci_read_counter
,
2019 .enable_event
= knl_uncore_imc_enable_event
,
2020 .disable_event
= snbep_uncore_pci_disable_event
,
2023 static struct intel_uncore_type knl_uncore_imc_uclk
= {
2027 .perf_ctr_bits
= 48,
2028 .fixed_ctr_bits
= 48,
2029 .perf_ctr
= KNL_UCLK_MSR_PMON_CTR0_LOW
,
2030 .event_ctl
= KNL_UCLK_MSR_PMON_CTL0
,
2031 .event_mask
= SNBEP_PMON_RAW_EVENT_MASK
,
2032 .fixed_ctr
= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW
,
2033 .fixed_ctl
= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL
,
2034 .box_ctl
= KNL_UCLK_MSR_PMON_BOX_CTL
,
2035 .ops
= &knl_uncore_imc_ops
,
2036 .format_group
= &snbep_uncore_format_group
,
2039 static struct intel_uncore_type knl_uncore_imc_dclk
= {
2043 .perf_ctr_bits
= 48,
2044 .fixed_ctr_bits
= 48,
2045 .perf_ctr
= KNL_MC0_CH0_MSR_PMON_CTR0_LOW
,
2046 .event_ctl
= KNL_MC0_CH0_MSR_PMON_CTL0
,
2047 .event_mask
= SNBEP_PMON_RAW_EVENT_MASK
,
2048 .fixed_ctr
= KNL_MC0_CH0_MSR_PMON_FIXED_LOW
,
2049 .fixed_ctl
= KNL_MC0_CH0_MSR_PMON_FIXED_CTL
,
2050 .box_ctl
= KNL_MC0_CH0_MSR_PMON_BOX_CTL
,
2051 .ops
= &knl_uncore_imc_ops
,
2052 .format_group
= &snbep_uncore_format_group
,
2055 static struct intel_uncore_type knl_uncore_edc_uclk
= {
2059 .perf_ctr_bits
= 48,
2060 .fixed_ctr_bits
= 48,
2061 .perf_ctr
= KNL_UCLK_MSR_PMON_CTR0_LOW
,
2062 .event_ctl
= KNL_UCLK_MSR_PMON_CTL0
,
2063 .event_mask
= SNBEP_PMON_RAW_EVENT_MASK
,
2064 .fixed_ctr
= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW
,
2065 .fixed_ctl
= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL
,
2066 .box_ctl
= KNL_UCLK_MSR_PMON_BOX_CTL
,
2067 .ops
= &knl_uncore_imc_ops
,
2068 .format_group
= &snbep_uncore_format_group
,
2071 static struct intel_uncore_type knl_uncore_edc_eclk
= {
2075 .perf_ctr_bits
= 48,
2076 .fixed_ctr_bits
= 48,
2077 .perf_ctr
= KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW
,
2078 .event_ctl
= KNL_EDC0_ECLK_MSR_PMON_CTL0
,
2079 .event_mask
= SNBEP_PMON_RAW_EVENT_MASK
,
2080 .fixed_ctr
= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW
,
2081 .fixed_ctl
= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL
,
2082 .box_ctl
= KNL_EDC0_ECLK_MSR_PMON_BOX_CTL
,
2083 .ops
= &knl_uncore_imc_ops
,
2084 .format_group
= &snbep_uncore_format_group
,
2087 static struct event_constraint knl_uncore_m2pcie_constraints
[] = {
2088 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
2089 EVENT_CONSTRAINT_END
2092 static struct intel_uncore_type knl_uncore_m2pcie
= {
2096 .perf_ctr_bits
= 48,
2097 .constraints
= knl_uncore_m2pcie_constraints
,
2098 SNBEP_UNCORE_PCI_COMMON_INIT(),
2101 static struct attribute
*knl_uncore_irp_formats_attr
[] = {
2102 &format_attr_event
.attr
,
2103 &format_attr_umask
.attr
,
2104 &format_attr_qor
.attr
,
2105 &format_attr_edge
.attr
,
2106 &format_attr_inv
.attr
,
2107 &format_attr_thresh8
.attr
,
2111 static struct attribute_group knl_uncore_irp_format_group
= {
2113 .attrs
= knl_uncore_irp_formats_attr
,
2116 static struct intel_uncore_type knl_uncore_irp
= {
2120 .perf_ctr_bits
= 48,
2121 .perf_ctr
= SNBEP_PCI_PMON_CTR0
,
2122 .event_ctl
= SNBEP_PCI_PMON_CTL0
,
2123 .event_mask
= KNL_IRP_PCI_PMON_RAW_EVENT_MASK
,
2124 .box_ctl
= KNL_IRP_PCI_PMON_BOX_CTL
,
2125 .ops
= &snbep_uncore_pci_ops
,
2126 .format_group
= &knl_uncore_irp_format_group
,
2130 KNL_PCI_UNCORE_MC_UCLK
,
2131 KNL_PCI_UNCORE_MC_DCLK
,
2132 KNL_PCI_UNCORE_EDC_UCLK
,
2133 KNL_PCI_UNCORE_EDC_ECLK
,
2134 KNL_PCI_UNCORE_M2PCIE
,
2138 static struct intel_uncore_type
*knl_pci_uncores
[] = {
2139 [KNL_PCI_UNCORE_MC_UCLK
] = &knl_uncore_imc_uclk
,
2140 [KNL_PCI_UNCORE_MC_DCLK
] = &knl_uncore_imc_dclk
,
2141 [KNL_PCI_UNCORE_EDC_UCLK
] = &knl_uncore_edc_uclk
,
2142 [KNL_PCI_UNCORE_EDC_ECLK
] = &knl_uncore_edc_eclk
,
2143 [KNL_PCI_UNCORE_M2PCIE
] = &knl_uncore_m2pcie
,
2144 [KNL_PCI_UNCORE_IRP
] = &knl_uncore_irp
,
2149 * KNL uses a common PCI device ID for multiple instances of an Uncore PMU
2150 * device type. prior to KNL, each instance of a PMU device type had a unique
2153 * PCI Device ID Uncore PMU Devices
2154 * ----------------------------------
2155 * 0x7841 MC0 UClk, MC1 UClk
2156 * 0x7843 MC0 DClk CH 0, MC0 DClk CH 1, MC0 DClk CH 2,
2157 * MC1 DClk CH 0, MC1 DClk CH 1, MC1 DClk CH 2
2158 * 0x7833 EDC0 UClk, EDC1 UClk, EDC2 UClk, EDC3 UClk,
2159 * EDC4 UClk, EDC5 UClk, EDC6 UClk, EDC7 UClk
2160 * 0x7835 EDC0 EClk, EDC1 EClk, EDC2 EClk, EDC3 EClk,
2161 * EDC4 EClk, EDC5 EClk, EDC6 EClk, EDC7 EClk
2166 static const struct pci_device_id knl_uncore_pci_ids
[] = {
2168 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x7841),
2169 .driver_data
= UNCORE_PCI_DEV_FULL_DATA(10, 0, KNL_PCI_UNCORE_MC_UCLK
, 0),
2172 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x7841),
2173 .driver_data
= UNCORE_PCI_DEV_FULL_DATA(11, 0, KNL_PCI_UNCORE_MC_UCLK
, 1),
2175 { /* MC0 DClk CH 0 */
2176 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x7843),
2177 .driver_data
= UNCORE_PCI_DEV_FULL_DATA(8, 2, KNL_PCI_UNCORE_MC_DCLK
, 0),
2179 { /* MC0 DClk CH 1 */
2180 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x7843),
2181 .driver_data
= UNCORE_PCI_DEV_FULL_DATA(8, 3, KNL_PCI_UNCORE_MC_DCLK
, 1),
2183 { /* MC0 DClk CH 2 */
2184 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x7843),
2185 .driver_data
= UNCORE_PCI_DEV_FULL_DATA(8, 4, KNL_PCI_UNCORE_MC_DCLK
, 2),
2187 { /* MC1 DClk CH 0 */
2188 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x7843),
2189 .driver_data
= UNCORE_PCI_DEV_FULL_DATA(9, 2, KNL_PCI_UNCORE_MC_DCLK
, 3),
2191 { /* MC1 DClk CH 1 */
2192 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x7843),
2193 .driver_data
= UNCORE_PCI_DEV_FULL_DATA(9, 3, KNL_PCI_UNCORE_MC_DCLK
, 4),
2195 { /* MC1 DClk CH 2 */
2196 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x7843),
2197 .driver_data
= UNCORE_PCI_DEV_FULL_DATA(9, 4, KNL_PCI_UNCORE_MC_DCLK
, 5),
2200 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x7833),
2201 .driver_data
= UNCORE_PCI_DEV_FULL_DATA(15, 0, KNL_PCI_UNCORE_EDC_UCLK
, 0),
2204 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x7833),
2205 .driver_data
= UNCORE_PCI_DEV_FULL_DATA(16, 0, KNL_PCI_UNCORE_EDC_UCLK
, 1),
2208 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x7833),
2209 .driver_data
= UNCORE_PCI_DEV_FULL_DATA(17, 0, KNL_PCI_UNCORE_EDC_UCLK
, 2),
2212 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x7833),
2213 .driver_data
= UNCORE_PCI_DEV_FULL_DATA(18, 0, KNL_PCI_UNCORE_EDC_UCLK
, 3),
2216 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x7833),
2217 .driver_data
= UNCORE_PCI_DEV_FULL_DATA(19, 0, KNL_PCI_UNCORE_EDC_UCLK
, 4),
2220 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x7833),
2221 .driver_data
= UNCORE_PCI_DEV_FULL_DATA(20, 0, KNL_PCI_UNCORE_EDC_UCLK
, 5),
2224 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x7833),
2225 .driver_data
= UNCORE_PCI_DEV_FULL_DATA(21, 0, KNL_PCI_UNCORE_EDC_UCLK
, 6),
2228 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x7833),
2229 .driver_data
= UNCORE_PCI_DEV_FULL_DATA(22, 0, KNL_PCI_UNCORE_EDC_UCLK
, 7),
2232 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x7835),
2233 .driver_data
= UNCORE_PCI_DEV_FULL_DATA(24, 2, KNL_PCI_UNCORE_EDC_ECLK
, 0),
2236 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x7835),
2237 .driver_data
= UNCORE_PCI_DEV_FULL_DATA(25, 2, KNL_PCI_UNCORE_EDC_ECLK
, 1),
2240 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x7835),
2241 .driver_data
= UNCORE_PCI_DEV_FULL_DATA(26, 2, KNL_PCI_UNCORE_EDC_ECLK
, 2),
2244 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x7835),
2245 .driver_data
= UNCORE_PCI_DEV_FULL_DATA(27, 2, KNL_PCI_UNCORE_EDC_ECLK
, 3),
2248 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x7835),
2249 .driver_data
= UNCORE_PCI_DEV_FULL_DATA(28, 2, KNL_PCI_UNCORE_EDC_ECLK
, 4),
2252 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x7835),
2253 .driver_data
= UNCORE_PCI_DEV_FULL_DATA(29, 2, KNL_PCI_UNCORE_EDC_ECLK
, 5),
2256 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x7835),
2257 .driver_data
= UNCORE_PCI_DEV_FULL_DATA(30, 2, KNL_PCI_UNCORE_EDC_ECLK
, 6),
2260 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x7835),
2261 .driver_data
= UNCORE_PCI_DEV_FULL_DATA(31, 2, KNL_PCI_UNCORE_EDC_ECLK
, 7),
2264 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x7817),
2265 .driver_data
= UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_M2PCIE
, 0),
2268 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x7814),
2269 .driver_data
= UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_IRP
, 0),
2271 { /* end: all zeroes */ }
2274 static struct pci_driver knl_uncore_pci_driver
= {
2275 .name
= "knl_uncore",
2276 .id_table
= knl_uncore_pci_ids
,
2279 int knl_uncore_pci_init(void)
2283 /* All KNL PCI based PMON units are on the same PCI bus except IRP */
2284 ret
= snb_pci2phy_map_init(0x7814); /* IRP */
2287 ret
= snb_pci2phy_map_init(0x7817); /* M2PCIe */
2290 uncore_pci_uncores
= knl_pci_uncores
;
2291 uncore_pci_driver
= &knl_uncore_pci_driver
;
2295 /* end of KNL uncore support */
2297 /* Haswell-EP uncore support */
2298 static struct attribute
*hswep_uncore_ubox_formats_attr
[] = {
2299 &format_attr_event
.attr
,
2300 &format_attr_umask
.attr
,
2301 &format_attr_edge
.attr
,
2302 &format_attr_inv
.attr
,
2303 &format_attr_thresh5
.attr
,
2304 &format_attr_filter_tid2
.attr
,
2305 &format_attr_filter_cid
.attr
,
2309 static struct attribute_group hswep_uncore_ubox_format_group
= {
2311 .attrs
= hswep_uncore_ubox_formats_attr
,
2314 static int hswep_ubox_hw_config(struct intel_uncore_box
*box
, struct perf_event
*event
)
2316 struct hw_perf_event_extra
*reg1
= &event
->hw
.extra_reg
;
2317 reg1
->reg
= HSWEP_U_MSR_PMON_FILTER
;
2318 reg1
->config
= event
->attr
.config1
& HSWEP_U_MSR_PMON_BOX_FILTER_MASK
;
2323 static struct intel_uncore_ops hswep_uncore_ubox_ops
= {
2324 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
2325 .hw_config
= hswep_ubox_hw_config
,
2326 .get_constraint
= uncore_get_constraint
,
2327 .put_constraint
= uncore_put_constraint
,
2330 static struct intel_uncore_type hswep_uncore_ubox
= {
2334 .perf_ctr_bits
= 44,
2335 .fixed_ctr_bits
= 48,
2336 .perf_ctr
= HSWEP_U_MSR_PMON_CTR0
,
2337 .event_ctl
= HSWEP_U_MSR_PMON_CTL0
,
2338 .event_mask
= SNBEP_U_MSR_PMON_RAW_EVENT_MASK
,
2339 .fixed_ctr
= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR
,
2340 .fixed_ctl
= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL
,
2341 .num_shared_regs
= 1,
2342 .ops
= &hswep_uncore_ubox_ops
,
2343 .format_group
= &hswep_uncore_ubox_format_group
,
2346 static struct attribute
*hswep_uncore_cbox_formats_attr
[] = {
2347 &format_attr_event
.attr
,
2348 &format_attr_umask
.attr
,
2349 &format_attr_edge
.attr
,
2350 &format_attr_tid_en
.attr
,
2351 &format_attr_thresh8
.attr
,
2352 &format_attr_filter_tid3
.attr
,
2353 &format_attr_filter_link2
.attr
,
2354 &format_attr_filter_state3
.attr
,
2355 &format_attr_filter_nid2
.attr
,
2356 &format_attr_filter_opc2
.attr
,
2357 &format_attr_filter_nc
.attr
,
2358 &format_attr_filter_c6
.attr
,
2359 &format_attr_filter_isoc
.attr
,
2363 static struct attribute_group hswep_uncore_cbox_format_group
= {
2365 .attrs
= hswep_uncore_cbox_formats_attr
,
2368 static struct event_constraint hswep_uncore_cbox_constraints
[] = {
2369 UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
2370 UNCORE_EVENT_CONSTRAINT(0x09, 0x1),
2371 UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
2372 UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
2373 UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
2374 UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
2375 UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
2376 EVENT_CONSTRAINT_END
2379 static struct extra_reg hswep_uncore_cbox_extra_regs
[] = {
2380 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN
,
2381 SNBEP_CBO_PMON_CTL_TID_EN
, 0x1),
2382 SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
2383 SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
2384 SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
2385 SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
2386 SNBEP_CBO_EVENT_EXTRA_REG(0x2134, 0xffff, 0x4),
2387 SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x4),
2388 SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
2389 SNBEP_CBO_EVENT_EXTRA_REG(0x4028, 0x40ff, 0x8),
2390 SNBEP_CBO_EVENT_EXTRA_REG(0x4032, 0x40ff, 0x8),
2391 SNBEP_CBO_EVENT_EXTRA_REG(0x4029, 0x40ff, 0x8),
2392 SNBEP_CBO_EVENT_EXTRA_REG(0x4033, 0x40ff, 0x8),
2393 SNBEP_CBO_EVENT_EXTRA_REG(0x402A, 0x40ff, 0x8),
2394 SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x12),
2395 SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
2396 SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
2397 SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
2398 SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
2399 SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
2400 SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
2401 SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
2402 SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
2403 SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
2404 SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
2405 SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
2406 SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
2407 SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
2408 SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
2409 SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
2410 SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
2411 SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
2412 SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
2413 SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
2414 SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
2415 SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
2416 SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
2417 SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
2421 static u64
hswep_cbox_filter_mask(int fields
)
2425 mask
|= HSWEP_CB0_MSR_PMON_BOX_FILTER_TID
;
2427 mask
|= HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK
;
2429 mask
|= HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE
;
2431 mask
|= HSWEP_CB0_MSR_PMON_BOX_FILTER_NID
;
2432 if (fields
& 0x10) {
2433 mask
|= HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC
;
2434 mask
|= HSWEP_CB0_MSR_PMON_BOX_FILTER_NC
;
2435 mask
|= HSWEP_CB0_MSR_PMON_BOX_FILTER_C6
;
2436 mask
|= HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC
;
/*
 * Constraint lookup for HSW-EP C-Box events: defer to the shared SNB-EP
 * helper, parameterized with the HSW-EP filter-mask builder so shared
 * filter-register conflicts are accounted for.
 */
static struct event_constraint *
hswep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, hswep_cbox_filter_mask);
}
2447 static int hswep_cbox_hw_config(struct intel_uncore_box
*box
, struct perf_event
*event
)
2449 struct hw_perf_event_extra
*reg1
= &event
->hw
.extra_reg
;
2450 struct extra_reg
*er
;
2453 for (er
= hswep_uncore_cbox_extra_regs
; er
->msr
; er
++) {
2454 if (er
->event
!= (event
->hw
.config
& er
->config_mask
))
2460 reg1
->reg
= HSWEP_C0_MSR_PMON_BOX_FILTER0
+
2461 HSWEP_CBO_MSR_OFFSET
* box
->pmu
->pmu_idx
;
2462 reg1
->config
= event
->attr
.config1
& hswep_cbox_filter_mask(idx
);
/*
 * Enable a HSW-EP C-Box event. If the event carries a filter extra-register,
 * first write the 64-bit shared filter value, then set the event's enable
 * bit in its control register.
 */
static void hswep_cbox_enable_event(struct intel_uncore_box *box,
				    struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		u64 filter = uncore_shared_reg_config(box, 0);
		/* The 64-bit filter value spans two consecutive MSRs. */
		wrmsrl(reg1->reg, filter & 0xffffffff);
		wrmsrl(reg1->reg + 1, filter >> 32);
	}

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
/*
 * HSW-EP C-Box MSR ops: generic SNB-EP box handling, plus the HSW-EP
 * specific filter configuration, filter-aware enable path and shared
 * filter-register constraint management.
 */
static struct intel_uncore_ops hswep_uncore_cbox_ops = {
	.init_box		= snbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= hswep_cbox_hw_config,
	.get_constraint		= hswep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
2495 static struct intel_uncore_type hswep_uncore_cbox
= {
2499 .perf_ctr_bits
= 48,
2500 .event_ctl
= HSWEP_C0_MSR_PMON_CTL0
,
2501 .perf_ctr
= HSWEP_C0_MSR_PMON_CTR0
,
2502 .event_mask
= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK
,
2503 .box_ctl
= HSWEP_C0_MSR_PMON_BOX_CTL
,
2504 .msr_offset
= HSWEP_CBO_MSR_OFFSET
,
2505 .num_shared_regs
= 1,
2506 .constraints
= hswep_uncore_cbox_constraints
,
2507 .ops
= &hswep_uncore_cbox_ops
,
2508 .format_group
= &hswep_uncore_cbox_format_group
,
2512 * Write SBOX Initialization register bit by bit to avoid spurious #GPs
2514 static void hswep_uncore_sbox_msr_init_box(struct intel_uncore_box
*box
)
2516 unsigned msr
= uncore_msr_box_ctl(box
);
2519 u64 init
= SNBEP_PMON_BOX_CTL_INT
;
2523 for_each_set_bit(i
, (unsigned long *)&init
, 64) {
2524 flags
|= (1ULL << i
);
/*
 * S-Box MSR ops: common SNB-EP handlers, with init_box overridden to the
 * bit-by-bit writer above to avoid spurious #GPs.
 */
static struct intel_uncore_ops hswep_uncore_sbox_msr_ops = {
	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.init_box		= hswep_uncore_sbox_msr_init_box,
};
2535 static struct attribute
*hswep_uncore_sbox_formats_attr
[] = {
2536 &format_attr_event
.attr
,
2537 &format_attr_umask
.attr
,
2538 &format_attr_edge
.attr
,
2539 &format_attr_tid_en
.attr
,
2540 &format_attr_inv
.attr
,
2541 &format_attr_thresh8
.attr
,
2545 static struct attribute_group hswep_uncore_sbox_format_group
= {
2547 .attrs
= hswep_uncore_sbox_formats_attr
,
2550 static struct intel_uncore_type hswep_uncore_sbox
= {
2554 .perf_ctr_bits
= 44,
2555 .event_ctl
= HSWEP_S0_MSR_PMON_CTL0
,
2556 .perf_ctr
= HSWEP_S0_MSR_PMON_CTR0
,
2557 .event_mask
= HSWEP_S_MSR_PMON_RAW_EVENT_MASK
,
2558 .box_ctl
= HSWEP_S0_MSR_PMON_BOX_CTL
,
2559 .msr_offset
= HSWEP_SBOX_MSR_OFFSET
,
2560 .ops
= &hswep_uncore_sbox_msr_ops
,
2561 .format_group
= &hswep_uncore_sbox_format_group
,
2564 static int hswep_pcu_hw_config(struct intel_uncore_box
*box
, struct perf_event
*event
)
2566 struct hw_perf_event
*hwc
= &event
->hw
;
2567 struct hw_perf_event_extra
*reg1
= &hwc
->extra_reg
;
2568 int ev_sel
= hwc
->config
& SNBEP_PMON_CTL_EV_SEL_MASK
;
2570 if (ev_sel
>= 0xb && ev_sel
<= 0xe) {
2571 reg1
->reg
= HSWEP_PCU_MSR_PMON_BOX_FILTER
;
2572 reg1
->idx
= ev_sel
- 0xb;
2573 reg1
->config
= event
->attr
.config1
& (0xff << reg1
->idx
);
2578 static struct intel_uncore_ops hswep_uncore_pcu_ops
= {
2579 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
2580 .hw_config
= hswep_pcu_hw_config
,
2581 .get_constraint
= snbep_pcu_get_constraint
,
2582 .put_constraint
= snbep_pcu_put_constraint
,
2585 static struct intel_uncore_type hswep_uncore_pcu
= {
2589 .perf_ctr_bits
= 48,
2590 .perf_ctr
= HSWEP_PCU_MSR_PMON_CTR0
,
2591 .event_ctl
= HSWEP_PCU_MSR_PMON_CTL0
,
2592 .event_mask
= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK
,
2593 .box_ctl
= HSWEP_PCU_MSR_PMON_BOX_CTL
,
2594 .num_shared_regs
= 1,
2595 .ops
= &hswep_uncore_pcu_ops
,
2596 .format_group
= &snbep_uncore_pcu_format_group
,
2599 static struct intel_uncore_type
*hswep_msr_uncores
[] = {
2607 void hswep_uncore_cpu_init(void)
2609 int pkg
= topology_phys_to_logical_pkg(0);
2611 if (hswep_uncore_cbox
.num_boxes
> boot_cpu_data
.x86_max_cores
)
2612 hswep_uncore_cbox
.num_boxes
= boot_cpu_data
.x86_max_cores
;
2614 /* Detect 6-8 core systems with only two SBOXes */
2615 if (uncore_extra_pci_dev
[pkg
].dev
[HSWEP_PCI_PCU_3
]) {
2618 pci_read_config_dword(uncore_extra_pci_dev
[pkg
].dev
[HSWEP_PCI_PCU_3
],
2620 if (((capid4
>> 6) & 0x3) == 0)
2621 hswep_uncore_sbox
.num_boxes
= 2;
2624 uncore_msr_uncores
= hswep_msr_uncores
;
2627 static struct intel_uncore_type hswep_uncore_ha
= {
2631 .perf_ctr_bits
= 48,
2632 SNBEP_UNCORE_PCI_COMMON_INIT(),
2635 static struct uncore_event_desc hswep_uncore_imc_events
[] = {
2636 INTEL_UNCORE_EVENT_DESC(clockticks
, "event=0x00,umask=0x00"),
2637 INTEL_UNCORE_EVENT_DESC(cas_count_read
, "event=0x04,umask=0x03"),
2638 INTEL_UNCORE_EVENT_DESC(cas_count_read
.scale
, "6.103515625e-5"),
2639 INTEL_UNCORE_EVENT_DESC(cas_count_read
.unit
, "MiB"),
2640 INTEL_UNCORE_EVENT_DESC(cas_count_write
, "event=0x04,umask=0x0c"),
2641 INTEL_UNCORE_EVENT_DESC(cas_count_write
.scale
, "6.103515625e-5"),
2642 INTEL_UNCORE_EVENT_DESC(cas_count_write
.unit
, "MiB"),
2643 { /* end: all zeroes */ },
2646 static struct intel_uncore_type hswep_uncore_imc
= {
2650 .perf_ctr_bits
= 48,
2651 .fixed_ctr_bits
= 48,
2652 .fixed_ctr
= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR
,
2653 .fixed_ctl
= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL
,
2654 .event_descs
= hswep_uncore_imc_events
,
2655 SNBEP_UNCORE_PCI_COMMON_INIT(),
2658 static unsigned hswep_uncore_irp_ctrs
[] = {0xa0, 0xa8, 0xb0, 0xb8};
2660 static u64
hswep_uncore_irp_read_counter(struct intel_uncore_box
*box
, struct perf_event
*event
)
2662 struct pci_dev
*pdev
= box
->pci_dev
;
2663 struct hw_perf_event
*hwc
= &event
->hw
;
2666 pci_read_config_dword(pdev
, hswep_uncore_irp_ctrs
[hwc
->idx
], (u32
*)&count
);
2667 pci_read_config_dword(pdev
, hswep_uncore_irp_ctrs
[hwc
->idx
] + 4, (u32
*)&count
+ 1);
/*
 * IRP PCI ops: generic SNB-EP box control, IvyBridge-EP style event
 * enable/disable, and the HSW-EP split 32-bit counter reader above.
 */
static struct intel_uncore_ops hswep_uncore_irp_ops = {
	.init_box	= snbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= ivbep_uncore_irp_disable_event,
	.enable_event	= ivbep_uncore_irp_enable_event,
	.read_counter	= hswep_uncore_irp_read_counter,
};
2681 static struct intel_uncore_type hswep_uncore_irp
= {
2685 .perf_ctr_bits
= 48,
2686 .event_mask
= SNBEP_PMON_RAW_EVENT_MASK
,
2687 .box_ctl
= SNBEP_PCI_PMON_BOX_CTL
,
2688 .ops
= &hswep_uncore_irp_ops
,
2689 .format_group
= &snbep_uncore_format_group
,
2692 static struct intel_uncore_type hswep_uncore_qpi
= {
2696 .perf_ctr_bits
= 48,
2697 .perf_ctr
= SNBEP_PCI_PMON_CTR0
,
2698 .event_ctl
= SNBEP_PCI_PMON_CTL0
,
2699 .event_mask
= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK
,
2700 .box_ctl
= SNBEP_PCI_PMON_BOX_CTL
,
2701 .num_shared_regs
= 1,
2702 .ops
= &snbep_uncore_qpi_ops
,
2703 .format_group
= &snbep_uncore_qpi_format_group
,
2706 static struct event_constraint hswep_uncore_r2pcie_constraints
[] = {
2707 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
2708 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
2709 UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
2710 UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
2711 UNCORE_EVENT_CONSTRAINT(0x24, 0x1),
2712 UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
2713 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
2714 UNCORE_EVENT_CONSTRAINT(0x27, 0x1),
2715 UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
2716 UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
2717 UNCORE_EVENT_CONSTRAINT(0x2a, 0x1),
2718 UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
2719 UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
2720 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
2721 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
2722 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
2723 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
2724 UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
2725 EVENT_CONSTRAINT_END
2728 static struct intel_uncore_type hswep_uncore_r2pcie
= {
2732 .perf_ctr_bits
= 48,
2733 .constraints
= hswep_uncore_r2pcie_constraints
,
2734 SNBEP_UNCORE_PCI_COMMON_INIT(),
2737 static struct event_constraint hswep_uncore_r3qpi_constraints
[] = {
2738 UNCORE_EVENT_CONSTRAINT(0x01, 0x3),
2739 UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
2740 UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
2741 UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
2742 UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
2743 UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
2744 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
2745 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
2746 UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
2747 UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
2748 UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
2749 UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
2750 UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
2751 UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
2752 UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
2753 UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
2754 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
2755 UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
2756 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
2757 UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
2758 UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
2759 UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
2760 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
2761 UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
2762 UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
2763 UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
2764 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
2765 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
2766 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
2767 UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
2768 UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
2769 UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
2770 UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
2771 EVENT_CONSTRAINT_END
2774 static struct intel_uncore_type hswep_uncore_r3qpi
= {
2778 .perf_ctr_bits
= 44,
2779 .constraints
= hswep_uncore_r3qpi_constraints
,
2780 SNBEP_UNCORE_PCI_COMMON_INIT(),
2784 HSWEP_PCI_UNCORE_HA
,
2785 HSWEP_PCI_UNCORE_IMC
,
2786 HSWEP_PCI_UNCORE_IRP
,
2787 HSWEP_PCI_UNCORE_QPI
,
2788 HSWEP_PCI_UNCORE_R2PCIE
,
2789 HSWEP_PCI_UNCORE_R3QPI
,
2792 static struct intel_uncore_type
*hswep_pci_uncores
[] = {
2793 [HSWEP_PCI_UNCORE_HA
] = &hswep_uncore_ha
,
2794 [HSWEP_PCI_UNCORE_IMC
] = &hswep_uncore_imc
,
2795 [HSWEP_PCI_UNCORE_IRP
] = &hswep_uncore_irp
,
2796 [HSWEP_PCI_UNCORE_QPI
] = &hswep_uncore_qpi
,
2797 [HSWEP_PCI_UNCORE_R2PCIE
] = &hswep_uncore_r2pcie
,
2798 [HSWEP_PCI_UNCORE_R3QPI
] = &hswep_uncore_r3qpi
,
2802 static const struct pci_device_id hswep_uncore_pci_ids
[] = {
2803 { /* Home Agent 0 */
2804 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x2f30),
2805 .driver_data
= UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA
, 0),
2807 { /* Home Agent 1 */
2808 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x2f38),
2809 .driver_data
= UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA
, 1),
2811 { /* MC0 Channel 0 */
2812 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x2fb0),
2813 .driver_data
= UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC
, 0),
2815 { /* MC0 Channel 1 */
2816 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x2fb1),
2817 .driver_data
= UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC
, 1),
2819 { /* MC0 Channel 2 */
2820 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x2fb4),
2821 .driver_data
= UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC
, 2),
2823 { /* MC0 Channel 3 */
2824 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x2fb5),
2825 .driver_data
= UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC
, 3),
2827 { /* MC1 Channel 0 */
2828 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x2fd0),
2829 .driver_data
= UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC
, 4),
2831 { /* MC1 Channel 1 */
2832 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x2fd1),
2833 .driver_data
= UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC
, 5),
2835 { /* MC1 Channel 2 */
2836 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x2fd4),
2837 .driver_data
= UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC
, 6),
2839 { /* MC1 Channel 3 */
2840 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x2fd5),
2841 .driver_data
= UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC
, 7),
2844 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x2f39),
2845 .driver_data
= UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IRP
, 0),
2848 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x2f32),
2849 .driver_data
= UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI
, 0),
2852 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x2f33),
2853 .driver_data
= UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI
, 1),
2856 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x2f3a),
2857 .driver_data
= UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI
, 2),
2860 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x2f34),
2861 .driver_data
= UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R2PCIE
, 0),
2863 { /* R3QPI0 Link 0 */
2864 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x2f36),
2865 .driver_data
= UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI
, 0),
2867 { /* R3QPI0 Link 1 */
2868 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x2f37),
2869 .driver_data
= UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI
, 1),
2871 { /* R3QPI1 Link 2 */
2872 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x2f3e),
2873 .driver_data
= UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI
, 2),
2875 { /* QPI Port 0 filter */
2876 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x2f86),
2877 .driver_data
= UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV
,
2878 SNBEP_PCI_QPI_PORT0_FILTER
),
2880 { /* QPI Port 1 filter */
2881 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x2f96),
2882 .driver_data
= UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV
,
2883 SNBEP_PCI_QPI_PORT1_FILTER
),
2885 { /* PCU.3 (for Capability registers) */
2886 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x2fc0),
2887 .driver_data
= UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV
,
2890 { /* end: all zeroes */ }
2893 static struct pci_driver hswep_uncore_pci_driver
= {
2894 .name
= "hswep_uncore",
2895 .id_table
= hswep_uncore_pci_ids
,
2898 int hswep_uncore_pci_init(void)
2900 int ret
= snbep_pci2phy_map_init(0x2f1e);
2903 uncore_pci_uncores
= hswep_pci_uncores
;
2904 uncore_pci_driver
= &hswep_uncore_pci_driver
;
2907 /* end of Haswell-EP uncore support */
2909 /* BDX uncore support */
2911 static struct intel_uncore_type bdx_uncore_ubox
= {
2915 .perf_ctr_bits
= 48,
2916 .fixed_ctr_bits
= 48,
2917 .perf_ctr
= HSWEP_U_MSR_PMON_CTR0
,
2918 .event_ctl
= HSWEP_U_MSR_PMON_CTL0
,
2919 .event_mask
= SNBEP_U_MSR_PMON_RAW_EVENT_MASK
,
2920 .fixed_ctr
= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR
,
2921 .fixed_ctl
= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL
,
2922 .num_shared_regs
= 1,
2923 .ops
= &ivbep_uncore_msr_ops
,
2924 .format_group
= &ivbep_uncore_ubox_format_group
,
2927 static struct event_constraint bdx_uncore_cbox_constraints
[] = {
2928 UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
2929 UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
2930 UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
2931 UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
2932 EVENT_CONSTRAINT_END
2935 static struct intel_uncore_type bdx_uncore_cbox
= {
2939 .perf_ctr_bits
= 48,
2940 .event_ctl
= HSWEP_C0_MSR_PMON_CTL0
,
2941 .perf_ctr
= HSWEP_C0_MSR_PMON_CTR0
,
2942 .event_mask
= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK
,
2943 .box_ctl
= HSWEP_C0_MSR_PMON_BOX_CTL
,
2944 .msr_offset
= HSWEP_CBO_MSR_OFFSET
,
2945 .num_shared_regs
= 1,
2946 .constraints
= bdx_uncore_cbox_constraints
,
2947 .ops
= &hswep_uncore_cbox_ops
,
2948 .format_group
= &hswep_uncore_cbox_format_group
,
2951 static struct intel_uncore_type
*bdx_msr_uncores
[] = {
/*
 * Register the Broadwell-X MSR-based PMON units, clamping the number of
 * C-Box instances to the number of cores actually present.
 */
void bdx_uncore_cpu_init(void)
{
	if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	uncore_msr_uncores = bdx_msr_uncores;
}
2965 static struct intel_uncore_type bdx_uncore_ha
= {
2969 .perf_ctr_bits
= 48,
2970 SNBEP_UNCORE_PCI_COMMON_INIT(),
2973 static struct intel_uncore_type bdx_uncore_imc
= {
2977 .perf_ctr_bits
= 48,
2978 .fixed_ctr_bits
= 48,
2979 .fixed_ctr
= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR
,
2980 .fixed_ctl
= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL
,
2981 .event_descs
= hswep_uncore_imc_events
,
2982 SNBEP_UNCORE_PCI_COMMON_INIT(),
2985 static struct intel_uncore_type bdx_uncore_irp
= {
2989 .perf_ctr_bits
= 48,
2990 .event_mask
= SNBEP_PMON_RAW_EVENT_MASK
,
2991 .box_ctl
= SNBEP_PCI_PMON_BOX_CTL
,
2992 .ops
= &hswep_uncore_irp_ops
,
2993 .format_group
= &snbep_uncore_format_group
,
2996 static struct intel_uncore_type bdx_uncore_qpi
= {
3000 .perf_ctr_bits
= 48,
3001 .perf_ctr
= SNBEP_PCI_PMON_CTR0
,
3002 .event_ctl
= SNBEP_PCI_PMON_CTL0
,
3003 .event_mask
= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK
,
3004 .box_ctl
= SNBEP_PCI_PMON_BOX_CTL
,
3005 .num_shared_regs
= 1,
3006 .ops
= &snbep_uncore_qpi_ops
,
3007 .format_group
= &snbep_uncore_qpi_format_group
,
3010 static struct event_constraint bdx_uncore_r2pcie_constraints
[] = {
3011 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
3012 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
3013 UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
3014 UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
3015 UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
3016 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
3017 UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
3018 UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
3019 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
3020 EVENT_CONSTRAINT_END
3023 static struct intel_uncore_type bdx_uncore_r2pcie
= {
3027 .perf_ctr_bits
= 48,
3028 .constraints
= bdx_uncore_r2pcie_constraints
,
3029 SNBEP_UNCORE_PCI_COMMON_INIT(),
3032 static struct event_constraint bdx_uncore_r3qpi_constraints
[] = {
3033 UNCORE_EVENT_CONSTRAINT(0x01, 0x7),
3034 UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
3035 UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
3036 UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
3037 UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
3038 UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
3039 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
3040 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
3041 UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
3042 UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
3043 UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
3044 UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
3045 UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
3046 UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
3047 UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
3048 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
3049 UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
3050 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
3051 UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
3052 UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
3053 UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
3054 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
3055 UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
3056 UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
3057 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
3058 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
3059 UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
3060 UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
3061 UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
3062 UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
3063 EVENT_CONSTRAINT_END
3066 static struct intel_uncore_type bdx_uncore_r3qpi
= {
3070 .perf_ctr_bits
= 48,
3071 .constraints
= bdx_uncore_r3qpi_constraints
,
3072 SNBEP_UNCORE_PCI_COMMON_INIT(),
3080 BDX_PCI_UNCORE_R2PCIE
,
3081 BDX_PCI_UNCORE_R3QPI
,
3084 static struct intel_uncore_type
*bdx_pci_uncores
[] = {
3085 [BDX_PCI_UNCORE_HA
] = &bdx_uncore_ha
,
3086 [BDX_PCI_UNCORE_IMC
] = &bdx_uncore_imc
,
3087 [BDX_PCI_UNCORE_IRP
] = &bdx_uncore_irp
,
3088 [BDX_PCI_UNCORE_QPI
] = &bdx_uncore_qpi
,
3089 [BDX_PCI_UNCORE_R2PCIE
] = &bdx_uncore_r2pcie
,
3090 [BDX_PCI_UNCORE_R3QPI
] = &bdx_uncore_r3qpi
,
3094 static const struct pci_device_id bdx_uncore_pci_ids
[] = {
3095 { /* Home Agent 0 */
3096 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x6f30),
3097 .driver_data
= UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA
, 0),
3099 { /* Home Agent 1 */
3100 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x6f38),
3101 .driver_data
= UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA
, 1),
3103 { /* MC0 Channel 0 */
3104 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x6fb0),
3105 .driver_data
= UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC
, 0),
3107 { /* MC0 Channel 1 */
3108 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x6fb1),
3109 .driver_data
= UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC
, 1),
3111 { /* MC0 Channel 2 */
3112 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x6fb4),
3113 .driver_data
= UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC
, 2),
3115 { /* MC0 Channel 3 */
3116 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x6fb5),
3117 .driver_data
= UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC
, 3),
3119 { /* MC1 Channel 0 */
3120 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x6fd0),
3121 .driver_data
= UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC
, 4),
3123 { /* MC1 Channel 1 */
3124 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x6fd1),
3125 .driver_data
= UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC
, 5),
3127 { /* MC1 Channel 2 */
3128 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x6fd4),
3129 .driver_data
= UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC
, 6),
3131 { /* MC1 Channel 3 */
3132 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x6fd5),
3133 .driver_data
= UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC
, 7),
3136 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x6f39),
3137 .driver_data
= UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IRP
, 0),
3140 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x6f32),
3141 .driver_data
= UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI
, 0),
3144 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x6f33),
3145 .driver_data
= UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI
, 1),
3148 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x6f3a),
3149 .driver_data
= UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI
, 2),
3152 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x6f34),
3153 .driver_data
= UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R2PCIE
, 0),
3155 { /* R3QPI0 Link 0 */
3156 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x6f36),
3157 .driver_data
= UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI
, 0),
3159 { /* R3QPI0 Link 1 */
3160 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x6f37),
3161 .driver_data
= UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI
, 1),
3163 { /* R3QPI1 Link 2 */
3164 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x6f3e),
3165 .driver_data
= UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI
, 2),
3167 { /* QPI Port 0 filter */
3168 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x6f86),
3169 .driver_data
= UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV
, 0),
3171 { /* QPI Port 1 filter */
3172 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x6f96),
3173 .driver_data
= UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV
, 1),
3175 { /* QPI Port 2 filter */
3176 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x6f46),
3177 .driver_data
= UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV
, 2),
3179 { /* end: all zeroes */ }
3182 static struct pci_driver bdx_uncore_pci_driver
= {
3183 .name
= "bdx_uncore",
3184 .id_table
= bdx_uncore_pci_ids
,
3187 int bdx_uncore_pci_init(void)
3189 int ret
= snbep_pci2phy_map_init(0x6f1e);
3193 uncore_pci_uncores
= bdx_pci_uncores
;
3194 uncore_pci_driver
= &bdx_uncore_pci_driver
;
3198 /* end of BDX uncore support */