/*
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Jacob Shin <jacob.shin@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>

#include <asm/cpufeature.h>
#include <asm/perf_event.h>
#include <asm/msr.h>

#define NUM_COUNTERS_NB		4
#define NUM_COUNTERS_L2		4
#define MAX_COUNTERS		NUM_COUNTERS_NB

#define RDPMC_BASE_NB		6
#define RDPMC_BASE_L2		10

#define COUNTER_SHIFT		16

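/*
 * A single amd_uncore instance is shared by every CPU that belongs to
 * the same counter domain (one northbridge, or one L2 cache complex):
 * @id names the domain, @refcnt counts the attached CPUs, and @cpu is
 * the one CPU designated to service events for the whole domain.
 * @free_when_cpu_online parks a redundant allocation made in the
 * hotplug prepare step until it can be freed from the online callback.
 */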
struct amd_uncore {
	int id;
	int refcnt;
	int cpu;
	int num_counters;
	int rdpmc_base;
	u32 msr_base;
	cpumask_t *active_mask;
	struct pmu *pmu;
	struct perf_event *events[MAX_COUNTERS];
	struct amd_uncore *free_when_cpu_online;
};

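/*
 * Per-cpu slots: each slot points at the (possibly shared) amd_uncore
 * instance that services the counter domain this CPU belongs to.
 */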
static struct amd_uncore * __percpu *amd_uncore_nb;
static struct amd_uncore * __percpu *amd_uncore_l2;

static struct pmu amd_nb_pmu;
static struct pmu amd_l2_pmu;

static cpumask_t amd_nb_active_mask;
static cpumask_t amd_l2_active_mask;

static bool is_nb_event(struct perf_event *event)
{
	return event->pmu->type == amd_nb_pmu.type;
}

static bool is_l2_event(struct perf_event *event)
{
	return event->pmu->type == amd_l2_pmu.type;
}

static struct amd_uncore *event_to_amd_uncore(struct perf_event *event)
{
	if (is_nb_event(event) && amd_uncore_nb)
		return *per_cpu_ptr(amd_uncore_nb, event->cpu);
	else if (is_l2_event(event) && amd_uncore_l2)
		return *per_cpu_ptr(amd_uncore_l2, event->cpu);

	return NULL;
}

static void amd_uncore_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev, new;
	s64 delta;

	/*
	 * since we do not enable counter overflow interrupts,
	 * we do not have to worry about prev_count changing on us
	 */

	prev = local64_read(&hwc->prev_count);
	rdpmcl(hwc->event_base_rdpmc, new);
	local64_set(&hwc->prev_count, new);
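	/*
	 * The counters are 48 bits wide (COUNTER_SHIFT == 64 - 48):
	 * shifting both samples up by 16 puts counter bit 47 into the
	 * 64-bit sign bit, so the subtraction yields a correctly signed
	 * delta even across a counter wraparound, and the arithmetic
	 * right shift then restores the original scale.
	 */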
	delta = (new << COUNTER_SHIFT) - (prev << COUNTER_SHIFT);
	delta >>= COUNTER_SHIFT;
	local64_add(delta, &event->count);
}

static void amd_uncore_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (flags & PERF_EF_RELOAD)
		wrmsrl(hwc->event_base, (u64)local64_read(&hwc->prev_count));

	hwc->state = 0;
	wrmsrl(hwc->config_base, (hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE));
	perf_event_update_userpage(event);
}

static void amd_uncore_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
	hwc->state |= PERF_HES_STOPPED;

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		amd_uncore_read(event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}

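/*
 * Counters are claimed lock-free: a cmpxchg() on the events[] array
 * shared by all CPUs of the domain arbitrates ownership, so no
 * spinlock is needed on the add/del path.
 */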
static int amd_uncore_add(struct perf_event *event, int flags)
{
	int i;
	struct amd_uncore *uncore = event_to_amd_uncore(event);
	struct hw_perf_event *hwc = &event->hw;

	/* are we already assigned? */
	if (hwc->idx != -1 && uncore->events[hwc->idx] == event)
		goto out;

	for (i = 0; i < uncore->num_counters; i++) {
		if (uncore->events[i] == event) {
			hwc->idx = i;
			goto out;
		}
	}

	/* if not, take the first available counter */
	hwc->idx = -1;
	for (i = 0; i < uncore->num_counters; i++) {
		if (cmpxchg(&uncore->events[i], NULL, event) == NULL) {
			hwc->idx = i;
			break;
		}
	}

out:
	if (hwc->idx == -1)
		return -EBUSY;

	hwc->config_base = uncore->msr_base + (2 * hwc->idx);
	hwc->event_base = uncore->msr_base + 1 + (2 * hwc->idx);
	hwc->event_base_rdpmc = uncore->rdpmc_base + hwc->idx;
	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	if (flags & PERF_EF_START)
		amd_uncore_start(event, PERF_EF_RELOAD);

	return 0;
}

static void amd_uncore_del(struct perf_event *event, int flags)
{
	int i;
	struct amd_uncore *uncore = event_to_amd_uncore(event);
	struct hw_perf_event *hwc = &event->hw;

	amd_uncore_stop(event, PERF_EF_UPDATE);

	for (i = 0; i < uncore->num_counters; i++) {
		if (cmpxchg(&uncore->events[i], event, NULL) == event)
			break;
	}

	hwc->idx = -1;
}

static int amd_uncore_event_init(struct perf_event *event)
{
	struct amd_uncore *uncore;
	struct hw_perf_event *hwc = &event->hw;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/*
	 * NB and L2 counters (MSRs) are shared across all cores that share the
	 * same NB / L2 cache.  Interrupts can be directed to a single target
	 * core, but event counts generated by processes running on other
	 * cores cannot be masked out.  So we do not support sampling and
	 * per-thread events.
	 */
	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EINVAL;

	/* NB and L2 counters do not have usr/os/guest/host bits */
	if (event->attr.exclude_user || event->attr.exclude_kernel ||
	    event->attr.exclude_host || event->attr.exclude_guest)
		return -EINVAL;

	/* and we do not enable counter overflow interrupts */
	hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
	hwc->idx = -1;

	if (event->cpu < 0)
		return -EINVAL;

	uncore = event_to_amd_uncore(event);
	if (!uncore)
		return -ENODEV;

	/*
	 * Since requests can come in to any of the shared cores, we will remap
	 * them to a single common cpu.
	 */
	event->cpu = uncore->cpu;

	return 0;
}

static ssize_t amd_uncore_attr_show_cpumask(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	cpumask_t *active_mask;
	struct pmu *pmu = dev_get_drvdata(dev);

	if (pmu->type == amd_nb_pmu.type)
		active_mask = &amd_nb_active_mask;
	else if (pmu->type == amd_l2_pmu.type)
		active_mask = &amd_l2_active_mask;
	else
		return 0;

	return cpumap_print_to_pagebuf(true, buf, active_mask);
}
static DEVICE_ATTR(cpumask, S_IRUGO, amd_uncore_attr_show_cpumask, NULL);

static struct attribute *amd_uncore_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group amd_uncore_attr_group = {
	.attrs = amd_uncore_attrs,
};

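/*
 * The raw config follows AMD's PERF_CTL layout: the event select is
 * split across bits 0-7 and the extended bits 32-35, with the unit
 * mask in bits 8-15.  Illustrative usage from userspace (the event and
 * umask values below are placeholders only; real encodings are
 * family-specific, see the BKDG for the processor in question):
 *
 *   perf stat -a -e amd_nb/event=0xe0,umask=0x0/ -- sleep 1
 */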
PMU_FORMAT_ATTR(event, "config:0-7,32-35");
PMU_FORMAT_ATTR(umask, "config:8-15");

static struct attribute *amd_uncore_format_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	NULL,
};

static struct attribute_group amd_uncore_format_group = {
	.name = "format",
	.attrs = amd_uncore_format_attr,
};

static const struct attribute_group *amd_uncore_attr_groups[] = {
	&amd_uncore_attr_group,
	&amd_uncore_format_group,
	NULL,
};

static struct pmu amd_nb_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.attr_groups	= amd_uncore_attr_groups,
	.name		= "amd_nb",
	.event_init	= amd_uncore_event_init,
	.add		= amd_uncore_add,
	.del		= amd_uncore_del,
	.start		= amd_uncore_start,
	.stop		= amd_uncore_stop,
	.read		= amd_uncore_read,
};

static struct pmu amd_l2_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.attr_groups	= amd_uncore_attr_groups,
	.name		= "amd_l2",
	.event_init	= amd_uncore_event_init,
	.add		= amd_uncore_add,
	.del		= amd_uncore_del,
	.start		= amd_uncore_start,
	.stop		= amd_uncore_stop,
	.read		= amd_uncore_read,
};

static struct amd_uncore *amd_uncore_alloc(unsigned int cpu)
{
	return kzalloc_node(sizeof(struct amd_uncore), GFP_KERNEL,
			cpu_to_node(cpu));
}

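/*
 * Hotplug prepare step: allocate candidate uncore structures for the
 * incoming CPU.  If the CPU turns out to share its NB or L2 domain
 * with a CPU that is already online, the duplicates are discarded
 * later, in the starting/online callbacks.
 */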
static int amd_uncore_cpu_up_prepare(unsigned int cpu)
{
	struct amd_uncore *uncore_nb = NULL, *uncore_l2;

	if (amd_uncore_nb) {
		uncore_nb = amd_uncore_alloc(cpu);
		if (!uncore_nb)
			goto fail;
		uncore_nb->cpu = cpu;
		uncore_nb->num_counters = NUM_COUNTERS_NB;
		uncore_nb->rdpmc_base = RDPMC_BASE_NB;
		uncore_nb->msr_base = MSR_F15H_NB_PERF_CTL;
		uncore_nb->active_mask = &amd_nb_active_mask;
		uncore_nb->pmu = &amd_nb_pmu;
		*per_cpu_ptr(amd_uncore_nb, cpu) = uncore_nb;
	}

	if (amd_uncore_l2) {
		uncore_l2 = amd_uncore_alloc(cpu);
		if (!uncore_l2)
			goto fail;
		uncore_l2->cpu = cpu;
		uncore_l2->num_counters = NUM_COUNTERS_L2;
		uncore_l2->rdpmc_base = RDPMC_BASE_L2;
		uncore_l2->msr_base = MSR_F16H_L2I_PERF_CTL;
		uncore_l2->active_mask = &amd_l2_active_mask;
		uncore_l2->pmu = &amd_l2_pmu;
		*per_cpu_ptr(amd_uncore_l2, cpu) = uncore_l2;
	}

	return 0;

fail:
	if (amd_uncore_nb)
		*per_cpu_ptr(amd_uncore_nb, cpu) = NULL;
	kfree(uncore_nb);
	return -ENOMEM;
}

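/*
 * Merge the freshly prepared uncore with an online sibling that
 * carries the same domain id, if one exists.  This runs from the
 * STARTING hotplug step, i.e. on the incoming CPU with interrupts
 * disabled, so the now-redundant allocation is only parked in
 * ->free_when_cpu_online here and freed later from the online
 * callback.
 */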
static struct amd_uncore *
amd_uncore_find_online_sibling(struct amd_uncore *this,
			       struct amd_uncore * __percpu *uncores)
{
	unsigned int cpu;
	struct amd_uncore *that;

	for_each_online_cpu(cpu) {
		that = *per_cpu_ptr(uncores, cpu);

		if (!that)
			continue;

		if (this == that)
			continue;

		if (this->id == that->id) {
			that->free_when_cpu_online = this;
			this = that;
			break;
		}
	}

	this->refcnt++;
	return this;
}

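/*
 * Domain ids come from the topology extensions: CPUID leaf 0x8000001e
 * ECX[7:0] is the node id, and CPUID leaf 0x8000001d index 2
 * EAX[25:14] is the number of cores sharing the L2 cache minus one.
 * Every CPU in one L2 complex thus derives the same id, the lowest
 * APIC id of the sharing set.
 */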
static int amd_uncore_cpu_starting(unsigned int cpu)
{
	unsigned int eax, ebx, ecx, edx;
	struct amd_uncore *uncore;

	if (amd_uncore_nb) {
		uncore = *per_cpu_ptr(amd_uncore_nb, cpu);
		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
		uncore->id = ecx & 0xff;

		uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_nb);
		*per_cpu_ptr(amd_uncore_nb, cpu) = uncore;
	}

	if (amd_uncore_l2) {
		unsigned int apicid = cpu_data(cpu).apicid;
		unsigned int nshared;

		uncore = *per_cpu_ptr(amd_uncore_l2, cpu);
		cpuid_count(0x8000001d, 2, &eax, &ebx, &ecx, &edx);
		nshared = ((eax >> 14) & 0xfff) + 1;
		uncore->id = apicid - (apicid % nshared);

		uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_l2);
		*per_cpu_ptr(amd_uncore_l2, cpu) = uncore;
	}

	return 0;
}

static void uncore_online(unsigned int cpu,
			  struct amd_uncore * __percpu *uncores)
{
	struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);

	kfree(uncore->free_when_cpu_online);
	uncore->free_when_cpu_online = NULL;

	if (cpu == uncore->cpu)
		cpumask_set_cpu(cpu, uncore->active_mask);
}

static int amd_uncore_cpu_online(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_online(cpu, amd_uncore_nb);

	if (amd_uncore_l2)
		uncore_online(cpu, amd_uncore_l2);

	return 0;
}

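/*
 * If the CPU going down is the designated reader for a domain, hand
 * the role to an online sibling: perf_pmu_migrate_context() moves the
 * active events over so that counting continues uninterrupted.
 */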
static void uncore_down_prepare(unsigned int cpu,
				struct amd_uncore * __percpu *uncores)
{
	unsigned int i;
	struct amd_uncore *this = *per_cpu_ptr(uncores, cpu);

	if (this->cpu != cpu)
		return;

	/* this cpu is going down, migrate to a shared sibling if possible */
	for_each_online_cpu(i) {
		struct amd_uncore *that = *per_cpu_ptr(uncores, i);

		if (cpu == i)
			continue;

		if (this == that) {
			perf_pmu_migrate_context(this->pmu, cpu, i);
			cpumask_clear_cpu(cpu, that->active_mask);
			cpumask_set_cpu(i, that->active_mask);
			that->cpu = i;
			break;
		}
	}
}

static int amd_uncore_cpu_down_prepare(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_down_prepare(cpu, amd_uncore_nb);

	if (amd_uncore_l2)
		uncore_down_prepare(cpu, amd_uncore_l2);

	return 0;
}

static void uncore_dead(unsigned int cpu, struct amd_uncore * __percpu *uncores)
{
	struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);

	if (cpu == uncore->cpu)
		cpumask_clear_cpu(cpu, uncore->active_mask);

	if (!--uncore->refcnt)
		kfree(uncore);
	*per_cpu_ptr(uncores, cpu) = NULL;
}

static int amd_uncore_cpu_dead(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_dead(cpu, amd_uncore_nb);

	if (amd_uncore_l2)
		uncore_dead(cpu, amd_uncore_l2);

	return 0;
}

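/*
 * Init: both counter blocks depend on the topology extensions
 * (TOPOEXT) for domain discovery, and each block additionally
 * advertises its own CPUID feature bit (PERFCTR_NB, PERFCTR_L2)
 * before its PMU is registered.
 */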
static int __init amd_uncore_init(void)
{
	int ret = -ENODEV;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		goto fail_nodev;

	if (!boot_cpu_has(X86_FEATURE_TOPOEXT))
		goto fail_nodev;

	if (boot_cpu_has(X86_FEATURE_PERFCTR_NB)) {
		amd_uncore_nb = alloc_percpu(struct amd_uncore *);
		if (!amd_uncore_nb) {
			ret = -ENOMEM;
			goto fail_nb;
		}
		ret = perf_pmu_register(&amd_nb_pmu, amd_nb_pmu.name, -1);
		if (ret)
			goto fail_nb;

		pr_info("perf: AMD NB counters detected\n");
		ret = 0;
	}

	if (boot_cpu_has(X86_FEATURE_PERFCTR_L2)) {
		amd_uncore_l2 = alloc_percpu(struct amd_uncore *);
		if (!amd_uncore_l2) {
			ret = -ENOMEM;
			goto fail_l2;
		}
		ret = perf_pmu_register(&amd_l2_pmu, amd_l2_pmu.name, -1);
		if (ret)
			goto fail_l2;

		pr_info("perf: AMD L2I counters detected\n");
		ret = 0;
	}

	/*
	 * Install callbacks. Core will call them for each online cpu.
	 */
	if (cpuhp_setup_state(CPUHP_PERF_X86_AMD_UNCORE_PREP,
			      "PERF_X86_AMD_UNCORE_PREP",
			      amd_uncore_cpu_up_prepare, amd_uncore_cpu_dead))
		goto fail_l2;

	if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
			      "AP_PERF_X86_AMD_UNCORE_STARTING",
			      amd_uncore_cpu_starting, NULL))
		goto fail_prep;
	if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE,
			      "AP_PERF_X86_AMD_UNCORE_ONLINE",
			      amd_uncore_cpu_online,
			      amd_uncore_cpu_down_prepare))
		goto fail_start;
	return 0;

fail_start:
	cpuhp_remove_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING);
fail_prep:
	cpuhp_remove_state(CPUHP_PERF_X86_AMD_UNCORE_PREP);
fail_l2:
	if (boot_cpu_has(X86_FEATURE_PERFCTR_NB))
		perf_pmu_unregister(&amd_nb_pmu);
	if (amd_uncore_l2)
		free_percpu(amd_uncore_l2);
fail_nb:
	if (amd_uncore_nb)
		free_percpu(amd_uncore_nb);

fail_nodev:
	return ret;
}
device_initcall(amd_uncore_init);