Merge branch 'stable-4.8' of git://git.infradead.org/users/pcmoore/audit
[deliverable/linux.git] arch/x86/events/core.c
index 33787ee817f0cdaad78814849b0aad2dc7e2b407..c17f0de5fd391c3ce000df6428802521384d179f 100644 (file)
@@ -263,7 +263,7 @@ static bool check_hw_exists(void)
 
 msr_fail:
        pr_cont("Broken PMU hardware detected, using software events only.\n");
-       pr_info("%sFailed to access perfctr msr (MSR %x is %Lx)\n",
+       printk("%sFailed to access perfctr msr (MSR %x is %Lx)\n",
                boot_cpu_has(X86_FEATURE_HYPERVISOR) ? KERN_INFO : KERN_ERR,
                reg, val_new);
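The hunk above drops pr_info() in favour of plain printk(): pr_info() bakes KERN_INFO into the format string, while printk() lets the severity be chosen at run time, since the level marker is parsed out of the already-formatted message. A minimal sketch of the same pattern, with made-up names (mydev, recoverable, status):

	/* Illustrative only: the "%s" argument selects KERN_INFO for the
	 * benign case and KERN_ERR otherwise, at run time.
	 */
	printk("%smydev: probe failed (status %#x)\n",
	       recoverable ? KERN_INFO : KERN_ERR, status);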
 
@@ -1477,49 +1477,49 @@ NOKPROBE_SYMBOL(perf_event_nmi_handler);
 struct event_constraint emptyconstraint;
 struct event_constraint unconstrained;
 
-static int
-x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
+static int x86_pmu_prepare_cpu(unsigned int cpu)
 {
-       unsigned int cpu = (long)hcpu;
        struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
-       int i, ret = NOTIFY_OK;
-
-       switch (action & ~CPU_TASKS_FROZEN) {
-       case CPU_UP_PREPARE:
-               for (i = 0 ; i < X86_PERF_KFREE_MAX; i++)
-                       cpuc->kfree_on_online[i] = NULL;
-               if (x86_pmu.cpu_prepare)
-                       ret = x86_pmu.cpu_prepare(cpu);
-               break;
-
-       case CPU_STARTING:
-               if (x86_pmu.cpu_starting)
-                       x86_pmu.cpu_starting(cpu);
-               break;
+       int i;
 
-       case CPU_ONLINE:
-               for (i = 0 ; i < X86_PERF_KFREE_MAX; i++) {
-                       kfree(cpuc->kfree_on_online[i]);
-                       cpuc->kfree_on_online[i] = NULL;
-               }
-               break;
+       for (i = 0 ; i < X86_PERF_KFREE_MAX; i++)
+               cpuc->kfree_on_online[i] = NULL;
+       if (x86_pmu.cpu_prepare)
+               return x86_pmu.cpu_prepare(cpu);
+       return 0;
+}
 
-       case CPU_DYING:
-               if (x86_pmu.cpu_dying)
-                       x86_pmu.cpu_dying(cpu);
-               break;
+static int x86_pmu_dead_cpu(unsigned int cpu)
+{
+       if (x86_pmu.cpu_dead)
+               x86_pmu.cpu_dead(cpu);
+       return 0;
+}
 
-       case CPU_UP_CANCELED:
-       case CPU_DEAD:
-               if (x86_pmu.cpu_dead)
-                       x86_pmu.cpu_dead(cpu);
-               break;
+static int x86_pmu_online_cpu(unsigned int cpu)
+{
+       struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+       int i;
 
-       default:
-               break;
+       for (i = 0 ; i < X86_PERF_KFREE_MAX; i++) {
+               kfree(cpuc->kfree_on_online[i]);
+               cpuc->kfree_on_online[i] = NULL;
        }
+       return 0;
+}
 
-       return ret;
+static int x86_pmu_starting_cpu(unsigned int cpu)
+{
+       if (x86_pmu.cpu_starting)
+               x86_pmu.cpu_starting(cpu);
+       return 0;
+}
+
+static int x86_pmu_dying_cpu(unsigned int cpu)
+{
+       if (x86_pmu.cpu_dying)
+               x86_pmu.cpu_dying(cpu);
+       return 0;
 }
 
 static void __init pmu_check_apic(void)
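The hunk above replaces the single x86_pmu_notifier(), which multiplexed on the hotplug action code, with one small callback per hotplug state; each returns 0 or an error instead of a NOTIFY_* value and is invoked by the hotplug core at the matching step. My reading of the correspondence, derived from the registrations in the init_hw_perf_events() hunk further down:

	/*
	 * CPU_UP_PREPARE            -> x86_pmu_prepare_cpu()   (CPUHP_PERF_X86_PREPARE, startup)
	 * CPU_UP_CANCELED/CPU_DEAD  -> x86_pmu_dead_cpu()      (CPUHP_PERF_X86_PREPARE, teardown)
	 * CPU_STARTING              -> x86_pmu_starting_cpu()  (CPUHP_AP_PERF_X86_STARTING, startup)
	 * CPU_DYING                 -> x86_pmu_dying_cpu()     (CPUHP_AP_PERF_X86_STARTING, teardown)
	 * CPU_ONLINE                -> x86_pmu_online_cpu()    (CPUHP_AP_PERF_X86_ONLINE, startup)
	 */

For example, the prepare/dead pair is installed with a single call (see the full registration with error handling below):

	cpuhp_setup_state(CPUHP_PERF_X86_PREPARE, "PERF_X86_PREPARE",
			  x86_pmu_prepare_cpu, x86_pmu_dead_cpu);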
@@ -1622,6 +1622,29 @@ ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr, cha
 }
 EXPORT_SYMBOL_GPL(events_sysfs_show);
 
+ssize_t events_ht_sysfs_show(struct device *dev, struct device_attribute *attr,
+                         char *page)
+{
+       struct perf_pmu_events_ht_attr *pmu_attr =
+               container_of(attr, struct perf_pmu_events_ht_attr, attr);
+
+       /*
+        * Report conditional events depending on Hyper-Threading.
+        *
+        * This is overly conservative as usually the HT special
+        * handling is not needed if the other CPU thread is idle.
+        *
+        * Note this does not (and cannot) handle the case when thread
+        * siblings are invisible, for example with virtualization
+        * if they are owned by some other guest.  The user tool
+        * has to re-read when a thread sibling gets onlined later.
+        */
+       return sprintf(page, "%s",
+                       topology_max_smt_threads() > 1 ?
+                       pmu_attr->event_str_ht :
+                       pmu_attr->event_str_noht);
+}
+
 EVENT_ATTR(cpu-cycles,                 CPU_CYCLES              );
 EVENT_ATTR(instructions,               INSTRUCTIONS            );
 EVENT_ATTR(cache-references,           CACHE_REFERENCES        );
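events_ht_sysfs_show(), added above, picks between two event string encodings depending on whether more than one SMT sibling can be active. A hedged sketch of the kind of attribute it consumes: the field layout follows the perf_pmu_events_ht_attr container_of() above, but the event name and encodings here are placeholders, and in-tree PMUs normally declare these through a helper macro (EVENT_ATTR_STR_HT) rather than open-coding the struct:

	static struct perf_pmu_events_ht_attr event_attr_example = {
		.attr		= __ATTR(example-event, 0444,
					 events_ht_sysfs_show, NULL),
		.event_str_noht	= "event=0x3c,umask=0x0",	/* placeholder encoding */
		.event_str_ht	= "event=0x3c,umask=0x0,any=1",	/* placeholder encoding */
	};

With SMT active the any=1 variant is exported; with SMT off the plain encoding is reported instead.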
@@ -1764,10 +1787,39 @@ static int __init init_hw_perf_events(void)
        pr_info("... fixed-purpose events:   %d\n",     x86_pmu.num_counters_fixed);
        pr_info("... event mask:             %016Lx\n", x86_pmu.intel_ctrl);
 
-       perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
-       perf_cpu_notifier(x86_pmu_notifier);
+       /*
+        * Install callbacks. Core will call them for each online
+        * cpu.
+        */
+       err = cpuhp_setup_state(CPUHP_PERF_X86_PREPARE, "PERF_X86_PREPARE",
+                               x86_pmu_prepare_cpu, x86_pmu_dead_cpu);
+       if (err)
+               return err;
+
+       err = cpuhp_setup_state(CPUHP_AP_PERF_X86_STARTING,
+                               "AP_PERF_X86_STARTING", x86_pmu_starting_cpu,
+                               x86_pmu_dying_cpu);
+       if (err)
+               goto out;
+
+       err = cpuhp_setup_state(CPUHP_AP_PERF_X86_ONLINE, "AP_PERF_X86_ONLINE",
+                               x86_pmu_online_cpu, NULL);
+       if (err)
+               goto out1;
+
+       err = perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
+       if (err)
+               goto out2;
 
        return 0;
+
+out2:
+       cpuhp_remove_state(CPUHP_AP_PERF_X86_ONLINE);
+out1:
+       cpuhp_remove_state(CPUHP_AP_PERF_X86_STARTING);
+out:
+       cpuhp_remove_state(CPUHP_PERF_X86_PREPARE);
+       return err;
 }
 early_initcall(init_hw_perf_events);
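If any of the cpuhp_setup_state() calls or the final perf_pmu_register() above fails, the states installed so far are removed again in reverse order (out2/out1/out). The x86 PMU core itself is never unloaded, but a removable driver following the same pattern would mirror that ordering in its exit path; a purely hypothetical sketch (all names invented):

	static void __exit foo_pmu_exit(void)
	{
		perf_pmu_unregister(&foo_pmu);
		cpuhp_remove_state(CPUHP_AP_FOO_ONLINE);	/* last state set up */
		cpuhp_remove_state(CPUHP_AP_FOO_STARTING);
		cpuhp_remove_state(CPUHP_FOO_PREPARE);		/* first state set up */
	}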
 
@@ -2319,7 +2371,7 @@ void
 perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
        struct stack_frame frame;
-       const void __user *fp;
+       const unsigned long __user *fp;
 
        if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
                /* TODO: We don't support guest os callchain now */
@@ -2332,7 +2384,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
        if (regs->flags & (X86_VM_MASK | PERF_EFLAGS_VM))
                return;
 
-       fp = (void __user *)regs->bp;
+       fp = (unsigned long __user *)regs->bp;
 
        perf_callchain_store(entry, regs->ip);
 
@@ -2345,16 +2397,17 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
        pagefault_disable();
        while (entry->nr < entry->max_stack) {
                unsigned long bytes;
+
                frame.next_frame             = NULL;
                frame.return_address = 0;
 
-               if (!access_ok(VERIFY_READ, fp, 16))
+               if (!access_ok(VERIFY_READ, fp, sizeof(*fp) * 2))
                        break;
 
-               bytes = __copy_from_user_nmi(&frame.next_frame, fp, 8);
+               bytes = __copy_from_user_nmi(&frame.next_frame, fp, sizeof(*fp));
                if (bytes != 0)
                        break;
-               bytes = __copy_from_user_nmi(&frame.return_address, fp+8, 8);
+               bytes = __copy_from_user_nmi(&frame.return_address, fp + 1, sizeof(*fp));
                if (bytes != 0)
                        break;
 