cpumask: convert kernel trace functions
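
This converts the static cpumask_t objects in kernel/trace/trace.c (tracing_buffer_mask and tracing_cpumask) to cpumask_var_t, replaces the static tracing_cpumask_new with a locally allocated mask in tracing_cpumask_write(), removes the static cpumask_t mask (and its cpus_clear()) from ftrace_dump(), and switches the old accessors (cpu_isset(), cpus_clear(), for_each_cpu_mask(), struct assignment) to the current cpumask API (cpumask_test_cpu(), for_each_cpu(), cpumask_copy()). With CONFIG_CPUMASK_OFFSTACK=y a cpumask_var_t is a pointer that must be set up with alloc_cpumask_var() and released with free_cpumask_var(); without it, it is an ordinary array and those calls are no-ops. A minimal sketch of the pattern being applied (example_mask and example_use are illustrative names, not symbols from this patch):

	#include <linux/cpumask.h>
	#include <linux/gfp.h>

	static cpumask_var_t example_mask;

	static int example_use(void)
	{
		int cpu, online = 0;

		/* kmallocs the mask when CONFIG_CPUMASK_OFFSTACK=y, no-op otherwise */
		if (!alloc_cpumask_var(&example_mask, GFP_KERNEL))
			return -ENOMEM;

		/* old style: example_mask = cpu_possible_map; */
		cpumask_copy(example_mask, cpu_possible_mask);

		/* old style: for_each_cpu_mask(cpu, example_mask), cpu_isset(cpu, ...) */
		for_each_cpu(cpu, example_mask)
			if (cpumask_test_cpu(cpu, cpu_online_mask))
				online++;

		free_cpumask_var(example_mask);
		return online;
	}
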
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 0e91f43b6baf53cac177af4ca02f0939997c1a0c..5d04e27f3b400799f9c0e2fad9ca922a3b322994 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -89,10 +89,10 @@ static inline void ftrace_enable_cpu(void)
        preempt_enable();
 }
 
-static cpumask_t __read_mostly         tracing_buffer_mask;
+static cpumask_var_t __read_mostly     tracing_buffer_mask;
 
 #define for_each_tracing_cpu(cpu)      \
-       for_each_cpu_mask(cpu, tracing_buffer_mask)
+       for_each_cpu(cpu, tracing_buffer_mask)
 
 /*
  * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
@@ -2646,13 +2646,7 @@ static struct file_operations show_traces_fops = {
 /*
  * Only trace on a CPU if the bitmask is set:
  */
-static cpumask_t tracing_cpumask = CPU_MASK_ALL;
-
-/*
- * When tracing/tracing_cpu_mask is modified then this holds
- * the new bitmask we are about to install:
- */
-static cpumask_t tracing_cpumask_new;
+static cpumask_var_t tracing_cpumask;
 
 /*
  * The tracer itself will not take this lock, but still we want
@@ -2674,7 +2668,7 @@ tracing_cpumask_read(struct file *filp, char __user *ubuf,
 
        mutex_lock(&tracing_cpumask_update_lock);
 
-       len = cpumask_scnprintf(mask_str, count, &tracing_cpumask);
+       len = cpumask_scnprintf(mask_str, count, tracing_cpumask);
        if (count - len < 2) {
                count = -EINVAL;
                goto out_err;
@@ -2693,9 +2687,13 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
                      size_t count, loff_t *ppos)
 {
        int err, cpu;
+       cpumask_var_t tracing_cpumask_new;
+
+       if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
+               return -ENOMEM;
 
        mutex_lock(&tracing_cpumask_update_lock);
-       err = cpumask_parse_user(ubuf, count, &tracing_cpumask_new);
+       err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
        if (err)
                goto err_unlock;
 
@@ -2706,26 +2704,28 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
                 * Increase/decrease the disabled counter if we are
                 * about to flip a bit in the cpumask:
                 */
-               if (cpu_isset(cpu, tracing_cpumask) &&
-                               !cpu_isset(cpu, tracing_cpumask_new)) {
+               if (cpumask_test_cpu(cpu, tracing_cpumask) &&
+                               !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
                        atomic_inc(&global_trace.data[cpu]->disabled);
                }
-               if (!cpu_isset(cpu, tracing_cpumask) &&
-                               cpu_isset(cpu, tracing_cpumask_new)) {
+               if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
+                               cpumask_test_cpu(cpu, tracing_cpumask_new)) {
                        atomic_dec(&global_trace.data[cpu]->disabled);
                }
        }
        __raw_spin_unlock(&ftrace_max_lock);
        local_irq_enable();
 
-       tracing_cpumask = tracing_cpumask_new;
+       cpumask_copy(tracing_cpumask, tracing_cpumask_new);
 
        mutex_unlock(&tracing_cpumask_update_lock);
+       free_cpumask_var(tracing_cpumask_new);
 
        return count;
 
 err_unlock:
        mutex_unlock(&tracing_cpumask_update_lock);
+       free_cpumask_var(tracing_cpumask_new);
 
        return err;
 }
@@ -3752,7 +3752,6 @@ void ftrace_dump(void)
        static DEFINE_SPINLOCK(ftrace_dump_lock);
        /* use static because iter can be a bit big for the stack */
        static struct trace_iterator iter;
-       static cpumask_t mask;
        static int dump_ran;
        unsigned long flags;
        int cnt = 0, cpu;
@@ -3786,8 +3785,6 @@ void ftrace_dump(void)
         * and then release the locks again.
         */
 
-       cpus_clear(mask);
-
        while (!trace_empty(&iter)) {
 
                if (!cnt)
@@ -3823,19 +3820,28 @@ __init static int tracer_alloc_buffers(void)
 {
        struct trace_array_cpu *data;
        int i;
+       int ret = -ENOMEM;
 
-       /* TODO: make the number of buffers hot pluggable with CPUS */
-       tracing_buffer_mask = cpu_possible_map;
+       if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
+               goto out;
+
+       if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
+               goto out_free_buffer_mask;
 
+       cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
+       cpumask_copy(tracing_cpumask, cpu_all_mask);
+
+       /* TODO: make the number of buffers hot pluggable with CPUS */
        global_trace.buffer = ring_buffer_alloc(trace_buf_size,
                                                   TRACE_BUFFER_FLAGS);
        if (!global_trace.buffer) {
                printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
                WARN_ON(1);
-               return 0;
+               goto out_free_cpumask;
        }
        global_trace.entries = ring_buffer_size(global_trace.buffer);
 
+
 #ifdef CONFIG_TRACER_MAX_TRACE
        max_tr.buffer = ring_buffer_alloc(trace_buf_size,
                                             TRACE_BUFFER_FLAGS);
@@ -3843,7 +3849,7 @@ __init static int tracer_alloc_buffers(void)
                printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
                WARN_ON(1);
                ring_buffer_free(global_trace.buffer);
-               return 0;
+               goto out_free_cpumask;
        }
        max_tr.entries = ring_buffer_size(max_tr.buffer);
        WARN_ON(max_tr.entries != global_trace.entries);
@@ -3873,8 +3879,16 @@ __init static int tracer_alloc_buffers(void)
                                       &trace_panic_notifier);
 
        register_die_notifier(&trace_die_notifier);
+       ret = 0;
 
-       return 0;
+       return ret;
+
+out_free_cpumask:
+       free_cpumask_var(tracing_cpumask);
+out_free_buffer_mask:
+       free_cpumask_var(tracing_buffer_mask);
+out:
+       return ret;
 }
 early_initcall(tracer_alloc_buffers);
 fs_initcall(tracer_init_debugfs);
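
The tracer_alloc_buffers() hunks above also replace the bare "return 0" exits with the usual goto-based unwind: ret stays -ENOMEM until setup is complete, each failure path jumps to a label that frees only what was already allocated, in reverse order, and the success path returns before reaching the labels. A standalone sketch of that idiom (setup_example, mask_a and buf_b are made-up names, not from trace.c):

	#include <linux/cpumask.h>
	#include <linux/gfp.h>
	#include <linux/slab.h>

	static cpumask_var_t mask_a;
	static char *buf_b;

	static int setup_example(void)
	{
		int ret = -ENOMEM;

		if (!alloc_cpumask_var(&mask_a, GFP_KERNEL))
			goto out;		/* nothing allocated yet */

		buf_b = kmalloc(1024, GFP_KERNEL);
		if (!buf_b)
			goto out_free_mask;	/* undo only the first allocation */

		return 0;			/* success: keep both allocations */

	out_free_mask:
		free_cpumask_var(mask_a);
	out:
		return ret;
	}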