void __weak perf_counter_print_debug(void) { }
-static DEFINE_PER_CPU(int, disable_count);
+static DEFINE_PER_CPU(int, perf_disable_count);
void __perf_disable(void)
{
- __get_cpu_var(disable_count)++;
+ __get_cpu_var(perf_disable_count)++;
}
bool __perf_enable(void)
{
- return !--__get_cpu_var(disable_count);
+ return !--__get_cpu_var(perf_disable_count);
}
void perf_disable(void)
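The rename above is purely a namespace fix; the semantics stay the same: perf_disable()/perf_enable() calls may nest, and only when the per-CPU count drops back to zero does __perf_enable() report true so the caller re-arms the hardware. A minimal stand-alone sketch of that nesting pattern, with illustrative names rather than the kernel's per-CPU machinery:

#include <stdio.h>

static int disable_depth;		/* stands in for the per-CPU perf_disable_count */

static void pmu_hw_disable(void) { puts("PMU disabled"); }
static void pmu_hw_enable(void)  { puts("PMU enabled"); }

static void sketch_perf_disable(void)
{
	disable_depth++;
	pmu_hw_disable();		/* disabling an already-disabled PMU is harmless */
}

static void sketch_perf_enable(void)
{
	if (!--disable_depth)		/* mirrors __perf_enable() above */
		pmu_hw_enable();	/* re-arm only at the outermost level */
}

int main(void)
{
	sketch_perf_disable();
	sketch_perf_disable();		/* nested */
	sketch_perf_enable();		/* inner enable: PMU stays off */
	sketch_perf_enable();		/* outermost enable: PMU comes back */
	return 0;
}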
* leader's sibling list:
*/
if (group_leader == counter)
- list_add_tail(&counter->list_entry, &ctx->counter_list);
+ list_add_tail(&counter->group_entry, &ctx->group_list);
else {
- list_add_tail(&counter->list_entry, &group_leader->sibling_list);
+ list_add_tail(&counter->group_entry, &group_leader->sibling_list);
group_leader->nr_siblings++;
}
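The new names make the on-list invariant explicit: ctx->group_list holds group leaders only, while every sibling is reached through its leader's sibling_list, both via the counter's group_entry member. A trimmed sketch of the fields involved (the real structures carry many more members than shown here):

struct perf_counter {
	struct list_head	group_entry;	/* on ctx->group_list for a leader,
						 * on the leader's sibling_list otherwise */
	struct list_head	sibling_list;	/* a leader's group members */
	struct list_head	event_entry;	/* on ctx->event_list, leaders and siblings alike */
	struct perf_counter	*group_leader;	/* points back to itself for a singleton */
	/* ... */
};

struct perf_counter_context {
	struct list_head	group_list;	/* group leaders only */
	struct list_head	event_list;	/* every counter in the context */
	/* ... */
};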
{
struct perf_counter *sibling, *tmp;
- if (list_empty(&counter->list_entry))
+ if (list_empty(&counter->group_entry))
return;
ctx->nr_counters--;
if (counter->attr.inherit_stat)
ctx->nr_stat--;
- list_del_init(&counter->list_entry);
+ list_del_init(&counter->group_entry);
list_del_rcu(&counter->event_entry);
if (counter->group_leader != counter)
* upgrade the siblings to singleton counters by adding them
* to the context list directly:
*/
- list_for_each_entry_safe(sibling, tmp,
- &counter->sibling_list, list_entry) {
+ list_for_each_entry_safe(sibling, tmp, &counter->sibling_list, group_entry) {
- list_move_tail(&sibling->list_entry, &ctx->counter_list);
+ list_move_tail(&sibling->group_entry, &ctx->group_list);
sibling->group_leader = sibling;
}
}
/*
* Schedule out siblings (if any):
*/
- list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
+ list_for_each_entry(counter, &group_counter->sibling_list, group_entry)
counter_sched_out(counter, cpuctx, ctx);
if (group_counter->attr.exclusive)
/*
* If the context is active we need to retry the smp call.
*/
- if (ctx->nr_active && !list_empty(&counter->list_entry)) {
+ if (ctx->nr_active && !list_empty(&counter->group_entry)) {
spin_unlock_irq(&ctx->lock);
goto retry;
}
* can remove the counter safely, if the call above did not
* succeed.
*/
- if (!list_empty(&counter->list_entry)) {
+ if (!list_empty(&counter->group_entry)) {
list_del_counter(counter, ctx);
}
spin_unlock_irq(&ctx->lock);
struct perf_counter *counter;
update_counter_times(leader);
- list_for_each_entry(counter, &leader->sibling_list, list_entry)
+ list_for_each_entry(counter, &leader->sibling_list, group_entry)
update_counter_times(counter);
}
/*
* Schedule in siblings as one group (if any):
*/
- list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
+ list_for_each_entry(counter, &group_counter->sibling_list, group_entry) {
if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
partial_group = counter;
goto group_error;
* Groups can be scheduled in as one unit only, so undo any
* partial group before returning:
*/
- list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
+ list_for_each_entry(counter, &group_counter->sibling_list, group_entry) {
if (counter == partial_group)
break;
counter_sched_out(counter, cpuctx, ctx);
if (!is_software_counter(leader))
return 0;
- list_for_each_entry(counter, &leader->sibling_list, list_entry)
+ list_for_each_entry(counter, &leader->sibling_list, group_entry)
if (!is_software_counter(counter))
return 0;
/*
* we need to retry the smp call.
*/
- if (ctx->is_active && list_empty(&counter->list_entry)) {
+ if (ctx->is_active && list_empty(&counter->group_entry)) {
spin_unlock_irq(&ctx->lock);
goto retry;
}
* can add the counter safely, if the call above did not
* succeed.
*/
- if (list_empty(&counter->list_entry))
+ if (list_empty(&counter->group_entry))
add_counter_to_ctx(counter, ctx);
spin_unlock_irq(&ctx->lock);
}
counter->state = PERF_COUNTER_STATE_INACTIVE;
counter->tstamp_enabled = ctx->time - counter->total_time_enabled;
- list_for_each_entry(sub, &counter->sibling_list, list_entry)
+ list_for_each_entry(sub, &counter->sibling_list, group_entry)
if (sub->state >= PERF_COUNTER_STATE_INACTIVE)
sub->tstamp_enabled =
ctx->time - sub->total_time_enabled;
perf_disable();
if (ctx->nr_active) {
- list_for_each_entry(counter, &ctx->counter_list, list_entry) {
+ list_for_each_entry(counter, &ctx->group_list, group_entry) {
if (counter != counter->group_leader)
counter_sched_out(counter, cpuctx, ctx);
else
* First go through the list and put on any pinned groups
* in order to give them the best chance of going on.
*/
- list_for_each_entry(counter, &ctx->counter_list, list_entry) {
+ list_for_each_entry(counter, &ctx->group_list, group_entry) {
if (counter->state <= PERF_COUNTER_STATE_OFF ||
!counter->attr.pinned)
continue;
}
}
- list_for_each_entry(counter, &ctx->counter_list, list_entry) {
+ list_for_each_entry(counter, &ctx->group_list, group_entry) {
/*
* Ignore counters in OFF or ERROR state, and
* ignore pinned counters since we did them already.
u64 interrupts, freq;
spin_lock(&ctx->lock);
- list_for_each_entry(counter, &ctx->counter_list, list_entry) {
+ list_for_each_entry(counter, &ctx->group_list, group_entry) {
if (counter->state != PERF_COUNTER_STATE_ACTIVE)
continue;
* Rotate the first entry last (works just fine for group counters too):
*/
perf_disable();
- list_for_each_entry(counter, &ctx->counter_list, list_entry) {
- list_move_tail(&counter->list_entry, &ctx->counter_list);
+ list_for_each_entry(counter, &ctx->group_list, group_entry) {
+ list_move_tail(&counter->group_entry, &ctx->group_list);
break;
}
perf_enable();
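The rotation loop above looks like a full traversal but never completes one: it takes the first leader off group_list, moves it to the tail and immediately breaks, rotating the list by one position so a different group gets first pick of the hardware on the next reschedule. A stand-alone illustration of the same move-head-to-tail idiom on a plain circular doubly linked list (illustrative helpers, not <linux/list.h>):

#include <stdio.h>

struct node { struct node *prev, *next; int id; };

static void list_init(struct node *head)
{
	head->prev = head->next = head;
}

static void list_add_tail_(struct node *n, struct node *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

static void list_del_(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

static void list_move_tail_(struct node *n, struct node *head)
{
	list_del_(n);
	list_add_tail_(n, head);
}

int main(void)
{
	struct node head, a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 };
	struct node *n;

	list_init(&head);
	list_add_tail_(&a, &head);
	list_add_tail_(&b, &head);
	list_add_tail_(&c, &head);

	/* "rotate the first entry last": 1 2 3 becomes 2 3 1 */
	list_move_tail_(head.next, &head);

	for (n = head.next; n != &head; n = n->next)
		printf("%d ", n->id);
	printf("\n");
	return 0;
}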
spin_lock(&ctx->lock);
- list_for_each_entry(counter, &ctx->counter_list, list_entry) {
+ list_for_each_entry(counter, &ctx->group_list, group_entry) {
if (!counter->attr.enable_on_exec)
continue;
counter->attr.enable_on_exec = 0;
memset(ctx, 0, sizeof(*ctx));
spin_lock_init(&ctx->lock);
mutex_init(&ctx->mutex);
- INIT_LIST_HEAD(&ctx->counter_list);
+ INIT_LIST_HEAD(&ctx->group_list);
INIT_LIST_HEAD(&ctx->event_list);
atomic_set(&ctx->refcount, 1);
ctx->task = task;
size += err;
- list_for_each_entry(sub, &leader->sibling_list, list_entry) {
+ list_for_each_entry(sub, &leader->sibling_list, group_entry) {
err = perf_counter_read_entry(sub, read_format,
buf + size);
if (err < 0)
perf_counter_for_each_child(counter, func);
func(counter);
- list_for_each_entry(sibling, &counter->sibling_list, list_entry)
+ list_for_each_entry(sibling, &counter->sibling_list, group_entry)
perf_counter_for_each_child(sibling, func);
mutex_unlock(&ctx->mutex);
}
perf_output_copy(handle, values, n * sizeof(u64));
- list_for_each_entry(sub, &leader->sibling_list, list_entry) {
+ list_for_each_entry(sub, &leader->sibling_list, group_entry) {
n = 0;
if (sub != counter)
}
if (sample_type & PERF_SAMPLE_TIME) {
- /*
- * Maybe do better on x86 and provide cpu_clock_nmi()
- */
- data->time = sched_clock();
+ data->time = perf_clock();
header->size += sizeof(data->time);
}
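Every timestamp in this file now comes from perf_clock() instead of sched_clock(): the sample time here, the throttle and frequency code further down, and the new task-event time below. perf_clock() itself is defined elsewhere in kernel/perf_counter.c and is not part of this excerpt; as an assumption about that code, it is roughly a thin wrapper of the form:

/* Assumed shape of the helper (not shown in this excerpt). */
static inline u64 perf_clock(void)
{
	return cpu_clock(smp_processor_id());
}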
u32 ppid;
u32 tid;
u32 ptid;
+ u64 time;
} event;
};
struct perf_task_event *task_event)
{
struct perf_output_handle handle;
- int size = task_event->event.header.size;
+ int size;
struct task_struct *task = task_event->task;
- int ret = perf_output_begin(&handle, counter, size, 0, 0);
+ int ret;
+
+ size = task_event->event.header.size;
+ ret = perf_output_begin(&handle, counter, size, 0, 0);
if (ret)
return;
task_event->event.tid = perf_counter_tid(counter, task);
task_event->event.ptid = perf_counter_tid(counter, current);
+ task_event->event.time = perf_clock();
+
perf_output_put(&handle, task_event->event);
+
perf_output_end(&handle);
}
.misc = 0,
.size = sizeof(throttle_event),
},
- .time = sched_clock(),
+ .time = perf_clock(),
.id = primary_counter_id(counter),
.stream_id = counter->id,
};
}
if (counter->attr.freq) {
- u64 now = sched_clock();
+ u64 now = perf_clock();
s64 delta = now - hwc->freq_stamp;
hwc->freq_stamp = now;
*/
break;
}
- throttle = 0;
+ throttle = 1;
}
}
mutex_init(&counter->child_mutex);
INIT_LIST_HEAD(&counter->child_list);
- INIT_LIST_HEAD(&counter->list_entry);
+ INIT_LIST_HEAD(&counter->group_entry);
INIT_LIST_HEAD(&counter->event_entry);
INIT_LIST_HEAD(&counter->sibling_list);
init_waitqueue_head(&counter->waitq);
static int perf_copy_attr(struct perf_counter_attr __user *uattr,
struct perf_counter_attr *attr)
{
- int ret;
u32 size;
+ int ret;
if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
return -EFAULT;
/*
* If we're handed a bigger struct than we know of,
- * ensure all the unknown bits are 0.
+ * ensure all the unknown bits are 0 - i.e. new
+ * user-space does not rely on any kernel feature
+ * extensions we don't know about yet.
*/
if (size > sizeof(*attr)) {
- unsigned long val;
- unsigned long __user *addr;
- unsigned long __user *end;
+ unsigned char __user *addr;
+ unsigned char __user *end;
+ unsigned char val;
- addr = PTR_ALIGN((void __user *)uattr + sizeof(*attr),
- sizeof(unsigned long));
- end = PTR_ALIGN((void __user *)uattr + size,
- sizeof(unsigned long));
+ addr = (void __user *)uattr + sizeof(*attr);
+ end = (void __user *)uattr + size;
- for (; addr < end; addr += sizeof(unsigned long)) {
+ for (; addr < end; addr++) {
ret = get_user(val, addr);
if (ret)
return ret;
if (val)
goto err_size;
}
+ size = sizeof(*attr);
}
ret = copy_from_user(attr, uattr, size);
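The rewritten check walks the tail of the user's structure one byte at a time, from sizeof(*attr) up to the size user-space claimed, and bails out through err_size if any of those unknown bytes is set; otherwise size is clamped so copy_from_user() only fills the fields this kernel understands. A small user-space sketch of the same forward-compatibility pattern, with illustrative names (the kernel version above works on __user pointers via get_user()/copy_from_user()):

#include <stdint.h>
#include <string.h>

/* The subset of the attribute this (older) consumer understands. */
struct known_attr {
	uint32_t	type;
	uint32_t	size;
	uint64_t	config;
};

/*
 * Accept a caller blob that may be larger than known_attr, but only if
 * every byte past the known layout is zero; then copy just the prefix
 * we understand. Returns 0 on success, -1 if the caller relies on
 * fields we do not know about.
 */
static int copy_attr_compat(struct known_attr *dst, const void *src, size_t src_size)
{
	if (src_size > sizeof(*dst)) {
		const unsigned char *tail = (const unsigned char *)src + sizeof(*dst);
		const unsigned char *end  = (const unsigned char *)src + src_size;

		for (; tail < end; tail++)
			if (*tail)
				return -1;	/* unknown extension in use */
		src_size = sizeof(*dst);
	}

	memset(dst, 0, sizeof(*dst));
	memcpy(dst, src, src_size);
	return 0;
}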
child, NULL, child_ctx);
if (IS_ERR(leader))
return PTR_ERR(leader);
- list_for_each_entry(sub, &parent_counter->sibling_list, list_entry) {
+ list_for_each_entry(sub, &parent_counter->sibling_list, group_entry) {
child_ctr = inherit_counter(sub, parent, parent_ctx,
child, leader, child_ctx);
if (IS_ERR(child_ctr))
mutex_lock_nested(&child_ctx->mutex, SINGLE_DEPTH_NESTING);
again:
- list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list,
- list_entry)
+ list_for_each_entry_safe(child_counter, tmp, &child_ctx->group_list,
+ group_entry)
__perf_counter_exit_task(child_counter, child_ctx, child);
/*
* its siblings to the list, but we obtained 'tmp' before that which
* will still point to the list head terminating the iteration.
*/
- if (!list_empty(&child_ctx->counter_list))
+ if (!list_empty(&child_ctx->group_list))
goto again;
mutex_unlock(&child_ctx->mutex);
mutex_lock(&ctx->mutex);
again:
- list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry) {
+ list_for_each_entry_safe(counter, tmp, &ctx->group_list, group_entry) {
struct perf_counter *parent = counter->parent;
if (WARN_ON_ONCE(!parent))
free_counter(counter);
}
- if (!list_empty(&ctx->counter_list))
+ if (!list_empty(&ctx->group_list))
goto again;
mutex_unlock(&ctx->mutex);
struct perf_counter_context *ctx = &cpuctx->ctx;
struct perf_counter *counter, *tmp;
- list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry)
+ list_for_each_entry_safe(counter, tmp, &ctx->group_list, group_entry)
__perf_counter_remove_from_context(counter);
}
static void perf_counter_exit_cpu(int cpu)