X-Git-Url: http://drtracing.org/?a=blobdiff_plain;f=include%2Flinux%2Flockdep.h;h=3d3386b88b6aeb043f3bca3f49a75eb3f62e5c43;hb=4b32d0a4e9ec07808a5c406a416c6576c986b047;hp=ea097dddc44f791d34a599aa12b3a251413aa47d;hpb=40b20c257a13c5a526ac540bc5e43d0fdf29792a;p=deliverable%2Flinux.git

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index ea097dddc44f..3d3386b88b6a 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -1,13 +1,17 @@
 /*
  * Runtime locking correctness validator
  *
- *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar
+ *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar
+ *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
  *
  * see Documentation/lockdep-design.txt for more details.
  */
 #ifndef __LINUX_LOCKDEP_H
 #define __LINUX_LOCKDEP_H
 
+struct task_struct;
+struct lockdep_map;
+
 #ifdef CONFIG_LOCKDEP
 
 #include <linux/linkage.h>
@@ -112,8 +116,32 @@ struct lock_class {
 
 	const char			*name;
 	int				name_version;
+
+#ifdef CONFIG_LOCK_STAT
+	unsigned long			contention_point[4];
+#endif
+};
+
+#ifdef CONFIG_LOCK_STAT
+struct lock_time {
+	s64				min;
+	s64				max;
+	s64				total;
+	unsigned long			nr;
 };
 
+struct lock_class_stats {
+	unsigned long			contention_point[4];
+	struct lock_time		read_waittime;
+	struct lock_time		write_waittime;
+	struct lock_time		read_holdtime;
+	struct lock_time		write_holdtime;
+};
+
+struct lock_class_stats lock_stats(struct lock_class *class);
+void clear_lock_stats(struct lock_class *class);
+#endif
+
 /*
  * Map the lock object (the lock instance) to the lock-class object.
  * This is embedded into specific lock instances:
@@ -132,6 +160,7 @@ struct lock_list {
 	struct list_head		entry;
 	struct lock_class		*class;
 	struct stack_trace		trace;
+	int				distance;
 };
 
 /*
@@ -162,6 +191,10 @@ struct held_lock {
 	unsigned long			acquire_ip;
 	struct lockdep_map		*instance;
 
+#ifdef CONFIG_LOCK_STAT
+	u64				waittime_stamp;
+	u64				holdtime_stamp;
+#endif
 	/*
 	 * The lock-stack is unified in that the lock chains of interrupt
 	 * contexts nest ontop of process context chains, but we 'separate'
@@ -242,7 +275,7 @@ extern void lock_release(struct lockdep_map *lock, int nested,
 
 # define INIT_LOCKDEP				.lockdep_recursion = 0,
 
-#define lockdep_depth(tsk)	((tsk)->lockdep_depth)
+#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)
 
 #else /* !LOCKDEP */
 
@@ -278,6 +311,30 @@ struct lock_class_key { };
 
 #endif /* !LOCKDEP */
 
+#ifdef CONFIG_LOCK_STAT
+
+extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
+extern void lock_acquired(struct lockdep_map *lock);
+
+#define LOCK_CONTENDED(_lock, try, lock)			\
+do {								\
+	if (!try(_lock)) {					\
+		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
+		lock(_lock);					\
+		lock_acquired(&(_lock)->dep_map);		\
+	}							\
+} while (0)
+
+#else /* CONFIG_LOCK_STAT */
+
+#define lock_contended(lockdep_map, ip) do {} while (0)
+#define lock_acquired(lockdep_map) do {} while (0)
+
+#define LOCK_CONTENDED(_lock, try, lock) \
+	lock(_lock)
+
+#endif /* CONFIG_LOCK_STAT */
+
 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_GENERIC_HARDIRQS)
 extern void early_init_irq_lock_class(void);
 #else
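
A minimal sketch (not part of the patch above) of how a lock primitive could route its slow path through the LOCK_CONTENDED() wrapper this diff introduces. The type and function names here (struct my_lock, my_trylock, my_lock_slow, my_lock) are invented for illustration; only LOCK_CONTENDED(), lock_contended(), lock_acquired() and the ->dep_map field name come from the patch.

/* Illustrative only -- not from the patch. */
#include <linux/lockdep.h>

struct my_lock {
	/* the real lock word would live here */
	struct lockdep_map	dep_map;	/* LOCK_CONTENDED() expects ->dep_map */
};

/* hypothetical non-blocking fast path: returns non-zero on success */
static int my_trylock(struct my_lock *l)
{
	return 0;	/* stub: pretend the lock is always contended */
}

/* hypothetical blocking slow path */
static void my_lock_slow(struct my_lock *l)
{
	/* a real implementation would spin or sleep here */
}

static void my_lock(struct my_lock *l)
{
	/*
	 * With CONFIG_LOCK_STAT this expands to:
	 *	if (!my_trylock(l)) {
	 *		lock_contended(&l->dep_map, _RET_IP_);
	 *		my_lock_slow(l);
	 *		lock_acquired(&l->dep_map);
	 *	}
	 * and to a plain my_lock_slow(l) call otherwise.
	 */
	LOCK_CONTENDED(l, my_trylock, my_lock_slow);
}

Because lock_contended()/lock_acquired() run only when the trylock fails, uncontended acquisitions pick up no extra hook calls from this wrapper; only contended waits are timestamped.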