Merge branches 'bigrtm.2012.07.04a', 'doctorture.2012.07.02a', 'fixes.2012.07.06a...
authorPaul E. McKenney <paulmck@linux.vnet.ibm.com>
Fri, 6 Jul 2012 12:59:20 +0000 (05:59 -0700)
committerPaul E. McKenney <paulmck@linux.vnet.ibm.com>
Fri, 6 Jul 2012 12:59:30 +0000 (05:59 -0700)
bigrtm: First steps towards getting RCU out of the way of
tens-of-microseconds real-time response on systems compiled
with NR_CPUS=4096.  Also cleanups for, and increased concurrency
of, the rcu_barrier() family of primitives.
doctorture: rcutorture and documentation improvements.
fixes:  Miscellaneous fixes.
fnh: RCU_FAST_NO_HZ fixes and improvements.

14 files changed:
Documentation/RCU/checklist.txt
Documentation/RCU/rcubarrier.txt
Documentation/RCU/torture.txt
Documentation/RCU/whatisRCU.txt
Documentation/kernel-parameters.txt
include/linux/rcupdate.h
include/trace/events/rcu.h
kernel/rcutiny_plugin.h
kernel/rcutorture.c
kernel/rcutree.c
kernel/rcutree.h
kernel/rcutree_plugin.h
kernel/rcutree_trace.c
kernel/time/tick-sched.c

index 5c8d74968090544ae570105b00a96561ea4acd6c..fc103d7a04740d338bf0490a2133674c25986d59 100644 (file)
@@ -162,9 +162,9 @@ over a rather long period of time, but improvements are always welcome!
                when publicizing a pointer to a structure that can
                be traversed by an RCU read-side critical section.
 
-5.     If call_rcu(), or a related primitive such as call_rcu_bh() or
-       call_rcu_sched(), is used, the callback function must be
-       written to be called from softirq context.  In particular,
+5.     If call_rcu(), or a related primitive such as call_rcu_bh(),
+       call_rcu_sched(), or call_srcu(), is used, the callback function
+       must be written to be called from softirq context.  In particular,
        it cannot block.
 
 6.     Since synchronize_rcu() can block, it cannot be called from
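For example, a callback posted via call_rcu() as described in item 5 above may free memory but may not sleep; a minimal sketch (struct foo and foo_reclaim() are illustrative names, not from this patch):

	struct foo {
		int data;
		struct rcu_head rcu;
	};

	/* Invoked from softirq context, so it must not block. */
	static void foo_reclaim(struct rcu_head *head)
	{
		struct foo *fp = container_of(head, struct foo, rcu);

		kfree(fp);	/* kfree() is safe in softirq context. */
	}

	/* After removing fp from all reader-visible structures: */
	call_rcu(&fp->rcu, foo_reclaim);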
@@ -202,11 +202,12 @@ over a rather long period of time, but improvements are always welcome!
        updater uses call_rcu_sched() or synchronize_sched(), then
        the corresponding readers must disable preemption, possibly
        by calling rcu_read_lock_sched() and rcu_read_unlock_sched().
-       If the updater uses synchronize_srcu(), the the corresponding
-       readers must use srcu_read_lock() and srcu_read_unlock(),
-       and with the same srcu_struct.  The rules for the expedited
-       primitives are the same as for their non-expedited counterparts.
-       Mixing things up will result in confusion and broken kernels.
+       If the updater uses synchronize_srcu() or call_srcu(),
+       the corresponding readers must use srcu_read_lock() and
+       srcu_read_unlock(), and with the same srcu_struct.  The rules for
+       the expedited primitives are the same as for their non-expedited
+       counterparts.  Mixing things up will result in confusion and
+       broken kernels.
 
        One exception to this rule: rcu_read_lock() and rcu_read_unlock()
        may be substituted for rcu_read_lock_bh() and rcu_read_unlock_bh()
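As an illustrative sketch of the srcu_read_lock()/synchronize_srcu() pairing described above (ss, reader(), and updater() are made-up names):

	static struct srcu_struct ss;	/* init_srcu_struct(&ss) at startup. */

	void reader(void)
	{
		int idx;

		idx = srcu_read_lock(&ss);
		/* ... access data protected by ss; sleeping is permitted ... */
		srcu_read_unlock(&ss, idx);
	}

	void updater(void)
	{
		/* ... unlink the old data from reader-visible structures ... */
		synchronize_srcu(&ss);	/* or post a call_srcu(&ss, ...) callback */
		/* ... no reader of ss can still hold a reference ... */
	}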
@@ -333,14 +334,14 @@ over a rather long period of time, but improvements are always welcome!
        victim CPU from ever going offline.)
 
 14.    SRCU (srcu_read_lock(), srcu_read_unlock(), srcu_dereference(),
-       synchronize_srcu(), and synchronize_srcu_expedited()) may only
-       be invoked from process context.  Unlike other forms of RCU, it
-       -is- permissible to block in an SRCU read-side critical section
-       (demarked by srcu_read_lock() and srcu_read_unlock()), hence the
-       "SRCU": "sleepable RCU".  Please note that if you don't need
-       to sleep in read-side critical sections, you should be using
-       RCU rather than SRCU, because RCU is almost always faster and
-       easier to use than is SRCU.
+       synchronize_srcu(), synchronize_srcu_expedited(), and call_srcu())
+       may only be invoked from process context.  Unlike other forms of
+       RCU, it -is- permissible to block in an SRCU read-side critical
+       section (demarked by srcu_read_lock() and srcu_read_unlock()),
+       hence the "SRCU": "sleepable RCU".  Please note that if you
+       don't need to sleep in read-side critical sections, you should be
+       using RCU rather than SRCU, because RCU is almost always faster
+       and easier to use than is SRCU.
 
        If you need to enter your read-side critical section in a
        hardirq or exception handler, and then exit that same read-side
@@ -353,8 +354,8 @@ over a rather long period of time, but improvements are always welcome!
        cleanup_srcu_struct().  These are passed a "struct srcu_struct"
        that defines the scope of a given SRCU domain.  Once initialized,
        the srcu_struct is passed to srcu_read_lock(), srcu_read_unlock()
-       synchronize_srcu(), and synchronize_srcu_expedited().  A given
-       synchronize_srcu() waits only for SRCU read-side critical
+       synchronize_srcu(), synchronize_srcu_expedited(), and call_srcu().
+       A given synchronize_srcu() waits only for SRCU read-side critical
        sections governed by srcu_read_lock() and srcu_read_unlock()
        calls that have been passed the same srcu_struct.  This property
        is what makes sleeping read-side critical sections tolerable --
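Putting the SRCU lifecycle together, a module using the new call_srcu() might be structured as follows (a sketch; my_srcu, my_init(), and my_exit() are made-up names, and srcu_barrier() is the new SRCU barrier primitive):

	static struct srcu_struct my_srcu;

	static int __init my_init(void)
	{
		return init_srcu_struct(&my_srcu);
	}

	static void __exit my_exit(void)
	{
		/* First stop posting new call_srcu() callbacks, then: */
		srcu_barrier(&my_srcu);		/* wait for outstanding callbacks */
		cleanup_srcu_struct(&my_srcu);	/* now safe to tear down */
	}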
@@ -374,7 +375,7 @@ over a rather long period of time, but improvements are always welcome!
        requiring SRCU's read-side deadlock immunity or low read-side
        realtime latency.
 
-       Note that, rcu_assign_pointer() relates to SRCU just as they do
+       Note that rcu_assign_pointer() relates to SRCU just as it does
        to other forms of RCU.
 
 15.    The whole point of call_rcu(), synchronize_rcu(), and friends
index e439a0edee2263d554a53282aaa8512b0295a40c..38428c125135504de8737943088a4e0f3df6f213 100644 (file)
@@ -79,8 +79,6 @@ complete. Pseudo-code using rcu_barrier() is as follows:
    2. Execute rcu_barrier().
    3. Allow the module to be unloaded.
 
-Quick Quiz #1: Why is there no srcu_barrier()?
-
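Expressed as code, the three steps above might look like this in a module's exit function (a sketch; my_stop_new_callbacks() stands in for whatever step 1 means for a given module):

	static void __exit my_module_exit(void)
	{
		my_stop_new_callbacks();  /* 1. No new callbacks may be posted. */
		rcu_barrier();            /* 2. Wait for in-flight callbacks. */
		/* 3. Return, after which the module may safely be unloaded. */
	}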
 The rcutorture module makes use of rcu_barrier in its exit function
 as follows:
 
@@ -162,7 +160,7 @@ for any pre-existing callbacks to complete.
 Then lines 55-62 print status and do operation-specific cleanup, and
 then return, permitting the module-unload operation to be completed.
 
-Quick Quiz #2: Is there any other situation where rcu_barrier() might
+Quick Quiz #1: Is there any other situation where rcu_barrier() might
        be required?
 
 Your module might have additional complications. For example, if your
@@ -242,7 +240,7 @@ reaches zero, as follows:
  4 complete(&rcu_barrier_completion);
  5 }
 
-Quick Quiz #3: What happens if CPU 0's rcu_barrier_func() executes
+Quick Quiz #2: What happens if CPU 0's rcu_barrier_func() executes
        immediately (thus incrementing rcu_barrier_cpu_count to the
        value one), but the other CPU's rcu_barrier_func() invocations
        are delayed for a full grace period? Couldn't this result in
@@ -259,12 +257,7 @@ so that your module may be safely unloaded.
 
 Answers to Quick Quizzes
 
-Quick Quiz #1: Why is there no srcu_barrier()?
-
-Answer: Since there is no call_srcu(), there can be no outstanding SRCU
-       callbacks. Therefore, there is no need to wait for them.
-
-Quick Quiz #2: Is there any other situation where rcu_barrier() might
+Quick Quiz #1: Is there any other situation where rcu_barrier() might
        be required?
 
 Answer: Interestingly enough, rcu_barrier() was not originally
@@ -278,7 +271,7 @@ Answer: Interestingly enough, rcu_barrier() was not originally
        implementing rcutorture, and found that rcu_barrier() solves
        this problem as well.
 
-Quick Quiz #3: What happens if CPU 0's rcu_barrier_func() executes
+Quick Quiz #2: What happens if CPU 0's rcu_barrier_func() executes
        immediately (thus incrementing rcu_barrier_cpu_count to the
        value one), but the other CPU's rcu_barrier_func() invocations
        are delayed for a full grace period? Couldn't this result in
index 4ddf3913fd8c0df7b91d2d29b17e3c69327907c7..7dce8a17eac269cdff475a57377bf49963c1216a 100644 (file)
@@ -174,11 +174,20 @@ torture_type      The type of RCU to test, with string values as follows:
                        and synchronize_rcu_bh_expedited().
 
                "srcu": srcu_read_lock(), srcu_read_unlock() and
+                       call_srcu().
+
+               "srcu_sync": srcu_read_lock(), srcu_read_unlock() and
                        synchronize_srcu().
 
                "srcu_expedited": srcu_read_lock(), srcu_read_unlock() and
                        synchronize_srcu_expedited().
 
+               "srcu_raw": srcu_read_lock_raw(), srcu_read_unlock_raw(),
+                       and call_srcu().
+
+               "srcu_raw_sync": srcu_read_lock_raw(), srcu_read_unlock_raw(),
+                       and synchronize_srcu().
+
                "sched": preempt_disable(), preempt_enable(), and
                        call_rcu_sched().
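For example, the new call_srcu() path can be exercised by loading the module with:

	modprobe rcutorture torture_type=srcu

while torture_type=srcu_sync exercises the synchronize_srcu() path listed above.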
 
index 6bbe8dcdc3da94166160f3d27a96a6f2a622287f..69ee188515e7a9c907ec243dfff21852e55a9998 100644 (file)
@@ -833,9 +833,9 @@ sched:      Critical sections       Grace period            Barrier
 
 SRCU:  Critical sections       Grace period            Barrier
 
-       srcu_read_lock          synchronize_srcu        N/A
-       srcu_read_unlock        synchronize_srcu_expedited
-       srcu_read_lock_raw
+       srcu_read_lock          synchronize_srcu        srcu_barrier
+       srcu_read_unlock        call_srcu
+       srcu_read_lock_raw      synchronize_srcu_expedited
        srcu_read_unlock_raw
        srcu_dereference
 
index a92c5ebf373e2bf4bea68072b58fbc0471ad9c13..12783fa833c38041c94fb02cff34bc011b4d184d 100644 (file)
@@ -2367,6 +2367,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        Set maximum number of finished RCU callbacks to process
                        in one batch.
 
+       rcutree.fanout_leaf=    [KNL,BOOT]
+                       Increase the number of CPUs assigned to each
+                       leaf rcu_node structure.  Useful for very large
+                       systems.
+
        rcutree.qhimark=        [KNL,BOOT]
                        Set threshold of queued
                        RCU callbacks over which batch limiting is disabled.
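For example, a very large system might boot with the following appended to the kernel command line to widen the leaf-level fanout via the rcutree.fanout_leaf parameter documented above (the value 64 is illustrative):

	rcutree.fanout_leaf=64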
index 26f6417f0264f75c2d91f7b71097b7ef168e7626..c2c0d86dd3acc784db948e8f8f0bea7f430f8d2a 100644 (file)
@@ -923,7 +923,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
 #define __kfree_rcu(head, offset) \
        do { \
                BUILD_BUG_ON(!__is_kfree_rcu_offset(offset)); \
-               call_rcu(head, (void (*)(struct rcu_head *))(unsigned long)(offset)); \
+               kfree_call_rcu(head, (void (*)(struct rcu_head *))(unsigned long)(offset)); \
        } while (0)
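This macro is normally reached via kfree_rcu(); for example (struct foo and fp are illustrative):

	struct foo {
		int a;
		struct rcu_head rcu;
	};

	/*
	 * Frees fp after a grace period.  With this change the request
	 * goes through kfree_call_rcu(), letting the RCU core recognize
	 * kfree-only callbacks (e.g., treating them as lazy for
	 * RCU_FAST_NO_HZ purposes).
	 */
	kfree_rcu(fp, rcu);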
 
 /**
index d274734b2aa42fee56d7ce7ab2b7898d39521e7e..5bde94d8585b77c71e957926ed9ca7b5b85bcc5c 100644 (file)
@@ -541,6 +541,50 @@ TRACE_EVENT(rcu_torture_read,
                  __entry->rcutorturename, __entry->rhp)
 );
 
+/*
+ * Tracepoint for _rcu_barrier() execution.  The string "s" describes
+ * the _rcu_barrier phase:
+ *     "Begin": _rcu_barrier() started.
+ *     "Check": _rcu_barrier() checking for piggybacking.
+ *     "EarlyExit": _rcu_barrier() piggybacked, thus early exit.
+ *     "Inc1": _rcu_barrier() piggyback check counter incremented.
+ *     "Offline": _rcu_barrier() found offline CPU.
+ *     "OnlineQ": _rcu_barrier() found online CPU with callbacks.
+ *     "OnlineNQ": _rcu_barrier() found online CPU, no callbacks.
+ *     "IRQ": An rcu_barrier_callback() callback posted on remote CPU.
+ *     "CB": An rcu_barrier_callback() invoked a callback, not the last.
+ *     "LastCB": An rcu_barrier_callback() invoked the last callback.
+ *     "Inc2": _rcu_barrier() piggyback check counter incremented.
+ * The "cpu" argument is the CPU or -1 if meaningless, the "cnt" argument
+ * is the count of remaining callbacks, and "done" is the piggybacking count.
+ */
+TRACE_EVENT(rcu_barrier,
+
+       TP_PROTO(char *rcuname, char *s, int cpu, int cnt, unsigned long done),
+
+       TP_ARGS(rcuname, s, cpu, cnt, done),
+
+       TP_STRUCT__entry(
+               __field(char *, rcuname)
+               __field(char *, s)
+               __field(int, cpu)
+               __field(int, cnt)
+               __field(unsigned long, done)
+       ),
+
+       TP_fast_assign(
+               __entry->rcuname = rcuname;
+               __entry->s = s;
+               __entry->cpu = cpu;
+               __entry->cnt = cnt;
+               __entry->done = done;
+       ),
+
+       TP_printk("%s %s cpu %d remaining %d # %lu",
+                 __entry->rcuname, __entry->s, __entry->cpu, __entry->cnt,
+                 __entry->done)
+);
+
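Given the TP_printk() format above, the payload of one such event would render along these lines (field values illustrative):

	rcu_sched Check cpu -1 remaining 1 # 4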
 #else /* #ifdef CONFIG_RCU_TRACE */
 
 #define trace_rcu_grace_period(rcuname, gpnum, gpevent) do { } while (0)
@@ -564,6 +608,7 @@ TRACE_EVENT(rcu_torture_read,
 #define trace_rcu_batch_end(rcuname, callbacks_invoked, cb, nr, iit, risk) \
        do { } while (0)
 #define trace_rcu_torture_read(rcutorturename, rhp) do { } while (0)
+#define trace_rcu_barrier(name, s, cpu, cnt, done) do { } while (0)
 
 #endif /* #else #ifdef CONFIG_RCU_TRACE */
 
index a269b0da0eb6fdd3d16dad2daf7dfbf4ddf70a61..116725b5edfb9cbd5d618f15c445ca72db7e61f6 100644 (file)
@@ -801,8 +801,6 @@ EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
  */
 int rcu_preempt_needs_cpu(void)
 {
-       if (!rcu_preempt_running_reader())
-               rcu_preempt_cpu_qs();
        return rcu_preempt_ctrlblk.rcb.rcucblist != NULL;
 }
 
index e66b34ab7555f9153a8c5955678d73fc87b6d777..c279ee9209473dedb9c510c3a584510f588defa5 100644 (file)
@@ -206,6 +206,7 @@ static unsigned long boost_starttime;       /* jiffies of next boost test start. */
 DEFINE_MUTEX(boost_mutex);             /* protect setting boost_starttime */
                                        /*  and boost task create/destroy. */
 static atomic_t barrier_cbs_count;     /* Barrier callbacks registered. */
+static bool barrier_phase;             /* Test phase. */
 static atomic_t barrier_cbs_invoked;   /* Barrier callbacks invoked. */
 static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
 static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);
@@ -635,6 +636,17 @@ static void srcu_torture_synchronize(void)
        synchronize_srcu(&srcu_ctl);
 }
 
+static void srcu_torture_call(struct rcu_head *head,
+                             void (*func)(struct rcu_head *head))
+{
+       call_srcu(&srcu_ctl, head, func);
+}
+
+static void srcu_torture_barrier(void)
+{
+       srcu_barrier(&srcu_ctl);
+}
+
 static int srcu_torture_stats(char *page)
 {
        int cnt = 0;
@@ -661,8 +673,8 @@ static struct rcu_torture_ops srcu_ops = {
        .completed      = srcu_torture_completed,
        .deferred_free  = srcu_torture_deferred_free,
        .sync           = srcu_torture_synchronize,
-       .call           = NULL,
-       .cb_barrier     = NULL,
+       .call           = srcu_torture_call,
+       .cb_barrier     = srcu_torture_barrier,
        .stats          = srcu_torture_stats,
        .name           = "srcu"
 };
@@ -1013,7 +1025,11 @@ rcu_torture_fakewriter(void *arg)
        do {
                schedule_timeout_uninterruptible(1 + rcu_random(&rand)%10);
                udelay(rcu_random(&rand) & 0x3ff);
-               cur_ops->sync();
+               if (cur_ops->cb_barrier != NULL &&
+                   rcu_random(&rand) % (nfakewriters * 8) == 0)
+                       cur_ops->cb_barrier();
+               else
+                       cur_ops->sync();
                rcu_stutter_wait("rcu_torture_fakewriter");
        } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
 
@@ -1631,6 +1647,7 @@ void rcu_torture_barrier_cbf(struct rcu_head *rcu)
 static int rcu_torture_barrier_cbs(void *arg)
 {
        long myid = (long)arg;
+       bool lastphase = false;
        struct rcu_head rcu;
 
        init_rcu_head_on_stack(&rcu);
@@ -1638,9 +1655,11 @@ static int rcu_torture_barrier_cbs(void *arg)
        set_user_nice(current, 19);
        do {
                wait_event(barrier_cbs_wq[myid],
-                          atomic_read(&barrier_cbs_count) == n_barrier_cbs ||
+                          barrier_phase != lastphase ||
                           kthread_should_stop() ||
                           fullstop != FULLSTOP_DONTSTOP);
+               lastphase = barrier_phase;
+               smp_mb(); /* ensure barrier_phase load before ->call(). */
                if (kthread_should_stop() || fullstop != FULLSTOP_DONTSTOP)
                        break;
                cur_ops->call(&rcu, rcu_torture_barrier_cbf);
@@ -1665,7 +1684,8 @@ static int rcu_torture_barrier(void *arg)
        do {
                atomic_set(&barrier_cbs_invoked, 0);
                atomic_set(&barrier_cbs_count, n_barrier_cbs);
-               /* wake_up() path contains the required barriers. */
+               smp_mb(); /* Ensure barrier_phase after prior assignments. */
+               barrier_phase = !barrier_phase;
                for (i = 0; i < n_barrier_cbs; i++)
                        wake_up(&barrier_cbs_wq[i]);
                wait_event(barrier_wq,
@@ -1684,7 +1704,7 @@ static int rcu_torture_barrier(void *arg)
                schedule_timeout_interruptible(HZ / 10);
        } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
        VERBOSE_PRINTK_STRING("rcu_torture_barrier task stopping");
-       rcutorture_shutdown_absorb("rcu_torture_barrier_cbs");
+       rcutorture_shutdown_absorb("rcu_torture_barrier");
        while (!kthread_should_stop())
                schedule_timeout_interruptible(1);
        return 0;
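The memory-barrier pairing introduced above can be summarized as follows (an illustrative sketch, not new kernel code):

	/*
	 *  rcu_torture_barrier():             rcu_torture_barrier_cbs():
	 *    atomic_set(&barrier_cbs_count, n);
	 *    smp_mb();                 [A]
	 *    barrier_phase = !barrier_phase;
	 *                                       wait_event(phase != lastphase);
	 *    wake_up(...);                      lastphase = barrier_phase;
	 *                                       smp_mb();                 [B]
	 *                                       cur_ops->call(...);
	 *
	 * [A] pairs with [B]: any CBs kthread that observes the new phase
	 * also observes the freshly reset counters before posting its
	 * callback.
	 */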
@@ -1908,8 +1928,8 @@ rcu_torture_init(void)
        static struct rcu_torture_ops *torture_ops[] =
                { &rcu_ops, &rcu_sync_ops, &rcu_expedited_ops,
                  &rcu_bh_ops, &rcu_bh_sync_ops, &rcu_bh_expedited_ops,
-                 &srcu_ops, &srcu_sync_ops, &srcu_raw_ops,
-                 &srcu_raw_sync_ops, &srcu_expedited_ops,
+                 &srcu_ops, &srcu_sync_ops, &srcu_expedited_ops,
+                 &srcu_raw_ops, &srcu_raw_sync_ops,
                  &sched_ops, &sched_sync_ops, &sched_expedited_ops, };
 
        mutex_lock(&fullstop_mutex);
index 4154c9567a6d4924092ab2096729d6ae0ce0e60c..117218a43724f862296a59c7815acab9defaf063 100644 (file)
 
 /* Data structures. */
 
-static struct lock_class_key rcu_node_class[NUM_RCU_LVLS];
-
-#define RCU_STATE_INITIALIZER(structname) { \
-       .level = { &structname##_state.node[0] }, \
-       .levelcnt = { \
-               NUM_RCU_LVL_0,  /* root of hierarchy. */ \
-               NUM_RCU_LVL_1, \
-               NUM_RCU_LVL_2, \
-               NUM_RCU_LVL_3, \
-               NUM_RCU_LVL_4, /* == MAX_RCU_LVLS */ \
-       }, \
+static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
+
+#define RCU_STATE_INITIALIZER(sname, cr) { \
+       .level = { &sname##_state.node[0] }, \
+       .call = cr, \
        .fqs_state = RCU_GP_IDLE, \
        .gpnum = -300, \
        .completed = -300, \
-       .onofflock = __RAW_SPIN_LOCK_UNLOCKED(&structname##_state.onofflock), \
-       .orphan_nxttail = &structname##_state.orphan_nxtlist, \
-       .orphan_donetail = &structname##_state.orphan_donelist, \
-       .fqslock = __RAW_SPIN_LOCK_UNLOCKED(&structname##_state.fqslock), \
-       .n_force_qs = 0, \
-       .n_force_qs_ngp = 0, \
-       .name = #structname, \
+       .onofflock = __RAW_SPIN_LOCK_UNLOCKED(&sname##_state.onofflock), \
+       .orphan_nxttail = &sname##_state.orphan_nxtlist, \
+       .orphan_donetail = &sname##_state.orphan_donelist, \
+       .barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
+       .fqslock = __RAW_SPIN_LOCK_UNLOCKED(&sname##_state.fqslock), \
+       .name = #sname, \
 }
 
-struct rcu_state rcu_sched_state = RCU_STATE_INITIALIZER(rcu_sched);
+struct rcu_state rcu_sched_state =
+       RCU_STATE_INITIALIZER(rcu_sched, call_rcu_sched);
 DEFINE_PER_CPU(struct rcu_data, rcu_sched_data);
 
-struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh);
+struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh, call_rcu_bh);
 DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
 
 static struct rcu_state *rcu_state;
+LIST_HEAD(rcu_struct_flavors);
+
+/* Allow boot-time increase (but not decrease) of CONFIG_RCU_FANOUT_LEAF. */
+static int rcu_fanout_leaf = CONFIG_RCU_FANOUT_LEAF;
+module_param(rcu_fanout_leaf, int, 0);
+int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
+static int num_rcu_lvl[] = {  /* Number of rcu_nodes at specified level. */
+       NUM_RCU_LVL_0,
+       NUM_RCU_LVL_1,
+       NUM_RCU_LVL_2,
+       NUM_RCU_LVL_3,
+       NUM_RCU_LVL_4,
+};
+int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
 
 /*
  * The rcu_scheduler_active variable transitions from zero to one just
@@ -147,13 +155,6 @@ static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
 unsigned long rcutorture_testseq;
 unsigned long rcutorture_vernum;
 
-/* State information for rcu_barrier() and friends. */
-
-static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
-static atomic_t rcu_barrier_cpu_count;
-static DEFINE_MUTEX(rcu_barrier_mutex);
-static struct completion rcu_barrier_completion;
-
 /*
  * Return true if an RCU grace period is in progress.  The ACCESS_ONCE()s
  * permit this function to be invoked without holding the root rcu_node
@@ -358,7 +359,7 @@ static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
                struct task_struct *idle = idle_task(smp_processor_id());
 
                trace_rcu_dyntick("Error on entry: not idle task", oldval, 0);
-               ftrace_dump(DUMP_ALL);
+               ftrace_dump(DUMP_ORIG);
                WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
                          current->pid, current->comm,
                          idle->pid, idle->comm); /* must be idle task! */
@@ -468,7 +469,7 @@ static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
 
                trace_rcu_dyntick("Error on exit: not idle task",
                                  oldval, rdtp->dynticks_nesting);
-               ftrace_dump(DUMP_ALL);
+               ftrace_dump(DUMP_ORIG);
                WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
                          current->pid, current->comm,
                          idle->pid, idle->comm); /* must be idle task! */
@@ -856,9 +857,10 @@ static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
  */
 void rcu_cpu_stall_reset(void)
 {
-       rcu_sched_state.jiffies_stall = jiffies + ULONG_MAX / 2;
-       rcu_bh_state.jiffies_stall = jiffies + ULONG_MAX / 2;
-       rcu_preempt_stall_reset();
+       struct rcu_state *rsp;
+
+       for_each_rcu_flavor(rsp)
+               rsp->jiffies_stall = jiffies + ULONG_MAX / 2;
 }
 
 static struct notifier_block rcu_panic_block = {
@@ -1753,8 +1755,6 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
                break; /* grace period idle or initializing, ignore. */
 
        case RCU_SAVE_DYNTICK:
-               if (RCU_SIGNAL_INIT != RCU_SAVE_DYNTICK)
-                       break; /* So gcc recognizes the dead code. */
 
                raw_spin_unlock(&rnp->lock);  /* irqs remain disabled */
 
@@ -1796,9 +1796,10 @@ unlock_fqs_ret:
  * whom the rdp belongs.
  */
 static void
-__rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
+__rcu_process_callbacks(struct rcu_state *rsp)
 {
        unsigned long flags;
+       struct rcu_data *rdp = __this_cpu_ptr(rsp->rda);
 
        WARN_ON_ONCE(rdp->beenonline == 0);
 
@@ -1834,11 +1835,11 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
  */
 static void rcu_process_callbacks(struct softirq_action *unused)
 {
+       struct rcu_state *rsp;
+
        trace_rcu_utilization("Start RCU core");
-       __rcu_process_callbacks(&rcu_sched_state,
-                               &__get_cpu_var(rcu_sched_data));
-       __rcu_process_callbacks(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
-       rcu_preempt_process_callbacks();
+       for_each_rcu_flavor(rsp)
+               __rcu_process_callbacks(rsp);
        trace_rcu_utilization("End RCU core");
 }
 
@@ -2252,9 +2253,12 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
  */
 static int rcu_pending(int cpu)
 {
-       return __rcu_pending(&rcu_sched_state, &per_cpu(rcu_sched_data, cpu)) ||
-              __rcu_pending(&rcu_bh_state, &per_cpu(rcu_bh_data, cpu)) ||
-              rcu_preempt_pending(cpu);
+       struct rcu_state *rsp;
+
+       for_each_rcu_flavor(rsp)
+               if (__rcu_pending(rsp, per_cpu_ptr(rsp->rda, cpu)))
+                       return 1;
+       return 0;
 }
 
 /*
@@ -2264,20 +2268,41 @@ static int rcu_pending(int cpu)
  */
 static int rcu_cpu_has_callbacks(int cpu)
 {
+       struct rcu_state *rsp;
+
        /* RCU callbacks either ready or pending? */
-       return per_cpu(rcu_sched_data, cpu).nxtlist ||
-              per_cpu(rcu_bh_data, cpu).nxtlist ||
-              rcu_preempt_cpu_has_callbacks(cpu);
+       for_each_rcu_flavor(rsp)
+               if (per_cpu_ptr(rsp->rda, cpu)->nxtlist)
+                       return 1;
+       return 0;
+}
+
+/*
+ * Helper function for _rcu_barrier() tracing.  If tracing is disabled,
+ * the compiler is expected to optimize this away.
+ */
+static void _rcu_barrier_trace(struct rcu_state *rsp, char *s,
+                              int cpu, unsigned long done)
+{
+       trace_rcu_barrier(rsp->name, s, cpu,
+                         atomic_read(&rsp->barrier_cpu_count), done);
 }
 
 /*
  * RCU callback function for _rcu_barrier().  If we are last, wake
  * up the task executing _rcu_barrier().
  */
-static void rcu_barrier_callback(struct rcu_head *notused)
+static void rcu_barrier_callback(struct rcu_head *rhp)
 {
-       if (atomic_dec_and_test(&rcu_barrier_cpu_count))
-               complete(&rcu_barrier_completion);
+       struct rcu_data *rdp = container_of(rhp, struct rcu_data, barrier_head);
+       struct rcu_state *rsp = rdp->rsp;
+
+       if (atomic_dec_and_test(&rsp->barrier_cpu_count)) {
+               _rcu_barrier_trace(rsp, "LastCB", -1, rsp->n_barrier_done);
+               complete(&rsp->barrier_completion);
+       } else {
+               _rcu_barrier_trace(rsp, "CB", -1, rsp->n_barrier_done);
+       }
 }
 
 /*
@@ -2285,35 +2310,63 @@ static void rcu_barrier_callback(struct rcu_head *notused)
  */
 static void rcu_barrier_func(void *type)
 {
-       int cpu = smp_processor_id();
-       struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu);
-       void (*call_rcu_func)(struct rcu_head *head,
-                             void (*func)(struct rcu_head *head));
+       struct rcu_state *rsp = type;
+       struct rcu_data *rdp = __this_cpu_ptr(rsp->rda);
 
-       atomic_inc(&rcu_barrier_cpu_count);
-       call_rcu_func = type;
-       call_rcu_func(head, rcu_barrier_callback);
+       _rcu_barrier_trace(rsp, "IRQ", -1, rsp->n_barrier_done);
+       atomic_inc(&rsp->barrier_cpu_count);
+       rsp->call(&rdp->barrier_head, rcu_barrier_callback);
 }
 
 /*
  * Orchestrate the specified type of RCU barrier, waiting for all
  * RCU callbacks of the specified type to complete.
  */
-static void _rcu_barrier(struct rcu_state *rsp,
-                        void (*call_rcu_func)(struct rcu_head *head,
-                                              void (*func)(struct rcu_head *head)))
+static void _rcu_barrier(struct rcu_state *rsp)
 {
        int cpu;
        unsigned long flags;
        struct rcu_data *rdp;
-       struct rcu_head rh;
+       struct rcu_data rd;
+       unsigned long snap = ACCESS_ONCE(rsp->n_barrier_done);
+       unsigned long snap_done;
 
-       init_rcu_head_on_stack(&rh);
+       init_rcu_head_on_stack(&rd.barrier_head);
+       _rcu_barrier_trace(rsp, "Begin", -1, snap);
 
        /* Take mutex to serialize concurrent rcu_barrier() requests. */
-       mutex_lock(&rcu_barrier_mutex);
+       mutex_lock(&rsp->barrier_mutex);
 
-       smp_mb();  /* Prevent any prior operations from leaking in. */
+       /*
+        * Ensure that all prior references, including to ->n_barrier_done,
+        * are ordered before the _rcu_barrier() machinery.
+        */
+       smp_mb();  /* See above block comment. */
+
+       /*
+        * Recheck ->n_barrier_done to see if others did our work for us.
+        * This means checking ->n_barrier_done for an even-to-odd-to-even
+        * transition.  The "if" expression below therefore rounds the old
+        * value up to the next even number and adds two before comparing.
+        */
+       snap_done = ACCESS_ONCE(rsp->n_barrier_done);
+       _rcu_barrier_trace(rsp, "Check", -1, snap_done);
+       if (ULONG_CMP_GE(snap_done, ((snap + 1) & ~0x1) + 2)) {
+               _rcu_barrier_trace(rsp, "EarlyExit", -1, snap_done);
+               smp_mb(); /* caller's subsequent code after above check. */
+               mutex_unlock(&rsp->barrier_mutex);
+               return;
+       }
+
+       /*
+        * Increment ->n_barrier_done to avoid duplicate work.  Use
+        * ACCESS_ONCE() to prevent the compiler from speculating
+        * the increment to precede the early-exit check.
+        */
+       ACCESS_ONCE(rsp->n_barrier_done)++;
+       WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
+       _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
+       smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
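The even/odd arithmetic in the early-exit check works out as follows (illustrative values):

	/*
	 * snap == 4 (even: no _rcu_barrier() in flight at snapshot time):
	 *   early exit requires snap_done >= ((4 + 1) & ~0x1) + 2 == 6,
	 *   i.e., one full barrier (5 -> 6) since the snapshot.
	 * snap == 5 (odd: a barrier was in flight at snapshot time):
	 *   early exit requires snap_done >= ((5 + 1) & ~0x1) + 2 == 8,
	 *   i.e., the in-flight barrier finishes (-> 6) and a complete
	 *   later barrier (7 -> 8) also finishes.
	 */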
 
        /*
         * Initialize the count to one rather than to zero in order to
@@ -2332,8 +2385,8 @@ static void _rcu_barrier(struct rcu_state *rsp,
         * 6.   Both rcu_barrier_callback() callbacks are invoked, awakening
         *      us -- but before CPU 1's orphaned callbacks are invoked!!!
         */
-       init_completion(&rcu_barrier_completion);
-       atomic_set(&rcu_barrier_cpu_count, 1);
+       init_completion(&rsp->barrier_completion);
+       atomic_set(&rsp->barrier_cpu_count, 1);
        raw_spin_lock_irqsave(&rsp->onofflock, flags);
        rsp->rcu_barrier_in_progress = current;
        raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
@@ -2349,14 +2402,19 @@ static void _rcu_barrier(struct rcu_state *rsp,
                preempt_disable();
                rdp = per_cpu_ptr(rsp->rda, cpu);
                if (cpu_is_offline(cpu)) {
+                       _rcu_barrier_trace(rsp, "Offline", cpu,
+                                          rsp->n_barrier_done);
                        preempt_enable();
                        while (cpu_is_offline(cpu) && ACCESS_ONCE(rdp->qlen))
                                schedule_timeout_interruptible(1);
                } else if (ACCESS_ONCE(rdp->qlen)) {
-                       smp_call_function_single(cpu, rcu_barrier_func,
-                                                (void *)call_rcu_func, 1);
+                       _rcu_barrier_trace(rsp, "OnlineQ", cpu,
+                                          rsp->n_barrier_done);
+                       smp_call_function_single(cpu, rcu_barrier_func, rsp, 1);
                        preempt_enable();
                } else {
+                       _rcu_barrier_trace(rsp, "OnlineNQ", cpu,
+                                          rsp->n_barrier_done);
                        preempt_enable();
                }
        }
@@ -2373,24 +2431,32 @@ static void _rcu_barrier(struct rcu_state *rsp,
        rcu_adopt_orphan_cbs(rsp);
        rsp->rcu_barrier_in_progress = NULL;
        raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
-       atomic_inc(&rcu_barrier_cpu_count);
+       atomic_inc(&rsp->barrier_cpu_count);
        smp_mb__after_atomic_inc(); /* Ensure atomic_inc() before callback. */
-       call_rcu_func(&rh, rcu_barrier_callback);
+       rd.rsp = rsp;
+       rsp->call(&rd.barrier_head, rcu_barrier_callback);
 
        /*
         * Now that we have an rcu_barrier_callback() callback on each
         * CPU, and thus each counted, remove the initial count.
         */
-       if (atomic_dec_and_test(&rcu_barrier_cpu_count))
-               complete(&rcu_barrier_completion);
+       if (atomic_dec_and_test(&rsp->barrier_cpu_count))
+               complete(&rsp->barrier_completion);
+
+       /* Increment ->n_barrier_done to prevent duplicate work. */
+       smp_mb(); /* Keep increment after above mechanism. */
+       ACCESS_ONCE(rsp->n_barrier_done)++;
+       WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
+       _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
+       smp_mb(); /* Keep increment before caller's subsequent code. */
 
        /* Wait for all rcu_barrier_callback() callbacks to be invoked. */
-       wait_for_completion(&rcu_barrier_completion);
+       wait_for_completion(&rsp->barrier_completion);
 
        /* Other rcu_barrier() invocations can now safely proceed. */
-       mutex_unlock(&rcu_barrier_mutex);
+       mutex_unlock(&rsp->barrier_mutex);
 
-       destroy_rcu_head_on_stack(&rh);
+       destroy_rcu_head_on_stack(&rd.barrier_head);
 }
 
 /**
@@ -2398,7 +2464,7 @@ static void _rcu_barrier(struct rcu_state *rsp,
  */
 void rcu_barrier_bh(void)
 {
-       _rcu_barrier(&rcu_bh_state, call_rcu_bh);
+       _rcu_barrier(&rcu_bh_state);
 }
 EXPORT_SYMBOL_GPL(rcu_barrier_bh);
 
@@ -2407,7 +2473,7 @@ EXPORT_SYMBOL_GPL(rcu_barrier_bh);
  */
 void rcu_barrier_sched(void)
 {
-       _rcu_barrier(&rcu_sched_state, call_rcu_sched);
+       _rcu_barrier(&rcu_sched_state);
 }
 EXPORT_SYMBOL_GPL(rcu_barrier_sched);
 
@@ -2500,9 +2566,11 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
 
 static void __cpuinit rcu_prepare_cpu(int cpu)
 {
-       rcu_init_percpu_data(cpu, &rcu_sched_state, 0);
-       rcu_init_percpu_data(cpu, &rcu_bh_state, 0);
-       rcu_preempt_init_percpu_data(cpu);
+       struct rcu_state *rsp;
+
+       for_each_rcu_flavor(rsp)
+               rcu_init_percpu_data(cpu, rsp,
+                                    strcmp(rsp->name, "rcu_preempt") == 0);
 }
 
 /*
@@ -2514,6 +2582,7 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
        long cpu = (long)hcpu;
        struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
        struct rcu_node *rnp = rdp->mynode;
+       struct rcu_state *rsp;
 
        trace_rcu_utilization("Start CPU hotplug");
        switch (action) {
@@ -2538,18 +2607,16 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
                 * touch any data without introducing corruption. We send the
                 * dying CPU's callbacks to an arbitrarily chosen online CPU.
                 */
-               rcu_cleanup_dying_cpu(&rcu_bh_state);
-               rcu_cleanup_dying_cpu(&rcu_sched_state);
-               rcu_preempt_cleanup_dying_cpu();
+               for_each_rcu_flavor(rsp)
+                       rcu_cleanup_dying_cpu(rsp);
                rcu_cleanup_after_idle(cpu);
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
-               rcu_cleanup_dead_cpu(cpu, &rcu_bh_state);
-               rcu_cleanup_dead_cpu(cpu, &rcu_sched_state);
-               rcu_preempt_cleanup_dead_cpu(cpu);
+               for_each_rcu_flavor(rsp)
+                       rcu_cleanup_dead_cpu(cpu, rsp);
                break;
        default:
                break;
@@ -2582,9 +2649,9 @@ static void __init rcu_init_levelspread(struct rcu_state *rsp)
 {
        int i;
 
-       for (i = NUM_RCU_LVLS - 1; i > 0; i--)
+       for (i = rcu_num_lvls - 1; i > 0; i--)
                rsp->levelspread[i] = CONFIG_RCU_FANOUT;
-       rsp->levelspread[0] = CONFIG_RCU_FANOUT_LEAF;
+       rsp->levelspread[0] = rcu_fanout_leaf;
 }
 #else /* #ifdef CONFIG_RCU_FANOUT_EXACT */
 static void __init rcu_init_levelspread(struct rcu_state *rsp)
@@ -2594,7 +2661,7 @@ static void __init rcu_init_levelspread(struct rcu_state *rsp)
        int i;
 
        cprv = NR_CPUS;
-       for (i = NUM_RCU_LVLS - 1; i >= 0; i--) {
+       for (i = rcu_num_lvls - 1; i >= 0; i--) {
                ccur = rsp->levelcnt[i];
                rsp->levelspread[i] = (cprv + ccur - 1) / ccur;
                cprv = ccur;
@@ -2621,13 +2688,15 @@ static void __init rcu_init_one(struct rcu_state *rsp,
 
        /* Initialize the level-tracking arrays. */
 
-       for (i = 1; i < NUM_RCU_LVLS; i++)
+       for (i = 0; i < rcu_num_lvls; i++)
+               rsp->levelcnt[i] = num_rcu_lvl[i];
+       for (i = 1; i < rcu_num_lvls; i++)
                rsp->level[i] = rsp->level[i - 1] + rsp->levelcnt[i - 1];
        rcu_init_levelspread(rsp);
 
        /* Initialize the elements themselves, starting from the leaves. */
 
-       for (i = NUM_RCU_LVLS - 1; i >= 0; i--) {
+       for (i = rcu_num_lvls - 1; i >= 0; i--) {
                cpustride *= rsp->levelspread[i];
                rnp = rsp->level[i];
                for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) {
@@ -2657,13 +2726,74 @@ static void __init rcu_init_one(struct rcu_state *rsp,
        }
 
        rsp->rda = rda;
-       rnp = rsp->level[NUM_RCU_LVLS - 1];
+       rnp = rsp->level[rcu_num_lvls - 1];
        for_each_possible_cpu(i) {
                while (i > rnp->grphi)
                        rnp++;
                per_cpu_ptr(rsp->rda, i)->mynode = rnp;
                rcu_boot_init_percpu_data(i, rsp);
        }
+       list_add(&rsp->flavors, &rcu_struct_flavors);
+}
+
+/*
+ * Compute the rcu_node tree geometry from kernel parameters.  This cannot
+ * replace the definitions in rcutree.h because those are needed to size
+ * the ->node array in the rcu_state structure.
+ */
+static void __init rcu_init_geometry(void)
+{
+       int i;
+       int j;
+       int n = nr_cpu_ids;
+       int rcu_capacity[MAX_RCU_LVLS + 1];
+
+       /* If the compile-time values are accurate, just leave. */
+       if (rcu_fanout_leaf == CONFIG_RCU_FANOUT_LEAF)
+               return;
+
+       /*
+        * Compute number of CPUs that can be handled by an rcu_node tree
+        * with the given number of levels.  Setting rcu_capacity[0] makes
+        * some of the arithmetic easier.
+        */
+       rcu_capacity[0] = 1;
+       rcu_capacity[1] = rcu_fanout_leaf;
+       for (i = 2; i <= MAX_RCU_LVLS; i++)
+               rcu_capacity[i] = rcu_capacity[i - 1] * CONFIG_RCU_FANOUT;
+
+       /*
+        * The boot-time rcu_fanout_leaf parameter is only permitted
+        * to increase the leaf-level fanout, not decrease it.  Of course,
+        * the leaf-level fanout cannot exceed the number of bits in
+        * the rcu_node masks.  Finally, the tree must be able to accommodate
+        * the configured number of CPUs.  Complain and fall back to the
+        * compile-time values if these limits are exceeded.
+        */
+       if (rcu_fanout_leaf < CONFIG_RCU_FANOUT_LEAF ||
+           rcu_fanout_leaf > sizeof(unsigned long) * 8 ||
+           n > rcu_capacity[MAX_RCU_LVLS]) {
+               WARN_ON(1);
+               return;
+       }
+
+       /* Calculate the number of rcu_nodes at each level of the tree. */
+       for (i = 1; i <= MAX_RCU_LVLS; i++)
+               if (n <= rcu_capacity[i]) {
+                       for (j = 0; j <= i; j++)
+                               num_rcu_lvl[j] =
+                                       DIV_ROUND_UP(n, rcu_capacity[i - j]);
+                       rcu_num_lvls = i;
+                       for (j = i + 1; j <= MAX_RCU_LVLS; j++)
+                               num_rcu_lvl[j] = 0;
+                       break;
+               }
+
+       /* Calculate the total number of rcu_node structures. */
+       rcu_num_nodes = 0;
+       for (i = 0; i <= MAX_RCU_LVLS; i++)
+               rcu_num_nodes += num_rcu_lvl[i];
+       rcu_num_nodes -= n;
 }
 
 void __init rcu_init(void)
@@ -2671,6 +2801,7 @@ void __init rcu_init(void)
        int cpu;
 
        rcu_bootup_announce();
+       rcu_init_geometry();
        rcu_init_one(&rcu_sched_state, &rcu_sched_data);
        rcu_init_one(&rcu_bh_state, &rcu_bh_data);
        __rcu_init_preempt();
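As a worked example of the rcu_init_geometry() call above, assume CONFIG_RCU_FANOUT=64, nr_cpu_ids=4096, and a boot-time rcu_fanout_leaf=64 (all values illustrative):

	/*
	 * rcu_capacity[] = { 1, 64, 4096 }, so n == 4096 first fits at i == 2:
	 *   num_rcu_lvl[0] = DIV_ROUND_UP(4096, 4096) = 1    (root)
	 *   num_rcu_lvl[1] = DIV_ROUND_UP(4096, 64)   = 64   (leaves)
	 *   num_rcu_lvl[2] = DIV_ROUND_UP(4096, 1)    = 4096 (CPUs)
	 *   rcu_num_lvls   = 2
	 *   rcu_num_nodes  = (1 + 64 + 4096) - 4096   = 65
	 */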
index 19b61ac1079f825702a7f17ae7d8dec88756c141..4d29169f212468bdc6f8dd17311ecf8bdd6850a8 100644 (file)
 #define RCU_FANOUT_4         (RCU_FANOUT_3 * CONFIG_RCU_FANOUT)
 
 #if NR_CPUS <= RCU_FANOUT_1
-#  define NUM_RCU_LVLS       1
+#  define RCU_NUM_LVLS       1
 #  define NUM_RCU_LVL_0              1
 #  define NUM_RCU_LVL_1              (NR_CPUS)
 #  define NUM_RCU_LVL_2              0
 #  define NUM_RCU_LVL_3              0
 #  define NUM_RCU_LVL_4              0
 #elif NR_CPUS <= RCU_FANOUT_2
-#  define NUM_RCU_LVLS       2
+#  define RCU_NUM_LVLS       2
 #  define NUM_RCU_LVL_0              1
 #  define NUM_RCU_LVL_1              DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
 #  define NUM_RCU_LVL_2              (NR_CPUS)
 #  define NUM_RCU_LVL_3              0
 #  define NUM_RCU_LVL_4              0
 #elif NR_CPUS <= RCU_FANOUT_3
-#  define NUM_RCU_LVLS       3
+#  define RCU_NUM_LVLS       3
 #  define NUM_RCU_LVL_0              1
 #  define NUM_RCU_LVL_1              DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
 #  define NUM_RCU_LVL_2              DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
 #  define NUM_RCU_LVL_3              (NR_CPUS)
 #  define NUM_RCU_LVL_4              0
 #elif NR_CPUS <= RCU_FANOUT_4
-#  define NUM_RCU_LVLS       4
+#  define RCU_NUM_LVLS       4
 #  define NUM_RCU_LVL_0              1
 #  define NUM_RCU_LVL_1              DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_3)
 #  define NUM_RCU_LVL_2              DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
@@ -76,6 +76,9 @@
 #define RCU_SUM (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3 + NUM_RCU_LVL_4)
 #define NUM_RCU_NODES (RCU_SUM - NR_CPUS)
 
+extern int rcu_num_lvls;
+extern int rcu_num_nodes;
+
 /*
  * Dynticks per-CPU state.
  */
@@ -97,6 +100,7 @@ struct rcu_dynticks {
                                    /* # times non-lazy CBs posted to CPU. */
        unsigned long nonlazy_posted_snap;
                                    /* idle-period nonlazy_posted snapshot. */
+       int tick_nohz_enabled_snap; /* Previously seen value from sysfs. */
 #endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
 };
 
@@ -206,7 +210,7 @@ struct rcu_node {
  */
 #define rcu_for_each_node_breadth_first(rsp, rnp) \
        for ((rnp) = &(rsp)->node[0]; \
-            (rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++)
+            (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)
 
 /*
  * Do a breadth-first scan of the non-leaf rcu_node structures for the
@@ -215,7 +219,7 @@ struct rcu_node {
  */
 #define rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) \
        for ((rnp) = &(rsp)->node[0]; \
-            (rnp) < (rsp)->level[NUM_RCU_LVLS - 1]; (rnp)++)
+            (rnp) < (rsp)->level[rcu_num_lvls - 1]; (rnp)++)
 
 /*
  * Scan the leaves of the rcu_node hierarchy for the specified rcu_state
@@ -224,8 +228,8 @@ struct rcu_node {
  * It is still a leaf node, even if it is also the root node.
  */
 #define rcu_for_each_leaf_node(rsp, rnp) \
-       for ((rnp) = (rsp)->level[NUM_RCU_LVLS - 1]; \
-            (rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++)
+       for ((rnp) = (rsp)->level[rcu_num_lvls - 1]; \
+            (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)
 
 /* Index values for nxttail array in struct rcu_data. */
 #define RCU_DONE_TAIL          0       /* Also RCU_WAIT head. */
@@ -311,6 +315,9 @@ struct rcu_data {
        unsigned long n_rp_need_fqs;
        unsigned long n_rp_need_nothing;
 
+       /* 6) _rcu_barrier() callback. */
+       struct rcu_head barrier_head;
+
        int cpu;
        struct rcu_state *rsp;
 };
@@ -357,10 +364,12 @@ do {                                                                      \
  */
 struct rcu_state {
        struct rcu_node node[NUM_RCU_NODES];    /* Hierarchy. */
-       struct rcu_node *level[NUM_RCU_LVLS];   /* Hierarchy levels. */
+       struct rcu_node *level[RCU_NUM_LVLS];   /* Hierarchy levels. */
        u32 levelcnt[MAX_RCU_LVLS + 1];         /* # nodes in each level. */
-       u8 levelspread[NUM_RCU_LVLS];           /* kids/node in each level. */
+       u8 levelspread[RCU_NUM_LVLS];           /* kids/node in each level. */
        struct rcu_data __percpu *rda;          /* pointer to per-CPU rcu_data. */
+       void (*call)(struct rcu_head *head,     /* call_rcu() flavor. */
+                    void (*func)(struct rcu_head *head));
 
        /* The following fields are guarded by the root rcu_node's lock. */
 
@@ -392,6 +401,11 @@ struct rcu_state {
        struct task_struct *rcu_barrier_in_progress;
                                                /* Task doing rcu_barrier(), */
                                                /*  or NULL if no barrier. */
+       struct mutex barrier_mutex;             /* Guards barrier fields. */
+       atomic_t barrier_cpu_count;             /* # CPUs waiting on. */
+       struct completion barrier_completion;   /* Wake at barrier end. */
+       unsigned long n_barrier_done;           /* ++ at start and end of */
+                                               /*  _rcu_barrier(). */
        raw_spinlock_t fqslock;                 /* Only one task forcing */
                                                /*  quiescent states. */
        unsigned long jiffies_force_qs;         /* Time at which to invoke */
@@ -409,8 +423,13 @@ struct rcu_state {
        unsigned long gp_max;                   /* Maximum GP duration in */
                                                /*  jiffies. */
        char *name;                             /* Name of structure. */
+       struct list_head flavors;               /* List of RCU flavors. */
 };
 
+extern struct list_head rcu_struct_flavors;
+#define for_each_rcu_flavor(rsp) \
+       list_for_each_entry((rsp), &rcu_struct_flavors, flavors)
+
 /* Return values for rcu_preempt_offline_tasks(). */
 
 #define RCU_OFL_TASKS_NORM_GP  0x1             /* Tasks blocking normal */
@@ -453,25 +472,18 @@ static void rcu_stop_cpu_kthread(int cpu);
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
 static void rcu_print_detail_task_stall(struct rcu_state *rsp);
 static int rcu_print_task_stall(struct rcu_node *rnp);
-static void rcu_preempt_stall_reset(void);
 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
 #ifdef CONFIG_HOTPLUG_CPU
 static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
                                     struct rcu_node *rnp,
                                     struct rcu_data *rdp);
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
-static void rcu_preempt_cleanup_dead_cpu(int cpu);
 static void rcu_preempt_check_callbacks(int cpu);
-static void rcu_preempt_process_callbacks(void);
 void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
 #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU)
 static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
                               bool wake);
 #endif /* #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU) */
-static int rcu_preempt_pending(int cpu);
-static int rcu_preempt_cpu_has_callbacks(int cpu);
-static void __cpuinit rcu_preempt_init_percpu_data(int cpu);
-static void rcu_preempt_cleanup_dying_cpu(void);
 static void __init __rcu_init_preempt(void);
 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
index 395cdd1e063445170e9ad9b0a0ef7d511a5f7622..a9194d5606c48e68ef67a64ec4ba4b2943f8881a 100644 (file)
@@ -68,13 +68,18 @@ static void __init rcu_bootup_announce_oddness(void)
        printk(KERN_INFO "\tAdditional per-CPU info printed with stalls.\n");
 #endif
 #if NUM_RCU_LVL_4 != 0
-       printk(KERN_INFO "\tExperimental four-level hierarchy is enabled.\n");
+       printk(KERN_INFO "\tFour-level hierarchy is enabled.\n");
 #endif
+       if (rcu_fanout_leaf != CONFIG_RCU_FANOUT_LEAF)
+               printk(KERN_INFO "\tExperimental boot-time adjustment of leaf fanout to %d.\n", rcu_fanout_leaf);
+       if (nr_cpu_ids != NR_CPUS)
+               printk(KERN_INFO "\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%d.\n", NR_CPUS, nr_cpu_ids);
 }
 
 #ifdef CONFIG_TREE_PREEMPT_RCU
 
-struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt);
+struct rcu_state rcu_preempt_state =
+       RCU_STATE_INITIALIZER(rcu_preempt, call_rcu);
 DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
 static struct rcu_state *rcu_state = &rcu_preempt_state;
 
@@ -494,16 +499,6 @@ static int rcu_print_task_stall(struct rcu_node *rnp)
        return ndetected;
 }
 
-/*
- * Suppress preemptible RCU's CPU stall warnings by pushing the
- * time of the next stall-warning message comfortably far into the
- * future.
- */
-static void rcu_preempt_stall_reset(void)
-{
-       rcu_preempt_state.jiffies_stall = jiffies + ULONG_MAX / 2;
-}
-
 /*
  * Check that the list of blocked tasks for the newly completed grace
  * period is in fact empty.  It is a serious bug to complete a grace
@@ -604,14 +599,6 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
 
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
 
-/*
- * Do CPU-offline processing for preemptible RCU.
- */
-static void rcu_preempt_cleanup_dead_cpu(int cpu)
-{
-       rcu_cleanup_dead_cpu(cpu, &rcu_preempt_state);
-}
-
 /*
  * Check for a quiescent state from the current CPU.  When a task blocks,
  * the task is recorded in the corresponding CPU's rcu_node structure,
@@ -632,15 +619,6 @@ static void rcu_preempt_check_callbacks(int cpu)
                t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
 }
 
-/*
- * Process callbacks for preemptible RCU.
- */
-static void rcu_preempt_process_callbacks(void)
-{
-       __rcu_process_callbacks(&rcu_preempt_state,
-                               &__get_cpu_var(rcu_preempt_data));
-}
-
 #ifdef CONFIG_RCU_BOOST
 
 static void rcu_preempt_do_callbacks(void)
@@ -872,50 +850,15 @@ mb_ret:
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
 
-/*
- * Check to see if there is any immediate preemptible-RCU-related work
- * to be done.
- */
-static int rcu_preempt_pending(int cpu)
-{
-       return __rcu_pending(&rcu_preempt_state,
-                            &per_cpu(rcu_preempt_data, cpu));
-}
-
-/*
- * Does preemptible RCU have callbacks on this CPU?
- */
-static int rcu_preempt_cpu_has_callbacks(int cpu)
-{
-       return !!per_cpu(rcu_preempt_data, cpu).nxtlist;
-}
-
 /**
  * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
  */
 void rcu_barrier(void)
 {
-       _rcu_barrier(&rcu_preempt_state, call_rcu);
+       _rcu_barrier(&rcu_preempt_state);
 }
 EXPORT_SYMBOL_GPL(rcu_barrier);
 
-/*
- * Initialize preemptible RCU's per-CPU data.
- */
-static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
-{
-       rcu_init_percpu_data(cpu, &rcu_preempt_state, 1);
-}
-
-/*
- * Move preemptible RCU's callbacks from dying CPU to other online CPU
- * and record a quiescent state.
- */
-static void rcu_preempt_cleanup_dying_cpu(void)
-{
-       rcu_cleanup_dying_cpu(&rcu_preempt_state);
-}
-
 /*
  * Initialize preemptible RCU's state structures.
  */
@@ -1000,14 +943,6 @@ static int rcu_print_task_stall(struct rcu_node *rnp)
        return 0;
 }
 
-/*
- * Because preemptible RCU does not exist, there is no need to suppress
- * its CPU stall warnings.
- */
-static void rcu_preempt_stall_reset(void)
-{
-}
-
 /*
  * Because there is no preemptible RCU, there can be no readers blocked,
  * so there is no need to check for blocked tasks.  So check only for
@@ -1035,14 +970,6 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
 
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
 
-/*
- * Because preemptible RCU does not exist, it never needs CPU-offline
- * processing.
- */
-static void rcu_preempt_cleanup_dead_cpu(int cpu)
-{
-}
-
 /*
  * Because preemptible RCU does not exist, it never has any callbacks
  * to check.
@@ -1051,14 +978,6 @@ static void rcu_preempt_check_callbacks(int cpu)
 {
 }
 
-/*
- * Because preemptible RCU does not exist, it never has any callbacks
- * to process.
- */
-static void rcu_preempt_process_callbacks(void)
-{
-}
-
 /*
  * Queue an RCU callback for lazy invocation after a grace period.
  * This will likely be later named something like "call_rcu_lazy()",
@@ -1099,22 +1018,6 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
 
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
 
-/*
- * Because preemptible RCU does not exist, it never has any work to do.
- */
-static int rcu_preempt_pending(int cpu)
-{
-       return 0;
-}
-
-/*
- * Because preemptible RCU does not exist, it never has callbacks
- */
-static int rcu_preempt_cpu_has_callbacks(int cpu)
-{
-       return 0;
-}
-
 /*
  * Because preemptible RCU does not exist, rcu_barrier() is just
  * another name for rcu_barrier_sched().
@@ -1125,21 +1028,6 @@ void rcu_barrier(void)
 }
 EXPORT_SYMBOL_GPL(rcu_barrier);
 
-/*
- * Because preemptible RCU does not exist, there is no per-CPU
- * data to initialize.
- */
-static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
-{
-}
-
-/*
- * Because there is no preemptible RCU, there is no cleanup to do.
- */
-static void rcu_preempt_cleanup_dying_cpu(void)
-{
-}
-
 /*
  * Because preemptible RCU does not exist, it need not be initialized.
  */
@@ -1923,9 +1811,11 @@ static void rcu_idle_count_callbacks_posted(void)
  */
 #define RCU_IDLE_FLUSHES 5             /* Number of dyntick-idle tries. */
 #define RCU_IDLE_OPT_FLUSHES 3         /* Optional dyntick-idle tries. */
-#define RCU_IDLE_GP_DELAY 6            /* Roughly one grace period. */
+#define RCU_IDLE_GP_DELAY 4            /* Roughly one grace period. */
 #define RCU_IDLE_LAZY_GP_DELAY (6 * HZ)        /* Roughly six seconds. */
 
+extern int tick_nohz_enabled;
+
 /*
  * Does the specified flavor of RCU have non-lazy callbacks pending on
  * the specified CPU?  Both RCU flavor and CPU are specified by the
@@ -2002,10 +1892,13 @@ int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
                return 1;
        }
        /* Set up for the possibility that RCU will post a timer. */
-       if (rcu_cpu_has_nonlazy_callbacks(cpu))
-               *delta_jiffies = RCU_IDLE_GP_DELAY;
-       else
-               *delta_jiffies = RCU_IDLE_LAZY_GP_DELAY;
+       if (rcu_cpu_has_nonlazy_callbacks(cpu)) {
+               *delta_jiffies = round_up(RCU_IDLE_GP_DELAY + jiffies,
+                                         RCU_IDLE_GP_DELAY) - jiffies;
+       } else {
+               *delta_jiffies = jiffies + RCU_IDLE_LAZY_GP_DELAY;
+               *delta_jiffies = round_jiffies(*delta_jiffies) - jiffies;
+       }
        return 0;
 }
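The round_up() above aligns the wakeup to a multiple of RCU_IDLE_GP_DELAY jiffies, so idle CPUs tend to share a single wakeup rather than firing at scattered times; for example (jiffies value illustrative):

	/*
	 * With RCU_IDLE_GP_DELAY == 4 and jiffies == 1001:
	 *   round_up(4 + 1001, 4) - 1001 == 1008 - 1001 == 7
	 * so the timer fires at jiffy 1008 (a multiple of 4) rather
	 * than at 1005.
	 */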
 
@@ -2064,6 +1957,7 @@ static void rcu_cleanup_after_idle(int cpu)
 
        del_timer(&rdtp->idle_gp_timer);
        trace_rcu_prep_idle("Cleanup after idle");
+       rdtp->tick_nohz_enabled_snap = ACCESS_ONCE(tick_nohz_enabled);
 }
 
 /*
@@ -2089,6 +1983,18 @@ static void rcu_prepare_for_idle(int cpu)
 {
        struct timer_list *tp;
        struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+       int tne;
+
+       /* Handle nohz enablement switches conservatively. */
+       tne = ACCESS_ONCE(tick_nohz_enabled);
+       if (tne != rdtp->tick_nohz_enabled_snap) {
+               if (rcu_cpu_has_callbacks(cpu))
+                       invoke_rcu_core(); /* force nohz to see update. */
+               rdtp->tick_nohz_enabled_snap = tne;
+               return;
+       }
+       if (!tne)
+               return;
 
        /*
         * If this is an idle re-entry, for example, due to use of
@@ -2142,10 +2048,11 @@ static void rcu_prepare_for_idle(int cpu)
                if (rcu_cpu_has_nonlazy_callbacks(cpu)) {
                        trace_rcu_prep_idle("Dyntick with callbacks");
                        rdtp->idle_gp_timer_expires =
-                                          jiffies + RCU_IDLE_GP_DELAY;
+                               round_up(jiffies + RCU_IDLE_GP_DELAY,
+                                        RCU_IDLE_GP_DELAY);
                } else {
                        rdtp->idle_gp_timer_expires =
-                                          jiffies + RCU_IDLE_LAZY_GP_DELAY;
+                               round_jiffies(jiffies + RCU_IDLE_LAZY_GP_DELAY);
                        trace_rcu_prep_idle("Dyntick with lazy callbacks");
                }
                tp = &rdtp->idle_gp_timer;
index d4bc16ddd1d4aa09efb52cbee140f1f0b45afc36..a16ddbd6fdc4b2e28ca3f1814a8093f2f4c45960 100644 (file)
 #define RCU_TREE_NONCORE
 #include "rcutree.h"
 
+static int show_rcubarrier(struct seq_file *m, void *unused)
+{
+       struct rcu_state *rsp;
+
+       for_each_rcu_flavor(rsp)
+               seq_printf(m, "%s: %c bcc: %d nbd: %lu\n",
+                          rsp->name, rsp->rcu_barrier_in_progress ? 'B' : '.',
+                          atomic_read(&rsp->barrier_cpu_count),
+                          rsp->n_barrier_done);
+       return 0;
+}
+
+static int rcubarrier_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, show_rcubarrier, NULL);
+}
+
+static const struct file_operations rcubarrier_fops = {
+       .owner = THIS_MODULE,
+       .open = rcubarrier_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+};
+
 #ifdef CONFIG_RCU_BOOST
 
 static char convert_kthread_status(unsigned int kthread_status)
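
The new rcubarrier file follows the stock seq_file single_open() idiom: the show function regenerates the whole file on each open, and seq_read()/seq_lseek()/single_release() handle the rest. A hypothetical minimal module sketching the same pattern end to end, including the debugfs registration this patch performs later in rcutree_trace_init() (demo names and output are assumptions, not part of the patch):

	#include <linux/module.h>
	#include <linux/debugfs.h>
	#include <linux/seq_file.h>
	#include <linux/jiffies.h>

	static struct dentry *demo_dir;

	static int demo_show(struct seq_file *m, void *unused)
	{
		seq_printf(m, "jiffies: %lu\n", jiffies);	/* fresh on each open */
		return 0;
	}

	static int demo_open(struct inode *inode, struct file *file)
	{
		return single_open(file, demo_show, NULL);
	}

	static const struct file_operations demo_fops = {
		.owner = THIS_MODULE,
		.open = demo_open,
		.read = seq_read,
		.llseek = seq_lseek,
		.release = single_release,
	};

	static int __init demo_init(void)
	{
		demo_dir = debugfs_create_dir("seqfile_demo", NULL);
		if (!demo_dir)
			return -ENOMEM;
		if (!debugfs_create_file("state", 0444, demo_dir,
					 NULL, &demo_fops)) {
			debugfs_remove_recursive(demo_dir);
			return -ENOMEM;
		}
		return 0;
	}

	static void __exit demo_exit(void)
	{
		debugfs_remove_recursive(demo_dir);
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");
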
@@ -95,24 +120,16 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
                   rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted);
 }
 
-#define PRINT_RCU_DATA(name, func, m) \
-       do { \
-               int _p_r_d_i; \
-               \
-               for_each_possible_cpu(_p_r_d_i) \
-                       func(m, &per_cpu(name, _p_r_d_i)); \
-       } while (0)
-
 static int show_rcudata(struct seq_file *m, void *unused)
 {
-#ifdef CONFIG_TREE_PREEMPT_RCU
-       seq_puts(m, "rcu_preempt:\n");
-       PRINT_RCU_DATA(rcu_preempt_data, print_one_rcu_data, m);
-#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
-       seq_puts(m, "rcu_sched:\n");
-       PRINT_RCU_DATA(rcu_sched_data, print_one_rcu_data, m);
-       seq_puts(m, "rcu_bh:\n");
-       PRINT_RCU_DATA(rcu_bh_data, print_one_rcu_data, m);
+       int cpu;
+       struct rcu_state *rsp;
+
+       for_each_rcu_flavor(rsp) {
+               seq_printf(m, "%s:\n", rsp->name);
+               for_each_possible_cpu(cpu)
+                       print_one_rcu_data(m, per_cpu_ptr(rsp->rda, cpu));
+       }
        return 0;
 }
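
for_each_rcu_flavor(), introduced elsewhere in this series, walks the registered rcu_state structures, so the per-flavor #ifdef blocks above collapse into a single loop and a new flavor need only register itself rather than touch every show function. A userspace sketch of the shape of this refactoring (hypothetical names, with a plain linked list standing in for the kernel's flavor list):

	#include <stdio.h>

	struct rcu_state_sketch {
		const char *name;
		struct rcu_state_sketch *next;
	};

	static struct rcu_state_sketch rcu_bh = { "rcu_bh", NULL };
	static struct rcu_state_sketch rcu_sched = { "rcu_sched", &rcu_bh };
	static struct rcu_state_sketch *rcu_flavors = &rcu_sched;

	#define for_each_rcu_flavor(rsp) \
		for ((rsp) = rcu_flavors; (rsp); (rsp) = (rsp)->next)

	int main(void)
	{
		struct rcu_state_sketch *rsp;

		for_each_rcu_flavor(rsp)
			printf("%s:\n", rsp->name);	/* one stanza per flavor */
		return 0;
	}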
 
@@ -166,6 +183,9 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
 
 static int show_rcudata_csv(struct seq_file *m, void *unused)
 {
+       int cpu;
+       struct rcu_state *rsp;
+
        seq_puts(m, "\"CPU\",\"Online?\",\"c\",\"g\",\"pq\",\"pgp\",\"pq\",");
        seq_puts(m, "\"dt\",\"dt nesting\",\"dt NMI nesting\",\"df\",");
        seq_puts(m, "\"of\",\"qll\",\"ql\",\"qs\"");
@@ -173,14 +193,11 @@ static int show_rcudata_csv(struct seq_file *m, void *unused)
        seq_puts(m, "\"kt\",\"ktl\"");
 #endif /* #ifdef CONFIG_RCU_BOOST */
        seq_puts(m, ",\"b\",\"ci\",\"co\",\"ca\"\n");
-#ifdef CONFIG_TREE_PREEMPT_RCU
-       seq_puts(m, "\"rcu_preempt:\"\n");
-       PRINT_RCU_DATA(rcu_preempt_data, print_one_rcu_data_csv, m);
-#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
-       seq_puts(m, "\"rcu_sched:\"\n");
-       PRINT_RCU_DATA(rcu_sched_data, print_one_rcu_data_csv, m);
-       seq_puts(m, "\"rcu_bh:\"\n");
-       PRINT_RCU_DATA(rcu_bh_data, print_one_rcu_data_csv, m);
+       for_each_rcu_flavor(rsp) {
+               seq_printf(m, "\"%s:\"\n", rsp->name);
+               for_each_possible_cpu(cpu)
+                       print_one_rcu_data_csv(m, per_cpu_ptr(rsp->rda, cpu));
+       }
        return 0;
 }
 
@@ -270,15 +287,15 @@ static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp)
        struct rcu_node *rnp;
 
        gpnum = rsp->gpnum;
-       seq_printf(m, "c=%lu g=%lu s=%d jfq=%ld j=%x "
+       seq_printf(m, "%s: c=%lu g=%lu s=%d jfq=%ld j=%x "
                      "nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu oqlen=%ld/%ld\n",
-                  rsp->completed, gpnum, rsp->fqs_state,
+                  rsp->name, rsp->completed, gpnum, rsp->fqs_state,
                   (long)(rsp->jiffies_force_qs - jiffies),
                   (int)(jiffies & 0xffff),
                   rsp->n_force_qs, rsp->n_force_qs_ngp,
                   rsp->n_force_qs - rsp->n_force_qs_ngp,
                   rsp->n_force_qs_lh, rsp->qlen_lazy, rsp->qlen);
-       for (rnp = &rsp->node[0]; rnp - &rsp->node[0] < NUM_RCU_NODES; rnp++) {
+       for (rnp = &rsp->node[0]; rnp - &rsp->node[0] < rcu_num_nodes; rnp++) {
                if (rnp->level != level) {
                        seq_puts(m, "\n");
                        level = rnp->level;
@@ -295,14 +312,10 @@ static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp)
 
 static int show_rcuhier(struct seq_file *m, void *unused)
 {
-#ifdef CONFIG_TREE_PREEMPT_RCU
-       seq_puts(m, "rcu_preempt:\n");
-       print_one_rcu_state(m, &rcu_preempt_state);
-#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
-       seq_puts(m, "rcu_sched:\n");
-       print_one_rcu_state(m, &rcu_sched_state);
-       seq_puts(m, "rcu_bh:\n");
-       print_one_rcu_state(m, &rcu_bh_state);
+       struct rcu_state *rsp;
+
+       for_each_rcu_flavor(rsp)
+               print_one_rcu_state(m, rsp);
        return 0;
 }
 
@@ -343,11 +356,10 @@ static void show_one_rcugp(struct seq_file *m, struct rcu_state *rsp)
 
 static int show_rcugp(struct seq_file *m, void *unused)
 {
-#ifdef CONFIG_TREE_PREEMPT_RCU
-       show_one_rcugp(m, &rcu_preempt_state);
-#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
-       show_one_rcugp(m, &rcu_sched_state);
-       show_one_rcugp(m, &rcu_bh_state);
+       struct rcu_state *rsp;
+
+       for_each_rcu_flavor(rsp)
+               show_one_rcugp(m, rsp);
        return 0;
 }
 
@@ -382,28 +394,20 @@ static void print_one_rcu_pending(struct seq_file *m, struct rcu_data *rdp)
                   rdp->n_rp_need_nothing);
 }
 
-static void print_rcu_pendings(struct seq_file *m, struct rcu_state *rsp)
+static int show_rcu_pending(struct seq_file *m, void *unused)
 {
        int cpu;
        struct rcu_data *rdp;
-
-       for_each_possible_cpu(cpu) {
-               rdp = per_cpu_ptr(rsp->rda, cpu);
-               if (rdp->beenonline)
-                       print_one_rcu_pending(m, rdp);
+       struct rcu_state *rsp;
+
+       for_each_rcu_flavor(rsp) {
+               seq_printf(m, "%s:\n", rsp->name);
+               for_each_possible_cpu(cpu) {
+                       rdp = per_cpu_ptr(rsp->rda, cpu);
+                       if (rdp->beenonline)
+                               print_one_rcu_pending(m, rdp);
+               }
        }
-}
-
-static int show_rcu_pending(struct seq_file *m, void *unused)
-{
-#ifdef CONFIG_TREE_PREEMPT_RCU
-       seq_puts(m, "rcu_preempt:\n");
-       print_rcu_pendings(m, &rcu_preempt_state);
-#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
-       seq_puts(m, "rcu_sched:\n");
-       print_rcu_pendings(m, &rcu_sched_state);
-       seq_puts(m, "rcu_bh:\n");
-       print_rcu_pendings(m, &rcu_bh_state);
        return 0;
 }
 
@@ -453,6 +457,11 @@ static int __init rcutree_trace_init(void)
        if (!rcudir)
                goto free_out;
 
+       retval = debugfs_create_file("rcubarrier", 0444, rcudir,
+                                               NULL, &rcubarrier_fops);
+       if (!retval)
+               goto free_out;
+
        retval = debugfs_create_file("rcudata", 0444, rcudir,
                                                NULL, &rcudata_fops);
        if (!retval)
index 8699978339286b444d2d096dd3f0e9b6fba5c898..66ff07f6184c4ce048c2b5d52fb8de29a6f89525 100644 (file)
@@ -105,7 +105,7 @@ static ktime_t tick_init_jiffy_update(void)
 /*
  * NO HZ enabled ?
  */
-static int tick_nohz_enabled __read_mostly  = 1;
+int tick_nohz_enabled __read_mostly  = 1;
 
 /*
  * Enable / Disable tickless mode