rcu: Simplify quiescent-state detection
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index f280e542e3e9f531df83b03d1e1fce6cf8ebaaa2..6194402ec853b6cf4dbd25a85bb704b004b8073a 100644
@@ -52,6 +52,7 @@
 #include <linux/prefetch.h>
 #include <linux/delay.h>
 #include <linux/stop_machine.h>
+#include <linux/random.h>
 
 #include "rcutree.h"
 #include <trace/events/rcu.h>
@@ -61,6 +62,7 @@
 /* Data structures. */
 
 static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
+static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
 
 #define RCU_STATE_INITIALIZER(sname, cr) { \
        .level = { &sname##_state.node[0] }, \
@@ -72,7 +74,6 @@ static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
        .orphan_nxttail = &sname##_state.orphan_nxtlist, \
        .orphan_donetail = &sname##_state.orphan_donelist, \
        .barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
-       .fqslock = __RAW_SPIN_LOCK_UNLOCKED(&sname##_state.fqslock), \
        .name = #sname, \
 }
 
@@ -88,7 +89,7 @@ LIST_HEAD(rcu_struct_flavors);
 
 /* Increase (but not decrease) the CONFIG_RCU_FANOUT_LEAF at boot time. */
 static int rcu_fanout_leaf = CONFIG_RCU_FANOUT_LEAF;
-module_param(rcu_fanout_leaf, int, 0);
+module_param(rcu_fanout_leaf, int, 0444);
 int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
 static int num_rcu_lvl[] = {  /* Number of rcu_nodes at specified level. */
        NUM_RCU_LVL_0,
@@ -175,8 +176,6 @@ void rcu_sched_qs(int cpu)
 {
        struct rcu_data *rdp = &per_cpu(rcu_sched_data, cpu);
 
-       rdp->passed_quiesce_gpnum = rdp->gpnum;
-       barrier();
        if (rdp->passed_quiesce == 0)
                trace_rcu_grace_period("rcu_sched", rdp->gpnum, "cpuqs");
        rdp->passed_quiesce = 1;
@@ -186,8 +185,6 @@ void rcu_bh_qs(int cpu)
 {
        struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
 
-       rdp->passed_quiesce_gpnum = rdp->gpnum;
-       barrier();
        if (rdp->passed_quiesce == 0)
                trace_rcu_grace_period("rcu_bh", rdp->gpnum, "cpuqs");
        rdp->passed_quiesce = 1;
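Note: both hunks above drop the ->passed_quiesce_gpnum snapshot (and the barrier() that ordered it against ->passed_quiesce). The snapshot is no longer needed because rcu_report_qs_rdp(), in a later hunk of this same patch, revalidates the quiescent state against the rcu_node structure directly, under rnp->lock:

       if (rdp->passed_quiesce == 0 || rdp->gpnum != rnp->gpnum ||
           rnp->completed == rnp->gpnum) {
               /* quiescent state belongs to some earlier GP; discard it */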
@@ -216,9 +213,9 @@ static int blimit = 10;             /* Maximum callbacks per rcu_do_batch. */
 static int qhimark = 10000;    /* If this many pending, ignore blimit. */
 static int qlowmark = 100;     /* Once only this many pending, use blimit. */
 
-module_param(blimit, int, 0);
-module_param(qhimark, int, 0);
-module_param(qlowmark, int, 0);
+module_param(blimit, int, 0444);
+module_param(qhimark, int, 0444);
+module_param(qlowmark, int, 0444);
 
 int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */
 int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;
@@ -226,7 +223,14 @@ int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;
 module_param(rcu_cpu_stall_suppress, int, 0644);
 module_param(rcu_cpu_stall_timeout, int, 0644);
 
-static void force_quiescent_state(struct rcu_state *rsp, int relaxed);
+static ulong jiffies_till_first_fqs = RCU_JIFFIES_TILL_FORCE_QS;
+static ulong jiffies_till_next_fqs = RCU_JIFFIES_TILL_FORCE_QS;
+
+module_param(jiffies_till_first_fqs, ulong, 0644);
+module_param(jiffies_till_next_fqs, ulong, 0644);
+
+static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *));
+static void force_quiescent_state(struct rcu_state *rsp);
 static int rcu_pending(int cpu);
 
 /*
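Note: jiffies_till_first_fqs and jiffies_till_next_fqs replace the single hard-coded RCU_JIFFIES_TILL_FORCE_QS spacing between quiescent-state-forcing attempts, and unlike the read-only (0444) parameters above they remain writable (0644) at runtime. Assuming the usual prefix for parameters of built-in code in this file, they would be set along the lines of:

       rcutree.jiffies_till_first_fqs=3                                  (boot command line)
       echo 3 > /sys/module/rcutree/parameters/jiffies_till_next_fqs     (at runtime)

The grace-period kthread below caps both values at HZ and forces jiffies_till_next_fqs up to at least one jiffy.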
@@ -252,7 +256,7 @@ EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
  */
 void rcu_bh_force_quiescent_state(void)
 {
-       force_quiescent_state(&rcu_bh_state, 0);
+       force_quiescent_state(&rcu_bh_state);
 }
 EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
 
@@ -286,7 +290,7 @@ EXPORT_SYMBOL_GPL(rcutorture_record_progress);
  */
 void rcu_sched_force_quiescent_state(void)
 {
-       force_quiescent_state(&rcu_sched_state, 0);
+       force_quiescent_state(&rcu_sched_state);
 }
 EXPORT_SYMBOL_GPL(rcu_sched_force_quiescent_state);
 
@@ -305,7 +309,9 @@ cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp)
 static int
 cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
 {
-       return *rdp->nxttail[RCU_DONE_TAIL] && !rcu_gp_in_progress(rsp);
+       return *rdp->nxttail[RCU_DONE_TAIL +
+                            ACCESS_ONCE(rsp->completed) != rdp->completed] &&
+              !rcu_gp_in_progress(rsp);
 }
 
 /*
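Note: the rewritten cpu_needs_another_gp() packs a conditional into an array subscript. Because `+` binds more tightly than `!=`, and RCU_DONE_TAIL is defined as 0 (with RCU_WAIT_TAIL as 1), the index evaluates to RCU_WAIT_TAIL exactly when ACCESS_ONCE(rsp->completed) differs from this CPU's rdp->completed, that is, when this CPU has not yet caught up with the end of the previous grace period. A more explicit (hypothetical) spelling of the same test:

       int behind = ACCESS_ONCE(rsp->completed) != rdp->completed;

       return *rdp->nxttail[behind ? RCU_WAIT_TAIL : RCU_DONE_TAIL] &&
              !rcu_gp_in_progress(rsp);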
@@ -782,11 +788,11 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
        else if (!trigger_all_cpu_backtrace())
                dump_stack();
 
-       /* If so configured, complain about tasks blocking the grace period. */
+       /* Complain about tasks blocking the grace period. */
 
        rcu_print_detail_task_stall(rsp);
 
-       force_quiescent_state(rsp, 0);  /* Kick them all. */
+       force_quiescent_state(rsp);  /* Kick them all. */
 }
 
 static void print_cpu_stall(struct rcu_state *rsp)
@@ -889,12 +895,8 @@ static void __note_new_gpnum(struct rcu_state *rsp, struct rcu_node *rnp, struct
                 */
                rdp->gpnum = rnp->gpnum;
                trace_rcu_grace_period(rsp->name, rdp->gpnum, "cpustart");
-               if (rnp->qsmask & rdp->grpmask) {
-                       rdp->qs_pending = 1;
-                       rdp->passed_quiesce = 0;
-               } else {
-                       rdp->qs_pending = 0;
-               }
+               rdp->passed_quiesce = 0;
+               rdp->qs_pending = !!(rnp->qsmask & rdp->grpmask);
                zero_cpu_stall_ticks(rdp);
        }
 }
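Note: `!!(rnp->qsmask & rdp->grpmask)` is the usual idiom for collapsing a bitmask test to 0 or 1, so the four-line if/else reduces to a single assignment, equivalent to:

       rdp->qs_pending = (rnp->qsmask & rdp->grpmask) ? 1 : 0;

The one behavioral tweak is that ->passed_quiesce is now cleared unconditionally whenever a new grace period is noticed, not only when this CPU owes it a quiescent state.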
@@ -974,10 +976,13 @@ __rcu_process_gp_end(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_dat
                 * our behalf. Catch up with this state to avoid noting
                 * spurious new grace periods.  If another grace period
                 * has started, then rnp->gpnum will have advanced, so
-                * we will detect this later on.
+                * we will detect this later on.  Of course, any quiescent
+                * states we found for the old GP are now invalid.
                 */
-               if (ULONG_CMP_LT(rdp->gpnum, rdp->completed))
+               if (ULONG_CMP_LT(rdp->gpnum, rdp->completed)) {
                        rdp->gpnum = rdp->completed;
+                       rdp->passed_quiesce = 0;
+               }
 
                /*
                 * If RCU does not need a quiescent state from this CPU,
@@ -1021,97 +1026,56 @@ rcu_start_gp_per_cpu(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_dat
        /* Prior grace period ended, so advance callbacks for current CPU. */
        __rcu_process_gp_end(rsp, rnp, rdp);
 
-       /*
-        * Because this CPU just now started the new grace period, we know
-        * that all of its callbacks will be covered by this upcoming grace
-        * period, even the ones that were registered arbitrarily recently.
-        * Therefore, advance all outstanding callbacks to RCU_WAIT_TAIL.
-        *
-        * Other CPUs cannot be sure exactly when the grace period started.
-        * Therefore, their recently registered callbacks must pass through
-        * an additional RCU_NEXT_READY stage, so that they will be handled
-        * by the next RCU grace period.
-        */
-       rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
-       rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
-
        /* Set state so that this CPU will detect the next quiescent state. */
        __note_new_gpnum(rsp, rnp, rdp);
 }
 
 /*
- * Start a new RCU grace period if warranted, re-initializing the hierarchy
- * in preparation for detecting the next grace period.  The caller must hold
- * the root node's ->lock, which is released before return.  Hard irqs must
- * be disabled.
- *
- * Note that it is legal for a dying CPU (which is marked as offline) to
- * invoke this function.  This can happen when the dying CPU reports its
- * quiescent state.
+ * Initialize a new grace period.
  */
-static void
-rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
-       __releases(rcu_get_root(rsp)->lock)
+static int rcu_gp_init(struct rcu_state *rsp)
 {
-       struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
+       struct rcu_data *rdp;
        struct rcu_node *rnp = rcu_get_root(rsp);
 
-       if (!rcu_scheduler_fully_active ||
-           !cpu_needs_another_gp(rsp, rdp)) {
-               /*
-                * Either the scheduler hasn't yet spawned the first
-                * non-idle task or this CPU does not need another
-                * grace period.  Either way, don't start a new grace
-                * period.
-                */
-               raw_spin_unlock_irqrestore(&rnp->lock, flags);
-               return;
-       }
+       raw_spin_lock_irq(&rnp->lock);
+       rsp->gp_flags = 0; /* Clear all flags: New grace period. */
 
-       if (rsp->fqs_active) {
-               /*
-                * This CPU needs a grace period, but force_quiescent_state()
-                * is running.  Tell it to start one on this CPU's behalf.
-                */
-               rsp->fqs_need_gp = 1;
-               raw_spin_unlock_irqrestore(&rnp->lock, flags);
-               return;
+       if (rcu_gp_in_progress(rsp)) {
+               /* Grace period already in progress, don't start another.  */
+               raw_spin_unlock_irq(&rnp->lock);
+               return 0;
        }
 
        /* Advance to a new grace period and initialize state. */
        rsp->gpnum++;
        trace_rcu_grace_period(rsp->name, rsp->gpnum, "start");
-       WARN_ON_ONCE(rsp->fqs_state == RCU_GP_INIT);
-       rsp->fqs_state = RCU_GP_INIT; /* Hold off force_quiescent_state. */
-       rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
        record_gp_stall_check_time(rsp);
-       raw_spin_unlock(&rnp->lock);  /* leave irqs disabled. */
+       raw_spin_unlock_irq(&rnp->lock);
 
        /* Exclude any concurrent CPU-hotplug operations. */
-       raw_spin_lock(&rsp->onofflock);  /* irqs already disabled. */
+       get_online_cpus();
 
        /*
         * Set the quiescent-state-needed bits in all the rcu_node
-        * structures for all currently online CPUs in breadth-first
-        * order, starting from the root rcu_node structure.  This
-        * operation relies on the layout of the hierarchy within the
-        * rsp->node[] array.  Note that other CPUs will access only
-        * the leaves of the hierarchy, which still indicate that no
+        * structures for all currently online CPUs in breadth-first order,
+        * starting from the root rcu_node structure, relying on the layout
+        * of the tree within the rsp->node[] array.  Note that other CPUs
+        * will access only the leaves of the hierarchy, thus seeing that no
         * grace period is in progress, at least until the corresponding
         * leaf node has been initialized.  In addition, we have excluded
         * CPU-hotplug operations.
         *
-        * Note that the grace period cannot complete until we finish
-        * the initialization process, as there will be at least one
-        * qsmask bit set in the root node until that time, namely the
-        * one corresponding to this CPU, due to the fact that we have
-        * irqs disabled.
+        * The grace period cannot complete until the initialization
+        * process finishes, because this kthread handles both.
         */
        rcu_for_each_node_breadth_first(rsp, rnp) {
-               raw_spin_lock(&rnp->lock);      /* irqs already disabled. */
+               raw_spin_lock_irq(&rnp->lock);
+               rdp = this_cpu_ptr(rsp->rda);
                rcu_preempt_check_blocked_tasks(rnp);
                rnp->qsmask = rnp->qsmaskinit;
                rnp->gpnum = rsp->gpnum;
+               WARN_ON_ONCE(rnp->completed != rsp->completed);
                rnp->completed = rsp->completed;
                if (rnp == rdp->mynode)
                        rcu_start_gp_per_cpu(rsp, rnp, rdp);
@@ -1119,37 +1083,54 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
                trace_rcu_grace_period_init(rsp->name, rnp->gpnum,
                                            rnp->level, rnp->grplo,
                                            rnp->grphi, rnp->qsmask);
-               raw_spin_unlock(&rnp->lock);    /* irqs remain disabled. */
+               raw_spin_unlock_irq(&rnp->lock);
+#ifdef CONFIG_PROVE_RCU_DELAY
+               if ((random32() % (rcu_num_nodes * 8)) == 0)
+                       schedule_timeout_uninterruptible(2);
+#endif /* #ifdef CONFIG_PROVE_RCU_DELAY */
+               cond_resched();
        }
 
-       rnp = rcu_get_root(rsp);
-       raw_spin_lock(&rnp->lock);              /* irqs already disabled. */
-       rsp->fqs_state = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */
-       raw_spin_unlock(&rnp->lock);            /* irqs remain disabled. */
-       raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
+       put_online_cpus();
+       return 1;
 }
 
 /*
- * Report a full set of quiescent states to the specified rcu_state
- * data structure.  This involves cleaning up after the prior grace
- * period and letting rcu_start_gp() start up the next grace period
- * if one is needed.  Note that the caller must hold rnp->lock, as
- * required by rcu_start_gp(), which will release it.
+ * Do one round of quiescent-state forcing.
  */
-static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
-       __releases(rcu_get_root(rsp)->lock)
+int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
 {
-       unsigned long gp_duration;
+       int fqs_state = fqs_state_in;
        struct rcu_node *rnp = rcu_get_root(rsp);
-       struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
 
-       WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
+       rsp->n_force_qs++;
+       if (fqs_state == RCU_SAVE_DYNTICK) {
+               /* Collect dyntick-idle snapshots. */
+               force_qs_rnp(rsp, dyntick_save_progress_counter);
+               fqs_state = RCU_FORCE_QS;
+       } else {
+               /* Handle dyntick-idle and offline CPUs. */
+               force_qs_rnp(rsp, rcu_implicit_dynticks_qs);
+       }
+       /* Clear flag to prevent immediate re-entry. */
+       if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
+               raw_spin_lock_irq(&rnp->lock);
+               rsp->gp_flags &= ~RCU_GP_FLAG_FQS;
+               raw_spin_unlock_irq(&rnp->lock);
+       }
+       return fqs_state;
+}
 
-       /*
-        * Ensure that all grace-period and pre-grace-period activity
-        * is seen before the assignment to rsp->completed.
-        */
-       smp_mb(); /* See above block comment. */
+/*
+ * Clean up after the old grace period.
+ */
+static void rcu_gp_cleanup(struct rcu_state *rsp)
+{
+       unsigned long gp_duration;
+       struct rcu_data *rdp;
+       struct rcu_node *rnp = rcu_get_root(rsp);
+
+       raw_spin_lock_irq(&rnp->lock);
        gp_duration = jiffies - rsp->gp_start;
        if (gp_duration > rsp->gp_max)
                rsp->gp_max = gp_duration;
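Note: rcu_gp_fqs() above keeps the old two-phase forcing scheme, minus the fqslock/fqs_active choreography: the first round of a grace period (RCU_SAVE_DYNTICK) snapshots each CPU's dyntick-idle counter through dyntick_save_progress_counter(), and every later round (RCU_FORCE_QS) compares against those snapshots via rcu_implicit_dynticks_qs() to report quiescent states on behalf of idle and offline CPUs. Clearing RCU_GP_FLAG_FQS under the root rcu_node ->lock appears intended to serialize against force_quiescent_state() setting that flag under the same lock, so a concurrent request is either absorbed by the round just completed or cleanly triggers the next one.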
@@ -1161,35 +1142,149 @@ static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
         * they can do to advance the grace period.  It is therefore
         * safe for us to drop the lock in order to mark the grace
         * period as completed in all of the rcu_node structures.
-        *
-        * But if this CPU needs another grace period, it will take
-        * care of this while initializing the next grace period.
-        * We use RCU_WAIT_TAIL instead of the usual RCU_DONE_TAIL
-        * because the callbacks have not yet been advanced: Those
-        * callbacks are waiting on the grace period that just now
-        * completed.
         */
-       if (*rdp->nxttail[RCU_WAIT_TAIL] == NULL) {
-               raw_spin_unlock(&rnp->lock);     /* irqs remain disabled. */
+       raw_spin_unlock_irq(&rnp->lock);
 
-               /*
-                * Propagate new ->completed value to rcu_node structures
-                * so that other CPUs don't have to wait until the start
-                * of the next grace period to process their callbacks.
-                */
-               rcu_for_each_node_breadth_first(rsp, rnp) {
-                       raw_spin_lock(&rnp->lock); /* irqs already disabled. */
-                       rnp->completed = rsp->gpnum;
-                       raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
-               }
-               rnp = rcu_get_root(rsp);
-               raw_spin_lock(&rnp->lock); /* irqs already disabled. */
+       /*
+        * Propagate new ->completed value to rcu_node structures so
+        * that other CPUs don't have to wait until the start of the next
+        * grace period to process their callbacks.  This also avoids
+        * some nasty RCU grace-period initialization races by forcing
+        * the end of the current grace period to be completely recorded in
+        * all of the rcu_node structures before the beginning of the next
+        * grace period is recorded in any of the rcu_node structures.
+        */
+       rcu_for_each_node_breadth_first(rsp, rnp) {
+               raw_spin_lock_irq(&rnp->lock);
+               rnp->completed = rsp->gpnum;
+               raw_spin_unlock_irq(&rnp->lock);
+               cond_resched();
        }
+       rnp = rcu_get_root(rsp);
+       raw_spin_lock_irq(&rnp->lock);
 
-       rsp->completed = rsp->gpnum;  /* Declare the grace period complete. */
+       rsp->completed = rsp->gpnum; /* Declare grace period done. */
        trace_rcu_grace_period(rsp->name, rsp->completed, "end");
        rsp->fqs_state = RCU_GP_IDLE;
-       rcu_start_gp(rsp, flags);  /* releases root node's rnp->lock. */
+       rdp = this_cpu_ptr(rsp->rda);
+       if (cpu_needs_another_gp(rsp, rdp))
+               rsp->gp_flags = 1;
+       raw_spin_unlock_irq(&rnp->lock);
+}
+
+/*
+ * Body of kthread that handles grace periods.
+ */
+static int __noreturn rcu_gp_kthread(void *arg)
+{
+       int fqs_state;
+       unsigned long j;
+       int ret;
+       struct rcu_state *rsp = arg;
+       struct rcu_node *rnp = rcu_get_root(rsp);
+
+       for (;;) {
+
+               /* Handle grace-period start. */
+               for (;;) {
+                       wait_event_interruptible(rsp->gp_wq,
+                                                rsp->gp_flags &
+                                                RCU_GP_FLAG_INIT);
+                       if ((rsp->gp_flags & RCU_GP_FLAG_INIT) &&
+                           rcu_gp_init(rsp))
+                               break;
+                       cond_resched();
+                       flush_signals(current);
+               }
+
+               /* Handle quiescent-state forcing. */
+               fqs_state = RCU_SAVE_DYNTICK;
+               j = jiffies_till_first_fqs;
+               if (j > HZ) {
+                       j = HZ;
+                       jiffies_till_first_fqs = HZ;
+               }
+               for (;;) {
+                       rsp->jiffies_force_qs = jiffies + j;
+                       ret = wait_event_interruptible_timeout(rsp->gp_wq,
+                                       (rsp->gp_flags & RCU_GP_FLAG_FQS) ||
+                                       (!ACCESS_ONCE(rnp->qsmask) &&
+                                        !rcu_preempt_blocked_readers_cgp(rnp)),
+                                       j);
+                       /* If grace period done, leave loop. */
+                       if (!ACCESS_ONCE(rnp->qsmask) &&
+                           !rcu_preempt_blocked_readers_cgp(rnp))
+                               break;
+                       /* If time for quiescent-state forcing, do it. */
+                       if (ret == 0 || (rsp->gp_flags & RCU_GP_FLAG_FQS)) {
+                               fqs_state = rcu_gp_fqs(rsp, fqs_state);
+                               cond_resched();
+                       } else {
+                               /* Deal with stray signal. */
+                               cond_resched();
+                               flush_signals(current);
+                       }
+                       j = jiffies_till_next_fqs;
+                       if (j > HZ) {
+                               j = HZ;
+                               jiffies_till_next_fqs = HZ;
+                       } else if (j < 1) {
+                               j = 1;
+                               jiffies_till_next_fqs = 1;
+                       }
+               }
+
+               /* Handle grace-period end. */
+               rcu_gp_cleanup(rsp);
+       }
+}
+
+/*
+ * Start a new RCU grace period if warranted, re-initializing the hierarchy
+ * in preparation for detecting the next grace period.  The caller must hold
+ * the root node's ->lock, which is released before return.  Hard irqs must
+ * be disabled.
+ *
+ * Note that it is legal for a dying CPU (which is marked as offline) to
+ * invoke this function.  This can happen when the dying CPU reports its
+ * quiescent state.
+ */
+static void
+rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
+       __releases(rcu_get_root(rsp)->lock)
+{
+       struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
+       struct rcu_node *rnp = rcu_get_root(rsp);
+
+       if (!rsp->gp_kthread ||
+           !cpu_needs_another_gp(rsp, rdp)) {
+               /*
+                * Either we have not yet spawned the grace-period
+                * task or this CPU does not need another grace period.
+                * Either way, don't start a new grace period.
+                */
+               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+               return;
+       }
+
+       rsp->gp_flags = RCU_GP_FLAG_INIT;
+       raw_spin_unlock_irqrestore(&rnp->lock, flags);
+       wake_up(&rsp->gp_wq);
+}
+
+/*
+ * Report a full set of quiescent states to the specified rcu_state
+ * data structure.  This involves cleaning up after the prior grace
+ * period and letting rcu_start_gp() start up the next grace period
+ * if one is needed.  Note that the caller must hold rnp->lock, as
+ * required by rcu_start_gp(), which will release it.
+ */
+static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
+       __releases(rcu_get_root(rsp)->lock)
+{
+       WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
+       raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags);
+       wake_up(&rsp->gp_wq);  /* Memory barrier implied by wake_up() path. */
 }
 
 /*
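Note: this is the heart of the reorganization. Grace-period initialization (rcu_gp_init()), quiescent-state forcing (rcu_gp_fqs()), and cleanup (rcu_gp_cleanup()) now all execute in a per-flavor kthread, so rcu_start_gp() and rcu_report_qs_rsp() shrink to "set a flag, drop the lock, wake the kthread". Running in process context also legalizes the cond_resched() calls sprinkled through the long breadth-first rcu_node traversals, which the old irqs-disabled rcu_start_gp() could never do. A condensed restatement of the kthread loop (not a drop-in replacement; rnp is rcu_get_root(rsp), and the stray-signal handling is elided):

       for (;;) {
               wait_event_interruptible(rsp->gp_wq,
                                        rsp->gp_flags & RCU_GP_FLAG_INIT);
               if (!rcu_gp_init(rsp))
                       continue;       /* spurious wakeup or GP already running */
               fqs_state = RCU_SAVE_DYNTICK;
               while (ACCESS_ONCE(rnp->qsmask) ||
                      rcu_preempt_blocked_readers_cgp(rnp))
                       fqs_state = rcu_gp_fqs(rsp, fqs_state); /* on timeout or
                                                                  RCU_GP_FLAG_FQS */
               rcu_gp_cleanup(rsp);
       }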
@@ -1258,7 +1353,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
  * based on quiescent states detected in an earlier grace period!
  */
 static void
-rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastgp)
+rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
 {
        unsigned long flags;
        unsigned long mask;
@@ -1266,7 +1361,8 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long las
 
        rnp = rdp->mynode;
        raw_spin_lock_irqsave(&rnp->lock, flags);
-       if (lastgp != rnp->gpnum || rnp->completed == rnp->gpnum) {
+       if (rdp->passed_quiesce == 0 || rdp->gpnum != rnp->gpnum ||
+           rnp->completed == rnp->gpnum) {
 
                /*
                 * The grace period in which this quiescent state was
@@ -1325,7 +1421,7 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
         * Tell RCU we are done (but rcu_report_qs_rdp() will be the
         * judge of that).
         */
-       rcu_report_qs_rdp(rdp->cpu, rsp, rdp, rdp->passed_quiesce_gpnum);
+       rcu_report_qs_rdp(rdp->cpu, rsp, rdp);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -1687,6 +1783,7 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *))
        struct rcu_node *rnp;
 
        rcu_for_each_leaf_node(rsp, rnp) {
+               cond_resched();
                mask = 0;
                raw_spin_lock_irqsave(&rnp->lock, flags);
                if (!rcu_gp_in_progress(rsp)) {
@@ -1723,72 +1820,39 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *))
  * Force quiescent states on reluctant CPUs, and also detect which
  * CPUs are in dyntick-idle mode.
  */
-static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
+static void force_quiescent_state(struct rcu_state *rsp)
 {
        unsigned long flags;
-       struct rcu_node *rnp = rcu_get_root(rsp);
-
-       trace_rcu_utilization("Start fqs");
-       if (!rcu_gp_in_progress(rsp)) {
-               trace_rcu_utilization("End fqs");
-               return;  /* No grace period in progress, nothing to force. */
-       }
-       if (!raw_spin_trylock_irqsave(&rsp->fqslock, flags)) {
-               rsp->n_force_qs_lh++; /* Inexact, can lose counts.  Tough! */
-               trace_rcu_utilization("End fqs");
-               return; /* Someone else is already on the job. */
-       }
-       if (relaxed && ULONG_CMP_GE(rsp->jiffies_force_qs, jiffies))
-               goto unlock_fqs_ret; /* no emergency and done recently. */
-       rsp->n_force_qs++;
-       raw_spin_lock(&rnp->lock);  /* irqs already disabled */
-       rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
-       if(!rcu_gp_in_progress(rsp)) {
-               rsp->n_force_qs_ngp++;
-               raw_spin_unlock(&rnp->lock);  /* irqs remain disabled */
-               goto unlock_fqs_ret;  /* no GP in progress, time updated. */
-       }
-       rsp->fqs_active = 1;
-       switch (rsp->fqs_state) {
-       case RCU_GP_IDLE:
-       case RCU_GP_INIT:
-
-               break; /* grace period idle or initializing, ignore. */
-
-       case RCU_SAVE_DYNTICK:
-
-               raw_spin_unlock(&rnp->lock);  /* irqs remain disabled */
-
-               /* Record dyntick-idle state. */
-               force_qs_rnp(rsp, dyntick_save_progress_counter);
-               raw_spin_lock(&rnp->lock);  /* irqs already disabled */
-               if (rcu_gp_in_progress(rsp))
-                       rsp->fqs_state = RCU_FORCE_QS;
-               break;
-
-       case RCU_FORCE_QS:
-
-               /* Check dyntick-idle state, send IPI to laggarts. */
-               raw_spin_unlock(&rnp->lock);  /* irqs remain disabled */
-               force_qs_rnp(rsp, rcu_implicit_dynticks_qs);
-
-               /* Leave state in case more forcing is required. */
-
-               raw_spin_lock(&rnp->lock);  /* irqs already disabled */
-               break;
+       bool ret;
+       struct rcu_node *rnp;
+       struct rcu_node *rnp_old = NULL;
+
+       /* Funnel through hierarchy to reduce memory contention. */
+       rnp = per_cpu_ptr(rsp->rda, raw_smp_processor_id())->mynode;
+       for (; rnp != NULL; rnp = rnp->parent) {
+               ret = (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) ||
+                     !raw_spin_trylock(&rnp->fqslock);
+               if (rnp_old != NULL)
+                       raw_spin_unlock(&rnp_old->fqslock);
+               if (ret) {
+                       rsp->n_force_qs_lh++;
+                       return;
+               }
+               rnp_old = rnp;
        }
-       rsp->fqs_active = 0;
-       if (rsp->fqs_need_gp) {
-               raw_spin_unlock(&rsp->fqslock); /* irqs remain disabled */
-               rsp->fqs_need_gp = 0;
-               rcu_start_gp(rsp, flags); /* releases rnp->lock */
-               trace_rcu_utilization("End fqs");
-               return;
+       /* rnp_old == rcu_get_root(rsp), rnp == NULL. */
+
+       /* Reached the root of the rcu_node tree, acquire lock. */
+       raw_spin_lock_irqsave(&rnp_old->lock, flags);
+       raw_spin_unlock(&rnp_old->fqslock);
+       if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
+               rsp->n_force_qs_lh++;
+               raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
+               return;  /* Someone beat us to it. */
        }
-       raw_spin_unlock(&rnp->lock);  /* irqs remain disabled */
-unlock_fqs_ret:
-       raw_spin_unlock_irqrestore(&rsp->fqslock, flags);
-       trace_rcu_utilization("End fqs");
+       rsp->gp_flags |= RCU_GP_FLAG_FQS;
+       raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
+       wake_up(&rsp->gp_wq);  /* Memory barrier implied by wake_up() path. */
 }
 
 /*
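Note: force_quiescent_state() no longer forces anything itself; it funnels a request up to the kthread. The old global rsp->fqslock becomes a per-rcu_node ->fqslock taken leaf-to-root, holding at most one trylock at a time, with an early bailout whenever RCU_GP_FLAG_FQS is already set or a trylock fails. This bounds contention: each rcu_node level admits one winner per subtree, so the root sees at most fanout-many contenders instead of one per CPU. A minimal user-space sketch of the same funnel pattern (hypothetical types, pthreads standing in for raw spinlocks):

       #include <pthread.h>
       #include <stddef.h>

       struct node {
               struct node *parent;            /* NULL at the root */
               pthread_mutex_t lock;           /* per-level funnel lock */
       };

       /* Returns 1 if this caller posted the request, 0 if it lost the race. */
       static int funnel_request(struct node *leaf, volatile int *requested)
       {
               struct node *n, *held = NULL;

               for (n = leaf; n != NULL; n = n->parent) {
                       int fail = *requested ||
                                  pthread_mutex_trylock(&n->lock) != 0;
                       if (held)
                               pthread_mutex_unlock(&held->lock);
                       if (fail)
                               return 0;       /* someone else is on the job */
                       held = n;
               }
               *requested = 1;                 /* held is now the root */
               pthread_mutex_unlock(&held->lock);
               return 1;
       }

In the kernel version the final step additionally takes the root rcu_node ->lock and rechecks the flag before setting it and waking rsp->gp_wq.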
@@ -1804,13 +1868,6 @@ __rcu_process_callbacks(struct rcu_state *rsp)
 
        WARN_ON_ONCE(rdp->beenonline == 0);
 
-       /*
-        * If an RCU GP has gone long enough, go check for dyntick
-        * idle CPUs and, if needed, send resched IPIs.
-        */
-       if (ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies))
-               force_quiescent_state(rsp, 1);
-
        /*
         * Advance callbacks in response to end of earlier grace
         * period that some other CPU ended.
@@ -1838,6 +1895,8 @@ static void rcu_process_callbacks(struct softirq_action *unused)
 {
        struct rcu_state *rsp;
 
+       if (cpu_is_offline(smp_processor_id()))
+               return;
        trace_rcu_utilization("Start RCU core");
        for_each_rcu_flavor(rsp)
                __rcu_process_callbacks(rsp);
@@ -1909,12 +1968,11 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
                        rdp->blimit = LONG_MAX;
                        if (rsp->n_force_qs == rdp->n_force_qs_snap &&
                            *rdp->nxttail[RCU_DONE_TAIL] != head)
-                               force_quiescent_state(rsp, 0);
+                               force_quiescent_state(rsp);
                        rdp->n_force_qs_snap = rsp->n_force_qs;
                        rdp->qlen_last_fqs_check = rdp->qlen;
                }
-       } else if (ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies))
-               force_quiescent_state(rsp, 1);
+       }
 }
 
 static void
@@ -2195,17 +2253,7 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
        /* Is the RCU core waiting for a quiescent state from this CPU? */
        if (rcu_scheduler_fully_active &&
            rdp->qs_pending && !rdp->passed_quiesce) {
-
-               /*
-                * If force_quiescent_state() coming soon and this CPU
-                * needs a quiescent state, and this is either RCU-sched
-                * or RCU-bh, force a local reschedule.
-                */
                rdp->n_rp_qs_pending++;
-               if (!rdp->preemptible &&
-                   ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs) - 1,
-                                jiffies))
-                       set_need_resched();
        } else if (rdp->qs_pending && rdp->passed_quiesce) {
                rdp->n_rp_report_qs++;
                return 1;
@@ -2235,13 +2283,6 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
                return 1;
        }
 
-       /* Has an RCU GP gone long enough to send resched IPIs &c? */
-       if (rcu_gp_in_progress(rsp) &&
-           ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies)) {
-               rdp->n_rp_need_fqs++;
-               return 1;
-       }
-
        /* nothing to do */
        rdp->n_rp_need_nothing++;
        return 0;
@@ -2555,7 +2596,6 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
                        rdp->completed = rnp->completed;
                        rdp->passed_quiesce = 0;
                        rdp->qs_pending = 0;
-                       rdp->passed_quiesce_gpnum = rnp->gpnum - 1;
                        trace_rcu_grace_period(rsp->name, rdp->gpnum, "cpuonl");
                }
                raw_spin_unlock(&rnp->lock); /* irqs already disabled. */
@@ -2626,6 +2666,28 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
        return NOTIFY_OK;
 }
 
+/*
+ * Spawn the kthread that handles this RCU flavor's grace periods.
+ */
+static int __init rcu_spawn_gp_kthread(void)
+{
+       unsigned long flags;
+       struct rcu_node *rnp;
+       struct rcu_state *rsp;
+       struct task_struct *t;
+
+       for_each_rcu_flavor(rsp) {
+               t = kthread_run(rcu_gp_kthread, rsp, rsp->name);
+               BUG_ON(IS_ERR(t));
+               rnp = rcu_get_root(rsp);
+               raw_spin_lock_irqsave(&rnp->lock, flags);
+               rsp->gp_kthread = t;
+               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+       }
+       return 0;
+}
+early_initcall(rcu_spawn_gp_kthread);
+
 /*
  * This function is invoked towards the end of the scheduler's initialization
  * process.  Before this is called, the idle task might contain
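Note: spawning from an early_initcall() means the grace-period kthreads exist shortly after the scheduler can run kthreads, but not from the very start of boot; the !rsp->gp_kthread check added to rcu_start_gp() covers that early window. Each kthread is named from rsp->name ("rcu_sched", "rcu_bh", and presumably "rcu_preempt" when CONFIG_TREE_PREEMPT_RCU=y), so they should be visible under those names in the process list on a running system.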
@@ -2676,10 +2738,14 @@ static void __init rcu_init_levelspread(struct rcu_state *rsp)
 static void __init rcu_init_one(struct rcu_state *rsp,
                struct rcu_data __percpu *rda)
 {
-       static char *buf[] = { "rcu_node_level_0",
-                              "rcu_node_level_1",
-                              "rcu_node_level_2",
-                              "rcu_node_level_3" };  /* Match MAX_RCU_LVLS */
+       static char *buf[] = { "rcu_node_0",
+                              "rcu_node_1",
+                              "rcu_node_2",
+                              "rcu_node_3" };  /* Match MAX_RCU_LVLS */
+       static char *fqs[] = { "rcu_node_fqs_0",
+                              "rcu_node_fqs_1",
+                              "rcu_node_fqs_2",
+                              "rcu_node_fqs_3" };  /* Match MAX_RCU_LVLS */
        int cpustride = 1;
        int i;
        int j;
@@ -2704,7 +2770,11 @@ static void __init rcu_init_one(struct rcu_state *rsp,
                        raw_spin_lock_init(&rnp->lock);
                        lockdep_set_class_and_name(&rnp->lock,
                                                   &rcu_node_class[i], buf[i]);
-                       rnp->gpnum = 0;
+                       raw_spin_lock_init(&rnp->fqslock);
+                       lockdep_set_class_and_name(&rnp->fqslock,
+                                                  &rcu_fqs_class[i], fqs[i]);
+                       rnp->gpnum = rsp->gpnum;
+                       rnp->completed = rsp->completed;
                        rnp->qsmask = 0;
                        rnp->qsmaskinit = 0;
                        rnp->grplo = j * cpustride;
@@ -2727,6 +2797,7 @@ static void __init rcu_init_one(struct rcu_state *rsp,
        }
 
        rsp->rda = rda;
+       init_waitqueue_head(&rsp->gp_wq);
        rnp = rsp->level[rcu_num_lvls - 1];
        for_each_possible_cpu(i) {
                while (i > rnp->grphi)