sched, timer: Convert usages of ACCESS_ONCE() in the scheduler to READ_ONCE()/WRITE_ONCE()
author     Jason Low <jason.low2@hp.com>
           Tue, 28 Apr 2015 20:00:20 +0000 (13:00 -0700)
committer  Ingo Molnar <mingo@kernel.org>
           Fri, 8 May 2015 10:11:32 +0000 (12:11 +0200)
ACCESS_ONCE() doesn't work reliably on non-scalar types. This patch
removes the remaining usages of ACCESS_ONCE() in the scheduler and uses
the new READ_ONCE() and WRITE_ONCE() APIs as appropriate.
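
For background on why the conversion matters: ACCESS_ONCE() is just a
volatile cast, and GCC 4.6/4.7 can drop the volatile qualifier when the
cast-to type is non-scalar (e.g. a wrapper struct such as pte_t),
silently losing the access-once guarantee. READ_ONCE()/WRITE_ONCE()
instead route the access through a size-switched helper, so scalar-sized
objects keep getting a single volatile load/store regardless of their
declared type. Below is a simplified sketch of the read side, not the
verbatim source; the real definitions live in include/linux/compiler.h
and also handle sizes 1, 2 and 8 as well as the write side:

    #define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

    static __always_inline
    void __read_once_size(volatile void *p, void *res, int size)
    {
            switch (size) {
            case 4:
                    /* scalar-sized object: one volatile load */
                    *(__u32 *)res = *(volatile __u32 *)p;
                    break;
            default:
                    /* non-scalar sizes: copy bracketed by barriers */
                    barrier();
                    __builtin_memcpy((void *)res, (void *)p, size);
                    barrier();
            }
    }

    #define READ_ONCE(x)                                        \
    ({                                                          \
            union { typeof(x) __val; char __c[1]; } __u;        \
            __read_once_size(&(x), __u.__c, sizeof(x));         \
            __u.__val;                                          \
    })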

Signed-off-by: Jason Low <jason.low2@hp.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Rik van Riel <riel@redhat.com>
Acked-by: Waiman Long <Waiman.Long@hp.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Aswin Chandramouleeswaran <aswin@hp.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Mike Galbraith <umgwanakikbuti@gmail.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Preeti U Murthy <preeti@linux.vnet.ibm.com>
Cc: Scott J Norton <scott.norton@hp.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Link: http://lkml.kernel.org/r/1430251224-5764-2-git-send-email-jason.low2@hp.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
12 files changed:
include/linux/sched.h
kernel/fork.c
kernel/sched/auto_group.c
kernel/sched/auto_group.h
kernel/sched/core.c
kernel/sched/cputime.c
kernel/sched/deadline.c
kernel/sched/fair.c
kernel/sched/rt.c
kernel/sched/sched.h
kernel/sched/wait.c
kernel/time/posix-cpu-timers.c

diff --git a/include/linux/sched.h b/include/linux/sched.h
index fb650a2f4a73ba110d72d9d23b5eab767a757d8d..d70910355b20a93c88822098b810c51b8f4a33f4 100644
@@ -3085,13 +3085,13 @@ static inline void mm_update_next_owner(struct mm_struct *mm)
 static inline unsigned long task_rlimit(const struct task_struct *tsk,
                unsigned int limit)
 {
-       return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
+       return READ_ONCE(tsk->signal->rlim[limit].rlim_cur);
 }
 
 static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
                unsigned int limit)
 {
-       return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max);
+       return READ_ONCE(tsk->signal->rlim[limit].rlim_max);
 }
 
 static inline unsigned long rlimit(unsigned int limit)
diff --git a/kernel/fork.c b/kernel/fork.c
index 03c1eaaa6ef56f56a670488eaf572eb8c6f58d4e..47c37a411a620163b215a94b32f852572d7ef4b9 100644
@@ -1094,7 +1094,7 @@ static void posix_cpu_timers_init_group(struct signal_struct *sig)
        /* Thread group counters. */
        thread_group_cputime_init(sig);
 
-       cpu_limit = ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
+       cpu_limit = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
        if (cpu_limit != RLIM_INFINITY) {
                sig->cputime_expires.prof_exp = secs_to_cputime(cpu_limit);
                sig->cputimer.running = 1;
diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
index 1a3b58d531b26a0ff0e32d0c09390a292dbf5012..750ed601ddf78e6dcdc5f10818c98b34b5feea3a 100644
@@ -139,7 +139,7 @@ autogroup_move_group(struct task_struct *p, struct autogroup *ag)
 
        p->signal->autogroup = autogroup_kref_get(ag);
 
-       if (!ACCESS_ONCE(sysctl_sched_autogroup_enabled))
+       if (!READ_ONCE(sysctl_sched_autogroup_enabled))
                goto out;
 
        for_each_thread(p, t)
diff --git a/kernel/sched/auto_group.h b/kernel/sched/auto_group.h
index 8bd047142816dea81894bb27ccc3c78a38ac3d61..890c95f2587a4d8c530c1a5df69eef8a65e5eaf7 100644
@@ -29,7 +29,7 @@ extern bool task_wants_autogroup(struct task_struct *p, struct task_group *tg);
 static inline struct task_group *
 autogroup_task_group(struct task_struct *p, struct task_group *tg)
 {
-       int enabled = ACCESS_ONCE(sysctl_sched_autogroup_enabled);
+       int enabled = READ_ONCE(sysctl_sched_autogroup_enabled);
 
        if (enabled && task_wants_autogroup(p, tg))
                return p->signal->autogroup->tg;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 46a5d6f0520833a27226b3cd870646203c38ebe5..22b53c863ef3f9606788852fa73c794d090cc88e 100644
@@ -511,7 +511,7 @@ static bool set_nr_and_not_polling(struct task_struct *p)
 static bool set_nr_if_polling(struct task_struct *p)
 {
        struct thread_info *ti = task_thread_info(p);
-       typeof(ti->flags) old, val = ACCESS_ONCE(ti->flags);
+       typeof(ti->flags) old, val = READ_ONCE(ti->flags);
 
        for (;;) {
                if (!(val & _TIF_POLLING_NRFLAG))
@@ -2526,7 +2526,7 @@ void scheduler_tick(void)
 u64 scheduler_tick_max_deferment(void)
 {
        struct rq *rq = this_rq();
-       unsigned long next, now = ACCESS_ONCE(jiffies);
+       unsigned long next, now = READ_ONCE(jiffies);
 
        next = rq->last_sched_tick + HZ;
 
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 8394b1ee600c38ba6e9144a6326369b6ef0cdacd..f5a64ffad176f12b01381cb1dc2e25a05f02508d 100644
@@ -567,7 +567,7 @@ static void cputime_advance(cputime_t *counter, cputime_t new)
 {
        cputime_t old;
 
-       while (new > (old = ACCESS_ONCE(*counter)))
+       while (new > (old = READ_ONCE(*counter)))
                cmpxchg_cputime(counter, old, new);
 }
 
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 5e95145088fd37b3d07ccac66c3cd58f7effe10a..890ce951c71713ab8dead85a62e5ad8104686bc1 100644
@@ -995,7 +995,7 @@ select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
        rq = cpu_rq(cpu);
 
        rcu_read_lock();
-       curr = ACCESS_ONCE(rq->curr); /* unlocked access */
+       curr = READ_ONCE(rq->curr); /* unlocked access */
 
        /*
         * If we are dealing with a -deadline task, we must
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 4bc6013886ecaac3ed1f6b1d214142f14ae20214..d6915a038d8a08780e1331e4b6386e39e19e52b4 100644
@@ -834,7 +834,7 @@ static unsigned int task_nr_scan_windows(struct task_struct *p)
 
 static unsigned int task_scan_min(struct task_struct *p)
 {
-       unsigned int scan_size = ACCESS_ONCE(sysctl_numa_balancing_scan_size);
+       unsigned int scan_size = READ_ONCE(sysctl_numa_balancing_scan_size);
        unsigned int scan, floor;
        unsigned int windows = 1;
 
@@ -1794,7 +1794,7 @@ static void task_numa_placement(struct task_struct *p)
        u64 runtime, period;
        spinlock_t *group_lock = NULL;
 
-       seq = ACCESS_ONCE(p->mm->numa_scan_seq);
+       seq = READ_ONCE(p->mm->numa_scan_seq);
        if (p->numa_scan_seq == seq)
                return;
        p->numa_scan_seq = seq;
@@ -1938,7 +1938,7 @@ static void task_numa_group(struct task_struct *p, int cpupid, int flags,
        }
 
        rcu_read_lock();
-       tsk = ACCESS_ONCE(cpu_rq(cpu)->curr);
+       tsk = READ_ONCE(cpu_rq(cpu)->curr);
 
        if (!cpupid_match_pid(tsk, cpupid))
                goto no_join;
@@ -2107,7 +2107,7 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
 
 static void reset_ptenuma_scan(struct task_struct *p)
 {
-       ACCESS_ONCE(p->mm->numa_scan_seq)++;
+       WRITE_ONCE(p->mm->numa_scan_seq, READ_ONCE(p->mm->numa_scan_seq) + 1);
        p->mm->numa_scan_offset = 0;
 }
 
@@ -4451,7 +4451,7 @@ static void __update_cpu_load(struct rq *this_rq, unsigned long this_load,
  */
 static void update_idle_cpu_load(struct rq *this_rq)
 {
-       unsigned long curr_jiffies = ACCESS_ONCE(jiffies);
+       unsigned long curr_jiffies = READ_ONCE(jiffies);
        unsigned long load = this_rq->cfs.runnable_load_avg;
        unsigned long pending_updates;
 
@@ -4473,7 +4473,7 @@ static void update_idle_cpu_load(struct rq *this_rq)
 void update_cpu_load_nohz(void)
 {
        struct rq *this_rq = this_rq();
-       unsigned long curr_jiffies = ACCESS_ONCE(jiffies);
+       unsigned long curr_jiffies = READ_ONCE(jiffies);
        unsigned long pending_updates;
 
        if (curr_jiffies == this_rq->last_load_update_tick)
@@ -4558,7 +4558,7 @@ static unsigned long capacity_orig_of(int cpu)
 static unsigned long cpu_avg_load_per_task(int cpu)
 {
        struct rq *rq = cpu_rq(cpu);
-       unsigned long nr_running = ACCESS_ONCE(rq->cfs.h_nr_running);
+       unsigned long nr_running = READ_ONCE(rq->cfs.h_nr_running);
        unsigned long load_avg = rq->cfs.runnable_load_avg;
 
        if (nr_running)
@@ -6220,8 +6220,8 @@ static unsigned long scale_rt_capacity(int cpu)
         * Since we're reading these variables without serialization make sure
         * we read them once before doing sanity checks on them.
         */
-       age_stamp = ACCESS_ONCE(rq->age_stamp);
-       avg = ACCESS_ONCE(rq->rt_avg);
+       age_stamp = READ_ONCE(rq->age_stamp);
+       avg = READ_ONCE(rq->rt_avg);
        delta = __rq_clock_broken(rq) - age_stamp;
 
        if (unlikely(delta < 0))
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 575da76a3874a8c1b2ddd0f518e5ecea7a805262..560d2fa623c311c9aa5ad51ead007e1b27c6fa6c 100644
@@ -1323,7 +1323,7 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
        rq = cpu_rq(cpu);
 
        rcu_read_lock();
-       curr = ACCESS_ONCE(rq->curr); /* unlocked access */
+       curr = READ_ONCE(rq->curr); /* unlocked access */
 
        /*
         * If the current task on @p's runqueue is an RT task, then
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 09ed26a89f31186ae1ce8a8d9c95c59c37af4401..d85455539d5cd8424c281bf6487ee814e91708e3 100644
@@ -713,7 +713,7 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 
 static inline u64 __rq_clock_broken(struct rq *rq)
 {
-       return ACCESS_ONCE(rq->clock);
+       return READ_ONCE(rq->clock);
 }
 
 static inline u64 rq_clock(struct rq *rq)
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index 852143a79f367fb75fd73f6c60e2dcaf1ae77895..2ccec988d6b7c4c11f8355bf4e5286ac422c3fcf 100644
@@ -601,7 +601,7 @@ EXPORT_SYMBOL(bit_wait_io);
 
 __sched int bit_wait_timeout(struct wait_bit_key *word)
 {
-       unsigned long now = ACCESS_ONCE(jiffies);
+       unsigned long now = READ_ONCE(jiffies);
        if (signal_pending_state(current->state, current))
                return 1;
        if (time_after_eq(now, word->timeout))
@@ -613,7 +613,7 @@ EXPORT_SYMBOL_GPL(bit_wait_timeout);
 
 __sched int bit_wait_io_timeout(struct wait_bit_key *word)
 {
-       unsigned long now = ACCESS_ONCE(jiffies);
+       unsigned long now = READ_ONCE(jiffies);
        if (signal_pending_state(current->state, current))
                return 1;
        if (time_after_eq(now, word->timeout))
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index 0075da74abf0c5f55c823f393e96b99d79b05e13..e072d982f64cfb45db4ca8ed2922df16ff0c66d6 100644
@@ -852,10 +852,10 @@ static void check_thread_timers(struct task_struct *tsk,
        /*
         * Check for the special case thread timers.
         */
-       soft = ACCESS_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_cur);
+       soft = READ_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_cur);
        if (soft != RLIM_INFINITY) {
                unsigned long hard =
-                       ACCESS_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_max);
+                       READ_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_max);
 
                if (hard != RLIM_INFINITY &&
                    tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
@@ -958,11 +958,11 @@ static void check_process_timers(struct task_struct *tsk,
                         SIGPROF);
        check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT], &virt_expires, utime,
                         SIGVTALRM);
-       soft = ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
+       soft = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
        if (soft != RLIM_INFINITY) {
                unsigned long psecs = cputime_to_secs(ptime);
                unsigned long hard =
-                       ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_max);
+                       READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_max);
                cputime_t x;
                if (psecs >= hard) {
                        /*
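
One conversion above deserves a note: in kernel/sched/fair.c,
reset_ptenuma_scan() previously used "ACCESS_ONCE(p->mm->numa_scan_seq)++",
which worked because ACCESS_ONCE() yields a volatile lvalue. READ_ONCE()
yields an rvalue, so the increment has to be rewritten as an explicit
read/modify/write pair. A minimal sketch of the idiom with a hypothetical
counter (like the ACCESS_ONCE() version before it, this is still not an
atomic increment; it only keeps the compiler from tearing or refetching
the accesses):

    static unsigned int seq;    /* hypothetical counter */

    static void bump_seq(void)
    {
            /* ACCESS_ONCE(seq)++;   -- legal: volatile lvalue   */
            /* READ_ONCE(seq)++;     -- won't compile: rvalue    */
            WRITE_ONCE(seq, READ_ONCE(seq) + 1);
    }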