sched, timer: Convert usages of ACCESS_ONCE() in the scheduler to READ_ONCE()/WRITE_ONCE()
ACCESS_ONCE() does not work reliably on non-scalar types. This patch removes
the remaining usages of ACCESS_ONCE() in the scheduler and uses the new
READ_ONCE() and WRITE_ONCE() APIs as appropriate.

Signed-off-by: Jason Low <jason.low2@hp.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Rik van Riel <riel@redhat.com>
Acked-by: Waiman Long <Waiman.Long@hp.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Aswin Chandramouleeswaran <aswin@hp.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Mike Galbraith <umgwanakikbuti@gmail.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Preeti U Murthy <preeti@linux.vnet.ibm.com>
Cc: Scott J Norton <scott.norton@hp.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Link: http://lkml.kernel.org/r/1430251224-5764-2-git-send-email-jason.low2@hp.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 316c1608d1
parent ce2f5fe463

12 changed files with 26 additions and 26 deletions
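For background on the change: ACCESS_ONCE() is implemented as a load/store
through a volatile-qualified cast of the original lvalue, and certain GCC
releases are known to drop the volatile qualifier when the type is not
scalar, silently losing the "exactly once" guarantee. READ_ONCE() avoids
this by copying through a volatile scalar access sized to the object. The
following is a simplified userspace sketch in the spirit of the kernel's
<linux/compiler.h>, not a verbatim copy; it assumes GCC/Clang statement
expressions and inline asm:

#include <string.h>

/* Old macro: access through a volatile-qualified cast. For scalar
 * types this forces a single load/store, but for aggregate types
 * some GCC versions silently drop the volatile qualifier. */
#define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))

/* READ_ONCE-style replacement: dispatch on the object's size and
 * copy through a volatile scalar pointer; anything larger falls
 * back to memcpy between compiler barriers. */
static void __read_once_size(const volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(char *)res = *(volatile char *)p; break;
	case 2: *(short *)res = *(volatile short *)p; break;
	case 4: *(int *)res = *(volatile int *)p; break;
	case 8: *(long long *)res = *(volatile long long *)p; break;
	default:
		__asm__ __volatile__("" : : : "memory");
		memcpy(res, (const void *)p, size);
		__asm__ __volatile__("" : : : "memory");
	}
}

#define READ_ONCE(x)						 \
({								 \
	union { __typeof__(x) __val; char __c[sizeof(x)]; } __u; \
	__read_once_size(&(x), __u.__c, sizeof(x));		 \
	__u.__val;						 \
})

WRITE_ONCE() is the symmetric store-side helper, built the same way around
a __write_once_size() routine.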
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -3085,13 +3085,13 @@ static inline void mm_update_next_owner(struct mm_struct *mm)
 static inline unsigned long task_rlimit(const struct task_struct *tsk,
 		unsigned int limit)
 {
-	return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
+	return READ_ONCE(tsk->signal->rlim[limit].rlim_cur);
 }
 
 static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
 		unsigned int limit)
 {
-	return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max);
+	return READ_ONCE(tsk->signal->rlim[limit].rlim_max);
 }
 
 static inline unsigned long rlimit(unsigned int limit)
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1094,7 +1094,7 @@ static void posix_cpu_timers_init_group(struct signal_struct *sig)
 	/* Thread group counters. */
 	thread_group_cputime_init(sig);
 
-	cpu_limit = ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
+	cpu_limit = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
 	if (cpu_limit != RLIM_INFINITY) {
 		sig->cputime_expires.prof_exp = secs_to_cputime(cpu_limit);
 		sig->cputimer.running = 1;
--- a/kernel/sched/auto_group.c
+++ b/kernel/sched/auto_group.c
@@ -139,7 +139,7 @@ autogroup_move_group(struct task_struct *p, struct autogroup *ag)
 	p->signal->autogroup = autogroup_kref_get(ag);
 
-	if (!ACCESS_ONCE(sysctl_sched_autogroup_enabled))
+	if (!READ_ONCE(sysctl_sched_autogroup_enabled))
 		goto out;
 
 	for_each_thread(p, t)
 		sched_move_task(t);
--- a/kernel/sched/auto_group.h
+++ b/kernel/sched/auto_group.h
@@ -29,7 +29,7 @@ extern bool task_wants_autogroup(struct task_struct *p, struct task_group *tg);
 static inline struct task_group *
 autogroup_task_group(struct task_struct *p, struct task_group *tg)
 {
-	int enabled = ACCESS_ONCE(sysctl_sched_autogroup_enabled);
+	int enabled = READ_ONCE(sysctl_sched_autogroup_enabled);
 
 	if (enabled && task_wants_autogroup(p, tg))
 		return p->signal->autogroup->tg;
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -511,7 +511,7 @@ static bool set_nr_and_not_polling(struct task_struct *p)
 static bool set_nr_if_polling(struct task_struct *p)
 {
 	struct thread_info *ti = task_thread_info(p);
-	typeof(ti->flags) old, val = ACCESS_ONCE(ti->flags);
+	typeof(ti->flags) old, val = READ_ONCE(ti->flags);
 
 	for (;;) {
 		if (!(val & _TIF_POLLING_NRFLAG))
@@ -2526,7 +2526,7 @@ void scheduler_tick(void)
 u64 scheduler_tick_max_deferment(void)
 {
 	struct rq *rq = this_rq();
-	unsigned long next, now = ACCESS_ONCE(jiffies);
+	unsigned long next, now = READ_ONCE(jiffies);
 
 	next = rq->last_sched_tick + HZ;
 
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -567,7 +567,7 @@ static void cputime_advance(cputime_t *counter, cputime_t new)
 {
 	cputime_t old;
 
-	while (new > (old = ACCESS_ONCE(*counter)))
+	while (new > (old = READ_ONCE(*counter)))
 		cmpxchg_cputime(counter, old, new);
 }
 
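The cputime_advance() hunk above is the lock-free "monotonic advance"
idiom: snapshot the counter once per attempt, and only push it forward with
cmpxchg() when the candidate value is larger. READ_ONCE() keeps the
compiler from re-reading *counter between the comparison and the cmpxchg(),
which could otherwise pair the test with a different value than the one
handed to cmpxchg(). A hedged standalone sketch of the same shape, with GCC
__atomic builtins standing in for the kernel primitives (hypothetical
helper name):

#include <stdint.h>

/* Advance *counter to 'new' only if that moves it forward. */
static void counter_advance(uint64_t *counter, uint64_t new)
{
	/* One snapshot per attempt; a plain *counter read could be
	 * refetched by the compiler between the test and the CAS. */
	uint64_t old = __atomic_load_n(counter, __ATOMIC_RELAXED);

	while (new > old) {
		/* On failure, 'old' is refreshed to the current value
		 * and the loop re-evaluates the monotonicity test. */
		if (__atomic_compare_exchange_n(counter, &old, new, 0,
						__ATOMIC_RELAXED,
						__ATOMIC_RELAXED))
			break;
	}
}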
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -995,7 +995,7 @@ select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
 	rq = cpu_rq(cpu);
 
 	rcu_read_lock();
-	curr = ACCESS_ONCE(rq->curr); /* unlocked access */
+	curr = READ_ONCE(rq->curr); /* unlocked access */
 
 	/*
 	 * If we are dealing with a -deadline task, we must
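In select_task_rq_dl() (and the matching rt.c hunk further down) the
"/* unlocked access */" comment is the point of the conversion: rq->curr is
read without holding rq->lock, under rcu_read_lock() only, so the read must
be one untorn load whose result every subsequent check shares. A small
hypothetical illustration of that snapshot-then-test pattern (stand-in
types, a __atomic builtin in place of READ_ONCE()):

struct task { int prio; };
struct rq_like { struct task *curr; };

/* Take one snapshot of a shared pointer and run every check against
 * that snapshot. With a plain (non-volatile) read the compiler may
 * reload rq->curr for each use, so the NULL test and the dereference
 * could see two different tasks. In the kernel, rcu_read_lock()
 * additionally keeps the snapshotted task's memory valid while it is
 * examined. */
static int curr_is_realtime(struct rq_like *rq)
{
	struct task *curr = __atomic_load_n(&rq->curr, __ATOMIC_RELAXED);

	return curr != 0 && curr->prio < 100; /* both use the snapshot */
}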
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -834,7 +834,7 @@ static unsigned int task_nr_scan_windows(struct task_struct *p)
 
 static unsigned int task_scan_min(struct task_struct *p)
 {
-	unsigned int scan_size = ACCESS_ONCE(sysctl_numa_balancing_scan_size);
+	unsigned int scan_size = READ_ONCE(sysctl_numa_balancing_scan_size);
 	unsigned int scan, floor;
 	unsigned int windows = 1;
 
@@ -1794,7 +1794,7 @@ static void task_numa_placement(struct task_struct *p)
 	u64 runtime, period;
 	spinlock_t *group_lock = NULL;
 
-	seq = ACCESS_ONCE(p->mm->numa_scan_seq);
+	seq = READ_ONCE(p->mm->numa_scan_seq);
 	if (p->numa_scan_seq == seq)
 		return;
 	p->numa_scan_seq = seq;
@@ -1938,7 +1938,7 @@ static void task_numa_group(struct task_struct *p, int cpupid, int flags,
 	}
 
 	rcu_read_lock();
-	tsk = ACCESS_ONCE(cpu_rq(cpu)->curr);
+	tsk = READ_ONCE(cpu_rq(cpu)->curr);
 
 	if (!cpupid_match_pid(tsk, cpupid))
 		goto no_join;
@@ -2107,7 +2107,7 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
 
 static void reset_ptenuma_scan(struct task_struct *p)
 {
-	ACCESS_ONCE(p->mm->numa_scan_seq)++;
+	WRITE_ONCE(p->mm->numa_scan_seq, READ_ONCE(p->mm->numa_scan_seq) + 1);
 	p->mm->numa_scan_offset = 0;
 }
 
@@ -4451,7 +4451,7 @@ static void __update_cpu_load(struct rq *this_rq, unsigned long this_load,
  */
 static void update_idle_cpu_load(struct rq *this_rq)
 {
-	unsigned long curr_jiffies = ACCESS_ONCE(jiffies);
+	unsigned long curr_jiffies = READ_ONCE(jiffies);
 	unsigned long load = this_rq->cfs.runnable_load_avg;
 	unsigned long pending_updates;
 
@@ -4473,7 +4473,7 @@ static void update_idle_cpu_load(struct rq *this_rq)
 void update_cpu_load_nohz(void)
 {
 	struct rq *this_rq = this_rq();
-	unsigned long curr_jiffies = ACCESS_ONCE(jiffies);
+	unsigned long curr_jiffies = READ_ONCE(jiffies);
 	unsigned long pending_updates;
 
 	if (curr_jiffies == this_rq->last_load_update_tick)
@@ -4558,7 +4558,7 @@ static unsigned long capacity_orig_of(int cpu)
 static unsigned long cpu_avg_load_per_task(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
-	unsigned long nr_running = ACCESS_ONCE(rq->cfs.h_nr_running);
+	unsigned long nr_running = READ_ONCE(rq->cfs.h_nr_running);
 	unsigned long load_avg = rq->cfs.runnable_load_avg;
 
 	if (nr_running)
@@ -6220,8 +6220,8 @@ static unsigned long scale_rt_capacity(int cpu)
 	 * Since we're reading these variables without serialization make sure
 	 * we read them once before doing sanity checks on them.
 	 */
-	age_stamp = ACCESS_ONCE(rq->age_stamp);
-	avg = ACCESS_ONCE(rq->rt_avg);
+	age_stamp = READ_ONCE(rq->age_stamp);
+	avg = READ_ONCE(rq->rt_avg);
 	delta = __rq_clock_broken(rq) - age_stamp;
 
 	if (unlikely(delta < 0))
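Of the fair.c hunks above, reset_ptenuma_scan() is the only conversion that
is not a drop-in rename: ACCESS_ONCE(x)++ worked only because the volatile
cast yields an lvalue, which READ_ONCE() does not, so the increment is
split into an explicit once-load, plain add, and once-store. A hedged
standalone illustration, with __atomic builtins standing in for
READ_ONCE()/WRITE_ONCE():

#include <stdio.h>

static unsigned long numa_scan_seq; /* stand-in for p->mm->numa_scan_seq */

int main(void)
{
	/* Old form (scalar-only, relies on the volatile cast being an
	 * lvalue):  ACCESS_ONCE(numa_scan_seq)++;
	 * New form: explicit once-load, plain add, once-store.
	 * Concurrent readers see either the old or the new value,
	 * never a torn one; a racing increment can still be lost. */
	unsigned long seq = __atomic_load_n(&numa_scan_seq, __ATOMIC_RELAXED);
	__atomic_store_n(&numa_scan_seq, seq + 1, __ATOMIC_RELAXED);

	printf("numa_scan_seq = %lu\n", numa_scan_seq);
	return 0;
}

The load and the store are each single-copy atomic, but the increment as a
whole remains racy, which is tolerable for a heuristic scan-sequence
counter; the scale_rt_capacity() hunk makes the same read-once-then-check
argument for age_stamp and rt_avg in its comment.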
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1323,7 +1323,7 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
 	rq = cpu_rq(cpu);
 
 	rcu_read_lock();
-	curr = ACCESS_ONCE(rq->curr); /* unlocked access */
+	curr = READ_ONCE(rq->curr); /* unlocked access */
 
 	/*
 	 * If the current task on @p's runqueue is an RT task, then
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -713,7 +713,7 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 
 static inline u64 __rq_clock_broken(struct rq *rq)
 {
-	return ACCESS_ONCE(rq->clock);
+	return READ_ONCE(rq->clock);
 }
 
 static inline u64 rq_clock(struct rq *rq)
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -601,7 +601,7 @@ EXPORT_SYMBOL(bit_wait_io);
 
 __sched int bit_wait_timeout(struct wait_bit_key *word)
 {
-	unsigned long now = ACCESS_ONCE(jiffies);
+	unsigned long now = READ_ONCE(jiffies);
 	if (signal_pending_state(current->state, current))
 		return 1;
 	if (time_after_eq(now, word->timeout))
@@ -613,7 +613,7 @@ EXPORT_SYMBOL_GPL(bit_wait_timeout);
 
 __sched int bit_wait_io_timeout(struct wait_bit_key *word)
 {
-	unsigned long now = ACCESS_ONCE(jiffies);
+	unsigned long now = READ_ONCE(jiffies);
 	if (signal_pending_state(current->state, current))
 		return 1;
 	if (time_after_eq(now, word->timeout))
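Both wait.c helpers snapshot jiffies for the same reason: it is a global
advanced by the timer interrupt, so reading it once means every comparison
in the function reasons about the same instant (the kernel additionally
uses time_after_eq() to stay safe across counter wraparound). A small
hypothetical sketch of why the single read matters:

static volatile unsigned long fake_jiffies; /* bumped by a timer in real code */

/* Snapshot once: every test below classifies against the same tick,
 * so the function's answer is internally consistent. With separate
 * reads of fake_jiffies, a tick arriving between the tests could make
 * the two comparisons disagree about what time it is. (Unlike the
 * kernel's time_after_eq(), plain comparisons here ignore wraparound.) */
static int deadline_phase(unsigned long deadline)
{
	unsigned long now = fake_jiffies; /* single volatile read */

	if (now < deadline)
		return 0; /* still waiting */
	if (now == deadline)
		return 1; /* exactly due */
	return 2;         /* overdue */
}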
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -852,10 +852,10 @@ static void check_thread_timers(struct task_struct *tsk,
 	/*
 	 * Check for the special case thread timers.
 	 */
-	soft = ACCESS_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_cur);
+	soft = READ_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_cur);
 	if (soft != RLIM_INFINITY) {
 		unsigned long hard =
-			ACCESS_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_max);
+			READ_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_max);
 
 		if (hard != RLIM_INFINITY &&
 		    tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
@@ -958,11 +958,11 @@ static void check_process_timers(struct task_struct *tsk,
 			 SIGPROF);
 	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT], &virt_expires, utime,
 			 SIGVTALRM);
-	soft = ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
+	soft = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
 	if (soft != RLIM_INFINITY) {
 		unsigned long psecs = cputime_to_secs(ptime);
 		unsigned long hard =
-			ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_max);
+			READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_max);
 		cputime_t x;
 		if (psecs >= hard) {
 			/*