Merge branches 'doc.2013.09.25b' and 'fixes.2013.09.23b' into HEAD
doc.2013.09.25b: Topic branch for documentation updates.
fixes.2013.09.23b: Topic branch for miscellaneous fixes.
commit 460aebac73
7 changed files with 119 additions and 51 deletions

include/linux/rculist.h

@@ -18,6 +18,21 @@
  * be used anywhere you would want to use a list_empty_rcu().
  */
 
+/*
+ * INIT_LIST_HEAD_RCU - Initialize a list_head visible to RCU readers
+ * @list: list to be initialized
+ *
+ * You should instead use INIT_LIST_HEAD() for normal initialization and
+ * cleanup tasks, when readers have no access to the list being initialized.
+ * However, if the list being initialized is visible to readers, you
+ * need to keep the compiler from being too mischievous.
+ */
+static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
+{
+	ACCESS_ONCE(list->next) = list;
+	ACCESS_ONCE(list->prev) = list;
+}
+
 /*
  * return the ->next pointer of a list_head in an rcu safe
  * way, we must not access it directly
@@ -191,9 +206,13 @@ static inline void list_splice_init_rcu(struct list_head *list,
 	if (list_empty(list))
 		return;
 
-	/* "first" and "last" tracking list, so initialize it. */
+	/*
+	 * "first" and "last" tracking list, so initialize it.  RCU readers
+	 * have access to this list, so we must use INIT_LIST_HEAD_RCU()
+	 * instead of INIT_LIST_HEAD().
+	 */
 
-	INIT_LIST_HEAD(list);
+	INIT_LIST_HEAD_RCU(list);
 
 	/*
 	 * At this point, the list body still points to the source list.
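
Why the ACCESS_ONCE() stores matter: a plain INIT_LIST_HEAD() gives the compiler license to reorder, repeat, or tear the two pointer assignments, which can send a concurrent RCU reader through a half-initialized list. A minimal sketch of the intended usage; the surrounding function is hypothetical, not part of this commit:

	/* Hypothetical writer re-initializing a list that RCU readers
	 * may still be traversing under rcu_read_lock(). */
	static void reset_gadget_list(struct list_head *head)
	{
		/*
		 * INIT_LIST_HEAD() would be wrong here: readers can
		 * observe ->next and ->prev mid-update unless each is
		 * written with a single volatile store, which
		 * ACCESS_ONCE() enforces.
		 */
		INIT_LIST_HEAD_RCU(head);
	}

Readers keep using the usual list_for_each_entry_rcu() loop; only the initialization side changes.
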

kernel/rcu.h

@@ -122,4 +122,11 @@ int rcu_jiffies_till_stall_check(void);
 
 #endif /* #ifdef CONFIG_RCU_STALL_COMMON */
 
+/*
+ * Strings used in tracepoints need to be exported via the
+ * tracing system such that tools like perf and trace-cmd can
+ * translate the string address pointers to actual text.
+ */
+#define TPS(x) tracepoint_string(x)
+
 #endif /* __LINUX_RCU_H */

kernel/rcupdate.c

@@ -298,7 +298,7 @@ EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
 #endif
 
 int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */
-int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;
+static int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;
 
 module_param(rcu_cpu_stall_suppress, int, 0644);
 module_param(rcu_cpu_stall_timeout, int, 0644);
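
Narrowing rcu_cpu_stall_timeout to static costs nothing here, because module_param() only needs file-local visibility; the variable stays tunable through the module parameters directory in sysfs. A small sketch of the idiom, with a hypothetical variable name:

	/* Hypothetical example: a static can still back a module
	 * parameter, visible under /sys/module/.../parameters/. */
	static int my_timeout = 42;
	module_param(my_timeout, int, 0644);
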

kernel/rcutiny.c

@@ -35,6 +35,7 @@
 #include <linux/time.h>
 #include <linux/cpu.h>
 #include <linux/prefetch.h>
+#include <linux/ftrace_event.h>
 
 #ifdef CONFIG_RCU_TRACE
 #include <trace/events/rcu.h>
@@ -58,16 +59,17 @@ static long long rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
 static void rcu_idle_enter_common(long long newval)
 {
 	if (newval) {
-		RCU_TRACE(trace_rcu_dyntick("--=",
+		RCU_TRACE(trace_rcu_dyntick(TPS("--="),
 					    rcu_dynticks_nesting, newval));
 		rcu_dynticks_nesting = newval;
 		return;
 	}
-	RCU_TRACE(trace_rcu_dyntick("Start", rcu_dynticks_nesting, newval));
+	RCU_TRACE(trace_rcu_dyntick(TPS("Start"),
+				    rcu_dynticks_nesting, newval));
 	if (!is_idle_task(current)) {
 		struct task_struct *idle = idle_task(smp_processor_id());
 
-		RCU_TRACE(trace_rcu_dyntick("Error on entry: not idle task",
+		RCU_TRACE(trace_rcu_dyntick(TPS("Entry error: not idle task"),
 					    rcu_dynticks_nesting, newval));
 		ftrace_dump(DUMP_ALL);
 		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
@@ -120,15 +122,15 @@ EXPORT_SYMBOL_GPL(rcu_irq_exit);
 static void rcu_idle_exit_common(long long oldval)
 {
 	if (oldval) {
-		RCU_TRACE(trace_rcu_dyntick("++=",
+		RCU_TRACE(trace_rcu_dyntick(TPS("++="),
 					    oldval, rcu_dynticks_nesting));
 		return;
 	}
-	RCU_TRACE(trace_rcu_dyntick("End", oldval, rcu_dynticks_nesting));
+	RCU_TRACE(trace_rcu_dyntick(TPS("End"), oldval, rcu_dynticks_nesting));
 	if (!is_idle_task(current)) {
 		struct task_struct *idle = idle_task(smp_processor_id());
 
-		RCU_TRACE(trace_rcu_dyntick("Error on exit: not idle task",
+		RCU_TRACE(trace_rcu_dyntick(TPS("Exit error: not idle task"),
 					    oldval, rcu_dynticks_nesting));
 		ftrace_dump(DUMP_ALL);
 		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
@@ -304,7 +306,8 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
 		RCU_TRACE(cb_count++);
 	}
 	RCU_TRACE(rcu_trace_sub_qlen(rcp, cb_count));
-	RCU_TRACE(trace_rcu_batch_end(rcp->name, cb_count, 0, need_resched(),
+	RCU_TRACE(trace_rcu_batch_end(rcp->name,
+				      cb_count, 0, need_resched(),
 				      is_idle_task(current),
 				      false));
 }

kernel/rcutree.c

@@ -61,13 +61,6 @@
 
 #include "rcu.h"
 
-/*
- * Strings used in tracepoints need to be exported via the
- * tracing system such that tools like perf and trace-cmd can
- * translate the string address pointers to actual text.
- */
-#define TPS(x) tracepoint_string(x)
-
 /* Data structures. */
 
 static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
@@ -222,7 +215,7 @@ void rcu_note_context_switch(int cpu)
 }
 EXPORT_SYMBOL_GPL(rcu_note_context_switch);
 
-DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
+static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
 	.dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
 	.dynticks = ATOMIC_INIT(1),
 #ifdef CONFIG_NO_HZ_FULL_SYSIDLE
@@ -371,7 +364,8 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
 {
 	trace_rcu_dyntick(TPS("Start"), oldval, rdtp->dynticks_nesting);
 	if (!user && !is_idle_task(current)) {
-		struct task_struct *idle = idle_task(smp_processor_id());
+		struct task_struct *idle __maybe_unused =
+			idle_task(smp_processor_id());
 
 		trace_rcu_dyntick(TPS("Error on entry: not idle task"), oldval, 0);
 		ftrace_dump(DUMP_ORIG);
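
The __maybe_unused annotation acknowledges that, in kernel configurations where the subsequent diagnostics compile down to nothing, idle has no remaining user and would otherwise draw a compiler warning. A generic sketch of the idiom, with hypothetical names:

	static void report_state(void)		/* hypothetical */
	{
		/* Used only by a macro that may expand to nothing, so
		 * tell the compiler the variable is intentionally
		 * expendable rather than a leftover. */
		int details __maybe_unused = 0;

		DEBUG_TRACE("state: %d", details);	/* hypothetical macro */
	}
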
@@ -407,7 +401,7 @@ static void rcu_eqs_enter(bool user)
 	long long oldval;
 	struct rcu_dynticks *rdtp;
 
-	rdtp = &__get_cpu_var(rcu_dynticks);
+	rdtp = this_cpu_ptr(&rcu_dynticks);
 	oldval = rdtp->dynticks_nesting;
 	WARN_ON_ONCE((oldval & DYNTICK_TASK_NEST_MASK) == 0);
 	if ((oldval & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE)
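
The recurring substitution in this file deserves one note: &__get_cpu_var(var) and this_cpu_ptr(&var) compute the same per-CPU address, but the latter is the modern accessor (the __get_cpu_var() family was later removed from the kernel entirely). For loads of a per-CPU scalar, __this_cpu_read() is the matching replacement. Side by side:

	/* Old and new spellings of "pointer to this CPU's instance": */
	rdtp = &__get_cpu_var(rcu_dynticks);	/* old, since removed upstream */
	rdtp = this_cpu_ptr(&rcu_dynticks);	/* new */

	/* And for a plain read of a per-CPU scalar: */
	nesting = __get_cpu_var(rcu_dynticks).dynticks_nesting;	/* old */
	nesting = __this_cpu_read(rcu_dynticks.dynticks_nesting);	/* new */
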
@@ -435,7 +429,7 @@ void rcu_idle_enter(void)
 
 	local_irq_save(flags);
 	rcu_eqs_enter(false);
-	rcu_sysidle_enter(&__get_cpu_var(rcu_dynticks), 0);
+	rcu_sysidle_enter(this_cpu_ptr(&rcu_dynticks), 0);
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(rcu_idle_enter);
@@ -478,7 +472,7 @@ void rcu_irq_exit(void)
 	struct rcu_dynticks *rdtp;
 
 	local_irq_save(flags);
-	rdtp = &__get_cpu_var(rcu_dynticks);
+	rdtp = this_cpu_ptr(&rcu_dynticks);
 	oldval = rdtp->dynticks_nesting;
 	rdtp->dynticks_nesting--;
 	WARN_ON_ONCE(rdtp->dynticks_nesting < 0);
@@ -508,7 +502,8 @@ static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
 	rcu_cleanup_after_idle(smp_processor_id());
 	trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
 	if (!user && !is_idle_task(current)) {
-		struct task_struct *idle = idle_task(smp_processor_id());
+		struct task_struct *idle __maybe_unused =
+			idle_task(smp_processor_id());
 
 		trace_rcu_dyntick(TPS("Error on exit: not idle task"),
 				  oldval, rdtp->dynticks_nesting);
@@ -528,7 +523,7 @@ static void rcu_eqs_exit(bool user)
 	struct rcu_dynticks *rdtp;
 	long long oldval;
 
-	rdtp = &__get_cpu_var(rcu_dynticks);
+	rdtp = this_cpu_ptr(&rcu_dynticks);
 	oldval = rdtp->dynticks_nesting;
 	WARN_ON_ONCE(oldval < 0);
 	if (oldval & DYNTICK_TASK_NEST_MASK)
@@ -555,7 +550,7 @@ void rcu_idle_exit(void)
 
 	local_irq_save(flags);
 	rcu_eqs_exit(false);
-	rcu_sysidle_exit(&__get_cpu_var(rcu_dynticks), 0);
+	rcu_sysidle_exit(this_cpu_ptr(&rcu_dynticks), 0);
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(rcu_idle_exit);
@@ -599,7 +594,7 @@ void rcu_irq_enter(void)
 	long long oldval;
 
 	local_irq_save(flags);
-	rdtp = &__get_cpu_var(rcu_dynticks);
+	rdtp = this_cpu_ptr(&rcu_dynticks);
 	oldval = rdtp->dynticks_nesting;
 	rdtp->dynticks_nesting++;
 	WARN_ON_ONCE(rdtp->dynticks_nesting == 0);
@@ -620,7 +615,7 @@ void rcu_irq_enter(void)
  */
 void rcu_nmi_enter(void)
 {
-	struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
+	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 
 	if (rdtp->dynticks_nmi_nesting == 0 &&
 	    (atomic_read(&rdtp->dynticks) & 0x1))
@@ -642,7 +637,7 @@ void rcu_nmi_enter(void)
  */
 void rcu_nmi_exit(void)
 {
-	struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
+	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 
 	if (rdtp->dynticks_nmi_nesting == 0 ||
 	    --rdtp->dynticks_nmi_nesting != 0)
@@ -665,7 +660,7 @@ int rcu_is_cpu_idle(void)
 	int ret;
 
 	preempt_disable();
-	ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
+	ret = (atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1) == 0;
 	preempt_enable();
 	return ret;
 }
@@ -703,7 +698,7 @@ bool rcu_lockdep_current_cpu_online(void)
 	if (in_nmi())
 		return 1;
 	preempt_disable();
-	rdp = &__get_cpu_var(rcu_sched_data);
+	rdp = this_cpu_ptr(&rcu_sched_data);
 	rnp = rdp->mynode;
 	ret = (rdp->grpmask & rnp->qsmaskinit) ||
 	      !rcu_scheduler_fully_active;
@@ -723,7 +718,7 @@ EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
  */
 static int rcu_is_cpu_rrupt_from_idle(void)
 {
-	return __get_cpu_var(rcu_dynticks).dynticks_nesting <= 1;
+	return __this_cpu_read(rcu_dynticks.dynticks_nesting) <= 1;
 }
 
 /*
@@ -802,8 +797,11 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
 
 static void record_gp_stall_check_time(struct rcu_state *rsp)
 {
-	rsp->gp_start = jiffies;
-	rsp->jiffies_stall = jiffies + rcu_jiffies_till_stall_check();
+	unsigned long j = ACCESS_ONCE(jiffies);
+
+	rsp->gp_start = j;
+	smp_wmb(); /* Record start time before stall time. */
+	rsp->jiffies_stall = j + rcu_jiffies_till_stall_check();
 }
 
 /*
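
The new smp_wmb() orders the store to ->gp_start before the store to ->jiffies_stall; it pairs with the smp_rmb() calls added to check_cpu_stall() in the next hunk, which read the fields in the reverse order. The general pairing pattern, as a standalone sketch with hypothetical variables:

	/* Writer side: publish the data, then the flag announcing it. */
	data = compute();	/* hypothetical */
	smp_wmb();		/* order data before flag */
	flag = 1;

	/* Reader side: if the flag is seen, the data must be too. */
	if (flag) {
		smp_rmb();	/* pairs with the writer's smp_wmb() */
		use(data);	/* guaranteed to see the new data */
	}
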
@@ -932,17 +930,48 @@ static void print_cpu_stall(struct rcu_state *rsp)
 
 static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
 {
+	unsigned long completed;
+	unsigned long gpnum;
+	unsigned long gps;
 	unsigned long j;
 	unsigned long js;
 	struct rcu_node *rnp;
 
-	if (rcu_cpu_stall_suppress)
+	if (rcu_cpu_stall_suppress || !rcu_gp_in_progress(rsp))
 		return;
 	j = ACCESS_ONCE(jiffies);
+
+	/*
+	 * Lots of memory barriers to reject false positives.
+	 *
+	 * The idea is to pick up rsp->gpnum, then rsp->jiffies_stall,
+	 * then rsp->gp_start, and finally rsp->completed.  These values
+	 * are updated in the opposite order with memory barriers (or
+	 * equivalent) during grace-period initialization and cleanup.
+	 * Now, a false positive can occur if we get a new value of
+	 * rsp->gp_start and an old value of rsp->jiffies_stall.  But given
+	 * the memory barriers, the only way that this can happen is if one
+	 * grace period ends and another starts between these two fetches.
+	 * Detect this by comparing rsp->completed with the previous fetch
+	 * from rsp->gpnum.
+	 *
+	 * Given this check, comparisons of jiffies, rsp->jiffies_stall,
+	 * and rsp->gp_start suffice to forestall false positives.
+	 */
+	gpnum = ACCESS_ONCE(rsp->gpnum);
+	smp_rmb(); /* Pick up ->gpnum first... */
 	js = ACCESS_ONCE(rsp->jiffies_stall);
+	smp_rmb(); /* ...then ->jiffies_stall before the rest... */
+	gps = ACCESS_ONCE(rsp->gp_start);
+	smp_rmb(); /* ...and finally ->gp_start before ->completed. */
+	completed = ACCESS_ONCE(rsp->completed);
+	if (ULONG_CMP_GE(completed, gpnum) ||
+	    ULONG_CMP_LT(j, js) ||
+	    ULONG_CMP_GE(gps, js))
+		return; /* No stall or GP completed since entering function. */
 	rnp = rdp->mynode;
 	if (rcu_gp_in_progress(rsp) &&
-	    (ACCESS_ONCE(rnp->qsmask) & rdp->grpmask) && ULONG_CMP_GE(j, js)) {
+	    (ACCESS_ONCE(rnp->qsmask) & rdp->grpmask)) {
 
 		/* We haven't checked in, so go dump stack. */
 		print_cpu_stall(rsp);
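
Since jiffies and the grace-period counters wrap around, the new checks go through ULONG_CMP_GE()/ULONG_CMP_LT() rather than bare relational operators. If memory serves, these are defined in include/linux/rcupdate.h along the lines of the time_after() helpers for jiffies:

	/* Wraparound-safe comparisons: subtract first, then test which
	 * half of the unsigned range the difference lands in. */
	#define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))
	#define ULONG_CMP_LT(a, b)	(ULONG_MAX / 2 < (a) - (b))

So, for example, ULONG_CMP_LT(j, js) stays correct even when jiffies has wrapped between the two fetches.
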
@@ -1315,9 +1344,10 @@ static int rcu_gp_init(struct rcu_state *rsp)
 	}
 
 	/* Advance to a new grace period and initialize state. */
+	record_gp_stall_check_time(rsp);
+	smp_wmb(); /* Record GP times before starting GP. */
 	rsp->gpnum++;
 	trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start"));
-	record_gp_stall_check_time(rsp);
 	raw_spin_unlock_irq(&rnp->lock);
 
 	/* Exclude any concurrent CPU-hotplug operations. */
@@ -1366,7 +1396,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
 /*
  * Do one round of quiescent-state forcing.
  */
-int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
+static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
 {
 	int fqs_state = fqs_state_in;
 	bool isidle = false;
@@ -1452,7 +1482,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 	rdp = this_cpu_ptr(rsp->rda);
 	rcu_advance_cbs(rsp, rnp, rdp); /* Reduce false positives below. */
 	if (cpu_needs_another_gp(rsp, rdp))
-		rsp->gp_flags = 1;
+		rsp->gp_flags = RCU_GP_FLAG_INIT;
 	raw_spin_unlock_irq(&rnp->lock);
 }
 
@@ -2725,10 +2755,13 @@ static int rcu_cpu_has_callbacks(int cpu, bool *all_lazy)
 
 	for_each_rcu_flavor(rsp) {
 		rdp = per_cpu_ptr(rsp->rda, cpu);
-		if (rdp->qlen != rdp->qlen_lazy)
+		if (!rdp->nxtlist)
+			continue;
+		hc = true;
+		if (rdp->qlen != rdp->qlen_lazy || !all_lazy) {
 			al = false;
-		if (rdp->nxtlist)
-			hc = true;
+			break;
+		}
 	}
 	if (all_lazy)
 		*all_lazy = al;
@@ -3295,8 +3328,8 @@ void __init rcu_init(void)
 
 	rcu_bootup_announce();
 	rcu_init_geometry();
-	rcu_init_one(&rcu_sched_state, &rcu_sched_data);
 	rcu_init_one(&rcu_bh_state, &rcu_bh_data);
+	rcu_init_one(&rcu_sched_state, &rcu_sched_data);
 	__rcu_init_preempt();
 	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
 

kernel/rcutree_plugin.h

@@ -96,10 +96,15 @@ static void __init rcu_bootup_announce_oddness(void)
 #endif /* #ifdef CONFIG_RCU_NOCB_CPU_ZERO */
 #ifdef CONFIG_RCU_NOCB_CPU_ALL
 	pr_info("\tOffload RCU callbacks from all CPUs\n");
-	cpumask_setall(rcu_nocb_mask);
+	cpumask_copy(rcu_nocb_mask, cpu_possible_mask);
 #endif /* #ifdef CONFIG_RCU_NOCB_CPU_ALL */
 #endif /* #ifndef CONFIG_RCU_NOCB_CPU_NONE */
 	if (have_rcu_nocb_mask) {
+		if (!cpumask_subset(rcu_nocb_mask, cpu_possible_mask)) {
+			pr_info("\tNote: kernel parameter 'rcu_nocbs=' contains nonexistent CPUs.\n");
+			cpumask_and(rcu_nocb_mask, cpu_possible_mask,
+				    rcu_nocb_mask);
+		}
 		cpulist_scnprintf(nocb_buf, sizeof(nocb_buf), rcu_nocb_mask);
 		pr_info("\tOffload RCU callbacks from CPUs: %s.\n", nocb_buf);
 		if (rcu_nocb_poll)
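
The boot-parameter mask from rcu_nocbs= can name CPUs the running system does not have, so the new code clamps it to cpu_possible_mask before use; the cpumask_setall() replacement in the same hunk avoids setting those impossible bits in the first place. The core idiom, with a hypothetical mask:

	/* Keep only bits corresponding to CPUs this system can have. */
	if (!cpumask_subset(user_mask, cpu_possible_mask))	/* stray bits? */
		cpumask_and(user_mask, user_mask, cpu_possible_mask);
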
@@ -660,7 +665,7 @@ static void rcu_preempt_check_callbacks(int cpu)
 
 static void rcu_preempt_do_callbacks(void)
 {
-	rcu_do_batch(&rcu_preempt_state, &__get_cpu_var(rcu_preempt_data));
+	rcu_do_batch(&rcu_preempt_state, this_cpu_ptr(&rcu_preempt_data));
 }
 
 #endif /* #ifdef CONFIG_RCU_BOOST */
@@ -1332,7 +1337,7 @@ static void invoke_rcu_callbacks_kthread(void)
  */
 static bool rcu_is_callbacks_kthread(void)
 {
-	return __get_cpu_var(rcu_cpu_kthread_task) == current;
+	return __this_cpu_read(rcu_cpu_kthread_task) == current;
 }
 
 #define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)
@@ -1382,8 +1387,8 @@ static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 
 static void rcu_kthread_do_work(void)
 {
-	rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data));
-	rcu_do_batch(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
+	rcu_do_batch(&rcu_sched_state, this_cpu_ptr(&rcu_sched_data));
+	rcu_do_batch(&rcu_bh_state, this_cpu_ptr(&rcu_bh_data));
 	rcu_preempt_do_callbacks();
 }
 
@@ -1402,7 +1407,7 @@ static void rcu_cpu_kthread_park(unsigned int cpu)
 
 static int rcu_cpu_kthread_should_run(unsigned int cpu)
 {
-	return __get_cpu_var(rcu_cpu_has_work);
+	return __this_cpu_read(rcu_cpu_has_work);
 }
 
 /*
@@ -1412,8 +1417,8 @@ static int rcu_cpu_kthread_should_run(unsigned int cpu)
  */
 static void rcu_cpu_kthread(unsigned int cpu)
 {
-	unsigned int *statusp = &__get_cpu_var(rcu_cpu_kthread_status);
-	char work, *workp = &__get_cpu_var(rcu_cpu_has_work);
+	unsigned int *statusp = this_cpu_ptr(&rcu_cpu_kthread_status);
+	char work, *workp = this_cpu_ptr(&rcu_cpu_has_work);
 	int spincnt;
 
 	for (spincnt = 0; spincnt < 10; spincnt++) {
@@ -2108,7 +2113,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
 
 	/* If we are not being polled and there is a kthread, awaken it ... */
 	t = ACCESS_ONCE(rdp->nocb_kthread);
-	if (rcu_nocb_poll | !t)
+	if (rcu_nocb_poll || !t)
 		return;
 	len = atomic_long_read(&rdp->nocb_q_count);
 	if (old_rhpp == &rdp->nocb_head) {
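
In `rcu_nocb_poll | !t` the bitwise OR happens to yield the right truth value, since both operands are 0 or 1, so this is a readability and short-circuit fix rather than a behavioral one: `||` stops evaluating once the left-hand side is true. A standalone C illustration:

	#include <stdio.h>

	static int probe(void)
	{
		puts("probe() evaluated");	/* side effect makes the difference visible */
		return 0;
	}

	int main(void)
	{
		if (1 | probe())	/* bitwise OR: probe() always runs */
			puts("bitwise OR taken");
		if (1 || probe())	/* logical OR: probe() is skipped */
			puts("logical OR taken");
		return 0;
	}
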

mm/mlock.c

@@ -736,6 +736,7 @@ static int do_mlockall(int flags)
 
 		/* Ignore errors */
 		mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
+		cond_resched();
 	}
 out:
 	return 0;
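
A process can have a very long VMA list, and on non-preemptible kernels this loop would otherwise hold the CPU until it finishes; cond_resched() inserts a voluntary preemption point on each iteration. The general shape of the fix, with hypothetical loop contents:

	/* Long kernel-space loop: yield periodically on !CONFIG_PREEMPT. */
	list_for_each_entry(item, &long_list, node) {	/* hypothetical list */
		process(item);				/* hypothetical work */
		cond_resched();	/* reschedules only if another task is due */
	}
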