rcu: delete __cpuinit usage from all rcu files
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications.  The fix in commit
5e427ec2d0 ("x86: Fix bit corruption at CPU resume time") is a good
example of the nasty kind of bug that can be created by improper use
of the various __init prefixes.
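For background, the mechanism being removed worked roughly as follows
(a simplified sketch, not the exact include/linux/init.h definitions,
whose attribute details varied across kernel versions): with CPU
hotplug disabled, functions and data marked __cpuinit were gathered
into dedicated sections and discarded once boot-time CPU bring-up was
done; with hotplug enabled, the annotations expanded to nothing so the
code stayed resident. Mixing annotated and unannotated code on the same
call path is what made the freed-section bugs mentioned above possible.

/*
 * Simplified, hypothetical sketch of the old __cpuinit machinery.
 * The real kernel definitions also carried attributes such as
 * __cold and notrace in some versions.
 */
#ifdef CONFIG_HOTPLUG_CPU
#define __cpuinit            /* CPUs can come back online: keep the code */
#define __cpuinitdata
#else
#define __cpuinit     __attribute__((__section__(".cpuinit.text")))
#define __cpuinitdata __attribute__((__section__(".cpuinit.data")))
#endif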
After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
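In a phase-out like this, the macros are typically reduced to no-op
stubs first, so that not-yet-converted code keeps building while users
are removed tree-wide; a hypothetical sketch of that transitional state:

/* Transitional no-op stubs (assumed, not the exact init.h text):
 * the annotations compile away, and remaining users can be removed
 * at leisure before the macros themselves are deleted. */
#define __cpuinit
#define __cpuinitdata
#define __cpuinitconst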
This removes all uses of the __cpuinit macros from the RCU code.
[1] https://lkml.org/lkml/2013/5/20/589
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Josh Triplett <josh@freedesktop.org>
Cc: Dipankar Sarma <dipankar@in.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
commit 49fb4c6290 (parent 013dbb325b)
4 changed files with 11 additions and 11 deletions
kernel/rcutorture.c
@@ -1476,7 +1476,7 @@ rcu_torture_shutdown(void *arg)
  * Execute random CPU-hotplug operations at the interval specified
  * by the onoff_interval.
  */
-static int __cpuinit
+static int
 rcu_torture_onoff(void *arg)
 {
 	int cpu;
@@ -1558,7 +1558,7 @@ rcu_torture_onoff(void *arg)
 	return 0;
 }
 
-static int __cpuinit
+static int
 rcu_torture_onoff_init(void)
 {
 	int ret;
@@ -1601,7 +1601,7 @@ static void rcu_torture_onoff_cleanup(void)
  * CPU-stall kthread. It waits as specified by stall_cpu_holdoff, then
  * induces a CPU stall for the time specified by stall_cpu.
  */
-static int __cpuinit rcu_torture_stall(void *args)
+static int rcu_torture_stall(void *args)
 {
 	unsigned long stop_at;
 
kernel/rcutree.c
@@ -2910,7 +2910,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
  * can accept some slop in the rsp->completed access due to the fact
  * that this CPU cannot possibly have any RCU callbacks in flight yet.
  */
-static void __cpuinit
+static void
 rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
 {
 	unsigned long flags;
@@ -2962,7 +2962,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
 	mutex_unlock(&rsp->onoff_mutex);
 }
 
-static void __cpuinit rcu_prepare_cpu(int cpu)
+static void rcu_prepare_cpu(int cpu)
 {
 	struct rcu_state *rsp;
 
@@ -2974,7 +2974,7 @@ static void __cpuinit rcu_prepare_cpu(int cpu)
 /*
  * Handle CPU online/offline notification events.
  */
-static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
+static int rcu_cpu_notify(struct notifier_block *self,
 				    unsigned long action, void *hcpu)
 {
 	long cpu = (long)hcpu;
kernel/rcutree.h
@@ -521,10 +521,10 @@ static void invoke_rcu_callbacks_kthread(void);
 static bool rcu_is_callbacks_kthread(void);
 #ifdef CONFIG_RCU_BOOST
 static void rcu_preempt_do_callbacks(void);
-static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
+static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 						 struct rcu_node *rnp);
 #endif /* #ifdef CONFIG_RCU_BOOST */
-static void __cpuinit rcu_prepare_kthreads(int cpu);
+static void rcu_prepare_kthreads(int cpu);
 static void rcu_cleanup_after_idle(int cpu);
 static void rcu_prepare_for_idle(int cpu);
 static void rcu_idle_count_callbacks_posted(void);
kernel/rcutree_plugin.h
@@ -1352,7 +1352,7 @@ static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
  * already exist. We only create this kthread for preemptible RCU.
  * Returns zero if all is well, a negated errno otherwise.
  */
-static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
+static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 						 struct rcu_node *rnp)
 {
 	int rnp_index = rnp - &rsp->node[0];
@@ -1507,7 +1507,7 @@ static int __init rcu_spawn_kthreads(void)
 }
 early_initcall(rcu_spawn_kthreads);
 
-static void __cpuinit rcu_prepare_kthreads(int cpu)
+static void rcu_prepare_kthreads(int cpu)
 {
 	struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
 	struct rcu_node *rnp = rdp->mynode;
@@ -1549,7 +1549,7 @@ static int __init rcu_scheduler_really_started(void)
 }
 early_initcall(rcu_scheduler_really_started);
 
-static void __cpuinit rcu_prepare_kthreads(int cpu)
+static void rcu_prepare_kthreads(int cpu)
 {
 }