on_each_cpu(): kill unused 'retry' parameter
It's not even passed on to smp_call_function() anymore, since that was
removed. So kill it.

Acked-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Reviewed-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
commit 15c8b6c1aa
parent 8691e5a8f6
48 changed files with 84 additions and 84 deletions
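For readers skimming the diff below, a minimal before/after sketch of a typical call site. The helper names (ipi_func, flush_everything) are illustrative only; the only thing taken from this commit is the on_each_cpu() signature change visible in the declaration hunk further down.

    #include <linux/smp.h>

    /* hypothetical per-CPU callback, not part of this commit */
    static void ipi_func(void *info)
    {
            /* per-CPU work would go here */
    }

    static void flush_everything(void)
    {
            /* before: on_each_cpu(func, info, retry, wait) -- 'retry' was ignored */
            /* on_each_cpu(ipi_func, NULL, 0, 1); */

            /* after: on_each_cpu(func, info, wait) */
            on_each_cpu(ipi_func, NULL, 1);
    }

Callers simply drop the retry argument; the remaining integer is still the wait flag.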
@@ -160,7 +160,7 @@ common_shutdown(int mode, char *restart_cmd)
         struct halt_info args;
         args.mode = mode;
         args.restart_cmd = restart_cmd;
-        on_each_cpu(common_shutdown_1, &args, 1, 0);
+        on_each_cpu(common_shutdown_1, &args, 0);
 }
 
 void
@@ -657,7 +657,7 @@ void
 smp_imb(void)
 {
         /* Must wait other processors to flush their icache before continue. */
-        if (on_each_cpu(ipi_imb, NULL, 1, 1))
+        if (on_each_cpu(ipi_imb, NULL, 1))
                 printk(KERN_CRIT "smp_imb: timed out\n");
 }
 EXPORT_SYMBOL(smp_imb);
@@ -673,7 +673,7 @@ flush_tlb_all(void)
 {
         /* Although we don't have any data to pass, we do want to
            synchronize with the other processors. */
-        if (on_each_cpu(ipi_flush_tlb_all, NULL, 1, 1)) {
+        if (on_each_cpu(ipi_flush_tlb_all, NULL, 1)) {
                 printk(KERN_CRIT "flush_tlb_all: timed out\n");
         }
 }
@@ -604,7 +604,7 @@ static inline void ipi_flush_tlb_kernel_range(void *arg)
 
 void flush_tlb_all(void)
 {
-        on_each_cpu(ipi_flush_tlb_all, NULL, 1, 1);
+        on_each_cpu(ipi_flush_tlb_all, NULL, 1);
 }
 
 void flush_tlb_mm(struct mm_struct *mm)
@@ -631,7 +631,7 @@ void flush_tlb_kernel_page(unsigned long kaddr)
 
         ta.ta_start = kaddr;
 
-        on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1, 1);
+        on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
 }
 
 void flush_tlb_range(struct vm_area_struct *vma,
@@ -654,5 +654,5 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
         ta.ta_start = start;
         ta.ta_end = end;
 
-        on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1, 1);
+        on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
 }
@@ -707,7 +707,7 @@ ia64_mca_cmc_vector_enable (void *dummy)
 static void
 ia64_mca_cmc_vector_disable_keventd(struct work_struct *unused)
 {
-        on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 1, 0);
+        on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 0);
 }
 
 /*
@@ -719,7 +719,7 @@ ia64_mca_cmc_vector_disable_keventd(struct work_struct *unused)
 static void
 ia64_mca_cmc_vector_enable_keventd(struct work_struct *unused)
 {
-        on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 1, 0);
+        on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 0);
 }
 
 /*
@@ -6508,7 +6508,7 @@ pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
         }
 
         /* save the current system wide pmu states */
-        ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 0, 1);
+        ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 1);
         if (ret) {
                 DPRINT(("on_each_cpu() failed: %d\n", ret));
                 goto cleanup_reserve;
@@ -6553,7 +6553,7 @@ pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
 
         pfm_alt_intr_handler = NULL;
 
-        ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 0, 1);
+        ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 1);
         if (ret) {
                 DPRINT(("on_each_cpu() failed: %d\n", ret));
         }
@@ -285,7 +285,7 @@ smp_flush_tlb_cpumask(cpumask_t xcpumask)
 void
 smp_flush_tlb_all (void)
 {
-        on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1, 1);
+        on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1);
 }
 
 void
@@ -308,7 +308,7 @@ smp_flush_tlb_mm (struct mm_struct *mm)
          * anyhow, and once a CPU is interrupted, the cost of local_flush_tlb_all() is
          * rather trivial.
          */
-        on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1, 1);
+        on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
 }
 
 void arch_send_call_function_single_ipi(int cpu)
@@ -49,7 +49,7 @@ static void local_rm9k_perfcounter_irq_startup(void *args)
 
 static unsigned int rm9k_perfcounter_irq_startup(unsigned int irq)
 {
-        on_each_cpu(local_rm9k_perfcounter_irq_startup, (void *) irq, 0, 1);
+        on_each_cpu(local_rm9k_perfcounter_irq_startup, (void *) irq, 1);
 
         return 0;
 }
@@ -66,7 +66,7 @@ static void local_rm9k_perfcounter_irq_shutdown(void *args)
 
 static void rm9k_perfcounter_irq_shutdown(unsigned int irq)
 {
-        on_each_cpu(local_rm9k_perfcounter_irq_shutdown, (void *) irq, 0, 1);
+        on_each_cpu(local_rm9k_perfcounter_irq_shutdown, (void *) irq, 1);
 }
 
 static struct irq_chip rm9k_irq_controller = {
@@ -246,7 +246,7 @@ static void flush_tlb_all_ipi(void *info)
 
 void flush_tlb_all(void)
 {
-        on_each_cpu(flush_tlb_all_ipi, NULL, 1, 1);
+        on_each_cpu(flush_tlb_all_ipi, NULL, 1);
 }
 
 static void flush_tlb_mm_ipi(void *mm)
@@ -366,7 +366,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
                 .addr2 = end,
         };
 
-        on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1, 1);
+        on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
 }
 
 static void flush_tlb_page_ipi(void *info)
@@ -27,7 +27,7 @@ static int op_mips_setup(void)
         model->reg_setup(ctr);
 
         /* Configure the registers on all cpus. */
-        on_each_cpu(model->cpu_setup, NULL, 0, 1);
+        on_each_cpu(model->cpu_setup, NULL, 1);
 
         return 0;
 }
@@ -58,7 +58,7 @@ static int op_mips_create_files(struct super_block * sb, struct dentry * root)
 
 static int op_mips_start(void)
 {
-        on_each_cpu(model->cpu_start, NULL, 0, 1);
+        on_each_cpu(model->cpu_start, NULL, 1);
 
         return 0;
 }
@@ -66,7 +66,7 @@ static int op_mips_start(void)
 static void op_mips_stop(void)
 {
         /* Disable performance monitoring for all counters. */
-        on_each_cpu(model->cpu_stop, NULL, 0, 1);
+        on_each_cpu(model->cpu_stop, NULL, 1);
 }
 
 int __init oprofile_arch_init(struct oprofile_operations *ops)
@@ -51,12 +51,12 @@ static struct pdc_btlb_info btlb_info __read_mostly;
 void
 flush_data_cache(void)
 {
-        on_each_cpu(flush_data_cache_local, NULL, 1, 1);
+        on_each_cpu(flush_data_cache_local, NULL, 1);
 }
 void
 flush_instruction_cache(void)
 {
-        on_each_cpu(flush_instruction_cache_local, NULL, 1, 1);
+        on_each_cpu(flush_instruction_cache_local, NULL, 1);
 }
 #endif
 
@@ -515,7 +515,7 @@ static void cacheflush_h_tmp_function(void *dummy)
 
 void flush_cache_all(void)
 {
-        on_each_cpu(cacheflush_h_tmp_function, NULL, 1, 1);
+        on_each_cpu(cacheflush_h_tmp_function, NULL, 1);
 }
 
 void flush_cache_mm(struct mm_struct *mm)
@@ -292,7 +292,7 @@ void arch_send_call_function_single_ipi(int cpu)
 void
 smp_flush_tlb_all(void)
 {
-        on_each_cpu(flush_tlb_all_local, NULL, 1, 1);
+        on_each_cpu(flush_tlb_all_local, NULL, 1);
 }
 
 /*
@@ -1053,7 +1053,7 @@ void flush_tlb_all(void)
                 do_recycle++;
         }
         spin_unlock(&sid_lock);
-        on_each_cpu(flush_tlb_all_local, NULL, 1, 1);
+        on_each_cpu(flush_tlb_all_local, NULL, 1);
         if (do_recycle) {
                 spin_lock(&sid_lock);
                 recycle_sids(recycle_ndirty,recycle_dirty_array);
@@ -747,7 +747,7 @@ static int rtas_ibm_suspend_me(struct rtas_args *args)
         /* Call function on all CPUs.  One of us will make the
          * rtas call
          */
-        if (on_each_cpu(rtas_percpu_suspend_me, &data, 1, 0))
+        if (on_each_cpu(rtas_percpu_suspend_me, &data, 0))
                 data.error = -EINVAL;
 
         wait_for_completion(&done);
@@ -192,7 +192,7 @@ static void tau_timeout_smp(unsigned long unused)
 
         /* schedule ourselves to be run again */
         mod_timer(&tau_timer, jiffies + shrink_timer) ;
-        on_each_cpu(tau_timeout, NULL, 1, 0);
+        on_each_cpu(tau_timeout, NULL, 0);
 }
 
 /*
@@ -234,7 +234,7 @@ int __init TAU_init(void)
         tau_timer.expires = jiffies + shrink_timer;
         add_timer(&tau_timer);
 
-        on_each_cpu(TAU_init_smp, NULL, 1, 0);
+        on_each_cpu(TAU_init_smp, NULL, 0);
 
         printk("Thermal assist unit ");
 #ifdef CONFIG_TAU_INT
@@ -322,7 +322,7 @@ void snapshot_timebases(void)
 {
         if (!cpu_has_feature(CPU_FTR_PURR))
                 return;
-        on_each_cpu(snapshot_tb_and_purr, NULL, 0, 1);
+        on_each_cpu(snapshot_tb_and_purr, NULL, 1);
 }
 
 /*
@@ -218,7 +218,7 @@ static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psiz
         mb();
 
         /* XXX this is sub-optimal but will do for now */
-        on_each_cpu(slice_flush_segments, mm, 0, 1);
+        on_each_cpu(slice_flush_segments, mm, 1);
 #ifdef CONFIG_SPU_BASE
         spu_flush_all_slbs(mm);
 #endif
@@ -65,7 +65,7 @@ static int op_powerpc_setup(void)
 
         /* Configure the registers on all cpus.  If an error occurs on one
          * of the cpus, op_per_cpu_rc will be set to the error */
-        on_each_cpu(op_powerpc_cpu_setup, NULL, 0, 1);
+        on_each_cpu(op_powerpc_cpu_setup, NULL, 1);
 
 out:    if (op_per_cpu_rc) {
                 /* error on setup release the performance counter hardware */
@@ -100,7 +100,7 @@ static int op_powerpc_start(void)
         if (model->global_start)
                 return model->global_start(ctr);
         if (model->start) {
-                on_each_cpu(op_powerpc_cpu_start, NULL, 0, 1);
+                on_each_cpu(op_powerpc_cpu_start, NULL, 1);
                 return op_per_cpu_rc;
         }
         return -EIO; /* No start function is defined for this
@@ -115,7 +115,7 @@ static inline void op_powerpc_cpu_stop(void *dummy)
 static void op_powerpc_stop(void)
 {
         if (model->stop)
-                on_each_cpu(op_powerpc_cpu_stop, NULL, 0, 1);
+                on_each_cpu(op_powerpc_cpu_stop, NULL, 1);
         if (model->global_stop)
                 model->global_stop();
 }
@@ -299,7 +299,7 @@ static void smp_ptlb_callback(void *info)
 
 void smp_ptlb_all(void)
 {
-        on_each_cpu(smp_ptlb_callback, NULL, 0, 1);
+        on_each_cpu(smp_ptlb_callback, NULL, 1);
 }
 EXPORT_SYMBOL(smp_ptlb_all);
 #endif /* ! CONFIG_64BIT */
@@ -347,7 +347,7 @@ void smp_ctl_set_bit(int cr, int bit)
         memset(&parms.orvals, 0, sizeof(parms.orvals));
         memset(&parms.andvals, 0xff, sizeof(parms.andvals));
         parms.orvals[cr] = 1 << bit;
-        on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
+        on_each_cpu(smp_ctl_bit_callback, &parms, 1);
 }
 EXPORT_SYMBOL(smp_ctl_set_bit);
 
@@ -361,7 +361,7 @@ void smp_ctl_clear_bit(int cr, int bit)
         memset(&parms.orvals, 0, sizeof(parms.orvals));
         memset(&parms.andvals, 0xff, sizeof(parms.andvals));
         parms.andvals[cr] = ~(1L << bit);
-        on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
+        on_each_cpu(smp_ctl_bit_callback, &parms, 1);
 }
 EXPORT_SYMBOL(smp_ctl_clear_bit);
 
@@ -909,7 +909,7 @@ static void etr_work_fn(struct work_struct *work)
         if (!eacr.ea) {
                 /* Both ports offline. Reset everything. */
                 eacr.dp = eacr.es = eacr.sl = 0;
-                on_each_cpu(etr_disable_sync_clock, NULL, 0, 1);
+                on_each_cpu(etr_disable_sync_clock, NULL, 1);
                 del_timer_sync(&etr_timer);
                 etr_update_eacr(eacr);
                 set_bit(ETR_FLAG_EACCES, &etr_flags);
@@ -197,7 +197,7 @@ static void flush_tlb_all_ipi(void *info)
 
 void flush_tlb_all(void)
 {
-        on_each_cpu(flush_tlb_all_ipi, 0, 1, 1);
+        on_each_cpu(flush_tlb_all_ipi, 0, 1);
 }
 
 static void flush_tlb_mm_ipi(void *mm)
@@ -284,7 +284,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 
         fd.addr1 = start;
         fd.addr2 = end;
-        on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1, 1);
+        on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1);
 }
 
 static void flush_tlb_page_ipi(void *info)
@@ -344,7 +344,7 @@ void hugetlb_prefault_arch_hook(struct mm_struct *mm)
                  * also executing in this address space.
                  */
                 mm->context.sparc64_ctx_val = ctx;
-                on_each_cpu(context_reload, mm, 0, 0);
+                on_each_cpu(context_reload, mm, 0);
         }
         spin_unlock(&ctx_alloc_lock);
 }
@@ -363,7 +363,7 @@ static void mcheck_check_cpu(void *info)
 
 static void mcheck_timer(struct work_struct *work)
 {
-        on_each_cpu(mcheck_check_cpu, NULL, 1, 1);
+        on_each_cpu(mcheck_check_cpu, NULL, 1);
 
         /*
          * Alert userspace if needed. If we logged an MCE, reduce the
@@ -612,7 +612,7 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
          * Collect entries that were still getting written before the
          * synchronize.
          */
-        on_each_cpu(collect_tscs, cpu_tsc, 1, 1);
+        on_each_cpu(collect_tscs, cpu_tsc, 1);
         for (i = next; i < MCE_LOG_LEN; i++) {
                 if (mcelog.entry[i].finished &&
                     mcelog.entry[i].tsc < cpu_tsc[mcelog.entry[i].cpu]) {
@@ -737,7 +737,7 @@ static void mce_restart(void)
         if (next_interval)
                 cancel_delayed_work(&mcheck_work);
         /* Timer race is harmless here */
-        on_each_cpu(mce_init, NULL, 1, 1);
+        on_each_cpu(mce_init, NULL, 1);
         next_interval = check_interval * HZ;
         if (next_interval)
                 schedule_delayed_work(&mcheck_work,
@@ -59,7 +59,7 @@ static DECLARE_DELAYED_WORK(mce_work, mce_work_fn);
 
 static void mce_work_fn(struct work_struct *work)
 {
-        on_each_cpu(mce_checkregs, NULL, 1, 1);
+        on_each_cpu(mce_checkregs, NULL, 1);
         schedule_delayed_work(&mce_work, round_jiffies_relative(MCE_RATE));
 }
 
@@ -180,7 +180,7 @@ void disable_lapic_nmi_watchdog(void)
         if (atomic_read(&nmi_active) <= 0)
                 return;
 
-        on_each_cpu(stop_apic_nmi_watchdog, NULL, 0, 1);
+        on_each_cpu(stop_apic_nmi_watchdog, NULL, 1);
         wd_ops->unreserve();
 
         BUG_ON(atomic_read(&nmi_active) != 0);
@@ -202,7 +202,7 @@ void enable_lapic_nmi_watchdog(void)
                 return;
         }
 
-        on_each_cpu(setup_apic_nmi_watchdog, NULL, 0, 1);
+        on_each_cpu(setup_apic_nmi_watchdog, NULL, 1);
         touch_nmi_watchdog();
 }
 
@@ -1565,7 +1565,7 @@ void /*__init*/ print_local_APIC(void * dummy)
 
 void print_all_local_APICs (void)
 {
-        on_each_cpu(print_local_APIC, NULL, 1, 1);
+        on_each_cpu(print_local_APIC, NULL, 1);
 }
 
 void /*__init*/ print_PIC(void)
@@ -1146,7 +1146,7 @@ void __apicdebuginit print_local_APIC(void * dummy)
 
 void print_all_local_APICs (void)
 {
-        on_each_cpu(print_local_APIC, NULL, 1, 1);
+        on_each_cpu(print_local_APIC, NULL, 1);
 }
 
 void __apicdebuginit print_PIC(void)
@@ -218,7 +218,7 @@ static void __acpi_nmi_enable(void *__unused)
 void acpi_nmi_enable(void)
 {
         if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
-                on_each_cpu(__acpi_nmi_enable, NULL, 0, 1);
+                on_each_cpu(__acpi_nmi_enable, NULL, 1);
 }
 
 static void __acpi_nmi_disable(void *__unused)
@@ -232,7 +232,7 @@ static void __acpi_nmi_disable(void *__unused)
 void acpi_nmi_disable(void)
 {
         if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
-                on_each_cpu(__acpi_nmi_disable, NULL, 0, 1);
+                on_each_cpu(__acpi_nmi_disable, NULL, 1);
 }
 
 void setup_apic_nmi_watchdog(void *unused)
@@ -225,7 +225,7 @@ static void __acpi_nmi_enable(void *__unused)
 void acpi_nmi_enable(void)
 {
         if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
-                on_each_cpu(__acpi_nmi_enable, NULL, 0, 1);
+                on_each_cpu(__acpi_nmi_enable, NULL, 1);
 }
 
 static void __acpi_nmi_disable(void *__unused)
@@ -239,7 +239,7 @@ static void __acpi_nmi_disable(void *__unused)
 void acpi_nmi_disable(void)
 {
         if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
-                on_each_cpu(__acpi_nmi_disable, NULL, 0, 1);
+                on_each_cpu(__acpi_nmi_disable, NULL, 1);
 }
 
 void setup_apic_nmi_watchdog(void *unused)
@@ -238,6 +238,6 @@ static void do_flush_tlb_all(void *info)
 
 void flush_tlb_all(void)
 {
-        on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
+        on_each_cpu(do_flush_tlb_all, NULL, 1);
 }
 
@@ -270,5 +270,5 @@ static void do_flush_tlb_all(void *info)
 
 void flush_tlb_all(void)
 {
-        on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
+        on_each_cpu(do_flush_tlb_all, NULL, 1);
 }
@@ -301,7 +301,7 @@ static int __init vsyscall_init(void)
 #ifdef CONFIG_SYSCTL
         register_sysctl_table(kernel_root_table2);
 #endif
-        on_each_cpu(cpu_vsyscall_init, NULL, 0, 1);
+        on_each_cpu(cpu_vsyscall_init, NULL, 1);
         hotcpu_notifier(cpu_vsyscall_notifier, 0);
         return 0;
 }
@@ -2968,7 +2968,7 @@ static void vmx_free_vmcs(struct kvm_vcpu *vcpu)
         struct vcpu_vmx *vmx = to_vmx(vcpu);
 
         if (vmx->vmcs) {
-                on_each_cpu(__vcpu_clear, vmx, 0, 1);
+                on_each_cpu(__vcpu_clear, vmx, 1);
                 free_vmcs(vmx->vmcs);
                 vmx->vmcs = NULL;
         }
@@ -1072,7 +1072,7 @@ static void do_flush_tlb_all(void *info)
 /* flush the TLB of every active CPU in the system */
 void flush_tlb_all(void)
 {
-        on_each_cpu(do_flush_tlb_all, 0, 1, 1);
+        on_each_cpu(do_flush_tlb_all, 0, 1);
 }
 
 /* used to set up the trampoline for other CPUs when the memory manager
@@ -106,7 +106,7 @@ static void cpa_flush_all(unsigned long cache)
 {
         BUG_ON(irqs_disabled());
 
-        on_each_cpu(__cpa_flush_all, (void *) cache, 1, 1);
+        on_each_cpu(__cpa_flush_all, (void *) cache, 1);
 }
 
 static void __cpa_flush_range(void *arg)
@@ -127,7 +127,7 @@ static void cpa_flush_range(unsigned long start, int numpages, int cache)
         BUG_ON(irqs_disabled());
         WARN_ON(PAGE_ALIGN(start) != start);
 
-        on_each_cpu(__cpa_flush_range, NULL, 1, 1);
+        on_each_cpu(__cpa_flush_range, NULL, 1);
 
         if (!cache)
                 return;
@@ -218,8 +218,8 @@ static int nmi_setup(void)
                 }
 
         }
-        on_each_cpu(nmi_save_registers, NULL, 0, 1);
-        on_each_cpu(nmi_cpu_setup, NULL, 0, 1);
+        on_each_cpu(nmi_save_registers, NULL, 1);
+        on_each_cpu(nmi_cpu_setup, NULL, 1);
         nmi_enabled = 1;
         return 0;
 }
@@ -271,7 +271,7 @@ static void nmi_shutdown(void)
 {
         struct op_msrs *msrs = &__get_cpu_var(cpu_msrs);
         nmi_enabled = 0;
-        on_each_cpu(nmi_cpu_shutdown, NULL, 0, 1);
+        on_each_cpu(nmi_cpu_shutdown, NULL, 1);
         unregister_die_notifier(&profile_exceptions_nb);
         model->shutdown(msrs);
         free_msrs();
@@ -285,7 +285,7 @@ static void nmi_cpu_start(void *dummy)
 
 static int nmi_start(void)
 {
-        on_each_cpu(nmi_cpu_start, NULL, 0, 1);
+        on_each_cpu(nmi_cpu_start, NULL, 1);
         return 0;
 }
 
@@ -297,7 +297,7 @@ static void nmi_cpu_stop(void *dummy)
 
 static void nmi_stop(void)
 {
-        on_each_cpu(nmi_cpu_stop, NULL, 0, 1);
+        on_each_cpu(nmi_cpu_stop, NULL, 1);
 }
 
 struct op_counter_config counter_config[OP_MAX_COUNTER];
@@ -1249,7 +1249,7 @@ static void ipi_handler(void *null)
 
 void global_cache_flush(void)
 {
-        if (on_each_cpu(ipi_handler, NULL, 1, 1) != 0)
+        if (on_each_cpu(ipi_handler, NULL, 1) != 0)
                 panic(PFX "timed out waiting for the other CPUs!\n");
 }
 EXPORT_SYMBOL(global_cache_flush);
@@ -478,7 +478,7 @@ void __init lguest_arch_host_init(void)
                 cpu_had_pge = 1;
                 /* adjust_pge is a helper function which sets or unsets the PGE
                  * bit on its CPU, depending on the argument (0 == unset). */
-                on_each_cpu(adjust_pge, (void *)0, 0, 1);
+                on_each_cpu(adjust_pge, (void *)0, 1);
                 /* Turn off the feature in the global feature set. */
                 clear_bit(X86_FEATURE_PGE, boot_cpu_data.x86_capability);
         }
@@ -493,7 +493,7 @@ void __exit lguest_arch_host_fini(void)
         if (cpu_had_pge) {
                 set_bit(X86_FEATURE_PGE, boot_cpu_data.x86_capability);
                 /* adjust_pge's argument "1" means set PGE. */
-                on_each_cpu(adjust_pge, (void *)1, 0, 1);
+                on_each_cpu(adjust_pge, (void *)1, 1);
         }
         put_online_cpus();
 }
@@ -1464,7 +1464,7 @@ static void invalidate_bh_lru(void *arg)
 
 void invalidate_bh_lrus(void)
 {
-        on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
+        on_each_cpu(invalidate_bh_lru, NULL, 1);
 }
 EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
 
@@ -89,7 +89,7 @@ static inline void init_call_single_data(void)
 /*
  * Call a function on all processors
  */
-int on_each_cpu(void (*func) (void *info), void *info, int retry, int wait);
+int on_each_cpu(void (*func) (void *info), void *info, int wait);
 
 #define MSG_ALL_BUT_SELF 0x8000 /* Assume <32768 CPU's */
 #define MSG_ALL 0x8001
@@ -121,7 +121,7 @@ static inline int up_smp_call_function(void (*func)(void *), void *info)
 }
 #define smp_call_function(func, info, wait) \
                         (up_smp_call_function(func, info))
-#define on_each_cpu(func,info,retry,wait) \
+#define on_each_cpu(func,info,wait) \
         ({ \
                 local_irq_disable(); \
                 func(info); \
@@ -623,7 +623,7 @@ static void retrigger_next_event(void *arg)
 void clock_was_set(void)
 {
         /* Retrigger the CPU local events everywhere */
-        on_each_cpu(retrigger_next_event, NULL, 0, 1);
+        on_each_cpu(retrigger_next_event, NULL, 1);
 }
 
 /*
@@ -252,7 +252,7 @@ static void profile_flip_buffers(void)
         mutex_lock(&profile_flip_mutex);
         j = per_cpu(cpu_profile_flip, get_cpu());
         put_cpu();
-        on_each_cpu(__profile_flip_buffers, NULL, 0, 1);
+        on_each_cpu(__profile_flip_buffers, NULL, 1);
         for_each_online_cpu(cpu) {
                 struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[j];
                 for (i = 0; i < NR_PROFILE_HIT; ++i) {
@@ -275,7 +275,7 @@ static void profile_discard_flip_buffers(void)
         mutex_lock(&profile_flip_mutex);
         i = per_cpu(cpu_profile_flip, get_cpu());
         put_cpu();
-        on_each_cpu(__profile_flip_buffers, NULL, 0, 1);
+        on_each_cpu(__profile_flip_buffers, NULL, 1);
         for_each_online_cpu(cpu) {
                 struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[i];
                 memset(hits, 0, NR_PROFILE_HIT*sizeof(struct profile_hit));
@@ -558,7 +558,7 @@ static int __init create_hash_tables(void)
 out_cleanup:
         prof_on = 0;
         smp_mb();
-        on_each_cpu(profile_nop, NULL, 0, 1);
+        on_each_cpu(profile_nop, NULL, 1);
         for_each_online_cpu(cpu) {
                 struct page *page;
 
@@ -127,7 +127,7 @@ void rcu_barrier(void)
          * until all the callbacks are queued.
          */
         rcu_read_lock();
-        on_each_cpu(rcu_barrier_func, NULL, 0, 1);
+        on_each_cpu(rcu_barrier_func, NULL, 1);
         rcu_read_unlock();
         wait_for_completion(&rcu_barrier_completion);
         mutex_unlock(&rcu_barrier_mutex);
@@ -674,7 +674,7 @@ __init int spawn_ksoftirqd(void)
 /*
  * Call a function on all processors
  */
-int on_each_cpu(void (*func) (void *info), void *info, int retry, int wait)
+int on_each_cpu(void (*func) (void *info), void *info, int wait)
 {
         int ret = 0;
 
@@ -918,7 +918,7 @@ void drain_local_pages(void *arg)
  */
 void drain_all_pages(void)
 {
-        on_each_cpu(drain_local_pages, NULL, 0, 1);
+        on_each_cpu(drain_local_pages, NULL, 1);
 }
 
 #ifdef CONFIG_HIBERNATION
@@ -2454,7 +2454,7 @@ static void drain_cpu_caches(struct kmem_cache *cachep)
         struct kmem_list3 *l3;
         int node;
 
-        on_each_cpu(do_drain, cachep, 1, 1);
+        on_each_cpu(do_drain, cachep, 1);
         check_irq_on();
         for_each_online_node(node) {
                 l3 = cachep->nodelists[node];
@@ -3939,7 +3939,7 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
         }
         new->cachep = cachep;
 
-        on_each_cpu(do_ccupdate_local, (void *)new, 1, 1);
+        on_each_cpu(do_ccupdate_local, (void *)new, 1);
 
         check_irq_on();
         cachep->batchcount = batchcount;
@@ -1497,7 +1497,7 @@ static void flush_cpu_slab(void *d)
 static void flush_all(struct kmem_cache *s)
 {
 #ifdef CONFIG_SMP
-        on_each_cpu(flush_cpu_slab, s, 1, 1);
+        on_each_cpu(flush_cpu_slab, s, 1);
 #else
         unsigned long flags;
 
@@ -545,7 +545,7 @@ static int iucv_enable(void)
  */
 static void iucv_disable(void)
 {
-        on_each_cpu(iucv_retrieve_cpu, NULL, 0, 1);
+        on_each_cpu(iucv_retrieve_cpu, NULL, 1);
         kfree(iucv_path_table);
 }
 
@@ -1286,7 +1286,7 @@ static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
                  * in vmx root mode.
                  */
                 printk(KERN_INFO "kvm: exiting hardware virtualization\n");
-                on_each_cpu(hardware_disable, NULL, 0, 1);
+                on_each_cpu(hardware_disable, NULL, 1);
         }
         return NOTIFY_OK;
 }
@@ -1479,7 +1479,7 @@ int kvm_init(void *opaque, unsigned int vcpu_size,
                 goto out_free_1;
         }
 
-        on_each_cpu(hardware_enable, NULL, 0, 1);
+        on_each_cpu(hardware_enable, NULL, 1);
         r = register_cpu_notifier(&kvm_cpu_notifier);
         if (r)
                 goto out_free_2;
@@ -1525,7 +1525,7 @@ int kvm_init(void *opaque, unsigned int vcpu_size,
         unregister_reboot_notifier(&kvm_reboot_notifier);
         unregister_cpu_notifier(&kvm_cpu_notifier);
 out_free_2:
-        on_each_cpu(hardware_disable, NULL, 0, 1);
+        on_each_cpu(hardware_disable, NULL, 1);
 out_free_1:
         kvm_arch_hardware_unsetup();
 out_free_0:
@@ -1547,7 +1547,7 @@ void kvm_exit(void)
         sysdev_class_unregister(&kvm_sysdev_class);
         unregister_reboot_notifier(&kvm_reboot_notifier);
         unregister_cpu_notifier(&kvm_cpu_notifier);
-        on_each_cpu(hardware_disable, NULL, 0, 1);
+        on_each_cpu(hardware_disable, NULL, 1);
         kvm_arch_hardware_unsetup();
         kvm_arch_exit();
         kvm_exit_debug();