x86: avoid back to back on_each_cpu() in cpa_flush_array()
Clean up cpa_flush_array() to avoid back-to-back on_each_cpu() calls.
[ Impact: optimizes fix 0af48f42df ]
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
parent 46176b4f6b
commit 2171787be2

1 changed file with 3 additions and 14 deletions
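For background: on_each_cpu() sends an IPI to every online CPU and, with the wait flag set, blocks until each CPU has run the callback, so calling it twice in a row pays the cross-CPU broadcast and synchronization cost twice. Below is a minimal sketch of the pattern this cleanup applies, with hypothetical names (combined_cb, flush_everything) and approximate includes; it is not code from this commit:

#include <linux/smp.h>		/* on_each_cpu() */
#include <asm/tlbflush.h>	/* __flush_tlb_all() */

/* One callback does both jobs; the optional part rides in the argument. */
static void combined_cb(void *arg)
{
	unsigned long do_wbinvd = (unsigned long)arg;

	__flush_tlb_all();	/* every CPU flushes its own TLB */
	if (do_wbinvd)
		wbinvd();	/* ...and, if asked, its caches too */
}

static void flush_everything(int do_wbinvd)
{
	/* One IPI broadcast instead of two back-to-back ones. */
	on_each_cpu(combined_cb, (void *)(unsigned long)do_wbinvd, 1);
}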
arch/x86/mm/pageattr.c

@@ -204,30 +204,19 @@ static void cpa_flush_range(unsigned long start, int numpages, int cache)
 	}
 }
 
-static void wbinvd_local(void *unused)
-{
-	wbinvd();
-}
-
 static void cpa_flush_array(unsigned long *start, int numpages, int cache,
 			    int in_flags, struct page **pages)
 {
 	unsigned int i, level;
+	unsigned long do_wbinvd = cache && numpages >= 1024; /* 4M threshold */
 
 	BUG_ON(irqs_disabled());
 
-	on_each_cpu(__cpa_flush_range, NULL, 1);
+	on_each_cpu(__cpa_flush_all, (void *) do_wbinvd, 1);
 
-	if (!cache)
+	if (!cache || do_wbinvd)
 		return;
 
-	/* 4M threshold */
-	if (numpages >= 1024) {
-		if (boot_cpu_data.x86 >= 4)
-			on_each_cpu(wbinvd_local, NULL, 1);
-
-		return;
-	}
 	/*
 	 * We only need to flush on one CPU,
 	 * clflush is a MESI-coherent instruction that
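The cleanup works because __cpa_flush_all() already takes a cache flag through its void * argument, so the wbinvd decision can ride along with the TLB flush in a single cross-CPU call. For context, here is a sketch of that callback, reconstructed from the call site above rather than shown in this diff, so treat the body as an approximation:

static void __cpa_flush_all(void *arg)
{
	unsigned long cache = (unsigned long)arg;

	/* Flush all TLB entries on this CPU. */
	__flush_tlb_all();

	/* wbinvd is only implemented on 486 and later CPUs. */
	if (cache && boot_cpu_data.x86 >= 4)
		wbinvd();
}

Note that do_wbinvd = cache && numpages >= 1024 preserves the old policy of writing back and invalidating the caches only for cached flushes of 4M (1024 pages) or more, and the widened early return (!cache || do_wbinvd) also skips the per-page clflush path once wbinvd has flushed every cache line anyway.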