mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
synced 2025-01-11 16:29:05 +00:00
x86: avoid back to back on_each_cpu in cpa_flush_array
Clean up cpa_flush_array() to avoid back-to-back on_each_cpu() calls.

[ Impact: optimizes fix 0af48f42df15b97080b450d24219dd95db7b929a ]

Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
This commit is contained in:
parent
46176b4f6b
commit
2171787be2
@ -204,30 +204,19 @@ static void cpa_flush_range(unsigned long start, int numpages, int cache)
|
||||
}
|
||||
}
|
||||
|
||||
/*
 * wbinvd_local - write back and invalidate this CPU's caches.
 *
 * IPI callback helper intended to be run on every CPU via on_each_cpu().
 * The @unused argument exists only to satisfy the smp_call_function
 * callback signature and is ignored.
 */
static void wbinvd_local(void *unused)
{
	wbinvd();
}
|
||||
|
||||
static void cpa_flush_array(unsigned long *start, int numpages, int cache,
|
||||
int in_flags, struct page **pages)
|
||||
{
|
||||
unsigned int i, level;
|
||||
unsigned long do_wbinvd = cache && numpages >= 1024; /* 4M threshold */
|
||||
|
||||
BUG_ON(irqs_disabled());
|
||||
|
||||
on_each_cpu(__cpa_flush_range, NULL, 1);
|
||||
on_each_cpu(__cpa_flush_all, (void *) do_wbinvd, 1);
|
||||
|
||||
if (!cache)
|
||||
if (!cache || do_wbinvd)
|
||||
return;
|
||||
|
||||
/* 4M threshold */
|
||||
if (numpages >= 1024) {
|
||||
if (boot_cpu_data.x86 >= 4)
|
||||
on_each_cpu(wbinvd_local, NULL, 1);
|
||||
|
||||
return;
|
||||
}
|
||||
/*
|
||||
* We only need to flush on one CPU,
|
||||
* clflush is a MESI-coherent instruction that
|
||||
|
Loading…
x
Reference in New Issue
Block a user