x86/asm/entry/32: Stop caching MSR_IA32_SYSENTER_ESP in tss.sp1
We write a stack pointer to MSR_IA32_SYSENTER_ESP exactly once, and we
unnecessarily cache the value in tss.sp1. We never read the cached value.

Remove all of the caching. It serves no purpose.

Suggested-by: Denys Vlasenko <dvlasenk@redhat.com>
Signed-off-by: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/05a0163eb33ef5208363f0015496855da7cebadd.1428002830.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit cf9328cc99 (parent ff8287f363)
2 changed files with 16 additions and 15 deletions
@@ -209,21 +209,21 @@ struct x86_hw_tss {
 	unsigned short		back_link, __blh;
 	unsigned long		sp0;
 	unsigned short		ss0, __ss0h;
+	unsigned long		sp1;
 
 	/*
-	 * We don't use ring 1, so sp1 and ss1 are convenient scratch
-	 * spaces in the same cacheline as sp0. We use them to cache
-	 * some MSR values to avoid unnecessary wrmsr instructions.
+	 * We don't use ring 1, so ss1 is a convenient scratch space in
+	 * the same cacheline as sp0. We use ss1 to cache the value in
+	 * MSR_IA32_SYSENTER_CS. When we context switch
+	 * MSR_IA32_SYSENTER_CS, we first check if the new value being
+	 * written matches ss1, and, if it's not, then we wrmsr the new
+	 * value and update ss1.
 	 *
-	 * We use SYSENTER_ESP to find sp0 and for the NMI emergency
-	 * stack, but we need to context switch it because we do
-	 * horrible things to the kernel stack in vm86 mode.
-	 *
-	 * We use SYSENTER_CS to disable sysenter in vm86 mode to avoid
-	 * corrupting the stack if we went through the sysenter path
-	 * from vm86 mode.
+	 * The only reason we context switch MSR_IA32_SYSENTER_CS is
+	 * that we set it to zero in vm86 tasks to avoid corrupting the
+	 * stack if we were to go through the sysenter path from vm86
+	 * mode.
 	 */
-	unsigned long		sp1;	/* MSR_IA32_SYSENTER_ESP */
 	unsigned short		ss1;	/* MSR_IA32_SYSENTER_CS */
 
 	unsigned short		__ss1h;
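The rewritten comment above describes a check-before-write protocol for MSR_IA32_SYSENTER_CS: compare the value about to be written with the copy cached in ss1, and only issue the wrmsr when they differ. Below is a minimal user-space sketch of that pattern; the fake_hw_tss struct, the stubbed wrmsr() and update_sysenter_cs() are illustrative stand-ins, not the kernel's actual helpers.

/*
 * Sketch of the ss1 caching scheme: only write MSR_IA32_SYSENTER_CS when
 * the new value differs from the copy cached in ss1.  wrmsr() is stubbed
 * out; names here are illustrative only.
 */
#include <stdio.h>

#define MSR_IA32_SYSENTER_CS	0x174

struct fake_hw_tss {
	unsigned short ss1;	/* cached MSR_IA32_SYSENTER_CS value */
};

static unsigned int wrmsr_count;

static void wrmsr(unsigned int msr, unsigned int low, unsigned int high)
{
	wrmsr_count++;	/* stand-in for the real (slow) MSR write */
	printf("wrmsr(0x%x, 0x%x, 0x%x)\n", msr, low, high);
}

/* Write SYSENTER_CS only if it actually changes, and keep the cache fresh. */
static void update_sysenter_cs(struct fake_hw_tss *tss, unsigned short new_cs)
{
	if (tss->ss1 == new_cs)
		return;		/* cached copy matches: skip the wrmsr */

	tss->ss1 = new_cs;
	wrmsr(MSR_IA32_SYSENTER_CS, new_cs, 0);
}

int main(void)
{
	struct fake_hw_tss tss = { .ss1 = 0 };

	update_sysenter_cs(&tss, 0x10);	/* value changes: one wrmsr */
	update_sysenter_cs(&tss, 0x10);	/* unchanged: no wrmsr */
	update_sysenter_cs(&tss, 0);	/* e.g. a vm86 task: one wrmsr */

	printf("wrmsr issued %u times\n", wrmsr_count);
	return 0;
}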
@@ -976,15 +976,16 @@ void enable_sep_cpu(void)
 		goto out;
 
 	/*
-	 * The struct::SS1 and tss_struct::SP1 fields are not used by the hardware,
-	 * we cache the SYSENTER CS and ESP values there for easy access:
+	 * We cache MSR_IA32_SYSENTER_CS's value in the TSS's ss1 field --
+	 * see the big comment in struct x86_hw_tss's definition.
 	 */
 
 	tss->x86_tss.ss1 = __KERNEL_CS;
 	wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0);
 
-	tss->x86_tss.sp1 = (unsigned long)tss + offsetofend(struct tss_struct, SYSENTER_stack);
-	wrmsr(MSR_IA32_SYSENTER_ESP, tss->x86_tss.sp1, 0);
+	wrmsr(MSR_IA32_SYSENTER_ESP,
+	      (unsigned long)tss + offsetofend(struct tss_struct, SYSENTER_stack),
+	      0);
 
 	wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)ia32_sysenter_target, 0);
 
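The replacement wrmsr() call points MSR_IA32_SYSENTER_ESP just past the TSS's SYSENTER_stack member, using the kernel's offsetofend() helper (a member's offset plus its size). Here is a small stand-alone sketch of that computation, assuming a toy struct in place of the real tss_struct.

/*
 * Sketch of what offsetofend() computes: the offset of the first byte
 * *after* a struct member.  The toy struct is illustrative; in the patch
 * above the same idea yields the top of the TSS's SYSENTER_stack.
 */
#include <stdio.h>
#include <stddef.h>

/* Equivalent to the kernel's offsetofend() definition. */
#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

struct toy_tss {
	unsigned long hw_state[26];		/* stand-in for struct x86_hw_tss */
	unsigned long SYSENTER_stack[64];	/* stack grows down from its end */
};

int main(void)
{
	struct toy_tss tss;
	unsigned long top = (unsigned long)&tss +
			    offsetofend(struct toy_tss, SYSENTER_stack);

	/* 'top' is one byte past SYSENTER_stack: the initial SYSENTER stack pointer. */
	printf("tss at %p, stack top at 0x%lx (offset %zu)\n",
	       (void *)&tss, top, offsetofend(struct toy_tss, SYSENTER_stack));
	return 0;
}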