x86-64: Improve insn scheduling in SAVE_ARGS_IRQ
In one case, use an address register that was computed earlier (and
with a simpler instruction), thus reducing the risk of a stall. In the
second case, eliminate a branch by using a conditional move (as is
already done in call_softirq and xen_do_hypervisor_callback).

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Link: http://lkml.kernel.org/r/4F4788A50200007800074A26@nat28.tlf.novell.com
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
This commit is contained in:
parent
b893485db9
commit
69466466ce
1 changed file with 3 additions and 4 deletions
@@ -319,7 +319,7 @@ ENDPROC(native_usergs_sysret64)
 	movq %rsp, %rsi
 
 	leaq -RBP(%rsp),%rdi	/* arg1 for handler */
-	testl $3, CS(%rdi)
+	testl $3, CS-RBP(%rsi)
 	je 1f
 	SWAPGS
 	/*
@@ -329,11 +329,10 @@ ENDPROC(native_usergs_sysret64)
 	 * moving irq_enter into assembly, which would be too much work)
 	 */
 1:	incl PER_CPU_VAR(irq_count)
-	jne 2f
-	mov PER_CPU_VAR(irq_stack_ptr),%rsp
+	cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp
 	CFI_DEF_CFA_REGISTER	rsi
 
-2:	/* Store previous stack value */
+	/* Store previous stack value */
 	pushq %rsi
 	CFI_ESCAPE	0x0f /* DW_CFA_def_cfa_expression */, 6, \
 			0x77 /* DW_OP_breg7 */, 0, \