x86/paravirt, 64-bit: don't restore user rsp within sysret

There's no need to fold restoring the user rsp into the sysret pvop,
so split it out.  This brings the pvop's semantics closer to those of
the underlying machine instruction.
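
Concretely, for the native case the syscall return path goes from the
pvop doing everything:

	/* old: native_usersp_sysret / USERSP_SYSRET */
	movq	%gs:pda_oldrsp, %rsp	# restore user stack pointer
	swapgs				# switch back to the user gs base
	sysretq				# return to userspace

to the caller restoring rsp itself before a narrower pvop:

	movq	%gs:pda_oldrsp, %rsp	# now open-coded at the call site
	/* new: native_usergs_sysret / USERGS_SYSRET */
	swapgs
	sysretq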

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Cc: xen-devel <xen-devel@lists.xensource.com>
Cc: Stephen Tweedie <sct@redhat.com>
Cc: Eduardo Habkost <ehabkost@redhat.com>
Cc: Mark McLoughlin <markmc@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
index 27ac2de..a19aba8 100644
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -62,7 +62,7 @@
OFFSET(PV_IRQ_irq_disable, pv_irq_ops, irq_disable);
OFFSET(PV_IRQ_irq_enable, pv_irq_ops, irq_enable);
OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
- OFFSET(PV_CPU_usersp_sysret, pv_cpu_ops, usersp_sysret);
+ OFFSET(PV_CPU_usergs_sysret, pv_cpu_ops, usergs_sysret);
OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
#endif
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 0056bc4..18447a3 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -59,8 +59,7 @@
#endif
#ifdef CONFIG_PARAVIRT
-ENTRY(native_usersp_sysret)
- movq %gs:pda_oldrsp,%rsp
+ENTRY(native_usergs_sysret)
swapgs
sysretq
#endif /* CONFIG_PARAVIRT */
@@ -275,7 +274,8 @@
CFI_REGISTER rip,rcx
RESTORE_ARGS 0,-ARG_SKIP,1
/*CFI_REGISTER rflags,r11*/
- USERSP_SYSRET
+ movq %gs:pda_oldrsp, %rsp
+ USERGS_SYSRET
CFI_RESTORE_STATE
/* Handle reschedules */
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 565ee7a..b0b17f0 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -141,7 +141,7 @@
ret = paravirt_patch_nop();
else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
- type == PARAVIRT_PATCH(pv_cpu_ops.usersp_sysret))
+ type == PARAVIRT_PATCH(pv_cpu_ops.usergs_sysret))
/* If operation requires a jmp, then jmp */
ret = paravirt_patch_jmp(insnbuf, opfunc, addr, len);
else
@@ -193,7 +193,7 @@
/* These are in entry.S */
extern void native_iret(void);
extern void native_irq_enable_sysexit(void);
-extern void native_usersp_sysret(void);
+extern void native_usergs_sysret(void);
static int __init print_banner(void)
{
@@ -332,7 +332,7 @@
#ifdef CONFIG_X86_32
.irq_enable_sysexit = native_irq_enable_sysexit,
#else
- .usersp_sysret = native_usersp_sysret,
+ .usergs_sysret = native_usergs_sysret,
#endif
.iret = native_iret,
.swapgs = native_swapgs,
diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
index 4a17055..d4c0712 100644
--- a/arch/x86/kernel/paravirt_patch_64.c
+++ b/arch/x86/kernel/paravirt_patch_64.c
@@ -15,7 +15,7 @@
DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd");
/* the three commands give us more control to how to return from a syscall */
-DEF_NATIVE(pv_cpu_ops, usersp_sysret, "movq %gs:" __stringify(pda_oldrsp) ", %rsp; swapgs; sysretq;");
+DEF_NATIVE(pv_cpu_ops, usergs_sysret, "swapgs; sysretq;");
DEF_NATIVE(pv_cpu_ops, swapgs, "swapgs");
unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
@@ -35,7 +35,7 @@
PATCH_SITE(pv_irq_ops, irq_enable);
PATCH_SITE(pv_irq_ops, irq_disable);
PATCH_SITE(pv_cpu_ops, iret);
- PATCH_SITE(pv_cpu_ops, usersp_sysret);
+ PATCH_SITE(pv_cpu_ops, usergs_sysret);
PATCH_SITE(pv_cpu_ops, swapgs);
PATCH_SITE(pv_mmu_ops, read_cr2);
PATCH_SITE(pv_mmu_ops, read_cr3);
diff --git a/include/asm-x86/irqflags.h b/include/asm-x86/irqflags.h
index 99ee525..544836c 100644
--- a/include/asm-x86/irqflags.h
+++ b/include/asm-x86/irqflags.h
@@ -112,8 +112,7 @@
#ifdef CONFIG_X86_64
#define INTERRUPT_RETURN iretq
-#define USERSP_SYSRET \
- movq %gs:pda_oldrsp, %rsp; \
+#define USERGS_SYSRET \
swapgs; \
sysretq;
#else
diff --git a/include/asm-x86/paravirt.h b/include/asm-x86/paravirt.h
index 2668903..dad5b41 100644
--- a/include/asm-x86/paravirt.h
+++ b/include/asm-x86/paravirt.h
@@ -143,7 +143,7 @@
/* These three are jmp to, not actually called. */
void (*irq_enable_sysexit)(void);
- void (*usersp_sysret)(void);
+ void (*usergs_sysret)(void);
void (*iret)(void);
void (*swapgs)(void);
@@ -1505,10 +1505,10 @@
movq %rax, %rcx; \
xorq %rax, %rax;
-#define USERSP_SYSRET \
- PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usersp_sysret), \
+#define USERGS_SYSRET \
+ PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret), \
CLBR_NONE, \
- jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usersp_sysret))
+ jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret))
#endif
#endif /* __ASSEMBLY__ */