Remove the x86-64 __GUEST_CS/__GUEST_SS (and related) segment-selector definitions: these duplicate the FLAT_KERNEL_* values also used for x86-32.
Signed-off-by: Jan Beulich <jbeulich@novell.com>
regs->entry_vector = TRAP_syscall;
regs->rflags &= ~(X86_EFLAGS_AC|X86_EFLAGS_VM|X86_EFLAGS_RF|
X86_EFLAGS_NT|X86_EFLAGS_TF);
- regs->ss = __GUEST_SS;
+ regs->ss = FLAT_KERNEL_SS;
regs->rsp = (unsigned long)(rsp-11);
- regs->cs = __GUEST_CS;
+ regs->cs = FLAT_KERNEL_CS;
regs->rip = nctxt->failsafe_callback_eip;
}
}
addq $8,%rsp
popq %rcx # RIP
popq %r11 # CS
- cmpw $__GUEST_CS32,%r11
+ cmpw $FLAT_KERNEL_CS32,%r11
popq %r11 # RFLAGS
popq %rsp # RSP
je 1f
ALIGN
ENTRY(syscall_enter)
sti
- movl $__GUEST_SS,24(%rsp)
+ movl $FLAT_KERNEL_SS,24(%rsp)
pushq %rcx
pushq $0
movl $TRAP_syscall,4(%rsp)
movl $TRAP_syscall,UREGS_entry_vector+8(%rsp)
andl $~(X86_EFLAGS_AC|X86_EFLAGS_VM|X86_EFLAGS_RF|\
X86_EFLAGS_NT|X86_EFLAGS_TF),UREGS_eflags+8(%rsp)
- movq $__GUEST_SS,UREGS_ss+8(%rsp)
+ movq $FLAT_KERNEL_SS,UREGS_ss+8(%rsp)
movq %rsi,UREGS_rsp+8(%rsp)
- movq $__GUEST_CS,UREGS_cs+8(%rsp)
+ movq $FLAT_KERNEL_CS,UREGS_cs+8(%rsp)
movq TRAPBOUNCE_eip(%rdx),%rax
testq %rax,%rax
jz domain_crash_synchronous
stack[14] = 0x41;
stack[15] = 0x53;
- /* pushq $__GUEST_CS64 */
+ /* pushq $FLAT_KERNEL_CS64 */
stack[16] = 0x68;
- *(u32 *)&stack[17] = __GUEST_CS64;
+ *(u32 *)&stack[17] = FLAT_KERNEL_CS64;
/* jmp syscall_enter */
stack[21] = 0xe9;
stack[14] = 0x41;
stack[15] = 0x53;
- /* pushq $__GUEST_CS32 */
+ /* pushq $FLAT_KERNEL_CS32 */
stack[16] = 0x68;
- *(u32 *)&stack[17] = __GUEST_CS32;
+ *(u32 *)&stack[17] = FLAT_KERNEL_CS32;
/* jmp syscall_enter */
stack[21] = 0xe9;
#define __HYPERVISOR_DS32 0xe018
#define __HYPERVISOR_DS __HYPERVISOR_DS64
-#define __GUEST_CS64 0xe033
-#define __GUEST_CS32 0xe023
-#define __GUEST_CS __GUEST_CS64
-#define __GUEST_DS 0x0000
-#define __GUEST_SS 0xe02b
-
/* For generic assembly code: use macros to define operation/operand sizes. */
#define __OS "q" /* Operation Suffix */
#define __OP "r" /* Operand Prefix */