#include <xen/perfc.h>
#endif
#include <xen/sched.h>
-#ifdef CONFIG_PV
+#ifdef CONFIG_PV32
#include <compat/xen.h>
#endif
#include <asm/hardirq.h>
BLANK();
#endif
-#ifdef CONFIG_PV
+#ifdef CONFIG_PV32
OFFSET(DOMAIN_is_32bit_pv, struct domain, arch.pv.is_32bit);
BLANK();
- OFFSET(VCPUINFO_upcall_pending, struct vcpu_info, evtchn_upcall_pending);
- OFFSET(VCPUINFO_upcall_mask, struct vcpu_info, evtchn_upcall_mask);
- BLANK();
-
OFFSET(COMPAT_VCPUINFO_upcall_pending, struct compat_vcpu_info, evtchn_upcall_pending);
OFFSET(COMPAT_VCPUINFO_upcall_mask, struct compat_vcpu_info, evtchn_upcall_mask);
BLANK();
#endif
+#ifdef CONFIG_PV
+ OFFSET(VCPUINFO_upcall_pending, struct vcpu_info, evtchn_upcall_pending);
+ OFFSET(VCPUINFO_upcall_mask, struct vcpu_info, evtchn_upcall_mask);
+ BLANK();
+#endif
+
OFFSET(CPUINFO_guest_cpu_user_regs, struct cpu_info, guest_cpu_user_regs);
OFFSET(CPUINFO_verw_sel, struct cpu_info, verw_sel);
OFFSET(CPUINFO_current_vcpu, struct cpu_info, current_vcpu);
#include <public/xen.h>
#include <irq_vectors.h>
-#ifdef CONFIG_PV32
-
ENTRY(entry_int82)
ALTERNATIVE "", clac, X86_FEATURE_XEN_SMAP
pushq $0
mov %rsp, %rdi
call do_entry_int82
-#endif /* CONFIG_PV32 */
-
/* %rbx: struct vcpu */
ENTRY(compat_test_all_events)
ASSERT_NOT_IN_ATOMIC
xor %eax, %eax
ret
- .section .text.entry, "ax", @progbits
-
-/* See lstar_enter for entry register state. */
-ENTRY(cstar_enter)
-#ifdef CONFIG_XEN_SHSTK
- ALTERNATIVE "", "setssbsy", X86_FEATURE_XEN_SHSTK
-#endif
- /* sti could live here when we don't switch page tables below. */
- CR4_PV32_RESTORE
- movq 8(%rsp),%rax /* Restore %rax. */
- movq $FLAT_USER_SS32, 8(%rsp) /* Assume a 64bit domain. Compat handled lower. */
- pushq %r11
- pushq $FLAT_USER_CS32
- pushq %rcx
- pushq $0
- movl $TRAP_syscall, 4(%rsp)
- SAVE_ALL
-
- SPEC_CTRL_ENTRY_FROM_PV /* Req: %rsp=regs/cpuinfo, Clob: acd */
- /* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
-
- GET_STACK_END(bx)
- mov STACK_CPUINFO_FIELD(xen_cr3)(%rbx), %rcx
- test %rcx, %rcx
- jz .Lcstar_cr3_okay
- movb $0, STACK_CPUINFO_FIELD(use_pv_cr3)(%rbx)
- mov %rcx, %cr3
- /* %r12 is still zero at this point. */
- mov %r12, STACK_CPUINFO_FIELD(xen_cr3)(%rbx)
-.Lcstar_cr3_okay:
- sti
-
- movq STACK_CPUINFO_FIELD(current_vcpu)(%rbx), %rbx
- movq VCPU_domain(%rbx),%rcx
- cmpb $0,DOMAIN_is_32bit_pv(%rcx)
- je switch_to_kernel
-
+ENTRY(compat_syscall)
/* Fix up reported %cs/%ss for compat domains. */
movl $FLAT_COMPAT_USER_SS, UREGS_ss(%rsp)
movl $FLAT_COMPAT_USER_CS, UREGS_cs(%rsp)
movb %cl,TRAPBOUNCE_flags(%rdx)
jmp .Lcompat_bounce_exception
- .text
-
ENTRY(compat_sysenter)
CR4_PV32_RESTORE
movq VCPU_trap_ctxt(%rbx),%rcx
#ifdef CONFIG_PV
/* %rbx: struct vcpu */
-ENTRY(switch_to_kernel)
+switch_to_kernel:
leaq VCPU_trap_bounce(%rbx),%rdx
/* TB_eip = 32-bit syscall ? syscall32_addr : syscall_addr */
call pv_hypercall
jmp test_all_events
+/* See lstar_enter for entry register state. */
+ENTRY(cstar_enter)
+#ifdef CONFIG_XEN_SHSTK
+ ALTERNATIVE "", "setssbsy", X86_FEATURE_XEN_SHSTK
+#endif
+ /* sti could live here when we don't switch page tables below. */
+ CR4_PV32_RESTORE
+ movq 8(%rsp), %rax /* Restore %rax. */
+ movq $FLAT_USER_SS32, 8(%rsp) /* Assume a 64bit domain. Compat handled lower. */
+ pushq %r11
+ pushq $FLAT_USER_CS32
+ pushq %rcx
+ pushq $0
+ movl $TRAP_syscall, 4(%rsp)
+ SAVE_ALL
+
+ SPEC_CTRL_ENTRY_FROM_PV /* Req: %rsp=regs/cpuinfo, Clob: acd */
+ /* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
+
+ GET_STACK_END(bx)
+ mov STACK_CPUINFO_FIELD(xen_cr3)(%rbx), %rcx
+ test %rcx, %rcx
+ jz .Lcstar_cr3_okay
+ movb $0, STACK_CPUINFO_FIELD(use_pv_cr3)(%rbx)
+ mov %rcx, %cr3
+ /* %r12 is still zero at this point. */
+ mov %r12, STACK_CPUINFO_FIELD(xen_cr3)(%rbx)
+.Lcstar_cr3_okay:
+ sti
+
+ movq STACK_CPUINFO_FIELD(current_vcpu)(%rbx), %rbx
+
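+        /* 32-bit PV guests take the compat syscall path; 64-bit PV continues to switch_to_kernel below. */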
+#ifdef CONFIG_PV32
+ movq VCPU_domain(%rbx), %rcx
+ cmpb $0, DOMAIN_is_32bit_pv(%rcx)
+ jne compat_syscall
+#endif
+ jmp switch_to_kernel
+
ENTRY(sysenter_entry)
#ifdef CONFIG_XEN_SHSTK
ALTERNATIVE "", "setssbsy", X86_FEATURE_XEN_SHSTK
movq VCPU_domain(%rbx),%rdi
movq %rax,TRAPBOUNCE_eip(%rdx)
movb %cl,TRAPBOUNCE_flags(%rdx)
+#ifdef CONFIG_PV32
cmpb $0, DOMAIN_is_32bit_pv(%rdi)
jne compat_sysenter
+#endif
jmp .Lbounce_exception
ENTRY(int80_direct_trap)
mov 0x80 * TRAPINFO_sizeof + TRAPINFO_eip(%rsi), %rdi
movzwl 0x80 * TRAPINFO_sizeof + TRAPINFO_cs (%rsi), %ecx
+#ifdef CONFIG_PV32
mov %ecx, %edx
and $~3, %edx
test %rdx, %rdx
jz int80_slow_path
+#else
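+        /* Only 64-bit PV is possible without PV32: a zero handler address means no int80 handler is registered. */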
+ test %rdi, %rdi
+ jz int80_slow_path
+#endif
/* Construct trap_bounce from trap_ctxt[0x80]. */
lea VCPU_trap_bounce(%rbx), %rdx
lea TBF_EXCEPTION(, %rcx, TBF_INTERRUPT), %ecx
mov %cl, TRAPBOUNCE_flags(%rdx)
+#ifdef CONFIG_PV32
cmpb $0, DOMAIN_is_32bit_pv(%rax)
jne compat_int80_direct_trap
+#endif
call create_bounce_frame
jmp test_all_events
GET_STACK_END(ax)
leaq STACK_CPUINFO_FIELD(guest_cpu_user_regs)(%rax),%rsp
# create_bounce_frame() temporarily clobbers CS.RPL. Fix up.
+#ifdef CONFIG_PV32
movq STACK_CPUINFO_FIELD(current_vcpu)(%rax), %rax
movq VCPU_domain(%rax),%rax
cmpb $0, DOMAIN_is_32bit_pv(%rax)
sete %al
leal (%rax,%rax,2),%eax
orb %al,UREGS_cs(%rsp)
+#else
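+        /* Without PV32 only 64-bit PV guests exist, so CS.RPL is always 3. */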
+ orb $3, UREGS_cs(%rsp)
+#endif
xorl %edi,%edi
jmp asm_domain_crash_synchronous /* Does not return */
.popsection
GET_CURRENT(bx)
testb $3, UREGS_cs(%rsp)
jz restore_all_xen
+#ifdef CONFIG_PV32
movq VCPU_domain(%rbx), %rax
cmpb $0, DOMAIN_is_32bit_pv(%rax)
je test_all_events
jmp compat_test_all_events
+#else
+ jmp test_all_events
+#endif
#else
ret_from_intr:
ASSERT_CONTEXT_IS_XEN
testb $X86_EFLAGS_IF>>8,UREGS_eflags+1(%rsp)
jz exception_with_ints_disabled
-#ifdef CONFIG_PV
+#if defined(CONFIG_PV32)
ALTERNATIVE_2 "jmp .Lcr4_pv32_done", \
__stringify(mov VCPU_domain(%rbx), %rax), X86_FEATURE_XEN_SMEP, \
__stringify(mov VCPU_domain(%rbx), %rax), X86_FEATURE_XEN_SMAP
test $~(PFEC_write_access|PFEC_insn_fetch),%eax
jz compat_test_all_events
.Lcr4_pv32_done:
-#else
+#elif !defined(CONFIG_PV)
ASSERT_CONTEXT_IS_XEN
#endif /* CONFIG_PV */
sti
#ifdef CONFIG_PV
testb $3,UREGS_cs(%rsp)
jz restore_all_xen
+#ifdef CONFIG_PV32
movq VCPU_domain(%rbx),%rax
cmpb $0, DOMAIN_is_32bit_pv(%rax)
jne compat_test_all_events
+#endif
jmp test_all_events
#else
ASSERT_CONTEXT_IS_XEN
je 1f
movl $EVENT_CHECK_VECTOR,%edi
call send_IPI_self
-1: movq VCPU_domain(%rbx),%rax
+1:
+#ifdef CONFIG_PV32
+ movq VCPU_domain(%rbx),%rax
cmpb $0,DOMAIN_is_32bit_pv(%rax)
je restore_all_guest
jmp compat_restore_all_guest
+#else
+ jmp restore_all_guest
+#endif
#else
ASSERT_CONTEXT_IS_XEN
jmp restore_all_xen