#define BUG() abort()
#define cpu_has_amd_erratum(nr) 0
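+/* No frame-state tracking is needed in the stand-alone test harness. */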
+#define mark_regs_dirty(r) ((void)(r))
#include "x86_emulate/x86_emulate.h"
#include "x86_emulate/x86_emulate.c"
static void continue_nonidle_domain(struct vcpu *v)
{
check_wakeup_from_wait();
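+ /* Not returning via the path that saved the frame: the live R12-R15
+  * can't be trusted, so make RESTORE_ALL reload them from the frame. */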
+ mark_regs_dirty(guest_cpu_user_regs());
reset_stack_and_jump(ret_from_intr);
}
if ( test_bit(_VGCF_failsafe_disables_events, &n->arch.vgc_flags) )
vcpu_info(n, evtchn_upcall_mask) = 1;
- regs->entry_vector = TRAP_syscall;
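+ /* OR rather than assign: entry_vector's upper bits now carry frame-state flags. */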
+ regs->entry_vector |= TRAP_syscall;
regs->_eflags &= 0xFFFCBEFFUL;
regs->ss = FLAT_COMPAT_KERNEL_SS;
regs->_esp = (unsigned long)(esp-7);
if ( test_bit(_VGCF_failsafe_disables_events, &n->arch.vgc_flags) )
vcpu_info(n, evtchn_upcall_mask) = 1;
- regs->entry_vector = TRAP_syscall;
+ regs->entry_vector |= TRAP_syscall;
regs->rflags &= ~(X86_EFLAGS_AC|X86_EFLAGS_VM|X86_EFLAGS_RF|
X86_EFLAGS_NT|X86_EFLAGS_TF);
regs->ss = FLAT_KERNEL_SS;
if (likely(!desc->arch.move_in_progress))
return;
- vector = get_irq_regs()->entry_vector;
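+ /* Only the low 8 bits hold the vector; the bits above are frame-state flags. */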
+ vector = (u8)get_irq_regs()->entry_vector;
me = smp_processor_id();
if ( vector == desc->arch.vector &&
struct irqaction *action;
uint32_t tsc_in;
struct irq_desc *desc;
- unsigned int vector = regs->entry_vector;
+ unsigned int vector = (u8)regs->entry_vector;
int irq = __get_cpu_var(vector_irq[vector]);
struct cpu_user_regs *old_regs = set_irq_regs(regs);
out:
if ( desc->handler->end )
- desc->handler->end(desc, regs->entry_vector);
+ desc->handler->end(desc, vector);
out_no_end:
spin_unlock(&desc->lock);
out_no_unlock:
struct domain *d;
int i, sp;
struct pending_eoi *peoi = this_cpu(pending_eoi);
- int vector = get_irq_regs()->entry_vector;
+ unsigned int vector = (u8)get_irq_regs()->entry_vector;
if ( unlikely(action->nr_guests == 0) )
{
goto fail;
if ( admin_io_okay(port, op_bytes, v, regs) )
{
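+ /* The io_emul() stub may clobber even the preserved registers behind
+  * the compiler's back; flag the frame so R12-R15 get reloaded. */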
+ mark_regs_dirty(regs);
io_emul(regs);
}
else
goto fail;
if ( admin_io_okay(port, op_bytes, v, regs) )
{
+ mark_regs_dirty(regs);
io_emul(regs);
if ( (op_bytes == 1) && pv_post_outb_hook )
pv_post_outb_hook(port, regs->eax);
ENTRY(compat_hypercall)
pushq $0
- movl $TRAP_syscall,4(%rsp)
- SAVE_ALL
+ SAVE_VOLATILE type=TRAP_syscall compat=1
cmpb $0,untrusted_msi(%rip)
UNLIKELY_START(ne, msi_check)
/* %rbx: struct vcpu */
compat_process_softirqs:
sti
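+ /* The frame may be partial here (e.g. SYSCALL entry). A compat guest has
+  * no R12-R15, so clearing the flag suffices to complete the frame before
+  * do_softirq() can schedule. */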
+ andl $~TRAP_regs_partial,UREGS_entry_vector(%rsp)
call do_softirq
jmp compat_test_all_events
/* %rbx: struct vcpu, interrupts disabled */
compat_restore_all_guest:
ASSERT_INTERRUPTS_DISABLED
- RESTORE_ALL adj=8
+ RESTORE_ALL adj=8 compat=1
.Lft0: iretq
.section .fixup,"ax"
ENTRY(compat_sysenter)
movq VCPU_trap_ctxt(%rbx),%rcx
- cmpl $TRAP_gp_fault,UREGS_entry_vector(%rsp)
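+ /* Byte compare: only the low byte of entry_vector holds the vector. */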
+ cmpb $TRAP_gp_fault,UREGS_entry_vector(%rsp)
movzwl VCPU_sysenter_sel(%rbx),%eax
movzwl TRAP_gp_fault * TRAPINFO_sizeof + TRAPINFO_cs(%rcx),%ecx
cmovel %ecx,%eax
movl $FLAT_KERNEL_SS,24(%rsp)
pushq %rcx
pushq $0
- movl $TRAP_syscall,4(%rsp)
- movq 24(%rsp),%r11 /* Re-load user RFLAGS into %r11 before SAVE_ALL */
- SAVE_ALL
+ movq 24(%rsp),%r11 /* Re-load user RFLAGS into %r11 before saving */
+ SAVE_VOLATILE TRAP_syscall
GET_CURRENT(%rbx)
movq VCPU_domain(%rbx),%rcx
testb $1,DOMAIN_is_32bit_pv(%rcx)
/* %rbx: struct vcpu */
process_softirqs:
sti
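+ /* do_softirq() may context switch; the frame must be complete for that. */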
+ SAVE_PRESERVED
call do_softirq
jmp test_all_events
pushq $3 /* ring 3 null cs */
pushq $0 /* null rip */
pushq $0
- movl $TRAP_syscall,4(%rsp)
- SAVE_ALL
+ SAVE_VOLATILE TRAP_syscall
GET_CURRENT(%rbx)
cmpb $0,VCPU_sysenter_disables_events(%rbx)
movq VCPU_sysenter_addr(%rbx),%rax
leal (,%rcx,TBF_INTERRUPT),%ecx
UNLIKELY_START(z, sysenter_gpf)
movq VCPU_trap_ctxt(%rbx),%rsi
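+ /* This frame is being recast as a #GP fault frame for C code: complete it. */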
+ SAVE_PRESERVED
movl $TRAP_gp_fault,UREGS_entry_vector(%rsp)
movl %eax,TRAPBOUNCE_error_code(%rdx)
movq TRAP_gp_fault * TRAPINFO_sizeof + TRAPINFO_eip(%rsi),%rax
ENTRY(int80_direct_trap)
pushq $0
- SAVE_ALL
+ SAVE_VOLATILE 0x80
cmpb $0,untrusted_msi(%rip)
UNLIKELY_START(ne, msi_check)
* IDT entry with DPL==0.
*/
movl $((0x80 << 3) | 0x2),UREGS_error_code(%rsp)
+ SAVE_PRESERVED
movl $TRAP_gp_fault,UREGS_entry_vector(%rsp)
/* A GPF wouldn't have incremented the instruction pointer. */
subq $2,UREGS_rip(%rsp)
/* Rewrite our stack frame and return to guest-OS mode. */
/* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
/* Also clear AC: alignment checks shouldn't trigger in kernel mode. */
- movl $TRAP_syscall,UREGS_entry_vector+8(%rsp)
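+ /* OR in TRAP_syscall so the frame-state flag bits survive. */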
+ orl $TRAP_syscall,UREGS_entry_vector+8(%rsp)
andl $~(X86_EFLAGS_AC|X86_EFLAGS_VM|X86_EFLAGS_RF|\
X86_EFLAGS_NT|X86_EFLAGS_TF),UREGS_eflags+8(%rsp)
movq $FLAT_KERNEL_SS,UREGS_ss+8(%rsp)
jz exception_with_ints_disabled
sti
1: movq %rsp,%rdi
- movl UREGS_entry_vector(%rsp),%eax
+ movzbl UREGS_entry_vector(%rsp),%eax
leaq exception_table(%rip),%rdx
GET_CURRENT(%rbx)
PERFC_INCR(exceptions, %rax, %rbx)
/* No special register assumptions. */
FATAL_exception_with_ints_disabled:
- movl UREGS_entry_vector(%rsp),%edi
+ movzbl UREGS_entry_vector(%rsp),%edi
movq %rsp,%rsi
call fatal_trap
ud2
movq %rdi,%rsp
rep movsq
1: movq %rsp,%rdi
- movl UREGS_entry_vector(%rsp),%eax
+ movzbl UREGS_entry_vector(%rsp),%eax
leaq exception_table(%rip),%rdx
callq *(%rdx,%rax,8)
jmp ret_from_intr
regs->rbp, regs->rsp, regs->r8);
printk("r9: %016lx r10: %016lx r11: %016lx\n",
regs->r9, regs->r10, regs->r11);
- printk("r12: %016lx r13: %016lx r14: %016lx\n",
- regs->r12, regs->r13, regs->r14);
- printk("r15: %016lx cr0: %016lx cr4: %016lx\n",
- regs->r15, crs[0], crs[4]);
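+ /* A partial frame never had R12-R15 saved; don't print stale values. */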
+ if ( !(regs->entry_vector & TRAP_regs_partial) )
+ {
+ printk("r12: %016lx r13: %016lx r14: %016lx\n",
+ regs->r12, regs->r13, regs->r14);
+ printk("r15: %016lx cr0: %016lx cr4: %016lx\n",
+ regs->r15, crs[0], crs[4]);
+ }
+ else
+ printk("cr0: %016lx cr4: %016lx\n", crs[0], crs[4]);
printk("cr3: %016lx cr2: %016lx\n", crs[3], crs[2]);
printk("ds: %04x es: %04x fs: %04x gs: %04x "
"ss: %04x cs: %04x\n",
if ( !(iret_saved.flags & VGCF_in_syscall) )
{
- regs->entry_vector = 0;
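+ /* Clear only TRAP_syscall; the frame-state flags must survive. */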
+ regs->entry_vector &= ~TRAP_syscall;
regs->r11 = iret_saved.r11;
regs->rcx = iret_saved.rcx;
}
*/
#include <asm/x86_emulate.h>
+#include <asm/asm_defns.h> /* mark_regs_dirty() */
#include <asm/processor.h> /* current_cpu_info */
#include <asm/amd.h> /* cpu_has_amd_erratum() */
case 9: p = &regs->r9; break;
case 10: p = &regs->r10; break;
case 11: p = &regs->r11; break;
- case 12: p = &regs->r12; break;
- case 13: p = &regs->r13; break;
- case 14: p = &regs->r14; break;
- case 15: p = &regs->r15; break;
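+ /* Callers may modify R12-R15 through the returned pointer: flag the frame. */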
+ case 12: mark_regs_dirty(regs); p = &regs->r12; break;
+ case 13: mark_regs_dirty(regs); p = &regs->r13; break;
+ case 14: mark_regs_dirty(regs); p = &regs->r14; break;
+ case 15: mark_regs_dirty(regs); p = &regs->r15; break;
#endif
default: BUG(); p = NULL; break;
}
static void __prepare_to_wait(struct waitqueue_vcpu *wqv)
{
- char *cpu_info = (char *)get_cpu_info();
+ struct cpu_info *cpu_info = get_cpu_info();
struct vcpu *curr = current;
unsigned long dummy;
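+ /* The stack image captured below embeds the register frame; strip
+  * TRAP_regs_partial from it for the duration of the copy, restoring the
+  * original value at the end of this function. */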
+ u32 entry_vector = cpu_info->guest_cpu_user_regs.entry_vector;
+ cpu_info->guest_cpu_user_regs.entry_vector &= ~TRAP_regs_partial;
ASSERT(wqv->esp == 0);
/* Save current VCPU affinity; force wakeup on *this* CPU only. */
gdprintk(XENLOG_ERR, "Stack too large in %s\n", __FUNCTION__);
domain_crash_synchronous();
}
+
+ cpu_info->guest_cpu_user_regs.entry_vector = entry_vector;
}
static void __finish_wait(struct waitqueue_vcpu *wqv)
#define ASSERT_INTERRUPTS_ENABLED ASSERT_INTERRUPT_STATUS(nz)
#define ASSERT_INTERRUPTS_DISABLED ASSERT_INTERRUPT_STATUS(z)
+/*
+ * This flag is set in an exception frame when registers R12-R15 did not get
+ * saved.
+ */
+#define _TRAP_regs_partial 16
+#define TRAP_regs_partial (1 << _TRAP_regs_partial)
+/*
+ * This flag gets set in an exception frame when registers R12-R15 may have
+ * been modified from their originally saved values and hence need to be
+ * reloaded from the frame even though the normal C call flow would have
+ * preserved them.
+ *
+ * This flag being set implies _TRAP_regs_partial is unset. Restoring
+ * R12-R15 is thus
+ * - required when this flag is set,
+ * - safe whenever _TRAP_regs_partial is unset.
+ */
+#define _TRAP_regs_dirty 17
+#define TRAP_regs_dirty (1 << _TRAP_regs_dirty)
+
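+/*
+ * Typical life cycle: SAVE_VOLATILE stores type|TRAP_regs_partial on entry,
+ * SAVE_PRESERVED saves R12-R15 and clears _TRAP_regs_partial, and
+ * mark_regs_dirty() sets _TRAP_regs_dirty whenever the frame gets touched
+ * behind the compiler's back, forcing RESTORE_ALL to reload R12-R15.
+ */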
+#define mark_regs_dirty(r) ({ \
+ struct cpu_user_regs *r__ = (r); \
+ ASSERT(!((r__)->entry_vector & TRAP_regs_partial)); \
+ r__->entry_vector |= TRAP_regs_dirty; \
+})
+
#define SAVE_ALL \
addq $-(UREGS_error_code-UREGS_r15), %rsp; \
cld; \
movq %r15,UREGS_r15(%rsp); \
#ifdef __ASSEMBLY__
-.macro LOAD_C_CLOBBERED
+
+/*
+ * Save all registers that the C calling convention does not preserve, plus
+ * those the entry/exit code itself uses (%rbx, %rbp). Mark the frame as
+ * partial.
+ *
+ * @type: exception type
+ * @compat: R8-R15 don't need saving, and the frame is nevertheless complete
+ */
+.macro SAVE_VOLATILE type compat=0
+.if \compat
+ movl $\type,UREGS_entry_vector-UREGS_error_code(%rsp)
+.else
+ movl $\type|TRAP_regs_partial,\
+ UREGS_entry_vector-UREGS_error_code(%rsp)
+.endif
+ addq $-(UREGS_error_code-UREGS_r15),%rsp
+ cld
+ movq %rdi,UREGS_rdi(%rsp)
+ movq %rsi,UREGS_rsi(%rsp)
+ movq %rdx,UREGS_rdx(%rsp)
+ movq %rcx,UREGS_rcx(%rsp)
+ movq %rax,UREGS_rax(%rsp)
+.if !\compat
+ movq %r8,UREGS_r8(%rsp)
+ movq %r9,UREGS_r9(%rsp)
+ movq %r10,UREGS_r10(%rsp)
+ movq %r11,UREGS_r11(%rsp)
+.endif
+ movq %rbx,UREGS_rbx(%rsp)
+ movq %rbp,UREGS_rbp(%rsp)
+ SETUP_EXCEPTION_FRAME_POINTER(UREGS_rbp)
+.endm
+
+/*
+ * Complete a frame potentially only partially saved.
+ */
+.macro SAVE_PRESERVED
+ btrl $_TRAP_regs_partial,UREGS_entry_vector(%rsp)
+ jnc 987f
+ movq %r12,UREGS_r12(%rsp)
+ movq %r13,UREGS_r13(%rsp)
+ movq %r14,UREGS_r14(%rsp)
+ movq %r15,UREGS_r15(%rsp)
+987:
+.endm
+
+/*
+ * Reload registers not preserved by C code from frame.
+ *
+ * @compat: R8-R11 don't need reloading
+ *
+ * Because of how RESTORE_ALL uses it, this macro must preserve EFLAGS.ZF.
+ */
+.macro LOAD_C_CLOBBERED compat=0
+.if !\compat
movq UREGS_r11(%rsp),%r11
movq UREGS_r10(%rsp),%r10
movq UREGS_r9(%rsp),%r9
movq UREGS_r8(%rsp),%r8
+.endif
movq UREGS_rax(%rsp),%rax
movq UREGS_rcx(%rsp),%rcx
movq UREGS_rdx(%rsp),%rdx
movq UREGS_rdi(%rsp),%rdi
.endm
-.macro RESTORE_ALL adj=0
+/*
+ * Restore all previously saved registers.
+ *
+ * @adj: extra stack pointer adjustment to be folded into the adjustment done
+ * anyway at the end of the macro
+ * @compat: R8-R15 don't need reloading
+ */
+.macro RESTORE_ALL adj=0 compat=0
+.if !\compat
+ testl $TRAP_regs_dirty,UREGS_entry_vector(%rsp)
+.endif
+ LOAD_C_CLOBBERED \compat
+.if !\compat
+ jz 987f
movq UREGS_r15(%rsp),%r15
movq UREGS_r14(%rsp),%r14
movq UREGS_r13(%rsp),%r13
movq UREGS_r12(%rsp),%r12
- movq UREGS_rbp(%rsp),%rbp
+#ifndef NDEBUG
+ .subsection 1
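+ /* Frame complete and not dirty: the live R12-R15 must still match their
+  * saved copies; die (ud2) on any mismatch. */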
+987: testl $TRAP_regs_partial,UREGS_entry_vector(%rsp)
+ jnz 987f
+ cmpq UREGS_r15(%rsp),%r15
+ jne 789f
+ cmpq UREGS_r14(%rsp),%r14
+ jne 789f
+ cmpq UREGS_r13(%rsp),%r13
+ jne 789f
+ cmpq UREGS_r12(%rsp),%r12
+ je 987f
+789: ud2
+ .subsection 0
+#endif
+.endif
+987: movq UREGS_rbp(%rsp),%rbp
movq UREGS_rbx(%rsp),%rbx
- LOAD_C_CLOBBERED
subq $-(UREGS_error_code-UREGS_r15+\adj), %rsp
.endm
+
#endif
#ifdef PERF_COUNTERS