{
struct cpu_user_regs *regs = guest_cpu_user_regs();
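+ /*
+ * Frame pushed by the guest before the iret hypercall, from the
+ * stack pointer upwards: EAX, EIP, CS, EFLAGS, then (depending on
+ * the return context) ESP, SS and, for VM86, ES, DS, FS, GS.
+ */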
- /* Restore EAX (clobbered by hypercall) */
- if (copy_from_user(&regs->eax, (void __user *)regs->esp, 4))
+ /* Restore EAX (clobbered by hypercall). */
+ if ( copy_from_user(&regs->eax, (void __user *)regs->esp, 4) )
domain_crash_synchronous();
regs->esp += 4;
- /* Restore EFLAGS, CS and EIP */
- if (copy_from_user(&regs->eip, (void __user *)regs->esp, 12))
+ /* Restore EFLAGS, CS and EIP. */
+ if ( copy_from_user(&regs->eip, (void __user *)regs->esp, 12) )
domain_crash_synchronous();
- if (VM86_MODE(regs)) {
- /* return to VM86 mode: restore ESP,SS,ES,DS,FS and GS */
+ if ( VM86_MODE(regs) )
+ {
+ /* Return to VM86 mode: restore ESP, SS, ES, DS, FS and GS. */
- if(copy_from_user(&regs->esp, (void __user *)(regs->esp+12), 24))
+ if ( copy_from_user(&regs->esp, (void __user *)(regs->esp+12), 24) )
domain_crash_synchronous();
- } else if (RING_0(regs)) {
+ }
+ else if ( RING_0(regs) )
+ {
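+ /* x86_32 PV guest kernels run in ring 1, so an iret to ring 0 cannot be legitimate. */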
domain_crash_synchronous();
- } else if (RING_1(regs)) {
- /* return to ring 1: pop EFLAGS,CS and EIP */
+ }
+ else if ( RING_1(regs) )
+ {
+ /* Return to ring 1: pop EFLAGS, CS and EIP. */
regs->esp += 12;
- } else {
- /* return to ring 2/3: restore ESP and SS */
- if(copy_from_user(&regs->esp, (void __user *)(regs->esp+12), 8))
+ }
+ else
+ {
+ /* Return to ring 2/3: restore ESP and SS. */
+ if ( copy_from_user(&regs->esp, (void __user *)(regs->esp+12), 8) )
domain_crash_synchronous();
}
- /* Fixup EFLAGS */
+ /* Fixup EFLAGS. */
regs->eflags &= ~X86_EFLAGS_IOPL;
regs->eflags |= X86_EFLAGS_IF;
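+ /*
+ * IOPL stays under hypervisor control, and PV guests always run with
+ * EFLAGS.IF set: events are masked via evtchn_upcall_mask instead.
+ */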
- /* No longer in NMI context */
+ /* No longer in NMI context. */
clear_bit(_VCPUF_nmi_masked, &current->vcpu_flags);
- /* Restore upcall mask from saved value */
+ /* Restore upcall mask from saved value. */
current->vcpu_info->evtchn_upcall_mask = regs->saved_upcall_mask;
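+ /* This re-establishes the event mask that was in force when the frame was created. */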
- /* the hypercall exit path will overwrite eax
- * with this return value */
+ /*
+ * The hypercall exit path will overwrite EAX with this return
+ * value.
+ */
return regs->eax;
}
long do_iret(void)
{
struct cpu_user_regs *regs = guest_cpu_user_regs();
struct iret_context iret_saved;
struct vcpu *v = current;
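+ /*
+ * Fail if the frame cannot be read, or if no user pagetable is
+ * registered: x86_64 PV guests keep separate kernel and user
+ * pagetables.
+ */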
if ( unlikely(copy_from_user(&iret_saved, (void *)regs->rsp, sizeof(iret_saved))) ||
unlikely(pagetable_get_paddr(v->arch.guest_table_user) == 0) )
return -EFAULT;
- /* returning to user mode */
- if ((iret_saved.cs & 0x03) == 3)
+ /* Returning to user mode. */
+ if ( (iret_saved.cs & 0x03) == 3 )
toggle_guest_mode(v);
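+ /* toggle_guest_mode() switches onto the guest's user pagetable base. */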
regs->rip = iret_saved.rip;
regs->rcx = iret_saved.rcx;
}
- /* No longer in NMI context */
+ /* No longer in NMI context. */
clear_bit(_VCPUF_nmi_masked, &current->vcpu_flags);
/* Saved %rax gets written back to regs->rax in entry.S. */
uint64_t rax, r11, rcx, flags, rip, cs, rflags, rsp, ss;
/* Bottom of iret stack frame. */
};
-/* For compatibility with HYPERVISOR_switch_to_user which is the old
- * name for HYPERVISOR_iret */
+/*
+ * For compatibility with HYPERVISOR_switch_to_user, which is the old
+ * name for HYPERVISOR_iret.
+ */
struct switch_to_user {
/* Top of stack (%rsp at point of hypercall). */
uint64_t rax, r11, rcx, flags, rip, cs, rflags, rsp, ss;
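+ /* Field layout must match struct iret_context above. */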