(unsigned long *)regs->rsp :
(unsigned long *)n->arch.kernel_sp;
+ /* Set the kernel-mode indicator byte at the top of RFLAGS. */
+ ((char *)&regs->rflags)[7] = !!(n->arch.flags & TF_kernel_mode);
+
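+ /* toggle_guest_mode() activates the guest-kernel page tables, under */
+ /* which the kernel stack written below is mapped. */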
+ if ( !(n->arch.flags & TF_kernel_mode) )
+ toggle_guest_mode(n);
+
if ( put_user(regs->ss, rsp- 1) |
put_user(regs->rsp, rsp- 2) |
put_user(regs->rflags, rsp- 3) |
domain_crash();
}
- if ( !(n->arch.flags & TF_kernel_mode) )
- toggle_guest_mode(n);
-
regs->entry_vector = TRAP_syscall;
regs->rflags &= 0xFFFCBEFFUL;
regs->ss = __GUEST_SS;
loaddebug(&next_p->arch, 7);
}
-#ifdef CONFIG_VMX
if ( VMX_DOMAIN(next_p) )
{
/* Switch page tables. */
write_ptbase(next_p);

set_current(next_p);
/* Switch GDT and LDT. */
__asm__ __volatile__ ("lgdt %0" : "=m" (*next_p->arch.gdt));

__sti();
goto done;
}
-#endif
SET_FAST_TRAP(&next_p->arch);
struct domain *d = ed->domain;
unsigned long l1e;
+#if defined(__x86_64__)
+ /* If in user mode, switch to kernel mode just to read LDT mapping. */
+ extern void toggle_guest_mode(struct exec_domain *);
+ int user_mode = !(ed->arch.flags & TF_kernel_mode);
+#define TOGGLE_MODE() if ( user_mode ) toggle_guest_mode(ed)
+#elif defined(__i386__)
+#define TOGGLE_MODE() ((void)0)
+#endif
+
if ( unlikely(in_irq()) )
BUG();
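+ /* Bracket the linear_pg_table read so that the guest-kernel mapping */
+ /* of the LDT is visible while we peek at it. */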
+ TOGGLE_MODE();
__get_user(l1e, (unsigned long *)
&linear_pg_table[l1_linear_offset(ed->arch.ldt_base) + off]);
+ TOGGLE_MODE();
if ( unlikely(!(l1e & _PAGE_PRESENT)) ||
unlikely(!get_page_and_type(
ENTRY(vmx_asm_do_resume)
vmx_test_all_events:
GET_CURRENT(%ebx)
-/* test_all_events: */
+/*test_all_events:*/
xorl %ecx,%ecx
notl %ecx
cli # tests must not race interrupts
ALIGN
restore_all_guest:
- btr $_TF_failsafe_return,EDOMAIN_thread_flags(%ebx)
- jc failsafe_callback
testl $X86_EFLAGS_VM,XREGS_eflags(%esp)
jnz restore_all_vm86
FLT1: movl XREGS_ds(%esp),%ds
jmp error_code
DBLFLT1:GET_CURRENT(%ebx)
jmp test_all_events
-DBLFIX1:GET_CURRENT(%ebx)
- bts $_TF_failsafe_return,EDOMAIN_thread_flags(%ebx)
- jc domain_crash # cannot reenter failsafe code
- jmp test_all_events # will return via failsafe code
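+/* Fault while reloading guest selectors on exit: deliver the guest's */
+/* registered failsafe callback and zero the offending selectors.     */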
+failsafe_callback:
+ GET_CURRENT(%ebx)
+ leal EDOMAIN_trap_bounce(%ebx),%edx
+ movl EDOMAIN_failsafe_addr(%ebx),%eax
+ movl %eax,TRAPBOUNCE_eip(%edx)
+ movl EDOMAIN_failsafe_sel(%ebx),%eax
+ movw %ax,TRAPBOUNCE_cs(%edx)
+ movw $TBF_FAILSAFE,TRAPBOUNCE_flags(%edx)
+ call create_bounce_frame
+ xorl %eax,%eax
+ movl %eax,XREGS_ds(%esp)
+ movl %eax,XREGS_es(%esp)
+ movl %eax,XREGS_fs(%esp)
+ movl %eax,XREGS_gs(%esp)
+ jmp test_all_events
.previous
.section __pre_ex_table,"a"
.long FLT1,FIX1
.long FLT5,FIX5
.previous
.section __ex_table,"a"
- .long DBLFLT1,DBLFIX1
-.previous
-
-/* No special register assumptions */
-failsafe_callback:
- GET_CURRENT(%ebx)
- leal EDOMAIN_trap_bounce(%ebx),%edx
- movl EDOMAIN_failsafe_addr(%ebx),%eax
- movl %eax,TRAPBOUNCE_eip(%edx)
- movl EDOMAIN_failsafe_sel(%ebx),%eax
- movw %ax,TRAPBOUNCE_cs(%edx)
- movw $TBF_FAILSAFE,TRAPBOUNCE_flags(%edx)
- call create_bounce_frame
- popl %ebx
- popl %ecx
- popl %edx
- popl %esi
- popl %edi
- popl %ebp
- popl %eax
- addl $4,%esp
-FLT6: iret
-.section .fixup,"ax"
-FIX6: pushl %ebx
- GET_CURRENT(%ebx)
- orb $TF_failsafe_return,EDOMAIN_thread_flags(%ebx)
- pop %ebx
- jmp FIX5
-.previous
-.section __pre_ex_table,"a"
- .long FLT6,FIX6
+ .long DBLFLT1,failsafe_callback
.previous
ALIGN
testb $0xFF,VCPUINFO_upcall_pending(%eax)
jz restore_all_guest
/*process_guest_events:*/
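+ # Re-enable interrupts: a fault while building the frame simply crashes the domain.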
+ sti
leal EDOMAIN_trap_bounce(%ebx),%edx
movl EDOMAIN_event_addr(%ebx),%eax
movl %eax,TRAPBOUNCE_eip(%edx)
movl EDOMAIN_event_sel(%ebx),%eax
movw %ax,TRAPBOUNCE_cs(%edx)
call create_bounce_frame
movl EDOMAIN_vcpu_info(%ebx),%eax
movb $1,VCPUINFO_upcall_mask(%eax) # Upcalls are masked during delivery
- jmp restore_all_guest
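+ # Re-check for pending work before finally returning to the guest.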
+ jmp test_all_events
ALIGN
process_softirqs:
addl %ecx,%eax
addl $init_tss + 12,%eax
movl (%eax),%esi /* tss->esp1 */
-FLT7: movl 4(%eax),%gs /* tss->ss1 */
+FLT6: movl 4(%eax),%gs /* tss->ss1 */
testl $X86_EFLAGS_VM,XREGS_eflags+4(%esp)
jz nvm86_1
- subl $16,%esi /* push ES/DS/FS/GS (VM86 stack frame) */
- movl XREGS_es+4(%esp),%eax
-FLT8: movl %eax,%gs:(%esi)
- movl XREGS_ds+4(%esp),%eax
-FLT9: movl %eax,%gs:4(%esi)
- movl XREGS_fs+4(%esp),%eax
-FLT10: movl %eax,%gs:8(%esi)
- movl XREGS_gs+4(%esp),%eax
-FLT11: movl %eax,%gs:12(%esi)
+ subl $16,%esi /* push ES/DS/FS/GS (VM86 stack frame) */
+ movl XREGS_es+4(%esp),%eax
+FLT7: movl %eax,%gs:(%esi)
+ movl XREGS_ds+4(%esp),%eax
+FLT8: movl %eax,%gs:4(%esi)
+ movl XREGS_fs+4(%esp),%eax
+FLT9: movl %eax,%gs:8(%esi)
+ movl XREGS_gs+4(%esp),%eax
+FLT10: movl %eax,%gs:12(%esi)
nvm86_1:subl $8,%esi /* push SS/ESP (inter-priv iret) */
movl XREGS_esp+4(%esp),%eax
-FLT12: movl %eax,%gs:(%esi)
+FLT11: movl %eax,%gs:(%esi)
movl XREGS_ss+4(%esp),%eax
-FLT13: movl %eax,%gs:4(%esi)
+FLT12: movl %eax,%gs:4(%esi)
jmp 1f
ring1: /* obtain ss/esp from oldss/oldesp -- a ring-1 activation exists */
movl XREGS_esp+4(%esp),%esi
-FLT14: movl XREGS_ss+4(%esp),%gs
+FLT13: movl XREGS_ss+4(%esp),%gs
1: /* Construct a stack frame: EFLAGS, CS/EIP */
subl $12,%esi
movl XREGS_eip+4(%esp),%eax
-FLT15: movl %eax,%gs:(%esi)
+FLT14: movl %eax,%gs:(%esi)
movl XREGS_cs+4(%esp),%eax
-FLT16: movl %eax,%gs:4(%esi)
+FLT15: movl %eax,%gs:4(%esi)
movl XREGS_eflags+4(%esp),%eax
-FLT17: movl %eax,%gs:8(%esi)
+FLT16: movl %eax,%gs:8(%esi)
movb TRAPBOUNCE_flags(%edx),%cl
testb $TBF_EXCEPTION_ERRCODE,%cl
jz 1f
subl $4,%esi # push error_code onto guest frame
movl TRAPBOUNCE_error_code(%edx),%eax
-FLT18: movl %eax,%gs:(%esi)
+FLT17: movl %eax,%gs:(%esi)
testb $TBF_EXCEPTION_CR2,%cl
jz 2f
subl $4,%esi # push %cr2 onto guest frame
movl TRAPBOUNCE_cr2(%edx),%eax
-FLT19: movl %eax,%gs:(%esi)
+FLT18: movl %eax,%gs:(%esi)
1: testb $TBF_FAILSAFE,%cl
jz 2f
subl $16,%esi # add DS/ES/FS/GS to failsafe stack frame
testl $X86_EFLAGS_VM,XREGS_eflags+4(%esp)
jz nvm86_2
xorl %eax,%eax # VM86: we write zero selector values
-FLT20: movl %eax,%gs:(%esi)
-FLT21: movl %eax,%gs:4(%esi)
-FLT22: movl %eax,%gs:8(%esi)
-FLT23: movl %eax,%gs:12(%esi)
+FLT19: movl %eax,%gs:(%esi)
+FLT20: movl %eax,%gs:4(%esi)
+FLT21: movl %eax,%gs:8(%esi)
+FLT22: movl %eax,%gs:12(%esi)
jmp 2f
nvm86_2:movl XREGS_ds+4(%esp),%eax # non-VM86: write real selector values
-FLT24: movl %eax,%gs:(%esi)
+FLT23: movl %eax,%gs:(%esi)
movl XREGS_es+4(%esp),%eax
-FLT25: movl %eax,%gs:4(%esi)
+FLT24: movl %eax,%gs:4(%esi)
movl XREGS_fs+4(%esp),%eax
-FLT26: movl %eax,%gs:8(%esi)
+FLT25: movl %eax,%gs:8(%esi)
movl XREGS_gs+4(%esp),%eax
-FLT27: movl %eax,%gs:12(%esi)
-2: movb $0,TRAPBOUNCE_flags(%edx)
- testl $X86_EFLAGS_VM,XREGS_eflags+4(%esp)
+FLT26: movl %eax,%gs:12(%esi)
+2: testl $X86_EFLAGS_VM,XREGS_eflags+4(%esp)
jz nvm86_3
xorl %eax,%eax /* zero DS-GS, just as a real CPU would */
movl %eax,XREGS_ds+4(%esp)
movl %eax,XREGS_es+4(%esp)
movl %eax,XREGS_fs+4(%esp)
movl %eax,XREGS_gs+4(%esp)
nvm86_3:/* Rewrite our stack frame and return to guest-OS mode. */
andl $0xfffcbeff,XREGS_eflags+4(%esp)
movl %esi,XREGS_esp+4(%esp)
movzwl TRAPBOUNCE_cs(%edx),%eax
movl %eax,XREGS_cs+4(%esp)
movl TRAPBOUNCE_eip(%edx),%eax
movl %eax,XREGS_eip+4(%esp)
+ movb $0,TRAPBOUNCE_flags(%edx)
ret
-.section .fixup,"ax"
-FIX7: sti
- popl %esi
- addl $4,%esp # Discard create_b_frame return address
- pushfl # EFLAGS
- movl $__HYPERVISOR_CS,%eax
- pushl %eax # CS
- movl $DBLFLT2,%eax
- pushl %eax # EIP
- pushl %esi # error_code/entry_vector
- jmp error_code
-DBLFLT2:jmp process_guest_exception_and_events
-.previous
-.section __pre_ex_table,"a"
- .long FLT7,FIX7 , FLT8,FIX7 , FLT9,FIX7 , FLT10,FIX7
- .long FLT11,FIX7 , FLT12,FIX7 , FLT13,FIX7 , FLT14,FIX7
- .long FLT15,FIX7 , FLT16,FIX7 , FLT17,FIX7 , FLT18,FIX7
- .long FLT19,FIX7 , FLT20,FIX7 , FLT21,FIX7 , FLT22,FIX7
- .long FLT23,FIX7 , FLT24,FIX7 , FLT25,FIX7 , FLT26,FIX7 , FLT27,FIX7
-.previous
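+# A fault while writing the guest frame (FLT6-FLT26) is fatal: the
+# exception table below sends it straight to domain_crash.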
.section __ex_table,"a"
- .long DBLFLT2,domain_crash
+ .long FLT6,domain_crash , FLT7,domain_crash , FLT8,domain_crash
+ .long FLT9,domain_crash , FLT10,domain_crash , FLT11,domain_crash
+ .long FLT12,domain_crash , FLT13,domain_crash , FLT14,domain_crash
+ .long FLT15,domain_crash , FLT16,domain_crash , FLT17,domain_crash
+ .long FLT18,domain_crash , FLT19,domain_crash , FLT20,domain_crash
+ .long FLT21,domain_crash , FLT22,domain_crash , FLT23,domain_crash
+ .long FLT24,domain_crash , FLT25,domain_crash , FLT26,domain_crash
.previous
ALIGN
leal EDOMAIN_trap_bounce(%ebx),%edx
testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%edx)
jz test_all_events
- cli # create_bounce_frame needs CLI for pre-exceptions to work
call create_bounce_frame
jmp test_all_events
ALIGN
/* %rbx: struct exec_domain */
restore_all_guest:
- btr $_TF_failsafe_return,EDOMAIN_thread_flags(%rbx)
- jc failsafe_callback
RESTORE_ALL
testw $TRAP_syscall,4(%rsp)
jz iret_exit_to_guest
jmp error_code
DBLFLT1:GET_CURRENT(%rbx)
jmp test_all_events
-DBLFIX1:GET_CURRENT(%rbx)
- bts $_TF_failsafe_return,EDOMAIN_thread_flags(%rbx)
- jc domain_crash # cannot reenter failsafe code
- jmp test_all_events # will return via failsafe code
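+/* Fault while reloading guest selectors on exit: deliver the guest's */
+/* registered failsafe callback.                                      */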
+failsafe_callback:
+ GET_CURRENT(%rbx)
+ leaq EDOMAIN_trap_bounce(%rbx),%rdx
+ movq EDOMAIN_failsafe_addr(%rbx),%rax
+ movq %rax,TRAPBOUNCE_eip(%rdx)
+ movw $TBF_FAILSAFE,TRAPBOUNCE_flags(%rdx)
+ call create_bounce_frame
+ jmp test_all_events
.previous
.section __pre_ex_table,"a"
.quad FLT1,FIX1
.previous
.section __ex_table,"a"
- .quad DBLFLT1,DBLFIX1
-.previous
-
-/* No special register assumptions */
-failsafe_callback:
- GET_CURRENT(%rbx)
- leaq EDOMAIN_trap_bounce(%rbx),%rdx
- movq EDOMAIN_failsafe_addr(%rbx),%rax
- movq %rax,TRAPBOUNCE_eip(%rdx)
- movw $TBF_FAILSAFE,TRAPBOUNCE_flags(%rdx)
- call create_bounce_frame
- RESTORE_ALL
- addq $8,%rsp
-FLT2: iret
-
-.section .fixup,"ax"
-FIX2: pushq %rbx
- GET_CURRENT(%rbx)
- orb $TF_failsafe_return,EDOMAIN_thread_flags(%rbx)
- popq %rbx
- jmp FIX1
-.previous
-.section __pre_ex_table,"a"
- .quad FLT2,FIX2
+ .quad DBLFLT1,failsafe_callback
.previous
ALIGN
pushq $0
movl $TRAP_syscall,4(%rsp)
SAVE_ALL
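+ # All registers are now saved: it is safe to take interrupts.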
+ sti
GET_CURRENT(%rbx)
testb $TF_kernel_mode,EDOMAIN_thread_flags(%rbx)
jnz hypercall
/* %rbx: struct exec_domain */
hypercall:
- sti
movq %r10,%rcx
andq $(NR_hypercalls-1),%rax
leaq SYMBOL_NAME(hypercall_table)(%rip),%r10
testb $0xFF,VCPUINFO_upcall_pending(%rax)
jz restore_all_guest
/*process_guest_events:*/
+ sti
leaq EDOMAIN_trap_bounce(%rbx),%rdx
movq EDOMAIN_event_addr(%rbx),%rax
movq %rax,TRAPBOUNCE_eip(%rdx)
call create_bounce_frame
movq EDOMAIN_vcpu_info(%rbx),%rax
movb $1,VCPUINFO_upcall_mask(%rax) # Upcalls masked during delivery
- jmp restore_all_guest
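+ # Re-check for pending work before finally returning to the guest.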
+ jmp test_all_events
ALIGN
/* %rbx: struct exec_domain */
sti
call SYMBOL_NAME(do_softirq)
jmp test_all_events
-
+
/* CREATE A BASIC EXCEPTION FRAME ON GUEST OS STACK: */
/* { RCX, R11, [DS-GS,] [CR2,] [ERRCODE,] RIP, CS, RFLAGS, RSP, SS } */
/* %rdx: trap_bounce, %rbx: struct exec_domain */
/* Push new frame at existing %rsp if already in guest-OS mode. */
movq XREGS_rsp+8(%rsp),%rsi
testb $TF_kernel_mode,EDOMAIN_thread_flags(%rbx)
+ /* Set kernel-mode indicator byte (RFLAGS[63:56]). */
+ setnz XREGS_eflags+15(%rsp)
jnz 1f
/* Push new frame at registered guest-OS stack base. */
+ /* Guest was in user mode: switch it to kernel mode before building the frame. */
+ pushq %rdx # %rdx (trap_bounce) is caller-saved: preserve it
+ movq %rbx,%rdi # toggle_guest_mode(current)
+ call SYMBOL_NAME(toggle_guest_mode)
+ popq %rdx
+ /* Load the stack base only now: the call above may clobber %rsi. */
movq EDOMAIN_kernel_sp(%rbx),%rsi
1: movq $HYPERVISOR_VIRT_START,%rax
cmpq %rax,%rsi
jb 1f # In +ve address space? Then okay.
movq $HYPERVISOR_VIRT_END+60,%rax
cmpq %rax,%rsi
jb domain_crash # Above Xen private area? Then okay.
1: subq $40,%rsi
movq XREGS_ss+8(%rsp),%rax
-FLT3: movq %rax,32(%rsi) # SS
+FLT2: movq %rax,32(%rsi) # SS
movq XREGS_rsp+8(%rsp),%rax
-FLT4: movq %rax,24(%rsi) # RSP
+FLT3: movq %rax,24(%rsi) # RSP
movq XREGS_eflags+8(%rsp),%rax
-FLT5: movq %rax,16(%rsi) # RFLAGS
+FLT4: movq %rax,16(%rsi) # RFLAGS
movq XREGS_cs+8(%rsp),%rax
-FLT6: movq %rax,8(%rsi) # CS
+FLT5: movq %rax,8(%rsi) # CS
movq XREGS_rip+8(%rsp),%rax
-FLT7: movq %rax,(%rsi) # RIP
+FLT6: movq %rax,(%rsi) # RIP
movb TRAPBOUNCE_flags(%rdx),%cl
testb $TBF_EXCEPTION_ERRCODE,%cl
jz 1f
subq $8,%rsi
- movq TRAPBOUNCE_error_code(%rdx),%rax
-FLT8: movq %rax,(%rsi) # ERROR CODE
+ movl TRAPBOUNCE_error_code(%rdx),%eax
+FLT7: movq %rax,(%rsi) # ERROR CODE
testb $TBF_EXCEPTION_CR2,%cl
jz 2f
subq $8,%rsi
movq TRAPBOUNCE_cr2(%rdx),%rax
-FLT9: movq %rax,(%rsi) # CR2
+FLT8: movq %rax,(%rsi) # CR2
1: testb $TBF_FAILSAFE,%cl
jz 2f
subq $32,%rsi
movl %gs,%eax
-FLT10: movq %rax,24(%rsi) # GS
+FLT9: movq %rax,24(%rsi) # GS
movl %fs,%eax
-FLT11: movq %rax,16(%rsi) # FS
+FLT10: movq %rax,16(%rsi) # FS
movl %es,%eax
-FLT12: movq %rax,8(%rsi) # ES
+FLT11: movq %rax,8(%rsi) # ES
movl %ds,%eax
-FLT13: movq %rax,(%rsi) # DS
+FLT12: movq %rax,(%rsi) # DS
2: subq $16,%rsi
movq XREGS_r11+8(%rsp),%rax
-FLT14: movq %rax,8(%rsi) # R11
+FLT13: movq %rax,8(%rsi) # R11
movq XREGS_rcx+8(%rsp),%rax
-FLT15: movq %rax,(%rsi) # RCX
+FLT14: movq %rax,(%rsi) # RCX
/* Rewrite our stack frame and return to guest-OS mode. */
/* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
movl $TRAP_syscall,XREGS_entry_vector+8(%rsp)
andl $0xfffcbeff,XREGS_eflags+8(%rsp)
- movl $__GUEST_SS,XREGS_ss+8(%rsp)
+ movq $__GUEST_SS,XREGS_ss+8(%rsp)
movq %rsi,XREGS_rsp+8(%rsp)
- movl $__GUEST_CS,XREGS_cs+8(%rsp)
+ movq $__GUEST_CS,XREGS_cs+8(%rsp)
movq TRAPBOUNCE_eip(%rdx),%rax
movq %rax,XREGS_rip+8(%rsp)
movb $0,TRAPBOUNCE_flags(%rdx)
- testb $TF_kernel_mode,EDOMAIN_thread_flags(%rbx)
- movq %rbx,%rdi # toggle_guest_mode(current)
- jz SYMBOL_NAME(toggle_guest_mode)
ret
-.section .fixup,"ax"
-FIX3: sti
- popq %rsi
- addq $8,%rsp # Discard create_b_frame return address
- pushq $__HYPERVISOR_DS # SS
- leaq 8(%rsp),%rax
- pushq %rax # RSP
- pushf # RFLAGS
- pushq $__HYPERVISOR_CS # CS
- leaq DBLFLT2(%rip),%rax
- pushq %rax # RIP
- pushq %rsi # error_code/entry_vector
- jmp error_code
-DBLFLT2:jmp process_guest_exception_and_events
-.previous
-.section __pre_ex_table,"a"
- .quad FLT3,FIX3 , FLT4,FIX3 , FLT5,FIX3 , FLT6,FIX3
- .quad FLT7,FIX3 , FLT8,FIX3 , FLT9,FIX3 , FLT10,FIX3
- .quad FLT11,FIX3 , FLT12,FIX3 , FLT13,FIX3 , FLT14,FIX3 , FLT15,FIX3
-.previous
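+# A fault while writing the guest frame (FLT2-FLT14) is fatal: the
+# exception table below sends it straight to domain_crash.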
.section __ex_table,"a"
- .quad DBLFLT2,domain_crash
+ .quad FLT2,domain_crash , FLT3,domain_crash , FLT4,domain_crash
+ .quad FLT5,domain_crash , FLT6,domain_crash , FLT7,domain_crash
+ .quad FLT8,domain_crash , FLT9,domain_crash , FLT10,domain_crash
+ .quad FLT11,domain_crash , FLT12,domain_crash , FLT13,domain_crash
+ .quad FLT14,domain_crash
.previous
ALIGN
leaq EDOMAIN_trap_bounce(%rbx),%rdx
testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%rdx)
jz test_all_events
- cli # create_bounce_frame needs CLI for pre-exceptions to work
call create_bounce_frame
jmp test_all_events
#define TRAP_deferred_nmi 31
/* Set for entry via SYSCALL. Informs return code to use SYSRETQ not IRETQ. */
-#define TRAP_syscall 256 /* NB. Same as ECF_IN_SYSCALL */
+/* NB. Same as ECF_IN_SYSCALL. No bits in common with any other TRAP_* defn. */
+#define TRAP_syscall 256
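+/* The exit path tests for it directly on the saved entry_vector, e.g. */
+/* "testw $TRAP_syscall,4(%rsp)". */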
/*
* Non-fatal fault/trap handlers return an error code to the caller. If the
#define TBF_INTERRUPT 8
#define TBF_FAILSAFE 16
-/* arch_exec_domain' flags values */
-#define _TF_failsafe_return 0
-#define _TF_kernel_mode 1
-#define TF_failsafe_return (1<<_TF_failsafe_return)
+/* 'arch_exec_domain' flags values */
+#define _TF_kernel_mode 0
#define TF_kernel_mode (1<<_TF_kernel_mode)
#ifndef __ASSEMBLY__
u32 edi;
u32 ebp;
u32 eax;
- u16 error_code; /* private */
- union {
- u16 entry_vector; /* private */
- u16 flags;
- } PACKED;
+ u16 error_code; /* private */
+ u16 entry_vector; /* private */
u32 eip;
u32 cs;
u32 eflags;
* Restore RAX, R11, RCX, CS:RIP, RFLAGS, SS:RSP.
* All other registers are saved on hypercall entry and restored to user.
*/
+/* Guest exited in SYSCALL context? Return to guest with SYSRET? */
+#define ECF_IN_SYSCALL (1<<8)
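+/* NB. Deliberately equal to TRAP_syscall: the flag ends up in the saved */
+/* entry_vector field, where the exit path tests it. */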
struct switch_to_user {
/* Top of stack (%rsp at point of hypercall). */
u64 rax, r11, rcx, flags, rip, cs, rflags, rsp, ss;
union { u64 rdx, edx; } PACKED;
union { u64 rsi, esi; } PACKED;
union { u64 rdi, edi; } PACKED;
- u32 error_code; /* private */
- union {
- u32 entry_vector; /* private */
-#define ECF_IN_SYSCALL (1<<8) /* Guest synchronously interrupted by SYSCALL? */
- u32 flags;
- } PACKED;
+ u32 error_code; /* private */
+ u32 entry_vector; /* private */
union { u64 rip, eip; } PACKED;
u64 cs;
union { u64 rflags, eflags; } PACKED;