OFFSET(VCPU_kernel_ss, struct vcpu, arch.pv_vcpu.kernel_ss);
OFFSET(VCPU_iopl, struct vcpu, arch.pv_vcpu.iopl);
OFFSET(VCPU_guest_context_flags, struct vcpu, arch.vgc_flags);
+ OFFSET(VCPU_cr3, struct vcpu, arch.cr3);
OFFSET(VCPU_arch_msr, struct vcpu, arch.msr);
OFFSET(VCPU_nmi_pending, struct vcpu, nmi_pending);
OFFSET(VCPU_mce_pending, struct vcpu, mce_pending);
mov VCPUMSR_spec_ctrl_raw(%rdx), %r15d
/* Copy guest mappings and switch to per-CPU root page table. */
- mov %cr3, %r9
+ mov VCPU_cr3(%rbx), %r9
GET_STACK_END(dx)
mov STACK_CPUINFO_FIELD(pv_cr3)(%rdx), %rdi
movabs $PADDR_MASK & PAGE_MASK, %rsi
sub $(ROOT_PAGETABLE_FIRST_XEN_SLOT - \
ROOT_PAGETABLE_LAST_XEN_SLOT - 1) * 8, %rdi
rep movsq
+ mov STACK_CPUINFO_FIELD(cr4)(%rdx), %rdi
mov %r9, STACK_CPUINFO_FIELD(xen_cr3)(%rdx)
- write_cr3 rax, rdi, rsi
+ mov %rdi, %rsi
+ and $~X86_CR4_PGE, %rdi
+ mov %rdi, %cr4
+ mov %rax, %cr3
+ mov %rsi, %cr4
.Lrag_keep_cr3:
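
As an aid to review, here is a rough C sketch of what the new restore_all_guest sequence above does between the "rep movsq" copies and .Lrag_keep_cr3: copy the guest-owned root-table slots into the per-CPU root page table, then load that table with CR4.PGE cleared so the CR3 write also flushes global TLB entries. All names, constants, and helpers below are illustrative stand-ins rather than hypervisor code, and the sketch is simplified relative to the real assembly.

/* Illustrative sketch only -- not part of the patch. */
#include <stdint.h>

#define ROOT_PAGETABLE_ENTRIES        512
#define ROOT_PAGETABLE_FIRST_XEN_SLOT 256   /* assumed slot layout */
#define ROOT_PAGETABLE_LAST_XEN_SLOT  271   /* assumed slot layout */
#define X86_CR4_PGE                   0x80UL

/* Stand-ins for the real CR3/CR4 accessors. */
extern void stub_write_cr3(unsigned long val);
extern void stub_write_cr4(unsigned long val);

static void copy_and_switch_root(uint64_t *percpu_root,      /* pv_cr3's table */
                                 const uint64_t *guest_root, /* v->arch.cr3's table */
                                 unsigned long pv_cr3,
                                 unsigned long cr4)          /* cached CR4 value */
{
    unsigned int i;

    /* Copy the guest-owned slots; the Xen slots of the per-CPU table stay put. */
    for ( i = 0; i < ROOT_PAGETABLE_FIRST_XEN_SLOT; i++ )
        percpu_root[i] = guest_root[i];
    for ( i = ROOT_PAGETABLE_LAST_XEN_SLOT + 1; i < ROOT_PAGETABLE_ENTRIES; i++ )
        percpu_root[i] = guest_root[i];

    /*
     * Load the per-CPU root with CR4.PGE clear so the CR3 write flushes
     * global TLB entries too, then restore the original CR4.
     */
    stub_write_cr4(cr4 & ~X86_CR4_PGE);
    stub_write_cr3(pv_cr3);
    stub_write_cr4(cr4);
}
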
/* Restore stashed SPEC_CTRL value. */
* so "g" will have to do.
*/
UNLIKELY_START(g, exit_cr3)
- write_cr3 rax, rdi, rsi
+ mov %cr4, %rdi
+ mov %rdi, %rsi
+ and $~X86_CR4_PGE, %rdi
+ mov %rdi, %cr4
+ mov %rax, %cr3
+ mov %rsi, %cr4
UNLIKELY_END(exit_cr3)
/* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */
#define ASM_STAC ASM_AC(STAC)
#define ASM_CLAC ASM_AC(CLAC)
-.macro write_cr3 val:req, tmp1:req, tmp2:req
- mov %cr4, %\tmp1
- mov %\tmp1, %\tmp2
- and $~X86_CR4_PGE, %\tmp1
- mov %\tmp1, %cr4
- mov %\val, %cr3
- mov %\tmp2, %cr4
-.endm
-
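
The write_cr3 macro removed above implemented the usual "toggle CR4.PGE around a MOV to CR3" idiom: with PGE cleared, the CR3 write flushes global TLB entries as well, and restoring CR4 afterwards re-enables global pages. The call sites above now open-code the same three-step sequence, with restore_all_guest taking the CR4 value from the per-CPU cpuinfo field rather than reading %cr4 directly. A minimal standalone rendering of the idiom, assuming GCC-style inline assembly and stand-in helper names:

/* Illustration only -- not hypervisor code. */
#define X86_CR4_PGE 0x80UL   /* CR4.PGE (bit 7) */

static inline unsigned long stub_read_cr4(void)
{
    unsigned long cr4;

    asm volatile ( "mov %%cr4, %0" : "=r" (cr4) );
    return cr4;
}

static inline void stub_write_cr4(unsigned long cr4)
{
    asm volatile ( "mov %0, %%cr4" :: "r" (cr4) : "memory" );
}

static inline void stub_write_cr3_flushing_globals(unsigned long cr3)
{
    unsigned long cr4 = stub_read_cr4();

    stub_write_cr4(cr4 & ~X86_CR4_PGE);                        /* clear PGE */
    asm volatile ( "mov %0, %%cr3" :: "r" (cr3) : "memory" );  /* switch + full flush */
    stub_write_cr4(cr4);                                       /* restore PGE */
}
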
#define CR4_PV32_RESTORE \
667: ASM_NOP5; \
.pushsection .altinstr_replacement, "ax"; \