{
/*
* Force other vCPU-s of the affected guest to pick up L4 entry
- * changes (if any). Issue a flush IPI with empty operation mask to
- * facilitate this (including ourselves waiting for the IPI to
- * actually have arrived). Utilize the fact that FLUSH_VA_VALID is
- * meaningless without FLUSH_CACHE, but will allow to pass the no-op
- * check in flush_area_mask().
+ * changes (if any).
*/
unsigned int cpu = smp_processor_id();
cpumask_t *mask = per_cpu(scratch_cpumask, cpu);
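+ /* IPI every CPU the guest's page tables are dirty on, except ourselves. */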
cpumask_andnot(mask, pt_owner->dirty_cpumask, cpumask_of(cpu));
if ( !cpumask_empty(mask) )
- flush_area_mask(mask, ZERO_BLOCK_PTR, FLUSH_VA_VALID);
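+ /* Make this a real TLB flush (global pages included), not just a bare IPI. */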
+ flush_mask(mask, FLUSH_TLB_GLOBAL);
}
perfc_add(num_page_updates, i);
}
pl1e += l1_table_offset(linear);
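+ /* Translations of global pages survive CR3 writes, so never clone a mapping as global. */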
+ flags &= ~_PAGE_GLOBAL;
if ( l1e_get_flags(*pl1e) & _PAGE_PRESENT )
{
if ( rc )
panic("Error %d setting up PV root page table\n", rc);
if ( per_cpu(root_pgt, 0) )
+ {
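+ /* Run PV guests on the restricted page tables on the BSP, too. */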
get_cpu_info()->pv_cr3 = __pa(per_cpu(root_pgt, 0));
+ /*
+ * All entry points which may need to switch page tables have to start
+ * with interrupts off. Re-write what pv_trap_init() has put there.
+ */
+ _set_gate(idt_table + LEGACY_SYSCALL_VECTOR, SYS_DESC_irq_gate, 3,
+ &int80_direct_trap);
+ }
+
set_nr_sockets();
socket_cpumask = xzalloc_array(cpumask_t *, nr_sockets);
/* See lstar_enter for entry register state. */
ENTRY(cstar_enter)
- sti
+ /* sti could live here when we don't switch page tables below. */
CR4_PV32_RESTORE
movq 8(%rsp),%rax /* Restore %rax. */
movq $FLAT_KERNEL_SS,8(%rsp)
jz .Lcstar_cr3_okay
mov %rcx, STACK_CPUINFO_FIELD(xen_cr3)(%rbx)
neg %rcx
- write_cr3 rcx, rdi, rsi
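+ /* Deliberately a plain write, not the write_cr3 macro with its extra flushing logic. */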
+ mov %rcx, %cr3
movq $0, STACK_CPUINFO_FIELD(xen_cr3)(%rbx)
.Lcstar_cr3_okay:
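+ /* Only now, with Xen's page tables in place, is it safe to enable interrupts. */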
+ sti
movq STACK_CPUINFO_FIELD(current_vcpu)(%rbx), %rbx
movq VCPU_domain(%rbx),%rcx
* %ss must be saved into the space left by the trampoline.
*/
ENTRY(lstar_enter)
- sti
+ /* sti could live here when we don't switch page tables below. */
movq 8(%rsp),%rax /* Restore %rax. */
movq $FLAT_KERNEL_SS,8(%rsp)
pushq %r11
jz .Llstar_cr3_okay
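+ /* The field stays negated while the switch is in flight; nested entries key off the sign. */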
mov %rcx, STACK_CPUINFO_FIELD(xen_cr3)(%rbx)
neg %rcx
- write_cr3 rcx, rdi, rsi
+ mov %rcx, %cr3
movq $0, STACK_CPUINFO_FIELD(xen_cr3)(%rbx)
.Llstar_cr3_okay:
+ sti
movq STACK_CPUINFO_FIELD(current_vcpu)(%rbx), %rbx
testb $TF_kernel_mode,VCPU_thread_flags(%rbx)
jmp test_all_events
ENTRY(sysenter_entry)
- sti
+ /* sti could live here when we don't switch page tables below. */
pushq $FLAT_USER_SS
pushq $0
pushfq
jz .Lsyse_cr3_okay
mov %rcx, STACK_CPUINFO_FIELD(xen_cr3)(%rbx)
neg %rcx
- write_cr3 rcx, rdi, rsi
+ mov %rcx, %cr3
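+ /* Zero in xen_cr3 signals that Xen's own page tables are active. */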
movq $0, STACK_CPUINFO_FIELD(xen_cr3)(%rbx)
.Lsyse_cr3_okay:
+ sti
movq STACK_CPUINFO_FIELD(current_vcpu)(%rbx), %rbx
cmpb $0,VCPU_sysenter_disables_events(%rbx)
jz .Lint80_cr3_okay
mov %rcx, STACK_CPUINFO_FIELD(xen_cr3)(%rbx)
neg %rcx
- write_cr3 rcx, rdi, rsi
+ mov %rcx, %cr3
movq $0, STACK_CPUINFO_FIELD(xen_cr3)(%rbx)
.Lint80_cr3_okay:
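+ /* The int80 gate is an interrupt gate now (see the _set_gate() above), hence the explicit sti. */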
+ sti
cmpb $0,untrusted_msi(%rip)
UNLIKELY_START(ne, msi_check)
mov %rcx, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
neg %rcx
.Lintr_cr3_load:
- write_cr3 rcx, rdi, rsi
+ mov %rcx, %cr3
xor %ecx, %ecx
mov %rcx, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
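+ /* No sti counterpart here: interrupt gates enter with interrupts already disabled. */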
testb $3, UREGS_cs(%rsp)
mov %rcx, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
neg %rcx
.Lxcpt_cr3_load:
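+ /* Same CR3 dance as in the interrupt path above. */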
- write_cr3 rcx, rdi, rsi
+ mov %rcx, %cr3
xor %ecx, %ecx
mov %rcx, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
testb $3, UREGS_cs(%rsp)
jns .Ldblf_cr3_load
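+ /* Sign set: an interrupted switch; negate to recover the real CR3 value. */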
neg %rbx
.Ldblf_cr3_load:
- write_cr3 rbx, rdi, rsi
+ mov %rbx, %cr3
.Ldblf_cr3_okay:
movq %rsp,%rdi
mov %rcx, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
neg %rcx
.List_cr3_load:
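+ /* NMI and #MC can hit anywhere, including mid-switch, hence this separate instance of the logic. */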
- write_cr3 rcx, rdi, rsi
+ mov %rcx, %cr3
movq $0, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
.List_cr3_okay: