 void write_cr3(unsigned long cr3)
 {
-    unsigned long flags;
+    unsigned long flags, cr4 = read_cr4();
     u32 t;
 
     /* This non-reentrant function is sometimes called in interrupt context. */
@@ ... @@
     hvm_flush_guest_tlbs();
 
-#ifdef USER_MAPPINGS_ARE_GLOBAL
-    {
-        unsigned long cr4 = read_cr4();
-        write_cr4(cr4 & ~X86_CR4_PGE);
-        asm volatile ( "mov %0, %%cr3" : : "r" (cr3) : "memory" );
-        write_cr4(cr4);
-    }
-#else
+    write_cr4(cr4 & ~X86_CR4_PGE);
     asm volatile ( "mov %0, %%cr3" : : "r" (cr3) : "memory" );
-#endif
+    write_cr4(cr4);
 
     post_flush(t);
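With USER_MAPPINGS_ARE_GLOBAL now assumed everywhere, write_cr3() always pays for a full flush. The architectural rule is that a mov to %cr3 invalidates only non-global TLB entries, while clearing CR4.PGE invalidates global entries too, so the PGE toggle has to bracket the CR3 load. A minimal sketch of the idiom, reusing the read_cr4()/write_cr4() accessors from the hunk above (the helper name is illustrative, not Xen code):

    static inline void full_tlb_flush_sketch(unsigned long new_cr3)
    {
        unsigned long cr4 = read_cr4();

        write_cr4(cr4 & ~X86_CR4_PGE);   /* dropping PGE flushes global entries */
        asm volatile ( "mov %0, %%cr3" : : "r" (new_cr3) : "memory" );
        write_cr4(cr4);                  /* restore global-page support */
    }

Hoisting cr4 into the declaration line means CR4 is read once up front rather than in the middle of the flush sequence.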
@@ ... @@
         else
         {
             u32 t = pre_flush();
+            unsigned long cr4 = read_cr4();
 
             hvm_flush_guest_tlbs();
 
-#ifndef USER_MAPPINGS_ARE_GLOBAL
-            if ( !(flags & FLUSH_TLB_GLOBAL) || !(read_cr4() & X86_CR4_PGE) )
-            {
-                asm volatile ( "mov %0, %%cr3"
-                               : : "r" (read_cr3()) : "memory" );
-            }
-            else
-#endif
-            {
-                unsigned long cr4 = read_cr4();
-                write_cr4(cr4 & ~X86_CR4_PGE);
-                barrier();
-                write_cr4(cr4);
-            }
+            write_cr4(cr4 & ~X86_CR4_PGE);
+            barrier();
+            write_cr4(cr4);
 
             post_flush(t);
         }
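This hunk, apparently the corresponding branch of flush_area_local(), drops its #ifndef fallback the same way: a global flush is now always the PGE toggle, with barrier() keeping the compiler from reordering anything across the two CR4 writes. The t threaded from pre_flush() to post_flush(t) is Xen's TLB-flush clock; the sketch below shows only the idea, and the names are assumptions (the real implementation keeps per-CPU timestamps and handles clock wrap):

    static unsigned int tlbflush_clock_sketch;   /* global flush epoch */
    static unsigned int cpu_flush_time_sketch;   /* per-CPU in reality */

    static unsigned int pre_flush_sketch(void)
    {
        /* Claim a new epoch before the TLB is cleaned (atomically in reality). */
        return ++tlbflush_clock_sketch;
    }

    static void post_flush_sketch(unsigned int t)
    {
        /* Record that this CPU's TLB holds nothing older than epoch t. */
        cpu_flush_time_sketch = t;
    }

Freeing code can then compare a page's last-use stamp against each CPU's recorded flush time to decide whether an IPI-driven flush is still required.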
@@ ... @@
 static void put_superpage(unsigned long mfn);
 
 static uint32_t base_disallow_mask;
-#define L1_DISALLOW_MASK (base_disallow_mask | _PAGE_GNTTAB)
+/* Global bit is allowed to be set on L1 PTEs. Intended for user mappings. */
+#define L1_DISALLOW_MASK ((base_disallow_mask | _PAGE_GNTTAB) & ~_PAGE_GLOBAL)
 #define L2_DISALLOW_MASK (base_disallow_mask & ~_PAGE_PSE)
 
 #define l3_disallow_mask(d) (!is_pv_32on64_domain(d) ? \
@@ ... @@
 #define L4_DISALLOW_MASK (base_disallow_mask)
 
-#ifdef USER_MAPPINGS_ARE_GLOBAL
-/* Global bit is allowed to be set on L1 PTEs. Intended for user mappings. */
-#undef L1_DISALLOW_MASK
-#define L1_DISALLOW_MASK ((base_disallow_mask | _PAGE_GNTTAB) & ~_PAGE_GLOBAL)
-#endif
-
 #define l1_disallow_mask(d) \
     ((d != dom_io) && \
      (rangeset_is_empty((d)->iomem_caps) && \
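The disallow masks are what guest-supplied PTE flags are validated against, so folding ~_PAGE_GLOBAL into the one unconditional L1_DISALLOW_MASK definition is the change that actually legalises the global bit on PV guests' L1 entries. A sketch of the consuming check (the helper is illustrative; in Xen the test sits in the L1 entry validation path):

    /* Reject a guest L1e whose flags include any disallowed bit (sketch). */
    static int validate_l1e_flags_sketch(l1_pgentry_t l1e, struct domain *d)
    {
        uint32_t bad = l1e_get_flags(l1e) & l1_disallow_mask(d);

        return bad ? -EINVAL : 0;   /* real code also logs the offending bits */
    }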
@@ ... @@
     return rc;
 }
 
-#ifdef USER_MAPPINGS_ARE_GLOBAL
 #define adjust_guest_l1e(pl1e, d)                                    \
     do {                                                             \
         if ( likely(l1e_get_flags((pl1e)) & _PAGE_PRESENT) &&        \
              likely(!is_pv_32on64_domain(d)) )                       \
         {                                                            \
             /* A _PAGE_GUEST_KERNEL page must not become global. */  \
             if ( !(l1e_get_flags((pl1e)) & _PAGE_GUEST_KERNEL) )     \
                 l1e_add_flags((pl1e), (_PAGE_GLOBAL|_PAGE_USER));    \
             else                                                     \
                 l1e_add_flags((pl1e), _PAGE_USER);                   \
         }                                                            \
     } while ( 0 )
-#else
-#define adjust_guest_l1e(pl1e, d)                                    \
-    do {                                                             \
-        if ( likely(l1e_get_flags((pl1e)) & _PAGE_PRESENT) &&        \
-             likely(!is_pv_32on64_domain(d)) )                       \
-            l1e_add_flags((pl1e), _PAGE_USER);                       \
-    } while ( 0 )
-#endif
 
 #define adjust_guest_l2e(pl2e, d)                                    \
     do {                                                             \
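Only the global-pages flavour of adjust_guest_l1e() survives. For a present L1e of a 64-bit PV domain it always adds _PAGE_USER (PV kernels run in ring 3, so hardware user access is needed for kernel and user mappings alike), and it adds _PAGE_GLOBAL only to non-kernel entries, since a global guest-kernel mapping would stay visible in the TLB after switching to the user page table. A standalone model of that flag arithmetic (illustrative code, with the architectural PTE bit values written out):

    #include <stdint.h>

    #define PRESENT 0x001u   /* PTE bit 0 */
    #define USER    0x004u   /* PTE bit 2 */
    #define GLOBAL  0x100u   /* PTE bit 8 */

    /* Model of adjust_guest_l1e() for a 64-bit PV domain's flag word. */
    static uint32_t adjust_flags_model(uint32_t flags, int guest_kernel_pte)
    {
        if ( flags & PRESENT )
            flags |= guest_kernel_pte ? USER : (USER | GLOBAL);
        return flags;
    }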
@@ ... @@
     v->arch.flags ^= TF_kernel_mode;
     asm volatile ( "swapgs" );
     update_cr3(v);
-#ifdef USER_MAPPINGS_ARE_GLOBAL
     /* Don't flush user global mappings from the TLB. Don't tick TLB clock. */
     asm volatile ( "mov %0, %%cr3" : : "r" (v->arch.cr3) : "memory" );
-#else
-    write_ptbase(v);
-#endif
 
     if ( !(v->arch.flags & TF_kernel_mode) )
         return;
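This toggle_guest_mode()-style hunk is the payoff: flipping a 64-bit PV vcpu between kernel and user page tables no longer goes through write_ptbase(), which would funnel into write_cr3() above, toggle PGE, tick the flush clock, and so destroy exactly the global user mappings the rest of the patch makes persistent. The two flavours side by side (a sketch; the names are illustrative):

    /* Full flush: PGE toggled and flush clock ticked, as in write_cr3(). */
    static void switch_and_flush_sketch(struct vcpu *v)
    {
        write_cr3(v->arch.cr3);
    }

    /* Raw CR3 load: non-global entries are dropped, global ones survive. */
    static void switch_keeping_globals_sketch(struct vcpu *v)
    {
        asm volatile ( "mov %0, %%cr3" : : "r" (v->arch.cr3) : "memory" );
    }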
@@ ... @@
 /* Bit 22 of a 24-bit flag mask. This corresponds to bit 62 of a pte. */
 #define _PAGE_GNTTAB (1U<<22)
 
-#define PAGE_HYPERVISOR (__PAGE_HYPERVISOR | _PAGE_GLOBAL)
-#define PAGE_HYPERVISOR_NOCACHE (__PAGE_HYPERVISOR_NOCACHE | _PAGE_GLOBAL)
-
-#define USER_MAPPINGS_ARE_GLOBAL
-#ifdef USER_MAPPINGS_ARE_GLOBAL
 /*
  * Bit 12 of a 24-bit flag mask. This corresponds to bit 52 of a pte.
  * This is needed to distinguish between user and kernel PTEs since _PAGE_USER
  * is asserted for both.
  */
 #define _PAGE_GUEST_KERNEL (1U<<12)
-#else
-#define _PAGE_GUEST_KERNEL 0
-#endif
+
+#define PAGE_HYPERVISOR (__PAGE_HYPERVISOR | _PAGE_GLOBAL)
+#define PAGE_HYPERVISOR_NOCACHE (__PAGE_HYPERVISOR_NOCACHE | _PAGE_GLOBAL)
 
 #endif /* __X86_64_PAGE_H__ */
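The bit-number comments in this header describe how a 64-bit PTE's flag bits are packed into a 24-bit software mask: PTE bits 0-11 map to mask bits 0-11, and PTE bits 52-63 (the software-available high bits plus NX) map to mask bits 12-23. That is why _PAGE_GUEST_KERNEL at mask bit 12 is PTE bit 52 and _PAGE_GNTTAB at mask bit 22 is PTE bit 62. A self-contained sketch of the packing (helper and type names are assumptions, not the header's):

    #include <stdint.h>

    typedef uint64_t pte_t_sketch;

    /* Fold PTE bits 52-63 down beside bits 0-11: bit 52 -> 12, bit 62 -> 22. */
    static unsigned int pte_flags_sketch(pte_t_sketch pte)
    {
        return (unsigned int)(((pte >> 40) & 0xfff000u) | (pte & 0xfffu));
    }

    /* Inverse: expand a 24-bit flag mask back to PTE bit positions. */
    static pte_t_sketch pte_from_flags_sketch(unsigned int flags)
    {
        return ((pte_t_sketch)(flags & 0xfff000u) << 40) | (flags & 0xfffu);
    }

With _PAGE_GUEST_KERNEL distinguishing kernel from user PTEs (both carry _PAGE_USER for a PV guest), adjust_guest_l1e() above can decide which entries receive the global bit even though the hardware permission bits alone look identical.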