free_vcpu_guest_context(NULL);
return NULL;
}
- __set_fixmap(idx - i, page_to_mfn(pg), __PAGE_HYPERVISOR);
+ __set_fixmap(idx - i, page_to_mfn(pg), __PAGE_HYPERVISOR_RW);
per_cpu(vgc_pages[i], cpu) = pg;
}
return (void *)fix_to_virt(idx);
spin_unlock(&dcache->lock);
- l1e_write(&MAPCACHE_L1ENT(idx), l1e_from_pfn(mfn, __PAGE_HYPERVISOR));
+ l1e_write(&MAPCACHE_L1ENT(idx), l1e_from_pfn(mfn, __PAGE_HYPERVISOR_RW));
out:
local_irq_restore(flags);
for ( i = 0; i < nr_pages; i++ )
{
v->arch.pv_vcpu.gdt_frames[i] = frames[i];
- l1e_write(&pl1e[i], l1e_from_pfn(frames[i], __PAGE_HYPERVISOR));
+ l1e_write(&pl1e[i], l1e_from_pfn(frames[i], __PAGE_HYPERVISOR_RW));
}
xfree(pfns);
if ( !IS_NIL(ppg) )
*ppg++ = pg;
l1tab[l1_table_offset(va)] =
- l1e_from_page(pg, __PAGE_HYPERVISOR | _PAGE_AVAIL0);
+ l1e_from_page(pg, __PAGE_HYPERVISOR_RW | _PAGE_AVAIL0);
l2e_add_flags(*pl2e, _PAGE_AVAIL0);
}
else
(unsigned long)__va(start),
start >> PAGE_SHIFT,
(__pa(&_end) + PAGE_SIZE - 1 - start) >> PAGE_SHIFT,
- __PAGE_HYPERVISOR|MAP_SMALL_PAGES);
+ __PAGE_HYPERVISOR_RW|MAP_SMALL_PAGES);
BUG_ON(start != xen_phys_start);
map_pages_to_xen(
XEN_VIRT_START,
{
unsigned long _p = (unsigned long)p;
unsigned long _l = (unsigned long)l;
- unsigned int flags = __PAGE_HYPERVISOR | MAP_SMALL_PAGES;
+ unsigned int flags = __PAGE_HYPERVISOR_RW | MAP_SMALL_PAGES;
/* Ensure we are dealing with a page-aligned whole number of pages. */
ASSERT((_p&~PAGE_MASK) == 0);
/* The only data mappings to be relocated are in the Xen area. */
pl2e = __va(__pa(l2_xenmap));
*pl2e++ = l2e_from_pfn(xen_phys_start >> PAGE_SHIFT,
- PAGE_HYPERVISOR | _PAGE_PSE);
+ PAGE_HYPERVISOR_RWX | _PAGE_PSE);
for ( i = 1; i < L2_PAGETABLE_ENTRIES; i++, pl2e++ )
{
if ( !(l2e_get_flags(*pl2e) & _PAGE_PRESENT) )
/* This range must not be passed to the boot allocator and
* must also not be mapped with _PAGE_GLOBAL. */
map_pages_to_xen((unsigned long)__va(map_e), PFN_DOWN(map_e),
- PFN_DOWN(e - map_e), __PAGE_HYPERVISOR);
+ PFN_DOWN(e - map_e), __PAGE_HYPERVISOR_RW);
}
if ( s < map_s )
{
if ( cpu_has_smap )
write_cr4(read_cr4() & ~X86_CR4_SMAP);
+ printk("%sNX (Execute Disable) protection %sactive\n",
+ cpu_has_nx ? XENLOG_INFO : XENLOG_WARNING "Warning: ",
+ cpu_has_nx ? "" : "not ");
+
/*
* We're going to setup domain0 using the module(s) that we stashed safely
* above our heap. The second module, if present, is an initrd ramdisk.
share_xen_page_with_privileged_guests(page, XENSHARE_readonly);
}
}
+
+ /* Mark low 16Mb of direct map NX if hardware supports it. */
+ if ( !cpu_has_nx )
+ return;
+
+ v = DIRECTMAP_VIRT_START + (1UL << 20);
+ l3e = l4e_to_l3e(idle_pg_table[l4_table_offset(v)])[l3_table_offset(v)];
+ ASSERT(l3e_get_flags(l3e) & _PAGE_PRESENT);
+ do {
+ l2e = l3e_to_l2e(l3e)[l2_table_offset(v)];
+ ASSERT(l2e_get_flags(l2e) & _PAGE_PRESENT);
+ if ( l2e_get_flags(l2e) & _PAGE_PSE )
+ {
+ l2e_add_flags(l2e, _PAGE_NX_BIT);
+ l3e_to_l2e(l3e)[l2_table_offset(v)] = l2e;
+ v += 1 << L2_PAGETABLE_SHIFT;
+ }
+ else
+ {
+ l1_pgentry_t l1e = l2e_to_l1e(l2e)[l1_table_offset(v)];
+
+ ASSERT(l1e_get_flags(l1e) & _PAGE_PRESENT);
+ l1e_add_flags(l1e, _PAGE_NX_BIT);
+ l2e_to_l1e(l2e)[l1_table_offset(v)] = l1e;
+ v += 1 << L1_PAGETABLE_SHIFT;
+ }
+ } while ( v < DIRECTMAP_VIRT_START + (16UL << 20) );
}
long subarch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
if ( i < spfn )
i = spfn;
ret = map_pages_to_xen((unsigned long)mfn_to_virt(i), i,
- epfn - i, __PAGE_HYPERVISOR);
+ epfn - i, __PAGE_HYPERVISOR_RW);
if ( ret )
return ret;
}
EFI_MEMORY_DESCRIPTOR *desc = efi_memmap + i;
u64 len = desc->NumberOfPages << EFI_PAGE_SHIFT;
unsigned long smfn, emfn;
- unsigned int prot = PAGE_HYPERVISOR;
+ unsigned int prot = PAGE_HYPERVISOR_RWX;
printk(XENLOG_INFO " %013" PRIx64 "-%013" PRIx64
" type=%u attr=%016" PRIx64 "\n",
if ( desc->Attribute & EFI_MEMORY_WP )
    prot &= ~_PAGE_RW;
if ( desc->Attribute & EFI_MEMORY_XP )
- prot |= _PAGE_NX_BIT;
+ prot |= _PAGE_NX;
if ( pfn_to_pdx(emfn - 1) < (DIRECTMAP_SIZE >> PAGE_SHIFT) &&
!(smfn & pfn_hole_mask) &&
#define _PAGE_AVAIL1 _AC(0x400,U)
#define _PAGE_AVAIL2 _AC(0x800,U)
#define _PAGE_AVAIL _AC(0xE00,U)
-#define _PAGE_PSE_PAT _AC(0x1000,U)
+#define _PAGE_PSE_PAT _AC(0x1000,U)
+#define _PAGE_NX (cpu_has_nx ? _PAGE_NX_BIT : 0)
/* non-architectural flags */
#define _PAGE_PAGED 0x2000U
#define _PAGE_SHARED 0x4000U
#define _PAGE_GNTTAB 0
#endif
+#define __PAGE_HYPERVISOR_RO (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NX)
+#define __PAGE_HYPERVISOR_RW (__PAGE_HYPERVISOR_RO | \
+ _PAGE_DIRTY | _PAGE_RW)
#define __PAGE_HYPERVISOR_RX (_PAGE_PRESENT | _PAGE_ACCESSED)
#define __PAGE_HYPERVISOR (__PAGE_HYPERVISOR_RX | \
_PAGE_DIRTY | _PAGE_RW)
*/
#define _PAGE_GUEST_KERNEL (1U<<12)
-#define PAGE_HYPERVISOR (__PAGE_HYPERVISOR | _PAGE_GLOBAL)
+#define PAGE_HYPERVISOR_RO (__PAGE_HYPERVISOR_RO | _PAGE_GLOBAL)
+#define PAGE_HYPERVISOR_RW (__PAGE_HYPERVISOR_RW | _PAGE_GLOBAL)
#define PAGE_HYPERVISOR_RX (__PAGE_HYPERVISOR_RX | _PAGE_GLOBAL)
-#define PAGE_HYPERVISOR_NOCACHE (__PAGE_HYPERVISOR_NOCACHE | _PAGE_GLOBAL)
+#define PAGE_HYPERVISOR_RWX (__PAGE_HYPERVISOR | _PAGE_GLOBAL)
+
+#ifdef __ASSEMBLY__
+/* Dependency on NX being available can't be expressed. */
+# define PAGE_HYPERVISOR PAGE_HYPERVISOR_RWX
+# define PAGE_HYPERVISOR_NOCACHE (__PAGE_HYPERVISOR_NOCACHE | _PAGE_GLOBAL)
+#else
+# define PAGE_HYPERVISOR PAGE_HYPERVISOR_RW
+# define PAGE_HYPERVISOR_NOCACHE (__PAGE_HYPERVISOR_NOCACHE | \
+ _PAGE_GLOBAL | _PAGE_NX)
+#endif
#endif /* __X86_64_PAGE_H__ */