x86: switch default mapping attributes to non-executable
author Jan Beulich <jbeulich@suse.com>
Fri, 22 May 2015 08:50:14 +0000 (10:50 +0200)
committer Jan Beulich <jbeulich@suse.com>
Fri, 22 May 2015 08:50:14 +0000 (10:50 +0200)
Only a very limited subset of mappings needs to be executable; in
particular, the direct mapping should not be executable, so as to limit
the damage attackers can cause by exploiting security-relevant bugs.

The EFI change also includes an adjustment to set NX only when it is
supported by the hardware.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
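
As context for the diff below (an editorial illustration, not part of the
commit): the reworked attribute macros give every "default" hypervisor
mapping an NX bit via __PAGE_HYPERVISOR_RO/_RW, and that bit degrades to
zero on hardware without NX support. The standalone sketch below mimics the
composition; the flag values, the _PAGE_NX_BIT position and the cpu_has_nx
variable are stand-ins, not Xen's actual definitions.

/*
 * Standalone sketch: how the reworked mapping-attribute macros compose.
 * All values below are illustrative stand-ins for the asm-x86/page.h ones.
 */
#include <stdio.h>
#include <stdbool.h>

#define _PAGE_PRESENT   0x001u
#define _PAGE_RW        0x002u
#define _PAGE_ACCESSED  0x020u
#define _PAGE_DIRTY     0x040u
#define _PAGE_NX_BIT    0x800000u          /* illustrative bit position only */

static bool cpu_has_nx = true;             /* stand-in for Xen's CPUID probe */

/* As in the patch: NX silently degrades to 0 on hardware without support. */
#define _PAGE_NX (cpu_has_nx ? _PAGE_NX_BIT : 0)

#define __PAGE_HYPERVISOR_RO (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NX)
#define __PAGE_HYPERVISOR_RW (__PAGE_HYPERVISOR_RO | _PAGE_DIRTY | _PAGE_RW)
#define __PAGE_HYPERVISOR_RX (_PAGE_PRESENT | _PAGE_ACCESSED)
#define __PAGE_HYPERVISOR    (__PAGE_HYPERVISOR_RX | _PAGE_DIRTY | _PAGE_RW)

int main(void)
{
    /* Data mappings (direct map, per-domain pages, GDT frames) carry NX. */
    printf("__PAGE_HYPERVISOR_RW = %#x\n", __PAGE_HYPERVISOR_RW);
    /* Only mappings that explicitly ask for it stay executable (RWX/RX). */
    printf("__PAGE_HYPERVISOR    = %#x\n", __PAGE_HYPERVISOR);
    return 0;
}
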
xen/arch/x86/domain.c
xen/arch/x86/domain_page.c
xen/arch/x86/mm.c
xen/arch/x86/setup.c
xen/arch/x86/x86_64/mm.c
xen/common/efi/boot.c
xen/include/asm-x86/page.h
xen/include/asm-x86/x86_64/page.h

index 6e9464c77bc5644e5e19c7000e8e74ca78ade5da..db073a61d1243e32e8ad7176f5c830293a4edc4d 100644
@@ -293,7 +293,7 @@ struct vcpu_guest_context *alloc_vcpu_guest_context(void)
             free_vcpu_guest_context(NULL);
             return NULL;
         }
-        __set_fixmap(idx - i, page_to_mfn(pg), __PAGE_HYPERVISOR);
+        __set_fixmap(idx - i, page_to_mfn(pg), __PAGE_HYPERVISOR_RW);
         per_cpu(vgc_pages[i], cpu) = pg;
     }
     return (void *)fix_to_virt(idx);
index 158a1645d74fa9c38e6417b2c40f616db4d4017f..5f6f3974c31be88f5d42e4f86154e5d609c4e35e 100644
@@ -160,7 +160,7 @@ void *map_domain_page(unsigned long mfn)
 
     spin_unlock(&dcache->lock);
 
-    l1e_write(&MAPCACHE_L1ENT(idx), l1e_from_pfn(mfn, __PAGE_HYPERVISOR));
+    l1e_write(&MAPCACHE_L1ENT(idx), l1e_from_pfn(mfn, __PAGE_HYPERVISOR_RW));
 
  out:
     local_irq_restore(flags);
index 74e146d0275d81895f4be4fc33f3614e22d15a1e..7a7a85402dcc7c4da070d652fd8a3cc985357d65 100644
@@ -4412,7 +4412,7 @@ long set_gdt(struct vcpu *v,
     for ( i = 0; i < nr_pages; i++ )
     {
         v->arch.pv_vcpu.gdt_frames[i] = frames[i];
-        l1e_write(&pl1e[i], l1e_from_pfn(frames[i], __PAGE_HYPERVISOR));
+        l1e_write(&pl1e[i], l1e_from_pfn(frames[i], __PAGE_HYPERVISOR_RW));
     }
 
     xfree(pfns);
@@ -6003,7 +6003,7 @@ int create_perdomain_mapping(struct domain *d, unsigned long va,
                 if ( !IS_NIL(ppg) )
                     *ppg++ = pg;
                 l1tab[l1_table_offset(va)] =
-                    l1e_from_page(pg, __PAGE_HYPERVISOR | _PAGE_AVAIL0);
+                    l1e_from_page(pg, __PAGE_HYPERVISOR_RW | _PAGE_AVAIL0);
                 l2e_add_flags(*pl2e, _PAGE_AVAIL0);
             }
             else
@@ -6132,7 +6132,7 @@ void memguard_init(void)
         (unsigned long)__va(start),
         start >> PAGE_SHIFT,
         (__pa(&_end) + PAGE_SIZE - 1 - start) >> PAGE_SHIFT,
-        __PAGE_HYPERVISOR|MAP_SMALL_PAGES);
+        __PAGE_HYPERVISOR_RW|MAP_SMALL_PAGES);
     BUG_ON(start != xen_phys_start);
     map_pages_to_xen(
         XEN_VIRT_START,
@@ -6145,7 +6145,7 @@ static void __memguard_change_range(void *p, unsigned long l, int guard)
 {
     unsigned long _p = (unsigned long)p;
     unsigned long _l = (unsigned long)l;
-    unsigned int flags = __PAGE_HYPERVISOR | MAP_SMALL_PAGES;
+    unsigned int flags = __PAGE_HYPERVISOR_RW | MAP_SMALL_PAGES;
 
     /* Ensure we are dealing with a page-aligned whole number of pages. */
     ASSERT((_p&~PAGE_MASK) == 0);
index e808ea905f9b5657de55681c654866db1af2a9a5..8e21859bf2f358a3795765254e0bdfedd083a965 100644
@@ -901,7 +901,7 @@ void __init noreturn __start_xen(unsigned long mbi_p)
             /* The only data mappings to be relocated are in the Xen area. */
             pl2e = __va(__pa(l2_xenmap));
             *pl2e++ = l2e_from_pfn(xen_phys_start >> PAGE_SHIFT,
-                                   PAGE_HYPERVISOR | _PAGE_PSE);
+                                   PAGE_HYPERVISOR_RWX | _PAGE_PSE);
             for ( i = 1; i < L2_PAGETABLE_ENTRIES; i++, pl2e++ )
             {
                 if ( !(l2e_get_flags(*pl2e) & _PAGE_PRESENT) )
@@ -1088,7 +1088,7 @@ void __init noreturn __start_xen(unsigned long mbi_p)
             /* This range must not be passed to the boot allocator and
              * must also not be mapped with _PAGE_GLOBAL. */
             map_pages_to_xen((unsigned long)__va(map_e), PFN_DOWN(map_e),
-                             PFN_DOWN(e - map_e), __PAGE_HYPERVISOR);
+                             PFN_DOWN(e - map_e), __PAGE_HYPERVISOR_RW);
         }
         if ( s < map_s )
         {
@@ -1424,6 +1424,10 @@ void __init noreturn __start_xen(unsigned long mbi_p)
     if ( cpu_has_smap )
         write_cr4(read_cr4() & ~X86_CR4_SMAP);
 
+    printk("%sNX (Execute Disable) protection %sactive\n",
+           cpu_has_nx ? XENLOG_INFO : XENLOG_WARNING "Warning: ",
+           cpu_has_nx ? "" : "not ");
+
     /*
      * We're going to setup domain0 using the module(s) that we stashed safely
      * above our heap. The second module, if present, is an initrd ramdisk.
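
A side note on the printk added in the setup.c hunk above (an editorial
illustration, not part of the commit): the log level is chosen at run time
by routing it through the leading %s, and XENLOG_WARNING "Warning: " relies
on adjacent string-literal concatenation. The XENLOG_* values in this
standalone sketch are made-up stand-ins for Xen's real prefixes.

/* Standalone demo of the log-prefix idiom used by the new NX printk. */
#include <stdio.h>
#include <stdbool.h>

#define XENLOG_INFO    "<INFO> "           /* stand-in prefix */
#define XENLOG_WARNING "<WARN> "           /* stand-in prefix */

static void report_nx(bool cpu_has_nx)
{
    /* XENLOG_WARNING "Warning: " concatenates into a single literal,
     * so the severity prefix and the extra text travel together. */
    printf("%sNX (Execute Disable) protection %sactive\n",
           cpu_has_nx ? XENLOG_INFO : XENLOG_WARNING "Warning: ",
           cpu_has_nx ? "" : "not ");
}

int main(void)
{
    report_nx(true);   /* "<INFO> NX (Execute Disable) protection active" */
    report_nx(false);  /* "<WARN> Warning: NX ... protection not active"  */
    return 0;
}
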
index a771a014fdc803d8315790298252462727595b4b..e58de58bec70d2164fe0de47d3bb68f1aec31cf0 100644
@@ -895,6 +895,33 @@ void __init subarch_init_memory(void)
             share_xen_page_with_privileged_guests(page, XENSHARE_readonly);
         }
     }
+
+    /* Mark low 16Mb of direct map NX if hardware supports it. */
+    if ( !cpu_has_nx )
+        return;
+
+    v = DIRECTMAP_VIRT_START + (1UL << 20);
+    l3e = l4e_to_l3e(idle_pg_table[l4_table_offset(v)])[l3_table_offset(v)];
+    ASSERT(l3e_get_flags(l3e) & _PAGE_PRESENT);
+    do {
+        l2e = l3e_to_l2e(l3e)[l2_table_offset(v)];
+        ASSERT(l2e_get_flags(l2e) & _PAGE_PRESENT);
+        if ( l2e_get_flags(l2e) & _PAGE_PSE )
+        {
+            l2e_add_flags(l2e, _PAGE_NX_BIT);
+            l3e_to_l2e(l3e)[l2_table_offset(v)] = l2e;
+            v += 1 << L2_PAGETABLE_SHIFT;
+        }
+        else
+        {
+            l1_pgentry_t l1e = l2e_to_l1e(l2e)[l1_table_offset(v)];
+
+            ASSERT(l1e_get_flags(l1e) & _PAGE_PRESENT);
+            l1e_add_flags(l1e, _PAGE_NX_BIT);
+            l2e_to_l1e(l2e)[l1_table_offset(v)] = l1e;
+            v += 1 << L1_PAGETABLE_SHIFT;
+        }
+    } while ( v < DIRECTMAP_VIRT_START + (16UL << 20) );
 }
 
 long subarch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
@@ -1359,7 +1386,7 @@ int memory_add(unsigned long spfn, unsigned long epfn, unsigned int pxm)
         if ( i < spfn )
             i = spfn;
         ret = map_pages_to_xen((unsigned long)mfn_to_virt(i), i,
-                               epfn - i, __PAGE_HYPERVISOR);
+                               epfn - i, __PAGE_HYPERVISOR_RW);
         if ( ret )
             return ret;
     }
index f5e179beac440fff4a16869f24091e00f0839815..ef8476c67646bbb7a05ba91529637d8426eb9ee1 100644
@@ -1162,7 +1162,7 @@ void __init efi_init_memory(void)
         EFI_MEMORY_DESCRIPTOR *desc = efi_memmap + i;
         u64 len = desc->NumberOfPages << EFI_PAGE_SHIFT;
         unsigned long smfn, emfn;
-        unsigned int prot = PAGE_HYPERVISOR;
+        unsigned int prot = PAGE_HYPERVISOR_RWX;
 
         printk(XENLOG_INFO " %013" PRIx64 "-%013" PRIx64
                            " type=%u attr=%016" PRIx64 "\n",
@@ -1195,7 +1195,7 @@ void __init efi_init_memory(void)
         if ( desc->Attribute & EFI_MEMORY_WP )
             prot &= ~_PAGE_RW;
         if ( desc->Attribute & EFI_MEMORY_XP )
-            prot |= _PAGE_NX_BIT;
+            prot |= _PAGE_NX;
 
         if ( pfn_to_pdx(emfn - 1) < (DIRECTMAP_SIZE >> PAGE_SHIFT) &&
              !(smfn & pfn_hole_mask) &&
index ef1e86fb1c56b4837064f5df4d5310b216eea9c9..e26daafc8b9bb943591cf8c61cd0a8f331e99f31 100644
@@ -306,7 +306,8 @@ void efi_update_l4_pgtable(unsigned int l4idx, l4_pgentry_t);
 #define _PAGE_AVAIL1   _AC(0x400,U)
 #define _PAGE_AVAIL2   _AC(0x800,U)
 #define _PAGE_AVAIL    _AC(0xE00,U)
-#define _PAGE_PSE_PAT _AC(0x1000,U)
+#define _PAGE_PSE_PAT  _AC(0x1000,U)
+#define _PAGE_NX       (cpu_has_nx ? _PAGE_NX_BIT : 0)
 /* non-architectural flags */
 #define _PAGE_PAGED   0x2000U
 #define _PAGE_SHARED  0x4000U
@@ -323,6 +324,9 @@ void efi_update_l4_pgtable(unsigned int l4idx, l4_pgentry_t);
 #define _PAGE_GNTTAB   0
 #endif
 
+#define __PAGE_HYPERVISOR_RO      (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NX)
+#define __PAGE_HYPERVISOR_RW      (__PAGE_HYPERVISOR_RO | \
+                                   _PAGE_DIRTY | _PAGE_RW)
 #define __PAGE_HYPERVISOR_RX      (_PAGE_PRESENT | _PAGE_ACCESSED)
 #define __PAGE_HYPERVISOR         (__PAGE_HYPERVISOR_RX | \
                                    _PAGE_DIRTY | _PAGE_RW)
index fd2f700ee8a7c21e0f6021c2441204f777994d36..19ab4d05b43d96a55317eb07aaca09b4c007b2a6 100644
@@ -147,9 +147,20 @@ typedef l4_pgentry_t root_pgentry_t;
  */
 #define _PAGE_GUEST_KERNEL (1U<<12)
 
-#define PAGE_HYPERVISOR         (__PAGE_HYPERVISOR         | _PAGE_GLOBAL)
+#define PAGE_HYPERVISOR_RO      (__PAGE_HYPERVISOR_RO      | _PAGE_GLOBAL)
+#define PAGE_HYPERVISOR_RW      (__PAGE_HYPERVISOR_RW      | _PAGE_GLOBAL)
 #define PAGE_HYPERVISOR_RX      (__PAGE_HYPERVISOR_RX      | _PAGE_GLOBAL)
-#define PAGE_HYPERVISOR_NOCACHE (__PAGE_HYPERVISOR_NOCACHE | _PAGE_GLOBAL)
+#define PAGE_HYPERVISOR_RWX     (__PAGE_HYPERVISOR         | _PAGE_GLOBAL)
+
+#ifdef __ASSEMBLY__
+/* Dependency on NX being available can't be expressed. */
+# define PAGE_HYPERVISOR         PAGE_HYPERVISOR_RWX
+# define PAGE_HYPERVISOR_NOCACHE (__PAGE_HYPERVISOR_NOCACHE | _PAGE_GLOBAL)
+#else
+# define PAGE_HYPERVISOR         PAGE_HYPERVISOR_RW
+# define PAGE_HYPERVISOR_NOCACHE (__PAGE_HYPERVISOR_NOCACHE | \
+                                  _PAGE_GLOBAL | _PAGE_NX)
+#endif
 
 #endif /* __X86_64_PAGE_H__ */