Split XENFEAT_writable_mmu_structures into
XENFEAT_writable_page_tables/XENFEAT_writable_descriptor_tables.
Rename the make_readonly/writable functions back to their old
names; they now accept a feature flag that they test
before performing their operation.
Signed-off-by: Keir Fraser <keir@xensource.com>
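
A minimal sketch of the new calling convention before the diff (caller
side only; pte_page, ldt and npages are placeholder names, not
identifiers from this patch): each helper takes the XENFEAT_* flag
whose presence makes its remapping unnecessary, and returns early when
the hypervisor advertises that flag.

	/* Page-table pages key off XENFEAT_writable_page_tables ... */
	make_page_readonly(pte_page, XENFEAT_writable_page_tables);

	/* ... GDT/LDT pages key off XENFEAT_writable_descriptor_tables. */
	make_pages_readonly(ldt, npages, XENFEAT_writable_descriptor_tables);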
va < gdt_descr->address + gdt_descr->size;
va += PAGE_SIZE, f++) {
frames[f] = virt_to_mfn(va);
- make_lowmem_mmu_page_readonly((void *)va);
+ make_lowmem_page_readonly(
+ (void *)va, XENFEAT_writable_descriptor_tables);
}
if (HYPERVISOR_set_gdt(frames, gdt_descr->size / 8))
BUG();
cpumask_t mask;
preempt_disable();
#endif
- make_mmu_pages_readonly(pc->ldt, (pc->size * LDT_ENTRY_SIZE) /
- PAGE_SIZE);
+ make_pages_readonly(
+ pc->ldt,
+ (pc->size * LDT_ENTRY_SIZE) / PAGE_SIZE,
+ XENFEAT_writable_descriptor_tables);
load_LDT(pc);
#ifdef CONFIG_SMP
mask = cpumask_of_cpu(smp_processor_id());
#endif
}
if (oldsize) {
- make_mmu_pages_writable(oldldt, (oldsize * LDT_ENTRY_SIZE) /
- PAGE_SIZE);
+ make_pages_writable(
+ oldldt,
+ (oldsize * LDT_ENTRY_SIZE) / PAGE_SIZE,
+ XENFEAT_writable_descriptor_tables);
if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE)
vfree(oldldt);
else
if (err < 0)
return err;
memcpy(new->ldt, old->ldt, old->size*LDT_ENTRY_SIZE);
- make_mmu_pages_readonly(new->ldt, (new->size * LDT_ENTRY_SIZE) /
- PAGE_SIZE);
+ make_pages_readonly(
+ new->ldt,
+ (new->size * LDT_ENTRY_SIZE) / PAGE_SIZE,
+ XENFEAT_writable_descriptor_tables);
return 0;
}
if (mm->context.size) {
if (mm == current->active_mm)
clear_LDT();
- make_mmu_pages_writable(mm->context.ldt,
- (mm->context.size * LDT_ENTRY_SIZE) /
- PAGE_SIZE);
+ make_pages_writable(
+ mm->context.ldt,
+ (mm->context.size * LDT_ENTRY_SIZE) / PAGE_SIZE,
+ XENFEAT_writable_descriptor_tables);
if (mm->context.size*LDT_ENTRY_SIZE > PAGE_SIZE)
vfree(mm->context.ldt);
else
* default LDT is a single-entry callgate to lcall7 for iBCS
* and a callgate to lcall27 for Solaris/x86 binaries
*/
- make_lowmem_mmu_page_readonly(&default_ldt[0]);
+ make_lowmem_page_readonly(
+ &default_ldt[0], XENFEAT_writable_descriptor_tables);
/*
* Should be a barrier for any external CPU state.
#ifdef CONFIG_X86_PAE
pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
- make_lowmem_mmu_page_readonly(pmd_table);
+ make_lowmem_page_readonly(pmd_table, XENFEAT_writable_page_tables);
set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
pud = pud_offset(pgd, 0);
if (pmd_table != pmd_offset(pud, 0))
{
if (pmd_none(*pmd)) {
pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
- make_lowmem_mmu_page_readonly(page_table);
+ make_lowmem_page_readonly(page_table,
+ XENFEAT_writable_page_tables);
set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
if (page_table != pte_offset_kernel(pmd, 0))
BUG();
{
pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
if (pte)
- make_lowmem_mmu_page_readonly(pte);
+ make_lowmem_page_readonly(pte, XENFEAT_writable_page_tables);
return pte;
}
pmd_t *kpmd = pmd_offset(kpud, v);
pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
memcpy(pmd, kpmd, PAGE_SIZE);
- make_lowmem_mmu_page_readonly(pmd);
+ make_lowmem_page_readonly(
+ pmd, XENFEAT_writable_page_tables);
}
pgd_list_add(pgd);
spin_unlock_irqrestore(&pgd_lock, flags);
spin_unlock_irqrestore(&pgd_lock, flags);
for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
- make_lowmem_mmu_page_writable(pmd);
+ make_lowmem_page_writable(
+ pmd, XENFEAT_writable_page_tables);
memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
kmem_cache_free(pmd_cache, pmd);
}
}
#ifndef CONFIG_XEN_SHADOW_MODE
-void make_lowmem_mmu_page_readonly(void *va)
+void make_lowmem_page_readonly(void *va, unsigned int feature)
{
pte_t *pte;
int rc;
- if (xen_feature(writable_mmu_structures))
+ if (xen_feature(feature))
return;
pte = virt_to_ptep(va);
BUG_ON(rc);
}
-void make_lowmem_mmu_page_writable(void *va)
+void make_lowmem_page_writable(void *va, unsigned int feature)
{
pte_t *pte;
int rc;
- if (xen_feature(writable_mmu_structures))
+ if (xen_feature(feature))
return;
pte = virt_to_ptep(va);
BUG_ON(rc);
}
-void make_mmu_page_readonly(void *va)
+void make_page_readonly(void *va, unsigned int feature)
{
pte_t *pte;
int rc;
- if (xen_feature(writable_mmu_structures))
+ if (xen_feature(feature))
return;
pte = virt_to_ptep(va);
kmap_flush_unused(); /* flush stale writable kmaps */
else
#endif
- make_lowmem_mmu_page_readonly(
- phys_to_virt(pfn << PAGE_SHIFT));
+ make_lowmem_page_readonly(
+ phys_to_virt(pfn << PAGE_SHIFT), feature);
}
}
-void make_mmu_page_writable(void *va)
+void make_page_writable(void *va, unsigned int feature)
{
pte_t *pte;
int rc;
- if (xen_feature(writable_mmu_structures))
+ if (xen_feature(feature))
return;
pte = virt_to_ptep(va);
#ifdef CONFIG_HIGHMEM
if (pfn < highstart_pfn)
#endif
- make_lowmem_mmu_page_writable(
- phys_to_virt(pfn << PAGE_SHIFT));
+ make_lowmem_page_writable(
+ phys_to_virt(pfn << PAGE_SHIFT), feature);
}
}
-void make_mmu_pages_readonly(void *va, unsigned int nr)
+void make_pages_readonly(void *va, unsigned int nr, unsigned int feature)
{
- if (xen_feature(writable_mmu_structures))
+ if (xen_feature(feature))
return;
while (nr-- != 0) {
- make_mmu_page_readonly(va);
+ make_page_readonly(va, feature);
va = (void *)((unsigned long)va + PAGE_SIZE);
}
}
-void make_mmu_pages_writable(void *va, unsigned int nr)
+void make_pages_writable(void *va, unsigned int nr, unsigned int feature)
{
- if (xen_feature(writable_mmu_structures))
+ if (xen_feature(feature))
return;
+
while (nr-- != 0) {
- make_mmu_page_writable(va);
+ make_page_writable(va, feature);
va = (void *)((unsigned long)va + PAGE_SIZE);
}
}
*
* Xen feature flags.
*
- * Copyright (c) 2006, Ian Campbell
+ * Copyright (c) 2006, Ian Campbell, XenSource Inc.
*/
#include <linux/types.h>
#include <linux/cache.h>
#include <asm/hypervisor.h>
#include <asm-xen/features.h>
-/* When we rebase to a more recent version of Linux we can use __read_mostly here. */
+/* When we rebase to a more recent Linux we can use __read_mostly here. */
unsigned long xen_features[XENFEAT_NR_SUBMAPS] __cacheline_aligned;
void setup_xen_features(void)
{
- uint32_t *flags = (uint32_t *)&xen_features[0];
- xen_feature_info_t fi;
- int i;
+ uint32_t *flags = (uint32_t *)&xen_features[0];
+ xen_feature_info_t fi;
+ int i;
- for (i=0; i<XENFEAT_NR_SUBMAPS; i++) {
- fi.submap_idx = i;
- if (HYPERVISOR_xen_version(XENVER_get_features, &fi) < 0)
- break;
- flags[i] = fi.submap;
- }
+ for (i=0; i<XENFEAT_NR_SUBMAPS; i++) {
+ fi.submap_idx = i;
+ if (HYPERVISOR_xen_version(XENVER_get_features, &fi) < 0)
+ break;
+ flags[i] = fi.submap;
+ }
}
+/*
+ * Local variables:
+ * c-file-style: "linux"
+ * indent-tabs-mode: t
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * tab-width: 8
+ * End:
+ */
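
A minimal usage sketch, assuming the usual early-boot flow (this call
site is hypothetical and not part of the patch): setup_xen_features()
populates the bitmap once, after which flags are queried by bit number.

	setup_xen_features();
	if (xen_feature(XENFEAT_writable_page_tables))
		printk("Xen: guest may write its page tables directly\n");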
memcpy((void *)cpu_gdt_descr[cpu].address,
(void *)cpu_gdt_descr[0].address,
cpu_gdt_descr[0].size);
- make_mmu_page_readonly((void *)cpu_gdt_descr[cpu].address);
+ make_page_readonly(
+ (void *)cpu_gdt_descr[cpu].address,
+ XENFEAT_writable_descriptor_tables);
cpu_set(cpu, cpu_possible_map);
#ifdef CONFIG_HOTPLUG_CPU
preempt_disable();
#endif
- make_mmu_pages_readonly(pc->ldt, (pc->size * LDT_ENTRY_SIZE) /
- PAGE_SIZE);
+ make_pages_readonly(
+ pc->ldt,
+ (pc->size * LDT_ENTRY_SIZE) / PAGE_SIZE,
+ XENFEAT_writable_descriptor_tables);
load_LDT(pc);
#ifdef CONFIG_SMP
mask = cpumask_of_cpu(smp_processor_id());
#endif
}
if (oldsize) {
- make_mmu_pages_writable(oldldt, (oldsize * LDT_ENTRY_SIZE) /
- PAGE_SIZE);
+ make_pages_writable(
+ oldldt,
+ (oldsize * LDT_ENTRY_SIZE) / PAGE_SIZE,
+ XENFEAT_writable_descriptor_tables);
if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE)
vfree(oldldt);
else
if (err < 0)
return err;
memcpy(new->ldt, old->ldt, old->size*LDT_ENTRY_SIZE);
- make_mmu_pages_readonly(new->ldt, (new->size * LDT_ENTRY_SIZE) /
- PAGE_SIZE);
+ make_pages_readonly(
+ new->ldt,
+ (new->size * LDT_ENTRY_SIZE) / PAGE_SIZE,
+ XENFEAT_writable_descriptor_tables);
return 0;
}
if (mm->context.size) {
if (mm == current->active_mm)
clear_LDT();
- make_mmu_pages_writable(mm->context.ldt,
- (mm->context.size * LDT_ENTRY_SIZE) /
- PAGE_SIZE);
+ make_pages_writable(
+ mm->context.ldt,
+ (mm->context.size * LDT_ENTRY_SIZE) / PAGE_SIZE,
+ XENFEAT_writable_descriptor_tables);
if (mm->context.size*LDT_ENTRY_SIZE > PAGE_SIZE)
vfree(mm->context.ldt);
else
va < gdt_descr->address + gdt_descr->size;
va += PAGE_SIZE, f++) {
frames[f] = virt_to_mfn(va);
- make_mmu_page_readonly((void *)va);
+ make_page_readonly(
+ (void *)va, XENFEAT_writable_descriptor_tables);
}
if (HYPERVISOR_set_gdt(frames, gdt_descr->size /
sizeof (struct desc_struct)))
(((mfn_to_pfn((addr) >> PAGE_SHIFT)) << PAGE_SHIFT) + \
__START_KERNEL_map)))
-static void early_make_mmu_page_readonly(void *va)
+static void early_make_page_readonly(void *va, unsigned int feature)
{
unsigned long addr, _va = (unsigned long)va;
pte_t pte, *ptep;
unsigned long *page = (unsigned long *) init_level4_pgt;
- if (xen_feature(writable_mmu_structures))
+ if (xen_feature(feature))
return;
addr = (unsigned long) page[pgd_index(_va)];
BUG();
}
-void make_mmu_page_readonly(void *va)
+void make_page_readonly(void *va, unsigned int feature)
{
pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t pte, *ptep;
unsigned long addr = (unsigned long) va;
- if (xen_feature(writable_mmu_structures))
+ if (xen_feature(feature))
return;
pgd = pgd_offset_k(addr);
xen_l1_entry_update(ptep, pte); /* fallback */
if ((addr >= VMALLOC_START) && (addr < VMALLOC_END))
- make_mmu_page_readonly(__va(pte_pfn(pte) << PAGE_SHIFT));
+ make_page_readonly(__va(pte_pfn(pte) << PAGE_SHIFT), feature);
}
-void make_mmu_page_writable(void *va)
+void make_page_writable(void *va, unsigned int feature)
{
pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t pte, *ptep;
unsigned long addr = (unsigned long) va;
- if (xen_feature(writable_mmu_structures))
+ if (xen_feature(feature))
return;
pgd = pgd_offset_k(addr);
xen_l1_entry_update(ptep, pte); /* fallback */
if ((addr >= VMALLOC_START) && (addr < VMALLOC_END))
- make_mmu_page_writable(__va(pte_pfn(pte) << PAGE_SHIFT));
+ make_page_writable(__va(pte_pfn(pte) << PAGE_SHIFT), feature);
}
-void make_mmu_pages_readonly(void *va, unsigned nr)
+void make_pages_readonly(void *va, unsigned nr, unsigned int feature)
{
- if (xen_feature(writable_mmu_structures))
+ if (xen_feature(feature))
return;
while (nr-- != 0) {
- make_mmu_page_readonly(va);
+ make_page_readonly(va, feature);
va = (void*)((unsigned long)va + PAGE_SIZE);
}
}
-void make_mmu_pages_writable(void *va, unsigned nr)
+void make_pages_writable(void *va, unsigned nr, unsigned int feature)
{
- if (xen_feature(writable_mmu_structures))
+ if (xen_feature(feature))
return;
+
while (nr-- != 0) {
- make_mmu_page_writable(va);
+ make_page_writable(va, feature);
va = (void*)((unsigned long)va + PAGE_SIZE);
}
}
pud = (user_mode ? pud_offset_u(vaddr) : pud_offset(pgd, vaddr));
if (pud_none(*pud)) {
pmd = (pmd_t *) spp_getpage();
- make_mmu_page_readonly(pmd);
+ make_page_readonly(pmd, XENFEAT_writable_page_tables);
xen_pmd_pin(__pa(pmd));
set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
if (pmd != pmd_offset(pud, 0)) {
pmd = pmd_offset(pud, vaddr);
if (pmd_none(*pmd)) {
pte = (pte_t *) spp_getpage();
- make_mmu_page_readonly(pte);
+ make_page_readonly(pte, XENFEAT_writable_page_tables);
xen_pte_pin(__pa(pte));
set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
if (pte != pte_offset_kernel(pmd, 0)) {
if (pud_none(*pud)) {
pmd = (pmd_t *) spp_getpage();
- make_mmu_page_readonly(pmd);
+ make_page_readonly(pmd, XENFEAT_writable_page_tables);
xen_pmd_pin(__pa(pmd));
set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
if (pmd_none(*pmd)) {
pte = (pte_t *) spp_getpage();
- make_mmu_page_readonly(pte);
+ make_page_readonly(pte, XENFEAT_writable_page_tables);
xen_pte_pin(__pa(pte));
set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
{
int readonly = 0;
- if (xen_feature(writable_mmu_structures))
- return 0;
-
/* Make old and new page tables read-only. */
- if ((paddr >= (xen_start_info->pt_base - __START_KERNEL_map))
+ if (!xen_feature(XENFEAT_writable_page_tables)
+ && (paddr >= (xen_start_info->pt_base - __START_KERNEL_map))
&& (paddr < ((table_start << PAGE_SHIFT) + tables_space)))
readonly = 1;
/*
}
pmd = alloc_static_page(&pmd_phys);
- early_make_mmu_page_readonly(pmd);
+ early_make_page_readonly(pmd, XENFEAT_writable_page_tables);
xen_pmd_pin(pmd_phys);
set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
for (j = 0; j < PTRS_PER_PMD; pmd++, j++) {
__set_pte(pte, __pte(paddr | _KERNPG_TABLE));
}
pte = pte_save;
- early_make_mmu_page_readonly(pte);
+ early_make_page_readonly(
+ pte, XENFEAT_writable_page_tables);
xen_pte_pin(pte_phys);
set_pmd(pmd, __pmd(pte_phys | _KERNPG_TABLE));
}
_KERNPG_TABLE | _PAGE_USER);
memcpy((void *)level2_kernel_pgt, page, PAGE_SIZE);
- early_make_mmu_page_readonly(init_level4_pgt);
- early_make_mmu_page_readonly(init_level4_user_pgt);
- early_make_mmu_page_readonly(level3_kernel_pgt);
- early_make_mmu_page_readonly(level3_user_pgt);
- early_make_mmu_page_readonly(level2_kernel_pgt);
+ early_make_page_readonly(init_level4_pgt,
+ XENFEAT_writable_page_tables);
+ early_make_page_readonly(init_level4_user_pgt,
+ XENFEAT_writable_page_tables);
+ early_make_page_readonly(level3_kernel_pgt,
+ XENFEAT_writable_page_tables);
+ early_make_page_readonly(level3_user_pgt,
+ XENFEAT_writable_page_tables);
+ early_make_page_readonly(level2_kernel_pgt,
+ XENFEAT_writable_page_tables);
xen_pgd_pin(__pa_symbol(init_level4_pgt));
xen_pgd_pin(__pa_symbol(init_level4_user_pgt));
pmd = (pmd_t *)&page[pmd_index(va)];
if (pmd_none(*pmd)) {
pte_page = alloc_static_page(&phys);
- early_make_mmu_page_readonly(pte_page);
+ early_make_page_readonly(
+ pte_page, XENFEAT_writable_page_tables);
xen_pte_pin(phys);
set_pmd(pmd, __pmd(phys | _KERNPG_TABLE | _PAGE_USER));
} else {
for (; start < end; start = next) {
unsigned long pud_phys;
pud_t *pud = alloc_static_page(&pud_phys);
- early_make_mmu_page_readonly(pud);
+ early_make_page_readonly(pud, XENFEAT_writable_page_tables);
xen_pud_pin(pud_phys);
next = start + PGDIR_SIZE;
if (next > end)
set_page_count(virt_to_page(addr), 1);
memset((void *)(addr & ~(PAGE_SIZE-1)), 0xcc, PAGE_SIZE);
xen_pte_unpin(__pa(addr));
- make_mmu_page_writable(__va(__pa(addr)));
+ make_page_writable(
+ __va(__pa(addr)), XENFEAT_writable_page_tables);
/*
* Make pages from __PAGE_OFFSET address as well
*/
- make_mmu_page_writable((void *)addr);
+ make_page_writable(
+ (void *)addr, XENFEAT_writable_page_tables);
free_page(addr);
totalram_pages++;
}
static inline void pte_free_kernel(pte_t *pte)
{
free_page((unsigned long)pte);
- make_mmu_page_writable(pte);
+ make_page_writable(pte, XENFEAT_writable_page_tables);
}
extern void pte_free(struct page *pte);
ptep_set_access_flags(__vma, __address, __ptep, __entry, 1); \
} while (0)
-#ifndef CONFIG_XEN_SHADOW_MODE
-void make_lowmem_mmu_page_readonly(void *va);
-void make_lowmem_mmu_page_writable(void *va);
-void make_mmu_page_readonly(void *va);
-void make_mmu_page_writable(void *va);
-void make_mmu_pages_readonly(void *va, unsigned int nr);
-void make_mmu_pages_writable(void *va, unsigned int nr);
-#else
-#define make_lowmem_mmu_page_readonly(_va) ((void)0)
-#define make_lowmem_mmu_page_writable(_va) ((void)0)
-#define make_mmu_page_readonly(_va) ((void)0)
-#define make_mmu_page_writable(_va) ((void)0)
-#define make_mmu_pages_readonly(_va, _nr) ((void)0)
-#define make_mmu_pages_writable(_va, _nr) ((void)0)
-#endif
+#include <asm-xen/features.h>
+void make_lowmem_page_readonly(void *va, unsigned int feature);
+void make_lowmem_page_writable(void *va, unsigned int feature);
+void make_page_readonly(void *va, unsigned int feature);
+void make_page_writable(void *va, unsigned int feature);
+void make_pages_readonly(void *va, unsigned int nr, unsigned int feature);
+void make_pages_writable(void *va, unsigned int nr, unsigned int feature);
#define virt_to_ptep(__va) \
({ \
#include <linux/mm.h>
#include <asm/io.h> /* for phys_to_virt and page_to_pseudophys */
-void make_mmu_page_readonly(void *va);
-void make_mmu_page_writable(void *va);
-void make_mmu_pages_readonly(void *va, unsigned int nr);
-void make_mmu_pages_writable(void *va, unsigned int nr);
+#include <asm-xen/features.h>
+void make_page_readonly(void *va, unsigned int feature);
+void make_page_writable(void *va, unsigned int feature);
+void make_pages_readonly(void *va, unsigned int nr, unsigned int feature);
+void make_pages_writable(void *va, unsigned int nr, unsigned int feature);
#define __user_pgd(pgd) ((pgd) + PTRS_PER_PGD)
{
pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
if (pte)
- make_mmu_page_readonly(pte);
+ make_page_readonly(pte, XENFEAT_writable_page_tables);
return pte;
}
{
BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
xen_pte_unpin(__pa(pte));
- make_mmu_page_writable(pte);
+ make_page_writable(pte, XENFEAT_writable_page_tables);
free_page((unsigned long)pte);
}
extern unsigned long xen_features[XENFEAT_NR_SUBMAPS];
-#define xen_feature(flag) (test_bit(_XENFEAT_ ## flag, xen_features))
+#define xen_feature(flag) (test_bit(flag, xen_features))
-#endif
+#endif /* __ASM_XEN_FEATURES_H__ */
uint32_t submap; /* OUT: 32-bit submap */
} xen_feature_info_t;
-#define _XENFEAT_writable_mmu_structures 0
-#define XENFEAT_writable_mmu_structures (1UL<<_XENFEAT_writable_mmu_structures)
+#define XENFEAT_writable_page_tables 0
+#define XENFEAT_writable_descriptor_tables 1
#define XENFEAT_NR_SUBMAPS 1
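
A brief design note, as inferred from the diff: the feature constants
are now plain bit indices rather than masks, which is what lets the
guest-side xen_feature() macro pass them straight to test_bit().

	/* Bit 1 of submap 0 == writable descriptor tables. */
	int writable_gdt = test_bit(XENFEAT_writable_descriptor_tables,
				    xen_features);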