}
}
-static inline void pmd_free(pmd_t *pmd)
+extern struct page *pte_alloc_one(struct mm_struct *mm, unsigned long addr);
+extern void pte_free(struct page *pte);
+
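+/*
+ * pmd (and pud) pages are now allocated and freed through the same
+ * out-of-line pte_alloc_one()/pte_free() helpers as pte pages, so every
+ * page-table level shares one allocation path.
+ */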
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- pte_t *ptep = virt_to_ptep(pmd);
+ struct page *pg;
- if (!pte_write(*ptep)) {
- BUG_ON(HYPERVISOR_update_va_mapping(
- (unsigned long)pmd,
- pfn_pte(virt_to_phys(pmd)>>PAGE_SHIFT, PAGE_KERNEL),
- 0));
- }
- free_page((unsigned long)pmd);
+ pg = pte_alloc_one(mm, addr);
+ return pg ? page_address(pg) : NULL;
}
-static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
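+/* pmd pages come from pte_alloc_one(), so hand them back via pte_free(). */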
+static inline void pmd_free(pmd_t *pmd)
{
- pmd_t *pmd = (pmd_t *) get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
- return pmd;
+ BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
+ pte_free(virt_to_page(pmd));
}
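+/* pud pages are allocated and freed the same way as pmd pages. */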
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- pud_t *pud = (pud_t *) get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
- return pud;
+ struct page *pg;
+
+ pg = pte_alloc_one(mm, addr);
+ return pg ? page_address(pg) : NULL;
}
static inline void pud_free(pud_t *pud)
{
- pte_t *ptep = virt_to_ptep(pud);
-
- if (!pte_write(*ptep)) {
- BUG_ON(HYPERVISOR_update_va_mapping(
- (unsigned long)pud,
- pfn_pte(virt_to_phys(pud)>>PAGE_SHIFT, PAGE_KERNEL),
- 0));
- }
- free_page((unsigned long)pud);
+ BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
+ pte_free(virt_to_page(pud));
}
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
- /*
- * We allocate two contiguous pages for kernel and user.
- */
- unsigned boundary;
+ /*
+ * We allocate two contiguous pages for kernel and user.
+ */
+ unsigned boundary;
pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT, 1);
	if (!pgd)
		return NULL;
	/*
	 * Copy kernel pointers in from init.
	 * Could keep a freelist or slab cache of those because the kernel
	 * part never changes.
	 */
	boundary = pgd_index(__PAGE_OFFSET);
	memset(pgd, 0, boundary * sizeof(pgd_t));
	memcpy(pgd + boundary,
	       init_level4_pgt + boundary,
	       (PTRS_PER_PGD - boundary) * sizeof(pgd_t));
memset(__user_pgd(pgd), 0, PAGE_SIZE); /* clean up user pgd */
- /*
- * Set level3_user_pgt for vsyscall area
- */
+ /*
+ * Set level3_user_pgt for vsyscall area
+ */
set_pgd(__user_pgd(pgd) + pgd_index(VSYSCALL_START),
- mk_kernel_pgd(__pa_symbol(level3_user_pgt)));
+ mk_kernel_pgd(__pa_symbol(level3_user_pgt)));
return pgd;
}
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
- pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
- if (pte)
+ pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
+ if (pte)
make_page_readonly(pte, XENFEAT_writable_page_tables);
return pte;
}
-static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
-{
- struct page *pte;
-
- pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
- return pte;
-}
-
/* Should really implement gc for free page table pages. This could be
done with a reference count in struct page. */
static inline void pte_free_kernel(pte_t *pte)
{
BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
- make_page_writable(pte, XENFEAT_writable_page_tables);
+ make_page_writable(pte, XENFEAT_writable_page_tables);
free_page((unsigned long)pte);
}
-extern void pte_free(struct page *pte);
-
-//#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
-//#define __pmd_free_tlb(tlb,x) tlb_remove_page((tlb),virt_to_page(x))
-//#define __pud_free_tlb(tlb,x) tlb_remove_page((tlb),virt_to_page(x))
-
-#define __pte_free_tlb(tlb,x) pte_free((x))
-#define __pmd_free_tlb(tlb,x) pmd_free((x))
-#define __pud_free_tlb(tlb,x) pud_free((x))
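+/*
+ * Queue page-table pages on the mmu_gather instead of freeing them
+ * immediately, so they are only released after the TLB flush.
+ */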
+#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
+#define __pmd_free_tlb(tlb,x) tlb_remove_page((tlb),virt_to_page(x))
+#define __pud_free_tlb(tlb,x) tlb_remove_page((tlb),virt_to_page(x))
#endif /* _X86_64_PGALLOC_H */