From 8f529b5debe04825c21cbc6ec787566a66029b2b Mon Sep 17 00:00:00 2001
From: "kfraser@localhost.localdomain"
Date: Fri, 12 Jan 2007 10:22:36 +0000
Subject: [PATCH] [LINUX] x86/64: Sync pagetable management with i386 Xen code.

PUDs, PMDs, PTEs are all marked as ForeignPage so that they can be
grabbed from tlb_remove_page() at the appropriate time and freed in a
special way.

Signed-off-by: Jan Beulich
---
 .../arch/x86_64/mm/pageattr-xen.c             | 16 ++++
 .../include/asm-x86_64/mach-xen/asm/pgalloc.h | 81 +++++++------------
 2 files changed, 46 insertions(+), 51 deletions(-)

diff --git a/linux-2.6-xen-sparse/arch/x86_64/mm/pageattr-xen.c b/linux-2.6-xen-sparse/arch/x86_64/mm/pageattr-xen.c
index 3641bfb805..d717708de9 100644
--- a/linux-2.6-xen-sparse/arch/x86_64/mm/pageattr-xen.c
+++ b/linux-2.6-xen-sparse/arch/x86_64/mm/pageattr-xen.c
@@ -164,6 +164,18 @@ void _arch_exit_mmap(struct mm_struct *mm)
 	mm_unpin(mm);
 }
 
+struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+	struct page *pte;
+
+	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
+	if (pte) {
+		SetPageForeign(pte, pte_free);
+		set_page_count(pte, 1);
+	}
+	return pte;
+}
+
 void pte_free(struct page *pte)
 {
 	unsigned long va = (unsigned long)__va(page_to_pfn(pte)<<PAGE_SHIFT);
diff --git a/linux-2.6-xen-sparse/include/asm-x86_64/mach-xen/asm/pgalloc.h b/linux-2.6-xen-sparse/include/asm-x86_64/mach-xen/asm/pgalloc.h
--- a/linux-2.6-xen-sparse/include/asm-x86_64/mach-xen/asm/pgalloc.h
+++ b/linux-2.6-xen-sparse/include/asm-x86_64/mach-xen/asm/pgalloc.h
@@ ... @@
-static inline void pmd_free(pmd_t *pmd)
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	pte_t *ptep = virt_to_ptep(pmd);
-
-	if (!pte_write(*ptep)) {
-		BUG_ON(HYPERVISOR_update_va_mapping(
-			(unsigned long)pmd,
-			pfn_pte(virt_to_phys(pmd)>>PAGE_SHIFT, PAGE_KERNEL),
-			0));
-	}
-	free_page((unsigned long)pmd);
+	struct page *pg;
+
+	pg = pte_alloc_one(mm, addr);
+	return pg ? page_address(pg) : NULL;
 }
 
-static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+static inline void pmd_free(pmd_t *pmd)
 {
-	pmd_t *pmd = (pmd_t *) get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
-	return pmd;
+	BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
+	pte_free(virt_to_page(pmd));
 }
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	pud_t *pud = (pud_t *) get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
-	return pud;
+	struct page *pg;
+
+	pg = pte_alloc_one(mm, addr);
+	return pg ? page_address(pg) : NULL;
 }
 
 static inline void pud_free(pud_t *pud)
 {
-	pte_t *ptep = virt_to_ptep(pud);
-
-	if (!pte_write(*ptep)) {
-		BUG_ON(HYPERVISOR_update_va_mapping(
-			(unsigned long)pud,
-			pfn_pte(virt_to_phys(pud)>>PAGE_SHIFT, PAGE_KERNEL),
-			0));
-	}
-	free_page((unsigned long)pud);
+	BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
+	pte_free(virt_to_page(pud));
 }
 
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-        /*
-         * We allocate two contiguous pages for kernel and user.
-         */
-        unsigned boundary;
+	/*
+	 * We allocate two contiguous pages for kernel and user.
+	 */
+	unsigned boundary;
 	pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT, 1);
 	if (!pgd)
@@ -124,11 +117,11 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 
 		(PTRS_PER_PGD - boundary) * sizeof(pgd_t));
 	memset(__user_pgd(pgd), 0, PAGE_SIZE); /* clean up user pgd */
-        /*
-         * Set level3_user_pgt for vsyscall area
-         */
+	/*
+	 * Set level3_user_pgt for vsyscall area
+	 */
 	set_pgd(__user_pgd(pgd) + pgd_index(VSYSCALL_START),
-                mk_kernel_pgd(__pa_symbol(level3_user_pgt)));
+		mk_kernel_pgd(__pa_symbol(level3_user_pgt)));
 	return pgd;
 }
 
@@ -160,39 +153,25 @@ static inline void pgd_free(pgd_t *pgd)
 
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
-        pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
-        if (pte)
+	pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
+	if (pte)
 		make_page_readonly(pte, XENFEAT_writable_page_tables);
 
 	return pte;
 }
 
-static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
-{
-	struct page *pte;
-
-	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
-	return pte;
-}
-
 /* Should really implement gc for free page table pages. This could be
    done with a reference count in struct page. */
 
 static inline void pte_free_kernel(pte_t *pte)
 {
 	BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
-        make_page_writable(pte, XENFEAT_writable_page_tables);
+	make_page_writable(pte, XENFEAT_writable_page_tables);
 	free_page((unsigned long)pte);
 }
 
-extern void pte_free(struct page *pte);
-
-//#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
-//#define __pmd_free_tlb(tlb,x)   tlb_remove_page((tlb),virt_to_page(x))
-//#define __pud_free_tlb(tlb,x)   tlb_remove_page((tlb),virt_to_page(x))
-
-#define __pte_free_tlb(tlb,x)   pte_free((x))
-#define __pmd_free_tlb(tlb,x)   pmd_free((x))
-#define __pud_free_tlb(tlb,x)   pud_free((x))
+#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
+#define __pmd_free_tlb(tlb,x)   tlb_remove_page((tlb),virt_to_page(x))
+#define __pud_free_tlb(tlb,x)   tlb_remove_page((tlb),virt_to_page(x))
 
 #endif /* _X86_64_PGALLOC_H */
-- 
2.30.2
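
Note on the mechanism (not part of the patch itself): pagetable pages are
kept read-only while Xen validates them, so they cannot simply be handed back
to the allocator. pte_alloc_one() therefore tags each page with
SetPageForeign(pte, pte_free); when generic mm code later releases the page
through tlb_remove_page(), the arch free hook spots the tag and runs
pte_free(), which re-establishes a writable mapping before the page is really
freed. The stand-alone C model below only illustrates that flow; the struct
and function names (page_model, arch_free_page_model, etc.) are invented for
the illustration, and the corresponding real hooks (PG_foreign,
PageForeignDestructor and the arch_free_page() check) live in the xen-sparse
headers, not in this patch.

/* Toy model of the ForeignPage flow; hypothetical names, not kernel code. */
#include <stdio.h>
#include <stdbool.h>

struct page_model {
	bool foreign;				/* models PG_foreign */
	void (*dtor)(struct page_model *);	/* models dtor recorded by SetPageForeign() */
};

/* models SetPageForeign(page, pte_free) as done in pte_alloc_one() */
static void set_page_foreign(struct page_model *pg,
			     void (*dtor)(struct page_model *))
{
	pg->foreign = true;
	pg->dtor = dtor;
}

/* models pte_free(): make the pagetable page writable again, then free it */
static void pte_free_model(struct page_model *pg)
{
	printf("make pagetable page writable again, then free it\n");
}

/* models the arch free hook reached from tlb_remove_page(): foreign pages
 * are diverted to their destructor instead of the normal free path. */
static bool arch_free_page_model(struct page_model *pg)
{
	if (pg->foreign) {
		pg->dtor(pg);
		return true;	/* handled specially, skip normal free */
	}
	return false;
}

int main(void)
{
	struct page_model pmd_page = { 0 };

	set_page_foreign(&pmd_page, pte_free_model);
	if (!arch_free_page_model(&pmd_page))
		printf("normal free\n");
	return 0;
}

With pmd_alloc_one() and pud_alloc_one() now routed through pte_alloc_one(),
every pagetable level gets this treatment, which is why the __p*_free_tlb
macros can go back to plain tlb_remove_page().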