From: Andres Lagar-Cavilla
Date: Thu, 1 Dec 2011 14:17:15 +0000 (+0000)
Subject: x86/mm: Don't trigger unnecessary shadow scans on p2m entry update
X-Git-Url: https://dgit.raspbian.org/?a=commitdiff_plain;h=67b8a0a32430a43c1e2c19f8f26c8f4dcb55cf62;p=xen.git

x86/mm: Don't trigger unnecessary shadow scans on p2m entry update

When updating a p2m entry, the hypervisor scans all shadow pte's to find
mappings of that gfn and tear them down. This is avoided if the page count
reveals that there are no additional mappings. The current test ignores
the PGC_allocated flag and its effect on the page count.

Signed-off-by: Andres Lagar-Cavilla
Signed-off-by: Adin Scannell
Signed-off-by: Tim Deegan
Committed-by: Tim Deegan
---

diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 51c7193f5e..b1a1e1d634 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -2464,7 +2464,6 @@ int sh_remove_write_access_from_sl1p(struct vcpu *v, mfn_t gmfn,
 int sh_remove_all_mappings(struct vcpu *v, mfn_t gmfn)
 {
     struct page_info *page = mfn_to_page(gmfn);
-    int expected_count;
 
     /* Dispatch table for getting per-type functions */
     static const hash_callback_t callbacks[SH_type_unused] = {
@@ -2501,7 +2500,7 @@ int sh_remove_all_mappings(struct vcpu *v, mfn_t gmfn)
         ;
 
     perfc_incr(shadow_mappings);
-    if ( (page->count_info & PGC_count_mask) == 0 )
+    if ( sh_check_page_has_no_refs(page) )
         return 0;
 
     /* Although this is an externally visible function, we do not know
@@ -2517,8 +2516,7 @@ int sh_remove_all_mappings(struct vcpu *v, mfn_t gmfn)
     hash_foreach(v, callback_mask, callbacks, gmfn);
 
     /* If that didn't catch the mapping, something is very wrong */
-    expected_count = (page->count_info & PGC_allocated) ? 1 : 0;
-    if ( (page->count_info & PGC_count_mask) != expected_count )
+    if ( !sh_check_page_has_no_refs(page) )
     {
         /* Don't complain if we're in HVM and there are some extra mappings:
          * The qemu helper process has an untyped mapping of this dom's RAM
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index 7ccd38edd1..a2436563d2 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -4591,7 +4591,7 @@ int sh_rm_mappings_from_l1(struct vcpu *v, mfn_t sl1mfn, mfn_t target_mfn)
         {
             (void) shadow_set_l1e(v, sl1e, shadow_l1e_empty(),
                                   p2m_invalid, sl1mfn);
-            if ( (mfn_to_page(target_mfn)->count_info & PGC_count_mask) == 0 )
+            if ( sh_check_page_has_no_refs(mfn_to_page(target_mfn)) )
                 /* This breaks us cleanly out of the FOREACH macro */
                 done = 1;
         }
diff --git a/xen/arch/x86/mm/shadow/private.h b/xen/arch/x86/mm/shadow/private.h
index b8d570948f..b7947e51c3 100644
--- a/xen/arch/x86/mm/shadow/private.h
+++ b/xen/arch/x86/mm/shadow/private.h
@@ -30,6 +30,7 @@
 #include
 #include
 #include
+#include <asm/atomic.h>
 
 #include "../mm-locks.h"
 
@@ -815,6 +816,12 @@ static inline unsigned long vtlb_lookup(struct vcpu *v,
 }
 #endif /* (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB) */
 
+static inline int sh_check_page_has_no_refs(struct page_info *page)
+{
+    unsigned long count = read_atomic(&page->count_info);
+    return ( (count & PGC_count_mask) ==
+             ((count & PGC_allocated) ? 1 : 0) );
+}
 
 #endif /* _XEN_SHADOW_PRIVATE_H */