x86/mm: Always set _PAGE_ACCESSED on L4e updates
author Andrew Cooper <andrew.cooper3@citrix.com>
Fri, 1 Sep 2017 11:15:39 +0000 (12:15 +0100)
committer Andrew Cooper <andrew.cooper3@citrix.com>
Mon, 15 Jan 2018 13:53:16 +0000 (13:53 +0000)
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
xen/arch/x86/pv/mm.h

index 7502d533c60584536ae2c3838f043ddf01c75c24..976209ba4c59895e608e7e0d56df5dbdcdbadf00 100644 (file)
@@ -144,9 +144,21 @@ static inline l3_pgentry_t unadjust_guest_l3e(l3_pgentry_t l3e,
 static inline l4_pgentry_t adjust_guest_l4e(l4_pgentry_t l4e,
                                             const struct domain *d)
 {
-    if ( likely(l4e_get_flags(l4e) & _PAGE_PRESENT) &&
-         likely(!is_pv_32bit_domain(d)) )
-        l4e_add_flags(l4e, _PAGE_USER);
+    /*
+     * When shadowing an L4 behind the guest's back (e.g. for per-pcpu
+     * purposes), we cannot efficiently sync access bit updates from hardware
+     * (on the shadow tables) back into the guest view.
+     *
+     * We therefore unconditionally set _PAGE_ACCESSED even in the guest's
+     * view.  This will appear to the guest as a CPU which proactively pulls
+     * all valid L4e's into its TLB, which is compatible with the x86 ABI.
+     *
+     * At the time of writing, all PV guests set the access bit anyway, so
+     * this does not actually change their behaviour.
+     */
+    if ( likely(l4e_get_flags(l4e) & _PAGE_PRESENT) )
+        l4e_add_flags(l4e, (_PAGE_ACCESSED |
+                            (is_pv_32bit_domain(d) ? 0 : _PAGE_USER)));
 
     return l4e;
 }
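
For context, the following is a minimal standalone sketch of the adjusted helper that compiles outside the Xen tree. The flag values are the architectural x86 page-table bits; the l4_pgentry_t type, the accessor macros, and the is_pv_32bit parameter are simplified stand-ins for the Xen originals, and the likely() annotations are dropped. It is an illustration of the new logic, not the in-tree implementation.

/*
 * Standalone sketch (not Xen code): flag values are the architectural
 * x86 PTE bits; the entry type and accessors are simplified stand-ins.
 */
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define _PAGE_PRESENT  0x001U
#define _PAGE_USER     0x004U
#define _PAGE_ACCESSED 0x020U

typedef struct { uint64_t l4; } l4_pgentry_t;

#define l4e_get_flags(x)    ((x).l4 & 0xfffU)
#define l4e_add_flags(x, f) ((x).l4 |= (f))

/* is_pv_32bit stands in for is_pv_32bit_domain(d). */
static l4_pgentry_t adjust_guest_l4e(l4_pgentry_t l4e, bool is_pv_32bit)
{
    if ( l4e_get_flags(l4e) & _PAGE_PRESENT )
        l4e_add_flags(l4e, (_PAGE_ACCESSED |
                            (is_pv_32bit ? 0 : _PAGE_USER)));

    return l4e;
}

int main(void)
{
    l4_pgentry_t e = { 0x1000 | _PAGE_PRESENT };

    /* 64-bit PV guest: _PAGE_ACCESSED and _PAGE_USER are both added. */
    printf("64-bit: %#" PRIx64 "\n", adjust_guest_l4e(e, false).l4);

    /* 32-bit PV guest: only _PAGE_ACCESSED is added. */
    printf("32-bit: %#" PRIx64 "\n", adjust_guest_l4e(e, true).l4);

    return 0;
}

With these stand-in values, a present entry of 0x1001 becomes 0x1025 for a 64-bit PV guest and 0x1021 for a 32-bit PV guest, i.e. _PAGE_ACCESSED is now set in both cases rather than being left as the guest wrote it.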