*/
declare_mm_rwlock(altp2m);
-#define p2m_lock(p) \
-{ \
- if ( p2m_is_altp2m(p) ) \
- mm_write_lock(altp2m, &(p)->lock); \
- else \
- mm_write_lock(p2m, &(p)->lock); \
-}
-#define p2m_unlock(p) mm_write_unlock(&(p)->lock);
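+/*
+ * The p2m lock may be taken recursively; 'defer_flush' counts the nesting
+ * depth.  While it is non-zero, TLB flushes requested by p2m updates are
+ * only recorded (via 'need_flush') and are performed by the outermost
+ * p2m_unlock(), in p2m_unlock_and_tlb_flush().
+ *
+ * Rough usage sketch:
+ *     p2m_lock(p2m);      /. defer_flush: 0 -> 1
+ *     ...p2m updates; ept_sync_domain() only sets need_flush...
+ *     p2m_unlock(p2m);    /. defer_flush: 1 -> 0, deferred flush done here
+ */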
+#define p2m_lock(p) \
+ do { \
+ if ( p2m_is_altp2m(p) ) \
+ mm_write_lock(altp2m, &(p)->lock); \
+ else \
+ mm_write_lock(p2m, &(p)->lock); \
+ (p)->defer_flush++; \
+ } while (0)
+#define p2m_unlock(p) \
+ do { \
+ if ( --(p)->defer_flush == 0 ) \
+ p2m_unlock_and_tlb_flush(p); \
+ else \
+ mm_write_unlock(&(p)->lock); \
+ } while (0)
#define gfn_lock(p,g,o) p2m_lock(p)
#define gfn_unlock(p,g,o) p2m_unlock(p)
#define p2m_read_lock(p) mm_read_lock(p2m, &(p)->lock)
unmap_domain_page(epte);
}
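+    /*
+     * The page table page is freed for reuse below, so any deferred
+     * flush must be performed first to remove stale EPT translations
+     * that may still reference it.
+     */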
+ p2m_tlb_flush_sync(p2m);
p2m_free_ptp(p2m, mfn_to_page(ept_entry->mfn));
}
*/
}
-void ept_sync_domain(struct p2m_domain *p2m)
+static void ept_sync_domain_prepare(struct p2m_domain *p2m)
{
struct domain *d = p2m->domain;
struct ept_data *ept = &p2m->ept;
- /* Only if using EPT and this domain has some VCPUs to dirty. */
- if ( !paging_mode_hap(d) || !d->vcpu || !d->vcpu[0] )
- return;
-
- ASSERT(local_irq_is_enabled());
if ( nestedhvm_enabled(d) && !p2m_is_nestedp2m(p2m) )
p2m_flush_nestedp2m(d);
* of an EP4TA reuse is still needed.
*/
cpumask_setall(ept->invalidate);
+}
+
+static void ept_sync_domain_mask(struct p2m_domain *p2m, const cpumask_t *mask)
+{
+ on_selected_cpus(mask, __ept_sync_domain, p2m, 1);
+}
+
+void ept_sync_domain(struct p2m_domain *p2m)
+{
+ struct domain *d = p2m->domain;
- on_selected_cpus(d->domain_dirty_cpumask,
- __ept_sync_domain, p2m, 1);
+ /* Only if using EPT and this domain has some VCPUs to dirty. */
+ if ( !paging_mode_hap(d) || !d->vcpu || !d->vcpu[0] )
+ return;
+
+ ept_sync_domain_prepare(p2m);
+
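+    /*
+     * If the p2m lock is held (a flush is being deferred), only note
+     * that a flush is needed; p2m_unlock() will perform it.
+     */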
+ if ( p2m->defer_flush )
+ {
+ p2m->need_flush = 1;
+ return;
+ }
+
+ ept_sync_domain_mask(p2m, d->domain_dirty_cpumask);
+}
+
+static void ept_tlb_flush(struct p2m_domain *p2m)
+{
+ ept_sync_domain_mask(p2m, p2m->domain->domain_dirty_cpumask);
}
static void ept_enable_pml(struct p2m_domain *p2m)
p2m->change_entry_type_range = ept_change_entry_type_range;
p2m->memory_type_changed = ept_memory_type_changed;
p2m->audit_p2m = NULL;
+ p2m->tlb_flush = ept_tlb_flush;
/* Set the memory type used when accessing EPT paging structures. */
ept->ept_mt = EPT_DEFAULT_MT;
p2m_set_entry(p2m, gpfn + i, _mfn(INVALID_MFN), cur_order,
p2m_invalid, p2m->default_access);
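+            /*
+             * Flush before the pages go back to the PoD cache; once
+             * there they can be reused, so no stale mappings may remain.
+             */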
+ p2m_tlb_flush_sync(p2m);
for ( j = 0; j < n; ++j )
set_gpfn_from_mfn(mfn_x(mfn), INVALID_M2P_ENTRY);
p2m_pod_cache_add(p2m, page, cur_order);
/* Try to remove the page, restoring old mapping if it fails. */
p2m_set_entry(p2m, gfn, _mfn(INVALID_MFN), PAGE_ORDER_2M,
p2m_populate_on_demand, p2m->default_access);
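+    /*
+     * Flush the mapping just removed, so no vCPU can still be writing
+     * to the page via a stale TLB entry while it is checked below.
+     */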
+ p2m_tlb_flush_sync(p2m);
    /* Make sure none of the MFNs are used elsewhere... for example, mapped
* via the grant table interface, or by qemu. Allow one refcount for
}
}
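+    /*
+     * Flush the mappings removed above before checking the page
+     * contents, so no vCPU can dirty a page via a stale TLB entry.
+     */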
+ p2m_tlb_flush_sync(p2m);
+
/* Now check each page for real */
for ( i=0; i < count; i++ )
{
}
}
+/*
+ * Force a synchronous P2M TLB flush if a deferred flush is pending.
+ *
+ * Must be called with the p2m lock held.
+ */
+void p2m_tlb_flush_sync(struct p2m_domain *p2m)
+{
+    if ( p2m->need_flush )
+    {
+ p2m->need_flush = 0;
+ p2m->tlb_flush(p2m);
+ }
+}
+
+/*
+ * Release the p2m lock and perform any P2M TLB flush that was
+ * deferred while it was held.
+ */
+void p2m_unlock_and_tlb_flush(struct p2m_domain *p2m)
+{
+    if ( p2m->need_flush )
+    {
+ p2m->need_flush = 0;
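+        /*
+         * Drop the lock before the (potentially slow, IPI-based) flush
+         * so other CPUs are not held up waiting for it.
+         */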
+ mm_write_unlock(&p2m->lock);
+ p2m->tlb_flush(p2m);
+    }
+    else
+ mm_write_unlock(&p2m->lock);
+}
+
mfn_t __get_gfn_type_access(struct p2m_domain *p2m, unsigned long gfn,
p2m_type_t *t, p2m_access_t *a, p2m_query_t q,
unsigned int *page_order, bool_t locked)
l1_pgentry_t new, unsigned int level);
long (*audit_p2m)(struct p2m_domain *p2m);
+ /*
+ * P2M updates may require TLBs to be flushed (invalidated).
+ *
+ * If 'defer_flush' is set, flushes may be deferred by setting
+ * 'need_flush' and then flushing in 'tlb_flush()'.
+ *
+ * 'tlb_flush()' is only called if 'need_flush' was set.
+ *
+     * If a flush may have been deferred but an immediate flush is
+     * required (e.g., if a page is being freed to a pool other than
+     * the domheap), call p2m_tlb_flush_sync().
+ */
+ void (*tlb_flush)(struct p2m_domain *p2m);
+ unsigned int defer_flush;
+ bool_t need_flush;
+
    /* Default P2M access type for each page in the domain: new pages,
* swapped in pages, cleared pages, and pages that are ambiguously
* retyped get this access type. See definition of p2m_access_t. */
#define p2m_get_pagetable(p2m) ((p2m)->phys_table)
+/*
+ * Ensure any deferred p2m TLB flush has been completed on all VCPUs.
+ */
+void p2m_tlb_flush_sync(struct p2m_domain *p2m);
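+/*
+ * Release the p2m lock and perform any deferred p2m TLB flush; normally
+ * called via p2m_unlock().
+ */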
+void p2m_unlock_and_tlb_flush(struct p2m_domain *p2m);
+
/**** p2m query accessors. They lock p2m_lock, and thus serialize
* lookups wrt modifications. They _do not_ release the lock on exit.
* After calling any of the variants below, caller needs to use