paging_mark_dirty(d, page_to_mfn(page));
/* These are most probably not page tables any more:
 * don't take a long time and don't die either. */
- sh_remove_shadows(d->vcpu[0], _mfn(page_to_mfn(page)), 1, 0);
+ sh_remove_shadows(d, _mfn(page_to_mfn(page)), 1, 0);
put_page(page);
}
ASSERT(VALID_M2P(gmfn));
/* Page sharing not supported for shadowed domains */
if ( !SHARED_M2P(gmfn) )
- shadow_remove_all_shadows(owner->vcpu[0], _mfn(gmfn));
+ shadow_remove_all_shadows(owner, _mfn(gmfn));
}
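
For orientation: the two hunks above show the point of the whole change. Both call sites have only a domain (or page owner) in hand and were dereferencing vcpu[0] purely to satisfy the old signature. A minimal sketch of the new convention, using an invented caller name (flush_gmfn is not in the tree):

    /* Hypothetical caller: the callee only ever used the vcpu to reach
     * its domain, and d->vcpu[0] may be NULL while a domain is still
     * being constructed or torn down. */
    void flush_gmfn(struct domain *d, mfn_t gmfn)
    {
        /* Old: sh_remove_shadows(d->vcpu[0], gmfn, 0, 1);
         *      -- faults if the domain has no vcpus yet. */
        sh_remove_shadows(d, gmfn, 0, 1);   /* new: pass what is needed */
    }
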
if ( !(type & PGT_partial) )
if ( d && shadow_mode_enabled(d)
&& (page->count_info & PGC_page_table)
&& !((page->shadow_flags & (1u<<29))
&& type == PGT_writable_page) )
- shadow_remove_all_shadows(d->vcpu[0], _mfn(page_to_mfn(page)));
+ shadow_remove_all_shadows(d, _mfn(page_to_mfn(page)));
ASSERT(!(x & PGT_pae_xen_l2));
if ( (x & PGT_type_mask) != type )
* the page. If that doesn't work either, the guest is granting
* its pagetables and must be killed after all.
* This will flush the tlb, so we can return with no worries. */
- sh_remove_shadows(v, gmfn, 0 /* Be thorough */, 1 /* Must succeed */);
+ sh_remove_shadows(d, gmfn, 0 /* Be thorough */, 1 /* Must succeed */);
return 1;
}
* Since the validate call above will have made a "safe" (i.e. zero)
* shadow entry, we can let the domain live even if we can't fully
* unshadow the page. */
- sh_remove_shadows(v, gmfn, 0, 0);
+ sh_remove_shadows(d, gmfn, 0, 0);
}
}
return rc;
}
-void sh_remove_shadows(struct vcpu *v, mfn_t gmfn, int fast, int all)
+void sh_remove_shadows(struct domain *d, mfn_t gmfn, int fast, int all)
/* Remove the shadows of this guest page.
* If fast != 0, just try the quick heuristic, which will remove
* at most one reference to each shadow of the page. Otherwise, walk
* all the shadow tables looking for refs to shadows of this gmfn.
* If all != 0, kill the domain if we can't find all the shadows.
* (all != 0 implies fast == 0)
*/
{
- struct domain *d = v->domain;
struct page_info *pg = mfn_to_page(gmfn);
mfn_t smfn;
unsigned char t;
/* Although this is an externally visible function, we do not know
 * whether the paging lock will be held when it is called (since it
 * can be called via put_page_type when we clear a shadow l1e). */
paging_lock_recursive(d);
- SHADOW_PRINTK("d=%d, v=%d, gmfn=%05lx\n",
- d->domain_id, v->vcpu_id, mfn_x(gmfn));
+ SHADOW_PRINTK("d=%d: gmfn=%lx\n", d->domain_id, mfn_x(gmfn));
/* Bail out now if the page is not shadowed */
if ( (pg->count_info & PGC_page_table) == 0 )
}
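
Since the (fast, all) flag pairs recur at every call site in this patch, here is an illustrative (not real) fragment spelling out the conventions described in the function comment above:

    /* Illustrative only: the three (fast, all) combinations used by
     * the call sites in this patch. */
    static void unshadow_flag_conventions(struct domain *d, mfn_t gmfn)
    {
        sh_remove_shadows(d, gmfn, 1, 0); /* quick heuristic, may fail */
        sh_remove_shadows(d, gmfn, 0, 0); /* thorough walk, may still fail */
        sh_remove_shadows(d, gmfn, 0, 1); /* thorough; domain dies on failure */
        /* fast != 0 together with all != 0 is invalid, since
         * all != 0 implies fast == 0. */
    }
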
static void
-sh_remove_all_shadows_and_parents(struct vcpu *v, mfn_t gmfn)
+sh_remove_all_shadows_and_parents(struct domain *d, mfn_t gmfn)
/* Even harsher: this is an HVM page that we think is no longer a pagetable.
* Unshadow it, and recursively unshadow pages that reference it. */
{
- sh_remove_shadows(v, gmfn, 0, 1);
+ sh_remove_shadows(d, gmfn, 0, 1);
/* XXX TODO:
* Rework this hashtable walker to return a linked-list of all
* the shadows it modified, then do breadth-first recursion
* to find the way up to higher-level tables and unshadow them too. */
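
A rough sketch of what that TODO describes, with invented types and helpers (sh_work, new_work, free_work and sh_unshadow_and_collect_parents do not exist in the tree); a true breadth-first order would append new work at the tail:

    struct sh_work { mfn_t gmfn; struct sh_work *next; };

    static void worklist_unshadow(struct domain *d, mfn_t gmfn)
    {
        struct sh_work *head = new_work(gmfn);

        while ( head != NULL )
        {
            struct sh_work *w = head;

            head = w->next;
            /* Unshadow w->gmfn and queue the gmfns of any higher-level
             * tables whose shadows were found referencing it. */
            head = sh_unshadow_and_collect_parents(d, w->gmfn, head);
            free_work(w);
        }
    }
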
p2m_type_t p2mt = p2m_flags_to_type(l1e_get_flags(*p));
if ( (p2m_is_valid(p2mt) || p2m_is_grant(p2mt)) && mfn_valid(mfn) )
{
- sh_remove_all_shadows_and_parents(v, mfn);
+ sh_remove_all_shadows_and_parents(d, mfn);
if ( sh_remove_all_mappings(v, mfn) )
flush_tlb_mask(d->domain_dirty_cpumask);
}
|| l1e_get_pfn(npte[i]) != mfn_x(omfn) )
{
/* This GFN->MFN mapping has gone away */
- sh_remove_all_shadows_and_parents(v, omfn);
+ sh_remove_all_shadows_and_parents(d, omfn);
if ( sh_remove_all_mappings(v, omfn) )
cpumask_or(&flushmask, &flushmask,
d->domain_dirty_cpumask);
& (SHF_L2_32|SHF_L2_PAE|SHF_L2H_PAE|SHF_L4_64))) )
{
perfc_incr(shadow_early_unshadow);
- sh_remove_shadows(v, gmfn, 1, 0 /* Fast, can fail to unshadow */ );
+ sh_remove_shadows(d, gmfn, 1, 0 /* Fast, can fail to unshadow */ );
TRACE_SHADOW_PATH_FLAG(TRCE_SFLAG_EARLY_UNSHADOW);
}
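
For readers skimming this hunk: it is the "early unshadow" heuristic. The leading clauses of the condition are elided from this excerpt, but the last_emulated_mfn_for_unshadow update just below gives the shape away: a second emulated write to the same pagetable, when the page is not shadowed as a top-level (L2/L4) table, is taken as a hint that the guest is recycling it as a data page. A hedged restatement, with an invented helper name:

    /* Sketch only; the real test lives in the if() above. */
    static int worth_early_unshadow(const struct page_info *pg,
                                    unsigned long last_mfn, mfn_t gmfn)
    {
        return last_mfn == mfn_x(gmfn) &&
               !(pg->shadow_flags &
                 (SHF_L2_32 | SHF_L2_PAE | SHF_L2H_PAE | SHF_L4_64));
    }
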
v->arch.paging.shadow.last_emulated_mfn_for_unshadow = mfn_x(gmfn);
SHADOW_PRINTK("user-mode fault to PT, unshadowing mfn %#lx\n",
mfn_x(gmfn));
perfc_incr(shadow_fault_emulate_failed);
- sh_remove_shadows(v, gmfn, 0 /* thorough */, 1 /* must succeed */);
+ sh_remove_shadows(d, gmfn, 0 /* thorough */, 1 /* must succeed */);
trace_shadow_emulate_other(TRC_SHADOW_EMULATE_UNSHADOW_USER,
va, gfn);
goto done;
}
if ( !used )
- sh_remove_shadows(v, gmfn, 1 /* fast */, 0 /* can fail */);
+ sh_remove_shadows(d, gmfn, 1 /* fast */, 0 /* can fail */);
}
/*
gdprintk(XENLOG_DEBUG, "write to pagetable during event "
"injection: cr2=%#lx, mfn=%#lx\n",
va, mfn_x(gmfn));
- sh_remove_shadows(v, gmfn, 0 /* thorough */, 1 /* must succeed */);
+ sh_remove_shadows(d, gmfn, 0 /* thorough */, 1 /* must succeed */);
trace_shadow_emulate_other(TRC_SHADOW_EMULATE_UNSHADOW_EVTINJ,
va, gfn);
return EXCRET_fault_fixed;
/* If this is actually a page table, then we have a bug, and need
* to support more operations in the emulator. More likely,
* though, this is a hint that this page should not be shadowed. */
- shadow_remove_all_shadows(v, gmfn);
+ shadow_remove_all_shadows(d, gmfn);
trace_shadow_emulate_other(TRC_SHADOW_EMULATE_UNSHADOW_UNHANDLED,
va, gfn);
u32 bytes,
struct sh_emulate_ctxt *sh_ctxt)
{
+ struct domain *d = v->domain;
void *map = NULL;
sh_ctxt->mfn1 = emulate_gva_to_mfn(v, vaddr, sh_ctxt);
/* Unaligned writes probably mean this isn't a pagetable */
if ( vaddr & (bytes - 1) )
- sh_remove_shadows(v, sh_ctxt->mfn1, 0, 0 /* Slow, can fail */ );
+ sh_remove_shadows(d, sh_ctxt->mfn1, 0, 0 /* Slow, can fail */ );
if ( likely(((vaddr + bytes - 1) & PAGE_MASK) == (vaddr & PAGE_MASK)) )
{
MAPPING_SILENT_FAIL : MAPPING_UNHANDLEABLE);
/* Cross-page writes probably mean this isn't a pagetable */
- sh_remove_shadows(v, sh_ctxt->mfn2, 0, 0 /* Slow, can fail */ );
+ sh_remove_shadows(d, sh_ctxt->mfn2, 0, 0 /* Slow, can fail */ );
mfns[0] = mfn_x(sh_ctxt->mfn1);
mfns[1] = mfn_x(sh_ctxt->mfn2);
/* Call once all of the references to the domain have gone away */
void shadow_final_teardown(struct domain *d);
-void sh_remove_shadows(struct vcpu *v, mfn_t gmfn, int fast, int all);
+void sh_remove_shadows(struct domain *d, mfn_t gmfn, int fast, int all);
/* Discard _all_ mappings from the domain's shadows. */
void shadow_blow_tables_per_domain(struct domain *d);
#define shadow_track_dirty_vram(d, begin_pfn, nr, bitmap) \
({ ASSERT_UNREACHABLE(); -EOPNOTSUPP; })
-static inline void sh_remove_shadows(struct vcpu *v, mfn_t gmfn,
+static inline void sh_remove_shadows(struct domain *d, mfn_t gmfn,
bool_t fast, bool_t all) {}
static inline void shadow_blow_tables_per_domain(struct domain *d) {}
#endif /* CONFIG_SHADOW_PAGING */
/* Remove all shadows of the guest mfn. */
-static inline void shadow_remove_all_shadows(struct vcpu *v, mfn_t gmfn)
+static inline void shadow_remove_all_shadows(struct domain *d, mfn_t gmfn)
{
/* See the comment about locking in sh_remove_shadows */
- sh_remove_shadows(v, gmfn, 0 /* Be thorough */, 1 /* Must succeed */);
+ sh_remove_shadows(d, gmfn, 0 /* Be thorough */, 1 /* Must succeed */);
}
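
One design note on the wrapper above: it sits outside the #ifdef, and the !CONFIG_SHADOW_PAGING stub earlier is an empty static inline with the same (new) signature, so callers need no conditional compilation. A hypothetical caller (the name is invented):

    /* Compiles identically with or without CONFIG_SHADOW_PAGING;
     * in non-shadow builds the call collapses to a no-op. */
    static void forget_pagetable(struct domain *d, mfn_t gmfn)
    {
        shadow_remove_all_shadows(d, gmfn);
    }
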
#endif /* _XEN_SHADOW_H */