// clear pte
old_pte = ptep_get_and_clear(mm, mpaddr, pte);
}
- domain_flush_vtlb_all();
+ domain_flush_vtlb_all(d);
return 0;
}
#ifndef CONFIG_XEN_IA64_TLB_TRACK
//XXX sledgehammer.
// flush finer range.
- domain_flush_vtlb_all();
+ domain_flush_vtlb_all(d);
put_page(page);
#else
switch (tlb_track_search_and_remove(d->arch.tlb_track,
* queue the page and flush vTLB only once.
* I.e. The caller must call dfree_flush() explicitly.
*/
- domain_flush_vtlb_all();
+ domain_flush_vtlb_all(d);
put_page(page);
break;
case TLB_TRACK_NOT_FOUND:
 * So we abandoned tracking virtual addresses.
 * A full vTLB flush is necessary.
*/
- domain_flush_vtlb_all();
+ domain_flush_vtlb_all(d);
put_page(page);
break;
case TLB_TRACK_AGAIN:
vcpu_flush_vtlb_all((struct vcpu*)vcpu);
}
-void domain_flush_vtlb_all (void)
+// The caller must have incremented the reference count of d somehow.
+void domain_flush_vtlb_all(struct domain* d)
{
int cpu = smp_processor_id ();
struct vcpu *v;
- for_each_vcpu (current->domain, v) {
+ for_each_vcpu(d, v) {
if (!test_bit(_VCPUF_initialised, &v->vcpu_flags))
continue;
void vcpu_flush_tlb_vhpt_range (u64 vadr, u64 log_range);
/* Global all flush of vTLB */
-void domain_flush_vtlb_all (void);
+void domain_flush_vtlb_all(struct domain *d);
/* Global range-flush of vTLB. */
void domain_flush_vtlb_range (struct domain *d, u64 vadr, u64 addr_range);
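Side note (not part of the patch): with the new signature, domain_flush_vtlb_all(d) no longer derives the domain from current, so a caller flushing a foreign domain must keep d alive across the call, as the added comment says. A minimal sketch of such a caller, assuming the generic Xen get_domain()/put_domain() reference helpers; the function name is hypothetical:

    /* Hypothetical caller, for illustration only: holds a reference on d
     * across the flush, since domain_flush_vtlb_all(d) does not take one. */
    static void flush_foreign_domain_vtlb(struct domain *d)
    {
        if (!get_domain(d))          /* take a reference; fails if d is dying */
            return;
        domain_flush_vtlb_all(d);    /* d is guaranteed to stay valid here */
        put_domain(d);               /* drop the reference taken above */
    }

Callers that already hold a reference (e.g. the domain destruction path) can of course call domain_flush_vtlb_all(d) directly without the extra get/put pair.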