// FIXME should validate address here
unsigned long pteval;
unsigned long is_data = !((isr >> IA64_ISR_X_BIT) & 1UL);
+ seqlock_t *vtlb_lock = &current->domain->arch.vtlb_lock;
+ unsigned long seq;
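+ /* vtlb_lock is a per-domain seqlock (presumably introduced elsewhere in
+    this patch as d->arch.vtlb_lock); the fault path samples its sequence
+    and retries the translation if a vtlb flush runs concurrently. */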
IA64FAULT fault;
if ((isr & IA64_ISR_IR) && handle_lazy_cover(current, regs)) return;
}
again:
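+ /* Read side: snapshot the sequence counter before translating; if a
+    writer (a global vtlb flush) runs before we finish, read_seqretry()
+    below returns nonzero and we redo the translation. */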
+ seq = read_seqbegin(vtlb_lock);
fault = vcpu_translate(current,address,is_data,&pteval,&itir,&iha);
if (fault == IA64_NO_FAULT || fault == IA64_USE_TLB) {
u64 logps;
pteval = translate_domain_pte(pteval, address, itir, &logps);
vcpu_itc_no_srlz(current,is_data?2:1,address,pteval,-1UL,logps);
- if (fault == IA64_USE_TLB && !current->arch.dtlb.pte.p) {
- /* dtlb has been purged in-between. This dtlb was
- matching. Undo the work. */
- vcpu_flush_tlb_vhpt_range (address, 1);
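+ /* If the sequence changed, a flush raced with the translation above:
+    the TC entry just inserted may be stale, so purge it and retry. */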
+ if (read_seqretry(vtlb_lock, seq)) {
+ vcpu_flush_tlb_vhpt_range(address & ~((1UL << logps) - 1),
+ logps);
goto again;
}
return;
{
int cpu = smp_processor_id ();
struct vcpu *v;
+ seqlock_t *vtlb_lock = &current->domain->arch.vtlb_lock;
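+ /* Write side: bumping the sequence forces any vcpu that is concurrently
+    handling a fault to retry its translation after this flush completes. */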
+ write_seqlock(vtlb_lock);
for_each_vcpu (current->domain, v)
if (v->processor == cpu)
vcpu_flush_vtlb_all ();
else
smp_call_function_single
(v->processor,
(void(*)(void *))vcpu_flush_vtlb_all,
NULL,1,1);
+ write_sequnlock(vtlb_lock);
}
static void cpu_flush_vhpt_range (int cpu, u64 vadr, u64 addr_range)
void domain_flush_vtlb_range (struct domain *d, u64 vadr, u64 addr_range)
{
+ seqlock_t *vtlb_lock = &d->arch.vtlb_lock;
struct vcpu *v;
#if 0
}
#endif
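+ /* Hold the write side of the seqlock across the whole purge so that a
+    faulting vcpu cannot re-insert a stale entry for this range unnoticed. */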
+ write_seqlock(vtlb_lock);
for_each_vcpu (d, v) {
/* Purge TC entries.
FIXME: clear only if match. */
/* ptc.ga */
ia64_global_tlb_purge(vadr,vadr+addr_range,PAGE_SHIFT);
+ write_sequnlock(vtlb_lock);
}
static void flush_tlb_vhpt_all (struct domain *d)
local_flush_tlb_all ();
}
+// This is called only when the domain is being destroyed, so no vcpu can
+// fault on it concurrently and there is no race.
void domain_flush_destroy (struct domain *d)
{
/* Very heavy... */
void flush_tlb_mask(cpumask_t mask)
{
+ seqlock_t *vtlb_lock = &current->domain->arch.vtlb_lock;
int cpu;
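+ /* Write side, serialised against the fault path.  Taking current->domain's
+    lock assumes flush_tlb_mask is only invoked in the context of the domain
+    whose mappings are being flushed. */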
+ write_seqlock(vtlb_lock);
cpu = smp_processor_id();
if (cpu_isset (cpu, mask)) {
cpu_clear(cpu, mask);
}
if (cpus_empty(mask))
- return;
+ goto out;
for_each_cpu_mask (cpu, mask)
smp_call_function_single
(cpu, (void (*)(void *))flush_tlb_vhpt_all, NULL, 1, 1);
+out:
+ write_sequnlock(vtlb_lock);
}
void zero_vhpt_stats(void)