From 65b976d82764be80c59bb59afdfdd5a8f9fdfb0a Mon Sep 17 00:00:00 2001 From: "awilliam@xenbuild.aw" Date: Fri, 9 Jun 2006 10:35:43 -0600 Subject: [PATCH] [IA64] add seqlock to protect vtlb Signed-off-by: Isaku Yamahata --- xen/arch/ia64/xen/domain.c | 1 + xen/arch/ia64/xen/faults.c | 10 ++++++---- xen/arch/ia64/xen/vhpt.c | 14 +++++++++++++- xen/include/asm-ia64/domain.h | 3 +++ 4 files changed, 23 insertions(+), 5 deletions(-) diff --git a/xen/arch/ia64/xen/domain.c b/xen/arch/ia64/xen/domain.c index 158ca2e8d9..1b22c06262 100644 --- a/xen/arch/ia64/xen/domain.c +++ b/xen/arch/ia64/xen/domain.c @@ -300,6 +300,7 @@ int arch_domain_create(struct domain *d) d->xen_vaend = XEN_END_ADDR; d->arch.shared_info_va = SHAREDINFO_ADDR; d->arch.breakimm = 0x1000; + seqlock_init(&d->arch.vtlb_lock); if (is_idle_domain(d)) return 0; diff --git a/xen/arch/ia64/xen/faults.c b/xen/arch/ia64/xen/faults.c index 7637135833..7b43697572 100644 --- a/xen/arch/ia64/xen/faults.c +++ b/xen/arch/ia64/xen/faults.c @@ -214,6 +214,8 @@ void ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_reg // FIXME should validate address here unsigned long pteval; unsigned long is_data = !((isr >> IA64_ISR_X_BIT) & 1UL); + seqlock_t* vtlb_lock = &current->domain->arch.vtlb_lock; + unsigned long seq; IA64FAULT fault; if ((isr & IA64_ISR_IR) && handle_lazy_cover(current, regs)) return; @@ -230,15 +232,15 @@ } again: + seq = read_seqbegin(vtlb_lock); fault = vcpu_translate(current,address,is_data,&pteval,&itir,&iha); if (fault == IA64_NO_FAULT || fault == IA64_USE_TLB) { u64 logps; pteval = translate_domain_pte(pteval, address, itir, &logps); vcpu_itc_no_srlz(current,is_data?2:1,address,pteval,-1UL,logps); - if (fault == IA64_USE_TLB && !current->arch.dtlb.pte.p) { - /* dtlb has been purged in-between. This dtlb was - matching. Undo the work. 
*/ - vcpu_flush_tlb_vhpt_range (address, 1); + if (read_seqretry(vtlb_lock, seq)) { + vcpu_flush_tlb_vhpt_range(address & ~((1UL << logps) - 1), + logps); goto again; } return; diff --git a/xen/arch/ia64/xen/vhpt.c b/xen/arch/ia64/xen/vhpt.c index 0d037fdb25..b0cb6547e9 100644 --- a/xen/arch/ia64/xen/vhpt.c +++ b/xen/arch/ia64/xen/vhpt.c @@ -152,7 +152,9 @@ void domain_flush_vtlb_all (void) { int cpu = smp_processor_id (); struct vcpu *v; + seqlock_t* vtlb_lock = &current->domain->arch.vtlb_lock; + write_seqlock(vtlb_lock); for_each_vcpu (current->domain, v) if (v->processor == cpu) vcpu_flush_vtlb_all (); @@ -161,6 +163,7 @@ (v->processor, (void(*)(void *))vcpu_flush_vtlb_all, NULL,1,1); + write_sequnlock(vtlb_lock); } static void cpu_flush_vhpt_range (int cpu, u64 vadr, u64 addr_range) @@ -187,6 +190,7 @@ void vcpu_flush_tlb_vhpt_range (u64 vadr, u64 log_range) void domain_flush_vtlb_range (struct domain *d, u64 vadr, u64 addr_range) { + seqlock_t* vtlb_lock = &d->arch.vtlb_lock; struct vcpu *v; #if 0 @@ -197,6 +201,7 @@ void domain_flush_vtlb_range (struct domain *d, u64 vadr, u64 addr_range) } #endif + write_seqlock(vtlb_lock); for_each_vcpu (d, v) { /* Purge TC entries. FIXME: clear only if match. */ @@ -213,6 +218,7 @@ void domain_flush_vtlb_range (struct domain *d, u64 vadr, u64 addr_range) /* ptc.ga */ ia64_global_tlb_purge(vadr,vadr+addr_range,PAGE_SHIFT); + write_sequnlock(vtlb_lock); } static void flush_tlb_vhpt_all (struct domain *d) @@ -224,6 +230,8 @@ static void flush_tlb_vhpt_all (struct domain *d) local_flush_tlb_all (); } +// This is called only when a domain is destroyed, +// so there is no race. void domain_flush_destroy (struct domain *d) { /* Very heavy... 
*/ @@ -233,8 +241,10 @@ void domain_flush_destroy (struct domain *d) void flush_tlb_mask(cpumask_t mask) { + seqlock_t* vtlb_lock = ¤t->domain->arch.vtlb_lock; int cpu; + write_seqlock(vtlb_lock); cpu = smp_processor_id(); if (cpu_isset (cpu, mask)) { cpu_clear(cpu, mask); @@ -242,11 +252,13 @@ void flush_tlb_mask(cpumask_t mask) } if (cpus_empty(mask)) - return; + goto out; for_each_cpu_mask (cpu, mask) smp_call_function_single (cpu, (void (*)(void *))flush_tlb_vhpt_all, NULL, 1, 1); +out: + write_sequnlock(vtlb_lock); } void zero_vhpt_stats(void) diff --git a/xen/include/asm-ia64/domain.h b/xen/include/asm-ia64/domain.h index 8f32c6da6d..2d74d4c9ad 100644 --- a/xen/include/asm-ia64/domain.h +++ b/xen/include/asm-ia64/domain.h @@ -76,6 +76,9 @@ struct arch_domain { void *efi_runtime; /* Metaphysical address to fpswa_interface_t in domain firmware memory is set. */ void *fpswa_inf; + + // protect v->itlb, v->dtlb and vhpt + seqlock_t vtlb_lock ____cacheline_aligned_in_smp; }; #define xen_vastart arch.xen_vastart #define xen_vaend arch.xen_vaend -- 2.30.2