From: djm@kirby.fc.hp.com Date: Tue, 8 Mar 2005 22:55:48 +0000 (+0000) Subject: bitkeeper revision 1.1236.14.1 (422e2d74w16o9oWR5FZGj2sVN8jvXQ) X-Git-Tag: archive/raspbian/4.8.0-1+rpi1~1^2~17857^2~62^2 X-Git-Url: https://dgit.raspbian.org/?a=commitdiff_plain;h=c7bc2aa500c98779125e3cf24e2d09209cc067b0;p=xen.git bitkeeper revision 1.1236.14.1 (422e2d74w16o9oWR5FZGj2sVN8jvXQ) Phase 1 support for multiple domains (not yet complete) --- diff --git a/xen/arch/ia64/patch/linux-2.6.7/time.c b/xen/arch/ia64/patch/linux-2.6.7/time.c index 5ec93f8bc2..78a45f6b90 100644 --- a/xen/arch/ia64/patch/linux-2.6.7/time.c +++ b/xen/arch/ia64/patch/linux-2.6.7/time.c @@ -1,5 +1,5 @@ ---- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/arch/ia64/kernel/time.c 2004-06-15 23:19:01.000000000 -0600 -+++ /home/djm/src/xen/xeno-ia64.bk/xen/arch/ia64/time.c 2004-11-23 17:25:18.000000000 -0700 +--- ../../linux-2.6.7/arch/ia64/kernel/time.c 2004-06-15 23:19:01.000000000 -0600 ++++ arch/ia64/time.c 2005-03-08 08:05:00.000000000 -0700 @@ -10,16 +10,22 @@ */ #include @@ -33,7 +33,7 @@ extern unsigned long wall_jiffies; -@@ -45,6 +54,59 @@ +@@ -45,6 +54,58 @@ #endif @@ -45,8 +45,7 @@ + +static inline u64 get_time_delta(void) +{ -+ printf("get_time_delta: called, not implemented\n"); -+ return 0; ++ return ia64_get_itc(); +} + +s_time_t get_s_time(void) @@ -74,7 +73,7 @@ +void update_dom_time(struct domain *d) +{ +// FIXME: implement this? -+ printf("update_dom_time: called, not implemented, skipping\n"); ++// printf("update_dom_time: called, not implemented, skipping\n"); +} + +/* Set clock to after 00:00:00 UTC, 1 January, 1970. */ @@ -93,7 +92,7 @@ static void itc_reset (void) { -@@ -80,12 +142,15 @@ +@@ -80,12 +141,15 @@ return (elapsed_cycles*local_cpu_data->nsec_per_cyc) >> IA64_NSEC_PER_CYC_SHIFT; } @@ -109,7 +108,7 @@ int do_settimeofday (struct timespec *tv) { -@@ -95,7 +160,9 @@ +@@ -95,7 +159,9 @@ if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC) return -EINVAL; @@ -119,7 +118,7 @@ { /* * This is revolting. We need to set "xtime" correctly. However, the value -@@ -117,12 +184,15 @@ +@@ -117,12 +183,15 @@ time_esterror = NTP_PHASE_LIMIT; time_interpolator_reset(); } @@ -135,7 +134,7 @@ void do_gettimeofday (struct timeval *tv) -@@ -185,6 +255,7 @@ +@@ -185,6 +254,7 @@ } EXPORT_SYMBOL(do_gettimeofday); @@ -143,7 +142,7 @@ /* * The profiling function is SMP safe. 
(nothing can mess -@@ -195,6 +266,9 @@ +@@ -195,6 +265,9 @@ static inline void ia64_do_profile (struct pt_regs * regs) { @@ -153,7 +152,7 @@ unsigned long ip, slot; extern cpumask_t prof_cpu_mask; -@@ -231,24 +305,88 @@ +@@ -231,24 +304,89 @@ ip = prof_len-1; atomic_inc((atomic_t *)&prof_buffer[ip]); } @@ -219,6 +218,7 @@ + domain_wake(current); + } + } ++ raise_actimer_softirq(); +#endif +#ifndef XEN diff --git a/xen/arch/ia64/process.c b/xen/arch/ia64/process.c index f1d40d7b8a..eb541c0636 100644 --- a/xen/arch/ia64/process.c +++ b/xen/arch/ia64/process.c @@ -239,8 +239,9 @@ void xen_handle_domain_access(unsigned long address, unsigned long isr, struct p unsigned long psr = regs->cr_ipsr, mask, flags; unsigned long iip = regs->cr_iip; // FIXME should validate address here - unsigned long pteval, mpaddr; + unsigned long pteval, mpaddr, ps; unsigned long lookup_domain_mpa(struct domain *,unsigned long); + unsigned long match_dtlb(struct exec_domain *,unsigned long, unsigned long *, unsigned long *); IA64FAULT fault; #ifndef USER_ACCESS extern void __get_domain_bundle(void); @@ -264,7 +265,7 @@ void xen_handle_domain_access(unsigned long address, unsigned long isr, struct p pteval = lookup_domain_mpa(d,address); //FIXME: check return value? // would be nice to have a counter here - vcpu_itc_no_srlz(ed,2,address,pteval,PAGE_SHIFT); + vcpu_itc_no_srlz(ed,2,address,pteval,-1UL,PAGE_SHIFT); return; } #ifndef USER_ACCESS @@ -276,6 +277,12 @@ void xen_handle_domain_access(unsigned long address, unsigned long isr, struct p #endif if (address < 0x4000) printf("WARNING: page_fault @%p, iip=%p\n",address,iip); + // if we are fortunate enough to have it in the 1-entry TLB... + if (pteval = match_dtlb(ed,address,&ps,NULL)) { + vcpu_itc_no_srlz(ed,6,address,pteval,-1UL,ps); + return; + } + // look in the TRs fault = vcpu_tpa(ed,address,&mpaddr); if (fault != IA64_NO_FAULT) { #ifndef USER_ACCESS @@ -314,7 +321,7 @@ if (address < 0x4000) printf("WARNING: page_fault @%p, iip=%p\n",address,iip); // would be nice to have a counter here //printf("Handling privop data TLB miss\n"); // FIXME, must be inlined or potential for nested fault here! - vcpu_itc_no_srlz(ed,2,address,pteval,PAGE_SHIFT); + vcpu_itc_no_srlz(ed,2,address,pteval,-1UL,PAGE_SHIFT); } void ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs, unsigned long itir) @@ -357,13 +364,13 @@ void ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_reg } pteval = lookup_domain_mpa(d,address); // FIXME, must be inlined or potential for nested fault here! - vcpu_itc_no_srlz(current,is_data?2:1,address,pteval,PAGE_SHIFT); + vcpu_itc_no_srlz(current,is_data?2:1,address,pteval,-1UL,PAGE_SHIFT); return; } if (trp = match_tr(current,address)) { // FIXME address had better be pre-validated on insert pteval = translate_domain_pte(trp->page_flags,address,trp->itir); - vcpu_itc_no_srlz(current,is_data?2:1,address,pteval,(trp->itir>>2)&0x3f); + vcpu_itc_no_srlz(current,is_data?2:1,address,pteval,-1UL,(trp->itir>>2)&0x3f); return; } vector = is_data ? 
		IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR;
diff --git a/xen/arch/ia64/regionreg.c b/xen/arch/ia64/regionreg.c
index 9dee9a3cea..d53b01f895 100644
--- a/xen/arch/ia64/regionreg.c
+++ b/xen/arch/ia64/regionreg.c
@@ -146,6 +146,8 @@ int allocate_rid_range(struct domain *d, unsigned long ridbits)
 	d->rid_bits = ridbits;
 	d->starting_rid = i << IA64_MIN_IMPL_RID_BITS;
 	d->ending_rid = (i+n_rid_blocks) << IA64_MIN_IMPL_RID_BITS;
+printf("###allocating rid_range, domain %p: starting_rid=%lx, ending_rid=%lx\n",
+d,d->starting_rid, d->ending_rid);
 	return 1;
 }
 
@@ -361,8 +363,7 @@ virtualize_rid(struct exec_domain *ed, unsigned long rid)
 //
 unsigned long load_region_regs(struct exec_domain *ed)
 {
-	unsigned long rr0, rr1,rr2, rr3, rr4, rr5, rr6;
-	unsigned long oldrr7, newrr7;
+	unsigned long rr0, rr1,rr2, rr3, rr4, rr5, rr6, rr7;
 
 	// TODO: These probably should be validated
 	if (ed->vcpu_info->arch.metaphysical_mode) {
@@ -372,9 +373,16 @@ unsigned long load_region_regs(struct exec_domain *ed)
 		rrv.rid = ed->domain->metaphysical_rid;
 		rrv.ps = PAGE_SHIFT;
 		rrv.ve = 1;
-		rr0 = rr1 = rr2 = rr3 = rr4 = rr5 = newrr7 = rrv.rrval;
+		rr0 = rr1 = rr2 = rr3 = rr4 = rr5 = rrv.rrval;
 		rrv.ve = 0;
 		rr6 = rrv.rrval;
+		set_rr_no_srlz(0x0000000000000000L, rr0);
+		set_rr_no_srlz(0x2000000000000000L, rr1);
+		set_rr_no_srlz(0x4000000000000000L, rr2);
+		set_rr_no_srlz(0x6000000000000000L, rr3);
+		set_rr_no_srlz(0x8000000000000000L, rr4);
+		set_rr_no_srlz(0xa000000000000000L, rr5);
+		set_rr_no_srlz(0xc000000000000000L, rr6);
 	}
 	else {
 		rr0 = physicalize_rid(ed, ed->vcpu_info->arch.rrs[0]);
@@ -384,21 +392,16 @@ unsigned long load_region_regs(struct exec_domain *ed)
 		rr4 = physicalize_rid(ed, ed->vcpu_info->arch.rrs[4]);
 		rr5 = physicalize_rid(ed, ed->vcpu_info->arch.rrs[5]);
 		rr6 = physicalize_rid(ed, ed->vcpu_info->arch.rrs[6]);
-		newrr7 = physicalize_rid(ed, ed->vcpu_info->arch.rrs[7]);
+		set_one_rr(0x0000000000000000L, rr0);
+		set_one_rr(0x2000000000000000L, rr1);
+		set_one_rr(0x4000000000000000L, rr2);
+		set_one_rr(0x6000000000000000L, rr3);
+		set_one_rr(0x8000000000000000L, rr4);
+		set_one_rr(0xa000000000000000L, rr5);
+		set_one_rr(0xc000000000000000L, rr6);
+		ia64_srlz_d();
 	}
-
-	set_rr_no_srlz(0x0000000000000000L, rr0);
-	set_rr_no_srlz(0x2000000000000000L, rr1);
-	set_rr_no_srlz(0x4000000000000000L, rr2);
-	set_rr_no_srlz(0x6000000000000000L, rr3);
-	set_rr_no_srlz(0x8000000000000000L, rr4);
-	set_rr_no_srlz(0xa000000000000000L, rr5);
-	set_rr_no_srlz(0xc000000000000000L, rr6);
+	rr7 = physicalize_rid(ed, ed->vcpu_info->arch.rrs[7]);
+	set_one_rr(0xe000000000000000L, rr7);
 	ia64_srlz_d();
-	oldrr7 = get_rr(0xe000000000000000L);
-	if (oldrr7 != newrr7) {
-		newrr7 = (newrr7 & ~0xff) | (PAGE_SHIFT << 2) | 1;
-		return vmMangleRID(newrr7);
-	}
-	else return 0;
 }
diff --git a/xen/arch/ia64/vcpu.c b/xen/arch/ia64/vcpu.c
index 6b7ba004b0..c0ab341b82 100644
--- a/xen/arch/ia64/vcpu.c
+++ b/xen/arch/ia64/vcpu.c
@@ -1097,12 +1097,18 @@ IA64FAULT vcpu_ttag(VCPU *vcpu, UINT64 vadr, UINT64 *padr)
 IA64FAULT vcpu_tpa(VCPU *vcpu, UINT64 vadr, UINT64 *padr)
 {
 	extern TR_ENTRY *match_tr(VCPU *,UINT64);
-	extern TR_ENTRY *match_dtlb(VCPU *,UINT64);
+	unsigned long match_dtlb(VCPU *, unsigned long, unsigned long *, unsigned long *);
 	TR_ENTRY *trp;
-	UINT64 mask;
+	UINT64 mask, pteval, mp_pte, ps;
 	extern unsigned long privop_trace;
 
-	if ((trp=match_tr(current,vadr)) || (trp=match_dtlb(current,vadr))) {
+	if (pteval = match_dtlb(vcpu, vadr, &ps, &mp_pte) && (mp_pte != -1UL)) {
+		mask = (1L << ps) - 1;
+		*padr = ((mp_pte & _PAGE_PPN_MASK) & ~mask) |
+			(vadr & mask);
+		verbose("vcpu_tpa: addr=%p @%p, successful, padr=%p\n",vadr,PSCB(vcpu,iip),*padr);
+		return (IA64_NO_FAULT);
+	}
+	if (trp=match_tr(current,vadr)) {
 		mask = (1L << trp->ps) - 1;
 		*padr = ((trp->ppn << 12) & ~mask) | (vadr & mask);
 		verbose("vcpu_tpa: addr=%p @%p, successful, padr=%p\n",vadr,PSCB(vcpu,iip),*padr);
@@ -1418,7 +1424,7 @@ void foobar(void) { /*vcpu_verbose = 1;*/ }
 
 extern struct domain *dom0;
 
-void vcpu_itc_no_srlz(VCPU *vcpu, UINT64 IorD, UINT64 vaddr, UINT64 pte, UINT64 logps)
+void vcpu_itc_no_srlz(VCPU *vcpu, UINT64 IorD, UINT64 vaddr, UINT64 pte, UINT64 mp_pte, UINT64 logps)
 {
 	unsigned long psr;
 	unsigned long ps = (vcpu->domain==dom0) ? logps : PAGE_SHIFT;
@@ -1429,13 +1435,29 @@ void vcpu_itc_no_srlz(VCPU *vcpu, UINT64 IorD, UINT64 vaddr, UINT64 pte, UINT64
 	ia64_itc(IorD,vaddr,pte,ps); // FIXME: look for bigger mappings
 	ia64_set_psr(psr);
 	// ia64_srlz_i(); // no srls req'd, will rfi later
-	if (IorD & 0x1) vcpu_set_tr_entry(&PSCB(vcpu,itlb),pte,logps<<2,vaddr);
-	if (IorD & 0x2) vcpu_set_tr_entry(&PSCB(vcpu,dtlb),pte,logps<<2,vaddr);
+	if (IorD & 0x4) return;  // don't place in 1-entry TLB
+	if (IorD & 0x1) {
+		vcpu_set_tr_entry(&PSCB(vcpu,itlb),pte,ps<<2,vaddr);
+		PSCB(vcpu,itlb_pte) = mp_pte;
+	}
+	if (IorD & 0x2) {
+		vcpu_set_tr_entry(&PSCB(vcpu,dtlb),pte,ps<<2,vaddr);
+		PSCB(vcpu,dtlb_pte) = mp_pte;
+	}
 }
 
-TR_ENTRY *match_dtlb(VCPU *vcpu, unsigned long ifa)
+// NOTE: returns a physical pte, NOT a "metaphysical" pte, so do not check
+// the physical address contained for correctness
+unsigned long match_dtlb(VCPU *vcpu, unsigned long ifa, unsigned long *ps, unsigned long *mp_pte)
 {
-	return vcpu_match_tr_entry(vcpu,&vcpu->vcpu_info->arch.dtlb,ifa,1);
+	TR_ENTRY *trp;
+
+	if (trp = vcpu_match_tr_entry(vcpu,&vcpu->vcpu_info->arch.dtlb,ifa,1)) {
+		if (ps) *ps = trp->ps;
+		if (mp_pte) *mp_pte = vcpu->vcpu_info->arch.dtlb_pte;
+		return (trp->page_flags);
+	}
+	return 0UL;
 }
 
 IA64FAULT vcpu_itc_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
@@ -1451,7 +1473,7 @@ IA64FAULT vcpu_itc_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
 	//itir = (itir & ~0xfc) | (PAGE_SHIFT<<2); // ignore domain's pagesize
 	pteval = translate_domain_pte(pte,ifa,itir);
 	if (!pteval) return IA64_ILLOP_FAULT;
-	vcpu_itc_no_srlz(vcpu,2,ifa,pteval,logps);
+	vcpu_itc_no_srlz(vcpu,2,ifa,pteval,pte,logps);
 	return IA64_NO_FAULT;
 }
 
@@ -1470,7 +1492,7 @@ IA64FAULT vcpu_itc_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
 	pteval = translate_domain_pte(pte,ifa,itir);
 	// FIXME: what to do if bad physical address? (machine check?)
 	if (!pteval) return IA64_ILLOP_FAULT;
-	vcpu_itc_no_srlz(vcpu, 1,ifa,pteval,logps);
+	vcpu_itc_no_srlz(vcpu, 1,ifa,pteval,pte,logps);
 	return IA64_NO_FAULT;
 }
 
@@ -1480,13 +1502,25 @@ IA64FAULT vcpu_ptc_l(VCPU *vcpu, UINT64 vadr, UINT64 addr_range)
 	return IA64_ILLOP_FAULT;
 }
 
+// At privlvl=0, fc performs no access rights or protection key checks, while
+// at privlvl!=0, fc performs access rights checks as if it were a 1-byte
+// read but no protection key check.
+// Thus in order to avoid an unexpected
+// access rights fault, we have to translate the virtual address to a
+// physical address (possibly via a metaphysical address) and do the fc
+// on the physical address, which is guaranteed to flush the same cache line
 IA64FAULT vcpu_fc(VCPU *vcpu, UINT64 vadr)
 {
-	UINT64 mpaddr;
+	UINT64 mpaddr, ps;
 	IA64FAULT fault;
+	unsigned long match_dtlb(VCPU *, unsigned long, unsigned long *, unsigned long *);
 	unsigned long lookup_domain_mpa(struct domain *,unsigned long);
 	unsigned long pteval, dom_imva;
 
+	if (pteval = match_dtlb(vcpu, vadr, NULL, NULL)) {
+		dom_imva = __va(pteval & _PFN_MASK);
+		ia64_fc(dom_imva);
+		return IA64_NO_FAULT;
+	}
 	fault = vcpu_tpa(vcpu, vadr, &mpaddr);
 	if (fault == IA64_NO_FAULT) {
 		struct domain *dom0;
diff --git a/xen/arch/ia64/xenmisc.c b/xen/arch/ia64/xenmisc.c
index 425aed9894..681ad41902 100644
--- a/xen/arch/ia64/xenmisc.c
+++ b/xen/arch/ia64/xenmisc.c
@@ -15,6 +15,7 @@
 #include
 #include
 #include
+#include
 
 efi_memory_desc_t ia64_efi_io_md;
 EXPORT_SYMBOL(ia64_efi_io_md);
@@ -67,12 +68,22 @@ struct pt_regs *get_execution_context(void) { return ia64_task_regs(current); }
 
 void cleanup_writable_pagetable(struct domain *d, int what) { return; }
 
+void raise_actimer_softirq(void)
+{
+	raise_softirq(AC_TIMER_SOFTIRQ);
+}
+
 ///////////////////////////////
 // from arch/x86/apic.c
 ///////////////////////////////
 
 int reprogram_ac_timer(s_time_t timeout)
 {
+	struct exec_domain *ed = current;
+
+	local_cpu_data->itm_next = timeout;
+	if (is_idle_task(ed->domain)) vcpu_safe_set_itm(timeout);
+	else vcpu_set_next_timer(current);
 	return 1;
 }
 
diff --git a/xen/arch/ia64/xensetup.c b/xen/arch/ia64/xensetup.c
index 59ba8a4c49..3e355a6d4f 100644
--- a/xen/arch/ia64/xensetup.c
+++ b/xen/arch/ia64/xensetup.c
@@ -140,6 +140,7 @@ void cmain(multiboot_info_t *mbi)
     /* Must do this early -- e.g., spinlocks rely on get_current(). */
     set_current(&idle0_exec_domain);
+    idle0_exec_domain.domain = &idle0_domain;
 
     early_setup_arch();
 
@@ -286,7 +287,7 @@ printk("About to call time_init()\n");
 printk("About to call ac_timer_init()\n");
     ac_timer_init();
 // init_xen_time(); ???
-// schedulers_start(); ???
+    schedulers_start();
 // do_initcalls(); ???
printk("About to call sort_main_extable()\n"); sort_main_extable(); diff --git a/xen/include/asm-ia64/config.h b/xen/include/asm-ia64/config.h index 3d1c4fc500..c1cf1cd259 100644 --- a/xen/include/asm-ia64/config.h +++ b/xen/include/asm-ia64/config.h @@ -1,5 +1,7 @@ // control flags for turning on/off features under test #undef CLONE_DOMAIN0 +//#define CLONE_DOMAIN0 1 +//#undef CLONE_DOMAIN0 #define USER_ACCESS // manufactured from component pieces diff --git a/xen/include/asm-ia64/vcpu.h b/xen/include/asm-ia64/vcpu.h index 53a32c1d63..6ee2e73dde 100644 --- a/xen/include/asm-ia64/vcpu.h +++ b/xen/include/asm-ia64/vcpu.h @@ -131,7 +131,7 @@ extern void vcpu_poke_timer(VCPU *vcpu); extern void vcpu_set_next_timer(VCPU *vcpu); extern BOOLEAN vcpu_timer_expired(VCPU *vcpu); extern UINT64 vcpu_deliverable_interrupts(VCPU *vcpu); -extern void vcpu_itc_no_srlz(VCPU *vcpu, UINT64, UINT64, UINT64, UINT64); +extern void vcpu_itc_no_srlz(VCPU *vcpu, UINT64, UINT64, UINT64, UINT64, UINT64); #endif diff --git a/xen/include/public/arch-ia64.h b/xen/include/public/arch-ia64.h index fc928cc1d3..065b65a9c7 100644 --- a/xen/include/public/arch-ia64.h +++ b/xen/include/public/arch-ia64.h @@ -70,6 +70,8 @@ typedef struct { TR_ENTRY dtrs[NDTRS]; TR_ENTRY itlb; TR_ENTRY dtlb; + unsigned long itlb_pte; + unsigned long dtlb_pte; unsigned long irr[4]; unsigned long insvc[4]; unsigned long iva;