---- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/arch/ia64/kernel/time.c 2004-06-15 23:19:01.000000000 -0600
-+++ /home/djm/src/xen/xeno-ia64.bk/xen/arch/ia64/time.c 2004-11-23 17:25:18.000000000 -0700
+--- ../../linux-2.6.7/arch/ia64/kernel/time.c 2004-06-15 23:19:01.000000000 -0600
++++ arch/ia64/time.c 2005-03-08 08:05:00.000000000 -0700
@@ -10,16 +10,22 @@
*/
#include <linux/config.h>
extern unsigned long wall_jiffies;
-@@ -45,6 +54,59 @@
+@@ -45,6 +54,58 @@
#endif
+
+static inline u64 get_time_delta(void)
+{
-+ printf("get_time_delta: called, not implemented\n");
-+ return 0;
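++ // read the CPU's free-running interval time counter (ar.itc)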
++ return ia64_get_itc();
+}
+
+s_time_t get_s_time(void)
+void update_dom_time(struct domain *d)
+{
+// FIXME: implement this?
-+ printf("update_dom_time: called, not implemented, skipping\n");
++// printf("update_dom_time: called, not implemented, skipping\n");
+}
+
+/* Set clock to <secs,usecs> after 00:00:00 UTC, 1 January, 1970. */
static void
itc_reset (void)
{
-@@ -80,12 +142,15 @@
+@@ -80,12 +141,15 @@
return (elapsed_cycles*local_cpu_data->nsec_per_cyc) >> IA64_NSEC_PER_CYC_SHIFT;
}
int
do_settimeofday (struct timespec *tv)
{
-@@ -95,7 +160,9 @@
+@@ -95,7 +159,9 @@
if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
return -EINVAL;
{
/*
* This is revolting. We need to set "xtime" correctly. However, the value
-@@ -117,12 +184,15 @@
+@@ -117,12 +183,15 @@
time_esterror = NTP_PHASE_LIMIT;
time_interpolator_reset();
}
void
do_gettimeofday (struct timeval *tv)
-@@ -185,6 +255,7 @@
+@@ -185,6 +254,7 @@
}
EXPORT_SYMBOL(do_gettimeofday);
/*
* The profiling function is SMP safe. (nothing can mess
-@@ -195,6 +266,9 @@
+@@ -195,6 +265,9 @@
static inline void
ia64_do_profile (struct pt_regs * regs)
{
unsigned long ip, slot;
extern cpumask_t prof_cpu_mask;
-@@ -231,24 +305,88 @@
+@@ -231,24 +304,89 @@
ip = prof_len-1;
atomic_inc((atomic_t *)&prof_buffer[ip]);
}
+ domain_wake(current);
+ }
+ }
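++ // let Xen's ac_timer softirq run any timers that have expired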
++ raise_actimer_softirq();
+#endif
+#ifndef XEN
unsigned long psr = regs->cr_ipsr, mask, flags;
unsigned long iip = regs->cr_iip;
// FIXME should validate address here
- unsigned long pteval, mpaddr;
+ unsigned long pteval, mpaddr, ps;
unsigned long lookup_domain_mpa(struct domain *,unsigned long);
+ unsigned long match_dtlb(struct exec_domain *,unsigned long, unsigned long *, unsigned long *);
IA64FAULT fault;
#ifndef USER_ACCESS
extern void __get_domain_bundle(void);
pteval = lookup_domain_mpa(d,address);
//FIXME: check return value?
// would be nice to have a counter here
- vcpu_itc_no_srlz(ed,2,address,pteval,PAGE_SHIFT);
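+ // mp_pte of -1UL means "no metaphysical pte recorded"; vcpu_tpa checks for it before using the cached value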
+ vcpu_itc_no_srlz(ed,2,address,pteval,-1UL,PAGE_SHIFT);
return;
}
#ifndef USER_ACCESS
#endif
if (address < 0x4000) printf("WARNING: page_fault @%p, iip=%p\n",address,iip);
+ // if we are fortunate enough to have it in the 1-entry TLB...
+ if ((pteval = match_dtlb(ed,address,&ps,NULL))) {
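+ // IorD==6 (2|4) inserts the data translation but skips the 1-entry TLB, per the 0x4 check in vcpu_itc_no_srlz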
+ vcpu_itc_no_srlz(ed,6,address,pteval,-1UL,ps);
+ return;
+ }
+ // look in the TRs
fault = vcpu_tpa(ed,address,&mpaddr);
if (fault != IA64_NO_FAULT) {
#ifndef USER_ACCESS
// would be nice to have a counter here
//printf("Handling privop data TLB miss\n");
// FIXME, must be inlined or potential for nested fault here!
- vcpu_itc_no_srlz(ed,2,address,pteval,PAGE_SHIFT);
+ vcpu_itc_no_srlz(ed,2,address,pteval,-1UL,PAGE_SHIFT);
}
void ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs, unsigned long itir)
}
pteval = lookup_domain_mpa(d,address);
// FIXME, must be inlined or potential for nested fault here!
- vcpu_itc_no_srlz(current,is_data?2:1,address,pteval,PAGE_SHIFT);
+ vcpu_itc_no_srlz(current,is_data?2:1,address,pteval,-1UL,PAGE_SHIFT);
return;
}
if (trp = match_tr(current,address)) {
// FIXME address had better be pre-validated on insert
pteval = translate_domain_pte(trp->page_flags,address,trp->itir);
- vcpu_itc_no_srlz(current,is_data?2:1,address,pteval,(trp->itir>>2)&0x3f);
+ vcpu_itc_no_srlz(current,is_data?2:1,address,pteval,-1UL,(trp->itir>>2)&0x3f);
return;
}
vector = is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR;
d->rid_bits = ridbits;
d->starting_rid = i << IA64_MIN_IMPL_RID_BITS;
d->ending_rid = (i+n_rid_blocks) << IA64_MIN_IMPL_RID_BITS;
+printf("###allocating rid_range, domain %p: starting_rid=%lx, ending_rid=%lx\n",
+d,d->starting_rid, d->ending_rid);
return 1;
}
//
unsigned long load_region_regs(struct exec_domain *ed)
{
- unsigned long rr0, rr1,rr2, rr3, rr4, rr5, rr6;
- unsigned long oldrr7, newrr7;
+ unsigned long rr0, rr1,rr2, rr3, rr4, rr5, rr6, rr7;
// TODO: These probably should be validated
if (ed->vcpu_info->arch.metaphysical_mode) {
rrv.rid = ed->domain->metaphysical_rid;
rrv.ps = PAGE_SHIFT;
rrv.ve = 1;
- rr0 = rr1 = rr2 = rr3 = rr4 = rr5 = newrr7 = rrv.rrval;
+ rr0 = rr1 = rr2 = rr3 = rr4 = rr5 = rrv.rrval;
rrv.ve = 0;
rr6 = rrv.rrval;
+ set_rr_no_srlz(0x0000000000000000L, rr0);
+ set_rr_no_srlz(0x2000000000000000L, rr1);
+ set_rr_no_srlz(0x4000000000000000L, rr2);
+ set_rr_no_srlz(0x6000000000000000L, rr3);
+ set_rr_no_srlz(0x8000000000000000L, rr4);
+ set_rr_no_srlz(0xa000000000000000L, rr5);
+ set_rr_no_srlz(0xc000000000000000L, rr6);
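+ // serialization is deferred: the ia64_srlz_d() after rr7 is set below covers these inserts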
}
else {
rr0 = physicalize_rid(ed, ed->vcpu_info->arch.rrs[0]);
rr4 = physicalize_rid(ed, ed->vcpu_info->arch.rrs[4]);
rr5 = physicalize_rid(ed, ed->vcpu_info->arch.rrs[5]);
rr6 = physicalize_rid(ed, ed->vcpu_info->arch.rrs[6]);
- newrr7 = physicalize_rid(ed, ed->vcpu_info->arch.rrs[7]);
+ set_one_rr(0x0000000000000000L, rr0);
+ set_one_rr(0x2000000000000000L, rr1);
+ set_one_rr(0x4000000000000000L, rr2);
+ set_one_rr(0x6000000000000000L, rr3);
+ set_one_rr(0x8000000000000000L, rr4);
+ set_one_rr(0xa000000000000000L, rr5);
+ set_one_rr(0xc000000000000000L, rr6);
+ ia64_srlz_d();
}
-
- set_rr_no_srlz(0x0000000000000000L, rr0);
- set_rr_no_srlz(0x2000000000000000L, rr1);
- set_rr_no_srlz(0x4000000000000000L, rr2);
- set_rr_no_srlz(0x6000000000000000L, rr3);
- set_rr_no_srlz(0x8000000000000000L, rr4);
- set_rr_no_srlz(0xa000000000000000L, rr5);
- set_rr_no_srlz(0xc000000000000000L, rr6);
+ rr7 = physicalize_rid(ed, ed->vcpu_info->arch.rrs[7]);
+ set_one_rr(0xe000000000000000L, rr7);
ia64_srlz_d();
- oldrr7 = get_rr(0xe000000000000000L);
- if (oldrr7 != newrr7) {
- newrr7 = (newrr7 & ~0xff) | (PAGE_SHIFT << 2) | 1;
- return vmMangleRID(newrr7);
- }
- else return 0;
}
IA64FAULT vcpu_tpa(VCPU *vcpu, UINT64 vadr, UINT64 *padr)
{
extern TR_ENTRY *match_tr(VCPU *,UINT64);
- extern TR_ENTRY *match_dtlb(VCPU *,UINT64);
+ unsigned long match_dtlb(VCPU *, unsigned long, unsigned long *, unsigned long *);
TR_ENTRY *trp;
- UINT64 mask;
+ UINT64 mask, pteval, mp_pte, ps;
extern unsigned long privop_trace;
- if ((trp=match_tr(current,vadr)) || (trp=match_dtlb(current,vadr))) {
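+ // fast path: the 1-entry TLB caches both the machine pte and the guest's metaphysical pte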
+ if ((pteval = match_dtlb(vcpu, vadr, &ps, &mp_pte)) && (mp_pte != -1UL)) {
+ mask = (1L << ps) - 1;
+ *padr = ((mp_pte & _PAGE_PPN_MASK) & ~mask) | (vadr & mask);
+ verbose("vcpu_tpa: addr=%p @%p, successful, padr=%p\n",vadr,PSCB(vcpu,iip),*padr);
+ return (IA64_NO_FAULT);
+ }
+ if ((trp = match_tr(current,vadr))) {
mask = (1L << trp->ps) - 1;
*padr = ((trp->ppn << 12) & ~mask) | (vadr & mask);
verbose("vcpu_tpa: addr=%p @%p, successful, padr=%p\n",vadr,PSCB(vcpu,iip),*padr);
extern struct domain *dom0;
-void vcpu_itc_no_srlz(VCPU *vcpu, UINT64 IorD, UINT64 vaddr, UINT64 pte, UINT64 logps)
+void vcpu_itc_no_srlz(VCPU *vcpu, UINT64 IorD, UINT64 vaddr, UINT64 pte, UINT64 mp_pte, UINT64 logps)
{
unsigned long psr;
unsigned long ps = (vcpu->domain==dom0) ? logps : PAGE_SHIFT;
ia64_itc(IorD,vaddr,pte,ps); // FIXME: look for bigger mappings
ia64_set_psr(psr);
// ia64_srlz_i(); // no srls req'd, will rfi later
- if (IorD & 0x1) vcpu_set_tr_entry(&PSCB(vcpu,itlb),pte,logps<<2,vaddr);
- if (IorD & 0x2) vcpu_set_tr_entry(&PSCB(vcpu,dtlb),pte,logps<<2,vaddr);
+ if (IorD & 0x4) return; // don't place in 1-entry TLB
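+ // shadow the insert in the 1-entry TLB, saving the metaphysical pte alongside for the vcpu_tpa/vcpu_fc fast paths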
+ if (IorD & 0x1) {
+ vcpu_set_tr_entry(&PSCB(vcpu,itlb),pte,ps<<2,vaddr);
+ PSCB(vcpu,itlb_pte) = mp_pte;
+ }
+ if (IorD & 0x2) {
+ vcpu_set_tr_entry(&PSCB(vcpu,dtlb),pte,ps<<2,vaddr);
+ PSCB(vcpu,dtlb_pte) = mp_pte;
+ }
}
-TR_ENTRY *match_dtlb(VCPU *vcpu, unsigned long ifa)
+// NOTE: returns a physical pte, NOT a "metaphysical" pte, so the physical
+// address it contains must not be sanity-checked for correctness
+unsigned long match_dtlb(VCPU *vcpu, unsigned long ifa, unsigned long *ps, unsigned long *mp_pte)
{
- return vcpu_match_tr_entry(vcpu,&vcpu->vcpu_info->arch.dtlb,ifa,1);
+ TR_ENTRY *trp;
+
+ if ((trp = vcpu_match_tr_entry(vcpu,&vcpu->vcpu_info->arch.dtlb,ifa,1))) {
+ if (ps) *ps = trp->ps;
+ if (mp_pte) *mp_pte = vcpu->vcpu_info->arch.dtlb_pte;
+ return (trp->page_flags);
+ }
+ return 0UL;
}
IA64FAULT vcpu_itc_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
//itir = (itir & ~0xfc) | (PAGE_SHIFT<<2); // ignore domain's pagesize
pteval = translate_domain_pte(pte,ifa,itir);
if (!pteval) return IA64_ILLOP_FAULT;
- vcpu_itc_no_srlz(vcpu,2,ifa,pteval,logps);
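+ // pass the guest's original pte as mp_pte so it is cached with the 1-entry TLB entry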
+ vcpu_itc_no_srlz(vcpu,2,ifa,pteval,pte,logps);
return IA64_NO_FAULT;
}
pteval = translate_domain_pte(pte,ifa,itir);
// FIXME: what to do if bad physical address? (machine check?)
if (!pteval) return IA64_ILLOP_FAULT;
- vcpu_itc_no_srlz(vcpu, 1,ifa,pteval,logps);
+ vcpu_itc_no_srlz(vcpu, 1,ifa,pteval,pte,logps);
return IA64_NO_FAULT;
}
return IA64_ILLOP_FAULT;
}
+// At privlvl=0, fc performs no access rights or protection key checks, while
+// at privlvl!=0, fc performs access rights checks as if it were a 1-byte
+// read, but still no protection key check.  Thus, to avoid an unexpected
+// access rights fault, we have to translate the virtual address to a
+// physical address (possibly via a metaphysical address) and do the fc
+// on the physical address, which is guaranteed to flush the same cache line.
IA64FAULT vcpu_fc(VCPU *vcpu, UINT64 vadr)
{
- UINT64 mpaddr;
+ UINT64 mpaddr, ps;
IA64FAULT fault;
+ unsigned long match_dtlb(VCPU *, unsigned long, unsigned long *, unsigned long *);
unsigned long lookup_domain_mpa(struct domain *,unsigned long);
unsigned long pteval, dom_imva;
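+ // fast path: if the 1-entry TLB already maps vadr, flush via the machine address it holds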
+ if ((pteval = match_dtlb(vcpu, vadr, NULL, NULL))) {
+ dom_imva = __va(pteval & _PFN_MASK);
+ ia64_fc(dom_imva);
+ return IA64_NO_FAULT;
+ }
fault = vcpu_tpa(vcpu, vadr, &mpaddr);
if (fault == IA64_NO_FAULT) {
struct domain *dom0;
#include <asm/processor.h>
#include <xen/serial.h>
#include <asm/io.h>
+#include <xen/softirq.h>
efi_memory_desc_t ia64_efi_io_md;
EXPORT_SYMBOL(ia64_efi_io_md);
void cleanup_writable_pagetable(struct domain *d, int what) { return; }
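+// small wrapper so callers such as ia64_do_profile can raise the ac_timer softirq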
+void raise_actimer_softirq(void)
+{
+ raise_softirq(AC_TIMER_SOFTIRQ);
+}
+
///////////////////////////////
// from arch/x86/apic.c
///////////////////////////////
int reprogram_ac_timer(s_time_t timeout)
{
+ struct exec_domain *ed = current;
+
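+ // record Xen's next timeout; the idle domain programs cr.itm directly, a guest vcpu recomputes its next timer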
+ local_cpu_data->itm_next = timeout;
+ if (is_idle_task(ed->domain)) vcpu_safe_set_itm(timeout);
+ else vcpu_set_next_timer(ed);
return 1;
}
/* Must do this early -- e.g., spinlocks rely on get_current(). */
set_current(&idle0_exec_domain);
+ idle0_exec_domain.domain = &idle0_domain;
early_setup_arch();
printk("About to call ac_timer_init()\n");
ac_timer_init();
// init_xen_time(); ???
-// schedulers_start(); ???
+ schedulers_start();
// do_initcalls(); ???
printk("About to call sort_main_extable()\n");
sort_main_extable();
// control flags for turning on/off features under test
#undef CLONE_DOMAIN0
+//#define CLONE_DOMAIN0 1
+//#undef CLONE_DOMAIN0
#define USER_ACCESS
// manufactured from component pieces
extern void vcpu_set_next_timer(VCPU *vcpu);
extern BOOLEAN vcpu_timer_expired(VCPU *vcpu);
extern UINT64 vcpu_deliverable_interrupts(VCPU *vcpu);
-extern void vcpu_itc_no_srlz(VCPU *vcpu, UINT64, UINT64, UINT64, UINT64);
+extern void vcpu_itc_no_srlz(VCPU *vcpu, UINT64, UINT64, UINT64, UINT64, UINT64);
#endif
TR_ENTRY dtrs[NDTRS];
TR_ENTRY itlb;
TR_ENTRY dtlb;
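+ // metaphysical ptes backing the 1-entry itlb/dtlb above (maintained by vcpu_itc_no_srlz)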
+ unsigned long itlb_pte;
+ unsigned long dtlb_pte;
unsigned long irr[4];
unsigned long insvc[4];
unsigned long iva;