Revert 21339:804304d4e05d "x86: TSC handling cleanups"
author: Keir Fraser <keir.fraser@citrix.com>
Fri, 21 May 2010 15:21:39 +0000 (16:21 +0100)
committer: Keir Fraser <keir.fraser@citrix.com>
Fri, 21 May 2010 15:21:39 +0000 (16:21 +0100)
It very much breaks PV domU boot.

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
xen/arch/x86/hvm/hvm.c
xen/arch/x86/hvm/save.c
xen/arch/x86/hvm/vpt.c
xen/arch/x86/time.c
xen/common/kernel.c
xen/include/asm-x86/hvm/domain.h
xen/include/asm-x86/hvm/hvm.h
xen/include/asm-x86/time.h
xen/include/public/features.h

index 022b4b79b19f0111118eb437be6ed575ca4f3773..515820bde261d36181c80ea002e63947e61de6ea 100644 (file)
@@ -205,6 +205,32 @@ void hvm_set_rdtsc_exiting(struct domain *d, bool_t enable)
         hvm_funcs.set_rdtsc_exiting(v, enable);
 }
 
+int hvm_gtsc_need_scale(struct domain *d)
+{
+    uint32_t gtsc_mhz, htsc_mhz;
+
+    if ( d->arch.vtsc )
+        return 0;
+
+    gtsc_mhz = d->arch.hvm_domain.gtsc_khz / 1000;
+    htsc_mhz = (uint32_t)cpu_khz / 1000;
+
+    d->arch.hvm_domain.tsc_scaled = (gtsc_mhz && (gtsc_mhz != htsc_mhz));
+    return d->arch.hvm_domain.tsc_scaled;
+}
+
+static u64 hvm_h2g_scale_tsc(struct vcpu *v, u64 host_tsc)
+{
+    uint32_t gtsc_khz, htsc_khz;
+
+    if ( !v->domain->arch.hvm_domain.tsc_scaled )
+        return host_tsc;
+
+    htsc_khz = cpu_khz;
+    gtsc_khz = v->domain->arch.hvm_domain.gtsc_khz;
+    return muldiv64(host_tsc, gtsc_khz, htsc_khz);
+}
+
 void hvm_set_guest_tsc(struct vcpu *v, u64 guest_tsc)
 {
     uint64_t tsc;
@@ -212,11 +238,11 @@ void hvm_set_guest_tsc(struct vcpu *v, u64 guest_tsc)
     if ( v->domain->arch.vtsc )
     {
         tsc = hvm_get_guest_time(v);
-        tsc = gtime_to_gtsc(v->domain, tsc);
     }
     else
     {
         rdtscll(tsc);
+        tsc = hvm_h2g_scale_tsc(v, tsc);
     }
 
     v->arch.hvm_vcpu.cache_tsc_offset = guest_tsc - tsc;
@@ -230,12 +256,12 @@ u64 hvm_get_guest_tsc(struct vcpu *v)
     if ( v->domain->arch.vtsc )
     {
         tsc = hvm_get_guest_time(v);
-        tsc = gtime_to_gtsc(v->domain, tsc);
         v->domain->arch.vtsc_kerncount++;
     }
     else
     {
         rdtscll(tsc);
+        tsc = hvm_h2g_scale_tsc(v, tsc);
     }
 
     return tsc + v->arch.hvm_vcpu.cache_tsc_offset;
index 3fc24a02637681ec294a31e8f7b3eff4703249e6..e409add6b6ef9dda925d9dadd96e68471c222750 100644 (file)
@@ -33,7 +33,7 @@ void arch_hvm_save(struct domain *d, struct hvm_save_header *hdr)
     hdr->cpuid = eax;
 
     /* Save guest's preferred TSC. */
-    hdr->gtsc_khz = d->arch.tsc_khz;
+    hdr->gtsc_khz = d->arch.hvm_domain.gtsc_khz;
 }
 
 int arch_hvm_load(struct domain *d, struct hvm_save_header *hdr)
@@ -62,8 +62,8 @@ int arch_hvm_load(struct domain *d, struct hvm_save_header *hdr)
 
     /* Restore guest's preferred TSC frequency. */
     if ( hdr->gtsc_khz )
-        d->arch.tsc_khz = hdr->gtsc_khz;
-    if ( d->arch.vtsc )
+        d->arch.hvm_domain.gtsc_khz = hdr->gtsc_khz;
+    if ( hvm_gtsc_need_scale(d) )
     {
         hvm_set_rdtsc_exiting(d, 1);
         gdprintk(XENLOG_WARNING, "Domain %d expects freq %uMHz "
index ce35a3e0497d280b2f1bc3bcd9baad8937a6c1f5..22d66f6fed4a0f7b9af9dc541be0f1c965390953 100644 (file)
@@ -32,6 +32,9 @@ void hvm_init_guest_time(struct domain *d)
     spin_lock_init(&pl->pl_time_lock);
     pl->stime_offset = -(u64)get_s_time();
     pl->last_guest_time = 0;
+
+    d->arch.hvm_domain.gtsc_khz = cpu_khz;
+    d->arch.hvm_domain.tsc_scaled = 0;
 }
 
 u64 hvm_get_guest_time(struct vcpu *v)
index 77fa8a5912545cf38404e6e25c17ef6dbb842f64..4863996930183a02b4e1bf666de102a5c0305146 100644 (file)
@@ -804,13 +804,8 @@ static void __update_vcpu_system_time(struct vcpu *v, int force)
 
     if ( d->arch.vtsc )
     {
-        u64 stime = t->stime_local_stamp;
-        if ( is_hvm_domain(d) )
-        {
-            struct pl_time *pl = &v->domain->arch.hvm_domain.pl_time;
-            stime += pl->stime_offset + v->arch.hvm_vcpu.stime_offset;
-        }
-        tsc_stamp = gtime_to_gtsc(d, stime);
+        u64 delta = max_t(s64, t->stime_local_stamp - d->arch.vtsc_offset, 0);
+        tsc_stamp = scale_delta(delta, &d->arch.ns_to_vtsc);
     }
     else
     {
@@ -833,8 +828,6 @@ static void __update_vcpu_system_time(struct vcpu *v, int force)
         _u.tsc_to_system_mul = t->tsc_scale.mul_frac;
         _u.tsc_shift         = (s8)t->tsc_scale.shift;
     }
-    if ( is_hvm_domain(d) )
-        _u.tsc_timestamp += v->arch.hvm_vcpu.cache_tsc_offset;
 
     /* Don't bother unless timestamp record has changed or we are forced. */
     _u.version = u->version; /* make versions match for memcmp test */
@@ -1598,18 +1591,11 @@ struct tm wallclock_time(void)
  * PV SoftTSC Emulation.
  */
 
-u64 gtime_to_gtsc(struct domain *d, u64 tsc)
-{
-    if ( !is_hvm_domain(d) )
-        tsc = max_t(s64, tsc - d->arch.vtsc_offset, 0);
-    return scale_delta(tsc, &d->arch.ns_to_vtsc);
-}
-
 void pv_soft_rdtsc(struct vcpu *v, struct cpu_user_regs *regs, int rdtscp)
 {
     s_time_t now = get_s_time();
     struct domain *d = v->domain;
-    u64 tsc;
+    u64 delta;
 
     spin_lock(&d->arch.vtsc_lock);
 
@@ -1625,7 +1611,8 @@ void pv_soft_rdtsc(struct vcpu *v, struct cpu_user_regs *regs, int rdtscp)
 
     spin_unlock(&d->arch.vtsc_lock);
 
-    tsc = gtime_to_gtsc(d, now);
+    delta = max_t(s64, now - d->arch.vtsc_offset, 0);
+    now = scale_delta(delta, &d->arch.ns_to_vtsc);
 
     regs->eax = (uint32_t)now;
     regs->edx = (uint32_t)(now >> 32);
@@ -1766,10 +1753,8 @@ void tsc_set_info(struct domain *d,
         d->arch.vtsc_offset = get_s_time() - elapsed_nsec;
         d->arch.tsc_khz = gtsc_khz ? gtsc_khz : cpu_khz;
         set_time_scale(&d->arch.vtsc_to_ns, d->arch.tsc_khz * 1000 );
-        /* use native TSC if initial host has safe TSC, has not migrated
-         * yet and tsc_khz == cpu_khz */
-        if ( host_tsc_is_safe() && incarnation == 0 &&
-                d->arch.tsc_khz == cpu_khz )
+        /* use native TSC if initial host has safe TSC and not migrated yet */
+        if ( host_tsc_is_safe() && incarnation == 0 )
             d->arch.vtsc = 0;
         else 
             d->arch.ns_to_vtsc = scale_reciprocal(d->arch.vtsc_to_ns);
@@ -1794,7 +1779,7 @@ void tsc_set_info(struct domain *d,
     }
     d->arch.incarnation = incarnation + 1;
     if ( is_hvm_domain(d) )
-        hvm_set_rdtsc_exiting(d, d->arch.vtsc);
+        hvm_set_rdtsc_exiting(d, d->arch.vtsc || hvm_gtsc_need_scale(d));
 }
 
 /* vtsc may incur measurable performance degradation, diagnose with this */
index 61a798df4ea4c7a725d2939c6093e487e014d039..f9d1dedb66209b5f9378c317e8d9423e03c2ae5c 100644 (file)
@@ -259,8 +259,6 @@ DO(xen_version)(int cmd, XEN_GUEST_HANDLE(void) arg)
                 fi.submap |= (1U << XENFEAT_mmu_pt_update_preserve_ad) |
                              (1U << XENFEAT_highmem_assist) |
                              (1U << XENFEAT_gnttab_map_avail_bits);
-            else
-                fi.submap |= (1U << XENFEAT_hvm_safe_pvclock);
 #endif
             break;
         default:
index 4d68d4b7f44ff7e79bc24be2e20bbce9260d177e..3cd2e3c0146c59bf6b98f5c7e8ffb2121338f6c9 100644 (file)
@@ -45,6 +45,8 @@ struct hvm_domain {
     struct hvm_ioreq_page  ioreq;
     struct hvm_ioreq_page  buf_ioreq;
 
+    uint32_t               gtsc_khz; /* kHz */
+    bool_t                 tsc_scaled;
     struct pl_time         pl_time;
 
     struct hvm_io_handler  io_handler;
index 66637cc3d58ccc1912801aa33a25c98a943b873c..0c2a8ff4501fa9249c746830e6a62891e85ca478 100644 (file)
@@ -296,6 +296,7 @@ int hvm_event_needs_reinjection(uint8_t type, uint8_t vector);
 uint8_t hvm_combine_hw_exceptions(uint8_t vec1, uint8_t vec2);
 
 void hvm_set_rdtsc_exiting(struct domain *d, bool_t enable);
+int hvm_gtsc_need_scale(struct domain *d);
 
 static inline int hvm_cpu_up(void)
 {
index 99038eed7c1a777b28ef5699e96364c42fe046e7..ca19a93164f410b2e7f9c013ed2e5ad9672a3fa5 100644 (file)
@@ -57,7 +57,6 @@ uint64_t acpi_pm_tick_to_ns(uint64_t ticks);
 uint64_t ns_to_acpi_pm_tick(uint64_t ns);
 
 void pv_soft_rdtsc(struct vcpu *v, struct cpu_user_regs *regs, int rdtscp);
-u64 gtime_to_gtsc(struct domain *d, u64 tsc);
 
 void tsc_set_info(struct domain *d, uint32_t tsc_mode, uint64_t elapsed_nsec,
                   uint32_t gtsc_khz, uint32_t incarnation);
index e95c7b755a49d965086e2a2109b7f7ac5098ac8c..879131cda12a53c25199c3f3e9575d266b13d76b 100644 (file)
@@ -68,9 +68,6 @@
  */
 #define XENFEAT_gnttab_map_avail_bits      7
 
-/* x86: pvclock algorithm is safe to use on HVM */
-#define XENFEAT_hvm_safe_pvclock           9
-
 #define XENFEAT_NR_SUBMAPS 1
 
 #endif /* __XEN_PUBLIC_FEATURES_H__ */