return 1;
}
+/************************************************************
+ * PLATFORM TIMER 5: TSC
+ */
+
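+/* The TSC clocksource is identified by pointer-comparing its name string. */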
+static const char plt_tsc_name[] = "TSC";
+#define platform_timer_is_tsc() (plt_src.name == plt_tsc_name)
+
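+/*
+ * Use the TSC as the platform timer only when it is invariant across deep
+ * C-states; otherwise report failure so a conventional clocksource is used.
+ */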
+static int init_tsctimer(struct platform_timesource *pts)
+{
+ if ( !tsc_invariant )
+ return 0;
+
+ pts->name = (char *)plt_tsc_name;
+ return 1;
+}
+
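+/*
+ * Stamp this CPU's cpu_time with the current TSC and the system time scaled
+ * from it; with the TSC as platform timer, local and master stime coincide.
+ */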
+static void make_tsctimer_record(void)
+{
+ struct cpu_time *t = &this_cpu(cpu_time);
+ s_time_t now;
+ u64 tsc;
+
+ rdtscll(tsc);
+ now = scale_delta(tsc, &t->tsc_scale);
+
+ t->local_tsc_stamp = tsc;
+ t->stime_local_stamp = t->stime_master_stamp = now;
+}
+
/************************************************************
* GENERIC PLATFORM TIMER INFRASTRUCTURE
*/
static void resume_platform_timer(void)
{
+ if ( platform_timer_is_tsc() )
+ {
+ /* TODO: Save/restore TSC values. */
+ return;
+ }
+
/* No change in platform_stime across suspend/resume. */
platform_timer_stamp = plt_stamp64;
plt_stamp = plt_src.read_counter();
rc = init_cyclone(pts);
else if ( !strcmp(opt_clocksource, "acpi") )
rc = init_pmtimer(pts);
+ else if ( !strcmp(opt_clocksource, "tsc") )
+ rc = init_tsctimer(pts);
if ( rc <= 0 )
printk("WARNING: %s clocksource '%s'.\n",
!init_pmtimer(pts) )
init_pit(pts);
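+ /* With the TSC as platform timer, skip the generic counter mask/scale setup below. */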
+ if ( platform_timer_is_tsc() )
+ {
+ printk("Platform timer is TSC\n");
+ return;
+ }
+
plt_mask = (u64)~0ull >> (64 - pts->counter_bits);
set_time_scale(&plt_scale, pts->frequency);
struct cpu_time *t = &this_cpu(cpu_time);
u64 curr_tsc;
+ /* Nothing to do if the TSC is the platform timer: it is assumed constant-rate. */
+ if ( platform_timer_is_tsc() )
+ return 0;
+
/* Sanity check: CPU frequency allegedly dropping below 1MHz? */
if ( freq < 1000000u )
{
/* The overall calibration scale multiplier. */
u32 calibration_mul_frac;
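+
+ /*
+  * With the TSC as platform timer there is no cross-calibration to do:
+  * refresh this CPU's time stamps, propagate them to the running VCPU,
+  * and rearm the timer (every 10s).
+  */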
+ if ( platform_timer_is_tsc() )
+ {
+ make_tsctimer_record();
+ update_vcpu_system_time(current);
+ set_timer(&t->calibration_timer, NOW() + MILLISECS(10*1000));
+ return;
+ }
+
prev_tsc = t->local_tsc_stamp;
prev_local_stime = t->stime_local_stamp;
prev_master_stime = t->stime_master_stamp;
unsigned long flags;
s_time_t now;
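+
+ /*
+  * With the TSC as platform timer, stamp this CPU from the TSC directly
+  * and skip the platform counter read; the calibration timer is still
+  * armed below.
+  */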
+ if ( platform_timer_is_tsc() )
+ {
+ make_tsctimer_record();
+ goto out;
+ }
+
local_irq_save(flags);
rdtscll(t->local_tsc_stamp);
now = !plt_src.read_counter ? 0 : read_platform_stime();
t->stime_master_stamp = now;
t->stime_local_stamp = now;
+ out:
init_timer(&t->calibration_timer, local_time_calibration,
NULL, smp_processor_id());
set_timer(&t->calibration_timer, NOW() + EPOCH);
/* Late init function (after all CPUs are booted). */
int __init init_xen_time(void)
{
- wc_sec = get_cmos_time();
-
local_irq_disable();
+ /*
+  * Check whether the TSC is invariant across deep C-states (a feature
+  * introduced with Nehalem). This must be done before init_platform_timer()
+  * so that init_tsctimer() can see tsc_invariant.
+  */
+ if ( cpuid_edx(0x80000007) & (1u << 8) )
+ tsc_invariant = 1;
+
init_percpu_time();
stime_platform_stamp = 0;
init_platform_timer();
- /* check if TSC is invariant during deep C state
- this is a new feature introduced by Nehalem*/
- if ( cpuid_edx(0x80000007) & (1U<<8) )
- tsc_invariant = 1;
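+ /* Set the wallclock via do_settime() now that the platform timer is up. */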
+ do_settime(get_cmos_time(), 0, NOW());
local_irq_enable();
resume_platform_timer();
- do_settime(get_cmos_time() + cmos_utc_offset, 0, read_platform_stime());
-
init_percpu_time();
+ do_settime(get_cmos_time() + cmos_utc_offset, 0, NOW());
+
if ( !is_idle_vcpu(current) )
update_vcpu_system_time(current);