#define ACPI_PROCESSOR_MAX_C2_LATENCY 100
#define ACPI_PROCESSOR_MAX_C3_LATENCY 1000
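+/*
+ * Hooks to stop/restart the local APIC timer around deep C-states;
+ * bound to either HPET or PIT broadcast when C3 is validated below.
+ */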
+static void (*lapic_timer_off)(void);
+static void (*lapic_timer_on)(void);
+
extern u32 pmtmr_ioport;
extern void (*pm_idle) (void);
/* preparing TSC stop */
cstate_save_tsc();
/* preparing APIC stop */
- hpet_broadcast_enter();
+ lapic_timer_off();
/* Get start time (ticks) */
t1 = inl(pmtmr_ioport);
/* Get end time (ticks) */
t2 = inl(pmtmr_ioport);
- /* recovering APIC */
- hpet_broadcast_exit();
/* recovering TSC */
cstate_restore_tsc();
/* Re-enable interrupts */
local_irq_enable();
+ /* recovering APIC */
+ lapic_timer_on();
/* Compute time (ticks) that we were actually asleep */
sleep_ticks = ticks_elapsed(t1, t2);
/* Do not account our idle-switching overhead: */
if ( cx->type == ACPI_STATE_C3 )
{
- /* We must be able to use HPET in place of LAPIC timers. */
+ /* We must be able to use the HPET or PIT in place of LAPIC timers. */
- if ( !hpet_broadcast_is_available() )
+ if ( hpet_broadcast_is_available() )
+ {
+ lapic_timer_off = hpet_broadcast_enter;
+ lapic_timer_on = hpet_broadcast_exit;
+ }
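+ /* No usable HPET: fall back to broadcasting via the PIT. */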
+ else if ( pit_broadcast_is_available() )
+ {
+ lapic_timer_off = pit_broadcast_enter;
+ lapic_timer_on = pit_broadcast_exit;
+ }
+ else
+ {
return -EINVAL;
+ }
/* All the logic here assumes flags.bm_check is same across all CPUs */
if ( !bm_check_flag )
return product;
}
+/*
+ * cpumask of the CPUs that need the timer interrupt delivered as an IPI
+ * in place of their local APIC timer.
+ */
+extern int xen_cpuidle;
+static cpumask_t pit_broadcast_mask;
+
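+/* Called from the PIT interrupt: forward the tick to the CPUs in pit_broadcast_mask. */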
+static void smp_send_timer_broadcast_ipi(void)
+{
+ int cpu = smp_processor_id();
+ cpumask_t mask;
+
+ cpus_and(mask, cpu_online_map, pit_broadcast_mask);
+
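+ /* Service the local CPU directly rather than sending an IPI to ourselves. */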
+ if ( cpu_isset(cpu, mask) )
+ {
+ cpu_clear(cpu, mask);
+ raise_softirq(TIMER_SOFTIRQ);
+ }
+
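+ /* Raise TIMER_SOFTIRQ on the remaining CPUs via IPI. */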
+ if ( !cpus_empty(mask) )
+ {
+ cpumask_raise_softirq(mask, TIMER_SOFTIRQ);
+ }
+}
+
static void timer_interrupt(int irq, void *dev_id, struct cpu_user_regs *regs)
{
ASSERT(local_irq_is_enabled());
if ( !cpu_has_apic )
raise_softirq(TIMER_SOFTIRQ);
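+
+ /* Relay this PIT tick to CPUs relying on broadcast while their LAPIC timer is stopped. */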
+ if ( xen_cpuidle )
+ smp_send_timer_broadcast_ipi();
+
/* Emulate a 32-bit PIT counter. */
if ( using_pit )
{
setup_irq(0, &irq0);
}
+/* Keep the PIT enabled so PIT broadcast keeps working while cpuidle is enabled. */
static int disable_pit_irq(void)
{
- if ( !using_pit && cpu_has_apic )
+ if ( !using_pit && cpu_has_apic && !xen_cpuidle )
{
/* Disable PIT CH0 timer interrupt. */
outb_p(0x30, PIT_MODE);
}
__initcall(disable_pit_irq);
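+
+/* Mark the calling CPU as needing PIT broadcast while its LAPIC timer is off. */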
+void pit_broadcast_enter(void)
+{
+ cpu_set(smp_processor_id(), pit_broadcast_mask);
+}
+
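+/* The calling CPU's LAPIC timer is back in use; remove it from the broadcast mask. */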
+void pit_broadcast_exit(void)
+{
+ cpu_clear(smp_processor_id(), pit_broadcast_mask);
+}
+
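+/* PIT broadcast is available whenever cpuidle is enabled, since the PIT IRQ is then kept on. */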
+int pit_broadcast_is_available(void)
+{
+ return xen_cpuidle;
+}
+
void send_timer_event(struct vcpu *v)
{
send_guest_vcpu_virq(v, VIRQ_TIMER);