struct irq_desc *desc = irq_to_desc(sgi);
+ perfc_incr(ipis);
+
/* Lower the priority */
gic_hw_ops->eoi_irq(desc);
* GICH_HCR_UIE is cleared before reading GICC_IAR. As a consequence
* this handler is not called.
*/
+ perfc_incr(maintenance_irqs);
}
void gic_dump_info(struct vcpu *v)
{
struct irq_desc *desc = irq_to_desc(irq);
- /* TODO: perfc_incr(irqs); */
+ perfc_incr(irqs);
+
+ ASSERT(irq >= 16); /* SGIs do not come down this path */
+
+ if ( irq < 32 )
+ perfc_incr(ppis);
+ else
+ perfc_incr(spis);
/* TODO: this_cpu(irq_count)++; */
{
struct domain *d = irq_get_domain(desc);
+ perfc_incr(guest_irqs);
desc->handler->end(desc);
set_bit(_IRQ_INPROGRESS, &desc->status);
if ( irq == (timer_irq[TIMER_HYP_PPI]) &&
READ_SYSREG32(CNTHP_CTL_EL2) & CNTx_CTL_PENDING )
{
+ perfc_incr(hyp_timer_irqs);
/* Signal the generic timer code to do its work */
raise_softirq(TIMER_SOFTIRQ);
/* Disable the timer to avoid more interrupts */
if ( irq == (timer_irq[TIMER_PHYS_NONSECURE_PPI]) &&
READ_SYSREG32(CNTP_CTL_EL0) & CNTx_CTL_PENDING )
{
+ perfc_incr(phys_timer_irqs);
/* Signal the generic timer code to do its work */
raise_softirq(TIMER_SOFTIRQ);
/* Disable the timer to avoid more interrupts */
if ( unlikely(is_idle_vcpu(current)) )
return;
+ perfc_incr(virt_timer_irqs);
+
current->arch.virt_timer.ctl = READ_SYSREG32(CNTV_CTL_EL0);
WRITE_SYSREG32(current->arch.virt_timer.ctl | CNTx_CTL_MASK, CNTV_CTL_EL0);
vgic_vcpu_inject_irq(current, current->arch.virt_timer.irq);
#include <xen/hypercall.h>
#include <xen/softirq.h>
#include <xen/domain_page.h>
+#include <xen/perfc.h>
#include <public/sched.h>
#include <public/xen.h>
#include <asm/debugger.h>
case PSCI_cpu_off:
{
uint32_t pstate = PSCI_ARG32(regs,1);
+ perfc_incr(vpsci_cpu_off);
PSCI_RESULT_REG(regs) = do_psci_cpu_off(pstate);
}
break;
{
uint32_t vcpuid = PSCI_ARG32(regs,1);
register_t epoint = PSCI_ARG(regs,2);
+ perfc_incr(vpsci_cpu_on);
PSCI_RESULT_REG(regs) = do_psci_cpu_on(vcpuid, epoint);
}
break;
case PSCI_0_2_FN_PSCI_VERSION:
+ perfc_incr(vpsci_version);
PSCI_RESULT_REG(regs) = do_psci_0_2_version();
break;
case PSCI_0_2_FN_CPU_OFF:
+ perfc_incr(vpsci_cpu_off);
PSCI_RESULT_REG(regs) = do_psci_0_2_cpu_off();
break;
case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
+ perfc_incr(vpsci_migrate_info_type);
PSCI_RESULT_REG(regs) = do_psci_0_2_migrate_info_type();
break;
case PSCI_0_2_FN_MIGRATE_INFO_UP_CPU:
case PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU:
+ perfc_incr(vpsci_migrate_info_up_cpu);
if ( psci_mode_check(current->domain, fid) )
PSCI_RESULT_REG(regs) = do_psci_0_2_migrate_info_up_cpu();
break;
case PSCI_0_2_FN_SYSTEM_OFF:
+ perfc_incr(vpsci_system_off);
do_psci_0_2_system_off();
PSCI_RESULT_REG(regs) = PSCI_INTERNAL_FAILURE;
break;
case PSCI_0_2_FN_SYSTEM_RESET:
+ perfc_incr(vpsci_system_reset);
do_psci_0_2_system_reset();
PSCI_RESULT_REG(regs) = PSCI_INTERNAL_FAILURE;
break;
case PSCI_0_2_FN_CPU_ON:
case PSCI_0_2_FN64_CPU_ON:
+ perfc_incr(vpsci_cpu_on);
if ( psci_mode_check(current->domain, fid) )
{
register_t vcpuid = PSCI_ARG(regs,1);
break;
case PSCI_0_2_FN_CPU_SUSPEND:
case PSCI_0_2_FN64_CPU_SUSPEND:
+ perfc_incr(vpsci_cpu_suspend);
if ( psci_mode_check(current->domain, fid) )
{
uint32_t pstate = PSCI_ARG32(regs,1);
break;
case PSCI_0_2_FN_AFFINITY_INFO:
case PSCI_0_2_FN64_AFFINITY_INFO:
+ perfc_incr(vpsci_cpu_affinity_info);
if ( psci_mode_check(current->domain, fid) )
{
register_t taff = PSCI_ARG(regs,1);
break;
case PSCI_0_2_FN_MIGRATE:
case PSCI_0_2_FN64_MIGRATE:
+ perfc_incr(vpsci_cpu_migrate);
if ( psci_mode_check(current->domain, fid) )
{
uint32_t tcpu = PSCI_ARG32(regs,1);
register_t orig_pc = regs->pc;
#endif
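+ /* The perfc 'hypercalls' array is indexed by hypercall number; it must cover the whole table. */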
+ BUILD_BUG_ON(NR_hypercalls < ARRAY_SIZE(arm_hypercall_table));
+
if ( iss != XEN_HYPERCALL_TAG )
domain_crash_synchronous();
if ( *nr >= ARRAY_SIZE(arm_hypercall_table) )
{
+ perfc_incr(invalid_hypercalls);
HYPERCALL_RESULT_REG(regs) = -ENOSYS;
return;
}
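+ /* Account the hypercall, bucketed by its number */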
+ perfc_incra(hypercalls, *nr);
call = arm_hypercall_table[*nr].fn;
if ( call == NULL )
{
cpsr_cond = cpsr >> 28;
if ( !((cc_map[cond] >> cpsr_cond) & 1) )
+ {
+ perfc_incr(trap_uncond);
return 0;
-
+ }
return 1;
}
}
if ( hsr.wfi_wfe.ti ) {
/* Yield the VCPU for WFE */
+ perfc_incr(trap_wfe);
vcpu_yield();
} else {
/* Block the VCPU for WFI */
+ perfc_incr(trap_wfi);
vcpu_block_unless_event_pending(current);
}
advance_pc(regs, hsr);
case HSR_EC_CP15_32:
if ( !is_32bit_domain(current->domain) )
goto bad_trap;
+ perfc_incr(trap_cp15_32);
do_cp15_32(regs, hsr);
break;
case HSR_EC_CP15_64:
if ( !is_32bit_domain(current->domain) )
goto bad_trap;
+ perfc_incr(trap_cp15_64);
do_cp15_64(regs, hsr);
break;
case HSR_EC_CP14_32:
if ( !is_32bit_domain(current->domain) )
goto bad_trap;
+ perfc_incr(trap_cp14_32);
do_cp14_32(regs, hsr);
break;
case HSR_EC_CP14_DBG:
if ( !is_32bit_domain(current->domain) )
goto bad_trap;
+ perfc_incr(trap_cp14_dbg);
do_cp14_dbg(regs, hsr);
break;
case HSR_EC_CP:
if ( !is_32bit_domain(current->domain) )
goto bad_trap;
+ perfc_incr(trap_cp);
do_cp(regs, hsr);
break;
case HSR_EC_SMC32:
+ perfc_incr(trap_smc32);
inject_undef32_exception(regs);
break;
case HSR_EC_HVC32:
+ perfc_incr(trap_hvc32);
#ifndef NDEBUG
if ( (hsr.iss & 0xff00) == 0xff00 )
return do_debug_trap(regs, hsr.iss & 0x00ff);
break;
#ifdef CONFIG_ARM_64
case HSR_EC_HVC64:
+ perfc_incr(trap_hvc64);
#ifndef NDEBUG
if ( (hsr.iss & 0xff00) == 0xff00 )
return do_debug_trap(regs, hsr.iss & 0x00ff);
do_trap_hypercall(regs, &regs->x16, hsr.iss);
break;
case HSR_EC_SMC64:
+ perfc_incr(trap_smc64);
inject_undef64_exception(regs, hsr.len);
break;
case HSR_EC_SYSREG:
if ( is_32bit_domain(current->domain) )
goto bad_trap;
+ perfc_incr(trap_sysreg);
do_sysreg(regs, hsr);
break;
#endif
case HSR_EC_INSTR_ABORT_LOWER_EL:
+ perfc_incr(trap_iabt);
do_trap_instr_abort_guest(regs, hsr);
break;
case HSR_EC_DATA_ABORT_LOWER_EL:
+ perfc_incr(trap_dabt);
do_trap_data_abort_guest(regs, hsr);
break;
int gicd_reg = (int)(info->gpa - v->domain->arch.vgic.dbase);
unsigned long flags;
+ perfc_incr(vgicd_reads);
+
switch ( gicd_reg )
{
case GICD_CTLR:
uint32_t tr;
unsigned long flags;
+ perfc_incr(vgicd_writes);
+
switch ( gicd_reg )
{
case GICD_CTLR:
{
uint32_t offset;
+ perfc_incr(vgicr_reads);
+
if ( v->domain->arch.vgic.rdist_stride != 0 )
offset = info->gpa & (v->domain->arch.vgic.rdist_stride - 1);
else
{
uint32_t offset;
+ perfc_incr(vgicr_writes);
+
if ( v->domain->arch.vgic.rdist_stride != 0 )
offset = info->gpa & (v->domain->arch.vgic.rdist_stride - 1);
else
unsigned int vcpu_id;
int gicd_reg = (int)(info->gpa - v->domain->arch.vgic.dbase);
+ perfc_incr(vgicd_reads);
+
switch ( gicd_reg )
{
case GICD_CTLR:
struct vcpu *old_vcpu, *new_vcpu;
int gicd_reg = (int)(info->gpa - v->domain->arch.vgic.dbase);
+ perfc_incr(vgicd_writes);
+
switch ( gicd_reg )
{
case GICD_CTLR:
ASSERT(hsr.ec == HSR_EC_SYSREG);
+ if ( sysreg.read )
+ perfc_incr(vgic_sysreg_reads);
+ else
+ perfc_incr(vgic_sysreg_writes);
+
switch ( hsr.bits & HSR_SYSREG_REGS_MASK )
{
case HSR_SYSREG_ICC_SGI1R_EL1:
#include <xen/softirq.h>
#include <xen/irq.h>
#include <xen/sched.h>
+#include <xen/perfc.h>
#include <asm/current.h>
if ( test_bit(GIC_IRQ_GUEST_MIGRATING, &p->status) )
return;
+ perfc_incr(vgic_irq_migrates);
+
spin_lock_irqsave(&old->arch.vgic.lock, flags);
if ( list_empty(&p->inflight) )
switch ( irqmode )
{
case SGI_TARGET_LIST:
+ perfc_incr(vgic_sgi_list);
break;
case SGI_TARGET_OTHERS:
/*
* We expect vcpu_mask to be 0 for SGI_TARGET_OTHERS and
* SGI_TARGET_SELF modes, so force vcpu_mask to 0.
*/
+ perfc_incr(vgic_sgi_others);
vcpu_mask = 0;
for ( i = 0; i < d->max_vcpus; i++ )
{
* We expect vcpu_mask to be 0 for SGI_TARGET_OTHERS and
* SGI_TARGET_SELF modes, so force vcpu_mask to 0.
*/
+ perfc_incr(vgic_sgi_self);
vcpu_mask = 0;
set_bit(current->vcpu_id, &vcpu_mask);
break;
running = v->is_running;
vcpu_unblock(v);
if ( running && v != current )
+ {
+ perfc_incr(vgic_cross_cpu_intr_inject);
smp_send_event_check_mask(cpumask_of(v->processor));
+ }
}
void vgic_vcpu_inject_spi(struct domain *d, unsigned int irq)
#include <xen/lib.h>
#include <xen/timer.h>
#include <xen/sched.h>
+#include <xen/perfc.h>
#include <asm/irq.h>
#include <asm/time.h>
#include <asm/gic.h>
struct vtimer *t = data;
t->ctl |= CNTx_CTL_PENDING;
if ( !(t->ctl & CNTx_CTL_MASK) )
+ {
+ perfc_incr(vtimer_phys_inject);
vgic_vcpu_inject_irq(t->v, t->irq);
+ }
+ else
+ perfc_incr(vtimer_phys_masked);
}
static void virt_timer_expired(void *data)
struct vtimer *t = data;
t->ctl |= CNTx_CTL_MASK;
vgic_vcpu_inject_irq(t->v, t->irq);
+ perfc_incr(vtimer_virt_inject);
}
int domain_vtimer_init(struct domain *d)
struct hsr_cp32 cp32 = hsr.cp32;
uint32_t *r = (uint32_t *)select_user_reg(regs, cp32.reg);
+ if ( cp32.read )
+ perfc_incr(vtimer_cp32_reads);
+ else
+ perfc_incr(vtimer_cp32_writes);
+
switch ( hsr.bits & HSR_CP32_REGS_MASK )
{
case HSR_CPREG32(CNTP_CTL):
uint32_t *r2 = (uint32_t *)select_user_reg(regs, cp64.reg2);
uint64_t x;
+ if ( cp64.read )
+ perfc_incr(vtimer_cp64_reads);
+ else
+ perfc_incr(vtimer_cp64_writes);
+
switch ( hsr.bits & HSR_CP64_REGS_MASK )
{
case HSR_CPREG64(CNTPCT):
register_t *x = select_user_reg(regs, sysreg.reg);
uint32_t r = (uint32_t)*x;
+ if ( sysreg.read )
+ perfc_incr(vtimer_sysreg_reads);
+ else
+ perfc_incr(vtimer_sysreg_writes);
+
switch ( hsr.bits & HSR_SYSREG_REGS_MASK )
{
case HSR_SYSREG_CNTP_CTL_EL0:
#include <xen/ctype.h>
#include <xen/serial.h>
+#include <xen/perfc.h>
#include <asm/mmio.h>
#include "vuart.h"
register_t *r = select_user_reg(regs, dabt.reg);
paddr_t offset = info->gpa - d->arch.vuart.info->base_addr;
+ perfc_incr(vuart_reads);
+
/* By default zeroed the register */
*r = 0;
register_t *r = select_user_reg(regs, dabt.reg);
paddr_t offset = info->gpa - d->arch.vuart.info->base_addr;
+ perfc_incr(vuart_writes);
+
if ( offset == d->arch.vuart.info->data_off )
/* ignore any status bits */
vuart_print_char(v, *r & 0xFF);
for_each_online_cpu ( cpu )
{
if ( k > 0 && (k % 4) == 0 )
- printk("\n%46s", "");
+ printk("\n%53s", "");
printk(" CPU%02u[%10"PRIperfc"u]", cpu, per_cpu(perfcounters, cpu)[j]);
++k;
}
if ( perfc_info[i].type == TYPE_S_ARRAY )
sum = (perfc_t) sum;
if ( k > 0 && (k % 4) == 0 )
- printk("\n%46s", "");
+ printk("\n%53s", "");
printk(" CPU%02u[%10Lu]", cpu, sum);
++k;
}
#define PAGE_MASK (~(PAGE_SIZE-1))
#define PAGE_FLAG_MASK (~0)
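+/* Bound on hypercall numbers; sizes the perfc 'hypercalls' array. */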
+#define NR_hypercalls 64
+
#define STACK_ORDER 3
#define STACK_SIZE (PAGE_SIZE << STACK_ORDER)
--- /dev/null
+#ifndef __ASM_PERFC_H__
+#define __ASM_PERFC_H__
+
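+/* Arm has no arch-specific perf counter state to reset or gather. */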
+static inline void arch_perfc_reset(void)
+{
+}
+
+static inline void arch_perfc_gather(void)
+{
+}
+
+#endif
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
--- /dev/null
+/* This file is legitimately included multiple times. */
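+/*
+ * Each includer defines PERFCOUNTER() and PERFCOUNTER_ARRAY() to
+ * expand this list into what it needs, e.g. the counter enum in
+ * xen/perfc.h and the description table in common/perfc.c.
+ */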
+/*#ifndef __XEN_PERFC_DEFN_H__*/
+/*#define __XEN_PERFC_DEFN_H__*/
+
+PERFCOUNTER_ARRAY(hypercalls, "hypercalls", NR_hypercalls)
+
+PERFCOUNTER(invalid_hypercalls, "invalid hypercalls")
+
+PERFCOUNTER(trap_wfi, "trap: wfi")
+PERFCOUNTER(trap_wfe, "trap: wfe")
+PERFCOUNTER(trap_cp15_32, "trap: cp15 32-bit access")
+PERFCOUNTER(trap_cp15_64, "trap: cp15 64-bit access")
+PERFCOUNTER(trap_cp14_32, "trap: cp14 32-bit access")
+PERFCOUNTER(trap_cp14_dbg, "trap: cp14 dbg access")
+PERFCOUNTER(trap_cp, "trap: cp access")
+PERFCOUNTER(trap_smc32, "trap: 32-bit smc")
+PERFCOUNTER(trap_hvc32, "trap: 32-bit hvc")
+#ifdef CONFIG_ARM_64
+PERFCOUNTER(trap_smc64, "trap: 64-bit smc")
+PERFCOUNTER(trap_hvc64, "trap: 64-bit hvc")
+PERFCOUNTER(trap_sysreg, "trap: sysreg access")
+#endif
+PERFCOUNTER(trap_iabt, "trap: guest instr abort")
+PERFCOUNTER(trap_dabt, "trap: guest data abort")
+PERFCOUNTER(trap_uncond, "trap: condition failed")
+
+PERFCOUNTER(vpsci_cpu_on, "vpsci: cpu_on")
+PERFCOUNTER(vpsci_cpu_off, "vpsci: cpu_off")
+PERFCOUNTER(vpsci_version, "vpsci: version")
+PERFCOUNTER(vpsci_migrate_info_type, "vpsci: migrate_info_type")
+PERFCOUNTER(vpsci_migrate_info_up_cpu, "vpsci: migrate_info_up_cpu")
+PERFCOUNTER(vpsci_system_off, "vpsci: system_off")
+PERFCOUNTER(vpsci_system_reset, "vpsci: system_reset")
+PERFCOUNTER(vpsci_cpu_suspend, "vpsci: cpu_suspend")
+PERFCOUNTER(vpsci_cpu_affinity_info, "vpsci: cpu_affinity_info")
+PERFCOUNTER(vpsci_cpu_migrate, "vpsci: cpu_migrate")
+
+PERFCOUNTER(vgicd_reads, "vgicd: read")
+PERFCOUNTER(vgicd_writes, "vgicd: write")
+PERFCOUNTER(vgicr_reads, "vgicr: read")
+PERFCOUNTER(vgicr_writes, "vgicr: write")
+PERFCOUNTER(vgic_sysreg_reads, "vgic: sysreg read")
+PERFCOUNTER(vgic_sysreg_writes, "vgic: sysreg write")
+PERFCOUNTER(vgic_sgi_list, "vgic: SGI sent to list")
+PERFCOUNTER(vgic_sgi_others, "vgic: SGI sent to others")
+PERFCOUNTER(vgic_sgi_self, "vgic: SGI sent to self")
+PERFCOUNTER(vgic_cross_cpu_intr_inject, "vgic: cross-CPU irq inject")
+PERFCOUNTER(vgic_irq_migrates, "vgic: irq migration")
+
+PERFCOUNTER(vuart_reads, "vuart: read")
+PERFCOUNTER(vuart_writes, "vuart: write")
+
+PERFCOUNTER(vtimer_cp32_reads, "vtimer: cp32 read")
+PERFCOUNTER(vtimer_cp32_writes, "vtimer: cp32 write")
+
+PERFCOUNTER(vtimer_cp64_reads, "vtimer: cp64 read")
+PERFCOUNTER(vtimer_cp64_writes, "vtimer: cp64 write")
+
+PERFCOUNTER(vtimer_sysreg_reads, "vtimer: sysreg read")
+PERFCOUNTER(vtimer_sysreg_writes, "vtimer: sysreg write")
+
+PERFCOUNTER(vtimer_phys_inject, "vtimer: phys expired, injected")
+PERFCOUNTER(vtimer_phys_masked, "vtimer: phys expired, masked")
+PERFCOUNTER(vtimer_virt_inject, "vtimer: virt expired, injected")
+
+PERFCOUNTER(irqs, "#IRQs")
+PERFCOUNTER(ipis, "#IPIs")
+PERFCOUNTER(ppis, "#PPIs")
+PERFCOUNTER(spis, "#SPIs")
+PERFCOUNTER(guest_irqs, "#GUEST-IRQS")
+
+PERFCOUNTER(hyp_timer_irqs, "Hypervisor timer interrupts")
+PERFCOUNTER(phys_timer_irqs, "Physical timer interrupts")
+PERFCOUNTER(virt_timer_irqs, "Virtual timer interrupts")
+PERFCOUNTER(maintenance_irqs, "Maintenance interrupts")
+
+/*#endif*/ /* __XEN_PERFC_DEFN_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */