In ARMv8, a guest write to the ICC_SGI1R_EL1 system register traps to EL2.
Handle the trap and inject the SGI into the target vCPUs.
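The emulation decodes the written value's TargetList (bits [15:0]), SGI
INTID (bits [27:24]) and IRM routing-mode bit (bit [40]) and forwards the
SGI through the common vgic_to_sgi() path. ICC_SGI0R_EL1 and
ICC_ASGI1R_EL1 are not emulated yet; accesses to them inject an undefined
instruction exception into the guest.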
Signed-off-by: Vijaya Kumar K <Vijaya.Kumar@caviumnetworks.com>
Acked-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Acked-by: Julien Grall <julien.grall@linaro.org>
Acked-by: Ian Campbell <ian.campbell@citrix.com>
#include "decode.h"
#include "vtimer.h"
#include <asm/gic.h>
+#include <asm/vgic.h>
/* The base of the stack must always be double-word aligned, which means
* that both the kernel half of struct cpu_user_regs (which is pushed in
domain_crash_synchronous();
}
break;
+    case HSR_SYSREG_ICC_SGI1R_EL1:
+        if ( !vgic_emulate(regs, hsr) )
+        {
+            dprintk(XENLOG_WARNING,
+                    "failed emulation of sysreg ICC_SGI1R_EL1 access\n");
+            inject_undef64_exception(regs, hsr.len);
+        }
+        break;
+    case HSR_SYSREG_ICC_SGI0R_EL1:
+    case HSR_SYSREG_ICC_ASGI1R_EL1:
+        /* TBD: Implement to support secure grp0/1 SGI forwarding */
+        dprintk(XENLOG_WARNING,
+                "Emulation of sysreg ICC_SGI0R_EL1/ASGI1R_EL1 not supported\n");
+        inject_undef64_exception(regs, hsr.len);
+        break;
default:
bad_sysreg:
{
return 1;
}
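+/*
+ * Decode a guest write to ICC_SGI1R_EL1 and forward the requested SGI
+ * to the target vCPUs via the common vgic_to_sgi() path.
+ */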
+static int vgic_v3_to_sgi(struct vcpu *v, register_t sgir)
+{
+    int virq;
+    int irqmode;
+    enum gic_sgi_mode sgi_mode;
+    unsigned long vcpu_mask = 0;
+
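+    /* Extract the IRM routing mode (bit 40) and the SGI INTID (bits 27:24) */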
+    irqmode = (sgir >> ICH_SGI_IRQMODE_SHIFT) & ICH_SGI_IRQMODE_MASK;
+    virq = (sgir >> ICH_SGI_IRQ_SHIFT) & ICH_SGI_IRQ_MASK;
+    /* SGIs are injected at Rdist level 0; affinity levels 1, 2 and 3 are ignored */
+    vcpu_mask = sgir & ICH_SGI_TARGETLIST_MASK;
+
+    /* Map the GIC SGI mode onto the generic SGI mode enum */
+    switch ( irqmode )
+    {
+    case ICH_SGI_TARGET_LIST:
+        sgi_mode = SGI_TARGET_LIST;
+        break;
+    case ICH_SGI_TARGET_OTHERS:
+        sgi_mode = SGI_TARGET_OTHERS;
+        break;
+    default:
+        gdprintk(XENLOG_WARNING, "Wrong IRQ mode in SGI1R_EL1 register\n");
+        return 0;
+    }
+
+    return vgic_to_sgi(v, sgir, sgi_mode, virq, vcpu_mask);
+}
+
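+/*
+ * Emulate a trapped GICv3 sysreg access. Only writes to ICC_SGI1R_EL1
+ * are handled here; everything else fails the emulation.
+ */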
+static int vgic_v3_emulate_sysreg(struct cpu_user_regs *regs, union hsr hsr)
+{
+    struct vcpu *v = current;
+    struct hsr_sysreg sysreg = hsr.sysreg;
+    register_t *r = select_user_reg(regs, sysreg.reg);
+
+    ASSERT(hsr.ec == HSR_EC_SYSREG);
+
+    switch ( hsr.bits & HSR_SYSREG_REGS_MASK )
+    {
+    case HSR_SYSREG_ICC_SGI1R_EL1:
+        /* WO: only writes generate an SGI; a read fails the emulation */
+        if ( !sysreg.read )
+            return vgic_v3_to_sgi(v, *r);
+
+        gdprintk(XENLOG_WARNING,
+                 "Attempted read of write-only register ICC_SGI1R_EL1\n");
+        return 0;
+    default:
+        return 0;
+    }
+}
+
static const struct mmio_handler_ops vgic_rdistr_mmio_handler = {
    .read_handler = vgic_v3_rdistr_mmio_read,
    .write_handler = vgic_v3_rdistr_mmio_write,
    .domain_init = vgic_v3_domain_init,
    .get_irq_priority = vgic_v3_get_irq_priority,
    .get_target_vcpu = vgic_v3_get_target_vcpu,
+    .emulate_sysreg = vgic_v3_emulate_sysreg,
};
int vgic_v3_init(struct domain *d)
vgic_vcpu_inject_irq(v, v->domain->arch.evtchn_irq);
}
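+/*
+ * Dispatch a trapped sysreg access to the emulation callback registered
+ * by the domain's vGIC model (vgic_v3_emulate_sysreg for GICv3).
+ */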
+int vgic_emulate(struct cpu_user_regs *regs, union hsr hsr)
+{
+    struct vcpu *v = current;
+
+    ASSERT(v->domain->arch.vgic.handler->emulate_sysreg != NULL);
+
+    return v->domain->arch.vgic.handler->emulate_sysreg(regs, hsr);
+}
+
/*
* Local variables:
* mode: C
#define GICH_VMCR_PRIORITY_MASK 0xff
#define GICH_VMCR_PRIORITY_SHIFT 24
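+/* Field layout of ICC_SGI1R_EL1, as used by the SGI emulation */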
+#define ICH_SGI_IRQMODE_SHIFT 40
+#define ICH_SGI_IRQMODE_MASK 0x1
+#define ICH_SGI_TARGET_OTHERS 1
+#define ICH_SGI_TARGET_LIST 0
+#define ICH_SGI_IRQ_SHIFT 24
+#define ICH_SGI_IRQ_MASK 0xf
+#define ICH_SGI_TARGETLIST_MASK 0xffff
#endif /* __ASM_ARM_GIC_V3_DEFS_H__ */
/*
#define HSR_SYSREG_PMINTENCLR_EL1 HSR_SYSREG(3,0,c9,c14,2)
#define HSR_SYSREG_MAIR_EL1 HSR_SYSREG(3,0,c10,c2,0)
#define HSR_SYSREG_AMAIR_EL1 HSR_SYSREG(3,0,c10,c3,0)
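+/* GICv3 SGI generation registers */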
+#define HSR_SYSREG_ICC_SGI1R_EL1 HSR_SYSREG(3,0,c12,c11,5)
+#define HSR_SYSREG_ICC_ASGI1R_EL1 HSR_SYSREG(3,0,c12,c11,6)
+#define HSR_SYSREG_ICC_SGI0R_EL1 HSR_SYSREG(3,0,c12,c11,7)
#define HSR_SYSREG_CONTEXTIDR_EL1 HSR_SYSREG(3,0,c13,c0,1)
#define HSR_SYSREG_PMCR_EL0 HSR_SYSREG(3,3,c9,c12,0)
/* Get the target vcpu for a given virq. The rank lock is already taken
* when calling this. */
struct vcpu *(*get_target_vcpu)(struct vcpu *v, unsigned int irq);
+    /* vGIC sysreg emulation */
+    int (*emulate_sysreg)(struct cpu_user_regs *regs, union hsr hsr);
};
/* Number of ranks of interrupt registers for a domain */
extern struct pending_irq *irq_to_pending(struct vcpu *v, unsigned int irq);
extern struct vgic_irq_rank *vgic_rank_offset(struct vcpu *v, int b, int n, int s);
extern struct vgic_irq_rank *vgic_rank_irq(struct vcpu *v, unsigned int irq);
+extern int vgic_emulate(struct cpu_user_regs *regs, union hsr hsr);
extern void vgic_disable_irqs(struct vcpu *v, uint32_t r, int n);
extern void vgic_enable_irqs(struct vcpu *v, uint32_t r, int n);
extern void register_vgic_ops(struct domain *d, const struct vgic_ops *ops);