#define NR_BITS_PER_TARGET (32U / NR_TARGETS_PER_ITARGETSR)
/*
- * Store an ITARGETSR register. This function only deals with ITARGETSR8
- * and onwards.
+ * Fetch an ITARGETSR register based on the offset from ITARGETSR0. Only
+ * one vCPU will be listed for a given vIRQ.
+ *
+ * Note the offset will be aligned to the appropriate boundary.
+ */
+static uint32_t vgic_fetch_itargetsr(struct vgic_irq_rank *rank,
+ unsigned int offset)
+{
+ uint32_t reg = 0;
+ unsigned int i;
+
+ ASSERT(spin_is_locked(&rank->lock));
+
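+ /* Get the index in the rank of the first vIRQ covered by this register */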
+ offset &= INTERRUPT_RANK_MASK;
+ offset &= ~(NR_TARGETS_PER_ITARGETSR - 1);
+
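+ /*
+ * Rebuild each target field with a single bit set for the vCPU
+ * stored in the rank for that vIRQ.
+ */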
+ for ( i = 0; i < NR_TARGETS_PER_ITARGETSR; i++, offset++ )
+ reg |= (1 << rank->vcpu[offset]) << (i * NR_BITS_PER_TARGET);
+
+ return reg;
+}
+
+/*
+ * Store an ITARGETSR register in a convenient way and migrate the vIRQ
+ * if necessary. This function only deals with ITARGETSR8 and onwards.
*
* Note the offset will be aligned to the appropriate boundary.
*/
unsigned int offset, uint32_t itargetsr)
{
unsigned int i;
- unsigned int regidx = REG_RANK_INDEX(8, offset, DABT_WORD);
unsigned int virq;
ASSERT(spin_is_locked(&rank->lock));
for ( i = 0; i < NR_TARGETS_PER_ITARGETSR; i++, offset++, virq++ )
{
unsigned int new_target, old_target;
- uint8_t new_mask, old_mask;
+ uint8_t new_mask;
/*
* Don't need to mask as we rely on new_mask to fit for only one
BUILD_BUG_ON((sizeof (new_mask) * 8) != NR_BITS_PER_TARGET);
new_mask = itargetsr >> (i * NR_BITS_PER_TARGET);
- old_mask = vgic_byte_read(rank->v2.itargets[regidx], i);
/*
* SPIs are using the 1-N model (see 1.4.3 in ARM IHI 0048B).
* in the target list
*/
new_target = ffs(new_mask);
- old_target = ffs(old_mask);
-
- /* The current target should always be valid */
- ASSERT(old_target && (old_target <= d->max_vcpus));
/*
* Ignore the write request for this interrupt if the new target
/* The vCPU ID always starts from 0 */
new_target--;
- old_target--;
+
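+ /* rank->vcpu already stores the 0-based vCPU ID */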
+ old_target = rank->vcpu[offset];
/* Only migrate the vIRQ if the target vCPU has changed */
if ( new_target != old_target )
virq);
}
- /* Bit corresponding to unimplemented CPU is write-ignore. */
- new_mask &= (1 << d->max_vcpus) - 1;
- vgic_byte_write(&rank->v2.itargets[regidx], new_mask, i);
+ rank->vcpu[offset] = new_target;
}
}
rank = vgic_rank_offset(v, 8, gicd_reg - GICD_ITARGETSR, DABT_WORD);
if ( rank == NULL) goto read_as_zero;
vgic_lock_rank(v, rank, flags);
- *r = rank->v2.itargets[REG_RANK_INDEX(8, gicd_reg - GICD_ITARGETSR,
- DABT_WORD)];
+ *r = vgic_fetch_itargetsr(rank, gicd_reg - GICD_ITARGETSR);
if ( dabt.size == DABT_BYTE )
*r = vgic_byte_read(*r, gicd_reg);
vgic_unlock_rank(v, rank, flags);
itargetsr = r;
else
{
- itargetsr = rank->v2.itargets[REG_RANK_INDEX(8,
- gicd_reg - GICD_ITARGETSR,
- DABT_WORD)];
+ itargetsr = vgic_fetch_itargetsr(rank, gicd_reg - GICD_ITARGETSR);
vgic_byte_write(&itargetsr, r, gicd_reg);
}
vgic_store_itargetsr(v->domain, rank, gicd_reg - GICD_ITARGETSR,
.write = vgic_v2_distr_mmio_write,
};
-static struct vcpu *vgic_v2_get_target_vcpu(struct vcpu *v, unsigned int irq)
-{
- unsigned long target;
- struct vcpu *v_target;
- struct vgic_irq_rank *rank = vgic_rank_irq(v, irq);
- ASSERT(spin_is_locked(&rank->lock));
-
- target = vgic_byte_read(rank->v2.itargets[REG_RANK_INDEX(8,
- irq, DABT_WORD)], irq & 0x3);
-
- /* 1-N SPI should be delivered as pending to all the vcpus in the
- * mask, but here we just return the first vcpu for simplicity and
- * because it would be too slow to do otherwise. */
- target = find_first_bit(&target, 8);
- ASSERT(target >= 0 && target < v->domain->max_vcpus);
- v_target = v->domain->vcpu[target];
- return v_target;
-}
-
static int vgic_v2_vcpu_init(struct vcpu *v)
{
- int i;
-
- /* For SGI and PPI the target is always this CPU */
- for ( i = 0 ; i < 8 ; i++ )
- v->arch.vgic.private_irqs->v2.itargets[i] =
- (1<<(v->vcpu_id+0))
- | (1<<(v->vcpu_id+8))
- | (1<<(v->vcpu_id+16))
- | (1<<(v->vcpu_id+24));
+ /* Nothing specific to initialize for this driver */
return 0;
}
static int vgic_v2_domain_init(struct domain *d)
{
- int i, ret;
+ int ret;
paddr_t cbase, csize;
paddr_t vbase;
if ( ret )
return ret;
- /* By default deliver to CPU0 */
- for ( i = 0; i < DOMAIN_NR_RANKS(d); i++ )
- memset(d->arch.vgic.shared_irqs[i].v2.itargets, 0x1,
- sizeof(d->arch.vgic.shared_irqs[i].v2.itargets));
-
register_mmio_handler(d, &vgic_v2_distr_mmio_handler, d->arch.vgic.dbase,
PAGE_SIZE, NULL);
static const struct vgic_ops vgic_v2_ops = {
.vcpu_init = vgic_v2_vcpu_init,
.domain_init = vgic_v2_domain_init,
- .get_target_vcpu = vgic_v2_get_target_vcpu,
.max_vcpus = 8,
};
return d->vcpu[vcpu_id];
}
-static struct vcpu *vgic_v3_get_target_vcpu(struct vcpu *v, unsigned int irq)
-{
- struct vcpu *v_target;
- struct vgic_irq_rank *rank = vgic_rank_irq(v, irq);
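+/* Each IROUTER register is 64 bits (8 bytes) wide */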
+#define NR_BYTES_PER_IROUTER 8U
+/*
+ * Fetch an IROUTER register based on the offset from IROUTER0. Only one
+ * vCPU will be listed for a given vIRQ.
+ *
+ * Note the offset will be aligned to the appropriate boundary.
+ */
+static uint64_t vgic_fetch_irouter(struct vgic_irq_rank *rank,
+ unsigned int offset)
+{
ASSERT(spin_is_locked(&rank->lock));
- v_target = vgic_v3_irouter_to_vcpu(v->domain, rank->v3.irouter[irq % 32]);
+ /* There is exactly 1 vIRQ per IROUTER */
+ offset /= NR_BYTES_PER_IROUTER;
- ASSERT(v_target != NULL);
+ /* Get the index in the rank */
+ offset &= INTERRUPT_RANK_MASK;
- return v_target;
+ return vcpuid_to_vaffinity(rank->vcpu[offset]);
+}
+
+/*
+ * Store an IROUTER register in a convenient way and migrate the vIRQ
+ * if necessary. This function only deals with IROUTER32 and onwards.
+ *
+ * Note the offset will be aligned to the appropriate boundary.
+ */
+static void vgic_store_irouter(struct domain *d, struct vgic_irq_rank *rank,
+ unsigned int offset, uint64_t irouter)
+{
+ struct vcpu *new_vcpu, *old_vcpu;
+ unsigned int virq;
+
+ /* There is 1 vIRQ per IROUTER */
+ virq = offset / NR_BYTES_PER_IROUTER;
+
+ /*
+ * The IROUTER0-31, used for SGIs/PPIs, are reserved and this
+ * function should never be called for them.
+ */
+ ASSERT(virq >= 32);
+
+ /* Get the index in the rank */
+ offset = virq & INTERRUPT_RANK_MASK;
+
+ new_vcpu = vgic_v3_irouter_to_vcpu(d, irouter);
+ old_vcpu = d->vcpu[rank->vcpu[offset]];
+
+ /*
+ * From the spec (see 8.9.13 in IHI 0069A), any write with an
+ * invalid vCPU will lead to the interrupt being ignored.
+ *
+ * But the current code to inject an IRQ is not able to cope with
+ * an invalid vCPU. So for now, just ignore the write.
+ *
+ * TODO: Respect the spec
+ */
+ if ( !new_vcpu )
+ return;
+
+ /* Only migrate the IRQ if the target vCPU has changed */
+ if ( new_vcpu != old_vcpu )
+ vgic_migrate_irq(old_vcpu, new_vcpu, virq);
+
+ rank->vcpu[offset] = new_vcpu->vcpu_id;
}
static int __vgic_v3_rdistr_rd_mmio_read(struct vcpu *v, mmio_info_t *info,
DABT_DOUBLE_WORD);
if ( rank == NULL ) goto read_as_zero;
vgic_lock_rank(v, rank, flags);
- *r = rank->v3.irouter[REG_RANK_INDEX(64,
- (gicd_reg - GICD_IROUTER), DABT_DOUBLE_WORD)];
+ *r = vgic_fetch_irouter(rank, gicd_reg - GICD_IROUTER);
vgic_unlock_rank(v, rank, flags);
return 1;
case GICD_NSACR ... GICD_NSACRN:
struct hsr_dabt dabt = info->dabt;
struct vgic_irq_rank *rank;
unsigned long flags;
- uint64_t new_irouter, old_irouter;
- struct vcpu *old_vcpu, *new_vcpu;
int gicd_reg = (int)(info->gpa - v->domain->arch.vgic.dbase);
perfc_incr(vgicd_writes);
rank = vgic_rank_offset(v, 64, gicd_reg - GICD_IROUTER,
DABT_DOUBLE_WORD);
if ( rank == NULL ) goto write_ignore;
- new_irouter = r;
vgic_lock_rank(v, rank, flags);
-
- old_irouter = rank->v3.irouter[REG_RANK_INDEX(64,
- (gicd_reg - GICD_IROUTER),
- DABT_DOUBLE_WORD)];
- old_vcpu = vgic_v3_irouter_to_vcpu(v->domain, old_irouter);
- new_vcpu = vgic_v3_irouter_to_vcpu(v->domain, new_irouter);
-
- if ( !new_vcpu )
- {
- printk(XENLOG_G_DEBUG
- "%pv: vGICD: wrong irouter at offset %#08x val %#"PRIregister,
- v, gicd_reg, r);
- vgic_unlock_rank(v, rank, flags);
- /*
- * TODO: Don't inject a fault to the guest when the MPIDR is
- * not valid. From the spec, the interrupt should be
- * ignored.
- */
- return 0;
- }
- rank->v3.irouter[REG_RANK_INDEX(64, (gicd_reg - GICD_IROUTER),
- DABT_DOUBLE_WORD)] = new_irouter;
- if ( old_vcpu != new_vcpu )
- vgic_migrate_irq(old_vcpu, new_vcpu, (gicd_reg - GICD_IROUTER)/8);
+ vgic_store_irouter(v->domain, rank, gicd_reg - GICD_IROUTER, r);
vgic_unlock_rank(v, rank, flags);
return 1;
case GICD_NSACR ... GICD_NSACRN:
static int vgic_v3_vcpu_init(struct vcpu *v)
{
int i;
- uint64_t affinity;
paddr_t rdist_base;
struct vgic_rdist_region *region;
unsigned int last_cpu;
struct domain *d = v->domain;
uint32_t rdist_stride = d->arch.vgic.rdist_stride;
- /* For SGI and PPI the target is always this CPU */
- affinity = (MPIDR_AFFINITY_LEVEL(v->arch.vmpidr, 3) << 32 |
- MPIDR_AFFINITY_LEVEL(v->arch.vmpidr, 2) << 16 |
- MPIDR_AFFINITY_LEVEL(v->arch.vmpidr, 1) << 8 |
- MPIDR_AFFINITY_LEVEL(v->arch.vmpidr, 0));
-
- for ( i = 0 ; i < 32 ; i++ )
- v->arch.vgic.private_irqs->v3.irouter[i] = affinity;
-
/*
* Find the region where the re-distributor lives. For this purpose,
* we look one region ahead as we have only the first CPU in hand.
static int vgic_v3_domain_init(struct domain *d)
{
- int i, idx;
+ int i;
/*
* Domain 0 gets the hardware address.
d->arch.vgic.rdist_regions[0].first_cpu = 0;
}
- /* By default deliver to CPU0 */
- for ( i = 0; i < DOMAIN_NR_RANKS(d); i++ )
- {
- for ( idx = 0; idx < 32; idx++ )
- d->arch.vgic.shared_irqs[i].v3.irouter[idx] = 0;
- }
-
/* Register mmio handle for the Distributor */
register_mmio_handler(d, &vgic_distr_mmio_handler, d->arch.vgic.dbase,
SZ_64K, NULL);
static const struct vgic_ops v3_ops = {
.vcpu_init = vgic_v3_vcpu_init,
.domain_init = vgic_v3_domain_init,
- .get_target_vcpu = vgic_v3_get_target_vcpu,
.emulate_sysreg = vgic_v3_emulate_sysreg,
/*
* We use both AFF1 and AFF0 in (v)MPIDR. Thus, the max number of CPU
p->irq = virq;
}
-static void vgic_rank_init(struct vgic_irq_rank *rank, uint8_t index)
+static void vgic_rank_init(struct vgic_irq_rank *rank, uint8_t index,
+ unsigned int vcpu)
{
+ unsigned int i;
+
+ /*
+ * Make sure that the type chosen to store the target is able to
+ * store a vCPU ID between 0 and the maximum number of virtual
+ * CPUs supported.
+ */
+ BUILD_BUG_ON((1 << (sizeof(rank->vcpu[0]) * 8)) < MAX_VIRT_CPUS);
+
spin_lock_init(&rank->lock);
rank->index = index;
+
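+ /* Route every vIRQ in this rank to the given vCPU */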
+ for ( i = 0; i < NR_INTERRUPT_PER_RANK; i++ )
+ rank->vcpu[i] = vcpu;
}
int domain_vgic_init(struct domain *d, unsigned int nr_spis)
for (i=0; i<d->arch.vgic.nr_spis; i++)
vgic_init_pending_irq(&d->arch.vgic.pending_irqs[i], i + 32);
+ /* SPIs are routed to VCPU0 by default */
for ( i = 0; i < DOMAIN_NR_RANKS(d); i++ )
- vgic_rank_init(&d->arch.vgic.shared_irqs[i], i + 1);
+ vgic_rank_init(&d->arch.vgic.shared_irqs[i], i + 1, 0);
ret = d->arch.vgic.handler->domain_init(d);
if ( ret )
if ( v->arch.vgic.private_irqs == NULL )
return -ENOMEM;
- vgic_rank_init(v->arch.vgic.private_irqs, 0);
+ /* SGIs/PPIs are always routed to this VCPU */
+ vgic_rank_init(v->arch.vgic.private_irqs, 0, v->vcpu_id);
v->domain->arch.vgic.handler->vcpu_init(v);
return 0;
}
+/* The function should be called with the rank lock taken. */
+static struct vcpu *__vgic_get_target_vcpu(struct vcpu *v, unsigned int virq)
+{
+ struct vgic_irq_rank *rank = vgic_rank_irq(v, virq);
+
+ ASSERT(spin_is_locked(&rank->lock));
+
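+ /* INTERRUPT_RANK_MASK gives the index of the vIRQ within the rank */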
+ return v->domain->vcpu[rank->vcpu[virq & INTERRUPT_RANK_MASK]];
+}
+
/* takes the rank lock */
-struct vcpu *vgic_get_target_vcpu(struct vcpu *v, unsigned int irq)
+struct vcpu *vgic_get_target_vcpu(struct vcpu *v, unsigned int virq)
{
- struct domain *d = v->domain;
struct vcpu *v_target;
- struct vgic_irq_rank *rank = vgic_rank_irq(v, irq);
+ struct vgic_irq_rank *rank = vgic_rank_irq(v, virq);
unsigned long flags;
vgic_lock_rank(v, rank, flags);
- v_target = d->arch.vgic.handler->get_target_vcpu(v, irq);
+ v_target = __vgic_get_target_vcpu(v, virq);
vgic_unlock_rank(v, rank, flags);
+
return v_target;
}
void vgic_disable_irqs(struct vcpu *v, uint32_t r, int n)
{
- struct domain *d = v->domain;
const unsigned long mask = r;
struct pending_irq *p;
unsigned int irq;
while ( (i = find_next_bit(&mask, 32, i)) < 32 ) {
irq = i + (32 * n);
- v_target = d->arch.vgic.handler->get_target_vcpu(v, irq);
+ v_target = __vgic_get_target_vcpu(v, irq);
p = irq_to_pending(v_target, irq);
clear_bit(GIC_IRQ_GUEST_ENABLED, &p->status);
gic_remove_from_queues(v_target, irq);
void vgic_enable_irqs(struct vcpu *v, uint32_t r, int n)
{
- struct domain *d = v->domain;
const unsigned long mask = r;
struct pending_irq *p;
unsigned int irq;
while ( (i = find_next_bit(&mask, 32, i)) < 32 ) {
irq = i + (32 * n);
- v_target = d->arch.vgic.handler->get_target_vcpu(v, irq);
+ v_target = __vgic_get_target_vcpu(v, irq);
p = irq_to_pending(v_target, irq);
set_bit(GIC_IRQ_GUEST_ENABLED, &p->status);
spin_lock_irqsave(&v_target->arch.vgic.lock, flags);
uint32_t ipriorityr[8];
};
- union {
- struct {
- uint32_t itargets[8];
- }v2;
- struct {
- uint64_t irouter[32];
- }v3;
- };
+ /*
+ * It's more convenient to store a target vCPU per vIRQ
+ * than the raw ITARGETSR/IROUTER register value.
+ */
+ uint8_t vcpu[32];
};
struct sgi_target {
int (*vcpu_init)(struct vcpu *v);
/* Domain specific initialization of vGIC */
int (*domain_init)(struct domain *d);
- /* Get the target vcpu for a given virq. The rank lock is already taken
- * when calling this. */
- struct vcpu *(*get_target_vcpu)(struct vcpu *v, unsigned int irq);
/* vGIC sysreg emulation */
int (*emulate_sysreg)(struct cpu_user_regs *regs, union hsr hsr);
/* Maximum number of vCPU supported */
extern int domain_vgic_init(struct domain *d, unsigned int nr_spis);
extern void domain_vgic_free(struct domain *d);
extern int vcpu_vgic_init(struct vcpu *v);
-extern struct vcpu *vgic_get_target_vcpu(struct vcpu *v, unsigned int irq);
+extern struct vcpu *vgic_get_target_vcpu(struct vcpu *v, unsigned int virq);
extern void vgic_vcpu_inject_irq(struct vcpu *v, unsigned int virq);
extern void vgic_vcpu_inject_spi(struct domain *d, unsigned int virq);
extern void vgic_clear_pending_irqs(struct vcpu *v);