#include <xen/device_tree.h>
#include <xen/sizes.h>
#include <xen/libfdt/libfdt.h>
+#include <xen/sort.h>
#include <asm/p2m.h>
#include <asm/domain.h>
#include <asm/io.h>
*/
if ( is_hardware_domain(d) )
{
+ unsigned int first_cpu = 0;
+
d->arch.vgic.dbase = gicv3.dbase;
d->arch.vgic.dbase_size = gicv3.dbase_size;
for ( i = 0; i < gicv3.rdist_count; i++ )
{
+ paddr_t size = gicv3.rdist_regions[i].size;
+
d->arch.vgic.rdist_regions[i].base = gicv3.rdist_regions[i].base;
- d->arch.vgic.rdist_regions[i].size = gicv3.rdist_regions[i].size;
+ d->arch.vgic.rdist_regions[i].size = size;
+
+ /* Set the first CPU handled by this region */
+ d->arch.vgic.rdist_regions[i].first_cpu = first_cpu;
+
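+ /*
+ * Each region provides space for size / stride re-distributors
+ * (e.g. a 1MB region with a 128KB stride covers 8 vCPUs), so the
+ * next region starts with the CPU right after the ones handled here.
+ */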
+ first_cpu += size / d->arch.vgic.rdist_stride;
}
d->arch.vgic.nr_regions = gicv3.rdist_count;
}
BUILD_BUG_ON((GUEST_GICV3_GICR0_SIZE / GUEST_GICV3_RDIST_STRIDE) < MAX_VIRT_CPUS);
d->arch.vgic.rdist_regions[0].base = GUEST_GICV3_GICR0_BASE;
d->arch.vgic.rdist_regions[0].size = GUEST_GICV3_GICR0_SIZE;
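+ /* There is a single guest region, so it always starts at vCPU 0 */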
+ d->arch.vgic.rdist_regions[0].first_cpu = 0;
}
return 0;
.make_dt_node = gicv3_make_dt_node,
};
+static int __init cmp_rdist(const void *a, const void *b)
+{
+ const struct rdist_region *l = a, *r = b;
+
+ /* We assume that re-distributor regions can never overlap */
+ return ( l->base < r->base ) ? -1 : 1;
+}
+
/* Set up the GIC */
static int __init gicv3_init(struct dt_device_node *node, const void *data)
{
rdist_regs[i].size = rdist_size;
}
+ /* The vGIC code requires the regions to be sorted by base address */
+ sort(rdist_regs, gicv3.rdist_count, sizeof(*rdist_regs), cmp_rdist, NULL);
+
/* If the stride is not set in the device tree, the default of 2 * SZ_64K will be used */
if ( !dt_property_read_u32(node, "redistributor-stride", &gicv3.rdist_stride) )
gicv3.rdist_stride = 0;
MPIDR_AFFINITY_LEVEL(v->arch.vmpidr, 1) << 40 |
MPIDR_AFFINITY_LEVEL(v->arch.vmpidr, 0) << 32);
*r = aff;
+
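+ /*
+ * GICR_TYPER.Last tells the guest this is the last re-distributor of
+ * a contiguous region; the flag was computed at vCPU initialization.
+ */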
+ if ( v->arch.vgic.flags & VGIC_V3_RDIST_LAST )
+ *r |= GICR_TYPER_LAST;
+
return 1;
case GICR_STATUSR:
/* Not implemented */
return 1;
}
+static inline struct vcpu *get_vcpu_from_rdist(paddr_t gpa,
+ struct vcpu *v,
+ uint32_t *offset)
+{
+ struct domain *d = v->domain;
+ uint32_t stride = d->arch.vgic.rdist_stride;
+ paddr_t base;
+ int i, vcpu_id;
+ struct vgic_rdist_region *region;
+
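+ /* Note: the masking below assumes the stride is a power of 2 */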
+ *offset = gpa & (stride - 1);
+ base = gpa & ~((paddr_t)stride - 1);
+
+ /* Fast path: the VCPU is trying to access its re-distributor */
+ if ( likely(v->arch.vgic.rdist_base == base) )
+ return v;
+
+ /* Slow path: the VCPU is trying to access another re-distributor */
+
+ /*
+ * Find the region where the re-distributor lives. For this purpose,
+ * we look one region ahead as only the MMIO ranges of the
+ * re-distributors can trap here.
+ * Note: the regions have been sorted during the GIC initialization.
+ */
+ for ( i = 1; i < d->arch.vgic.nr_regions; i++ )
+ {
+ if ( base < d->arch.vgic.rdist_regions[i].base )
+ break;
+ }
+
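+ /*
+ * The loop stopped at the first region above the access (or at the
+ * end of the list), so the re-distributor lives in the previous one.
+ */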
+ region = &d->arch.vgic.rdist_regions[i - 1];
+
+ vcpu_id = region->first_cpu + ((base - region->base) / stride);
+
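+ /* The region may provide space for more re-distributors than the domain has vCPUs */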
+ if ( unlikely(vcpu_id >= d->max_vcpus) )
+ return NULL;
+
+ return d->vcpu[vcpu_id];
+}
+
static int vgic_v3_rdistr_mmio_read(struct vcpu *v, mmio_info_t *info)
{
uint32_t offset;
perfc_incr(vgicr_reads);
- offset = info->gpa & (v->domain->arch.vgic.rdist_stride - 1);
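+ /* Find which vCPU's re-distributor is being accessed; it may not belong to v */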
+ v = get_vcpu_from_rdist(info->gpa, v, &offset);
+ if ( unlikely(!v) )
+ return 0;
if ( offset < SZ_64K )
return __vgic_v3_rdistr_rd_mmio_read(v, info, offset);
perfc_incr(vgicr_writes);
- offset = info->gpa & (v->domain->arch.vgic.rdist_stride - 1);
+ v = get_vcpu_from_rdist(info->gpa, v, &offset);
+ if ( unlikely(!v) )
+ return 0;
if ( offset < SZ_64K )
return __vgic_v3_rdistr_rd_mmio_write(v, info, offset);
{
int i;
uint64_t affinity;
+ paddr_t rdist_base;
+ struct vgic_rdist_region *region;
+ unsigned int last_cpu;
+
+ /* Convenient alias */
+ struct domain *d = v->domain;
+ uint32_t rdist_stride = d->arch.vgic.rdist_stride;
/* For SGI and PPI the target is always this CPU */
affinity = (MPIDR_AFFINITY_LEVEL(v->arch.vmpidr, 3) << 32 |
for ( i = 0 ; i < 32 ; i++ )
v->arch.vgic.private_irqs->v3.irouter[i] = affinity;
+ /*
+ * Find the region where the re-distributor lives. For this purpose,
+ * we look one region ahead as each region only records its first CPU.
+ */
+ for ( i = 1; i < d->arch.vgic.nr_regions; i++ )
+ {
+ if ( v->vcpu_id < d->arch.vgic.rdist_regions[i].first_cpu )
+ break;
+ }
+
+ region = &d->arch.vgic.rdist_regions[i - 1];
+
+ /* Get the base address of the redistributor */
+ rdist_base = region->base;
+ rdist_base += (v->vcpu_id - region->first_cpu) * rdist_stride;
+
+ /* Check if a valid region was found for the re-distributor */
+ if ( (rdist_base < region->base) ||
+ ((rdist_base + rdist_stride) > (region->base + region->size)) )
+ {
+ dprintk(XENLOG_ERR,
+ "d%u: Unable to find a re-distributor for VCPU %u\n",
+ d->domain_id, v->vcpu_id);
+ return -EINVAL;
+ }
+
+ v->arch.vgic.rdist_base = rdist_base;
+
+ /*
+ * If the re-distributor is the last one of its contiguous region, or if
+ * the vCPU is the last of the domain, set the VGIC_V3_RDIST_LAST flag.
+ * Note that we are assuming max_vcpus will never change.
+ */
+ last_cpu = (region->size / rdist_stride) + region->first_cpu - 1;
+
+ if ( v->vcpu_id == last_cpu || (v->vcpu_id == (d->max_vcpus - 1)) )
+ v->arch.vgic.flags |= VGIC_V3_RDIST_LAST;
+
return 0;
}