}
config->gic_version = gic_version;
- if ( (rc = gicv_setup(d)) != 0 )
- goto fail;
-
if ( (rc = domain_vgic_init(d, config->nr_spis)) != 0 )
goto fail;
/* Global state */
static struct {
- paddr_t dbase; /* Address of distributor registers */
void __iomem * map_dbase; /* IO mapped Address of distributor registers */
- paddr_t cbase; /* Address of CPU interface registers */
void __iomem * map_cbase[2]; /* IO mapped Address of CPU interface registers */
void __iomem * map_hbase; /* IO Address of virtual interface registers */
- paddr_t vbase; /* Address of virtual cpu interface registers */
spinlock_t lock;
} gicv2;
writel_gich(0, HIP04_GICH_LR + lr * 4);
}
-static int hip04gicv_setup(struct domain *d)
-{
- int ret;
-
- /*
- * The hardware domain gets the hardware address.
- * Guests get the virtual platform layout.
- */
- if ( is_hardware_domain(d) )
- {
- d->arch.vgic.dbase = gicv2.dbase;
- d->arch.vgic.cbase = gicv2.cbase;
- }
- else
- {
- d->arch.vgic.dbase = GUEST_GICD_BASE;
- d->arch.vgic.cbase = GUEST_GICC_BASE;
- }
-
- /*
- * Map the gic virtual cpu interface in the gic cpu interface
- * region of the guest.
- *
- * The second page is always mapped at +4K irrespective of the
- * GIC_64K_STRIDE quirk. The DTB passed to the guest reflects this.
- */
- ret = map_mmio_regions(d, paddr_to_pfn(d->arch.vgic.cbase), 1,
- paddr_to_pfn(gicv2.vbase));
- if ( ret )
- return ret;
-
- if ( !platform_has_quirk(PLATFORM_QUIRK_GIC_64K_STRIDE) )
- ret = map_mmio_regions(d, paddr_to_pfn(d->arch.vgic.cbase + PAGE_SIZE),
- 2, paddr_to_pfn(gicv2.vbase + PAGE_SIZE));
- else
- ret = map_mmio_regions(d, paddr_to_pfn(d->arch.vgic.cbase + PAGE_SIZE),
- 2, paddr_to_pfn(gicv2.vbase + SZ_64K));
-
- return ret;
-}
-
static void hip04gic_read_lr(int lr, struct gic_lr *lr_reg)
{
uint32_t lrv;
static int __init hip04gic_init(void)
{
int res;
- paddr_t hbase;
+ paddr_t hbase, dbase, cbase, vbase;
const struct dt_device_node *node = gicv2_info.node;
- res = dt_device_get_address(node, 0, &gicv2.dbase, NULL);
+ res = dt_device_get_address(node, 0, &dbase, NULL);
if ( res )
panic("GIC-HIP04: Cannot find a valid address for the distributor");
- res = dt_device_get_address(node, 1, &gicv2.cbase, NULL);
+ res = dt_device_get_address(node, 1, &cbase, NULL);
if ( res )
panic("GIC-HIP04: Cannot find a valid address for the CPU");
if ( res )
panic("GIC-HIP04: Cannot find a valid address for the hypervisor");
- res = dt_device_get_address(node, 3, &gicv2.vbase, NULL);
+ res = dt_device_get_address(node, 3, &vbase, NULL);
if ( res )
panic("GIC-HIP04: Cannot find a valid address for the virtual CPU");
" gic_hyp_addr=%"PRIpaddr"\n"
" gic_vcpu_addr=%"PRIpaddr"\n"
" gic_maintenance_irq=%u\n",
- gicv2.dbase, gicv2.cbase, hbase, gicv2.vbase,
+ dbase, cbase, hbase, vbase,
gicv2_info.maintenance_irq);
- if ( (gicv2.dbase & ~PAGE_MASK) || (gicv2.cbase & ~PAGE_MASK) ||
- (hbase & ~PAGE_MASK) || (gicv2.vbase & ~PAGE_MASK) )
+ if ( (dbase & ~PAGE_MASK) || (cbase & ~PAGE_MASK) ||
+ (hbase & ~PAGE_MASK) || (vbase & ~PAGE_MASK) )
panic("GIC-HIP04 interfaces not page aligned");
- gicv2.map_dbase = ioremap_nocache(gicv2.dbase, PAGE_SIZE);
+ gicv2.map_dbase = ioremap_nocache(dbase, PAGE_SIZE);
if ( !gicv2.map_dbase )
panic("GIC-HIP04: Failed to ioremap for GIC distributor\n");
- gicv2.map_cbase[0] = ioremap_nocache(gicv2.cbase, PAGE_SIZE);
+ gicv2.map_cbase[0] = ioremap_nocache(cbase, PAGE_SIZE);
if ( platform_has_quirk(PLATFORM_QUIRK_GIC_64K_STRIDE) )
- gicv2.map_cbase[1] = ioremap_nocache(gicv2.cbase + SZ_64K, PAGE_SIZE);
+ gicv2.map_cbase[1] = ioremap_nocache(cbase + SZ_64K, PAGE_SIZE);
else
- gicv2.map_cbase[1] = ioremap_nocache(gicv2.cbase + PAGE_SIZE, PAGE_SIZE);
+ gicv2.map_cbase[1] = ioremap_nocache(cbase + PAGE_SIZE, PAGE_SIZE);
if ( !gicv2.map_cbase[0] || !gicv2.map_cbase[1] )
panic("GIC-HIP04: Failed to ioremap for GIC CPU interface\n");
if ( !gicv2.map_hbase )
panic("GIC-HIP04: Failed to ioremap for GIC Virtual interface\n");
+ vgic_v2_setup_hw(dbase, cbase, vbase);
+
/* Global settings: interrupt distributor */
spin_lock_init(&gicv2.lock);
spin_lock(&gicv2.lock);
.save_state = hip04gic_save_state,
.restore_state = hip04gic_restore_state,
.dump_state = hip04gic_dump_state,
- .gicv_setup = hip04gicv_setup,
.gic_host_irq_type = &hip04gic_host_irq_type,
.gic_guest_irq_type = &hip04gic_guest_irq_type,
.eoi_irq = hip04gic_eoi_irq,
/* Global state */
static struct {
- paddr_t dbase; /* Address of distributor registers */
void __iomem * map_dbase; /* IO mapped Address of distributor registers */
- paddr_t cbase; /* Address of CPU interface registers */
void __iomem * map_cbase[2]; /* IO mapped Address of CPU interface registers */
void __iomem * map_hbase; /* IO Address of virtual interface registers */
- paddr_t vbase; /* Address of virtual cpu interface registers */
spinlock_t lock;
} gicv2;
writel_gich(0, GICH_LR + lr * 4);
}
-static int gicv2v_setup(struct domain *d)
-{
- int ret;
-
- /*
- * The hardware domain gets the hardware address.
- * Guests get the virtual platform layout.
- */
- if ( is_hardware_domain(d) )
- {
- d->arch.vgic.dbase = gicv2.dbase;
- d->arch.vgic.cbase = gicv2.cbase;
- }
- else
- {
- d->arch.vgic.dbase = GUEST_GICD_BASE;
- d->arch.vgic.cbase = GUEST_GICC_BASE;
- }
-
- /*
- * Map the gic virtual cpu interface in the gic cpu interface
- * region of the guest.
- *
- * The second page is always mapped at +4K irrespective of the
- * GIC_64K_STRIDE quirk. The DTB passed to the guest reflects this.
- */
- ret = map_mmio_regions(d, paddr_to_pfn(d->arch.vgic.cbase), 1,
- paddr_to_pfn(gicv2.vbase));
- if ( ret )
- return ret;
-
- if ( !platform_has_quirk(PLATFORM_QUIRK_GIC_64K_STRIDE) )
- ret = map_mmio_regions(d, paddr_to_pfn(d->arch.vgic.cbase + PAGE_SIZE),
- 2, paddr_to_pfn(gicv2.vbase + PAGE_SIZE));
- else
- ret = map_mmio_regions(d, paddr_to_pfn(d->arch.vgic.cbase + PAGE_SIZE),
- 2, paddr_to_pfn(gicv2.vbase + SZ_64K));
-
- return ret;
-}
-
static void gicv2_read_lr(int lr, struct gic_lr *lr_reg)
{
uint32_t lrv;
static int __init gicv2_init(void)
{
int res;
- paddr_t hbase;
+ paddr_t hbase, dbase, cbase, vbase;
const struct dt_device_node *node = gicv2_info.node;
- res = dt_device_get_address(node, 0, &gicv2.dbase, NULL);
+ res = dt_device_get_address(node, 0, &dbase, NULL);
if ( res )
panic("GICv2: Cannot find a valid address for the distributor");
- res = dt_device_get_address(node, 1, &gicv2.cbase, NULL);
+ res = dt_device_get_address(node, 1, &cbase, NULL);
if ( res )
panic("GICv2: Cannot find a valid address for the CPU");
if ( res )
panic("GICv2: Cannot find a valid address for the hypervisor");
- res = dt_device_get_address(node, 3, &gicv2.vbase, NULL);
+ res = dt_device_get_address(node, 3, &vbase, NULL);
if ( res )
panic("GICv2: Cannot find a valid address for the virtual CPU");
" gic_hyp_addr=%"PRIpaddr"\n"
" gic_vcpu_addr=%"PRIpaddr"\n"
" gic_maintenance_irq=%u\n",
- gicv2.dbase, gicv2.cbase, hbase, gicv2.vbase,
+ dbase, cbase, hbase, vbase,
gicv2_info.maintenance_irq);
- if ( (gicv2.dbase & ~PAGE_MASK) || (gicv2.cbase & ~PAGE_MASK) ||
- (hbase & ~PAGE_MASK) || (gicv2.vbase & ~PAGE_MASK) )
+ if ( (dbase & ~PAGE_MASK) || (cbase & ~PAGE_MASK) ||
+ (hbase & ~PAGE_MASK) || (vbase & ~PAGE_MASK) )
panic("GICv2 interfaces not page aligned");
- gicv2.map_dbase = ioremap_nocache(gicv2.dbase, PAGE_SIZE);
+ gicv2.map_dbase = ioremap_nocache(dbase, PAGE_SIZE);
if ( !gicv2.map_dbase )
panic("GICv2: Failed to ioremap for GIC distributor\n");
- gicv2.map_cbase[0] = ioremap_nocache(gicv2.cbase, PAGE_SIZE);
+ gicv2.map_cbase[0] = ioremap_nocache(cbase, PAGE_SIZE);
if ( platform_has_quirk(PLATFORM_QUIRK_GIC_64K_STRIDE) )
- gicv2.map_cbase[1] = ioremap_nocache(gicv2.cbase + SZ_64K, PAGE_SIZE);
+ gicv2.map_cbase[1] = ioremap_nocache(cbase + SZ_64K, PAGE_SIZE);
else
- gicv2.map_cbase[1] = ioremap_nocache(gicv2.cbase + PAGE_SIZE, PAGE_SIZE);
+ gicv2.map_cbase[1] = ioremap_nocache(cbase + PAGE_SIZE, PAGE_SIZE);
if ( !gicv2.map_cbase[0] || !gicv2.map_cbase[1] )
panic("GICv2: Failed to ioremap for GIC CPU interface\n");
if ( !gicv2.map_hbase )
panic("GICv2: Failed to ioremap for GIC Virtual interface\n");
+ vgic_v2_setup_hw(dbase, cbase, vbase);
+
/* Global settings: interrupt distributor */
spin_lock_init(&gicv2.lock);
spin_lock(&gicv2.lock);
.save_state = gicv2_save_state,
.restore_state = gicv2_restore_state,
.dump_state = gicv2_dump_state,
- .gicv_setup = gicv2v_setup,
.gic_host_irq_type = &gicv2_host_irq_type,
.gic_guest_irq_type = &gicv2_guest_irq_type,
.eoi_irq = gicv2_eoi_irq,
#include <asm/gic_v3_defs.h>
#include <asm/cpufeature.h>
-struct rdist_region {
- paddr_t base;
- paddr_t size;
- void __iomem *map_base;
-};
-
/* Global state */
static struct {
- paddr_t dbase; /* Address of distributor registers */
void __iomem *map_dbase; /* Mapped address of distributor registers */
struct rdist_region *rdist_regions;
uint32_t rdist_stride;
gicv3_ich_write_lr(lr_reg, lrv);
}
-static int gicv_v3_init(struct domain *d)
-{
- int i;
-
- /*
- * Domain 0 gets the hardware address.
- * Guests get the virtual platform layout.
- */
- if ( is_hardware_domain(d) )
- {
- unsigned int first_cpu = 0;
-
- d->arch.vgic.dbase = gicv3.dbase;
-
- d->arch.vgic.rdist_stride = gicv3.rdist_stride;
- /*
- * If the stride is not set, the default stride for GICv3 is 2 * 64K:
- * - first 64k page for Control and Physical LPIs
- * - second 64k page for Control and Generation of SGIs
- */
- if ( !d->arch.vgic.rdist_stride )
- d->arch.vgic.rdist_stride = 2 * SZ_64K;
-
- for ( i = 0; i < gicv3.rdist_count; i++ )
- {
- paddr_t size = gicv3.rdist_regions[i].size;
-
- d->arch.vgic.rdist_regions[i].base = gicv3.rdist_regions[i].base;
- d->arch.vgic.rdist_regions[i].size = size;
-
- /* Set the first CPU handled by this region */
- d->arch.vgic.rdist_regions[i].first_cpu = first_cpu;
-
- first_cpu += size / d->arch.vgic.rdist_stride;
- }
- d->arch.vgic.nr_regions = gicv3.rdist_count;
- }
- else
- {
- d->arch.vgic.dbase = GUEST_GICV3_GICD_BASE;
-
- /* XXX: Only one Re-distributor region mapped for the guest */
- BUILD_BUG_ON(GUEST_GICV3_RDIST_REGIONS != 1);
-
- d->arch.vgic.nr_regions = GUEST_GICV3_RDIST_REGIONS;
- d->arch.vgic.rdist_stride = GUEST_GICV3_RDIST_STRIDE;
-
- /* The first redistributor should contain enough space for all CPUs */
- BUILD_BUG_ON((GUEST_GICV3_GICR0_SIZE / GUEST_GICV3_RDIST_STRIDE) < MAX_VIRT_CPUS);
- d->arch.vgic.rdist_regions[0].base = GUEST_GICV3_GICR0_BASE;
- d->arch.vgic.rdist_regions[0].size = GUEST_GICV3_GICR0_SIZE;
- d->arch.vgic.rdist_regions[0].first_cpu = 0;
- }
-
- return 0;
-}
-
static void gicv3_hcr_status(uint32_t flag, bool_t status)
{
uint32_t hcr;
int res, i;
uint32_t reg;
const struct dt_device_node *node = gicv3_info.node;
+ paddr_t dbase;
if ( !cpu_has_gicv3 )
{
return -ENODEV;
}
- res = dt_device_get_address(node, 0, &gicv3.dbase, NULL);
+ res = dt_device_get_address(node, 0, &dbase, NULL);
if ( res )
panic("GICv3: Cannot find a valid distributor address");
- if ( (gicv3.dbase & ~PAGE_MASK) )
+ if ( (dbase & ~PAGE_MASK) )
panic("GICv3: Found unaligned distributor address %"PRIpaddr"",
- gicv3.dbase);
+ dbase);
- gicv3.map_dbase = ioremap_nocache(gicv3.dbase, SZ_64K);
+ gicv3.map_dbase = ioremap_nocache(dbase, SZ_64K);
if ( !gicv3.map_dbase )
panic("GICv3: Failed to ioremap for GIC distributor\n");
" gic_maintenance_irq=%u\n"
" gic_rdist_stride=%#x\n"
" gic_rdist_regions=%d\n",
- gicv3.dbase, gicv3_info.maintenance_irq,
+ dbase, gicv3_info.maintenance_irq,
gicv3.rdist_stride, gicv3.rdist_count);
printk(" redistributor regions:\n");
for ( i = 0; i < gicv3.rdist_count; i++ )
i, r->base, r->base + r->size);
}
+ vgic_v3_setup_hw(dbase, gicv3.rdist_count, gicv3.rdist_regions,
+ gicv3.rdist_stride);
+
spin_lock_init(&gicv3.lock);
spin_lock(&gicv3.lock);
.save_state = gicv3_save_state,
.restore_state = gicv3_restore_state,
.dump_state = gicv3_dump_state,
- .gicv_setup = gicv_v3_init,
.gic_host_irq_type = &gicv3_host_irq_type,
.gic_guest_irq_type = &gicv3_guest_irq_type,
.eoi_irq = gicv3_eoi_irq,
} while (1);
}
-int gicv_setup(struct domain *d)
-{
- return gic_hw_ops->gicv_setup(d);
-}
-
static void maintenance_interrupt(int irq, void *dev_id, struct cpu_user_regs *regs)
{
/*
#include <xen/softirq.h>
#include <xen/irq.h>
#include <xen/sched.h>
+#include <xen/sizes.h>
#include <asm/current.h>
#include <asm/mmio.h>
-#include <asm/gic.h>
+#include <asm/platform.h>
#include <asm/vgic.h>
+/*
+ * Host GICv2 register layout.  Filled in once by vgic_v2_setup_hw()
+ * (called from the GICv2/HIP04 driver at boot) and read by
+ * vgic_v2_domain_init() to expose the hardware layout to the
+ * hardware domain.
+ */
+static struct {
+    /* Set once vgic_v2_setup_hw() has run; vgic_v2_init() returns
+     * -ENODEV for every domain while this is still 0. */
+    bool_t enabled;
+    /* Distributor interface address */
+    paddr_t dbase;
+    /* CPU interface address */
+    paddr_t cbase;
+    /* Virtual CPU interface address */
+    paddr_t vbase;
+} vgic_v2_hw;
+
+/*
+ * Record the host GICv2 MMIO addresses: distributor (dbase), CPU
+ * interface (cbase) and virtual CPU interface (vbase).  Must be called
+ * by the GIC driver before any domain is initialised — until then
+ * vgic_v2_init() fails with -ENODEV because 'enabled' is unset.
+ */
+void vgic_v2_setup_hw(paddr_t dbase, paddr_t cbase, paddr_t vbase)
+{
+    vgic_v2_hw.enabled = 1;
+    vgic_v2_hw.dbase = dbase;
+    vgic_v2_hw.cbase = cbase;
+    vgic_v2_hw.vbase = vbase;
+}
+
static int vgic_v2_distr_mmio_read(struct vcpu *v, mmio_info_t *info)
{
struct hsr_dabt dabt = info->dabt;
static int vgic_v2_domain_init(struct domain *d)
{
- int i;
+ int i, ret;
+
+ /*
+ * The hardware domain gets the hardware address.
+ * Guests get the virtual platform layout.
+ */
+ if ( is_hardware_domain(d) )
+ {
+ d->arch.vgic.dbase = vgic_v2_hw.dbase;
+ d->arch.vgic.cbase = vgic_v2_hw.cbase;
+ }
+ else
+ {
+ d->arch.vgic.dbase = GUEST_GICD_BASE;
+ d->arch.vgic.cbase = GUEST_GICC_BASE;
+ }
+
+ /*
+ * Map the gic virtual cpu interface in the gic cpu interface
+ * region of the guest.
+ *
+ * The second page is always mapped at +4K irrespective of the
+ * GIC_64K_STRIDE quirk. The DTB passed to the guest reflects this.
+ */
+ ret = map_mmio_regions(d, paddr_to_pfn(d->arch.vgic.cbase), 1,
+ paddr_to_pfn(vgic_v2_hw.vbase));
+ if ( ret )
+ return ret;
+
+ if ( !platform_has_quirk(PLATFORM_QUIRK_GIC_64K_STRIDE) )
+ ret = map_mmio_regions(d, paddr_to_pfn(d->arch.vgic.cbase + PAGE_SIZE),
+ 2, paddr_to_pfn(vgic_v2_hw.vbase + PAGE_SIZE));
+ else
+ ret = map_mmio_regions(d, paddr_to_pfn(d->arch.vgic.cbase + PAGE_SIZE),
+ 2, paddr_to_pfn(vgic_v2_hw.vbase + SZ_64K));
+
+ if ( ret )
+ return ret;
/* By default deliver to CPU0 */
for ( i = 0; i < DOMAIN_NR_RANKS(d); i++ )
memset(d->arch.vgic.shared_irqs[i].v2.itargets, 0x1,
sizeof(d->arch.vgic.shared_irqs[i].v2.itargets));
- /* We rely on gicv_setup() to initialize dbase(vGIC distributor base) */
register_mmio_handler(d, &vgic_v2_distr_mmio_handler, d->arch.vgic.dbase,
PAGE_SIZE);
int vgic_v2_init(struct domain *d)
{
+ if ( !vgic_v2_hw.enabled )
+ {
+ printk(XENLOG_G_ERR
+ "d%d: vGICv2 is not supported on this platform.\n",
+ d->domain_id);
+ return -ENODEV;
+ }
+
register_vgic_ops(d, &vgic_v2_ops);
return 0;
#include <asm/current.h>
#include <asm/mmio.h>
#include <asm/gic_v3_defs.h>
-#include <asm/gic.h>
#include <asm/vgic.h>
/* GICD_PIDRn register values for ARM implementations */
*/
#define VGICD_CTLR_DEFAULT (GICD_CTLR_ARE_NS)
+/*
+ * Host GICv3 register layout.  Filled in once by vgic_v3_setup_hw()
+ * (called from the GICv3 driver at boot) and read by the vGICv3
+ * domain-initialisation code to expose the hardware layout to the
+ * hardware domain.
+ */
+static struct {
+    /* Set once vgic_v3_setup_hw() has run; vgic_v3_init() returns
+     * -ENODEV for every domain while this is still 0. */
+    bool_t enabled;
+    /* Distributor interface address */
+    paddr_t dbase;
+    /* Re-distributor regions */
+    unsigned int nr_rdist_regions;
+    const struct rdist_region *regions;
+    uint32_t rdist_stride; /* Re-distributor stride */
+} vgic_v3_hw;
+
+/*
+ * Record the host GICv3 layout: distributor base, the array of
+ * re-distributor regions and the re-distributor stride.  Only the
+ * 'regions' pointer is stored (the array is not copied), so the
+ * caller must keep it valid for the lifetime of the hypervisor.
+ * Must be called before vgic_v3_init(), which otherwise fails with
+ * -ENODEV.
+ */
+void vgic_v3_setup_hw(paddr_t dbase,
+                      unsigned int nr_rdist_regions,
+                      const struct rdist_region *regions,
+                      uint32_t rdist_stride)
+{
+    vgic_v3_hw.enabled = 1;
+    vgic_v3_hw.dbase = dbase;
+    vgic_v3_hw.nr_rdist_regions = nr_rdist_regions;
+    vgic_v3_hw.regions = regions;
+    vgic_v3_hw.rdist_stride = rdist_stride;
+}
+
static struct vcpu *vgic_v3_irouter_to_vcpu(struct domain *d, uint64_t irouter)
{
unsigned int vcpu_id;
{
int i, idx;
+ /*
+ * Domain 0 gets the hardware address.
+ * Guests get the virtual platform layout.
+ */
+ if ( is_hardware_domain(d) )
+ {
+ unsigned int first_cpu = 0;
+
+ d->arch.vgic.dbase = vgic_v3_hw.dbase;
+
+ d->arch.vgic.rdist_stride = vgic_v3_hw.rdist_stride;
+ /*
+ * If the stride is not set, the default stride for GICv3 is 2 * 64K:
+ * - first 64k page for Control and Physical LPIs
+ * - second 64k page for Control and Generation of SGIs
+ */
+ if ( !d->arch.vgic.rdist_stride )
+ d->arch.vgic.rdist_stride = 2 * SZ_64K;
+
+ for ( i = 0; i < vgic_v3_hw.nr_rdist_regions; i++ )
+ {
+ paddr_t size = vgic_v3_hw.regions[i].size;
+
+ d->arch.vgic.rdist_regions[i].base = vgic_v3_hw.regions[i].base;
+ d->arch.vgic.rdist_regions[i].size = size;
+
+ /* Set the first CPU handled by this region */
+ d->arch.vgic.rdist_regions[i].first_cpu = first_cpu;
+
+ first_cpu += size / d->arch.vgic.rdist_stride;
+ }
+ d->arch.vgic.nr_regions = vgic_v3_hw.nr_rdist_regions;
+ }
+ else
+ {
+ d->arch.vgic.dbase = GUEST_GICV3_GICD_BASE;
+
+ /* XXX: Only one Re-distributor region mapped for the guest */
+ BUILD_BUG_ON(GUEST_GICV3_RDIST_REGIONS != 1);
+
+ d->arch.vgic.nr_regions = GUEST_GICV3_RDIST_REGIONS;
+ d->arch.vgic.rdist_stride = GUEST_GICV3_RDIST_STRIDE;
+
+ /* The first redistributor should contain enough space for all CPUs */
+ BUILD_BUG_ON((GUEST_GICV3_GICR0_SIZE / GUEST_GICV3_RDIST_STRIDE) < MAX_VIRT_CPUS);
+ d->arch.vgic.rdist_regions[0].base = GUEST_GICV3_GICR0_BASE;
+ d->arch.vgic.rdist_regions[0].size = GUEST_GICV3_GICR0_SIZE;
+ d->arch.vgic.rdist_regions[0].first_cpu = 0;
+ }
+
/* By default deliver to CPU0 */
for ( i = 0; i < DOMAIN_NR_RANKS(d); i++ )
{
for ( idx = 0; idx < 32; idx++ )
d->arch.vgic.shared_irqs[i].v3.irouter[idx] = 0;
}
- /* We rely on gicv init to get dbase and size */
+
+ /* Register mmio handle for the Distributor */
register_mmio_handler(d, &vgic_distr_mmio_handler, d->arch.vgic.dbase,
SZ_64K);
int vgic_v3_init(struct domain *d)
{
+ if ( !vgic_v3_hw.enabled )
+ {
+ printk(XENLOG_G_ERR
+ "d%d: vGICv3 is not supported on this platform.\n",
+ d->domain_id);
+ return -ENODEV;
+ }
+
register_vgic_ops(d, &v3_ops);
return 0;
return -ENODEV;
break;
default:
+ printk(XENLOG_G_ERR "d%d: Unknown vGIC version %u\n",
+ d->domain_id, gic_hw_version());
return -ENODEV;
}
void (*restore_state)(const struct vcpu *);
/* Dump GIC LR register information */
void (*dump_state)(const struct vcpu *);
- /* Map MMIO region of GIC */
- int (*gicv_setup)(struct domain *);
/* hw_irq_controller to enable/disable/eoi host irq */
hw_irq_controller *gic_host_irq_type;
#define ICH_SGI_IRQ_SHIFT 24
#define ICH_SGI_IRQ_MASK 0xf
#define ICH_SGI_TARGETLIST_MASK 0xffff
+
+/*
+ * Description of one host GICv3 re-distributor MMIO region.  Moved
+ * here from the GICv3 driver so the vGIC code can consume the array
+ * passed to vgic_v3_setup_hw().
+ */
+struct rdist_region {
+    paddr_t base;           /* Physical base address of the region */
+    paddr_t size;           /* Size of the region in bytes */
+    void __iomem *map_base; /* I/O-mapped virtual address of the region */
+};
+
#endif /* __ASM_ARM_GIC_V3_DEFS_H__ */
/*
extern void vgic_free_virq(struct domain *d, unsigned int virq);
+/* Called by the GICv2/HIP04 driver to register the host GICv2 layout
+ * with the vGICv2 emulation code. */
+void vgic_v2_setup_hw(paddr_t dbase, paddr_t cbase, paddr_t vbase);
+
+#ifdef HAS_GICV3
+struct rdist_region;
+/* Called by the GICv3 driver to register the host GICv3 layout with
+ * the vGICv3 emulation code.  The 'regions' array is referenced, not
+ * copied, and must outlive all domains. */
+void vgic_v3_setup_hw(paddr_t dbase,
+                      unsigned int nr_rdist_regions,
+                      const struct rdist_region *regions,
+                      uint32_t rdist_stride);
+#endif
+
#endif /* __ASM_ARM_VGIC_H__ */
/*