uint64_t lr_mask;
} gic;
-irq_desc_t irq_desc[NR_IRQS];
+/* Descriptors for the shared SPIs only: the first NR_LOCAL_IRQS
+ * (banked SGIs+PPIs) live in the per-CPU array below, and
+ * __irq_to_desc() indexes this array with irq - NR_LOCAL_IRQS, so a
+ * full NR_IRQS sizing would waste NR_LOCAL_IRQS cacheline-aligned
+ * entries. */
+static irq_desc_t irq_desc[NR_IRQS - NR_LOCAL_IRQS];
+static DEFINE_PER_CPU(irq_desc_t[NR_LOCAL_IRQS], local_irq_desc);
+
unsigned nr_lrs;
+/* Map an IRQ number to its descriptor.  IRQs below NR_LOCAL_IRQS (the
+ * banked SGIs/PPIs) resolve to the calling CPU's private table; higher
+ * IRQs (SPIs) live in the shared irq_desc[] array, which is indexed
+ * with the local IRQs subtracted off. */
+irq_desc_t *__irq_to_desc(int irq)
+{
+ if (irq < NR_LOCAL_IRQS) return &this_cpu(local_irq_desc)[irq];
+ return &irq_desc[irq-NR_LOCAL_IRQS];
+}
+
void gic_save_state(struct vcpu *v)
{
int i;
{
int i;
- /* The first 32 interrupts (PPI and SGI) are banked per-cpu, so
- * even though they are controlled with GICD registers, they must
+ /* The first 32 interrupts (PPI and SGI) are banked per-cpu, so
+ * even though they are controlled with GICD registers, they must
* be set up here with the other per-cpu state. */
GICD[GICD_ICENABLER] = 0xffff0000; /* Disable all PPI */
GICD[GICD_ISENABLER] = 0x0000ffff; /* Enable all SGI */
spin_unlock_irq(&gic.lock);
}
-void gic_route_irqs(void)
+/* Route the banked per-processor interrupts (PPIs).  These registers
+ * are banked per-CPU in the GIC, so this must run on every CPU. */
+void gic_route_ppis(void)
{
/* XXX should get these from DT */
/* GIC maintenance */
gic_route_irq(26, 1, 1u << smp_processor_id(), 0xa0);
/* Timer */
gic_route_irq(30, 1, 1u << smp_processor_id(), 0xa0);
+}
+
+/* Route the shared peripheral interrupts (SPIs).  These are global
+ * GIC state, so the boot CPU routes them exactly once. */
+void gic_route_spis(void)
+{
+ /* XXX should get these from DT */
/* UART */
gic_route_irq(37, 0, 1u << smp_processor_id(), 0xa0);
}
rc = __setup_irq(desc, irq, new);
- spin_unlock_irqrestore(&desc->lock,flags);
+ spin_unlock_irqrestore(&desc->lock, flags);
return rc;
}
extern void vgic_vcpu_inject_irq(struct vcpu *v, unsigned int irq,int virtual);
extern struct pending_irq *irq_to_pending(struct vcpu *v, unsigned int irq);
-extern void gic_route_irqs(void);
+extern void gic_route_ppis(void);
+extern void gic_route_spis(void);
extern void gic_inject(void);
{
int irq;
- for (irq = 0; irq < NR_IRQS; irq++) {
+ for (irq = NR_LOCAL_IRQS; irq < NR_IRQS; irq++) {
struct irq_desc *desc = irq_to_desc(irq);
init_one_irq_desc(desc);
desc->irq = irq;
desc->action = NULL;
}
+
+ return 0;
+}
+
+/* Initialise this CPU's banked (SGI/PPI) irq descriptors.  Runs on the
+ * boot CPU via init_IRQ() and on each secondary via
+ * init_secondary_IRQ(); irq_to_desc() resolves these low IRQ numbers
+ * to the per-CPU local_irq_desc table. */
+static int __cpuinit init_local_irq_data(void)
+{
+ int irq;
+
+ for (irq = 0; irq < NR_LOCAL_IRQS; irq++) {
+ struct irq_desc *desc = irq_to_desc(irq);
+ init_one_irq_desc(desc);
+ desc->irq = irq;
+ desc->action = NULL;
+ }
+
return 0;
}
void __init init_IRQ(void)
{
+ /* Boot CPU: banked SGI/PPI descriptors first, then the shared SPIs. */
+ BUG_ON(init_local_irq_data() < 0);
BUG_ON(init_irq_data() < 0);
}
+/* Secondary CPUs only need their private banked descriptors; the
+ * shared SPI descriptors were already set up by the boot CPU. */
+void __cpuinit init_secondary_IRQ(void)
+{
+ BUG_ON(init_local_irq_data() < 0);
+}
+
int __init request_irq(unsigned int irq,
void (*handler)(int, void *, struct cpu_user_regs *),
unsigned long irqflags, const char * devname, void *dev_id)
init_IRQ();
- gic_route_irqs();
+ gic_route_ppis();
+ gic_route_spis();
init_maintenance_interrupt();
init_timer_interrupt();
#include <xen/sched.h>
#include <xen/smp.h>
#include <xen/softirq.h>
+#include <xen/timer.h>
+#include <xen/irq.h>
#include <asm/vfp.h>
#include "gic.h"
enable_vfp();
gic_init_secondary_cpu();
+
+ init_secondary_IRQ();
+
+ gic_route_ppis();
+
+ init_maintenance_interrupt();
init_timer_interrupt();
- gic_route_irqs();
set_current(idle_vcpu[cpuid]);
#define arch_irq_desc irq_cfg
};
+/* The first 32 IRQs (SGIs 0-15, PPIs 16-31) are banked per-CPU in the
+ * GIC; their descriptors live in per-CPU storage. */
+#define NR_LOCAL_IRQS 32
+#define NR_IRQS 1024
+#define nr_irqs NR_IRQS
+
+struct irq_desc;
+
+/* Resolves local IRQs to this CPU's banked table, others to the
+ * shared SPI array. */
+struct irq_desc *__irq_to_desc(int irq);
+
+/* Overrides the generic array-indexing irq_to_desc() in xen/irq.h. */
+#define irq_to_desc(irq) __irq_to_desc(irq)
+
void do_IRQ(struct cpu_user_regs *regs, unsigned int irq, int is_fiq);
#define domain_pirq_to_irq(d, pirq) (pirq)
+void init_IRQ(void);
+void init_secondary_IRQ(void);
+
#endif /* _ASM_HW_IRQ_H */
/*
* Local variables:
int construct_dom0(struct domain *d);
-void init_IRQ(void);
-
#endif
/*
* Local variables:
struct list_head rl_link;
} __cacheline_aligned irq_desc_t;
+/* Architectures with banked per-CPU IRQs (e.g. ARM) supply their own
+ * mapping in asm/irq.h; only fall back to plain array indexing if
+ * they have not. */
+#ifndef irq_to_desc
#define irq_to_desc(irq) (&irq_desc[irq])
+#endif
int init_one_irq_desc(struct irq_desc *);
int arch_init_one_irq_desc(struct irq_desc *);
#define irq_desc_initialized(desc) ((desc)->handler != NULL)
-#if defined(__arm__)
-
-#define NR_IRQS 1024
-#define nr_irqs NR_IRQS
-extern irq_desc_t irq_desc[NR_IRQS];
-
-#endif
-
extern int setup_irq(unsigned int irq, struct irqaction *);
extern void release_irq(unsigned int irq);
extern int request_irq(unsigned int irq,