return rc;
}
+/*
+ * HVMOP_inj_msi handler: inject an emulated MSI (address/data pair) into
+ * a remote HVM domain on behalf of the caller (device model / toolstack).
+ *
+ * Returns 0 on success, -EFAULT if the guest handle cannot be copied,
+ * -EINVAL if the target is not an HVM domain, or the error code from the
+ * RCU domain lookup / XSM permission check.
+ */
+static int hvmop_inj_msi(
+ XEN_GUEST_HANDLE(xen_hvm_inj_msi_t) uop)
+{
+ struct xen_hvm_inj_msi op;
+ struct domain *d;
+ int rc;
+
+ /* Copy the hypercall argument structure in from guest memory. */
+ if ( copy_from_guest(&op, uop, 1) )
+ return -EFAULT;
+
+ /* Look up and RCU-lock the target domain (must not be the caller). */
+ rc = rcu_lock_remote_target_domain_by_id(op.domid, &d);
+ if ( rc != 0 )
+ return rc;
+
+ /* MSI injection is only meaningful for HVM guests. */
+ rc = -EINVAL;
+ if ( !is_hvm_domain(d) )
+ goto out;
+
+ /* XSM access-control check for this operation on the target domain. */
+ rc = xsm_hvm_inj_msi(d);
+ if ( rc )
+ goto out;
+
+ rc = 0;
+ /* hvm_inj_msi() has no failure path; decode and deliver the MSI. */
+ hvm_inj_msi(d, op.addr, op.data);
+
+ out:
+ rcu_unlock_domain(d);
+ return rc;
+}
+
static int hvmop_flush_tlb_all(void)
{
struct domain *d = current->domain;
guest_handle_cast(arg, xen_hvm_set_isa_irq_level_t));
break;
+ case HVMOP_inj_msi:
+ rc = hvmop_inj_msi(
+ guest_handle_cast(arg, xen_hvm_inj_msi_t));
+ break;
+
case HVMOP_set_pci_link_route:
rc = hvmop_set_pci_link_route(
guest_handle_cast(arg, xen_hvm_set_pci_link_route_t));
#include <xen/irq.h>
#include <asm/hvm/domain.h>
#include <asm/hvm/support.h>
+#include <asm/msi.h>
/* Must be called with hvm_domain->irq_lock hold */
static void assert_irq(struct domain *d, unsigned ioapic_gsi, unsigned pic_irq)
d->domain_id, link, old_isa_irq, isa_irq);
}
+/*
+ * Decode an MSI address/data pair (in the format a device would write to
+ * its MSI capability registers) and deliver the resulting interrupt to
+ * domain d's virtual LAPIC(s) via vmsi_deliver().
+ *
+ * Only the low 32 bits of @addr are decoded (the 0xfeexxxxx MSI address
+ * window); the upper half is ignored.
+ */
+void hvm_inj_msi(struct domain *d, uint64_t addr, uint32_t data)
+{
+ uint32_t tmp = (uint32_t) addr;
+ /* Destination APIC ID field of the MSI address. */
+ uint8_t dest = (tmp & MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
+ /* 0 = physical, 1 = logical destination mode (normalised with !!). */
+ uint8_t dest_mode = !!(tmp & MSI_ADDR_DESTMODE_MASK);
+ uint8_t delivery_mode = (data & MSI_DATA_DELIVERY_MODE_MASK)
+ >> MSI_DATA_DELIVERY_MODE_SHIFT;
+ uint8_t trig_mode = (data & MSI_DATA_TRIGGER_MASK)
+ >> MSI_DATA_TRIGGER_SHIFT;
+ uint8_t vector = data & MSI_DATA_VECTOR_MASK;
+
+ vmsi_deliver(d, vector, dest, dest_mode, delivery_mode, trig_mode);
+}
+
void hvm_set_callback_via(struct domain *d, uint64_t via)
{
struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
}
}
-int vmsi_deliver(struct domain *d, int pirq)
+int vmsi_deliver(
+ struct domain *d, int vector,
+ uint8_t dest, uint8_t dest_mode,
+ uint8_t delivery_mode, uint8_t trig_mode)
{
- struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
- uint32_t flags = hvm_irq_dpci->mirq[pirq].gmsi.gflags;
- int vector = hvm_irq_dpci->mirq[pirq].gmsi.gvec;
- uint8_t dest = (uint8_t)flags;
- uint8_t dest_mode = !!(flags & VMSI_DM_MASK);
- uint8_t delivery_mode = (flags & VMSI_DELIV_MASK) >> GFLAGS_SHIFT_DELIV_MODE;
- uint8_t trig_mode = (flags & VMSI_TRIG_MODE) >> GFLAGS_SHIFT_TRG_MODE;
struct vlapic *target;
struct vcpu *v;
- HVM_DBG_LOG(DBG_LEVEL_IOAPIC,
- "msi: dest=%x dest_mode=%x delivery_mode=%x "
- "vector=%x trig_mode=%x\n",
- dest, dest_mode, delivery_mode, vector, trig_mode);
-
- if ( !( hvm_irq_dpci->mirq[pirq].flags & HVM_IRQ_DPCI_GUEST_MSI ) )
- {
- gdprintk(XENLOG_WARNING, "pirq %x not msi \n", pirq);
- return 0;
- }
-
switch ( delivery_mode )
{
case dest_LowestPrio:
return 1;
}
+/*
+ * Deliver a guest MSI bound to @pirq: recover the guest vector and the
+ * destination/delivery/trigger fields from the cached gmsi gflags, then
+ * hand off to vmsi_deliver().
+ *
+ * Returns 1 on delivery, 0 if the pirq is not configured as a guest MSI.
+ * NOTE(review): assumes the caller holds whatever lock protects
+ * hvm_irq_dpci->mirq[] -- confirm against call sites.
+ */
+int vmsi_deliver_pirq(struct domain *d, int pirq)
+{
+ struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
+ uint32_t flags = hvm_irq_dpci->mirq[pirq].gmsi.gflags;
+ int vector = hvm_irq_dpci->mirq[pirq].gmsi.gvec;
+ /* Low byte of gflags holds the destination APIC ID. */
+ uint8_t dest = (uint8_t)flags;
+ uint8_t dest_mode = !!(flags & VMSI_DM_MASK);
+ uint8_t delivery_mode = (flags & VMSI_DELIV_MASK)
+ >> GFLAGS_SHIFT_DELIV_MODE;
+ uint8_t trig_mode = (flags&VMSI_TRIG_MODE) >> GFLAGS_SHIFT_TRG_MODE;
+
+ HVM_DBG_LOG(DBG_LEVEL_IOAPIC,
+ "msi: dest=%x dest_mode=%x delivery_mode=%x "
+ "vector=%x trig_mode=%x\n",
+ dest, dest_mode, delivery_mode, vector, trig_mode);
+
+ /* Only pirqs bound as guest MSIs carry valid gmsi state. */
+ if ( !(hvm_irq_dpci->mirq[pirq].flags & HVM_IRQ_DPCI_GUEST_MSI) )
+ {
+ gdprintk(XENLOG_WARNING, "pirq %x not msi \n", pirq);
+ return 0;
+ }
+
+ vmsi_deliver(d, vector, dest, dest_mode, delivery_mode, trig_mode);
+ return 1;
+}
+
/* Return value, -1 : multi-dests, non-negative value: dest_vcpu_id */
int hvm_girq_dest_2_vcpu_id(struct domain *d, uint8_t dest, uint8_t dest_mode)
{
static int hvm_pci_msi_assert(struct domain *d, int pirq)
{
- if ( hvm_domain_use_pirq(d, pirq) )
- return send_guest_pirq(d, pirq);
- else
- return vmsi_deliver_pirq(d, pirq);
+ /* Event-channel delivery if the guest bound the pirq, else emulate MSI. */
+ return (hvm_domain_use_pirq(d, pirq)
+ ? send_guest_pirq(d, pirq)
+ : vmsi_deliver_pirq(d, pirq));
}
#endif
void hvm_set_guest_time(struct vcpu *v, u64 guest_time);
u64 hvm_get_guest_time(struct vcpu *v);
-int vmsi_deliver(struct domain *d, int pirq);
+int vmsi_deliver(
+ struct domain *d, int vector,
+ uint8_t dest, uint8_t dest_mode,
+ uint8_t delivery_mode, uint8_t trig_mode);
+int vmsi_deliver_pirq(struct domain *d, int pirq);
int hvm_girq_dest_2_vcpu_id(struct domain *d, uint8_t dest, uint8_t dest_mode);
#define hvm_paging_enabled(v) \
#define MSI_DATA_DELIVERY_MODE_SHIFT 8
#define MSI_DATA_DELIVERY_FIXED (0 << MSI_DATA_DELIVERY_MODE_SHIFT)
#define MSI_DATA_DELIVERY_LOWPRI (1 << MSI_DATA_DELIVERY_MODE_SHIFT)
+#define MSI_DATA_DELIVERY_MODE_MASK 0x00000700
#define MSI_DATA_LEVEL_SHIFT 14
#define MSI_DATA_LEVEL_DEASSERT (0 << MSI_DATA_LEVEL_SHIFT)
#define MSI_DATA_TRIGGER_SHIFT 15
#define MSI_DATA_TRIGGER_EDGE (0 << MSI_DATA_TRIGGER_SHIFT)
#define MSI_DATA_TRIGGER_LEVEL (1 << MSI_DATA_TRIGGER_SHIFT)
+#define MSI_DATA_TRIGGER_MASK 0x00008000
/*
* Shift/mask fields for msi address
#define MSI_ADDR_DESTMODE_SHIFT 2
#define MSI_ADDR_DESTMODE_PHYS (0 << MSI_ADDR_DESTMODE_SHIFT)
#define MSI_ADDR_DESTMODE_LOGIC (1 << MSI_ADDR_DESTMODE_SHIFT)
+#define MSI_ADDR_DESTMODE_MASK 0x4
#define MSI_ADDR_REDIRECTION_SHIFT 3
#define MSI_ADDR_REDIRECTION_CPU (0 << MSI_ADDR_REDIRECTION_SHIFT)
typedef struct xen_hvm_get_mem_type xen_hvm_get_mem_type_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_mem_type_t);
+/* Following tools-only interfaces may change in future. */
+#if defined(__XEN__) || defined(__XEN_TOOLS__)
+
+/* MSI injection for emulated devices */
+#define HVMOP_inj_msi 16
+struct xen_hvm_inj_msi {
+ /* Domain ID of the HVM guest to inject the MSI into. */
+ domid_t domid;
+ /*
+ * MSI data register value (lower 32 bits: vector, delivery mode,
+ * trigger mode). NOTE(review): 2 bytes of implicit padding precede
+ * this field after domid -- confirm ABI layout is intended.
+ */
+ uint32_t data;
+ /* MSI address register value (0xfeexxxxx window; low 32 bits decoded). */
+ uint64_t addr;
+};
+
+#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
+
#endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */
void hvm_set_pci_link_route(struct domain *d, u8 link, u8 isa_irq);
+void hvm_inj_msi(struct domain *d, uint64_t addr, uint32_t data);
+
void hvm_maybe_deassert_evtchn_irq(void);
void hvm_assert_evtchn_irq(struct vcpu *v);
void hvm_set_callback_via(struct domain *d, uint64_t via);
int (*hvm_set_pci_intx_level) (struct domain *d);
int (*hvm_set_isa_irq_level) (struct domain *d);
int (*hvm_set_pci_link_route) (struct domain *d);
+ int (*hvm_inj_msi) (struct domain *d);
int (*apic) (struct domain *d, int cmd);
int (*assign_vector) (struct domain *d, uint32_t pirq);
int (*xen_settime) (void);
return xsm_call(hvm_set_pci_link_route(d));
}
+/* XSM hook wrapper: may the caller inject an MSI into domain @d? */
+static inline int xsm_hvm_inj_msi (struct domain *d)
+{
+ return xsm_call(hvm_inj_msi(d));
+}
+
static inline int xsm_apic (struct domain *d, int cmd)
{
return xsm_call(apic(d, cmd));