return 1;
}
/*
 * vmsi_deliver_pirq(): deliver the guest MSI bound to this PIRQ.
 * This hunk drops the return type to void: the function unconditionally
 * returned 1 (see the removed "return 1" below), and its sole visible
 * caller (hvm_pci_msi_assert, patched in this same series) discards the
 * value.
 * NOTE(review): dest/dest_mode/delivery_mode/trig_mode passed to
 * vmsi_deliver() are derived from 'flags' in context lines elided from
 * this hunk — confirm against the full file.
 */
-int vmsi_deliver_pirq(struct domain *d, const struct hvm_pirq_dpci *pirq_dpci)
+void vmsi_deliver_pirq(struct domain *d, const struct hvm_pirq_dpci *pirq_dpci)
{
uint32_t flags = pirq_dpci->gmsi.gflags;
int vector = pirq_dpci->gmsi.gvec;
/* Only legitimate for pirqs routed as guest MSI. */
ASSERT(pirq_dpci->flags & HVM_IRQ_DPCI_GUEST_MSI);
vmsi_deliver(d, vector, dest, dest_mode, delivery_mode, trig_mode);
- return 1;
}
/* Return value, -1 : multi-dests, non-negative value: dest_vcpu_id */
/* Get the notification function for a given Xen-bound event channel. */
#define xen_notification_fn(e) (xen_consumers[(e)->xen_consumer-1])
-static int evtchn_set_pending(struct vcpu *v, int port);
+static void evtchn_set_pending(struct vcpu *v, int port);
static int virq_is_global(uint32_t virq)
{
if ( consumer_is_xen(rchn) )
(*xen_notification_fn(rchn))(rvcpu, rport);
else
- {
evtchn_set_pending(rvcpu, rport);
- }
break;
case ECS_IPI:
evtchn_set_pending(ld->vcpu[lchn->notify_vcpu_id], lport);
return ret;
}
/*
 * evtchn_set_pending(): mark an event-channel port pending for @v and
 * wake any pollers.  Converted from int to void by this patch: the old
 * return values (1 = already pending, 0 = newly set) were booleans that
 * the remaining callers in this patch no longer consume.
 */
-static int evtchn_set_pending(struct vcpu *v, int port)
+static void evtchn_set_pending(struct vcpu *v, int port)
{
struct domain *d = v->domain;
int vcpuid;
*/
/* Port already pending: nothing more to do (was "return 1"). */
if ( test_and_set_bit(port, &shared_info(d, evtchn_pending)) )
- return 1;
+ return;
/* NOTE(review): the body of this unmasked-port test is in elided
 * context lines — presumably it kicks the target vcpu; confirm. */
if ( !test_bit (port, &shared_info(d, evtchn_mask)) &&
!test_and_set_bit(port / BITS_PER_EVTCHN_WORD(d),
/* Check if some VCPU might be polling for this event. */
if ( likely(bitmap_empty(d->poll_mask, d->max_vcpus)) )
- return 0;
+ return;
/* Wake any interested (or potentially interested) pollers. */
for ( vcpuid = find_first_bit(d->poll_mask, d->max_vcpus);
vcpu_unblock(v);
}
}
-
- return 0;
}
int guest_enabled_event(struct vcpu *v, uint32_t virq)
spin_unlock_irqrestore(&v->virq_lock, flags);
}
/*
 * send_guest_pirq(): raise the event channel bound to a physical IRQ.
 * Converted to void: it used to forward evtchn_set_pending()'s
 * already-pending boolean, which evtchn_set_pending no longer returns
 * (converted in this same patch) and which no visible caller used.
 */
-int send_guest_pirq(struct domain *d, const struct pirq *pirq)
+void send_guest_pirq(struct domain *d, const struct pirq *pirq)
{
int port;
struct evtchn *chn;
/*
 * An unbound pirq is only tolerable for HVM guests (whose pirqs may
 * legitimately have no event channel yet); anything else is a bug.
 */
if ( pirq == NULL || (port = pirq->evtchn) == 0 )
{
BUG_ON(!is_hvm_domain(d));
- return 0;
+ return;
}
chn = evtchn_from_port(d, port);
- return evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port);
+ evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port);
}
static struct domain *global_virq_handlers[NR_VIRQS] __read_mostly;
spin_unlock(&d->event_lock);
}
/*
 * hvm_pci_msi_assert(): inject an MSI into the guest, either via the
 * pirq's event channel or via direct vMSI delivery.  Both callees were
 * made void earlier in this patch, so the value-returning ternary is
 * replaced by a plain if/else and the function itself becomes void.
 */
-static int hvm_pci_msi_assert(struct domain *d,
- struct hvm_pirq_dpci *pirq_dpci)
+static void hvm_pci_msi_assert(
+ struct domain *d, struct hvm_pirq_dpci *pirq_dpci)
{
struct pirq *pirq = dpci_pirq(pirq_dpci);
- return (hvm_domain_use_pirq(d, pirq)
- ? send_guest_pirq(d, pirq)
- : vmsi_deliver_pirq(d, pirq_dpci));
+ if ( hvm_domain_use_pirq(d, pirq) )
+ send_guest_pirq(d, pirq);
+ else
+ vmsi_deliver_pirq(d, pirq_dpci);
}
static int _hvm_dirq_assist(struct domain *d, struct hvm_pirq_dpci *pirq_dpci,
uint8_t dest, uint8_t dest_mode,
uint8_t delivery_mode, uint8_t trig_mode);
struct hvm_pirq_dpci;
-int vmsi_deliver_pirq(struct domain *d, const struct hvm_pirq_dpci *);
+void vmsi_deliver_pirq(struct domain *d, const struct hvm_pirq_dpci *);
int hvm_girq_dest_2_vcpu_id(struct domain *d, uint8_t dest, uint8_t dest_mode);
#define hvm_paging_enabled(v) \
* send_guest_pirq:
* @d: Domain to which physical IRQ should be sent
* @pirq: Physical IRQ number
- * Returns TRUE if the delivery port was already pending.
*/
-int send_guest_pirq(struct domain *, const struct pirq *);
+void send_guest_pirq(struct domain *, const struct pirq *);
/* Send a notification from a given domain's event-channel port. */
int evtchn_send(struct domain *d, unsigned int lport);