 * If it is false, it is the caller's responsibility to make sure
 * that the softirq (with the event_lock dropped) has run.
*/
-bool_t pt_pirq_softirq_active(struct hvm_pirq_dpci *pirq_dpci)
+bool pt_pirq_softirq_active(struct hvm_pirq_dpci *pirq_dpci)
{
if ( pirq_dpci->state & ((1 << STATE_RUN) | (1 << STATE_SCHED)) )
- return 1;
+ return true;
/*
* If in the future we would call 'raise_softirq_for' right away
* after 'pt_pirq_softirq_active' we MUST reset the list (otherwise it
* might have stale data).
*/
- return 0;
+ return false;
}
/*
pirq_dpci->masked = 0;
}
-bool_t pt_irq_need_timer(uint32_t flags)
+bool pt_irq_need_timer(uint32_t flags)
{
return !(flags & (HVM_IRQ_DPCI_GUEST_MSI | HVM_IRQ_DPCI_TRANSLATE));
}
*/
static struct vcpu *vector_hashing_dest(const struct domain *d,
uint32_t dest_id,
- bool_t dest_mode,
+ bool dest_mode,
uint8_t gvec)
{
dpci->gmsi.dest_vcpu_id = -1;
}
-bool_t pt_pirq_cleanup_check(struct hvm_pirq_dpci *dpci)
+bool pt_pirq_cleanup_check(struct hvm_pirq_dpci *dpci)
{
if ( !dpci->flags && !pt_pirq_softirq_active(dpci) )
{
dpci->dom = NULL;
- return 1;
+ return true;
}
- return 0;
+ return false;
}
int pt_pirq_iterate(struct domain *d,
struct hvm_pirq_dpci {
uint32_t flags;
unsigned int state;
- bool_t masked;
+ bool masked;
uint16_t pending;
struct list_head digl_list;
struct domain *dom;
};
void pt_pirq_init(struct domain *, struct hvm_pirq_dpci *);
-bool_t pt_pirq_cleanup_check(struct hvm_pirq_dpci *);
+bool pt_pirq_cleanup_check(struct hvm_pirq_dpci *);
int pt_pirq_iterate(struct domain *d,
int (*cb)(struct domain *,
struct hvm_pirq_dpci *, void *arg),
void *arg);
-bool_t pt_pirq_softirq_active(struct hvm_pirq_dpci *);
+bool pt_pirq_softirq_active(struct hvm_pirq_dpci *);
/* Modify state of a PCI INTx wire. */
void hvm_pci_intx_assert(struct domain *d, unsigned int device,
unsigned int intx);