 {
     /* Only if using EPT and this domain has some VCPUs to dirty. */
     if ( d->arch.hvm_domain.hap_enabled && d->vcpu[0] )
+    {
+        ASSERT(local_irq_is_enabled());
         on_each_cpu(__ept_sync_domain, d, 1, 1);
+    }
 }
 static void __vmx_inject_exception(
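
The hunk above guards the cross-CPU EPT flush with an interrupts-enabled assertion: on_each_cpu() sends IPIs and, with the wait flag set, spins until every CPU has run __ept_sync_domain, so calling it with local interrupts disabled risks deadlocking against a remote CPU that is itself waiting for this CPU to service an IPI. The toy program below is only an illustrative sketch of that guard, not part of the patch; names such as model_on_each_cpu and local_irqs_enabled are made up for the example and are not Xen APIs.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy single-CPU model of the guard added in ept_sync_domain(); all names
 * here are hypothetical stand-ins, not the real Xen primitives. */
static bool local_irqs_enabled = true;

static bool local_irq_is_enabled_model(void)
{
    return local_irqs_enabled;
}

/* Stand-in for on_each_cpu(): the real call sends IPIs and waits for every
 * CPU, which must never be attempted with local interrupts disabled. */
static void model_on_each_cpu(void (*fn)(void *), void *arg)
{
    assert(local_irq_is_enabled_model());   /* same idea as the new ASSERT */
    fn(arg);                                 /* pretend every CPU ran fn */
}

static void sync_one(void *arg)
{
    printf("synced %s\n", (const char *)arg);
}

int main(void)
{
    model_on_each_cpu(sync_one, "domain 0");   /* OK: interrupts enabled */

    local_irqs_enabled = false;                /* simulate irq-off context */
    model_on_each_cpu(sync_one, "domain 0");   /* trips the assertion */
    return 0;
}
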
     struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);
     spin_lock_irqsave(&qi_ctrl->qinval_poll_lock, flags);
-    spin_lock_irqsave(&iommu->register_lock, flags);
+    spin_lock(&iommu->register_lock);
     index = qinval_next_index(iommu);
     if ( *saddr == 1 )
         *saddr = 0;
     ret = gen_wait_dsc(iommu, index, iflag, sw, fn, sdata, saddr);
     ret |= qinval_update_qtail(iommu, index);
-    spin_unlock_irqrestore(&iommu->register_lock, flags);
+    spin_unlock(&iommu->register_lock);
     /* Now we don't support interrupt method */
     if ( sw )
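
The second hunk drops the irqsave/irqrestore variants for the inner register_lock: the outer spin_lock_irqsave() on qinval_poll_lock has already disabled interrupts, and reusing the same flags variable for the inner lock overwrites the saved IRQ state, so the final spin_unlock_irqrestore() would leave interrupts disabled on exit. The standalone sketch below reproduces that flags-clobbering effect; it uses hypothetical stand-ins (lock_irqsave, unlock_irqrestore, a single global "interrupts enabled" bit), not Xen's spinlock implementation.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical model of spin_lock_irqsave()/spin_unlock_irqrestore(); a
 * single global bit stands in for the local interrupt flag. */
static bool irqs_enabled = true;

static void lock_irqsave(unsigned long *flags)
{
    *flags = irqs_enabled;   /* save the caller's IRQ state */
    irqs_enabled = false;    /* interrupts stay off while the lock is held */
}

static void unlock_irqrestore(unsigned long flags)
{
    irqs_enabled = flags;    /* restore whatever state was saved */
}

int main(void)
{
    unsigned long flags;

    lock_irqsave(&flags);     /* outer lock: saves "enabled", disables IRQs */
    lock_irqsave(&flags);     /* bug: overwrites flags with "disabled" */
    unlock_irqrestore(flags); /* inner unlock */
    unlock_irqrestore(flags); /* outer unlock restores "disabled", not "enabled" */

    printf("IRQs enabled after both unlocks: %s (should be true)\n",
           irqs_enabled ? "true" : "false");
    return 0;
}

With the inner lock changed to plain spin_lock()/spin_unlock(), only the outer qinval_poll_lock pair touches flags, so the caller's interrupt state is restored correctly.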