int rc = 0, iommu_ret = 0;
ASSERT(!(type & ~(PGT_type_mask | PGT_pae_xen_l2)));
+ ASSERT(!in_irq());
for ( ; ; )
{
/*
 * On type change we check to flush stale TLB entries. This
 * may be unnecessary (e.g., page was GDT/LDT) but those
 * circumstances should be very rare.
 */
- cpumask_t mask;
+ cpumask_t *mask = this_cpu(scratch_cpumask);
- cpumask_copy(&mask, d->domain_dirty_cpumask);
+ BUG_ON(in_irq());
+ cpumask_copy(mask, d->domain_dirty_cpumask);
/* Don't flush if the timestamp is old enough */
- tlbflush_filter(&mask, page->tlbflush_timestamp);
+ tlbflush_filter(mask, page->tlbflush_timestamp);
- if ( unlikely(!cpumask_empty(&mask)) &&
+ if ( unlikely(!cpumask_empty(mask)) &&
/* Shadow mode: track only writable pages. */
(!shadow_mode_enabled(page_get_owner(page)) ||
((nx & PGT_type_mask) == PGT_writable_page)) )
{
perfc_incr(need_flush_tlb_flush);
- flush_tlb_mask(&mask);
+ flush_tlb_mask(mask);
}
/* We lose existing type and validity. */
case MMUEXT_TLB_FLUSH_MULTI:
case MMUEXT_INVLPG_MULTI:
{
- cpumask_t pmask;
+ cpumask_t *mask = this_cpu(scratch_cpumask);
if ( unlikely(d != pg_owner) )
rc = -EPERM;
else if ( unlikely(vcpumask_to_pcpumask(d,
guest_handle_to_param(op.arg2.vcpumask,
const_void),
- &pmask)) )
+ mask)) )
rc = -EINVAL;
if ( unlikely(rc) )
break;
if ( op.cmd == MMUEXT_TLB_FLUSH_MULTI )
- flush_tlb_mask(&pmask);
+ flush_tlb_mask(mask);
else if ( __addr_ok(op.arg1.linear_addr) )
- flush_tlb_one_mask(&pmask, op.arg1.linear_addr);
+ flush_tlb_one_mask(mask, op.arg1.linear_addr);
break;
}
else if ( likely(cache_flush_permitted(d)) )
{
unsigned int cpu;
- cpumask_t mask;
+ cpumask_t *mask = this_cpu(scratch_cpumask);
- cpumask_clear(&mask);
+ cpumask_clear(mask);
for_each_online_cpu(cpu)
- if ( !cpumask_intersects(&mask,
+ if ( !cpumask_intersects(mask,
per_cpu(cpu_sibling_mask, cpu)) )
- __cpumask_set_cpu(cpu, &mask);
- flush_mask(&mask, FLUSH_CACHE);
+ __cpumask_set_cpu(cpu, mask);
+ flush_mask(mask, FLUSH_CACHE);
}
else
{
struct page_info *gl1pg;
l1_pgentry_t *pl1e;
unsigned long bmap_ptr, gl1mfn;
- cpumask_t pmask;
+ cpumask_t *mask = NULL;
int rc;
perfc_incr(calls_to_update_va);
flush_tlb_local();
break;
case UVMF_ALL:
- flush_tlb_mask(d->domain_dirty_cpumask);
+ mask = d->domain_dirty_cpumask;
break;
default:
+ mask = this_cpu(scratch_cpumask);
rc = vcpumask_to_pcpumask(d, const_guest_handle_from_ptr(bmap_ptr,
void),
- &pmask);
- flush_tlb_mask(&pmask);
+ mask);
break;
}
+ if ( mask )
+ flush_tlb_mask(mask);
break;
case UVMF_INVLPG:
paging_invlpg(v, va);
break;
case UVMF_ALL:
- flush_tlb_one_mask(d->domain_dirty_cpumask, va);
+ mask = d->domain_dirty_cpumask;
break;
default:
+ mask = this_cpu(scratch_cpumask);
rc = vcpumask_to_pcpumask(d, const_guest_handle_from_ptr(bmap_ptr,
void),
- &pmask);
- flush_tlb_one_mask(&pmask, va);
+ mask);
break;
}
+ if ( mask )
+ flush_tlb_one_mask(mask, va);
break;
}
/* bitmap indicating which fixmap pages are free */
static DEFINE_SPINLOCK(msix_fixmap_lock);
static DECLARE_BITMAP(msix_fixmap_pages, FIX_MSIX_MAX_PAGES);
-static DEFINE_PER_CPU(cpumask_var_t, scratch_mask);
static int msix_fixmap_alloc(void)
{
if ( cpu_mask )
{
- cpumask_t *mask = this_cpu(scratch_mask);
+ cpumask_t *mask = this_cpu(scratch_cpumask);
if ( !cpumask_intersects(cpu_mask, &cpu_online_map) )
return;
return 0;
}
-static int msi_cpu_callback(
- struct notifier_block *nfb, unsigned long action, void *hcpu)
-{
- unsigned int cpu = (unsigned long)hcpu;
-
- switch ( action )
- {
- case CPU_UP_PREPARE:
- if ( !alloc_cpumask_var(&per_cpu(scratch_mask, cpu)) )
- return notifier_from_errno(ENOMEM);
- break;
- case CPU_UP_CANCELED:
- case CPU_DEAD:
- free_cpumask_var(per_cpu(scratch_mask, cpu));
- break;
- default:
- break;
- }
-
- return NOTIFY_DONE;
-}
-
-static struct notifier_block msi_cpu_nfb = {
- .notifier_call = msi_cpu_callback
-};
-
void __init early_msi_init(void)
{
if ( use_msi < 0 )
use_msi = !(acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_MSI);
if ( !use_msi )
return;
-
- register_cpu_notifier(&msi_cpu_nfb);
- if ( msi_cpu_callback(&msi_cpu_nfb, CPU_UP_PREPARE, NULL) &
- NOTIFY_STOP_MASK )
- BUG();
}
static void dump_msi(unsigned char key)
/* representing HT and core siblings of each logical CPU */
DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_mask);
+DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, scratch_cpumask);
+
cpumask_t cpu_online_map __read_mostly;
EXPORT_SYMBOL(cpu_online_map);
free_cpumask_var(per_cpu(cpu_sibling_mask, cpu));
free_cpumask_var(per_cpu(cpu_core_mask, cpu));
+ free_cpumask_var(per_cpu(scratch_cpumask, cpu));
if ( per_cpu(stubs.addr, cpu) )
{
goto oom;
if ( zalloc_cpumask_var(&per_cpu(cpu_sibling_mask, cpu)) &&
- zalloc_cpumask_var(&per_cpu(cpu_core_mask, cpu)) )
+ zalloc_cpumask_var(&per_cpu(cpu_core_mask, cpu)) &&
+ alloc_cpumask_var(&per_cpu(scratch_cpumask, cpu)) )
return 0;
oom:
panic("No memory for socket CPU siblings map");
if ( !zalloc_cpumask_var(&per_cpu(cpu_sibling_mask, 0)) ||
- !zalloc_cpumask_var(&per_cpu(cpu_core_mask, 0)) )
+ !zalloc_cpumask_var(&per_cpu(cpu_core_mask, 0)) ||
+ !alloc_cpumask_var(&per_cpu(scratch_cpumask, 0)) )
panic("No memory for boot CPU sibling/core maps");
set_cpu_sibling_map(0);
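
In short, the pattern these hunks adopt is: allocate one scratch cpumask per CPU at bring-up and borrow it via this_cpu() wherever a temporary mask is needed, rather than declaring a cpumask_t (up to NR_CPUS bits) on the hypervisor stack. A minimal sketch of that shape, using only the helpers already visible in the hunks above; the function names scratch_mask_alloc() and flush_dirty_cpus() are hypothetical, not part of the patch:

    /* Sketch only: condenses the per-CPU scratch mask pattern used above. */
    DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, scratch_cpumask);

    static int scratch_mask_alloc(unsigned int cpu)
    {
        /* Allocated once per CPU at bring-up (cf. the cpu_smpboot_alloc hunk). */
        return alloc_cpumask_var(&per_cpu(scratch_cpumask, cpu)) ? 0 : -ENOMEM;
    }

    static void flush_dirty_cpus(struct domain *d, const struct page_info *page)
    {
        /*
         * Borrow this CPU's scratch mask. Callers must not run in IRQ
         * context, or an interrupt handler could clobber the same mask
         * while it is in use (hence the BUG_ON/ASSERT in the hunks above).
         */
        cpumask_t *mask = this_cpu(scratch_cpumask);

        BUG_ON(in_irq());
        cpumask_copy(mask, d->domain_dirty_cpumask);
        tlbflush_filter(mask, page->tlbflush_timestamp);
        if ( !cpumask_empty(mask) )
            flush_tlb_mask(mask);
    }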