From: Keir Fraser Date: Fri, 26 Oct 2007 15:06:49 +0000 (+0100) Subject: x86: Replace FLUSH_LEVEL() parameter to flush_area() with rather X-Git-Tag: archive/raspbian/4.8.0-1+rpi1~1^2~14828^2~27 X-Git-Url: https://dgit.raspbian.org/?a=commitdiff_plain;h=7f71fbd32bc8436fb8c3eaf6a9b2d254313a8db4;p=xen.git x86: Replace FLUSH_LEVEL() parameter to flush_area() with rather clearer FLUSH_ORDER(). Also remove bogus assertion. Signed-off-by: Keir Fraser --- diff --git a/xen/arch/x86/flushtlb.c b/xen/arch/x86/flushtlb.c index 808b848de5..cb0fbb7580 100644 --- a/xen/arch/x86/flushtlb.c +++ b/xen/arch/x86/flushtlb.c @@ -98,17 +98,15 @@ void write_cr3(unsigned long cr3) void flush_area_local(const void *va, unsigned int flags) { const struct cpuinfo_x86 *c = &current_cpu_data; - unsigned int level = flags & FLUSH_LEVEL_MASK; + unsigned int order = (flags - 1) & FLUSH_ORDER_MASK; unsigned long irqfl; - ASSERT(level < CONFIG_PAGING_LEVELS); - /* This non-reentrant function is sometimes called in interrupt context. */ local_irq_save(irqfl); if ( flags & (FLUSH_TLB|FLUSH_TLB_GLOBAL) ) { - if ( level == 1 ) + if ( order == 0 ) { /* * We don't INVLPG multi-page regions because the 2M/4M/1G @@ -146,14 +144,14 @@ void flush_area_local(const void *va, unsigned int flags) if ( flags & FLUSH_CACHE ) { - unsigned long i, sz = 0; - sz = level ? 
(1UL << ((level - 1) * PAGETABLE_ORDER)) : ULONG_MAX; + if ( order < (BITS_PER_LONG - PAGE_SHIFT - 1) ) + sz = 1UL << (order + PAGE_SHIFT); - if ( c->x86_clflush_size && c->x86_cache_size && - (sz < (c->x86_cache_size >> (PAGE_SHIFT - 10))) ) + if ( c->x86_clflush_size && c->x86_cache_size && sz && + ((sz >> 10) < c->x86_cache_size) ) { - sz <<= PAGE_SHIFT; va = (const void *)((unsigned long)va & ~(sz - 1)); for ( i = 0; i < sz; i += c->x86_clflush_size ) asm volatile ( "clflush %0" diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c index 806e73691d..bac5b2b9e4 100644 --- a/xen/arch/x86/mm.c +++ b/xen/arch/x86/mm.c @@ -3582,7 +3582,8 @@ int map_pages_to_xen( if ( (l2e_get_flags(ol2e) & _PAGE_PRESENT) ) { - unsigned int flush_flags = FLUSH_TLB | FLUSH_LEVEL(2); + unsigned int flush_flags = + FLUSH_TLB | FLUSH_ORDER(PAGETABLE_ORDER); if ( l2e_get_flags(ol2e) & _PAGE_PSE ) { @@ -3627,7 +3628,8 @@ int map_pages_to_xen( } else if ( l2e_get_flags(*pl2e) & _PAGE_PSE ) { - unsigned int flush_flags = FLUSH_TLB | FLUSH_LEVEL(2); + unsigned int flush_flags = + FLUSH_TLB | FLUSH_ORDER(PAGETABLE_ORDER); /* Skip this PTE if there is no change. 
*/ if ( (((l2e_get_pfn(*pl2e) & ~(L1_PAGETABLE_ENTRIES - 1)) + @@ -3663,7 +3665,7 @@ int map_pages_to_xen( l1e_write_atomic(pl1e, l1e_from_pfn(mfn, flags)); if ( (l1e_get_flags(ol1e) & _PAGE_PRESENT) ) { - unsigned int flush_flags = FLUSH_TLB | FLUSH_LEVEL(1); + unsigned int flush_flags = FLUSH_TLB | FLUSH_ORDER(0); if ( l1e_get_flags(ol1e) & _PAGE_GLOBAL ) flush_flags |= FLUSH_TLB_GLOBAL; if ( (l1e_get_flags(ol1e) ^ flags) & PAGE_CACHE_ATTRS ) @@ -3692,7 +3694,8 @@ int map_pages_to_xen( ol2e = *pl2e; l2e_write_atomic(pl2e, l2e_from_pfn(base_mfn, l1f_to_l2f(flags))); - flush_area(virt, FLUSH_TLB_GLOBAL | FLUSH_LEVEL(2)); + flush_area(virt, (FLUSH_TLB_GLOBAL | + FLUSH_ORDER(PAGETABLE_ORDER))); free_xen_pagetable(l2e_to_l1e(ol2e)); } } diff --git a/xen/include/asm-x86/flushtlb.h b/xen/include/asm-x86/flushtlb.h index 7dff2e74bd..09a8f8bd9a 100644 --- a/xen/include/asm-x86/flushtlb.h +++ b/xen/include/asm-x86/flushtlb.h @@ -73,21 +73,17 @@ void write_cr3(unsigned long cr3); /* flush_* flag fields: */ /* - * Area to flush: - * 0 -> flush entire address space - * 1 -> 4kB area containing specified virtual address - * 2 -> 4MB/2MB area containing specified virtual address - * 3 -> 1GB area containing specified virtual address (x86/64 only) + * Area to flush: 2^flush_order pages. Default is flush entire address space. * NB. Multi-page areas do not need to have been mapped with a superpage. */ -#define FLUSH_LEVEL_MASK 0x0f -#define FLUSH_LEVEL(x) (x) +#define FLUSH_ORDER_MASK 0xff +#define FLUSH_ORDER(x) ((x)+1) /* Flush TLBs (or parts thereof) */ -#define FLUSH_TLB 0x10 +#define FLUSH_TLB 0x100 /* Flush TLBs (or parts thereof) including global mappings */ -#define FLUSH_TLB_GLOBAL 0x20 +#define FLUSH_TLB_GLOBAL 0x200 /* Flush data caches */ -#define FLUSH_CACHE 0x40 +#define FLUSH_CACHE 0x400 /* Flush local TLBs/caches. 
*/ void flush_area_local(const void *va, unsigned int flags); @@ -105,13 +101,13 @@ void flush_area_mask(cpumask_t, const void *va, unsigned int flags); #define flush_tlb_local() \ flush_local(FLUSH_TLB) #define flush_tlb_one_local(v) \ - flush_area_local((const void *)(v), FLUSH_TLB|FLUSH_LEVEL(1)) + flush_area_local((const void *)(v), FLUSH_TLB|FLUSH_ORDER(0)) /* Flush specified CPUs' TLBs */ #define flush_tlb_mask(mask) \ flush_mask(mask, FLUSH_TLB) #define flush_tlb_one_mask(mask,v) \ - flush_area_mask(mask, (const void *)(v), FLUSH_TLB|FLUSH_LEVEL(1)) + flush_area_mask(mask, (const void *)(v), FLUSH_TLB|FLUSH_ORDER(0)) /* Flush all CPUs' TLBs */ #define flush_tlb_all() \