(void)munmap(vaddr, PAGE_SIZE);
}
-static int clear_domain_page(unsigned long pfn)
-{
- void *vaddr = map_pfn(pfn);
- if ( vaddr == NULL )
- return -1;
- memset(vaddr, 0, PAGE_SIZE);
- unmap_pfn(vaddr);
- return 0;
-}
-
static int copy_to_domain_page(unsigned long dst_pfn, void *src_page)
{
void *vaddr = map_pfn(dst_pfn);
int dom, int kernel_fd, int initrd_fd, unsigned long tot_pages,
unsigned long virt_load_addr, size_t ksize, dom_meminfo_t *meminfo)
{
+ l1_pgentry_t *vl1tab = NULL, *vl1e = NULL;
+ l2_pgentry_t *vl2tab = NULL, *vl2e = NULL;
unsigned long *page_array = NULL;
page_update_request_t *pgt_update_arr = NULL, *pgt_updates = NULL;
int alloc_index, num_pt_pages;
* of the allocated physical address space.
*/
l2tab = page_array[alloc_index] << PAGE_SHIFT;
- if ( clear_domain_page(page_array[alloc_index]) < 0 )
- goto error_out;
alloc_index--;
meminfo->l2_pgt_addr = l2tab;
meminfo->virt_shinfo_addr = virt_load_addr + (tot_pages << PAGE_SHIFT);
* PTE -- we break out before filling in the entry, as that is done by
* Xen during final setup.
*/
- l2tab += l2_table_offset(virt_load_addr) * sizeof(l2_pgentry_t);
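+ /*
+  * Map the new page-table frames into the builder's own address space and
+  * fill them in directly; the separate clear_domain_page() step is no
+  * longer needed.
+  */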
+ if ( (vl2tab = map_pfn(l2tab >> PAGE_SHIFT)) == NULL )
+ goto error_out;
+ memset(vl2tab, 0, PAGE_SIZE);
+ vl2e = vl2tab + l2_table_offset(virt_load_addr);
for ( count = 0; count < (tot_pages + 1); count++ )
{
- if ( !((unsigned long)l1tab & (PAGE_SIZE-1)) )
+ if ( ((unsigned long)vl1e & (PAGE_SIZE-1)) == 0 )
{
l1tab = page_array[alloc_index] << PAGE_SHIFT;
- if ( clear_domain_page(page_array[alloc_index]) < 0 )
+ if ( (vl1tab = map_pfn(l1tab >> PAGE_SHIFT)) == NULL )
goto error_out;
+ memset(vl1tab, 0, PAGE_SIZE);
alloc_index--;
- l1tab += l1_table_offset(virt_load_addr + (count << PAGE_SHIFT))
- * sizeof(l1_pgentry_t);
+ vl1e = vl1tab + l1_table_offset(virt_load_addr +
+ (count << PAGE_SHIFT));
/* make appropriate entry in the page directory */
- pgt_updates->ptr = l2tab;
+ pgt_updates->ptr = (unsigned long)vl2e;
pgt_updates->val = l1tab | L2_PROT;
pgt_updates++;
num_pgt_updates++;
- l2tab += sizeof(l2_pgentry_t);
+ vl2e++;
}
/* The last PTE we consider is filled in later by Xen. */
if ( count < pt_start )
{
- pgt_updates->ptr = l1tab;
+ pgt_updates->ptr = (unsigned long)vl1e;
pgt_updates->val = (page_array[count] << PAGE_SHIFT) | L1_PROT;
pgt_updates++;
num_pgt_updates++;
- l1tab += sizeof(l1_pgentry_t);
+ vl1e++;
}
else
{
- pgt_updates->ptr = l1tab;
+ pgt_updates->ptr = (unsigned long)vl1e;
pgt_updates->val =
((page_array[count] << PAGE_SHIFT) | L1_PROT) & ~_PAGE_RW;
pgt_updates++;
num_pgt_updates++;
- l1tab += sizeof(l1_pgentry_t);
+ vl1e++;
}
pgt_updates->ptr =
static void put_page(unsigned long page_nr, int writeable);
static int dec_page_refcnt(unsigned long page_nr, unsigned int type);
-static int mod_l2_entry(unsigned long, l2_pgentry_t);
-static int mod_l1_entry(unsigned long, l1_pgentry_t);
+static int mod_l2_entry(l2_pgentry_t *, l2_pgentry_t);
+static int mod_l1_entry(l1_pgentry_t *, l1_pgentry_t);
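+/* NB. These now take the guest virtual address of the entry, not a machine address. */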
/* frame table size and its size in pages */
frame_table_t * frame_table;
}
-static int mod_l2_entry(unsigned long pa, l2_pgentry_t new_l2_entry)
+static int mod_l2_entry(l2_pgentry_t *p_l2_entry, l2_pgentry_t new_l2_entry)
{
- l2_pgentry_t *p_l2_entry, old_l2_entry;
-
- p_l2_entry = map_domain_mem(pa);
- old_l2_entry = *p_l2_entry;
+ l2_pgentry_t old_l2_entry = *p_l2_entry;
if ( (((unsigned long)p_l2_entry & (PAGE_SIZE-1)) >> 2) >=
DOMAIN_ENTRIES_PER_L2_PAGETABLE )
}
/* Assume we're mapping an L1 table, falling back to twisted L2. */
- if ( get_l1_table(l2_pgentry_to_pagenr(new_l2_entry)) &&
- get_twisted_l2_table(pa >> PAGE_SHIFT, new_l2_entry) )
- goto fail;
+ if ( unlikely(get_l1_table(l2_pgentry_to_pagenr(new_l2_entry))) )
+ {
+ /* NB. No need to sanity-check the VA: done already. */
+ unsigned long l1e = l1_pgentry_val(
+ linear_pg_table[(unsigned long)p_l2_entry >> PAGE_SHIFT]);
+ if ( get_twisted_l2_table(l1e >> PAGE_SHIFT, new_l2_entry) )
+ goto fail;
+ }
}
}
else if ( (l2_pgentry_val(old_l2_entry) & _PAGE_PRESENT) )
}
*p_l2_entry = new_l2_entry;
- unmap_domain_mem(p_l2_entry);
return 0;
fail:
- unmap_domain_mem(p_l2_entry);
return -1;
}
-static int mod_l1_entry(unsigned long pa, l1_pgentry_t new_l1_entry)
+static int mod_l1_entry(l1_pgentry_t *p_l1_entry, l1_pgentry_t new_l1_entry)
{
- l1_pgentry_t *p_l1_entry, old_l1_entry;
-
- p_l1_entry = map_domain_mem(pa);
- old_l1_entry = *p_l1_entry;
+ l1_pgentry_t old_l1_entry = *p_l1_entry;
if ( (l1_pgentry_val(new_l1_entry) & _PAGE_PRESENT) )
{
}
*p_l1_entry = new_l1_entry;
- unmap_domain_mem(p_l1_entry);
return 0;
fail:
- unmap_domain_mem(p_l1_entry);
return -1;
}
int do_process_page_updates(page_update_request_t *ureqs, int count)
{
page_update_request_t req;
- unsigned long flags, pfn, *ptr;
+ unsigned long flags, pfn, l1e;
struct pfn_info *page;
int err = 0, i;
unsigned int cmd;
+ unsigned long cr0 = read_cr0();
+
+ /* Clear the WP bit so that we can write even read-only page mappings. */
+ write_cr0(cr0 & ~X86_CR0_WP);
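+ /* (Updates are written through guest-supplied virtual addresses, which may
+  * map the page tables read-only; with CR0.WP clear, supervisor-mode writes
+  * ignore that restriction.) */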
for ( i = 0; i < count; i++ )
{
}
cmd = req.ptr & (sizeof(l1_pgentry_t)-1);
-
- /* All normal commands must have 'ptr' in range. */
pfn = req.ptr >> PAGE_SHIFT;
- if ( (pfn >= max_page) && (cmd != PGREQ_EXTENDED_COMMAND) )
- {
- MEM_LOG("Page out of range (%08lx > %08lx)", pfn, max_page);
- kill_domain_with_errmsg("Page update request out of range");
- }
err = 1;
- /* Least significant bits of 'ptr' demux the operation type. */
spin_lock_irq(&current->page_lock);
+
+ /* Get the page-frame number that a non-extended command references. */
+ if ( likely(cmd != PGREQ_EXTENDED_COMMAND) )
+ {
+ if ( likely(cmd != PGREQ_MPT_UPDATE) )
+ {
+ /* Need to use 'get_user' since the VA's PGD may be absent. */
+ __get_user(l1e, (unsigned long *)(linear_pg_table+pfn));
+ /* Now check that the VA's PTE isn't absent. */
+ if ( !(l1e & _PAGE_PRESENT) )
+ {
+ MEM_LOG("L1E n.p. at VA %08lx (%08lx)", req.ptr&~3, l1e);
+ goto unlock;
+ }
+ /* Finally, get the underlying machine address. */
+ pfn = l1e >> PAGE_SHIFT;
+ }
+ else if ( pfn >= max_page )
+ {
+ MEM_LOG("Page out of range (%08lx > %08lx)", pfn, max_page);
+ goto unlock;
+ }
+ }
+
+ /* Least significant bits of 'ptr' demux the operation type. */
switch ( cmd )
{
/*
- * PGREQ_NORMAL: Normal update to any level of page table.
+ * PGREQ_NORMAL_UPDATE: Normal update to any level of page table.
*/
- case PGREQ_NORMAL:
- page = frame_table + pfn;
+ case PGREQ_NORMAL_UPDATE:
+ page = frame_table + pfn;
flags = page->flags;
if ( DOMAIN_OKAY(flags) )
switch ( (flags & PG_type_mask) )
{
case PGT_l1_page_table:
- err = mod_l1_entry(req.ptr, mk_l1_pgentry(req.val));
+ err = mod_l1_entry((l1_pgentry_t *)req.ptr,
+ mk_l1_pgentry(req.val));
break;
case PGT_l2_page_table:
- err = mod_l2_entry(req.ptr, mk_l2_pgentry(req.val));
+ err = mod_l2_entry((l2_pgentry_t *)req.ptr,
+ mk_l2_pgentry(req.val));
break;
default:
MEM_LOG("Update to non-pt page %08lx", req.ptr);
- ptr = map_domain_mem(req.ptr);
- *ptr = req.val;
- unmap_domain_mem(ptr);
+ *(unsigned long *)req.ptr = req.val;
err = 0;
break;
}
}
break;
+ case PGREQ_UNCHECKED_UPDATE:
+ req.ptr &= ~(sizeof(l1_pgentry_t) - 1);
+ if ( IS_PRIV(current) )
+ {
+ *(unsigned long *)req.ptr = req.val;
+ err = 0;
+ }
+ else
+ {
+ MEM_LOG("Bad unchecked update attempt");
+ }
+ break;
+
case PGREQ_MPT_UPDATE:
page = frame_table + pfn;
if ( DOMAIN_OKAY(page->flags) )
err = do_extended_command(req.ptr, req.val);
break;
- case PGREQ_UNCHECKED_UPDATE:
- req.ptr &= ~(sizeof(l1_pgentry_t) - 1);
- if ( current->domain == 0 )
- {
- ptr = map_domain_mem(req.ptr);
- *ptr = req.val;
- unmap_domain_mem(ptr);
- err = 0;
- }
- else
- {
- MEM_LOG("Bad unchecked update attempt");
- }
- break;
-
default:
MEM_LOG("Invalid page update command %08lx", req.ptr);
break;
}
+
+ unlock:
spin_unlock_irq(&current->page_lock);
if ( err )
- {
kill_domain_with_errmsg("Illegal page update request");
- }
ureqs++;
}
}
+ /* Restore the WP bit before returning to guest. */
+ write_cr0(cr0);
+
return 0;
}
#include <asm/bitops.h>
#include <asm/flushtlb.h>
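+/*
+ * The linear page table: assuming the page tables are self-mapped at
+ * LINEAR_PT_VIRT_START in the usual way, every L1 entry is visible as one
+ * contiguous array, so a VA's PTE is linear_pg_table[va >> PAGE_SHIFT].
+ */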
+#define linear_pg_table ((l1_pgentry_t *)LINEAR_PT_VIRT_START)
+
extern l2_pgentry_t idle0_pg_table[ENTRIES_PER_L2_PAGETABLE];
extern l2_pgentry_t *idle_pg_table[NR_CPUS];
extern void paging_init(void);
*
* PGREQ_XXX: specified in least 2 bits of 'ptr' field. These bits are masked
* off to get the real 'ptr' value.
- * All requests specify relevent machine address in 'ptr'.
+ * All requests specify the relevant address in 'ptr'. This is either a
+ * machine/physical address (PA) or a linear/virtual address (VA).
* Normal requests specify update value in 'value'.
* Extended requests specify command in least 8 bits of 'value'. These bits
* are masked off to get the real 'val' value. Except for PGEXT_SET_LDT
* which shifts the least bits out.
*/
/* A normal page-table update request. */
-#define PGREQ_NORMAL 0 /* does a checked form of '*ptr = val' */
+#define PGREQ_NORMAL_UPDATE 0 /* checked '*ptr = val'. ptr is VA. */
+/* DOM0 can make entirely unchecked updates which do not affect refcnts. */
+#define PGREQ_UNCHECKED_UPDATE 1 /* unchecked '*ptr = val'. ptr is VA. */
/* Update an entry in the machine->physical mapping table. */
-#define PGREQ_MPT_UPDATE 1 /* ptr = frame to modify table entry for */
+#define PGREQ_MPT_UPDATE 2 /* ptr = PA of frame to modify entry for */
/* An extended command. */
-#define PGREQ_EXTENDED_COMMAND 2 /* least 8 bits of val demux further */
-/* DOM0 can make entirely unchecked updates which do not affect refcnts. */
-#define PGREQ_UNCHECKED_UPDATE 3 /* does an unchecked '*ptr = val' */
+#define PGREQ_EXTENDED_COMMAND 3 /* least 8 bits of val demux further */
/* Extended commands: */
-#define PGEXT_PIN_L1_TABLE 0 /* ptr = frame to pin */
-#define PGEXT_PIN_L2_TABLE 1 /* ptr = frame to pin */
-#define PGEXT_PIN_L3_TABLE 2 /* ptr = frame to pin */
-#define PGEXT_PIN_L4_TABLE 3 /* ptr = frame to pin */
-#define PGEXT_UNPIN_TABLE 4 /* ptr = frame to unpin */
-#define PGEXT_NEW_BASEPTR 5 /* ptr = new pagetable base to install */
-#define PGEXT_TLB_FLUSH 6 /* ptr = NULL */
-#define PGEXT_INVLPG 7 /* ptr = NULL ; val = page to invalidate */
-#define PGEXT_SET_LDT 8 /* ptr = linear address; val = # entries */
-#define PGEXT_CMD_MASK 255
-#define PGEXT_CMD_SHIFT 8
-
+#define PGEXT_PIN_L1_TABLE 0 /* ptr = PA of frame to pin */
+#define PGEXT_PIN_L2_TABLE 1 /* ptr = PA of frame to pin */
+#define PGEXT_PIN_L3_TABLE 2 /* ptr = PA of frame to pin */
+#define PGEXT_PIN_L4_TABLE 3 /* ptr = PA of frame to pin */
+#define PGEXT_UNPIN_TABLE 4 /* ptr = PA of frame to unpin */
+#define PGEXT_NEW_BASEPTR 5 /* ptr = PA of new pagetable base */
+#define PGEXT_TLB_FLUSH 6 /* ptr = NULL */
+#define PGEXT_INVLPG 7 /* ptr = NULL ; val = page to invalidate */
+#define PGEXT_SET_LDT 8 /* ptr = VA of table; val = # entries */
+#define PGEXT_CMD_MASK 255
+#define PGEXT_CMD_SHIFT 8
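+
+/*
+ * Example (sketch, guest side): queue a checked L1 update by passing the
+ * virtual address of the PTE; the low two bits of 'ptr' carry the command.
+ * (ptep, pfn and L1_PROT below are illustrative names.)
+ *
+ *     page_update_request_t req;
+ *     req.ptr = (unsigned long)ptep | PGREQ_NORMAL_UPDATE;
+ *     req.val = (pfn << PAGE_SHIFT) | L1_PROT;
+ *     HYPERVISOR_pt_update(&req, 1);
+ */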
/*
* Master "switch" for enabling/disabling event delivery.
static struct proc_dir_entry *balloon_pde;
unsigned long credit;
-static inline unsigned long get_ppte(unsigned long addr)
+static inline pte_t *get_ptep(unsigned long addr)
{
- unsigned long ppte;
pgd_t *pgd; pmd_t *pmd; pte_t *ptep;
pgd = pgd_offset_k(addr);
if ( pmd_none(*pmd) || pmd_bad(*pmd) ) BUG();
ptep = pte_offset(pmd, addr);
- ppte = (unsigned long)__pa(ptep);
- return ppte;
+ return ptep;
}
/* main function for relinquishing bit of memory */
{
curraddr = *currp;
*currp = virt_to_machine(*currp) >> PAGE_SHIFT;
- queue_l1_entry_update(get_ppte(curraddr) | PGREQ_NORMAL, 0);
+ queue_l1_entry_update(get_ptep(curraddr), 0);
phys_to_machine_mapping[__pa(curraddr) >> PAGE_SHIFT] = DEAD;
currp++;
}
{
if ( phys_to_machine_mapping[i] == DEAD )
{
- printk(KERN_ALERT "bd240 debug: proc_new_pages: i %lx, mpt %lx, %lx\n", i, i << PAGE_SHIFT, get_ppte((unsigned long)__va(i << PAGE_SHIFT)) | PGREQ_NORMAL);
phys_to_machine_mapping[i] = *curr;
- queue_l1_entry_update((i << PAGE_SHIFT) | PGREQ_MPT_UPDATE, i);
queue_l1_entry_update(
- get_ppte((unsigned long)__va(i << PAGE_SHIFT)) | PGREQ_NORMAL,
+ (pte_t *)((i << PAGE_SHIFT) | PGREQ_MPT_UPDATE), i);
+ queue_l1_entry_update(
+ get_ptep((unsigned long)__va(i << PAGE_SHIFT)),
((*curr) << PAGE_SHIFT) | L1_PROT);
*curr = (unsigned long)__va(i << PAGE_SHIFT);
pte = update_debug_queue[i].ptep;
if ( pte == NULL ) continue;
update_debug_queue[i].ptep = NULL;
- update.ptr = phys_to_machine(__pa(pte));
+ update.ptr = (unsigned long)pte;
update.val = update_debug_queue[i].pteval;
HYPERVISOR_pt_update(&update, 1);
}
}
-static void DEBUG_disallow_pt_read(unsigned long pa)
+static void DEBUG_disallow_pt_read(unsigned long va)
{
pte_t *pte;
pmd_t *pmd;
* That's okay -- it'll get fixed up in the fault handler.
*/
page_update_request_t update;
- unsigned long va = (unsigned long)__va(pa);
pgd = pgd_offset_k(va);
pmd = pmd_offset(pgd, va);
pte = pte_offset(pmd, va);
- update.ptr = phys_to_machine(__pa(pte));
+ update.ptr = (unsigned long)pte;
pteval = *(unsigned long *)pte;
update.val = pteval & ~_PAGE_PRESENT;
HYPERVISOR_pt_update(&update, 1);
if ( unlikely(idx == QUEUE_SIZE) ) __flush_page_update_queue();
}
-void queue_l1_entry_update(unsigned long ptr, unsigned long val)
+void queue_l1_entry_update(pte_t *ptr, unsigned long val)
{
unsigned long flags;
spin_lock_irqsave(&update_lock, flags);
#if PT_UPDATE_DEBUG > 0
- DEBUG_disallow_pt_read(ptr);
+ DEBUG_disallow_pt_read((unsigned long)ptr);
#endif
- update_queue[idx].ptr = phys_to_machine(ptr);
+ update_queue[idx].ptr = (unsigned long)ptr;
update_queue[idx].val = val;
increment_index();
spin_unlock_irqrestore(&update_lock, flags);
}
-void queue_l2_entry_update(unsigned long ptr, unsigned long val)
+void queue_l2_entry_update(pmd_t *ptr, unsigned long val)
{
unsigned long flags;
spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = phys_to_machine(ptr);
+ update_queue[idx].ptr = (unsigned long)ptr;
update_queue[idx].val = val;
increment_index();
spin_unlock_irqrestore(&update_lock, flags);
pmd_t *pmd;
pte_t *pte;
- pgd = init_mm.pgd + __pgd_offset(vaddr);
+ pgd = init_mm.pgd + __pgd_offset(vaddr);
if (pgd_none(*pgd)) {
printk("PAE BUG #00!\n");
return;
pgprot_val(prot) = pgprot_val(PAGE_KERNEL) | pgprot_val(flags);
/* We queue directly, avoiding hidden phys->machine translation. */
- queue_l1_entry_update(__pa(pte), phys | pgprot_val(prot));
+ queue_l1_entry_update(pte, phys | pgprot_val(prot));
/*
* It's enough to flush this one mapping.
kpgd = pgd_offset_k((unsigned long)pte);
kpmd = pmd_offset(kpgd, (unsigned long)pte);
kpte = pte_offset(kpmd, (unsigned long)pte);
- queue_l1_entry_update(__pa(kpte),
+ queue_l1_entry_update(kpte,
(*(unsigned long *)kpte)&~_PAGE_RW);
set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte)));
#if defined(CONFIG_XENO_PRIV)
-#define direct_set_pte(pteptr, pteval) \
- queue_l1_entry_update(__pa(pteptr)|PGREQ_UNCHECKED_UPDATE, (pteval).pte_low)
+#define direct_set_pte(_p, _v) queue_unchecked_pt_update((_p), (_v).pte_low)
#define __direct_pte(x) ((pte_t) { (x) } )
#define __direct_mk_pte(page_nr,pgprot) \
__direct_pte(((page_nr) << PAGE_SHIFT) | pgprot_val(pgprot))
#include <linux/types.h>
#include <asm/hypervisor-ifs/hypervisor-if.h>
#include <asm/ptrace.h>
+#include <asm/page.h>
/* arch/xeno/kernel/setup.c */
union start_info_union
extern unsigned int pt_update_queue_idx;
-void queue_l1_entry_update(unsigned long ptr, unsigned long val);
-void queue_l2_entry_update(unsigned long ptr, unsigned long val);
+void queue_l1_entry_update(pte_t *ptr, unsigned long val);
+void queue_l2_entry_update(pmd_t *ptr, unsigned long val);
void queue_pt_switch(unsigned long ptr);
void queue_tlb_flush(void);
void queue_invlpg(unsigned long ptr);
void queue_set_ldt(unsigned long ptr, unsigned long bytes);
#define PT_UPDATE_DEBUG 0
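+/*
+ * Unchecked updates (DOM0 only) are tagged by setting PGREQ_UNCHECKED_UPDATE
+ * in the low bits of the word-aligned PTE pointer; Xen masks these bits off
+ * before performing the write.
+ */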
+#define queue_unchecked_pt_update(_p,_v) queue_l1_entry_update( \
+ (pte_t *)((unsigned long)(_p)|PGREQ_UNCHECKED_UPDATE),(_v))
+
#if PT_UPDATE_DEBUG > 0
typedef struct {
- unsigned long ptr, val, pteval;
+ void *ptr;
+ unsigned long val, pteval;
void *ptep;
int line; char *file;
} page_update_debug_t;
update_debug_queue[pt_update_queue_idx].line = __LINE__; \
update_debug_queue[pt_update_queue_idx].file = __FILE__; \
printk("L1 %s %d: %08lx (%08lx -> %08lx)\n", __FILE__, __LINE__, \
- phys_to_machine(_p), *(unsigned long *)__va(_p), \
+ (unsigned long)(_p), pte_val(*(_p)), \
(unsigned long)(_v)); \
queue_l1_entry_update((_p),(_v)); \
})
update_debug_queue[pt_update_queue_idx].line = __LINE__; \
update_debug_queue[pt_update_queue_idx].file = __FILE__; \
printk("L2 %s %d: %08lx (%08lx -> %08lx)\n", __FILE__, __LINE__, \
- phys_to_machine(_p), *(unsigned long *)__va(_p), \
+ (unsigned long)(_p), pmd_val(*(_p)), \
(unsigned long)(_v)); \
queue_l2_entry_update((_p),(_v)); \
})
#ifndef __ASSEMBLY__
#include <linux/config.h>
-#include <asm/hypervisor.h>
#ifdef CONFIG_X86_USE_3DNOW
static inline int pgd_present(pgd_t pgd) { return 1; }
#define pgd_clear(xp) do { } while (0)
-#define set_pte(pteptr, pteval) queue_l1_entry_update(__pa(pteptr), (pteval).pte_low)
-#define set_pte_atomic(pteptr, pteval) queue_l1_entry_update(__pa(pteptr), (pteval).pte_low)
-#define set_pmd(pmdptr, pmdval) queue_l2_entry_update(__pa(pmdptr), (pmdval).pmd)
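+/* These queue the guest virtual address of the entry; the old implicit
+   phys_to_machine() translation of the pointer no longer happens. */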
+#define set_pte(pteptr, pteval) queue_l1_entry_update(pteptr, (pteval).pte_low)
+#define set_pte_atomic(pteptr, pteval) queue_l1_entry_update(pteptr, (pteval).pte_low)
+#define set_pmd(pmdptr, pmdval) queue_l2_entry_update((pmdptr), (pmdval).pmd)
#define set_pgd(pgdptr, pgdval) ((void)0)
#define pgd_page(pgd) \
static inline pte_t ptep_get_and_clear(pte_t *xp)
{
pte_t pte = *xp;
- queue_l1_entry_update(__pa(xp), 0);
+ queue_l1_entry_update(xp, 0);
return pte;
}
#define __S111 PAGE_SHARED
#define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))
-#define pte_clear(xp) queue_l1_entry_update(__pa(xp), 0)
+#define pte_clear(xp) queue_l1_entry_update(xp, 0)
#define pmd_none(x) (!(x).pmd)
#define pmd_present(x) ((x).pmd & _PAGE_PRESENT)
{
unsigned long pteval = *(unsigned long *)ptep;
int ret = pteval & _PAGE_DIRTY;
- if ( ret ) queue_l1_entry_update(__pa(ptep), pteval & ~_PAGE_DIRTY);
+ if ( ret ) queue_l1_entry_update(ptep, pteval & ~_PAGE_DIRTY);
return ret;
}
static inline int ptep_test_and_clear_young(pte_t *ptep)
{
unsigned long pteval = *(unsigned long *)ptep;
int ret = pteval & _PAGE_ACCESSED;
- if ( ret ) queue_l1_entry_update(__pa(ptep), pteval & ~_PAGE_ACCESSED);
+ if ( ret ) queue_l1_entry_update(ptep, pteval & ~_PAGE_ACCESSED);
return ret;
}
static inline void ptep_set_wrprotect(pte_t *ptep)
{
unsigned long pteval = *(unsigned long *)ptep;
if ( (pteval & _PAGE_RW) )
- queue_l1_entry_update(__pa(ptep), pteval & ~_PAGE_RW);
+ queue_l1_entry_update(ptep, pteval & ~_PAGE_RW);
}
static inline void ptep_mkdirty(pte_t *ptep)
{
unsigned long pteval = *(unsigned long *)ptep;
if ( !(pteval & _PAGE_DIRTY) )
- queue_l1_entry_update(__pa(ptep), pteval | _PAGE_DIRTY);
+ queue_l1_entry_update(ptep, pteval | _PAGE_DIRTY);
}
/*
pgd_t *pgd = pgd_offset_k((unsigned long)va);
pmd_t *pmd = pmd_offset(pgd, (unsigned long)va);
pte_t *pte = pte_offset(pmd, (unsigned long)va);
- queue_l1_entry_update(__pa(pte), (*(unsigned long *)pte)&~_PAGE_RW);
+ queue_l1_entry_update(pte, (*(unsigned long *)pte)&~_PAGE_RW);
}
static inline void __make_page_writeable(void *va)
pgd_t *pgd = pgd_offset_k((unsigned long)va);
pmd_t *pmd = pmd_offset(pgd, (unsigned long)va);
pte_t *pte = pte_offset(pmd, (unsigned long)va);
- queue_l1_entry_update(__pa(pte), (*(unsigned long *)pte)|_PAGE_RW);
+ queue_l1_entry_update(pte, (*(unsigned long *)pte)|_PAGE_RW);
}
static inline void make_page_readonly(void *va)
pgd_t *pgd = pgd_offset_k((unsigned long)va);
pmd_t *pmd = pmd_offset(pgd, (unsigned long)va);
pte_t *pte = pte_offset(pmd, (unsigned long)va);
- queue_l1_entry_update(__pa(pte), (*(unsigned long *)pte)&~_PAGE_RW);
+ queue_l1_entry_update(pte, (*(unsigned long *)pte)&~_PAGE_RW);
if ( (unsigned long)va >= VMALLOC_START )
__make_page_readonly(machine_to_virt(
*(unsigned long *)pte&PAGE_MASK));
pgd_t *pgd = pgd_offset_k((unsigned long)va);
pmd_t *pmd = pmd_offset(pgd, (unsigned long)va);
pte_t *pte = pte_offset(pmd, (unsigned long)va);
- queue_l1_entry_update(__pa(pte), (*(unsigned long *)pte)|_PAGE_RW);
+ queue_l1_entry_update(pte, (*(unsigned long *)pte)|_PAGE_RW);
if ( (unsigned long)va >= VMALLOC_START )
__make_page_writeable(machine_to_virt(
*(unsigned long *)pte&PAGE_MASK));
struct page *page = pte_page(pte);
#if defined(CONFIG_XENO_PRIV)
if (pte_io(pte)) {
- queue_l1_entry_update(
- __pa(ptep)|PGREQ_UNCHECKED_UPDATE, 0);
+ queue_unchecked_pt_update(ptep, 0);
continue;
}
#endif