Clean up the dom_mem_op hypercall: remove the dom_mem_op_t argument structure
(and its header, dom_mem_ops.h) and pass (op, pages, nr_pages) directly as
hypercall arguments. Rename MEMOP_RESERVATION_INCREASE/DECREASE to
MEMOP_increase_reservation/MEMOP_decrease_reservation and update all callers
(balloon driver, netback, netfront, hypercall stubs) accordingly.
404f1bc7IwU-qnH8mJeVu0YsNGMrcw xen/include/hypervisor-ifs/arch-x86_64/hypervisor-if.h
3ddb79c2YTaZwOqWin9-QNgHge5RVw xen/include/hypervisor-ifs/block.h
3ddb79c2PMeWTK86y4C3F4MzHw4A1g xen/include/hypervisor-ifs/dom0_ops.h
-3e6377eaioRoNm0m_HSDEAd4Vqrq_w xen/include/hypervisor-ifs/dom_mem_ops.h
403cd194j2pyLqXD8FJ-ukvZzkPenw xen/include/hypervisor-ifs/event_channel.h
3ddb79c25UE59iu4JJcbRalx95mvcg xen/include/hypervisor-ifs/hypervisor-if.h
3ead095dE_VF-QA88rl_5cWYRWtRVQ xen/include/hypervisor-ifs/kbd.h
#include <xen/types.h>
#include <xen/lib.h>
#include <xen/mm.h>
-#include <hypervisor-ifs/dom_mem_ops.h>
#include <xen/perfc.h>
#include <xen/sched.h>
#include <xen/event.h>
#include <asm/domain_page.h>
-static long alloc_dom_mem(struct task_struct *p, reservation_increase_t op)
+static long alloc_dom_mem(struct task_struct *p,
+ unsigned long *pages,
+ unsigned long nr_pages)
{
struct pfn_info *page;
unsigned long i;
/* Leave some slack pages; e.g., for the network. */
- if ( unlikely(free_pfns < (op.size + (SLACK_DOMAIN_MEM_KILOBYTES >>
- (PAGE_SHIFT-10)))) )
+ if ( unlikely(free_pfns < (nr_pages + (SLACK_DOMAIN_MEM_KILOBYTES >>
+ (PAGE_SHIFT-10)))) )
{
DPRINTK("Not enough slack: %u %u\n",
free_pfns,
return 0;
}
- for ( i = 0; i < op.size; i++ )
+ for ( i = 0; i < nr_pages; i++ )
{
/* NB. 'alloc_domain_page' does limit-checking on pages per domain. */
if ( unlikely((page = alloc_domain_page(p)) == NULL) )
}
/* Inform the domain of the new page's machine address. */
- if ( unlikely(put_user(page_to_pfn(page), &op.pages[i]) != 0) )
+ if ( unlikely(put_user(page_to_pfn(page), &pages[i]) != 0) )
break;
}
return i;
}
-static long free_dom_mem(struct task_struct *p, reservation_decrease_t op)
+static long free_dom_mem(struct task_struct *p,
+ unsigned long *pages,
+ unsigned long nr_pages)
{
struct pfn_info *page;
unsigned long i, mpfn;
long rc = 0;
- for ( i = 0; i < op.size; i++ )
+ for ( i = 0; i < nr_pages; i++ )
{
- if ( unlikely(get_user(mpfn, &op.pages[i]) != 0) )
+ if ( unlikely(get_user(mpfn, &pages[i]) != 0) )
break;
if ( unlikely(mpfn >= max_page) )
put_page(page);
}
- return rc ? rc : op.size;
+ return rc ? rc : nr_pages;
}
-long do_dom_mem_op(dom_mem_op_t *mem_op)
+long do_dom_mem_op(unsigned int op, void *pages, unsigned long nr_pages)
{
- dom_mem_op_t dmop;
- unsigned long ret;
+ if ( op == MEMOP_increase_reservation )
+ return alloc_dom_mem(current, pages, nr_pages);
- if ( copy_from_user(&dmop, mem_op, sizeof(dom_mem_op_t)) )
- return -EFAULT;
+ if ( op == MEMOP_decrease_reservation )
+ return free_dom_mem(current, pages, nr_pages);
- switch ( dmop.op )
- {
- case MEMOP_RESERVATION_INCREASE:
- ret = alloc_dom_mem(current, dmop.u.increase);
- break;
-
- case MEMOP_RESERVATION_DECREASE:
- ret = free_dom_mem(current, dmop.u.decrease);
- break;
-
- default:
- ret = -ENOSYS;
- break;
- }
-
- return ret;
+ return -ENOSYS;
}
{
unsigned char vector; /* exception vector */
unsigned char flags; /* 0-3: privilege level; 4: clear event enable? */
- unsigned short cs; /* code selector */
+ unsigned short cs; /* code selector */
unsigned long address; /* code address */
} trap_info_t;
* installing their own GDT.
*/
-#define FLAT_RING3_CS32 0x0823 /* GDT index 260 */
-#define FLAT_RING3_CS64 0x082b /* GDT index 261 */
-#define FLAT_RING3_DS 0x0833 /* GDT index 262 */
+#define FLAT_RING3_CS32 0x0823 /* GDT index 260 */
+#define FLAT_RING3_CS64 0x082b /* GDT index 261 */
+#define FLAT_RING3_DS 0x0833 /* GDT index 262 */
#define FLAT_GUESTOS_DS FLAT_RING3_DS
#define FLAT_GUESTOS_CS FLAT_RING3_CS64
/* And the trap vector is... */
#define TRAP_INSTR "syscall"
-
+/* The machine->physical mapping table starts at this address, read-only. */
#ifndef machine_to_phys_mapping
#define machine_to_phys_mapping ((unsigned long *)0xffff810000000000ULL)
#endif
{
unsigned char vector; /* exception vector */
unsigned char flags; /* 0-3: privilege level; 4: clear event enable? */
- unsigned short cs; /* code selector */
+ unsigned short cs; /* code selector */
unsigned long address; /* code address */
} trap_info_t;
+++ /dev/null
-/******************************************************************************
- * dom_mem_ops.h
- *
- * Guest OS operations dealing with physical memory reservations.
- *
- * Copyright (c) 2003, B Dragovic & K A Fraser.
- */
-
-#define MEMOP_RESERVATION_INCREASE 0
-#define MEMOP_RESERVATION_DECREASE 1
-
-typedef struct reservation_increase {
- unsigned long size;
- unsigned long * pages;
-} reservation_increase_t;
-
-typedef struct reservation_decrease {
- unsigned long size;
- unsigned long * pages;
-} reservation_decrease_t;
-
-typedef struct dom_mem_op
-{
- unsigned int op;
- union
- {
- reservation_increase_t increase;
- reservation_decrease_t decrease;
- } u;
-} dom_mem_op_t;
*
* Virtual interrupts that a guest OS may receive from the hypervisor.
*/
-
#define VIRQ_BLKDEV 0 /* A block device response has been queued. */
#define VIRQ_TIMER 1 /* A timeout has been updated. */
#define VIRQ_DIE 2 /* OS is about to be killed. Clean up please! */
/*
- * SCHEDOP_* - Scheduler hypercall operations.
+ * Commands to HYPERVISOR_sched_op().
*/
#define SCHEDOP_yield 0 /* Give up the CPU voluntarily. */
#define SCHEDOP_block 1 /* Block until an event is received. */
#define CONSOLEIO_write 0
#define CONSOLEIO_read 1
+/*
+ * Commands to HYPERVISOR_dom_mem_op().
+ */
+#define MEMOP_increase_reservation 0
+#define MEMOP_decrease_reservation 1
+
#ifndef __ASSEMBLY__
typedef u64 domid_t;
#include <asm/uaccess.h>
#include <asm/tlb.h>
-#include <asm/hypervisor-ifs/dom_mem_ops.h>
-
/* USER DEFINES -- THESE SHOULD BE COPIED TO USER-SPACE TOOLS */
#define USER_INFLATE_BALLOON 1 /* return mem to hypervisor */
#define USER_DEFLATE_BALLOON 2 /* claim mem from hypervisor */
/* main function for relinquishing bit of memory */
static unsigned long inflate_balloon(unsigned long num_pages)
{
- dom_mem_op_t dom_mem_op;
unsigned long *parray;
unsigned long *currp;
unsigned long curraddr;
unsigned long vaddr;
unsigned long i, j;
- parray = (unsigned long *)kmalloc(num_pages *
- sizeof(unsigned long), GFP_KERNEL);
+ parray = (unsigned long *)kmalloc(num_pages * sizeof(unsigned long),
+ GFP_KERNEL);
currp = parray;
for ( i = 0; i < num_pages; i++ )
{
- /* try to obtain a free page, has to be done with GFP_ATOMIC
- * as we do not want to sleep indefinately.
- */
+ /* Try to obtain a free page (has to be done with GFP_ATOMIC). */
vaddr = __get_free_page(GFP_ATOMIC);
- /* if allocation fails, free all reserved pages */
- if(!vaddr){
+ /* If allocation fails then free all reserved pages. */
+ if ( vaddr == 0 )
+ {
printk("Unable to inflate balloon by %ld, only %ld pages free.",
num_pages, i);
currp = parray;
XEN_flush_page_update_queue();
- dom_mem_op.op = MEMOP_RESERVATION_DECREASE;
- dom_mem_op.u.decrease.size = num_pages;
- dom_mem_op.u.decrease.pages = parray;
- if ( (ret = HYPERVISOR_dom_mem_op(&dom_mem_op)) != num_pages )
+ ret = HYPERVISOR_dom_mem_op(MEMOP_decrease_reservation,
+ parray, num_pages);
+ if ( unlikely(ret != num_pages) )
{
printk("Unable to inflate balloon, error %lx\n", ret);
goto cleanup;
unsigned long deflate_balloon(unsigned long num_pages)
{
- dom_mem_op_t dom_mem_op;
unsigned long ret;
unsigned long * parray;
- printk(KERN_ALERT "bd240 debug: deflate balloon called for %lx pages\n", num_pages);
-
if ( num_pages > credit )
{
printk("Can not allocate more pages than previously released.\n");
parray = (unsigned long *)kmalloc(num_pages * sizeof(unsigned long),
GFP_KERNEL);
- dom_mem_op.op = MEMOP_RESERVATION_INCREASE;
- dom_mem_op.u.increase.size = num_pages;
- dom_mem_op.u.increase.pages = parray;
- if((ret = HYPERVISOR_dom_mem_op(&dom_mem_op)) != num_pages){
+ ret = HYPERVISOR_dom_mem_op(MEMOP_increase_reservation,
+ parray, num_pages);
+ if ( unlikely(ret != num_pages) )
+ {
printk("Unable to deflate balloon, error %lx\n", ret);
goto cleanup;
}
- if((ret = process_new_pages(parray, num_pages)) < num_pages){
+ if ( (ret = process_new_pages(parray, num_pages)) < num_pages )
+ {
printk("Unable to deflate balloon by specified %lx pages, only %lx.\n",
num_pages, ret);
goto cleanup;
credit = 0;
- balloon_pde = create_xen_proc_entry("balloon", 0600);
- if ( balloon_pde == NULL )
+ if ( (balloon_pde = create_xen_proc_entry("balloon", 0600)) == NULL )
{
printk(KERN_ALERT "Unable to create balloon driver proc entry!");
return -1;
*/
#include "common.h"
-#include <asm/hypervisor-ifs/dom_mem_ops.h>
static void netif_page_release(struct page *page);
static void make_tx_response(netif_t *netif,
static void __refresh_mfn_list(void)
{
- int ret;
- dom_mem_op_t op;
- op.op = MEMOP_RESERVATION_INCREASE;
- op.u.increase.size = MAX_MFN_ALLOC;
- op.u.increase.pages = mfn_list;
- if ( (ret = HYPERVISOR_dom_mem_op(&op)) != MAX_MFN_ALLOC )
+ int ret = HYPERVISOR_dom_mem_op(MEMOP_increase_reservation,
+ mfn_list, MAX_MFN_ALLOC);
+ if ( unlikely(ret != MAX_MFN_ALLOC) )
{
printk(KERN_ALERT "Unable to increase memory reservation (%d)\n", ret);
BUG();
static void dealloc_mfn(unsigned long mfn)
{
unsigned long flags;
- dom_mem_op_t op;
-
spin_lock_irqsave(&mfn_lock, flags);
if ( alloc_index != MAX_MFN_ALLOC )
- {
- /* Usually we can put the MFN back on the quicklist. */
mfn_list[alloc_index++] = mfn;
- }
else
- {
- op.op = MEMOP_RESERVATION_INCREASE;
- op.u.decrease.size = 1;
- op.u.decrease.pages = &mfn;
- (void)HYPERVISOR_dom_mem_op(&op);
- }
+ (void)HYPERVISOR_dom_mem_op(MEMOP_decrease_reservation, &mfn, 1);
spin_unlock_irqrestore(&mfn_lock, flags);
}
#include <asm/evtchn.h>
#include <asm/ctrl_if.h>
-#include <asm/hypervisor-ifs/dom_mem_ops.h>
#include "../netif.h"
struct net_private *np = dev->priv;
struct sk_buff *skb;
NETIF_RING_IDX i = np->rx->req_prod;
- dom_mem_op_t op;
int nr_pfns = 0;
/* Make sure the batch is large enough to be worthwhile (1/2 ring). */
rx_mcl[nr_pfns-1].args[2] = UVMF_FLUSH_TLB;
/* Give away a batch of pages. */
- op.op = MEMOP_RESERVATION_DECREASE;
- op.u.decrease.size = nr_pfns;
- op.u.decrease.pages = rx_pfn_array;
rx_mcl[nr_pfns].op = __HYPERVISOR_dom_mem_op;
- rx_mcl[nr_pfns].args[0] = (unsigned long)&op;
+ rx_mcl[nr_pfns].args[0] = MEMOP_decrease_reservation;
+ rx_mcl[nr_pfns].args[1] = (unsigned long)rx_pfn_array;
+ rx_mcl[nr_pfns].args[2] = (unsigned long)nr_pfns;
/* Zap PTEs and give away pages in one big multicall. */
(void)HYPERVISOR_multicall(rx_mcl, nr_pfns+1);
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <asm/hypervisor.h>
-#include <asm/hypervisor-ifs/dom_mem_ops.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/multicall.h>
unsigned long i;
int ret;
unsigned int order = get_order(pages*PAGE_SIZE);
- dom_mem_op_t dom_mem_op;
vstart = __get_free_pages(GFP_KERNEL, order);
if ( vstart == 0 )
flush_page_update_queue();
- dom_mem_op.op = MEMOP_RESERVATION_DECREASE;
- dom_mem_op.u.decrease.size = 1<<order;
- dom_mem_op.u.decrease.pages = pfn_array;
- if ( (ret = HYPERVISOR_dom_mem_op(&dom_mem_op)) != (1<<order) )
+ ret = HYPERVISOR_dom_mem_op(MEMOP_decrease_reservation,
+ pfn_array, 1<<order);
+ if ( unlikely(ret != (1<<order)) )
{
printk(KERN_WARNING "Unable to reduce memory reservation (%d)\n", ret);
BUG();
unsigned long i;
int ret;
unsigned int order = get_order(pages*PAGE_SIZE);
- dom_mem_op_t dom_mem_op;
pfn_array = vmalloc((1<<order) * sizeof(*pfn_array));
if ( pfn_array == NULL )
BUG();
- dom_mem_op.op = MEMOP_RESERVATION_INCREASE;
- dom_mem_op.u.increase.size = 1<<order;
- dom_mem_op.u.increase.pages = pfn_array;
- if ( (ret = HYPERVISOR_dom_mem_op(&dom_mem_op)) != (1<<order) )
+ ret = HYPERVISOR_dom_mem_op(MEMOP_increase_reservation,
+ pfn_array, 1<<order);
+ if ( unlikely(ret != (1<<order)) )
{
printk(KERN_WARNING "Unable to increase memory reservation (%d)\n",
ret);
return ret;
}
-static inline int HYPERVISOR_dom_mem_op(void *dom_mem_op)
+static inline int HYPERVISOR_dom_mem_op(unsigned int op,
+ unsigned long *pages,
+ unsigned long nr_pages)
{
int ret;
__asm__ __volatile__ (
TRAP_INSTR
: "=a" (ret) : "0" (__HYPERVISOR_dom_mem_op),
- "b" (dom_mem_op) : "memory" );
+ "b" (op), "c" (pages), "d" (nr_pages) : "memory" );
return ret;
}