#include <asm/mem_event.h>
#include <asm/mem_paging.h>
-
+/* for public/io/ring.h macros */
#define xen_mb() mb()
#define xen_rmb() rmb()
#define xen_wmb() wmb()
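+/* Ring lock */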
+#define mem_event_ring_lock_init(_d) spin_lock_init(&(_d)->mem_event.ring_lock)
+#define mem_event_ring_lock(_d) spin_lock(&(_d)->mem_event.ring_lock)
+#define mem_event_ring_unlock(_d) spin_unlock(&(_d)->mem_event.ring_lock)
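+/* Treat the ring as full once fewer than this many request slots remain free */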
#define MEM_EVENT_RING_THRESHOLD 4
-
-int mem_event_enable(struct domain *d, mfn_t ring_mfn, mfn_t shared_mfn)
+static int mem_event_enable(struct domain *d, mfn_t ring_mfn, mfn_t shared_mfn)
{
int rc;
mem_event_ring_lock_init(d);
- d->mem_event.paused = 0;
- d->mem_event.enabled = 1;
-
return 0;
err_shared:
return 1;
}
-int mem_event_disable(struct domain *d)
+static int mem_event_disable(struct domain *d)
{
- d->mem_event.enabled = 0;
- d->mem_event.paused = 0;
-
unmap_domain_page(d->mem_event.ring_page);
d->mem_event.ring_page = NULL;
{
struct vcpu *v;
- for_each_vcpu(d, v)
- {
- if ( d->mem_event.paused_vcpus[v->vcpu_id] )
- {
- vcpu_unpause(v);
- d->mem_event.paused_vcpus[v->vcpu_id] = 0;
- }
- }
-}
-
-int mem_event_pause_vcpu(struct domain *d, struct vcpu *v)
-{
- vcpu_pause_nosync(v);
- d->mem_event.paused_vcpus[v->vcpu_id] = 1;
-
- return 0;
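+ /* Wake any VCPUs that were paused because the ring was full */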
+ for_each_vcpu ( d, v )
+ if ( test_and_clear_bit(_VPF_mem_event, &v->pause_flags) )
+ vcpu_wake(v);
}
int mem_event_check_ring(struct domain *d)
{
+ struct vcpu *curr = current;
int free_requests;
int ring_full;
free_requests = RING_FREE_REQUESTS(&d->mem_event.front_ring);
ring_full = free_requests < MEM_EVENT_RING_THRESHOLD;
- if ( (current->domain->domain_id == d->domain_id) && ring_full )
- mem_event_pause_vcpu(d, current);
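+ /* Ring is full and the current VCPU belongs to the target domain:
+ * put it to sleep; it is woken once _VPF_mem_event is cleared. */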
+ if ( (curr->domain == d) && ring_full )
+ {
+ set_bit(_VPF_mem_event, &curr->pause_flags);
+ vcpu_sleep_nosync(curr);
+ }
mem_event_ring_unlock(d);
if ( unlikely(d->vcpu == NULL) || unlikely(d->vcpu[0] == NULL) )
{
- MEM_EVENT_ERROR("Memory paging op on a domain (%u) with no vcpus\n",
- d->domain_id);
+ gdprintk(XENLOG_INFO,
+ "Memory paging op on a domain (%u) with no vcpus\n",
+ d->domain_id);
return -EINVAL;
}
#ifndef __MEM_EVENT_H__
#define __MEM_EVENT_H__
-
-/* Printouts */
-#define MEM_EVENT_PRINTK(_f, _a...) \
- debugtrace_printk("mem_event: %s(): " _f, __func__, ##_a)
-#define MEM_EVENT_ERROR(_f, _a...) \
- printk("mem_event error: %s(): " _f, __func__, ##_a)
-#define MEM_EVENT_DEBUG(flag, _f, _a...) \
- do { \
- if (MEM_EVENT_DEBUG_ ## flag) \
- debugtrace_printk("mem_event debug: %s(): " _f, __func__, ##_a); \
- } while (0)
-
-
-#define mem_event_enabled(_d) (_d)->mem_event.enabled
-
-
-/* Ring lock */
-#define mem_event_ring_lock_init(_d) spin_lock_init(&(_d)->mem_event.ring_lock)
-#define mem_event_ring_lock(_d) spin_lock(&(_d)->mem_event.ring_lock)
-#define mem_event_ring_unlock(_d) spin_unlock(&(_d)->mem_event.ring_lock)
-
-
-int mem_event_enable(struct domain *d, mfn_t ring_mfn, mfn_t shared_mfn);
-int mem_event_disable(struct domain *d);
-
int mem_event_check_ring(struct domain *d);
void mem_event_put_request(struct domain *d, mem_event_request_t *req);
void mem_event_get_response(struct domain *d, mem_event_response_t *rsp);
void *ring_page;
/* front-end ring */
mem_event_front_ring_t front_ring;
- /* if domain has been paused due to ring contention */
- bool_t paused;
- int paused_vcpus[MAX_VIRT_CPUS];
- /* the memory event mode */
- unsigned long mode;
- /* domain to receive memory events */
- struct domain *domain;
- /* enabled? */
- bool_t enabled;
/* event channel port (vcpu0 only) */
int xen_port;
};
/* VCPU affinity has changed: migrating to a new CPU. */
#define _VPF_migrating 3
#define VPF_migrating (1UL<<_VPF_migrating)
+ /* VCPU is blocked on memory-event ring. */
+#define _VPF_mem_event 4
+#define VPF_mem_event (1UL<<_VPF_mem_event)
static inline int vcpu_runnable(struct vcpu *v)
{