On the way towards a network interface for the new I/O world.
40420a73Wou6JlsZDiu6YwjYomsm7A xenolinux-2.4.26-sparse/arch/xen/drivers/evtchn/evtchn.c
4083dc16-Kd5y9psK_yk161sme5j5Q xenolinux-2.4.26-sparse/arch/xen/drivers/netif/Makefile
4083dc16UmHXxS9g_UFVnkUpN-oP2Q xenolinux-2.4.26-sparse/arch/xen/drivers/netif/backend/Makefile
+4097ba83pPKYqMS3Gl-PVKIgYU1FZw xenolinux-2.4.26-sparse/arch/xen/drivers/netif/backend/common.h
+4097ba83glWYwQTkbPqgLIlYDOPVLg xenolinux-2.4.26-sparse/arch/xen/drivers/netif/backend/control.c
+4097ba837h2tuiweIWp-voNVzCRI6g xenolinux-2.4.26-sparse/arch/xen/drivers/netif/backend/interface.c
4087cf0d5dudKw_DecIJgOhLlBF_0Q xenolinux-2.4.26-sparse/arch/xen/drivers/netif/backend/main.c
405853f2wg7JXZJNltspMwOZJklxgw xenolinux-2.4.26-sparse/arch/xen/drivers/netif/frontend/Makefile
405853f6nbeazrNyEWNHBuoSg2PiPA xenolinux-2.4.26-sparse/arch/xen/drivers/netif/frontend/main.c
+4097ba83Qy2eafeFUhGhm6_4iMIIDw xenolinux-2.4.26-sparse/arch/xen/drivers/netif/netif.h
3e5a4e65gZBRBB6RsSVg1c9iahigAw xenolinux-2.4.26-sparse/arch/xen/drivers/network/Makefile
3e5a4e65ZxKrbFetVB84JhrTyZ1YuQ xenolinux-2.4.26-sparse/arch/xen/drivers/network/network.c
3e5a4e65lWzkiPXsZdzPt2RNnJGG1g xenolinux-2.4.26-sparse/arch/xen/kernel/Makefile
/*
* Top-level command types.
*/
-#define CMSG_CONSOLE 0 /* Console */
-#define CMSG_BLKIF_BE 1 /* Block-device backend */
-#define CMSG_BLKIF_FE 2 /* Block-device frontend */
+#define CMSG_CONSOLE 0 /* Console */
+#define CMSG_BLKIF_BE 1 /* Block-device backend */
+#define CMSG_BLKIF_FE 2 /* Block-device frontend */
+#define CMSG_NETIF_BE 3 /* Network-device backend */
+#define CMSG_NETIF_FE 4 /* Network-device frontend */
/******************************************************************************
#define BLKIF_DRIVER_STATUS_DOWN 0
#define BLKIF_DRIVER_STATUS_UP 1
typedef struct {
+ /* IN */
unsigned int status; /* BLKIF_DRIVER_STATUS_??? */
+ /* OUT */
+ /*
+ * Tells driver how many interfaces it should expect to immediately
+ * receive notifications about.
+ */
+ unsigned int nr_interfaces;
} blkif_fe_driver_status_changed_t;
/*
* STATUS_DISCONNECTED message.
*/
typedef struct {
- /* IN */
unsigned int handle;
- /* OUT */
- /*
- * Tells driver how many interfaces it should expect to immediately
- * receive notifications about.
- */
- unsigned int nr_interfaces;
} blkif_fe_interface_disconnect_t;
unsigned int nr_interfaces;
} blkif_be_driver_status_changed_t;
+
+/******************************************************************************
+ * NETWORK-INTERFACE FRONTEND DEFINITIONS
+ */
+
+/* Messages from domain controller to guest. */
+#define CMSG_NETIF_FE_INTERFACE_STATUS_CHANGED 0
+
+/* Messages from guest to domain controller. */
+#define CMSG_NETIF_FE_DRIVER_STATUS_CHANGED 32
+#define CMSG_NETIF_FE_INTERFACE_CONNECT 33
+#define CMSG_NETIF_FE_INTERFACE_DISCONNECT 34
+
+/*
+ * CMSG_NETIF_FE_INTERFACE_STATUS_CHANGED:
+ * Notify a guest about a status change on one of its network interfaces.
+ * If the interface is DESTROYED or DOWN then the interface is disconnected:
+ * 1. The shared-memory frame is available for reuse.
+ * 2. Any unacknowledged messages pending on the interface were dropped.
+ */
+#define NETIF_INTERFACE_STATUS_DESTROYED 0 /* Interface doesn't exist. */
+#define NETIF_INTERFACE_STATUS_DISCONNECTED 1 /* Exists but is disconnected. */
+#define NETIF_INTERFACE_STATUS_CONNECTED 2 /* Exists and is connected. */
+typedef struct {
+ unsigned int handle;
+ unsigned int status;
+ unsigned int evtchn; /* status == NETIF_INTERFACE_STATUS_CONNECTED */
+} netif_fe_interface_status_changed_t;
+
+/*
+ * CMSG_NETIF_FE_DRIVER_STATUS_CHANGED:
+ * Notify the domain controller that the front-end driver is DOWN or UP.
+ * When the driver goes DOWN then the controller will send no more
+ * status-change notifications. When the driver comes UP then the controller
+ * will send a notification for each interface that currently exists.
+ * If the driver goes DOWN while interfaces are still UP, the domain
+ * will automatically take the interfaces DOWN.
+ */
+#define NETIF_DRIVER_STATUS_DOWN 0
+#define NETIF_DRIVER_STATUS_UP 1
+typedef struct {
+ /* IN */
+ unsigned int status; /* NETIF_DRIVER_STATUS_??? */
+ /* OUT */
+ /*
+ * Tells driver how many interfaces it should expect to immediately
+ * receive notifications about.
+ */
+ unsigned int nr_interfaces;
+} netif_fe_driver_status_changed_t;
+
+/*
+ * CMSG_NETIF_FE_INTERFACE_CONNECT:
+ * If successful, the domain controller will acknowledge with a
+ * STATUS_CONNECTED message.
+ */
+typedef struct {
+ unsigned int handle;
+ unsigned long shmem_frame;
+} netif_fe_interface_connect_t;
+
+/*
+ * CMSG_NETIF_FE_INTERFACE_DISCONNECT:
+ * If successful, the domain controller will acknowledge with a
+ * STATUS_DISCONNECTED message.
+ */
+typedef struct {
+ unsigned int handle;
+} netif_fe_interface_disconnect_t;
+
+
+/******************************************************************************
+ * NETWORK-INTERFACE BACKEND DEFINITIONS
+ */
+
+/* Messages from domain controller. */
+#define CMSG_NETIF_BE_CREATE 0 /* Create a new net-device interface. */
+#define CMSG_NETIF_BE_DESTROY 1 /* Destroy a net-device interface. */
+#define CMSG_NETIF_BE_CONNECT 2 /* Connect i/f to remote driver. */
+#define CMSG_NETIF_BE_DISCONNECT 3 /* Disconnect i/f from remote driver. */
+
+/* Messages to domain controller. */
+#define CMSG_NETIF_BE_DRIVER_STATUS_CHANGED 32
+
+/*
+ * Message request/response definitions for net-device messages.
+ */
+
+/* Non-specific 'okay' return. */
+#define NETIF_BE_STATUS_OKAY 0
+/* Non-specific 'error' return. */
+#define NETIF_BE_STATUS_ERROR 1
+/* The following are specific error returns. */
+#define NETIF_BE_STATUS_INTERFACE_EXISTS 2
+#define NETIF_BE_STATUS_INTERFACE_NOT_FOUND 3
+#define NETIF_BE_STATUS_INTERFACE_CONNECTED 4
+#define NETIF_BE_STATUS_OUT_OF_MEMORY 5
+#define NETIF_BE_STATUS_MAPPING_ERROR 6
+
+/* This macro can be used to create an array of descriptive error strings. */
+#define NETIF_BE_STATUS_ERRORS { \
+ "Okay", \
+ "Non-specific error", \
+ "Interface already exists", \
+ "Interface not found", \
+ "Interface is still connected", \
+ "Out of memory", \
+ "Could not map domain memory" }
+
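The NETIF_BE_STATUS_ERRORS macro is only useful once it has been expanded into an array indexed by the status codes above. A minimal sketch of the intended usage (the array and helper names here are illustrative, not part of this changeset):

    /* Illustrative only: map a NETIF_BE_STATUS_* code to a printable string. */
    static const char *netif_be_status_errstr[] = NETIF_BE_STATUS_ERRORS;

    static const char *netif_be_strstatus(unsigned int status)
    {
        if ( status >= (sizeof(netif_be_status_errstr) /
                        sizeof(netif_be_status_errstr[0])) )
            return "Unknown status";
        return netif_be_status_errstr[status];
    }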
+/*
+ * CMSG_NETIF_BE_CREATE:
+ * When the driver sends a successful response then the interface is fully
+ * created. The controller will send a DOWN notification to the front-end
+ * driver.
+ */
+typedef struct {
+ /* IN */
+ domid_t domid; /* Domain attached to new interface. */
+ unsigned int netif_handle; /* Domain-specific interface handle. */
+ /* OUT */
+ unsigned int status;
+} netif_be_create_t;
+
+/*
+ * CMSG_NETIF_BE_DESTROY:
+ * When the driver sends a successful response then the interface is fully
+ * torn down. The controller will send a DESTROYED notification to the
+ * front-end driver.
+ */
+typedef struct {
+ /* IN */
+ domid_t domid; /* Identify interface to be destroyed. */
+ unsigned int netif_handle; /* ...ditto... */
+ /* OUT */
+ unsigned int status;
+} netif_be_destroy_t;
+
+/*
+ * CMSG_NETIF_BE_CONNECT:
+ * When the driver sends a successful response then the interface is fully
+ * connected. The controller will send a CONNECTED notification to the
+ * front-end driver.
+ */
+typedef struct {
+ /* IN */
+ domid_t domid; /* Domain attached to new interface. */
+ unsigned int netif_handle; /* Domain-specific interface handle. */
+ unsigned int evtchn; /* Event channel for notifications. */
+ unsigned long shmem_frame; /* Page containing shared comms window. */
+ /* OUT */
+ unsigned int status;
+} netif_be_connect_t;
+
+/*
+ * CMSG_NETIF_BE_DISCONNECT:
+ * When the driver sends a successful response then the interface is fully
+ * disconnected. The controller will send a DOWN notification to the front-end
+ * driver.
+ */
+typedef struct {
+ /* IN */
+ domid_t domid; /* Domain attached to new interface. */
+ unsigned int netif_handle; /* Domain-specific interface handle. */
+ /* OUT */
+ unsigned int status;
+} netif_be_disconnect_t;
+
+/*
+ * CMSG_NETIF_BE_DRIVER_STATUS_CHANGED:
+ * Notify the domain controller that the back-end driver is DOWN or UP.
+ * If the driver goes DOWN while interfaces are still UP, the domain
+ * will automatically send DOWN notifications.
+ */
+typedef struct {
+ /* IN */
+ unsigned int status; /* NETIF_DRIVER_STATUS_??? */
+ /* OUT */
+ /*
+ * Tells driver how many interfaces it should expect to immediately
+ * receive notifications about.
+ */
+ unsigned int nr_interfaces;
+} netif_be_driver_status_changed_t;
+
#endif /* __DOMAIN_CONTROLLER_H__ */
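For reference, the frontend side of the CONNECT handshake amounts to passing the machine frame of its shared ring page in a netif_fe_interface_connect_t and waiting for the STATUS_CONNECTED notification. A hedged sketch, assuming the frontend uses the same ctrl_if primitives as the backend code later in this changeset (function name and error handling are illustrative):

    /* Illustrative only: ask the domain controller to connect interface 'handle'. */
    static void example_netif_fe_connect(unsigned int handle, void *shared_page)
    {
        ctrl_msg_t cmsg;
        netif_fe_interface_connect_t conn;

        cmsg.type    = CMSG_NETIF_FE;
        cmsg.subtype = CMSG_NETIF_FE_INTERFACE_CONNECT;
        cmsg.length  = sizeof(netif_fe_interface_connect_t);
        conn.handle      = handle;
        conn.shmem_frame = virt_to_machine(shared_page) >> PAGE_SHIFT;
        memcpy(cmsg.msg, &conn, sizeof(conn));

        /* The controller later acks with a STATUS_CONNECTED notification. */
        ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
    }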
pending_req->operation = operation;
pending_req->status = BLKIF_RSP_OKAY;
atomic_set(&pending_req->pendcnt, nr_psegs);
+ pending_cons++;
blkif_get(blkif);
generic_make_request(operation, bh);
}
- pending_cons++;
return;
bad_descriptor:
#define BLKIF_MAX_SECTORS_PER_SEGMENT 16
typedef struct {
- unsigned char operation; /* BLKIF_OP_??? */
- unsigned char nr_segments; /* number of segments */
+ u8 operation; /* BLKIF_OP_??? */
+ u8 nr_segments; /* number of segments */
blkif_vdev_t device; /* only for read/write requests */
unsigned long id; /* private guest value, echoed in resp */
blkif_sector_t sector_number; /* start sector idx on disk (r/w only) */
typedef struct {
unsigned long id; /* copied from request */
- unsigned char operation; /* copied from request */
- int status; /* BLKIF_RSP_??? */
+ u8 operation; /* copied from request */
+ s16 status; /* BLKIF_RSP_??? */
} blkif_response_t;
#define BLKIF_RSP_ERROR -1 /* non-specific 'error' */
#define VDISK_VIRTUAL(_x) ((_x) & VDISK_FLAG_VIRT)
typedef struct {
- blkif_vdev_t device; /* Device number (opaque 16 bit value). */
- unsigned short info; /* Device type and flags (VDISK_*). */
blkif_sector_t capacity; /* Size in terms of 512-byte sectors. */
+ blkif_vdev_t device; /* Device number (opaque 16 bit value). */
+ u16 info; /* Device type and flags (VDISK_*). */
} vdisk_t;
#endif /* __SHARED_BLKIF_H__ */
O_TARGET := drv.o
-obj-y := main.o
+obj-y := main.o control.o interface.o
include $(TOPDIR)/Rules.make
--- /dev/null
+/******************************************************************************
+ * arch/xen/drivers/netif/backend/common.h
+ */
+
+#ifndef __NETIF__BACKEND__COMMON_H__
+#define __NETIF__BACKEND__COMMON_H__
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <asm/ctrl_if.h>
+#include <asm/io.h>
+#include "../netif.h"
+
+#ifndef NDEBUG
+#define ASSERT(_p) \
+ if ( !(_p) ) { printk("Assertion '%s' failed, line %d, file %s", #_p , \
+ __LINE__, __FILE__); *(int*)0=0; }
+#define DPRINTK(_f, _a...) printk("(file=%s, line=%d) " _f, \
+ __FILE__ , __LINE__ , ## _a )
+#else
+#define ASSERT(_p) ((void)0)
+#define DPRINTK(_f, _a...) ((void)0)
+#endif
+
+typedef struct netif_st {
+ /* Unique identifier for this interface. */
+ domid_t domid;
+ unsigned int handle;
+
+ /* Physical parameters of the comms window. */
+ unsigned long tx_shmem_frame;
+ unsigned long rx_shmem_frame;
+ unsigned int evtchn;
+ int irq;
+
+ /* The shared rings and indexes. */
+ netif_tx_interface_t *tx;
+ netif_rx_interface_t *rx;
+
+ /* Private indexes into shared ring. */
+ NETIF_RING_IDX rx_req_cons;
+ NETIF_RING_IDX rx_resp_prod; /* private version of shared variable */
+ NETIF_RING_IDX tx_req_cons;
+ NETIF_RING_IDX tx_resp_prod; /* private version of shared variable */
+
+ /* Usage accounting */
+ long long total_bytes_sent;
+ long long total_bytes_received;
+ long long total_packets_sent;
+ long long total_packets_received;
+
+ /* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
+ unsigned long credit_bytes;
+ unsigned long credit_usec;
+ unsigned long remaining_credit;
+ struct timer_list credit_timeout;
+
+ /* Miscellaneous private stuff. */
+ enum { DISCONNECTED, DISCONNECTING, CONNECTED } status;
+ /*
+ * DISCONNECT response is deferred until pending requests are ack'ed.
+ * We therefore need to store the id from the original request.
+ */
+ u8 disconnect_rspid;
+ struct netif_st *hash_next;
+ struct list_head list; /* scheduling list */
+ atomic_t refcnt;
+ spinlock_t rx_lock, tx_lock;
+ unsigned char vmac[ETH_ALEN];
+} netif_t;
+
+void netif_create(netif_be_create_t *create);
+void netif_destroy(netif_be_destroy_t *destroy);
+void netif_connect(netif_be_connect_t *connect);
+int netif_disconnect(netif_be_disconnect_t *disconnect, u8 rsp_id);
+void __netif_disconnect_complete(netif_t *netif);
+netif_t *netif_find_by_handle(domid_t domid, unsigned int handle);
+#define netif_get(_b) (atomic_inc(&(_b)->refcnt))
+#define netif_put(_b) \
+ do { \
+ if ( atomic_dec_and_test(&(_b)->refcnt) ) \
+ __netif_disconnect_complete(_b); \
+ } while (0)
+
+void netif_interface_init(void);
+void netif_ctrlif_init(void);
+
+void netif_deschedule(netif_t *netif);
+
+void netif_be_int(int irq, void *dev_id, struct pt_regs *regs);
+
+#endif /* __NETIF__BACKEND__COMMON_H__ */
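The netif_get()/netif_put() pair implies a simple discipline: take a reference whenever a netif_t pointer is held across work that may race with a disconnect, and let the final put complete the deferred disconnect. A minimal sketch of the pattern (the function itself is illustrative, not part of this changeset):

    /* Illustrative only: hold a reference while operating on a looked-up interface. */
    static void example_use_netif(domid_t domid, unsigned int handle)
    {
        netif_t *netif = netif_find_by_handle(domid, handle);
        if ( netif == NULL )
            return;

        netif_get(netif);            /* pin the interface */
        /* ... touch netif->tx / netif->rx here ... */
        netif_put(netif);            /* may complete a pending disconnect */
    }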
--- /dev/null
+/******************************************************************************
+ * arch/xen/drivers/netif/backend/control.c
+ *
+ * Routines for interfacing with the control plane.
+ *
+ * Copyright (c) 2004, Keir Fraser
+ */
+
+#include "common.h"
+
+static void netif_ctrlif_rx(ctrl_msg_t *msg, unsigned long id)
+{
+ DPRINTK("Received netif backend message, subtype=%d\n", msg->subtype);
+
+ switch ( msg->subtype )
+ {
+ case CMSG_NETIF_BE_CREATE:
+ if ( msg->length != sizeof(netif_be_create_t) )
+ goto parse_error;
+ netif_create((netif_be_create_t *)&msg->msg[0]);
+ break;
+ case CMSG_NETIF_BE_DESTROY:
+ if ( msg->length != sizeof(netif_be_destroy_t) )
+ goto parse_error;
+ netif_destroy((netif_be_destroy_t *)&msg->msg[0]);
+ break;
+ case CMSG_NETIF_BE_CONNECT:
+ if ( msg->length != sizeof(netif_be_connect_t) )
+ goto parse_error;
+ netif_connect((netif_be_connect_t *)&msg->msg[0]);
+ break;
+ case CMSG_NETIF_BE_DISCONNECT:
+ if ( msg->length != sizeof(netif_be_disconnect_t) )
+ goto parse_error;
+ if ( !netif_disconnect((netif_be_disconnect_t *)&msg->msg[0],msg->id) )
+ return; /* Sending the response is deferred until later. */
+ break;
+ default:
+ goto parse_error;
+ }
+
+ ctrl_if_send_response(msg);
+ return;
+
+ parse_error:
+ DPRINTK("Parse error while reading message subtype %d, len %d\n",
+ msg->subtype, msg->length);
+ msg->length = 0;
+ ctrl_if_send_response(msg);
+}
+
+void netif_ctrlif_init(void)
+{
+ ctrl_msg_t cmsg;
+ netif_be_driver_status_changed_t st;
+
+ (void)ctrl_if_register_receiver(CMSG_NETIF_BE, netif_ctrlif_rx);
+
+ /* Send a driver-UP notification to the domain controller. */
+ cmsg.type = CMSG_NETIF_BE;
+ cmsg.subtype = CMSG_NETIF_BE_DRIVER_STATUS_CHANGED;
+ cmsg.length = sizeof(netif_be_driver_status_changed_t);
+ st.status = NETIF_DRIVER_STATUS_UP;
+ memcpy(cmsg.msg, &st, sizeof(st));
+ ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
+}
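A frontend driver is expected to announce itself in the same way as netif_ctrlif_init() above, only in the CMSG_NETIF_FE namespace. A hedged sketch of what that announcement might look like (not part of this changeset):

    /* Illustrative only: frontend analogue of the driver-UP notification. */
    static void example_netif_fe_driver_up(void)
    {
        ctrl_msg_t cmsg;
        netif_fe_driver_status_changed_t st;

        cmsg.type    = CMSG_NETIF_FE;
        cmsg.subtype = CMSG_NETIF_FE_DRIVER_STATUS_CHANGED;
        cmsg.length  = sizeof(netif_fe_driver_status_changed_t);
        st.status    = NETIF_DRIVER_STATUS_UP;
        memcpy(cmsg.msg, &st, sizeof(st));

        /* The controller fills in nr_interfaces in its response. */
        ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
    }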
--- /dev/null
+/******************************************************************************
+ * arch/xen/drivers/netif/backend/interface.c
+ *
+ * Network-device interface management.
+ *
+ * Copyright (c) 2004, Keir Fraser
+ */
+
+#include "common.h"
+
+#define NETIF_HASHSZ 1024
+#define NETIF_HASH(_d,_h) \
+ (((int)(_d)^(int)((_d)>>32)^(int)(_h))&(NETIF_HASHSZ-1))
+
+static kmem_cache_t *netif_cachep;
+static netif_t *netif_hash[NETIF_HASHSZ];
+
+netif_t *netif_find_by_handle(domid_t domid, unsigned int handle)
+{
+ netif_t *netif = netif_hash[NETIF_HASH(domid, handle)];
+ while ( (netif != NULL) &&
+ ((netif->domid != domid) || (netif->handle != handle)) )
+ netif = netif->hash_next;
+ return netif;
+}
+
+void __netif_disconnect_complete(netif_t *netif)
+{
+ ctrl_msg_t cmsg;
+ netif_be_disconnect_t disc;
+
+ /*
+ * These can't be done in __netif_disconnect() because at that point there
+ * may be outstanding requests on the interface whose asynchronous responses
+ * must still be notified to the remote driver.
+ */
+ unbind_evtchn_from_irq(netif->evtchn);
+ vfree(netif->net_ring_base);
+
+ /* Construct the deferred response message. */
+ cmsg.type = CMSG_NETIF_BE;
+ cmsg.subtype = CMSG_NETIF_BE_DISCONNECT;
+ cmsg.id = netif->disconnect_rspid;
+ cmsg.length = sizeof(netif_be_disconnect_t);
+ disc.domid = netif->domid;
+ disc.netif_handle = netif->handle;
+ disc.status = NETIF_BE_STATUS_OKAY;
+ memcpy(cmsg.msg, &disc, sizeof(disc));
+
+ /*
+ * Make sure message is constructed /before/ status change, because
+ * after the status change the 'netif' structure could be deallocated at
+ * any time. Also make sure we send the response /after/ status change,
+ * as otherwise a subsequent CONNECT request could spuriously fail if
+ * another CPU doesn't see the status change yet.
+ */
+ mb();
+ if ( netif->status != DISCONNECTING )
+ BUG();
+ netif->status = DISCONNECTED;
+ mb();
+
+ /* Send the successful response. */
+ ctrl_if_send_response(&cmsg);
+}
+
+void netif_create(netif_be_create_t *create)
+{
+ domid_t domid = create->domid;
+ unsigned int handle = create->netif_handle;
+ netif_t **pnetif, *netif;
+
+ if ( (netif = kmem_cache_alloc(netif_cachep, GFP_ATOMIC)) == NULL )
+ {
+ DPRINTK("Could not create netif: out of memory\n");
+ create->status = NETIF_BE_STATUS_OUT_OF_MEMORY;
+ return;
+ }
+
+ memset(netif, 0, sizeof(*netif));
+ netif->domid = domid;
+ netif->handle = handle;
+ netif->status = DISCONNECTED;
+ spin_lock_init(&netif->rx_lock);
+ spin_lock_init(&netif->tx_lock);
+ atomic_set(&netif->refcnt, 0);
+
+ pnetif = &netif_hash[NETIF_HASH(domid, handle)];
+ while ( *pnetif != NULL )
+ {
+ if ( ((*pnetif)->domid == domid) && ((*pnetif)->handle == handle) )
+ {
+ DPRINTK("Could not create netif: already exists\n");
+ create->status = NETIF_BE_STATUS_INTERFACE_EXISTS;
+ kmem_cache_free(netif_cachep, netif);
+ return;
+ }
+ pnetif = &(*pnetif)->hash_next;
+ }
+
+ netif->hash_next = *pnetif;
+ *pnetif = netif;
+
+ DPRINTK("Successfully created netif\n");
+ create->status = NETIF_BE_STATUS_OKAY;
+}
+
+void netif_destroy(netif_be_destroy_t *destroy)
+{
+ domid_t domid = destroy->domid;
+ unsigned int handle = destroy->netif_handle;
+ netif_t **pnetif, *netif;
+
+ pnetif = &netif_hash[NETIF_HASH(domid, handle)];
+ while ( (netif = *pnetif) != NULL )
+ {
+ if ( (netif->domid == domid) && (netif->handle == handle) )
+ {
+ if ( netif->status != DISCONNECTED )
+ goto still_connected;
+ goto destroy;
+ }
+ pnetif = &netif->hash_next;
+ }
+
+ destroy->status = NETIF_BE_STATUS_INTERFACE_NOT_FOUND;
+ return;
+
+ still_connected:
+ destroy->status = NETIF_BE_STATUS_INTERFACE_CONNECTED;
+ return;
+
+ destroy:
+ *pnetif = netif->hash_next;
+ destroy_all_vbds(netif);
+ kmem_cache_free(netif_cachep, netif);
+ destroy->status = NETIF_BE_STATUS_OKAY;
+}
+
+void netif_connect(netif_be_connect_t *connect)
+{
+ domid_t domid = connect->domid;
+ unsigned int handle = connect->netif_handle;
+ unsigned int evtchn = connect->evtchn;
+ unsigned long shmem_frame = connect->shmem_frame;
+ struct vm_struct *vma;
+ pgprot_t prot;
+ int error;
+ netif_t *netif;
+
+ netif = netif_find_by_handle(domid, handle);
+ if ( unlikely(netif == NULL) )
+ {
+ DPRINTK("netif_connect attempted for non-existent netif (%llu,%u)\n",
+ connect->domid, connect->netif_handle);
+ connect->status = NETIF_BE_STATUS_INTERFACE_NOT_FOUND;
+ return;
+ }
+
+ if ( (vma = get_vm_area(PAGE_SIZE, VM_IOREMAP)) == NULL )
+ {
+ connect->status = NETIF_BE_STATUS_OUT_OF_MEMORY;
+ return;
+ }
+
+ prot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED);
+ error = direct_remap_area_pages(&init_mm, VMALLOC_VMADDR(vma->addr),
+ shmem_frame<<PAGE_SHIFT, PAGE_SIZE,
+ prot, domid);
+ if ( error != 0 )
+ {
+ if ( error == -ENOMEM )
+ connect->status = NETIF_BE_STATUS_OUT_OF_MEMORY;
+ else if ( error == -EFAULT )
+ connect->status = NETIF_BE_STATUS_MAPPING_ERROR;
+ else
+ connect->status = NETIF_BE_STATUS_ERROR;
+ vfree(vma->addr);
+ return;
+ }
+
+ if ( netif->status != DISCONNECTED )
+ {
+ connect->status = NETIF_BE_STATUS_INTERFACE_CONNECTED;
+ vfree(vma->addr);
+ return;
+ }
+
+ netif->evtchn = evtchn;
+ netif->irq = bind_evtchn_to_irq(evtchn);
+ netif->shmem_frame = shmem_frame;
+ netif->net_ring_base = (netif_ring_t *)vma->addr;
+ netif->status = CONNECTED;
+ netif_get(netif);
+
+ request_irq(netif->irq, netif_be_int, 0, "netif-backend", netif);
+
+ connect->status = NETIF_BE_STATUS_OKAY;
+}
+
+int netif_disconnect(netif_be_disconnect_t *disconnect, u8 rsp_id)
+{
+ domid_t domid = disconnect->domid;
+ unsigned int handle = disconnect->netif_handle;
+ netif_t *netif;
+
+ netif = netif_find_by_handle(domid, handle);
+ if ( unlikely(netif == NULL) )
+ {
+ DPRINTK("netif_disconnect attempted for non-existent netif"
+ " (%llu,%u)\n", disconnect->domid, disconnect->netif_handle);
+ disconnect->status = NETIF_BE_STATUS_INTERFACE_NOT_FOUND;
+ return 1; /* Caller will send response error message. */
+ }
+
+ if ( netif->status == CONNECTED )
+ {
+ netif->status = DISCONNECTING;
+ netif->disconnect_rspid = rsp_id;
+ wmb(); /* Let other CPUs see the status change. */
+ free_irq(netif->irq, netif);
+ netif_deschedule(netif);
+ netif_put(netif);
+ }
+
+ return 0; /* Caller should not send response message. */
+}
+
+net_vif_t *create_net_vif(domid_t dom)
+{
+ unsigned int idx;
+ net_vif_t *new_vif = NULL;
+ net_ring_t *new_ring = NULL;
+ struct task_struct *p = NULL;
+ unsigned long flags, vmac_hash;
+ unsigned char vmac_key[ETH_ALEN + 2 + MAX_DOMAIN_NAME];
+
+ if ( (p = find_domain_by_id(dom)) == NULL )
+ return NULL;
+
+ write_lock_irqsave(&tasklist_lock, flags);
+
+ for ( idx = 0; idx < MAX_DOMAIN_VIFS; idx++ )
+ if ( p->net_vif_list[idx] == NULL )
+ break;
+ if ( idx == MAX_DOMAIN_VIFS )
+ goto fail;
+
+ if ( (new_vif = kmem_cache_alloc(net_vif_cache, GFP_KERNEL)) == NULL )
+ goto fail;
+
+ memset(new_vif, 0, sizeof(*new_vif));
+
+ if ( sizeof(net_ring_t) > PAGE_SIZE )
+ BUG();
+ new_ring = (net_ring_t *)get_free_page(GFP_KERNEL);
+ clear_page(new_ring);
+ SHARE_PFN_WITH_DOMAIN(virt_to_page(new_ring), p);
+
+ /*
+ * Fill in the new vif struct. Note that, while the vif's refcnt is
+ * non-zero, we hold a reference to the task structure.
+ */
+ atomic_set(&new_vif->refcnt, 1);
+ new_vif->shared_rings = new_ring;
+ new_vif->shared_idxs = &p->shared_info->net_idx[idx];
+ new_vif->domain = p;
+ new_vif->idx = idx;
+ new_vif->list.next = NULL;
+ spin_lock_init(&new_vif->rx_lock);
+ spin_lock_init(&new_vif->tx_lock);
+
+ new_vif->credit_bytes = new_vif->remaining_credit = ~0UL;
+ new_vif->credit_usec = 0UL;
+ init_ac_timer(&new_vif->credit_timeout);
+
+ if ( (p->domain == 0) && (idx == 0) )
+ {
+ /*
+ * DOM0/VIF0 gets the real physical MAC address, so that users can
+ * easily get a Xen-based machine up and running by using an existing
+ * DHCP entry.
+ */
+ memcpy(new_vif->vmac, the_dev->dev_addr, ETH_ALEN);
+ }
+ else
+ {
+ /*
+ * Most VIFs get a random MAC address with a "special" vendor id.
+ * We try to get MAC addresses to be unique across multiple servers
+ * by including the physical MAC address in the hash. The hash also
+ * includes the vif index and the domain's name.
+ *
+ * NB. The vendor is currently an "obsolete" one that used to belong
+ * to DEC (AA-00-00). Using it is probably a bit rude :-)
+ *
+ * NB2. The first bit of the first random octet is set to zero for
+ * all dynamic MAC addresses. This may allow us to manually specify
+ * MAC addresses for some VIFs with no fear of clashes.
+ */
+ memcpy(&vmac_key[0], the_dev->dev_addr, ETH_ALEN);
+ *(__u16 *)(&vmac_key[ETH_ALEN]) = htons(idx);
+ strcpy(&vmac_key[ETH_ALEN+2], p->name);
+ vmac_hash = hash(vmac_key, ETH_ALEN + 2 + strlen(p->name));
+ memcpy(new_vif->vmac, "\xaa\x00\x00", 3);
+ new_vif->vmac[3] = (vmac_hash >> 16) & 0xef; /* First bit is zero. */
+ new_vif->vmac[4] = (vmac_hash >> 8) & 0xff;
+ new_vif->vmac[5] = (vmac_hash >> 0) & 0xff;
+ }
+
+ p->net_vif_list[idx] = new_vif;
+
+ write_unlock_irqrestore(&tasklist_lock, flags);
+ return new_vif;
+
+ fail:
+ write_unlock_irqrestore(&tasklist_lock, flags);
+ if ( new_vif != NULL )
+ kmem_cache_free(net_vif_cache, new_vif);
+ if ( p != NULL )
+ put_task_struct(p);
+ return NULL;
+}
+
+void netif_interface_init(void)
+{
+ netif_cachep = kmem_cache_create("netif_cache", sizeof(netif_t),
+ 0, 0, NULL, NULL);
+ memset(netif_hash, 0, sizeof(netif_hash));
+}
* reference front-end implementation can be found in:
* arch/xen/drivers/netif/frontend
*
- * Copyright (c) 2004, K A Fraser
+ * Copyright (c) 2002-2004, K A Fraser
*/
-#include <linux/config.h>
-#include <linux/module.h>
+#include "common.h"
+
+static void make_tx_response(netif_t *netif,
+ u16 id,
+ s8 st);
+static void make_rx_response(netif_t *netif,
+ u16 id,
+ s8 st,
+ netif_addr_t addr,
+ u16 size);
+
+/* Don't currently gate addition of an interface to the tx scheduling list. */
+#define tx_work_exists(_if) (1)
+
+#define MAX_PENDING_REQS 256
+static struct vm_struct *mmap_vma;
+#define MMAP_VADDR(_req) ((unsigned long)mmap_vma->addr + ((_req) * PAGE_SIZE))
+
+/*static pending_req_t pending_reqs[MAX_PENDING_REQS];*/
+static u16 pending_ring[MAX_PENDING_REQS];
+static spinlock_t pend_prod_lock = SPIN_LOCK_UNLOCKED;
+/* NB. We use a different index type to differentiate from shared net rings. */
+typedef unsigned int PEND_RING_IDX;
+#define MASK_PEND_IDX(_i) ((_i)&(MAX_PENDING_REQS-1))
+static PEND_RING_IDX pending_prod, pending_cons;
+#define NR_PENDING_REQS (MAX_PENDING_REQS - pending_prod + pending_cons)
+
+/*
+ * This is the primary RECEIVE function for a network interface.
+ * Note that, from the p.o.v. of /this/ OS it looks like a transmit.
+ */
+static void netif_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ netif_t *netif = (netif_t *)dev->priv;
+ s8 status = NETIF_RSP_OKAY;
+ u16 size;
+ mmu_update_t mmu[4];
+
+ memcpy(skb->mac.ethernet->h_dest, netif->vmac, ETH_ALEN);
+ if ( ntohs(skb->mac.ethernet->h_proto) == ETH_P_ARP )
+ memcpy(skb->nh.raw + 18, netif->vmac, ETH_ALEN);
+
+ spin_lock(&netif->rx_lock);
+
+ mmu[0].val = (unsigned long)(netif->domid<<16) & ~0xFFFFUL;
+ mmu[0].ptr = (unsigned long)(netif->domid<< 0) & ~0xFFFFUL;
+ mmu[1].val = (unsigned long)(netif->domid>>16) & ~0xFFFFUL;
+ mmu[1].ptr = (unsigned long)(netif->domid>>32) & ~0xFFFFUL;
+ mmu[0].ptr |= MMU_EXTENDED_COMMAND;
+ mmu[0].val |= MMUEXT_SET_SUBJECTDOM_L;
+ mmu[1].ptr |= MMU_EXTENDED_COMMAND;
+ mmu[1].val |= MMUEXT_SET_SUBJECTDOM_H;
+
+ mmu[2].ptr = ptr | MMU_EXTENDED_COMMAND;
+ mmu[2].val = MMUEXT_REASSIGN_PAGE;
+
+ mmu[3].ptr = ppte;
+ mmu[3].val = newpage;
+
+ if ( unlikely(HYPERVISOR_mmu_update(mmu, 4) < 0) )
+ {
+ status = NETIF_RSP_ERROR;
+ goto out;
+ }
+
+ /* Record this so the domain can be billed. */
+ netif->total_packets_received++;
+ netif->total_bytes_received += size;
+
+ out:
+ make_rx_response(netif, rx->id, status, addr, size);
+ spin_unlock(&netif->rx_lock);
+ dev_kfree_skb(skb);
+}
+
+
+/*************************************************************
+ * NEW TRANSMIT SCHEDULER
+ *
+ * NB. We ought also to only send a limited number of bytes to the NIC
+ * for transmission at any one time (to avoid head-of-line blocking).
+ * However, driver rings are small enough that they provide a reasonable
+ * limit.
+ *
+ * eg. 3c905 has 16 descriptors == 8 packets, at 100Mbps
+ * e1000 has 256 descriptors == 128 packets, at 1000Mbps
+ * tg3 has 512 descriptors == 256 packets, at 1000Mbps
+ *
+ * So, worst case is tg3 with 256 1500-bytes packets == 375kB.
+ * This would take 3ms, and represents our worst-case HoL blocking cost.
+ *
+ * We think this is reasonable.
+ */
+
+struct list_head net_schedule_list;
+spinlock_t net_schedule_list_lock;
+
+static int __on_net_schedule_list(netif_t *netif)
+{
+ return netif->list.next != NULL;
+}
+
+static void remove_from_net_schedule_list(netif_t *netif)
+{
+ spin_lock(&net_schedule_list_lock);
+ ASSERT(__on_net_schedule_list(netif));
+ list_del(&netif->list);
+ netif->list.next = NULL;
+ netif_put(netif);
+ spin_unlock(&net_schedule_list_lock);
+}
+
+static void add_to_net_schedule_list_tail(netif_t *netif)
+{
+ if ( __on_net_schedule_list(netif) )
+ return;
+
+ spin_lock(&net_schedule_list_lock);
+ if ( likely(!__on_net_schedule_list(netif)) )
+ {
+ list_add_tail(&netif->list, &net_schedule_list);
+ netif_get(netif);
+ }
+ spin_unlock(&net_schedule_list_lock);
+}
+
+
+static void tx_skb_release(struct sk_buff *skb);
+
+static inline int init_tx_header(netif_t *netif, u8 *data,
+ unsigned int len, struct net_device *dev)
+{
+ int proto = ntohs(*(unsigned short *)(data + 12));
+
+ memcpy(data + ETH_ALEN, dev->dev_addr, ETH_ALEN);
+
+ switch ( proto )
+ {
+ case ETH_P_ARP:
+ if ( len < 42 ) break;
+ memcpy(data + 22, dev->dev_addr, ETH_ALEN);
+ break;
+ case ETH_P_IP:
+ break;
+ default:
+ /* Unsupported protocols are only allowed to/from NETIF0/0. */
+ if ( (netif->domid != 0) || (netif->handle != 0) )
+ proto = 0;
+ break;
+ }
+ return proto;
+}
+
+
+static void tx_credit_callback(unsigned long data)
+{
+ netif_t *netif = (netif_t *)data;
+
+ netif->remaining_credit = netif->credit_bytes;
+
+ if ( tx_work_exists(netif) )
+ {
+ add_to_net_schedule_list_tail(netif);
+ maybe_schedule_tx_action();
+ }
+}
+
+static void net_tx_action(unsigned long unused)
+{
+ struct list_head *ent;
+ struct sk_buff *skb;
+ netif_t *netif;
+ netif_tx_request_t txreq;
+ u16 pending_idx;
+ unsigned short protocol;
+ NETIF_RING_IDX i;
+ pgprot_t prot = __pgprot(_PAGE_PRESENT|_PAGE_DIRTY|_PAGE_ACCESSED);
+
+ while ( (NR_PENDING_REQS < MAX_PENDING_REQS) &&
+ !list_empty(&net_schedule_list) )
+ {
+ /* Get a netif from the list with work to do. */
+ ent = net_schedule_list.next;
+ netif = list_entry(ent, netif_t, list);
+ netif_get(netif);
+ remove_from_net_schedule_list(netif);
+
+ /* Work to do? */
+ i = netif->tx_req_cons;
+ if ( (i == netif->tx->req_prod) &&
+ ((i-netif->tx_resp_prod) == NETIF_TX_RING_SIZE) )
+ {
+ netif_put(netif);
+ continue;
+ }
+ memcpy(&txreq, &netif->tx->ring[MASK_NETIF_TX_IDX(i)].req,
+ sizeof(txreq));
+ netif->tx_req_cons++;
+
+#if 0
+ /* Credit-based scheduling. */
+ if ( txreq.size > netif->remaining_credit )
+ {
+ s_time_t now = NOW(), next_credit =
+ netif->credit_timeout.expires + MICROSECS(netif->credit_usec);
+ if ( next_credit <= now )
+ {
+ netif->credit_timeout.expires = now;
+ netif->remaining_credit = netif->credit_bytes;
+ }
+ else
+ {
+ netif->remaining_credit = 0;
+ netif->credit_timeout.expires = next_credit;
+ netif->credit_timeout.data = (unsigned long)netif;
+ netif->credit_timeout.function = tx_credit_callback;
+ netif->credit_timeout.cpu = smp_processor_id();
+ add_ac_timer(&netif->credit_timeout);
+ break;
+ }
+ }
+ netif->remaining_credit -= txreq.size;
+#endif
+
+ add_to_net_schedule_list_tail(netif);
+
+ if ( unlikely(txreq.size <= PKT_PROT_LEN) ||
+ unlikely(txreq.size > ETH_FRAME_LEN) )
+ {
+ DPRINTK("Bad packet size: %d\n", txreq.size);
+ make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
+ netif_put(netif);
+ continue;
+ }
+
+ /* No crossing a page boundary as the payload mustn't fragment. */
+ if ( unlikely(((txreq.addr & ~PAGE_MASK) + txreq.size) >= PAGE_SIZE) )
+ {
+ DPRINTK("tx.addr: %lx, size: %u, end: %lu\n",
+ txreq.addr, txreq.size,
+ (txreq.addr &~PAGE_MASK) + txreq.size);
+ make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
+ netif_put(netif);
+ continue;
+ }
+
+ pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
+
+ if ( direct_remap_area_pages(&init_mm,
+ MMAP_VADDR(pending_idx),
+ txreq.addr & PAGE_MASK,
+ PAGE_SIZE, prot, netif->domid) != 0 )
+ {
+ DPRINTK("Bad page frame\n");
+ make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
+ netif_put(netif);
+ continue;
+ }
+
+ if ( unlikely((skb = alloc_skb(PKT_PROT_LEN, GFP_ATOMIC)) == NULL) )
+ {
+ make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
+ netif_put(netif);
+ vmfree_area_pages(MMAP_VADDR(pending_idx), PAGE_SIZE);
+ break;
+ }
+
+ __skb_put(skb, PKT_PROT_LEN);
+ memcpy(skb->data,
+ (void *)(MMAP_VADDR(pending_idx) + (txreq.addr & ~PAGE_MASK)),
+ PKT_PROT_LEN);
+ protocol = __constant_htons(
+ init_tx_header(netif, skb->data, txreq.size, the_dev));
+ if ( protocol == 0 )
+ {
+ make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
+ netif_put(netif);
+ dev_kfree_skb(skb);
+ vmfree_area_pages(MMAP_VADDR(pending_idx), PAGE_SIZE);
+ continue;
+ }
+
+ skb->dev = netif->dev;
+ skb->protocol = eth_type_trans(skb, skb->dev);
+
+ /* Append the packet payload as a fragment. */
+ skb_shinfo(skb)->frags[0].page =
+ &mem_map[txreq.addr >> PAGE_SHIFT];
+ skb_shinfo(skb)->frags[0].size = txreq.size - PKT_PROT_LEN;
+ skb_shinfo(skb)->frags[0].page_offset =
+ (txreq.addr + PKT_PROT_LEN) & ~PAGE_MASK;
+ skb_shinfo(skb)->nr_frags = 1;
+ skb->data_len = txreq.size - PKT_PROT_LEN;
+ skb->len += skb->data_len;
+
+ /* Destructor information. */
+ skb->destructor = tx_skb_release;
+ skb_shinfo(skb)->frags[MAX_SKB_FRAGS-1].page = (struct page *)netif;
+ skb_shinfo(skb)->frags[MAX_SKB_FRAGS-1].size = pending_idx;
+
+ /* Record the transmission so the domain can be billed. */
+ netif->total_packets_sent++;
+ netif->total_bytes_sent += txreq.size;
+
+ pending_cons++;
+ netif_rx(skb);
+ netif->dev->last_rx = jiffies;
+ }
+}
+
+DECLARE_TASKLET(net_tx_tasklet, net_tx_action, 0);
+
+
+static inline void maybe_schedule_tx_action(void)
+{
+ smp_mb();
+ if ( !netif_queue_stopped(the_dev) &&
+ !list_empty(&net_schedule_list) )
+ tasklet_schedule(&net_tx_tasklet);
+}
+
+
+/* Destructor function for tx skbs. */
+static void tx_skb_release(struct sk_buff *skb)
+{
+ int i;
+ netif_t *netif = (netif_t *)skb_shinfo(skb)->frags[MAX_SKB_FRAGS-1].page;
+ u16 pending_idx = skb_shinfo(skb)->frags[MAX_SKB_FRAGS-1].size;
+
+ vmfree_area_pages(MMAP_VADDR(pending_idx), PAGE_SIZE);
+
+ skb_shinfo(skb)->nr_frags = 0;
+
+ spin_lock(&netif->tx_lock);
+ make_tx_response(netif, skb->guest_id, NETIF_RSP_OKAY);
+ spin_unlock(&netif->tx_lock);
+
+ /*
+ * Checks below must happen after the above response is posted. This avoids
+ * a possible race with a guest OS on another CPU.
+ */
+ mb();
+
+ if ( tx_work_exists(netif) )
+ {
+ add_to_net_schedule_list_tail(netif);
+ maybe_schedule_tx_action();
+ }
+
+ netif_put(netif);
+}
+
+
+long flush_bufs_for_netif(netif_t *netif)
+{
+ NETIF_RING_IDX i;
+
+ /* Return any outstanding receive buffers to the guest OS. */
+ spin_lock(&netif->rx_lock);
+ for ( i = netif->rx_req_cons;
+ (i != netif->rx->req_prod) &&
+ ((i-netif->rx_resp_prod) != NETIF_RX_RING_SIZE);
+ i++ )
+ {
+ make_rx_response(netif,
+ netif->rx->ring[MASK_NETIF_RX_IDX(i)].req.id,
+ NETIF_RSP_DROPPED, 0, 0);
+ }
+ netif->rx_req_cons = i;
+ spin_unlock(&netif->rx_lock);
+
+ /*
+ * Flush pending transmit buffers. The guest may still have to wait for
+ * buffers that are queued at a physical NIC.
+ */
+ spin_lock(&netif->tx_lock);
+ for ( i = netif->tx_req_cons;
+ (i != netif->tx->req_prod) &&
+ ((i-netif->tx_resp_prod) != NETIF_TX_RING_SIZE);
+ i++ )
+ {
+ make_tx_response(netif,
+ netif->tx->ring[MASK_NETIF_TX_IDX(i)].req.id,
+ NETIF_RSP_DROPPED);
+ }
+ netif->tx_req_cons = i;
+ spin_unlock(&netif->tx_lock);
+
+ return 0;
+}
+
+void netif_be_int(int irq, void *dev_id, struct pt_regs *regs)
+{
+ netif_t *netif = dev_id;
+ if ( tx_work_exists(netif) )
+ {
+ add_to_net_schedule_list_tail(netif);
+ maybe_schedule_tx_action();
+ }
+}
+
+static void make_tx_response(netif_t *netif,
+ u16 id,
+ s8 st)
+{
+ NETIF_RING_IDX i = netif->tx_resp_prod;
+ netif_tx_response_t *resp;
+
+ resp = &netif->tx->ring[MASK_NETIF_TX_IDX(i)].resp;
+ resp->id = id;
+ resp->status = st;
+ wmb();
+ netif->tx->resp_prod = netif->tx_resp_prod = ++i;
+
+ mb(); /* Update producer before checking event threshold. */
+ if ( i == netif->tx->event )
+ notify_via_evtchn(netif->evtchn);
+}
+
+
+static void make_rx_response(netif_t *netif,
+ u16 id,
+ s8 st,
+ netif_addr_t addr,
+ u16 size)
+{
+ NETIF_RING_IDX i = netif->rx_resp_prod;
+ netif_rx_response_t *resp;
+
+ resp = &netif->rx->ring[MASK_NETIF_RX_IDX(i)].resp;
+ resp->addr = addr;
+ resp->id = id;
+ resp->status = (s16)size;
+ if ( st < 0 )
+ resp->status = (s16)st;
+ wmb();
+ netif->rx->resp_prod = netif->rx_resp_prod = ++i;
+
+ mb(); /* Update producer before checking event threshold. */
+ if ( i == netif->rx->event )
+ notify_via_evtchn(netif->evtchn);
+}
+
static int __init init_module(void)
{
+ netif_interface_init();
+
+ if ( (mmap_vma = get_vm_area(MAX_PENDING_REQS * PAGE_SIZE,
+ VM_IOREMAP)) == NULL )
+ {
+ printk(KERN_WARNING "Could not allocate VMA for netif backend.\n");
+ return -ENOMEM;
+ }
+
+ netif_ctrlif_init();
+
return 0;
}
+
static void cleanup_module(void)
{
}
+
module_init(init_module);
module_exit(cleanup_module);
--- /dev/null
+/******************************************************************************
+ * netif.h
+ *
+ * Unified network-device I/O interface for Xen guest OSes.
+ *
+ * Copyright (c) 2003-2004, Keir Fraser
+ */
+
+#ifndef __SHARED_NETIF_H__
+#define __SHARED_NETIF_H__
+
+typedef unsigned long netif_addr_t;
+
+typedef struct {
+ netif_addr_t addr; /* Machine address of packet. */
+ u16 id; /* Echoed in response message. */
+ u16 size; /* Packet size in bytes. */
+} netif_tx_request_t;
+
+typedef struct {
+ u16 id;
+ s8 status;
+} netif_tx_response_t;
+
+typedef struct {
+ u16 id; /* Echoed in response message. */
+} netif_rx_request_t;
+
+typedef struct {
+ netif_addr_t addr; /* Machine address of packet. */
+ u16 id;
+ s16 status; /* -ve: BLKIF_RSP_* ; +ve: Rx'ed pkt size. */
+} netif_rx_response_t;
+
+/*
+ * We use a special capitalised type name because it is _essential_ that all
+ * arithmetic on indexes is done on an integer type of the correct size.
+ */
+typedef unsigned int NETIF_RING_IDX;
+
+/*
+ * Ring indexes are 'free running'. That is, they are not stored modulo the
+ * size of the ring buffer. The following macros convert a free-running counter
+ * into a value that can directly index a ring-buffer array.
+ */
+#define MASK_NETIF_RX_IDX(_i) ((_i)&(NETIF_RX_RING_SIZE-1))
+#define MASK_NETIF_TX_IDX(_i) ((_i)&(NETIF_TX_RING_SIZE-1))
+
+#define NETIF_TX_RING_SIZE 256
+#define NETIF_RX_RING_SIZE 256
+
+/* This structure must fit in a memory page. */
+typedef struct {
+ union {
+ netif_tx_request_t req;
+ netif_tx_response_t resp;
+ } ring[NETIF_TX_RING_SIZE];
+ /*
+ * Frontend places packets into ring at tx_req_prod.
+ * Frontend receives event when tx_resp_prod passes tx_event.
+ */
+ NETIF_RING_IDX req_prod, resp_prod, event;
+} netif_tx_interface_t;
+
+/* This structure must fit in a memory page. */
+typedef struct {
+ union {
+ netif_rx_request_t req;
+ netif_rx_response_t resp;
+ } ring[NETIF_RX_RING_SIZE];
+ /*
+ * Frontend places empty buffers into ring at rx_req_prod.
+ * Frontend receives event when rx_resp_prod passes rx_event.
+ */
+ NETIF_RING_IDX req_prod, resp_prod, event;
+} netif_rx_interface_t;
+
+/* Descriptor status values */
+#define NETIF_RSP_DROPPED -2
+#define NETIF_RSP_ERROR -1
+#define NETIF_RSP_OKAY 0
+
+#endif
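To make the free-running index convention concrete, here is a minimal sketch of a frontend queueing one transmit request. The private producer variable and the caller's subsequent event-channel notification are assumptions, not part of this header:

    /* Illustrative only: enqueue one tx request using free-running indexes. */
    static void example_queue_tx(netif_tx_interface_t *tx,
                                 NETIF_RING_IDX *req_prod_pvt,
                                 netif_addr_t addr, u16 id, u16 size)
    {
        NETIF_RING_IDX i = (*req_prod_pvt)++;   /* private producer index */
        netif_tx_request_t *req = &tx->ring[MASK_NETIF_TX_IDX(i)].req;

        req->addr = addr;
        req->id   = id;
        req->size = size;

        wmb();        /* make the request visible before the producer update */
        tx->req_prod = *req_prod_pvt;
    }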
{
unsigned long end;
#define MAX_DIRECTMAP_MMU_QUEUE 130
- mmu_update_t u[MAX_DIRECTMAP_MMU_QUEUE], *v;
+ mmu_update_t u[MAX_DIRECTMAP_MMU_QUEUE], *v, *w;
address &= ~PMD_MASK;
end = address + size;
if (address >= end)
BUG();
- reset_buffer:
/* If not I/O mapping then specify General-Purpose Subject Domain (GPS). */
- v = &u[0];
if ( domid != 0 )
{
- v[0].val = (unsigned long)(domid<<16) & ~0xFFFFUL;
- v[0].ptr = (unsigned long)(domid<< 0) & ~0xFFFFUL;
- v[1].val = (unsigned long)(domid>>16) & ~0xFFFFUL;
- v[1].ptr = (unsigned long)(domid>>32) & ~0xFFFFUL;
- v[0].ptr |= MMU_EXTENDED_COMMAND;
- v[0].val |= MMUEXT_SET_SUBJECTDOM_L;
- v[1].ptr |= MMU_EXTENDED_COMMAND;
- v[1].val |= MMUEXT_SET_SUBJECTDOM_H;
- v += 2;
+ u[0].val = (unsigned long)(domid<<16) & ~0xFFFFUL;
+ u[0].ptr = (unsigned long)(domid<< 0) & ~0xFFFFUL;
+ u[1].val = (unsigned long)(domid>>16) & ~0xFFFFUL;
+ u[1].ptr = (unsigned long)(domid>>32) & ~0xFFFFUL;
+ u[0].ptr |= MMU_EXTENDED_COMMAND;
+ u[0].val |= MMUEXT_SET_SUBJECTDOM_L;
+ u[1].ptr |= MMU_EXTENDED_COMMAND;
+ u[1].val |= MMUEXT_SET_SUBJECTDOM_H;
+ v = w = &u[2];
+ }
+ else
+ {
+ v = w = &u[0];
}
do {
+ if ( (v-u) == MAX_DIRECTMAP_MMU_QUEUE )
+ {
+ if ( HYPERVISOR_mmu_update(u, MAX_DIRECTMAP_MMU_QUEUE) < 0 )
+ return -EFAULT;
+ v = w;
+ }
#if 0 /* thanks to new ioctl mmaping interface this is no longer a bug */
if (!pte_none(*pte)) {
printk("direct_remap_area_pte: page already exists\n");
#endif
v->ptr = virt_to_machine(pte);
v->val = (machine_addr & PAGE_MASK) | pgprot_val(prot) | _PAGE_IO;
- if ( ( ++v - u )== MAX_DIRECTMAP_MMU_QUEUE )
- {
- if ( HYPERVISOR_mmu_update(u, MAX_DIRECTMAP_MMU_QUEUE) < 0 )
- return -EFAULT;
- goto reset_buffer;
- }
+ v++;
address += PAGE_SIZE;
machine_addr += PAGE_SIZE;
pte++;
} while (address && (address < end));
- if ( ((v-u) > 2) && (HYPERVISOR_mmu_update(u, v-u) < 0) )
- {
- printk(KERN_WARNING "Failed to ioremap %08lx->%08lx (%08lx)\n",
- end-size, end, machine_addr-size);
- return -EINVAL;
- }
+ if ( ((v-w) != 0) && (HYPERVISOR_mmu_update(u, v-u) < 0) )
+ return -EFAULT;
return 0;
}
pte_t * pte = pte_alloc(mm, pmd, address);
if (!pte)
return -ENOMEM;
-
error = direct_remap_area_pte(pte, address, end - address,
address + machine_addr, prot, domid);
if ( error )
pgd_t * dir;
unsigned long end = address + size;
-/*printk("direct_remap_area_pages va=%08lx ma=%08lx size=%d\n",
- address, machine_addr, size);*/
-
machine_addr -= address;
dir = pgd_offset(mm, address);
flush_cache_all();