}
}
-static void netbk_tx_err(netif_t *netif, RING_IDX end)
+static void netbk_tx_err(netif_t *netif, netif_tx_request_t *txp, RING_IDX end)
{
RING_IDX cons = netif->tx.req_cons;
do {
- netif_tx_request_t *txp = RING_GET_REQUEST(&netif->tx, cons);
make_tx_response(netif, txp, NETIF_RSP_ERROR);
- } while (++cons < end);
+ if (++cons >= end)
+ break;
+ txp = RING_GET_REQUEST(&netif->tx, cons);
+ } while (1);
netif->tx.req_cons = cons;
netif_schedule_work(netif);
netif_put(netif);
{
netif_tx_request_t *first = txp;
RING_IDX cons = netif->tx.req_cons;
- int frags = 1;
+ int frags = 0;
while (txp->flags & NETTXF_more_data) {
if (frags >= work_to_do) {
skb_frag_t *frags = shinfo->frags;
netif_tx_request_t *txp;
unsigned long pending_idx = *((u16 *)skb->data);
- RING_IDX cons = netif->tx.req_cons + 1;
+ RING_IDX cons = netif->tx.req_cons;
int i, start;
/* Skip first skb fragment if it is on same page as header fragment. */
struct sk_buff *skb;
netif_t *netif;
netif_tx_request_t txreq;
+ struct netif_tx_extra txtra;
u16 pending_idx;
RING_IDX i;
gnttab_map_grant_ref_t *mop;
}
netif->remaining_credit -= txreq.size;
+ work_to_do--;
+ netif->tx.req_cons = ++i;
+
+ if (txreq.flags & NETTXF_extra_info) {
+ if (work_to_do-- <= 0) {
+ DPRINTK("Missing extra info\n");
+ netbk_tx_err(netif, &txreq, i);
+ continue;
+ }
+
+ memcpy(&txtra, RING_GET_REQUEST(&netif->tx, i),
+ sizeof(txtra));
+ netif->tx.req_cons = ++i;
+ }
+
ret = netbk_count_requests(netif, &txreq, work_to_do);
if (unlikely(ret < 0)) {
- netbk_tx_err(netif, i - ret);
+ netbk_tx_err(netif, &txreq, i - ret);
continue;
}
i += ret;
- if (unlikely(ret > MAX_SKB_FRAGS + 1)) {
+ if (unlikely(ret > MAX_SKB_FRAGS)) {
DPRINTK("Too many frags\n");
- netbk_tx_err(netif, i);
+ netbk_tx_err(netif, &txreq, i);
continue;
}
if (unlikely(txreq.size < ETH_HLEN)) {
DPRINTK("Bad packet size: %d\n", txreq.size);
- netbk_tx_err(netif, i);
+ netbk_tx_err(netif, &txreq, i);
continue;
}
DPRINTK("txreq.offset: %x, size: %u, end: %lu\n",
txreq.offset, txreq.size,
(txreq.offset &~PAGE_MASK) + txreq.size);
- netbk_tx_err(netif, i);
+ netbk_tx_err(netif, &txreq, i);
continue;
}
pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
data_len = (txreq.size > PKT_PROT_LEN &&
- ret < MAX_SKB_FRAGS + 1) ?
+ ret < MAX_SKB_FRAGS) ?
PKT_PROT_LEN : txreq.size;
skb = alloc_skb(data_len+16, GFP_ATOMIC);
if (unlikely(skb == NULL)) {
DPRINTK("Can't allocate a skb in start_xmit.\n");
- netbk_tx_err(netif, i);
+ netbk_tx_err(netif, &txreq, i);
break;
}
/* Packets passed to netif_rx() must have some headroom. */
skb_reserve(skb, 16);
+ if (txreq.flags & NETTXF_gso) {
+ skb_shinfo(skb)->gso_size = txtra.u.gso.size;
+ skb_shinfo(skb)->gso_segs = txtra.u.gso.segs;
+ skb_shinfo(skb)->gso_type = txtra.u.gso.type;
+ }
+
gnttab_set_map_op(mop, MMAP_VADDR(pending_idx),
GNTMAP_host_map | GNTMAP_readonly,
txreq.gref, netif->domid);
__skb_put(skb, data_len);
- skb_shinfo(skb)->nr_frags = ret - 1;
+ skb_shinfo(skb)->nr_frags = ret;
if (data_len < txreq.size) {
skb_shinfo(skb)->nr_frags++;
skb_shinfo(skb)->frags[0].page =
resp->id = txp->id;
resp->status = st;
+ if (txp->flags & NETTXF_extra_info)
+ RING_GET_RESPONSE(&netif->tx, ++i)->status = NETIF_RSP_NULL;
+
netif->tx.rsp_prod_pvt = ++i;
RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netif->tx, notify);
if (notify)
* the appropriate req_event or rsp_event field in the shared ring.
*/
+/*
+ * This is the 'wire' format for packets:
+ * Request 1: netif_tx_request -- NETTXF_* (any flags)
+ * [Request 2: netif_tx_extra] (only if request 1 has NETTXF_extra_info)
+ * Request 3: netif_tx_request -- NETTXF_more_data
+ * Request 4: netif_tx_request -- NETTXF_more_data
+ * ...
+ * Request N: netif_tx_request -- 0
+ */
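For illustration only (not part of this patch), the wire format above could be produced by a frontend roughly as sketched below for a two-page GSO packet. The helper name, ring plumbing and all parameter values are placeholders; the types, ring macros and NETTXF_* flags are the ones defined in this header and in io/ring.h, and id bookkeeping, ring push and event-channel notification are omitted.

/* Hypothetical helper; names and values are illustrative only. */
static void queue_gso_packet_sketch(netif_tx_front_ring_t *tx,
                                    grant_ref_t gref0, grant_ref_t gref1,
                                    uint16_t total_len, uint16_t tail_len,
                                    uint16_t mss, uint16_t nr_segs,
                                    uint16_t gso_type)
{
        RING_IDX prod = tx->req_prod_pvt;
        struct netif_tx_request *req;
        struct netif_tx_extra *gso;

        /* Request 1: first data chunk; announces GSO metadata and more data. */
        req = RING_GET_REQUEST(tx, prod++);
        req->gref   = gref0;
        req->offset = 0;
        req->size   = total_len;   /* first slot carries the total packet size */
        req->flags  = NETTXF_gso | NETTXF_more_data;  /* NETTXF_gso doubles as NETTXF_extra_info */

        /* Request 2: the extra-info descriptor overlays an ordinary request slot. */
        gso = (struct netif_tx_extra *)RING_GET_REQUEST(tx, prod++);
        gso->u.gso.size = mss;
        gso->u.gso.segs = nr_segs;
        gso->u.gso.type = gso_type;

        /* Request N: last data chunk; no flags set terminates the packet. */
        req = RING_GET_REQUEST(tx, prod++);
        req->gref   = gref1;
        req->offset = 0;
        req->size   = tail_len;    /* bytes carried by this slot */
        req->flags  = 0;

        tx->req_prod_pvt = prod;   /* push and notify omitted */
}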
+
/* Protocol checksum field is blank in the packet (hardware offload)? */
#define _NETTXF_csum_blank (0)
#define NETTXF_csum_blank (1U<<_NETTXF_csum_blank)
#define _NETTXF_data_validated (1)
#define NETTXF_data_validated (1U<<_NETTXF_data_validated)
-/* Packet continues in the request. */
+/* Packet continues in the next request descriptor. */
#define _NETTXF_more_data (2)
#define NETTXF_more_data (1U<<_NETTXF_more_data)
+/* Packet has GSO fields in the following descriptor (netif_tx_extra.u.gso). */
+#define _NETTXF_gso (3)
+#define NETTXF_gso (1U<<_NETTXF_gso)
+
+/* This descriptor is followed by an extra-info descriptor (netif_tx_extra). */
+#define NETTXF_extra_info (NETTXF_gso)
+
struct netif_tx_request {
grant_ref_t gref; /* Reference to buffer page */
uint16_t offset; /* Offset within buffer page */
};
typedef struct netif_tx_request netif_tx_request_t;
+/* This structure needs to fit within netif_tx_request for compatibility. */
+struct netif_tx_extra {
+ union {
+ /* NETTXF_gso: Generic Segmentation Offload. */
+ struct netif_tx_gso {
+ uint16_t size; /* GSO MSS. */
+ uint16_t segs; /* GSO segment count. */
+ uint16_t type; /* GSO type. */
+ } gso;
+ } u;
+};
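Because net_tx_action reads a netif_tx_extra straight out of a request slot (the memcpy above), the "needs to fit within netif_tx_request" constraint is load-bearing. A minimal compile-time guard the backend could carry is sketched here; the function name is hypothetical and BUILD_BUG_ON is the usual Linux helper, not something this patch adds.

#include <linux/kernel.h>       /* BUILD_BUG_ON */

static inline void netif_tx_extra_fits_check(void)
{
        /* An extra-info descriptor must overlay a normal request slot. */
        BUILD_BUG_ON(sizeof(struct netif_tx_extra) >
                     sizeof(struct netif_tx_request));
}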
+
struct netif_tx_response {
uint16_t id;
int16_t status; /* NETIF_RSP_* */
#define NETIF_RSP_DROPPED -2
#define NETIF_RSP_ERROR -1
#define NETIF_RSP_OKAY 0
+/* No response: used for auxiliary requests (e.g., netif_tx_extra). */
+#define NETIF_RSP_NULL 1
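On the frontend side, a TX completion sweep now has to tolerate the NETIF_RSP_NULL status written for the extra-info slot (see make_tx_response above). The sketch below is illustrative only and assumes the ring types from this header; the function name and the elided bookkeeping are not taken from netfront.

/* Hypothetical sweep of the TX response ring; names are illustrative. */
static void sweep_tx_responses_sketch(netif_tx_front_ring_t *tx)
{
        RING_IDX cons, prod;

        prod = tx->sring->rsp_prod;
        rmb();                  /* read responses only after seeing rsp_prod */

        for (cons = tx->rsp_cons; cons != prod; cons++) {
                struct netif_tx_response *rsp = RING_GET_RESPONSE(tx, cons);

                /* Slots that carried a netif_tx_extra complete with no real status. */
                if (rsp->status == NETIF_RSP_NULL)
                        continue;

                /* ... normal path: end grant access, recycle rsp->id, count errors ... */
        }

        tx->rsp_cons = prod;
}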
#endif