static void print_stats(blkif_t *blkif)
{
- printk(KERN_DEBUG "%s: oo %3d | rd %4d | wr %4d\n",
+ printk(KERN_DEBUG "%s: oo %3d | rd %4d | wr %4d | br %4d\n",
current->comm, blkif->st_oo_req,
- blkif->st_rd_req, blkif->st_wr_req);
+ blkif->st_rd_req, blkif->st_wr_req, blkif->st_br_req);
blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
blkif->st_rd_req = 0;
	blkif->st_wr_req = 0;
+	blkif->st_br_req = 0;
* COMPLETION CALLBACK -- Called as bh->b_end_io()
*/
-static void __end_block_io_op(pending_req_t *pending_req, int uptodate)
+static void __end_block_io_op(pending_req_t *pending_req, int error)
{
/* An error fails the entire request. */
- if (!uptodate) {
- DPRINTK("Buffer not up-to-date at end of operation\n");
+ if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
+ (error == -EOPNOTSUPP)) {
+ DPRINTK("blkback: write barrier op failed, not supported\n");
+ blkback_barrier(XBT_NIL, pending_req->blkif->be, 0);
+ pending_req->status = BLKIF_RSP_EOPNOTSUPP;
+ } else if (error) {
+ DPRINTK("Buffer not up-to-date at end of operation, "
+ "error=%d\n", error);
pending_req->status = BLKIF_RSP_ERROR;
}
{
if (bio->bi_size != 0)
return 1;
- __end_block_io_op(bio->bi_private, !error);
+ __end_block_io_op(bio->bi_private, error);
bio_put(bio);
return error;
}
blkif->st_rd_req++;
dispatch_rw_block_io(blkif, &req, pending_req);
break;
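+	/* Barrier writes are counted separately, then handled like writes. */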
+ case BLKIF_OP_WRITE_BARRIER:
+ blkif->st_br_req++;
+ /* fall through */
case BLKIF_OP_WRITE:
blkif->st_wr_req++;
dispatch_rw_block_io(blkif, &req, pending_req);
pending_req_t *pending_req)
{
extern void ll_rw_block(int rw, int nr, struct buffer_head * bhs[]);
- int operation = (req->operation == BLKIF_OP_WRITE) ? WRITE : READ;
struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
struct phys_req preq;
struct {
unsigned int nseg;
struct bio *bio = NULL, *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST];
int ret, i, nbio = 0;
+ int operation;
+
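+	/* Translate the blkif request type into a block-layer operation. */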
+ switch (req->operation) {
+ case BLKIF_OP_READ:
+ operation = READ;
+ break;
+ case BLKIF_OP_WRITE:
+ operation = WRITE;
+ break;
+ case BLKIF_OP_WRITE_BARRIER:
+ operation = WRITE_BARRIER;
+ break;
+ default:
+ operation = 0; /* make gcc happy */
+ BUG();
+ }
/* Check that number of segments is sane. */
nseg = req->nr_segments;
pending_req->blkif = blkif;
pending_req->id = req->id;
- pending_req->operation = operation;
+ pending_req->operation = req->operation;
pending_req->status = BLKIF_RSP_OKAY;
pending_req->nr_pages = nseg;
preq.nr_sects += seg[i].nsec;
flags = GNTMAP_host_map;
- if ( operation == WRITE )
+ if (operation != READ)
flags |= GNTMAP_readonly;
gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
req->seg[i].gref, blkif->domid);
#include <xen/interface/io/ring.h>
#include <xen/gnttab.h>
#include <xen/driver_util.h>
+#include <xen/xenbus.h>
#define DPRINTK(_f, _a...) \
pr_debug("(file=%s, line=%d) " _f, \
int st_rd_req;
int st_wr_req;
int st_oo_req;
+ int st_br_req;
wait_queue_head_t waiting_to_free;
irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs);
int blkif_schedule(void *arg);
+int blkback_barrier(struct xenbus_transaction xbt,
+ struct backend_info *be, int state);
+
#endif /* __BLKIF__BACKEND__COMMON_H__ */
*/
#include "common.h"
-#include <xen/xenbus.h>
#define vbd_sz(_v) ((_v)->bdev->bd_part ? \
(_v)->bdev->bd_part->nr_sects : (_v)->bdev->bd_disk->capacity)
struct vbd *vbd = &blkif->vbd;
int rc = -EACCES;
- if ((operation == WRITE) && vbd->readonly)
+ if ((operation != READ) && vbd->readonly)
goto out;
if (unlikely((req->sector_number + req->nr_sects) > vbd_sz(vbd)))
#include <stdarg.h>
#include <linux/module.h>
#include <linux/kthread.h>
-#include <xen/xenbus.h>
#include "common.h"
#undef DPRINTK
VBD_SHOW(oo_req, "%d\n", be->blkif->st_oo_req);
VBD_SHOW(rd_req, "%d\n", be->blkif->st_rd_req);
VBD_SHOW(wr_req, "%d\n", be->blkif->st_wr_req);
+VBD_SHOW(br_req, "%d\n", be->blkif->st_br_req);
static struct attribute *vbdstat_attrs[] = {
&dev_attr_oo_req.attr,
&dev_attr_rd_req.attr,
&dev_attr_wr_req.attr,
+ &dev_attr_br_req.attr,
NULL
};
return 0;
}
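+
+/*
+ * Advertise (state = 1) or retract (state = 0) write-barrier support
+ * by writing the "feature-barrier" node in the backend's xenstore
+ * directory.
+ */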
+int blkback_barrier(struct xenbus_transaction xbt,
+ struct backend_info *be, int state)
+{
+ struct xenbus_device *dev = be->dev;
+ int err;
+
+ err = xenbus_printf(xbt, dev->nodename, "feature-barrier",
+ "%d", state);
+ if (err)
+ xenbus_dev_fatal(dev, err, "writing feature-barrier");
+
+ return err;
+}
/**
* Entry point to this code when a new device is created. Allocate the basic
/* Supply the information about the device the frontend needs */
again:
err = xenbus_transaction_start(&xbt);
-
if (err) {
xenbus_dev_fatal(dev, err, "starting transaction");
return;
}
+ err = blkback_barrier(xbt, be, 1);
+ if (err)
+ goto abort;
+
err = xenbus_printf(xbt, dev->nodename, "sectors", "%lu",
vbd_size(&be->blkif->vbd));
if (err) {
return;
}
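+	/*
+	 * The backend advertises write-barrier support through the
+	 * "feature-barrier" node; treat a missing node as no support.
+	 */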
+ err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+ "feature-barrier", "%lu", &info->feature_barrier,
+ NULL);
+ if (err)
+ info->feature_barrier = 0;
+
err = xlvbd_add(sectors, info->vdevice, binfo, sector_size, info);
if (err) {
xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
info->shadow[id].request = (unsigned long)req;
ring_req->id = id;
- ring_req->operation = rq_data_dir(req) ?
- BLKIF_OP_WRITE : BLKIF_OP_READ;
ring_req->sector_number = (blkif_sector_t)req->sector;
ring_req->handle = info->handle;
+ ring_req->operation = rq_data_dir(req) ?
+ BLKIF_OP_WRITE : BLKIF_OP_READ;
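+	/* Turn block-layer barrier requests into explicit barrier ops. */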
+ if (blk_barrier_rq(req))
+ ring_req->operation = BLKIF_OP_WRITE_BARRIER;
+
ring_req->nr_segments = 0;
rq_for_each_bio (bio, req) {
bio_for_each_segment (bvec, bio, idx) {
RING_IDX i, rp;
unsigned long flags;
struct blkfront_info *info = (struct blkfront_info *)dev_id;
+ int uptodate;
spin_lock_irqsave(&blkif_io_lock, flags);
ADD_ID_TO_FREELIST(info, id);
+ uptodate = (bret->status == BLKIF_RSP_OKAY);
switch (bret->operation) {
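+		/*
+		 * A backend that cannot honour barriers completes them with
+		 * BLKIF_RSP_EOPNOTSUPP: fail this request with -EOPNOTSUPP
+		 * and disable barriers from now on.
+		 */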
+ case BLKIF_OP_WRITE_BARRIER:
+ if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
+ printk("blkfront: %s: write barrier op failed\n",
+ info->gd->disk_name);
+ uptodate = -EOPNOTSUPP;
+ info->feature_barrier = 0;
+ xlvbd_barrier(info);
+ }
+ /* fall through */
case BLKIF_OP_READ:
case BLKIF_OP_WRITE:
if (unlikely(bret->status != BLKIF_RSP_OKAY))
DPRINTK("Bad return from blkdev data "
"request: %x\n", bret->status);
- ret = end_that_request_first(
- req, (bret->status == BLKIF_RSP_OKAY),
+ ret = end_that_request_first(req, uptodate,
req->hard_nr_sectors);
BUG_ON(ret);
- end_that_request_last(
- req, (bret->status == BLKIF_RSP_OKAY));
+ end_that_request_last(req, uptodate);
break;
default:
BUG();
struct gnttab_free_callback callback;
struct blk_shadow shadow[BLK_RING_SIZE];
unsigned long shadow_free;
+ int feature_barrier;
/**
* The number of people holding this device open. We won't allow a
int xlvbd_add(blkif_sector_t capacity, int device,
u16 vdisk_info, u16 sector_size, struct blkfront_info *info);
void xlvbd_del(struct blkfront_info *info);
+int xlvbd_barrier(struct blkfront_info *info);
#endif /* __XEN_DRIVERS_BLOCK_H__ */
}
info->rq = gd->queue;
+ info->gd = gd;
+
+ if (info->feature_barrier)
+ xlvbd_barrier(info);
if (vdisk_info & VDISK_READONLY)
set_disk_ro(gd, 1);
if (vdisk_info & VDISK_CDROM)
gd->flags |= GENHD_FL_CD;
- info->gd = gd;
-
return 0;
out:
blk_cleanup_queue(info->rq);
info->rq = NULL;
}
+
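+/*
+ * Configure the request queue's barrier handling: drain around barrier
+ * requests while the backend supports them, none otherwise.
+ */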
+int
+xlvbd_barrier(struct blkfront_info *info)
+{
+ int err;
+
+	err = blk_queue_ordered(info->rq, info->feature_barrier ?
+				QUEUE_ORDERED_DRAIN : QUEUE_ORDERED_NONE,
+				NULL);
+ if (err)
+ return err;
+ printk("blkfront: %s: barriers %s\n",
+ info->gd->disk_name, info->feature_barrier ? "enabled" : "disabled");
+ return 0;
+}