* mmap_alloc is initialised to 2 and should be adjustable on the fly via
* sysfs.
*/
-#define MAX_DYNAMIC_MEM 64
-#define MAX_PENDING_REQS 64
+#define BLK_RING_SIZE __RING_SIZE((blkif_sring_t *)0, PAGE_SIZE)
+#define MAX_DYNAMIC_MEM BLK_RING_SIZE
+#define MAX_PENDING_REQS BLK_RING_SIZE
#define MMAP_PAGES (MAX_PENDING_REQS * BLKIF_MAX_SEGMENTS_PER_REQUEST)
#define MMAP_VADDR(_start, _req,_seg) \
(_start + \
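For reference, __RING_SIZE comes from Xen's public io/ring.h and evaluates to the number of entries that fit in one page after the shared-ring indices, rounded down to a power of two, so the defines above now track the real ring depth instead of a hard-coded 64. A hedged back-of-the-envelope check, assuming the classic blkif ABI on a 4 KiB-page x86_64 build:

	/* Sketch only -- exact sizes depend on the blkif ABI being built:
	 *   blkif_sring_t header ........ 64 bytes
	 *   one union entry (request) ... 112 bytes
	 *   (4096 - 64) / 112 = 36  ->  rounded down to a power of two = 32
	 * so MAX_PENDING_REQS and MAX_DYNAMIC_MEM drop from 64 to the 32
	 * slots the shared ring can actually carry. */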
grant_handle_t kernel;
grant_handle_t user;
};
+#define INVALID_GRANT_HANDLE 0xFFFF
static struct grant_handle_pair
pending_grant_handles[MAX_DYNAMIC_MEM][MMAP_PAGES];
#define BLKTAP_INVALID_HANDLE(_g) \
- (((_g->kernel) == 0xFFFF) && ((_g->user) == 0xFFFF))
+ (((_g->kernel) == INVALID_GRANT_HANDLE) && \
+ ((_g->user) == INVALID_GRANT_HANDLE))
#define BLKTAP_INVALIDATE_HANDLE(_g) do { \
- (_g)->kernel = 0xFFFF; (_g)->user = 0xFFFF; \
+ (_g)->kernel = INVALID_GRANT_HANDLE; (_g)->user = INVALID_GRANT_HANDLE; \
} while(0)
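One usage sketch (not part of the patch): with the sentinel named, the whole handle table can be reset with the macro instead of open-coded 0xFFFF stores. blktap_reset_grant_handles() below is a hypothetical helper built only from names already defined above:

	static void blktap_reset_grant_handles(void)
	{
		int i, j;

		/* Mark every slot free before any grant is mapped. */
		for (i = 0; i < MAX_DYNAMIC_MEM; i++)
			for (j = 0; j < MMAP_PAGES; j++)
				BLKTAP_INVALIDATE_HANDLE(
					&pending_grant_handles[i][j]);
	}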
info->user_vstart = info->rings_vstart + (RING_PAGES << PAGE_SHIFT);
/* Map the ring pages to the start of the region and reserve it. */
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
if (remap_pfn_range(vma, vma->vm_start,
__pa(info->ufe_ring.sring) >> PAGE_SHIFT,
PAGE_SIZE, vma->vm_page_prot)) {
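Dropping pgprot_noncached() is the point of this hunk: the shared ring is ordinary RAM handed to remap_pfn_range(), and forcing an uncached protection on it would create a cache-attribute alias of normal memory for no benefit -- pgprot_noncached() is meant for MMIO-style mappings. A hedged before/after sketch, with pfn standing in for __pa(info->ufe_ring.sring) >> PAGE_SHIFT:

	/* before: uncached alias of normal RAM -- wrong for a ring page */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	remap_pfn_range(vma, vma->vm_start, pfn, PAGE_SIZE, vma->vm_page_prot);

	/* after: keep the default, cacheable protection of the VMA */
	remap_pfn_range(vma, vma->vm_start, pfn, PAGE_SIZE, vma->vm_page_prot);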
khandle = &pending_handle(mmap_idx, k_idx, i);
- if (khandle->kernel != 0xFFFF) {
+ if (khandle->kernel != INVALID_GRANT_HANDLE) {
gnttab_set_unmap_op(&unmap[invcount],
idx_to_kaddr(mmap_idx, k_idx, i),
GNTMAP_host_map, khandle->kernel);
invcount++;
}
- if (khandle->user != 0xFFFF) {
+ if (khandle->user != INVALID_GRANT_HANDLE) {
if (create_lookup_pte_addr(
info->vma->vm_mm,
MMAP_VADDR(info->user_vstart, u_idx, i),
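Downstream of this loop (outside the hunk) the collected operations are flushed in one batch; a sketch of how such a batch is normally submitted in Xen Linux code of this vintage, reusing the unmap[]/invcount names from above:

	/* Sketch: one hypercall unmaps every handle queued above. */
	if (invcount) {
		ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
						unmap, invcount);
		BUG_ON(ret);
	}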
/* Check we have space on user ring - should never fail. */
usr_idx = GET_NEXT_REQ(info->idx_map);
- if (usr_idx == INVALID_REQ)
+ if (usr_idx == INVALID_REQ) {
+ BUG();
goto fail_response;
+ }
/* Check that number of segments is sane. */
nseg = req->nr_segments;
unsigned long uvaddr;
unsigned long kvaddr;
uint64_t ptep;
- struct page *page;
uint32_t flags;
uvaddr = MMAP_VADDR(info->user_vstart, usr_idx, i);
kvaddr = idx_to_kaddr(mmap_idx, pending_idx, i);
- page = virt_to_page(kvaddr);
- sector = req->sector_number + (8*i);
+ sector = req->sector_number + ((PAGE_SIZE / 512) * i);
if( (blkif->sectors > 0) && (sector >= blkif->sectors) ) {
WPRINTK("BLKTAP: Sector request greater"
"than size\n");
BLKIF_OP_WRITE ? "WRITE" : "READ"),
(long long unsigned) sector,
(long long unsigned) sector>>9,
- blkif->sectors);
+ (long long unsigned) blkif->sectors);
}
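The (PAGE_SIZE / 512) factor replaces a magic 8 with the number of 512-byte sectors per segment page, which is only 8 on 4 KiB-page builds. A worked example under that assumption:

	/* PAGE_SIZE = 4096, so 4096 / 512 = 8 sectors per page:
	 *   a request at sector_number 1000 has segment 0 at sector 1000,
	 *   segment 1 at 1008, segment 2 at 1016, ..., segment i at
	 *   1000 + 8*i.  On a 64 KiB-page build the stride becomes 128,
	 *   which the old hard-coded 8 silently got wrong. */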
flags = GNTMAP_host_map;
WPRINTK("invalid kernel buffer -- "
"could not remap it\n");
ret |= 1;
- map[i].handle = 0xFFFF;
+ map[i].handle = INVALID_GRANT_HANDLE;
}
if (unlikely(map[i+1].status != 0)) {
WPRINTK("invalid user buffer -- "
"could not remap it\n");
ret |= 1;
- map[i+1].handle = 0xFFFF;
+ map[i+1].handle = INVALID_GRANT_HANDLE;
}
pending_handle(mmap_idx, pending_idx, i/2).kernel
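For context, the i/2 indexing mirrors how the (not shown) mapping pass fills map[]: each segment queues two grant-map operations, a kernel mapping in the even slot and a user mapping in the odd one, so segment i/2 owns map[i], map[i+1] and a single grant_handle_pair. A hedged sketch of that pass, with gref, ptep, kvaddr and flags assumed from context:

	/* Sketch: two ops per segment -- even = kernel, odd = user PTE. */
	gnttab_set_map_op(&map[i], kvaddr, flags,
			  req->seg[i/2].gref, blkif->domid);
	gnttab_set_map_op(&map[i+1], ptep,
			  flags | GNTMAP_application_map | GNTMAP_contains_pte,
			  req->seg[i/2].gref, blkif->domid);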
#include <sys/types.h>
#include <unistd.h>
-#define BLK_RING_SIZE __RING_SIZE((blkif_sring_t *)0, getpagesize())
+#define BLK_RING_SIZE __RING_SIZE((blkif_sring_t *)0, XC_PAGE_SIZE)
/* size of the extra VMA area to map in attached pages. */
#define BLKTAP_VMA_PAGES BLK_RING_SIZE
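Switching from getpagesize() to XC_PAGE_SIZE (the fixed 4 KiB page constant from xenctrl.h) presumably matters because BLK_RING_SIZE now feeds compile-time contexts such as static array bounds, where a runtime function call cannot appear; it also keeps the userspace value in lock-step with the kernel's PAGE_SIZE-based computation. A minimal sketch:

	/* OK: XC_PAGE_SIZE is a compile-time constant. */
	static void *vma_page_slots[BLKTAP_VMA_PAGES];	/* hypothetical array */

	/* Would not compile as a static bound with the old definition:
	 *   #define BLK_RING_SIZE __RING_SIZE((blkif_sring_t *)0, getpagesize())
	 */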
( arg == BLKTAP_MODE_INTERPOSE ) );
}
-#define MAX_REQUESTS 64
+#define MAX_REQUESTS BLK_RING_SIZE
#define BLKTAP_IOCTL_KICK 1
-#define MAX_PENDING_REQS 64
+#define MAX_PENDING_REQS BLK_RING_SIZE
#define BLKTAP_DEV_DIR "/dev/xen"
#define BLKTAP_DEV_NAME "blktap"
#define BLKTAP_DEV_MINOR 0
/* Arbitrary values, must match the underlying driver... */
-#define MAX_PENDING_REQS 64
#define MAX_TAP_DEV 100
/* Accessing attached data page mappings */