* Maximum order of pages to be used for the shared ring between front and
* backend; 4KB page granularity is used.
*/
-unsigned int xen_blkif_max_ring_order = XENBUS_MAX_RING_PAGE_ORDER;
+unsigned int xen_blkif_max_ring_order = XENBUS_MAX_RING_GRANT_ORDER;
module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, S_IRUGO);
MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring");
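For context: the rename is internal only; the user-visible parameter name above stays max_ring_page_order. A rough sketch (not from the patch) of what the order means at the 4KB granularity noted in the comment:

/* Illustration only: an order-N ring spans (1 << N) grants of
 * XEN_PAGE_SIZE bytes, e.g. max_ring_page_order=2 -> 4 * 4KB = 16KB. */
static inline unsigned long example_ring_bytes(unsigned int order)
{
	return (1UL << order) * 4096;	/* XEN_PAGE_SIZE */
}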
if (!xen_domain())
return -ENODEV;
- if (xen_blkif_max_ring_order > XENBUS_MAX_RING_PAGE_ORDER) {
+ if (xen_blkif_max_ring_order > XENBUS_MAX_RING_GRANT_ORDER) {
pr_info("Invalid max_ring_order (%d), will use default max: %d.\n",
- xen_blkif_max_ring_order, XENBUS_MAX_RING_PAGE_ORDER);
- xen_blkif_max_ring_order = XENBUS_MAX_RING_PAGE_ORDER;
+ xen_blkif_max_ring_order, XENBUS_MAX_RING_GRANT_ORDER);
+ xen_blkif_max_ring_order = XENBUS_MAX_RING_GRANT_ORDER;
}
rc = xen_blkif_interface_init();
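A usage sketch, assuming xen-blkback is built as a module; the parameter is read-only at runtime (S_IRUGO), so it must be set at load time:

/* Usage sketch only, not from the patch:
 *
 *   modprobe xen-blkback max_ring_page_order=3
 *
 * Out-of-range values are clamped to XENBUS_MAX_RING_GRANT_ORDER by
 * the check above before the interface is initialised. */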
static int connect_ring(struct backend_info *be)
{
struct xenbus_device *dev = be->dev;
- unsigned int ring_ref[XENBUS_MAX_RING_PAGES];
+ unsigned int ring_ref[XENBUS_MAX_RING_GRANTS];
unsigned int evtchn, nr_grefs, ring_page_order;
unsigned int pers_grants;
char protocol[64] = "";
__CONST_RING_SIZE(blkif, XEN_PAGE_SIZE * (info)->nr_ring_pages)
#define BLK_MAX_RING_SIZE \
- __CONST_RING_SIZE(blkif, XEN_PAGE_SIZE * XENBUS_MAX_RING_PAGES)
+ __CONST_RING_SIZE(blkif, XEN_PAGE_SIZE * XENBUS_MAX_RING_GRANTS)
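For readers new to the ring macros, a paraphrase (not from this patch) of what __CONST_RING_SIZE() in xen/interface/io/ring.h evaluates to:

/* Paraphrase of xen/interface/io/ring.h: subtract the sring header,
 * divide by the slot size, and round down to a power of two:
 *
 *   entries = __RD32((size - offsetof(struct blkif_sring, ring)) /
 *                    sizeof(union blkif_sring_entry));
 *
 * BLK_MAX_RING_SIZE is therefore the slot count of a 16-grant (64KB)
 * shared ring. */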
/*
* ring-ref%i i=(-1UL) would take 11 characters + 'ring-ref' is 8, so 19
int vdevice;
blkif_vdev_t handle;
enum blkif_state connected;
- int ring_ref[XENBUS_MAX_RING_PAGES];
+ int ring_ref[XENBUS_MAX_RING_GRANTS];
unsigned int nr_ring_pages;
struct blkif_front_ring ring;
unsigned int evtchn, irq;
struct blkif_sring *sring;
int err, i;
unsigned long ring_size = info->nr_ring_pages * XEN_PAGE_SIZE;
- grant_ref_t gref[XENBUS_MAX_RING_PAGES];
+ grant_ref_t gref[XENBUS_MAX_RING_GRANTS];
for (i = 0; i < info->nr_ring_pages; i++)
info->ring_ref[i] = GRANT_INVALID_REF;
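/* Each slot starts out as GRANT_INVALID_REF so the teardown path can
 * distinguish granted pages from never-granted ones. */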
if (!xen_domain())
return -ENODEV;
- if (xen_blkif_max_ring_order > XENBUS_MAX_RING_PAGE_ORDER) {
+ if (xen_blkif_max_ring_order > XENBUS_MAX_RING_GRANT_ORDER) {
pr_info("Invalid max_ring_order (%d), will use default max: %d.\n",
- xen_blkif_max_ring_order, XENBUS_MAX_RING_PAGE_ORDER);
+ xen_blkif_max_ring_order, XENBUS_MAX_RING_GRANT_ORDER);
xen_blkif_max_ring_order = 0;
}
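/* Note the asymmetry with the blkback hunk above: on an out-of-range
 * value the frontend falls back to order 0 (a single-grant ring),
 * whereas the backend clamps to XENBUS_MAX_RING_GRANT_ORDER. */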
struct vm_struct *area;
} pv;
struct {
- struct page *pages[XENBUS_MAX_RING_PAGES];
+ struct page *pages[XENBUS_MAX_RING_GRANTS];
void *addr;
} hvm;
};
- grant_handle_t handles[XENBUS_MAX_RING_PAGES];
+ grant_handle_t handles[XENBUS_MAX_RING_GRANTS];
unsigned int nr_handles;
};
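For orientation, the node above records one grant handle per mapped grant, for both the PV (vm_struct + ptes) and HVM (pages) cases. A minimal usage sketch of the entry point these arrays back, assuming the multi-grant xenbus_map_ring_valloc() signature this series builds on:

/* Sketch only, not from the patch: map a frontend's ring grants into
 * the backend's address space. */
static int example_map_ring(struct xenbus_device *dev,
			    grant_ref_t *ring_ref, unsigned int nr_grefs,
			    void **vaddr)
{
	/* On success *vaddr is a contiguous mapping of nr_grefs grants;
	 * it is torn down later with xenbus_unmap_ring_vfree(). */
	return xenbus_map_ring_valloc(dev, ring_ref, nr_grefs, vaddr);
}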
unsigned int flags,
bool *leaked)
{
- struct gnttab_map_grant_ref map[XENBUS_MAX_RING_PAGES];
- struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_PAGES];
+ struct gnttab_map_grant_ref map[XENBUS_MAX_RING_GRANTS];
+ struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
int i, j;
int err = GNTST_okay;
- if (nr_grefs > XENBUS_MAX_RING_PAGES)
+ if (nr_grefs > XENBUS_MAX_RING_GRANTS)
return -EINVAL;
for (i = 0; i < nr_grefs; i++) {
{
struct xenbus_map_node *node;
struct vm_struct *area;
- pte_t *ptes[XENBUS_MAX_RING_PAGES];
- phys_addr_t phys_addrs[XENBUS_MAX_RING_PAGES];
+ pte_t *ptes[XENBUS_MAX_RING_GRANTS];
+ phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
int err = GNTST_okay;
int i;
bool leaked;
*vaddr = NULL;
- if (nr_grefs > XENBUS_MAX_RING_PAGES)
+ if (nr_grefs > XENBUS_MAX_RING_GRANTS)
return -EINVAL;
node = kzalloc(sizeof(*node), GFP_KERNEL);
void *addr;
bool leaked = false;
/* Why do we need two arrays? See comment of __xenbus_map_ring */
- phys_addr_t phys_addrs[XENBUS_MAX_RING_PAGES];
- unsigned long addrs[XENBUS_MAX_RING_PAGES];
+ phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
+ unsigned long addrs[XENBUS_MAX_RING_GRANTS];
- if (nr_grefs > XENBUS_MAX_RING_PAGES)
+ if (nr_grefs > XENBUS_MAX_RING_GRANTS)
return -EINVAL;
*vaddr = NULL;
unsigned int nr_grefs, grant_handle_t *handles,
unsigned long *vaddrs, bool *leaked)
{
- phys_addr_t phys_addrs[XENBUS_MAX_RING_PAGES];
+ phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
int i;
- if (nr_grefs > XENBUS_MAX_RING_PAGES)
+ if (nr_grefs > XENBUS_MAX_RING_GRANTS)
return -EINVAL;
for (i = 0; i < nr_grefs; i++)
static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
{
struct xenbus_map_node *node;
- struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_PAGES];
+ struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
unsigned int level;
int i;
bool leaked = false;
int rv;
struct xenbus_map_node *node;
void *addr;
- unsigned long addrs[XENBUS_MAX_RING_PAGES];
+ unsigned long addrs[XENBUS_MAX_RING_GRANTS];
int i;
spin_lock(&xenbus_valloc_lock);
grant_handle_t *handles, unsigned int nr_handles,
unsigned long *vaddrs)
{
- struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_PAGES];
+ struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
int i;
int err;
- if (nr_handles > XENBUS_MAX_RING_PAGES)
+ if (nr_handles > XENBUS_MAX_RING_GRANTS)
return -EINVAL;
for (i = 0; i < nr_handles; i++)
#include <xen/interface/io/xenbus.h>
#include <xen/interface/io/xs_wire.h>
-#define XENBUS_MAX_RING_PAGE_ORDER 4
-#define XENBUS_MAX_RING_PAGES (1U << XENBUS_MAX_RING_PAGE_ORDER)
+#define XENBUS_MAX_RING_GRANT_ORDER 4
+#define XENBUS_MAX_RING_GRANTS (1U << XENBUS_MAX_RING_GRANT_ORDER)
#define INVALID_GRANT_HANDLE (~0U)
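Worked out, the renamed constants bound the ring as follows:

/* With the values above: order 4 gives XENBUS_MAX_RING_GRANTS == 16,
 * so the largest negotiable shared ring spans 16 grants, i.e. 64KB at
 * the 4KB granularity these drivers assume. */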
/* Register callback to watch this node. */