Diffstat (limited to 'block/gluster.c')
-rw-r--r--  block/gluster.c | 543
1 file changed, 319 insertions(+), 224 deletions(-)
diff --git a/block/gluster.c b/block/gluster.c
index 645b7f12a..883608564 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -3,43 +3,26 @@
*
* Copyright (C) 2012 Bharata B Rao <bharata@linux.vnet.ibm.com>
*
- * Pipe handling mechanism in AIO implementation is derived from
- * block/rbd.c. Hence,
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
*
- * Copyright (C) 2010-2011 Christian Brunner <chb@muc.de>,
- * Josh Durgin <josh.durgin@dreamhost.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2. See
- * the COPYING file in the top-level directory.
- *
- * Contributions after 2012-01-13 are licensed under the terms of the
- * GNU GPL, version 2 or (at your option) any later version.
*/
#include <glusterfs/api/glfs.h>
#include "block/block_int.h"
-#include "qemu/sockets.h"
#include "qemu/uri.h"
typedef struct GlusterAIOCB {
- BlockDriverAIOCB common;
int64_t size;
int ret;
- bool *finished;
QEMUBH *bh;
+ Coroutine *coroutine;
} GlusterAIOCB;
typedef struct BDRVGlusterState {
struct glfs *glfs;
- int fds[2];
struct glfs_fd *fd;
- int qemu_aio_count;
- int event_reader_pos;
- GlusterAIOCB *event_acb;
} BDRVGlusterState;
-#define GLUSTER_FD_READ 0
-#define GLUSTER_FD_WRITE 1
-
typedef struct GlusterConf {
char *server;
int port;
@@ -50,11 +33,13 @@ typedef struct GlusterConf {
static void qemu_gluster_gconf_free(GlusterConf *gconf)
{
- g_free(gconf->server);
- g_free(gconf->volname);
- g_free(gconf->image);
- g_free(gconf->transport);
- g_free(gconf);
+ if (gconf) {
+ g_free(gconf->server);
+ g_free(gconf->volname);
+ g_free(gconf->image);
+ g_free(gconf->transport);
+ g_free(gconf);
+ }
}
static int parse_volume_options(GlusterConf *gconf, char *path)
@@ -95,7 +80,7 @@ static int parse_volume_options(GlusterConf *gconf, char *path)
* 'server' specifies the server where the volume file specification for
* the given volume resides. This can be either hostname, ipv4 address
* or ipv6 address. ipv6 address needs to be within square brackets [ ].
- * If transport type is 'unix', then 'server' field should not be specifed.
+ * If transport type is 'unix', then 'server' field should not be specified.
* The 'socket' field needs to be populated with the path to unix domain
* socket.
*
@@ -132,7 +117,7 @@ static int qemu_gluster_parseuri(GlusterConf *gconf, const char *filename)
}
/* transport */
- if (!strcmp(uri->scheme, "gluster")) {
+ if (!uri->scheme || !strcmp(uri->scheme, "gluster")) {
gconf->transport = g_strdup("tcp");
} else if (!strcmp(uri->scheme, "gluster+tcp")) {
gconf->transport = g_strdup("tcp");
@@ -168,7 +153,7 @@ static int qemu_gluster_parseuri(GlusterConf *gconf, const char *filename)
}
gconf->server = g_strdup(qp->p[0].value);
} else {
- gconf->server = g_strdup(uri->server);
+ gconf->server = g_strdup(uri->server ? uri->server : "localhost");
gconf->port = uri->port;
}
@@ -180,7 +165,8 @@ out:
return ret;
}
-static struct glfs *qemu_gluster_init(GlusterConf *gconf, const char *filename)
+static struct glfs *qemu_gluster_init(GlusterConf *gconf, const char *filename,
+ Error **errp)
{
struct glfs *glfs = NULL;
int ret;
@@ -188,8 +174,8 @@ static struct glfs *qemu_gluster_init(GlusterConf *gconf, const char *filename)
ret = qemu_gluster_parseuri(gconf, filename);
if (ret < 0) {
- error_report("Usage: file=gluster[+transport]://[server[:port]]/"
- "volname/image[?socket=...]");
+ error_setg(errp, "Usage: file=gluster[+transport]://[server[:port]]/"
+ "volname/image[?socket=...]");
errno = -ret;
goto out;
}
@@ -216,9 +202,11 @@ static struct glfs *qemu_gluster_init(GlusterConf *gconf, const char *filename)
ret = glfs_init(glfs);
if (ret) {
- error_report("Gluster connection failed for server=%s port=%d "
- "volume=%s image=%s transport=%s", gconf->server, gconf->port,
- gconf->volname, gconf->image, gconf->transport);
+ error_setg_errno(errp, errno,
+ "Gluster connection failed for server=%s port=%d "
+ "volume=%s image=%s transport=%s", gconf->server,
+ gconf->port, gconf->volname, gconf->image,
+ gconf->transport);
goto out;
}
return glfs;
@@ -232,54 +220,32 @@ out:
return NULL;
}
-static void qemu_gluster_complete_aio(GlusterAIOCB *acb, BDRVGlusterState *s)
+static void qemu_gluster_complete_aio(void *opaque)
{
- int ret;
- bool *finished = acb->finished;
- BlockDriverCompletionFunc *cb = acb->common.cb;
- void *opaque = acb->common.opaque;
-
- if (!acb->ret || acb->ret == acb->size) {
- ret = 0; /* Success */
- } else if (acb->ret < 0) {
- ret = acb->ret; /* Read/Write failed */
- } else {
- ret = -EIO; /* Partial read/write - fail it */
- }
+ GlusterAIOCB *acb = (GlusterAIOCB *)opaque;
- s->qemu_aio_count--;
- qemu_aio_release(acb);
- cb(opaque, ret);
- if (finished) {
- *finished = true;
- }
+ qemu_bh_delete(acb->bh);
+ acb->bh = NULL;
+ qemu_coroutine_enter(acb->coroutine, NULL);
}
-static void qemu_gluster_aio_event_reader(void *opaque)
+/*
+ * AIO callback routine called from GlusterFS thread.
+ */
+static void gluster_finish_aiocb(struct glfs_fd *fd, ssize_t ret, void *arg)
{
- BDRVGlusterState *s = opaque;
- ssize_t ret;
-
- do {
- char *p = (char *)&s->event_acb;
-
- ret = read(s->fds[GLUSTER_FD_READ], p + s->event_reader_pos,
- sizeof(s->event_acb) - s->event_reader_pos);
- if (ret > 0) {
- s->event_reader_pos += ret;
- if (s->event_reader_pos == sizeof(s->event_acb)) {
- s->event_reader_pos = 0;
- qemu_gluster_complete_aio(s->event_acb, s);
- }
- }
- } while (ret < 0 && errno == EINTR);
-}
+ GlusterAIOCB *acb = (GlusterAIOCB *)arg;
-static int qemu_gluster_aio_flush_cb(void *opaque)
-{
- BDRVGlusterState *s = opaque;
+ if (!ret || ret == acb->size) {
+ acb->ret = 0; /* Success */
+ } else if (ret < 0) {
+ acb->ret = ret; /* Read/Write failed */
+ } else {
+ acb->ret = -EIO; /* Partial read/write - fail it */
+ }
- return (s->qemu_aio_count > 0);
+ acb->bh = qemu_bh_new(qemu_gluster_complete_aio, acb);
+ qemu_bh_schedule(acb->bh);
}
/* TODO Convert to fine grained options */
@@ -296,60 +262,57 @@ static QemuOptsList runtime_opts = {
},
};
+static void qemu_gluster_parse_flags(int bdrv_flags, int *open_flags)
+{
+ assert(open_flags != NULL);
+
+ *open_flags |= O_BINARY;
+
+ if (bdrv_flags & BDRV_O_RDWR) {
+ *open_flags |= O_RDWR;
+ } else {
+ *open_flags |= O_RDONLY;
+ }
+
+ if ((bdrv_flags & BDRV_O_NOCACHE)) {
+ *open_flags |= O_DIRECT;
+ }
+}
+
static int qemu_gluster_open(BlockDriverState *bs, QDict *options,
- int bdrv_flags)
+ int bdrv_flags, Error **errp)
{
BDRVGlusterState *s = bs->opaque;
- int open_flags = O_BINARY;
+ int open_flags = 0;
int ret = 0;
GlusterConf *gconf = g_malloc0(sizeof(GlusterConf));
QemuOpts *opts;
Error *local_err = NULL;
const char *filename;
- opts = qemu_opts_create_nofail(&runtime_opts);
+ opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort);
qemu_opts_absorb_qdict(opts, options, &local_err);
- if (error_is_set(&local_err)) {
- qerror_report_err(local_err);
- error_free(local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
ret = -EINVAL;
goto out;
}
filename = qemu_opt_get(opts, "filename");
-
- s->glfs = qemu_gluster_init(gconf, filename);
+ s->glfs = qemu_gluster_init(gconf, filename, errp);
if (!s->glfs) {
ret = -errno;
goto out;
}
- if (bdrv_flags & BDRV_O_RDWR) {
- open_flags |= O_RDWR;
- } else {
- open_flags |= O_RDONLY;
- }
-
- if ((bdrv_flags & BDRV_O_NOCACHE)) {
- open_flags |= O_DIRECT;
- }
+ qemu_gluster_parse_flags(bdrv_flags, &open_flags);
s->fd = glfs_open(s->glfs, gconf->image, open_flags);
if (!s->fd) {
ret = -errno;
- goto out;
}
- ret = qemu_pipe(s->fds);
- if (ret < 0) {
- ret = -errno;
- goto out;
- }
- fcntl(s->fds[GLUSTER_FD_READ], F_SETFL, O_NONBLOCK);
- qemu_aio_set_fd_handler(s->fds[GLUSTER_FD_READ],
- qemu_gluster_aio_event_reader, NULL, qemu_gluster_aio_flush_cb, s);
-
out:
qemu_opts_del(opts);
qemu_gluster_gconf_free(gconf);
@@ -365,24 +328,180 @@ out:
return ret;
}
+typedef struct BDRVGlusterReopenState {
+ struct glfs *glfs;
+ struct glfs_fd *fd;
+} BDRVGlusterReopenState;
+
+
+static int qemu_gluster_reopen_prepare(BDRVReopenState *state,
+ BlockReopenQueue *queue, Error **errp)
+{
+ int ret = 0;
+ BDRVGlusterReopenState *reop_s;
+ GlusterConf *gconf = NULL;
+ int open_flags = 0;
+
+ assert(state != NULL);
+ assert(state->bs != NULL);
+
+ state->opaque = g_malloc0(sizeof(BDRVGlusterReopenState));
+ reop_s = state->opaque;
+
+ qemu_gluster_parse_flags(state->flags, &open_flags);
+
+ gconf = g_malloc0(sizeof(GlusterConf));
+
+ reop_s->glfs = qemu_gluster_init(gconf, state->bs->filename, errp);
+ if (reop_s->glfs == NULL) {
+ ret = -errno;
+ goto exit;
+ }
+
+ reop_s->fd = glfs_open(reop_s->glfs, gconf->image, open_flags);
+ if (reop_s->fd == NULL) {
+ /* reop_s->glfs will be cleaned up in _abort */
+ ret = -errno;
+ goto exit;
+ }
+
+exit:
+ /* state->opaque will be freed in either the _abort or _commit */
+ qemu_gluster_gconf_free(gconf);
+ return ret;
+}
+
+static void qemu_gluster_reopen_commit(BDRVReopenState *state)
+{
+ BDRVGlusterReopenState *reop_s = state->opaque;
+ BDRVGlusterState *s = state->bs->opaque;
+
+
+ /* close the old */
+ if (s->fd) {
+ glfs_close(s->fd);
+ }
+ if (s->glfs) {
+ glfs_fini(s->glfs);
+ }
+
+ /* use the newly opened image / connection */
+ s->fd = reop_s->fd;
+ s->glfs = reop_s->glfs;
+
+ g_free(state->opaque);
+ state->opaque = NULL;
+
+ return;
+}
+
+
+static void qemu_gluster_reopen_abort(BDRVReopenState *state)
+{
+ BDRVGlusterReopenState *reop_s = state->opaque;
+
+ if (reop_s == NULL) {
+ return;
+ }
+
+ if (reop_s->fd) {
+ glfs_close(reop_s->fd);
+ }
+
+ if (reop_s->glfs) {
+ glfs_fini(reop_s->glfs);
+ }
+
+ g_free(state->opaque);
+ state->opaque = NULL;
+
+ return;
+}
+
+#ifdef CONFIG_GLUSTERFS_ZEROFILL
+static coroutine_fn int qemu_gluster_co_write_zeroes(BlockDriverState *bs,
+ int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
+{
+ int ret;
+ GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
+ BDRVGlusterState *s = bs->opaque;
+ off_t size = nb_sectors * BDRV_SECTOR_SIZE;
+ off_t offset = sector_num * BDRV_SECTOR_SIZE;
+
+ acb->size = size;
+ acb->ret = 0;
+ acb->coroutine = qemu_coroutine_self();
+
+ ret = glfs_zerofill_async(s->fd, offset, size, &gluster_finish_aiocb, acb);
+ if (ret < 0) {
+ ret = -errno;
+ goto out;
+ }
+
+ qemu_coroutine_yield();
+ ret = acb->ret;
+
+out:
+ g_slice_free(GlusterAIOCB, acb);
+ return ret;
+}
+
+static inline bool gluster_supports_zerofill(void)
+{
+ return 1;
+}
+
+static inline int qemu_gluster_zerofill(struct glfs_fd *fd, int64_t offset,
+ int64_t size)
+{
+ return glfs_zerofill(fd, offset, size);
+}
+
+#else
+static inline bool gluster_supports_zerofill(void)
+{
+ return 0;
+}
+
+static inline int qemu_gluster_zerofill(struct glfs_fd *fd, int64_t offset,
+ int64_t size)
+{
+ return 0;
+}
+#endif
+
static int qemu_gluster_create(const char *filename,
- QEMUOptionParameter *options)
+ QEMUOptionParameter *options, Error **errp)
{
struct glfs *glfs;
struct glfs_fd *fd;
int ret = 0;
+ int prealloc = 0;
int64_t total_size = 0;
GlusterConf *gconf = g_malloc0(sizeof(GlusterConf));
- glfs = qemu_gluster_init(gconf, filename);
+ glfs = qemu_gluster_init(gconf, filename, errp);
if (!glfs) {
- ret = -errno;
+ ret = -EINVAL;
goto out;
}
while (options && options->name) {
if (!strcmp(options->name, BLOCK_OPT_SIZE)) {
total_size = options->value.n / BDRV_SECTOR_SIZE;
+ } else if (!strcmp(options->name, BLOCK_OPT_PREALLOC)) {
+ if (!options->value.s || !strcmp(options->value.s, "off")) {
+ prealloc = 0;
+ } else if (!strcmp(options->value.s, "full") &&
+ gluster_supports_zerofill()) {
+ prealloc = 1;
+ } else {
+ error_setg(errp, "Invalid preallocation mode: '%s'"
+ " or GlusterFS doesn't support zerofill API",
+ options->value.s);
+ ret = -EINVAL;
+ goto out;
+ }
}
options++;
}
@@ -392,9 +511,15 @@ static int qemu_gluster_create(const char *filename,
if (!fd) {
ret = -errno;
} else {
- if (glfs_ftruncate(fd, total_size * BDRV_SECTOR_SIZE) != 0) {
+ if (!glfs_ftruncate(fd, total_size * BDRV_SECTOR_SIZE)) {
+ if (prealloc && qemu_gluster_zerofill(fd, 0,
+ total_size * BDRV_SECTOR_SIZE)) {
+ ret = -errno;
+ }
+ } else {
ret = -errno;
}
+
if (glfs_close(fd) != 0) {
ret = -errno;
}
@@ -407,72 +532,18 @@ out:
return ret;
}
-static void qemu_gluster_aio_cancel(BlockDriverAIOCB *blockacb)
-{
- GlusterAIOCB *acb = (GlusterAIOCB *)blockacb;
- bool finished = false;
-
- acb->finished = &finished;
- while (!finished) {
- qemu_aio_wait();
- }
-}
-
-static const AIOCBInfo gluster_aiocb_info = {
- .aiocb_size = sizeof(GlusterAIOCB),
- .cancel = qemu_gluster_aio_cancel,
-};
-
-static void gluster_finish_aiocb(struct glfs_fd *fd, ssize_t ret, void *arg)
-{
- GlusterAIOCB *acb = (GlusterAIOCB *)arg;
- BlockDriverState *bs = acb->common.bs;
- BDRVGlusterState *s = bs->opaque;
- int retval;
-
- acb->ret = ret;
- retval = qemu_write_full(s->fds[GLUSTER_FD_WRITE], &acb, sizeof(acb));
- if (retval != sizeof(acb)) {
- /*
- * Gluster AIO callback thread failed to notify the waiting
- * QEMU thread about IO completion.
- *
- * Complete this IO request and make the disk inaccessible for
- * subsequent reads and writes.
- */
- error_report("Gluster failed to notify QEMU about IO completion");
-
- qemu_mutex_lock_iothread(); /* We are in gluster thread context */
- acb->common.cb(acb->common.opaque, -EIO);
- qemu_aio_release(acb);
- s->qemu_aio_count--;
- close(s->fds[GLUSTER_FD_READ]);
- close(s->fds[GLUSTER_FD_WRITE]);
- qemu_aio_set_fd_handler(s->fds[GLUSTER_FD_READ], NULL, NULL, NULL,
- NULL);
- bs->drv = NULL; /* Make the disk inaccessible */
- qemu_mutex_unlock_iothread();
- }
-}
-
-static BlockDriverAIOCB *qemu_gluster_aio_rw(BlockDriverState *bs,
- int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
- BlockDriverCompletionFunc *cb, void *opaque, int write)
+static coroutine_fn int qemu_gluster_co_rw(BlockDriverState *bs,
+ int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, int write)
{
int ret;
- GlusterAIOCB *acb;
+ GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
BDRVGlusterState *s = bs->opaque;
- size_t size;
- off_t offset;
-
- offset = sector_num * BDRV_SECTOR_SIZE;
- size = nb_sectors * BDRV_SECTOR_SIZE;
- s->qemu_aio_count++;
+ size_t size = nb_sectors * BDRV_SECTOR_SIZE;
+ off_t offset = sector_num * BDRV_SECTOR_SIZE;
- acb = qemu_aio_get(&gluster_aiocb_info, bs, cb, opaque);
acb->size = size;
acb->ret = 0;
- acb->finished = NULL;
+ acb->coroutine = qemu_coroutine_self();
if (write) {
ret = glfs_pwritev_async(s->fd, qiov->iov, qiov->niov, offset, 0,
@@ -483,14 +554,16 @@ static BlockDriverAIOCB *qemu_gluster_aio_rw(BlockDriverState *bs,
}
if (ret < 0) {
+ ret = -errno;
goto out;
}
- return &acb->common;
+
+ qemu_coroutine_yield();
+ ret = acb->ret;
out:
- s->qemu_aio_count--;
- qemu_aio_release(acb);
- return NULL;
+ g_slice_free(GlusterAIOCB, acb);
+ return ret;
}
static int qemu_gluster_truncate(BlockDriverState *bs, int64_t offset)
@@ -506,75 +579,68 @@ static int qemu_gluster_truncate(BlockDriverState *bs, int64_t offset)
return 0;
}
-static BlockDriverAIOCB *qemu_gluster_aio_readv(BlockDriverState *bs,
- int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
- BlockDriverCompletionFunc *cb, void *opaque)
+static coroutine_fn int qemu_gluster_co_readv(BlockDriverState *bs,
+ int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
- return qemu_gluster_aio_rw(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
+ return qemu_gluster_co_rw(bs, sector_num, nb_sectors, qiov, 0);
}
-static BlockDriverAIOCB *qemu_gluster_aio_writev(BlockDriverState *bs,
- int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
- BlockDriverCompletionFunc *cb, void *opaque)
+static coroutine_fn int qemu_gluster_co_writev(BlockDriverState *bs,
+ int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
- return qemu_gluster_aio_rw(bs, sector_num, qiov, nb_sectors, cb, opaque, 1);
+ return qemu_gluster_co_rw(bs, sector_num, nb_sectors, qiov, 1);
}
-static BlockDriverAIOCB *qemu_gluster_aio_flush(BlockDriverState *bs,
- BlockDriverCompletionFunc *cb, void *opaque)
+static coroutine_fn int qemu_gluster_co_flush_to_disk(BlockDriverState *bs)
{
int ret;
- GlusterAIOCB *acb;
+ GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
BDRVGlusterState *s = bs->opaque;
- acb = qemu_aio_get(&gluster_aiocb_info, bs, cb, opaque);
acb->size = 0;
acb->ret = 0;
- acb->finished = NULL;
- s->qemu_aio_count++;
+ acb->coroutine = qemu_coroutine_self();
ret = glfs_fsync_async(s->fd, &gluster_finish_aiocb, acb);
if (ret < 0) {
+ ret = -errno;
goto out;
}
- return &acb->common;
+
+ qemu_coroutine_yield();
+ ret = acb->ret;
out:
- s->qemu_aio_count--;
- qemu_aio_release(acb);
- return NULL;
+ g_slice_free(GlusterAIOCB, acb);
+ return ret;
}
#ifdef CONFIG_GLUSTERFS_DISCARD
-static BlockDriverAIOCB *qemu_gluster_aio_discard(BlockDriverState *bs,
- int64_t sector_num, int nb_sectors, BlockDriverCompletionFunc *cb,
- void *opaque)
+static coroutine_fn int qemu_gluster_co_discard(BlockDriverState *bs,
+ int64_t sector_num, int nb_sectors)
{
int ret;
- GlusterAIOCB *acb;
+ GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
BDRVGlusterState *s = bs->opaque;
- size_t size;
- off_t offset;
+ size_t size = nb_sectors * BDRV_SECTOR_SIZE;
+ off_t offset = sector_num * BDRV_SECTOR_SIZE;
- offset = sector_num * BDRV_SECTOR_SIZE;
- size = nb_sectors * BDRV_SECTOR_SIZE;
-
- acb = qemu_aio_get(&gluster_aiocb_info, bs, cb, opaque);
acb->size = 0;
acb->ret = 0;
- acb->finished = NULL;
- s->qemu_aio_count++;
+ acb->coroutine = qemu_coroutine_self();
ret = glfs_discard_async(s->fd, offset, size, &gluster_finish_aiocb, acb);
if (ret < 0) {
+ ret = -errno;
goto out;
}
- return &acb->common;
+
+ qemu_coroutine_yield();
+ ret = acb->ret;
out:
- s->qemu_aio_count--;
- qemu_aio_release(acb);
- return NULL;
+ g_slice_free(GlusterAIOCB, acb);
+ return ret;
}
#endif
@@ -609,10 +675,6 @@ static void qemu_gluster_close(BlockDriverState *bs)
{
BDRVGlusterState *s = bs->opaque;
- close(s->fds[GLUSTER_FD_READ]);
- close(s->fds[GLUSTER_FD_WRITE]);
- qemu_aio_set_fd_handler(s->fds[GLUSTER_FD_READ], NULL, NULL, NULL, NULL);
-
if (s->fd) {
glfs_close(s->fd);
s->fd = NULL;
@@ -632,6 +694,11 @@ static QEMUOptionParameter qemu_gluster_create_options[] = {
.type = OPT_SIZE,
.help = "Virtual disk size"
},
+ {
+ .name = BLOCK_OPT_PREALLOC,
+ .type = OPT_STRING,
+ .help = "Preallocation mode (allowed values: off, full)"
+ },
{ NULL }
};
@@ -639,18 +706,25 @@ static BlockDriver bdrv_gluster = {
.format_name = "gluster",
.protocol_name = "gluster",
.instance_size = sizeof(BDRVGlusterState),
+ .bdrv_needs_filename = true,
.bdrv_file_open = qemu_gluster_open,
+ .bdrv_reopen_prepare = qemu_gluster_reopen_prepare,
+ .bdrv_reopen_commit = qemu_gluster_reopen_commit,
+ .bdrv_reopen_abort = qemu_gluster_reopen_abort,
.bdrv_close = qemu_gluster_close,
.bdrv_create = qemu_gluster_create,
.bdrv_getlength = qemu_gluster_getlength,
.bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
.bdrv_truncate = qemu_gluster_truncate,
- .bdrv_aio_readv = qemu_gluster_aio_readv,
- .bdrv_aio_writev = qemu_gluster_aio_writev,
- .bdrv_aio_flush = qemu_gluster_aio_flush,
+ .bdrv_co_readv = qemu_gluster_co_readv,
+ .bdrv_co_writev = qemu_gluster_co_writev,
+ .bdrv_co_flush_to_disk = qemu_gluster_co_flush_to_disk,
.bdrv_has_zero_init = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
- .bdrv_aio_discard = qemu_gluster_aio_discard,
+ .bdrv_co_discard = qemu_gluster_co_discard,
+#endif
+#ifdef CONFIG_GLUSTERFS_ZEROFILL
+ .bdrv_co_write_zeroes = qemu_gluster_co_write_zeroes,
#endif
.create_options = qemu_gluster_create_options,
};
@@ -659,18 +733,25 @@ static BlockDriver bdrv_gluster_tcp = {
.format_name = "gluster",
.protocol_name = "gluster+tcp",
.instance_size = sizeof(BDRVGlusterState),
+ .bdrv_needs_filename = true,
.bdrv_file_open = qemu_gluster_open,
+ .bdrv_reopen_prepare = qemu_gluster_reopen_prepare,
+ .bdrv_reopen_commit = qemu_gluster_reopen_commit,
+ .bdrv_reopen_abort = qemu_gluster_reopen_abort,
.bdrv_close = qemu_gluster_close,
.bdrv_create = qemu_gluster_create,
.bdrv_getlength = qemu_gluster_getlength,
.bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
.bdrv_truncate = qemu_gluster_truncate,
- .bdrv_aio_readv = qemu_gluster_aio_readv,
- .bdrv_aio_writev = qemu_gluster_aio_writev,
- .bdrv_aio_flush = qemu_gluster_aio_flush,
+ .bdrv_co_readv = qemu_gluster_co_readv,
+ .bdrv_co_writev = qemu_gluster_co_writev,
+ .bdrv_co_flush_to_disk = qemu_gluster_co_flush_to_disk,
.bdrv_has_zero_init = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
- .bdrv_aio_discard = qemu_gluster_aio_discard,
+ .bdrv_co_discard = qemu_gluster_co_discard,
+#endif
+#ifdef CONFIG_GLUSTERFS_ZEROFILL
+ .bdrv_co_write_zeroes = qemu_gluster_co_write_zeroes,
#endif
.create_options = qemu_gluster_create_options,
};
@@ -679,18 +760,25 @@ static BlockDriver bdrv_gluster_unix = {
.format_name = "gluster",
.protocol_name = "gluster+unix",
.instance_size = sizeof(BDRVGlusterState),
+ .bdrv_needs_filename = true,
.bdrv_file_open = qemu_gluster_open,
+ .bdrv_reopen_prepare = qemu_gluster_reopen_prepare,
+ .bdrv_reopen_commit = qemu_gluster_reopen_commit,
+ .bdrv_reopen_abort = qemu_gluster_reopen_abort,
.bdrv_close = qemu_gluster_close,
.bdrv_create = qemu_gluster_create,
.bdrv_getlength = qemu_gluster_getlength,
.bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
.bdrv_truncate = qemu_gluster_truncate,
- .bdrv_aio_readv = qemu_gluster_aio_readv,
- .bdrv_aio_writev = qemu_gluster_aio_writev,
- .bdrv_aio_flush = qemu_gluster_aio_flush,
+ .bdrv_co_readv = qemu_gluster_co_readv,
+ .bdrv_co_writev = qemu_gluster_co_writev,
+ .bdrv_co_flush_to_disk = qemu_gluster_co_flush_to_disk,
.bdrv_has_zero_init = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
- .bdrv_aio_discard = qemu_gluster_aio_discard,
+ .bdrv_co_discard = qemu_gluster_co_discard,
+#endif
+#ifdef CONFIG_GLUSTERFS_ZEROFILL
+ .bdrv_co_write_zeroes = qemu_gluster_co_write_zeroes,
#endif
.create_options = qemu_gluster_create_options,
};
@@ -699,18 +787,25 @@ static BlockDriver bdrv_gluster_rdma = {
.format_name = "gluster",
.protocol_name = "gluster+rdma",
.instance_size = sizeof(BDRVGlusterState),
+ .bdrv_needs_filename = true,
.bdrv_file_open = qemu_gluster_open,
+ .bdrv_reopen_prepare = qemu_gluster_reopen_prepare,
+ .bdrv_reopen_commit = qemu_gluster_reopen_commit,
+ .bdrv_reopen_abort = qemu_gluster_reopen_abort,
.bdrv_close = qemu_gluster_close,
.bdrv_create = qemu_gluster_create,
.bdrv_getlength = qemu_gluster_getlength,
.bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
.bdrv_truncate = qemu_gluster_truncate,
- .bdrv_aio_readv = qemu_gluster_aio_readv,
- .bdrv_aio_writev = qemu_gluster_aio_writev,
- .bdrv_aio_flush = qemu_gluster_aio_flush,
+ .bdrv_co_readv = qemu_gluster_co_readv,
+ .bdrv_co_writev = qemu_gluster_co_writev,
+ .bdrv_co_flush_to_disk = qemu_gluster_co_flush_to_disk,
.bdrv_has_zero_init = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
- .bdrv_aio_discard = qemu_gluster_aio_discard,
+ .bdrv_co_discard = qemu_gluster_co_discard,
+#endif
+#ifdef CONFIG_GLUSTERFS_ZEROFILL
+ .bdrv_co_write_zeroes = qemu_gluster_co_write_zeroes,
#endif
.create_options = qemu_gluster_create_options,
};
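
For reference, below is a minimal sketch (not part of this commit) of the coroutine completion pattern the patch switches to: the request coroutine submits the asynchronous glfs call and yields, the GlusterFS callback thread records the result and schedules a bottom half, and the bottom half re-enters the coroutine on the QEMU thread. Names prefixed with my_ are hypothetical; the primitives used (qemu_coroutine_self, qemu_coroutine_yield, qemu_bh_new, qemu_bh_schedule, qemu_coroutine_enter, g_slice_new/g_slice_free) are the ones that appear in the diff above, and the sketch assumes the same headers that block/gluster.c of this era already includes.

/* Sketch only: hypothetical names, error handling trimmed for brevity. */
typedef struct MyAIOCB {
    int64_t size;
    int ret;
    QEMUBH *bh;
    Coroutine *coroutine;
} MyAIOCB;

/* Runs in the QEMU thread via the bottom half: wake the waiting coroutine. */
static void my_complete_aio(void *opaque)
{
    MyAIOCB *acb = opaque;

    qemu_bh_delete(acb->bh);
    acb->bh = NULL;
    qemu_coroutine_enter(acb->coroutine, NULL);
}

/* Runs in a GlusterFS thread: record the result, defer the wakeup to a BH
 * so the coroutine is re-entered from the QEMU thread, not from here. */
static void my_finish_aiocb(struct glfs_fd *fd, ssize_t ret, void *arg)
{
    MyAIOCB *acb = arg;

    if (!ret || ret == acb->size) {
        acb->ret = 0;           /* success */
    } else if (ret < 0) {
        acb->ret = ret;         /* I/O failed */
    } else {
        acb->ret = -EIO;        /* partial transfer: fail it */
    }

    acb->bh = qemu_bh_new(my_complete_aio, acb);
    qemu_bh_schedule(acb->bh);
}

/* Runs in a coroutine: submit the async write, then yield until woken. */
static coroutine_fn int my_gluster_write(struct glfs_fd *fd, QEMUIOVector *qiov,
                                         off_t offset, size_t size)
{
    int ret;
    MyAIOCB *acb = g_slice_new(MyAIOCB);

    acb->size = size;
    acb->ret = 0;
    acb->coroutine = qemu_coroutine_self();

    ret = glfs_pwritev_async(fd, qiov->iov, qiov->niov, offset, 0,
                             my_finish_aiocb, acb);
    if (ret < 0) {
        ret = -errno;
        goto out;
    }

    qemu_coroutine_yield();     /* resumed by my_complete_aio() */
    ret = acb->ret;

out:
    g_slice_free(MyAIOCB, acb);
    return ret;
}

Compared with the removed pipe-based notification path, this keeps the per-request state on the heap only for the lifetime of one request and needs no file-descriptor handler, which is why the patch can also drop qemu_aio_count, the event reader, and the flush callback.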