Diffstat (limited to 'include/block')
-rw-r--r--   include/block/aio.h            |  30
-rw-r--r--   include/block/block.h          |  59
-rw-r--r--   include/block/block_backup.h   |  39
-rw-r--r--   include/block/block_int.h      |  93
-rw-r--r--   include/block/blockjob.h       | 264
-rw-r--r--   include/block/blockjob_int.h   | 250
-rw-r--r--   include/block/dirty-bitmap.h   |  35
-rw-r--r--   include/block/nbd.h            |  76
8 files changed, 552 insertions, 294 deletions
diff --git a/include/block/aio.h b/include/block/aio.h index 173c1ed404..c7ae27c91c 100644 --- a/include/block/aio.h +++ b/include/block/aio.h @@ -18,7 +18,6 @@ #include "qemu/queue.h" #include "qemu/event_notifier.h" #include "qemu/thread.h" -#include "qemu/rfifolock.h" #include "qemu/timer.h" typedef struct BlockAIOCB BlockAIOCB; @@ -54,7 +53,7 @@ struct AioContext { GSource source; /* Protects all fields from multi-threaded access */ - RFifoLock lock; + QemuRecMutex lock; /* The list of registered AIO handlers */ QLIST_HEAD(, AioHandler) aio_handlers; @@ -116,9 +115,6 @@ struct AioContext { bool notified; EventNotifier notifier; - /* Scheduling this BH forces the event loop it iterate */ - QEMUBH *notify_dummy_bh; - /* Thread pool for performing work and receiving completion callbacks */ struct ThreadPool *thread_pool; @@ -181,6 +177,12 @@ void aio_context_acquire(AioContext *ctx); void aio_context_release(AioContext *ctx); /** + * aio_bh_schedule_oneshot: Allocate a new bottom half structure that will run + * only once and as soon as possible. + */ +void aio_bh_schedule_oneshot(AioContext *ctx, QEMUBHFunc *cb, void *opaque); + +/** * aio_bh_new: Allocate a new bottom half structure. * * Bottom halves are lightweight callbacks whose invocation is guaranteed @@ -447,6 +449,24 @@ static inline bool aio_node_check(AioContext *ctx, bool is_external) } /** + * Return the AioContext whose event loop runs in the current thread. + * + * If called from an IOThread this will be the IOThread's AioContext. If + * called from another thread it will be the main loop AioContext. + */ +AioContext *qemu_get_current_aio_context(void); + +/** + * @ctx: the aio context + * + * Return whether we are running in the I/O thread that manages @ctx. + */ +static inline bool aio_context_in_iothread(AioContext *ctx) +{ + return ctx == qemu_get_current_aio_context(); +} + +/** * aio_context_setup: * @ctx: the aio context * diff --git a/include/block/block.h b/include/block/block.h index 11c162d594..49bb0b239a 100644 --- a/include/block/block.h +++ b/include/block/block.h @@ -7,16 +7,15 @@ #include "qemu/coroutine.h" #include "block/accounting.h" #include "block/dirty-bitmap.h" +#include "block/blockjob.h" #include "qapi/qmp/qobject.h" #include "qapi-types.h" #include "qemu/hbitmap.h" /* block.c */ typedef struct BlockDriver BlockDriver; -typedef struct BlockJob BlockJob; typedef struct BdrvChild BdrvChild; typedef struct BdrvChildRole BdrvChildRole; -typedef struct BlockJobTxn BlockJobTxn; typedef struct BlockDriverInfo { /* in bytes, 0 if irrelevant */ @@ -65,9 +64,10 @@ typedef enum { BDRV_REQ_MAY_UNMAP = 0x4, BDRV_REQ_NO_SERIALISING = 0x8, BDRV_REQ_FUA = 0x10, + BDRV_REQ_WRITE_COMPRESSED = 0x20, /* Mask of valid flags */ - BDRV_REQ_MASK = 0x1f, + BDRV_REQ_MASK = 0x3f, } BdrvRequestFlags; typedef struct BlockSizes { @@ -106,6 +106,8 @@ typedef struct HDGeometry { #define BDRV_OPT_CACHE_WB "cache.writeback" #define BDRV_OPT_CACHE_DIRECT "cache.direct" #define BDRV_OPT_CACHE_NO_FLUSH "cache.no-flush" +#define BDRV_OPT_READ_ONLY "read-only" +#define BDRV_OPT_DISCARD "discard" #define BDRV_SECTOR_BITS 9 @@ -184,11 +186,6 @@ typedef enum BlockOpType { BLOCK_OP_TYPE_MAX, } BlockOpType; -void bdrv_info_print(Monitor *mon, const QObject *data); -void bdrv_info(Monitor *mon, QObject **ret_data); -void bdrv_stats_print(Monitor *mon, const QObject *data); -void bdrv_info_stats(Monitor *mon, QObject **ret_data); - /* disk I/O throttling */ void bdrv_init(void); void bdrv_init_with_whitelist(void); @@ -220,7 +217,7 @@ 
BlockDriverState *bdrv_open(const char *filename, const char *reference, BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue, BlockDriverState *bs, QDict *options, int flags); -int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp); +int bdrv_reopen_multiple(AioContext *ctx, BlockReopenQueue *bs_queue, Error **errp); int bdrv_reopen(BlockDriverState *bs, int bdrv_flags, Error **errp); int bdrv_reopen_prepare(BDRVReopenState *reopen_state, BlockReopenQueue *queue, Error **errp); @@ -316,17 +313,11 @@ BlockAIOCB *bdrv_aio_writev(BdrvChild *child, int64_t sector_num, BlockCompletionFunc *cb, void *opaque); BlockAIOCB *bdrv_aio_flush(BlockDriverState *bs, BlockCompletionFunc *cb, void *opaque); -BlockAIOCB *bdrv_aio_pdiscard(BlockDriverState *bs, - int64_t offset, int count, - BlockCompletionFunc *cb, void *opaque); void bdrv_aio_cancel(BlockAIOCB *acb); void bdrv_aio_cancel_async(BlockAIOCB *acb); /* sg packet commands */ -int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf); -BlockAIOCB *bdrv_aio_ioctl(BlockDriverState *bs, - unsigned long int req, void *buf, - BlockCompletionFunc *cb, void *opaque); +int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf); /* Invalidate any cached metadata used by image formats */ void bdrv_invalidate_cache(BlockDriverState *bs, Error **errp); @@ -336,11 +327,43 @@ int bdrv_inactivate_all(void); /* Ensure contents are flushed to disk. */ int bdrv_flush(BlockDriverState *bs); int coroutine_fn bdrv_co_flush(BlockDriverState *bs); +int bdrv_flush_all(void); void bdrv_close_all(void); void bdrv_drain(BlockDriverState *bs); void coroutine_fn bdrv_co_drain(BlockDriverState *bs); +void bdrv_drain_all_begin(void); +void bdrv_drain_all_end(void); void bdrv_drain_all(void); +#define BDRV_POLL_WHILE(bs, cond) ({ \ + bool waited_ = false; \ + BlockDriverState *bs_ = (bs); \ + AioContext *ctx_ = bdrv_get_aio_context(bs_); \ + if (aio_context_in_iothread(ctx_)) { \ + while ((cond)) { \ + aio_poll(ctx_, true); \ + waited_ = true; \ + } \ + } else { \ + assert(qemu_get_current_aio_context() == \ + qemu_get_aio_context()); \ + /* Ask bdrv_dec_in_flight to wake up the main \ + * QEMU AioContext. Extra I/O threads never take \ + * other I/O threads' AioContexts (see for example \ + * block_job_defer_to_main_loop for how to do it). 
\ + */ \ + assert(!bs_->wakeup); \ + bs_->wakeup = true; \ + while ((cond)) { \ + aio_context_release(ctx_); \ + aio_poll(qemu_get_aio_context(), true); \ + aio_context_acquire(ctx_); \ + waited_ = true; \ + } \ + bs_->wakeup = false; \ + } \ + waited_; }) + int bdrv_pdiscard(BlockDriverState *bs, int64_t offset, int count); int bdrv_co_pdiscard(BlockDriverState *bs, int64_t offset, int count); int bdrv_has_zero_init_1(BlockDriverState *bs); @@ -392,15 +415,12 @@ bool bdrv_is_encrypted(BlockDriverState *bs); bool bdrv_key_required(BlockDriverState *bs); int bdrv_set_key(BlockDriverState *bs, const char *key); void bdrv_add_key(BlockDriverState *bs, const char *key, Error **errp); -int bdrv_query_missing_keys(void); void bdrv_iterate_format(void (*it)(void *opaque, const char *name), void *opaque); const char *bdrv_get_node_name(const BlockDriverState *bs); const char *bdrv_get_device_name(const BlockDriverState *bs); const char *bdrv_get_device_or_node_name(const BlockDriverState *bs); int bdrv_get_flags(BlockDriverState *bs); -int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num, - const uint8_t *buf, int nb_sectors); int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi); ImageInfoSpecific *bdrv_get_specific_info(BlockDriverState *bs); void bdrv_round_sectors_to_clusters(BlockDriverState *bs, @@ -421,7 +441,6 @@ void bdrv_get_full_backing_filename_from_filename(const char *backed, const char *backing, char *dest, size_t sz, Error **errp); -int bdrv_is_snapshot(BlockDriverState *bs); int path_has_protocol(const char *path); int path_is_absolute(const char *path); diff --git a/include/block/block_backup.h b/include/block/block_backup.h new file mode 100644 index 0000000000..8a759477a3 --- /dev/null +++ b/include/block/block_backup.h @@ -0,0 +1,39 @@ +/* + * QEMU backup + * + * Copyright (c) 2013 Proxmox Server Solutions + * Copyright (c) 2016 HUAWEI TECHNOLOGIES CO., LTD. + * Copyright (c) 2016 Intel Corporation + * Copyright (c) 2016 FUJITSU LIMITED + * + * Authors: + * Dietmar Maurer <dietmar@proxmox.com> + * Changlong Xie <xiecl.fnst@cn.fujitsu.com> + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ * + */ + +#ifndef BLOCK_BACKUP_H +#define BLOCK_BACKUP_H + +#include "block/block_int.h" + +typedef struct CowRequest { + int64_t start; + int64_t end; + QLIST_ENTRY(CowRequest) list; + CoQueue wait_queue; /* coroutines blocked on this request */ +} CowRequest; + +void backup_wait_for_overlapping_requests(BlockJob *job, int64_t sector_num, + int nb_sectors); +void backup_cow_request_begin(CowRequest *req, BlockJob *job, + int64_t sector_num, + int nb_sectors); +void backup_cow_request_end(CowRequest *req); + +void backup_do_checkpoint(BlockJob *job, Error **errp); + +#endif diff --git a/include/block/block_int.h b/include/block/block_int.h index 1e939de4fe..83a423c580 100644 --- a/include/block/block_int.h +++ b/include/block/block_int.h @@ -62,8 +62,6 @@ enum BdrvTrackedRequestType { BDRV_TRACKED_READ, BDRV_TRACKED_WRITE, - BDRV_TRACKED_FLUSH, - BDRV_TRACKED_IOCTL, BDRV_TRACKED_DISCARD, }; @@ -204,8 +202,8 @@ struct BlockDriver { bool has_variable_length; int64_t (*bdrv_get_allocated_file_size)(BlockDriverState *bs); - int (*bdrv_write_compressed)(BlockDriverState *bs, int64_t sector_num, - const uint8_t *buf, int nb_sectors); + int coroutine_fn (*bdrv_co_pwritev_compressed)(BlockDriverState *bs, + uint64_t offset, uint64_t bytes, QEMUIOVector *qiov); int (*bdrv_snapshot_create)(BlockDriverState *bs, QEMUSnapshotInfo *sn_info); @@ -244,6 +242,8 @@ struct BlockDriver { BlockAIOCB *(*bdrv_aio_ioctl)(BlockDriverState *bs, unsigned long int req, void *buf, BlockCompletionFunc *cb, void *opaque); + int coroutine_fn (*bdrv_co_ioctl)(BlockDriverState *bs, + unsigned long int req, void *buf); /* List of options for creating images, terminated by name == NULL */ QemuOptsList *create_opts; @@ -443,7 +443,7 @@ struct BlockDriverState { note this is a reference count */ CoQueue flush_queue; /* Serializing flush queue */ - BdrvTrackedRequest *active_flush_req; /* Flush request in flight */ + bool active_flush_req; /* Flush request in flight? */ unsigned int write_gen; /* Current data generation */ unsigned int flushed_gen; /* Flushed write generation */ @@ -471,9 +471,12 @@ struct BlockDriverState { /* Callback before write request is processed */ NotifierWithReturnList before_write_notifiers; - /* number of in-flight serialising requests */ + /* number of in-flight requests; overall and serialising */ + unsigned int in_flight; unsigned int serialising_in_flight; + bool wakeup; + /* Offset after the highest byte written to */ uint64_t wr_highest_offset; @@ -562,15 +565,6 @@ extern BlockDriver bdrv_file; extern BlockDriver bdrv_raw; extern BlockDriver bdrv_qcow2; -/** - * bdrv_setup_io_funcs: - * - * Prepare a #BlockDriver for I/O request processing by populating - * unimplemented coroutine and AIO interfaces with generic wrapper functions - * that fall back to implemented interfaces. - */ -void bdrv_setup_io_funcs(BlockDriver *bdrv); - int coroutine_fn bdrv_co_preadv(BdrvChild *child, int64_t offset, unsigned int bytes, QEMUIOVector *qiov, BdrvRequestFlags flags); @@ -641,6 +635,21 @@ void bdrv_remove_aio_context_notifier(BlockDriverState *bs, void (*aio_context_detached)(void *), void *opaque); +/** + * bdrv_wakeup: + * @bs: The BlockDriverState for which an I/O operation has been completed. + * + * Wake up the main thread if it is waiting on BDRV_POLL_WHILE. During + * synchronous I/O on a BlockDriverState that is attached to another + * I/O thread, the main thread lets the I/O thread's event loop run, + * waiting for the I/O operation to complete. 
A bdrv_wakeup will wake + * up the main thread if necessary. + * + * Manual calls to bdrv_wakeup are rarely necessary, because + * bdrv_dec_in_flight already calls it. + */ +void bdrv_wakeup(BlockDriverState *bs); + #ifdef _WIN32 int is_windows_drive(const char *filename); #endif @@ -656,8 +665,6 @@ int is_windows_drive(const char *filename); * the new backing file if the job completes. Ignored if @base is %NULL. * @speed: The maximum speed, in bytes per second, or 0 for unlimited. * @on_error: The action to take upon error. - * @cb: Completion function for the job. - * @opaque: Opaque pointer value passed to @cb. * @errp: Error object. * * Start a streaming operation on @bs. Clusters that are unallocated @@ -669,8 +676,7 @@ int is_windows_drive(const char *filename); */ void stream_start(const char *job_id, BlockDriverState *bs, BlockDriverState *base, const char *backing_file_str, - int64_t speed, BlockdevOnError on_error, - BlockCompletionFunc *cb, void *opaque, Error **errp); + int64_t speed, BlockdevOnError on_error, Error **errp); /** * commit_start: @@ -681,34 +687,35 @@ void stream_start(const char *job_id, BlockDriverState *bs, * @base: Block device that will be written into, and become the new top. * @speed: The maximum speed, in bytes per second, or 0 for unlimited. * @on_error: The action to take upon error. - * @cb: Completion function for the job. - * @opaque: Opaque pointer value passed to @cb. * @backing_file_str: String to use as the backing file in @top's overlay * @errp: Error object. * */ void commit_start(const char *job_id, BlockDriverState *bs, BlockDriverState *base, BlockDriverState *top, int64_t speed, - BlockdevOnError on_error, BlockCompletionFunc *cb, - void *opaque, const char *backing_file_str, Error **errp); + BlockdevOnError on_error, const char *backing_file_str, + Error **errp); /** * commit_active_start: * @job_id: The id of the newly-created job, or %NULL to use the * device name of @bs. * @bs: Active block device to be committed. * @base: Block device that will be written into, and become the new top. + * @creation_flags: Flags that control the behavior of the Job lifetime. + * See @BlockJobCreateFlags * @speed: The maximum speed, in bytes per second, or 0 for unlimited. * @on_error: The action to take upon error. * @cb: Completion function for the job. * @opaque: Opaque pointer value passed to @cb. * @errp: Error object. + * @auto_complete: Auto complete the job. * */ void commit_active_start(const char *job_id, BlockDriverState *bs, - BlockDriverState *base, int64_t speed, - BlockdevOnError on_error, + BlockDriverState *base, int creation_flags, + int64_t speed, BlockdevOnError on_error, BlockCompletionFunc *cb, - void *opaque, Error **errp); + void *opaque, Error **errp, bool auto_complete); /* * mirror_start: * @job_id: The id of the newly-created job, or %NULL to use the @@ -725,12 +732,10 @@ void commit_active_start(const char *job_id, BlockDriverState *bs, * @on_source_error: The action to take upon error reading from the source. * @on_target_error: The action to take upon error writing to the target. * @unmap: Whether to unmap target where source sectors only contain zeroes. - * @cb: Completion function for the job. - * @opaque: Opaque pointer value passed to @cb. * @errp: Error object. * * Start a mirroring operation on @bs. Clusters that are allocated - * in @bs will be written to @bs until the job is cancelled or + * in @bs will be written to @target until the job is cancelled or * manually completed. 
At the end of a successful mirroring job, * @bs will be switched to read from @target. */ @@ -740,12 +745,10 @@ void mirror_start(const char *job_id, BlockDriverState *bs, MirrorSyncMode mode, BlockMirrorBackingMode backing_mode, BlockdevOnError on_source_error, BlockdevOnError on_target_error, - bool unmap, - BlockCompletionFunc *cb, - void *opaque, Error **errp); + bool unmap, Error **errp); /* - * backup_start: + * backup_job_create: * @job_id: The id of the newly-created job, or %NULL to use the * device name of @bs. * @bs: Block device to operate on. @@ -755,20 +758,25 @@ void mirror_start(const char *job_id, BlockDriverState *bs, * @sync_bitmap: The dirty bitmap if sync_mode is MIRROR_SYNC_MODE_INCREMENTAL. * @on_source_error: The action to take upon error reading from the source. * @on_target_error: The action to take upon error writing to the target. + * @creation_flags: Flags that control the behavior of the Job lifetime. + * See @BlockJobCreateFlags * @cb: Completion function for the job. * @opaque: Opaque pointer value passed to @cb. * @txn: Transaction that this job is part of (may be NULL). * - * Start a backup operation on @bs. Clusters in @bs are written to @target + * Create a backup operation on @bs. Clusters in @bs are written to @target * until the job is cancelled or manually completed. */ -void backup_start(const char *job_id, BlockDriverState *bs, - BlockDriverState *target, int64_t speed, - MirrorSyncMode sync_mode, BdrvDirtyBitmap *sync_bitmap, - BlockdevOnError on_source_error, - BlockdevOnError on_target_error, - BlockCompletionFunc *cb, void *opaque, - BlockJobTxn *txn, Error **errp); +BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs, + BlockDriverState *target, int64_t speed, + MirrorSyncMode sync_mode, + BdrvDirtyBitmap *sync_bitmap, + bool compress, + BlockdevOnError on_source_error, + BlockdevOnError on_target_error, + int creation_flags, + BlockCompletionFunc *cb, void *opaque, + BlockJobTxn *txn, Error **errp); void hmp_drive_add_node(Monitor *mon, const char *optstr); @@ -792,6 +800,9 @@ bool bdrv_requests_pending(BlockDriverState *bs); void bdrv_clear_dirty_bitmap(BdrvDirtyBitmap *bitmap, HBitmap **out); void bdrv_undo_clear_dirty_bitmap(BdrvDirtyBitmap *bitmap, HBitmap *in); +void bdrv_inc_in_flight(BlockDriverState *bs); +void bdrv_dec_in_flight(BlockDriverState *bs); + void blockdev_close_all_bdrv_states(void); #endif /* BLOCK_INT_H */ diff --git a/include/block/blockjob.h b/include/block/blockjob.h index 4ddb4ae2e1..1acb256223 100644 --- a/include/block/blockjob.h +++ b/include/block/blockjob.h @@ -28,78 +28,15 @@ #include "block/block.h" -/** - * BlockJobDriver: - * - * A class type for block job driver. - */ -typedef struct BlockJobDriver { - /** Derived BlockJob struct size */ - size_t instance_size; - - /** String describing the operation, part of query-block-jobs QMP API */ - BlockJobType job_type; - - /** Optional callback for job types that support setting a speed limit */ - void (*set_speed)(BlockJob *job, int64_t speed, Error **errp); - - /** Optional callback for job types that need to forward I/O status reset */ - void (*iostatus_reset)(BlockJob *job); - - /** - * Optional callback for job types whose completion must be triggered - * manually. - */ - void (*complete)(BlockJob *job, Error **errp); - - /** - * If the callback is not NULL, it will be invoked when all the jobs - * belonging to the same transaction complete; or upon this job's - * completion if it is not in a transaction. Skipped if NULL. 
- * - * All jobs will complete with a call to either .commit() or .abort() but - * never both. - */ - void (*commit)(BlockJob *job); - - /** - * If the callback is not NULL, it will be invoked when any job in the - * same transaction fails; or upon this job's failure (due to error or - * cancellation) if it is not in a transaction. Skipped if NULL. - * - * All jobs will complete with a call to either .commit() or .abort() but - * never both. - */ - void (*abort)(BlockJob *job); - - /** - * If the callback is not NULL, it will be invoked when the job transitions - * into the paused state. Paused jobs must not perform any asynchronous - * I/O or event loop activity. This callback is used to quiesce jobs. - */ - void coroutine_fn (*pause)(BlockJob *job); - - /** - * If the callback is not NULL, it will be invoked when the job transitions - * out of the paused state. Any asynchronous I/O or event loop activity - * should be restarted from this callback. - */ - void coroutine_fn (*resume)(BlockJob *job); - - /* - * If the callback is not NULL, it will be invoked before the job is - * resumed in a new AioContext. This is the place to move any resources - * besides job->blk to the new AioContext. - */ - void (*attached_aio_context)(BlockJob *job, AioContext *new_context); -} BlockJobDriver; +typedef struct BlockJobDriver BlockJobDriver; +typedef struct BlockJobTxn BlockJobTxn; /** * BlockJob: * * Long-running operation on a BlockDriverState. */ -struct BlockJob { +typedef struct BlockJob { /** The job type, including the job vtable. */ const BlockJobDriver *driver; @@ -107,7 +44,7 @@ struct BlockJob { BlockBackend *blk; /** - * The ID of the block job. + * The ID of the block job. May be NULL for internal jobs. */ char *id; @@ -181,6 +118,9 @@ struct BlockJob { /** Block other operations when block job is running */ Error *blocker; + /** BlockDriverStates that are involved in this block job */ + GSList *nodes; + /** The opaque value that is passed to the completion function. */ void *opaque; @@ -198,7 +138,12 @@ struct BlockJob { /** Non-NULL if this job is part of a transaction */ BlockJobTxn *txn; QLIST_ENTRY(BlockJob) txn_list; -}; +} BlockJob; + +typedef enum BlockJobCreateFlags { + BLOCK_JOB_DEFAULT = 0x00, + BLOCK_JOB_INTERNAL = 0x01, +} BlockJobCreateFlags; /** * block_job_next: @@ -222,74 +167,15 @@ BlockJob *block_job_next(BlockJob *job); BlockJob *block_job_get(const char *id); /** - * block_job_create: - * @job_id: The id of the newly-created job, or %NULL to have one - * generated automatically. - * @job_type: The class object for the newly-created job. - * @bs: The block - * @speed: The maximum speed, in bytes per second, or 0 for unlimited. - * @cb: Completion function for the job. - * @opaque: Opaque pointer value passed to @cb. - * @errp: Error object. - * - * Create a new long-running block device job and return it. The job - * will call @cb asynchronously when the job completes. Note that - * @bs may have been closed at the time the @cb it is called. If - * this is the case, the job may be reported as either cancelled or - * completed. - * - * This function is not part of the public job interface; it should be - * called from a wrapper that is specific to the job type. - */ -void *block_job_create(const char *job_id, const BlockJobDriver *driver, - BlockDriverState *bs, int64_t speed, - BlockCompletionFunc *cb, void *opaque, Error **errp); - -/** - * block_job_sleep_ns: - * @job: The job that calls the function. - * @clock: The clock to sleep on. 
- * @ns: How many nanoseconds to stop for. - * - * Put the job to sleep (assuming that it wasn't canceled) for @ns - * nanoseconds. Canceling the job will interrupt the wait immediately. - */ -void block_job_sleep_ns(BlockJob *job, QEMUClockType type, int64_t ns); - -/** - * block_job_yield: - * @job: The job that calls the function. - * - * Yield the block job coroutine. - */ -void block_job_yield(BlockJob *job); - -/** - * block_job_ref: - * @bs: The block device. + * block_job_add_bdrv: + * @job: A block job + * @bs: A BlockDriverState that is involved in @job * - * Grab a reference to the block job. Should be paired with block_job_unref. + * Add @bs to the list of BlockDriverState that are involved in + * @job. This means that all operations will be blocked on @bs while + * @job exists. */ -void block_job_ref(BlockJob *job); - -/** - * block_job_unref: - * @bs: The block device. - * - * Release reference to the block job and release resources if it is the last - * reference. - */ -void block_job_unref(BlockJob *job); - -/** - * block_job_completed: - * @job: The job being completed. - * @ret: The status code. - * - * Call the completion function that was registered at creation time, and - * free @job. - */ -void block_job_completed(BlockJob *job, int ret); +void block_job_add_bdrv(BlockJob *job, BlockDriverState *bs); /** * block_job_set_speed: @@ -303,6 +189,15 @@ void block_job_completed(BlockJob *job, int ret); void block_job_set_speed(BlockJob *job, int64_t speed, Error **errp); /** + * block_job_start: + * @job: A job that has not yet been started. + * + * Begins execution of a block job. + * Takes ownership of one reference to the job object. + */ +void block_job_start(BlockJob *job); + +/** * block_job_cancel: * @job: The job to be canceled. * @@ -320,29 +215,12 @@ void block_job_cancel(BlockJob *job); void block_job_complete(BlockJob *job, Error **errp); /** - * block_job_is_cancelled: - * @job: The job being queried. - * - * Returns whether the job is scheduled for cancellation. - */ -bool block_job_is_cancelled(BlockJob *job); - -/** * block_job_query: * @job: The job to get information about. * * Return information about a job. */ -BlockJobInfo *block_job_query(BlockJob *job); - -/** - * block_job_pause_point: - * @job: The job that is ready to pause. - * - * Pause now if block_job_pause() has been called. Block jobs that perform - * lots of I/O must call this between requests so that the job can be paused. - */ -void coroutine_fn block_job_pause_point(BlockJob *job); +BlockJobInfo *block_job_query(BlockJob *job, Error **errp); /** * block_job_pause: @@ -353,45 +231,38 @@ void coroutine_fn block_job_pause_point(BlockJob *job); void block_job_pause(BlockJob *job); /** - * block_job_resume: - * @job: The job to be resumed. - * - * Resume the specified job. Must be paired with a preceding block_job_pause. - */ -void block_job_resume(BlockJob *job); - -/** - * block_job_enter: - * @job: The job to enter. + * block_job_user_pause: + * @job: The job to be paused. * - * Continue the specified job by entering the coroutine. + * Asynchronously pause the specified job. + * Do not allow a resume until a matching call to block_job_user_resume. */ -void block_job_enter(BlockJob *job); +void block_job_user_pause(BlockJob *job); /** - * block_job_event_cancelled: - * @job: The job whose information is requested. + * block_job_paused: + * @job: The job to query. * - * Send a BLOCK_JOB_CANCELLED event for the specified job. + * Returns true if the job is user-paused. 
*/ -void block_job_event_cancelled(BlockJob *job); +bool block_job_user_paused(BlockJob *job); /** - * block_job_ready: - * @job: The job which is now ready to complete. - * @msg: Error message. Only present on failure. + * block_job_resume: + * @job: The job to be resumed. * - * Send a BLOCK_JOB_COMPLETED event for the specified job. + * Resume the specified job. Must be paired with a preceding block_job_pause. */ -void block_job_event_completed(BlockJob *job, const char *msg); +void block_job_resume(BlockJob *job); /** - * block_job_ready: - * @job: The job which is now ready to complete. + * block_job_user_resume: + * @job: The job to be resumed. * - * Send a BLOCK_JOB_READY event for the specified job. + * Resume the specified job. + * Must be paired with a preceding block_job_user_pause. */ -void block_job_event_ready(BlockJob *job); +void block_job_user_resume(BlockJob *job); /** * block_job_cancel_sync: @@ -439,37 +310,6 @@ int block_job_complete_sync(BlockJob *job, Error **errp); void block_job_iostatus_reset(BlockJob *job); /** - * block_job_error_action: - * @job: The job to signal an error for. - * @on_err: The error action setting. - * @is_read: Whether the operation was a read. - * @error: The error that was reported. - * - * Report an I/O error for a block job and possibly stop the VM. Return the - * action that was selected based on @on_err and @error. - */ -BlockErrorAction block_job_error_action(BlockJob *job, BlockdevOnError on_err, - int is_read, int error); - -typedef void BlockJobDeferToMainLoopFn(BlockJob *job, void *opaque); - -/** - * block_job_defer_to_main_loop: - * @job: The job - * @fn: The function to run in the main loop - * @opaque: The opaque value that is passed to @fn - * - * Execute a given function in the main loop with the BlockDriverState - * AioContext acquired. Block jobs must call bdrv_unref(), bdrv_close(), and - * anything that uses bdrv_drain_all() in the main loop. - * - * The @job AioContext is held while @fn executes. - */ -void block_job_defer_to_main_loop(BlockJob *job, - BlockJobDeferToMainLoopFn *fn, - void *opaque); - -/** * block_job_txn_new: * * Allocate and return a new block job transaction. Jobs can be added to the @@ -504,4 +344,12 @@ void block_job_txn_unref(BlockJobTxn *txn); */ void block_job_txn_add_job(BlockJobTxn *txn, BlockJob *job); +/** + * block_job_is_internal: + * @job: The job to determine if it is user-visible or not. + * + * Returns true if the job should not be visible to the management layer. + */ +bool block_job_is_internal(BlockJob *job); + #endif diff --git a/include/block/blockjob_int.h b/include/block/blockjob_int.h new file mode 100644 index 0000000000..82238229c6 --- /dev/null +++ b/include/block/blockjob_int.h @@ -0,0 +1,250 @@ +/* + * Declarations for long-running block device operations + * + * Copyright (c) 2011 IBM Corp. + * Copyright (c) 2012 Red Hat, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#ifndef BLOCKJOB_INT_H +#define BLOCKJOB_INT_H + +#include "block/blockjob.h" +#include "block/block.h" + +/** + * BlockJobDriver: + * + * A class type for block job driver. + */ +struct BlockJobDriver { + /** Derived BlockJob struct size */ + size_t instance_size; + + /** String describing the operation, part of query-block-jobs QMP API */ + BlockJobType job_type; + + /** Optional callback for job types that support setting a speed limit */ + void (*set_speed)(BlockJob *job, int64_t speed, Error **errp); + + /** Optional callback for job types that need to forward I/O status reset */ + void (*iostatus_reset)(BlockJob *job); + + /** Mandatory: Entrypoint for the Coroutine. */ + CoroutineEntry *start; + + /** + * Optional callback for job types whose completion must be triggered + * manually. + */ + void (*complete)(BlockJob *job, Error **errp); + + /** + * If the callback is not NULL, it will be invoked when all the jobs + * belonging to the same transaction complete; or upon this job's + * completion if it is not in a transaction. Skipped if NULL. + * + * All jobs will complete with a call to either .commit() or .abort() but + * never both. + */ + void (*commit)(BlockJob *job); + + /** + * If the callback is not NULL, it will be invoked when any job in the + * same transaction fails; or upon this job's failure (due to error or + * cancellation) if it is not in a transaction. Skipped if NULL. + * + * All jobs will complete with a call to either .commit() or .abort() but + * never both. + */ + void (*abort)(BlockJob *job); + + /** + * If the callback is not NULL, it will be invoked after a call to either + * .commit() or .abort(). Regardless of which callback is invoked after + * completion, .clean() will always be called, even if the job does not + * belong to a transaction group. + */ + void (*clean)(BlockJob *job); + + /** + * If the callback is not NULL, it will be invoked when the job transitions + * into the paused state. Paused jobs must not perform any asynchronous + * I/O or event loop activity. This callback is used to quiesce jobs. + */ + void coroutine_fn (*pause)(BlockJob *job); + + /** + * If the callback is not NULL, it will be invoked when the job transitions + * out of the paused state. Any asynchronous I/O or event loop activity + * should be restarted from this callback. + */ + void coroutine_fn (*resume)(BlockJob *job); + + /* + * If the callback is not NULL, it will be invoked before the job is + * resumed in a new AioContext. This is the place to move any resources + * besides job->blk to the new AioContext. + */ + void (*attached_aio_context)(BlockJob *job, AioContext *new_context); + + /* + * If the callback is not NULL, it will be invoked when the job has to be + * synchronously cancelled or completed; it should drain BlockDriverStates + * as required to ensure progress. + */ + void (*drain)(BlockJob *job); +}; + +/** + * block_job_create: + * @job_id: The id of the newly-created job, or %NULL to have one + * generated automatically. 
+ * @job_type: The class object for the newly-created job. + * @bs: The block + * @speed: The maximum speed, in bytes per second, or 0 for unlimited. + * @cb: Completion function for the job. + * @opaque: Opaque pointer value passed to @cb. + * @errp: Error object. + * + * Create a new long-running block device job and return it. The job + * will call @cb asynchronously when the job completes. Note that + * @bs may have been closed at the time the @cb it is called. If + * this is the case, the job may be reported as either cancelled or + * completed. + * + * This function is not part of the public job interface; it should be + * called from a wrapper that is specific to the job type. + */ +void *block_job_create(const char *job_id, const BlockJobDriver *driver, + BlockDriverState *bs, int64_t speed, int flags, + BlockCompletionFunc *cb, void *opaque, Error **errp); + +/** + * block_job_sleep_ns: + * @job: The job that calls the function. + * @clock: The clock to sleep on. + * @ns: How many nanoseconds to stop for. + * + * Put the job to sleep (assuming that it wasn't canceled) for @ns + * nanoseconds. Canceling the job will interrupt the wait immediately. + */ +void block_job_sleep_ns(BlockJob *job, QEMUClockType type, int64_t ns); + +/** + * block_job_yield: + * @job: The job that calls the function. + * + * Yield the block job coroutine. + */ +void block_job_yield(BlockJob *job); + +/** + * block_job_ref: + * @bs: The block device. + * + * Grab a reference to the block job. Should be paired with block_job_unref. + */ +void block_job_ref(BlockJob *job); + +/** + * block_job_unref: + * @bs: The block device. + * + * Release reference to the block job and release resources if it is the last + * reference. + */ +void block_job_unref(BlockJob *job); + +/** + * block_job_completed: + * @job: The job being completed. + * @ret: The status code. + * + * Call the completion function that was registered at creation time, and + * free @job. + */ +void block_job_completed(BlockJob *job, int ret); + +/** + * block_job_is_cancelled: + * @job: The job being queried. + * + * Returns whether the job is scheduled for cancellation. + */ +bool block_job_is_cancelled(BlockJob *job); + +/** + * block_job_pause_point: + * @job: The job that is ready to pause. + * + * Pause now if block_job_pause() has been called. Block jobs that perform + * lots of I/O must call this between requests so that the job can be paused. + */ +void coroutine_fn block_job_pause_point(BlockJob *job); + +/** + * block_job_enter: + * @job: The job to enter. + * + * Continue the specified job by entering the coroutine. + */ +void block_job_enter(BlockJob *job); + +/** + * block_job_event_ready: + * @job: The job which is now ready to be completed. + * + * Send a BLOCK_JOB_READY event for the specified job. + */ +void block_job_event_ready(BlockJob *job); + +/** + * block_job_error_action: + * @job: The job to signal an error for. + * @on_err: The error action setting. + * @is_read: Whether the operation was a read. + * @error: The error that was reported. + * + * Report an I/O error for a block job and possibly stop the VM. Return the + * action that was selected based on @on_err and @error. 
+ */ +BlockErrorAction block_job_error_action(BlockJob *job, BlockdevOnError on_err, + int is_read, int error); + +typedef void BlockJobDeferToMainLoopFn(BlockJob *job, void *opaque); + +/** + * block_job_defer_to_main_loop: + * @job: The job + * @fn: The function to run in the main loop + * @opaque: The opaque value that is passed to @fn + * + * Execute a given function in the main loop with the BlockDriverState + * AioContext acquired. Block jobs must call bdrv_unref(), bdrv_close(), and + * anything that uses bdrv_drain_all() in the main loop. + * + * The @job AioContext is held while @fn executes. + */ +void block_job_defer_to_main_loop(BlockJob *job, + BlockJobDeferToMainLoopFn *fn, + void *opaque); + +#endif diff --git a/include/block/dirty-bitmap.h b/include/block/dirty-bitmap.h index ee3388f90d..9dea14ba03 100644 --- a/include/block/dirty-bitmap.h +++ b/include/block/dirty-bitmap.h @@ -8,6 +8,9 @@ BdrvDirtyBitmap *bdrv_create_dirty_bitmap(BlockDriverState *bs, uint32_t granularity, const char *name, Error **errp); +void bdrv_create_meta_dirty_bitmap(BdrvDirtyBitmap *bitmap, + int chunk_size); +void bdrv_release_meta_dirty_bitmap(BdrvDirtyBitmap *bitmap); int bdrv_dirty_bitmap_create_successor(BlockDriverState *bs, BdrvDirtyBitmap *bitmap, Error **errp); @@ -27,8 +30,11 @@ void bdrv_enable_dirty_bitmap(BdrvDirtyBitmap *bitmap); BlockDirtyInfoList *bdrv_query_dirty_bitmaps(BlockDriverState *bs); uint32_t bdrv_get_default_bitmap_granularity(BlockDriverState *bs); uint32_t bdrv_dirty_bitmap_granularity(BdrvDirtyBitmap *bitmap); +uint32_t bdrv_dirty_bitmap_meta_granularity(BdrvDirtyBitmap *bitmap); bool bdrv_dirty_bitmap_enabled(BdrvDirtyBitmap *bitmap); bool bdrv_dirty_bitmap_frozen(BdrvDirtyBitmap *bitmap); +const char *bdrv_dirty_bitmap_name(const BdrvDirtyBitmap *bitmap); +int64_t bdrv_dirty_bitmap_size(const BdrvDirtyBitmap *bitmap); DirtyBitmapStatus bdrv_dirty_bitmap_status(BdrvDirtyBitmap *bitmap); int bdrv_get_dirty(BlockDriverState *bs, BdrvDirtyBitmap *bitmap, int64_t sector); @@ -36,9 +42,34 @@ void bdrv_set_dirty_bitmap(BdrvDirtyBitmap *bitmap, int64_t cur_sector, int64_t nr_sectors); void bdrv_reset_dirty_bitmap(BdrvDirtyBitmap *bitmap, int64_t cur_sector, int64_t nr_sectors); -void bdrv_dirty_iter_init(BdrvDirtyBitmap *bitmap, struct HBitmapIter *hbi); -void bdrv_set_dirty_iter(struct HBitmapIter *hbi, int64_t offset); +int bdrv_dirty_bitmap_get_meta(BlockDriverState *bs, + BdrvDirtyBitmap *bitmap, int64_t sector, + int nb_sectors); +void bdrv_dirty_bitmap_reset_meta(BlockDriverState *bs, + BdrvDirtyBitmap *bitmap, int64_t sector, + int nb_sectors); +BdrvDirtyBitmapIter *bdrv_dirty_meta_iter_new(BdrvDirtyBitmap *bitmap); +BdrvDirtyBitmapIter *bdrv_dirty_iter_new(BdrvDirtyBitmap *bitmap, + uint64_t first_sector); +void bdrv_dirty_iter_free(BdrvDirtyBitmapIter *iter); +int64_t bdrv_dirty_iter_next(BdrvDirtyBitmapIter *iter); +void bdrv_set_dirty_iter(BdrvDirtyBitmapIter *hbi, int64_t sector_num); int64_t bdrv_get_dirty_count(BdrvDirtyBitmap *bitmap); +int64_t bdrv_get_meta_dirty_count(BdrvDirtyBitmap *bitmap); void bdrv_dirty_bitmap_truncate(BlockDriverState *bs); +uint64_t bdrv_dirty_bitmap_serialization_size(const BdrvDirtyBitmap *bitmap, + uint64_t start, uint64_t count); +uint64_t bdrv_dirty_bitmap_serialization_align(const BdrvDirtyBitmap *bitmap); +void bdrv_dirty_bitmap_serialize_part(const BdrvDirtyBitmap *bitmap, + uint8_t *buf, uint64_t start, + uint64_t count); +void bdrv_dirty_bitmap_deserialize_part(BdrvDirtyBitmap *bitmap, + uint8_t *buf, uint64_t start, + 
uint64_t count, bool finish); +void bdrv_dirty_bitmap_deserialize_zeroes(BdrvDirtyBitmap *bitmap, + uint64_t start, uint64_t count, + bool finish); +void bdrv_dirty_bitmap_deserialize_finish(BdrvDirtyBitmap *bitmap); + #endif diff --git a/include/block/nbd.h b/include/block/nbd.h index 1897557a9b..3e373f0498 100644 --- a/include/block/nbd.h +++ b/include/block/nbd.h @@ -1,4 +1,5 @@ /* + * Copyright (C) 2016 Red Hat, Inc. * Copyright (C) 2005 Anthony Liguori <anthony@codemonkey.ws> * * Network Block Device @@ -25,52 +26,89 @@ #include "io/channel-socket.h" #include "crypto/tlscreds.h" -/* Note: these are _NOT_ the same as the network representation of an NBD +/* Handshake phase structs - this struct is passed on the wire */ + +struct nbd_option { + uint64_t magic; /* NBD_OPTS_MAGIC */ + uint32_t option; /* NBD_OPT_* */ + uint32_t length; +} QEMU_PACKED; +typedef struct nbd_option nbd_option; + +struct nbd_opt_reply { + uint64_t magic; /* NBD_REP_MAGIC */ + uint32_t option; /* NBD_OPT_* */ + uint32_t type; /* NBD_REP_* */ + uint32_t length; +} QEMU_PACKED; +typedef struct nbd_opt_reply nbd_opt_reply; + +/* Transmission phase structs + * + * Note: these are _NOT_ the same as the network representation of an NBD * request and reply! */ -struct nbd_request { +struct NBDRequest { uint64_t handle; uint64_t from; uint32_t len; - uint32_t type; + uint16_t flags; /* NBD_CMD_FLAG_* */ + uint16_t type; /* NBD_CMD_* */ }; +typedef struct NBDRequest NBDRequest; -struct nbd_reply { +struct NBDReply { uint64_t handle; uint32_t error; }; +typedef struct NBDReply NBDReply; +/* Transmission (export) flags: sent from server to client during handshake, + but describe what will happen during transmission */ #define NBD_FLAG_HAS_FLAGS (1 << 0) /* Flags are there */ #define NBD_FLAG_READ_ONLY (1 << 1) /* Device is read-only */ #define NBD_FLAG_SEND_FLUSH (1 << 2) /* Send FLUSH */ #define NBD_FLAG_SEND_FUA (1 << 3) /* Send FUA (Force Unit Access) */ #define NBD_FLAG_ROTATIONAL (1 << 4) /* Use elevator algorithm - rotational media */ #define NBD_FLAG_SEND_TRIM (1 << 5) /* Send TRIM (discard) */ +#define NBD_FLAG_SEND_WRITE_ZEROES (1 << 6) /* Send WRITE_ZEROES */ -/* New-style global flags. */ -#define NBD_FLAG_FIXED_NEWSTYLE (1 << 0) /* Fixed newstyle protocol. */ +/* New-style handshake (global) flags, sent from server to client, and + control what will happen during handshake phase. */ +#define NBD_FLAG_FIXED_NEWSTYLE (1 << 0) /* Fixed newstyle protocol. */ +#define NBD_FLAG_NO_ZEROES (1 << 1) /* End handshake without zeroes. */ -/* New-style client flags. */ -#define NBD_FLAG_C_FIXED_NEWSTYLE (1 << 0) /* Fixed newstyle protocol. */ +/* New-style client flags, sent from client to server to control what happens + during handshake phase. */ +#define NBD_FLAG_C_FIXED_NEWSTYLE (1 << 0) /* Fixed newstyle protocol. */ +#define NBD_FLAG_C_NO_ZEROES (1 << 1) /* End handshake without zeroes. */ /* Reply types. */ +#define NBD_REP_ERR(value) ((UINT32_C(1) << 31) | (value)) + #define NBD_REP_ACK (1) /* Data sending finished. */ #define NBD_REP_SERVER (2) /* Export description. */ -#define NBD_REP_ERR_UNSUP ((UINT32_C(1) << 31) | 1) /* Unknown option. */ -#define NBD_REP_ERR_POLICY ((UINT32_C(1) << 31) | 2) /* Server denied */ -#define NBD_REP_ERR_INVALID ((UINT32_C(1) << 31) | 3) /* Invalid length. 
*/ -#define NBD_REP_ERR_TLS_REQD ((UINT32_C(1) << 31) | 5) /* TLS required */ +#define NBD_REP_ERR_UNSUP NBD_REP_ERR(1) /* Unknown option */ +#define NBD_REP_ERR_POLICY NBD_REP_ERR(2) /* Server denied */ +#define NBD_REP_ERR_INVALID NBD_REP_ERR(3) /* Invalid length */ +#define NBD_REP_ERR_PLATFORM NBD_REP_ERR(4) /* Not compiled in */ +#define NBD_REP_ERR_TLS_REQD NBD_REP_ERR(5) /* TLS required */ +#define NBD_REP_ERR_SHUTDOWN NBD_REP_ERR(7) /* Server shutting down */ -#define NBD_CMD_MASK_COMMAND 0x0000ffff -#define NBD_CMD_FLAG_FUA (1 << 16) +/* Request flags, sent from client to server during transmission phase */ +#define NBD_CMD_FLAG_FUA (1 << 0) /* 'force unit access' during write */ +#define NBD_CMD_FLAG_NO_HOLE (1 << 1) /* don't punch hole on zero run */ +/* Supported request types */ enum { NBD_CMD_READ = 0, NBD_CMD_WRITE = 1, NBD_CMD_DISC = 2, NBD_CMD_FLUSH = 3, - NBD_CMD_TRIM = 4 + NBD_CMD_TRIM = 4, + /* 5 reserved for failed experiment NBD_CMD_CACHE */ + NBD_CMD_WRITE_ZEROES = 6, }; #define NBD_DEFAULT_PORT 10809 @@ -95,16 +133,17 @@ int nbd_receive_negotiate(QIOChannel *ioc, const char *name, uint16_t *flags, QIOChannel **outioc, off_t *size, Error **errp); int nbd_init(int fd, QIOChannelSocket *sioc, uint16_t flags, off_t size); -ssize_t nbd_send_request(QIOChannel *ioc, struct nbd_request *request); -ssize_t nbd_receive_reply(QIOChannel *ioc, struct nbd_reply *reply); +ssize_t nbd_send_request(QIOChannel *ioc, NBDRequest *request); +ssize_t nbd_receive_reply(QIOChannel *ioc, NBDReply *reply); int nbd_client(int fd); int nbd_disconnect(int fd); typedef struct NBDExport NBDExport; typedef struct NBDClient NBDClient; -NBDExport *nbd_export_new(BlockBackend *blk, off_t dev_offset, off_t size, +NBDExport *nbd_export_new(BlockDriverState *bs, off_t dev_offset, off_t size, uint16_t nbdflags, void (*close)(NBDExport *), + bool writethrough, BlockBackend *on_eject_blk, Error **errp); void nbd_export_close(NBDExport *exp); void nbd_export_get(NBDExport *exp); @@ -114,6 +153,7 @@ BlockBackend *nbd_export_get_blockdev(NBDExport *exp); NBDExport *nbd_export_find(const char *name); void nbd_export_set_name(NBDExport *exp, const char *name); +void nbd_export_set_description(NBDExport *exp, const char *description); void nbd_export_close_all(void); void nbd_client_new(NBDExport *exp, |
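
A few usage sketches for the interfaces introduced above follow. They assume a QEMU source tree for the headers; helper and variable names are made up for illustration. First, the aio.h additions: aio_bh_schedule_oneshot() removes the need to keep a QEMUBH object around for fire-once callbacks, and aio_context_in_iothread() tells the caller whether it is already running in the thread that owns a context.

#include "qemu/osdep.h"
#include "block/aio.h"

static void set_done(void *opaque)
{
    /* Runs exactly once in the event loop of the context it was
     * scheduled on; the bottom half is freed automatically. */
    *(bool *)opaque = true;
}

static void kick_context(AioContext *ctx, bool *done)
{
    if (aio_context_in_iothread(ctx)) {
        /* Already in the thread that runs ctx's event loop. */
        set_done(done);
        return;
    }

    /* Defer the callback to ctx's own thread; unlike aio_bh_new(),
     * there is no QEMUBH to create and delete by hand. */
    aio_bh_schedule_oneshot(ctx, set_done, done);
}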
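The BDRV_POLL_WHILE() macro added to block.h, together with the in_flight counter and bdrv_wakeup() documented in block_int.h, is the building block for synchronous waits that stay correct whether the BlockDriverState belongs to the main loop or to an IOThread. A minimal sketch of the intended pattern; the SyncOp structure and its callback are illustrative, and the asynchronous operation itself is elided.

#include "qemu/osdep.h"
#include "block/block.h"
#include "block/block_int.h"

typedef struct SyncOp {
    BlockDriverState *bs;
    bool done;               /* set by the completion callback */
    int ret;
} SyncOp;

static void sync_op_cb(void *opaque, int ret)
{
    SyncOp *op = opaque;

    op->ret = ret;
    op->done = true;
    /* Dropping the in-flight reference calls bdrv_wakeup(), which
     * kicks the main loop out of BDRV_POLL_WHILE() when bs lives in
     * another thread's AioContext. */
    bdrv_dec_in_flight(op->bs);
}

static int blocking_wait_example(BlockDriverState *bs)
{
    SyncOp op = { .bs = bs, .done = false };

    bdrv_inc_in_flight(bs);
    /* ... start an asynchronous operation that eventually calls
     * sync_op_cb(&op, ret) ... */

    BDRV_POLL_WHILE(bs, !op.done);    /* runs the appropriate event loop */
    return op.ret;
}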
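backup_start() becoming backup_job_create(), together with the new block_job_start(), splits job construction from execution: a caller such as a QMP transaction can create several jobs and only start them once the whole group has been prepared. A sketch of the new calling convention; error handling is abbreviated and the policy values are only examples.

#include "qemu/osdep.h"
#include "block/block_int.h"
#include "block/blockjob.h"

static void backup_done(void *opaque, int ret)
{
    /* Completion callback: ret < 0 on failure. */
}

static void start_backup_example(BlockDriverState *bs,
                                 BlockDriverState *target,
                                 Error **errp)
{
    BlockJob *job;

    job = backup_job_create("backup0", bs, target,
                            0 /* speed: unlimited */,
                            MIRROR_SYNC_MODE_FULL,
                            NULL /* sync_bitmap */,
                            false /* compress */,
                            BLOCKDEV_ON_ERROR_REPORT,
                            BLOCKDEV_ON_ERROR_REPORT,
                            BLOCK_JOB_DEFAULT,
                            backup_done, NULL /* opaque */,
                            NULL /* txn */, errp);
    if (!job) {
        return;
    }

    /* The returned job does not run until block_job_start(), which
     * takes ownership of one reference to the job. */
    block_job_start(job);
}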
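With the driver definition moved to blockjob_int.h, a job implementation now hands its coroutine entry point to the core through the driver's .start field instead of spawning the coroutine itself, and finishes via block_job_defer_to_main_loop()/block_job_completed(). A skeletal driver, assuming the QEMU tree; the job type constant and the payload fields are placeholders.

#include "qemu/osdep.h"
#include "block/blockjob_int.h"

typedef struct ExampleJob {
    BlockJob common;          /* must come first, see instance_size */
    int ret;
} ExampleJob;

static void example_complete(BlockJob *job, void *opaque)
{
    /* Runs in the main loop with the job's AioContext held. */
    block_job_completed(job, ((ExampleJob *)job)->ret);
}

static void coroutine_fn example_run(void *opaque)
{
    ExampleJob *s = opaque;

    while (!block_job_is_cancelled(&s->common)) {
        /* ... move some data, then throttle and allow pausing ... */
        block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, 100000);
        block_job_pause_point(&s->common);
        break;                /* pretend the work finished in one pass */
    }

    s->ret = 0;
    block_job_defer_to_main_loop(&s->common, example_complete, NULL);
}

static const BlockJobDriver example_job_driver = {
    .instance_size = sizeof(ExampleJob),
    .job_type      = BLOCK_JOB_TYPE_STREAM,   /* placeholder type */
    .start         = example_run,
};

Such a driver would be instantiated with block_job_create(..., BLOCK_JOB_DEFAULT, ...) and launched with block_job_start(), matching the two-phase pattern shown for backup above.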
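blockjob.h now distinguishes internal pausing (block_job_pause()/block_job_resume()) from user-requested pausing, so that a monitor-initiated pause cannot be undone by an internal resume. A sketch of the user-facing pause path; the monitor wiring is implied and the function name is made up.

#include "qemu/osdep.h"
#include "block/blockjob.h"
#include "qapi/error.h"

static void qmp_pause_example(const char *id, Error **errp)
{
    BlockJob *job = block_job_get(id);

    if (!job) {
        error_setg(errp, "No block job with id '%s'", id);
        return;
    }
    if (block_job_user_paused(job)) {
        error_setg(errp, "Job '%s' is already paused", id);
        return;
    }
    block_job_user_pause(job);
}

block_job_user_resume() is the symmetric operation, and block_job_user_paused() lets the resume path refuse jobs that were never paused by the user.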
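dirty-bitmap.h replaces the open-coded HBitmapIter usage with an opaque BdrvDirtyBitmapIter. A sketch of walking the dirty sectors of a bitmap, iterating until the iterator reports a negative value; the per-sector work is left as a comment.

#include "qemu/osdep.h"
#include "block/dirty-bitmap.h"

static void walk_dirty_sectors(BdrvDirtyBitmap *bitmap)
{
    BdrvDirtyBitmapIter *iter = bdrv_dirty_iter_new(bitmap, 0);
    int64_t sector;

    while ((sector = bdrv_dirty_iter_next(iter)) >= 0) {
        /* ... copy or inspect the granularity-sized run that starts
         * at 'sector' ... */
    }

    bdrv_dirty_iter_free(iter);
}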
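The new serialization helpers let bitmap contents be shipped in chunks, as migration does. A sketch that serializes one region into a caller-owned buffer; alignment handling is simplified and left to the caller.

#include "qemu/osdep.h"
#include "block/dirty-bitmap.h"

static uint8_t *serialize_region(const BdrvDirtyBitmap *bitmap,
                                 uint64_t start, uint64_t count,
                                 uint64_t *size_out)
{
    /* start/count are expected to be multiples of
     * bdrv_dirty_bitmap_serialization_align() except for the final
     * chunk; that detail is glossed over here. */
    uint64_t size = bdrv_dirty_bitmap_serialization_size(bitmap,
                                                         start, count);
    uint8_t *buf = g_malloc(size);

    bdrv_dirty_bitmap_serialize_part(bitmap, buf, start, count);
    *size_out = size;
    return buf;
}

The receiving side would feed such buffers to bdrv_dirty_bitmap_deserialize_part() and call bdrv_dirty_bitmap_deserialize_finish() once all chunks have arrived.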
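Finally, in nbd.h the 16-bit command type and 16-bit command flags are now separate fields of NBDRequest, with NBD_CMD_FLAG_FUA moved to bit 0 of the flags word and NBD_CMD_WRITE_ZEROES added. A client-side sketch of filling in such a request; the handle value and the channel are assumed to come from the caller.

#include "qemu/osdep.h"
#include "block/nbd.h"

static ssize_t send_write_zeroes(QIOChannel *ioc, uint64_t handle,
                                 uint64_t offset, uint32_t len,
                                 bool may_punch_hole)
{
    NBDRequest request = {
        .handle = handle,
        .from   = offset,
        .len    = len,
        .type   = NBD_CMD_WRITE_ZEROES,
        /* NBD_CMD_FLAG_NO_HOLE forces full allocation of the range. */
        .flags  = may_punch_hole ? 0 : NBD_CMD_FLAG_NO_HOLE,
    };

    return nbd_send_request(ioc, &request);
}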