author     Peter Maydell <peter.maydell@linaro.org>  2015-02-05 17:11:50 +0000
committer  Peter Maydell <peter.maydell@linaro.org>  2015-02-05 17:11:50 +0000
commit     b3cd91e0eaf5c962cf139ce6ca59392c8b00f177 (patch)
tree       d6221d057f8cd22f40256a0c37c022793f1d2c93
parent     651621b780515f35711c5d7305310ab1d241c7e2 (diff)
parent     bb426311901776b95b021cece831b69dce4ef5ee (diff)
Merge remote-tracking branch 'remotes/juanquintela/tags/migration/20150205' into staging
migration/next for 20150205

# gpg: Signature made Thu 05 Feb 2015 16:17:08 GMT using RSA key ID 5872D723
# gpg: Can't check signature: public key not found

* remotes/juanquintela/tags/migration/20150205:
  fix mc146818rtc wrong subsection name to avoid vmstate_subsection_load() fail
  Tracify migration/rdma.c
  Add migration stream analyzation script
  migration: Append JSON description of migration stream
  qemu-file: Add fast ftell code path
  QJSON: Add JSON writer
  Print errors in some of the early migration failure cases.
  Migration: Add lots of trace events
  savevm: Convert fprintf to error_report
  vmstate-static-checker: update whitelist

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
-rw-r--r--  Makefile.objs                       1
-rw-r--r--  hw/pci/pci.c                        2
-rw-r--r--  hw/scsi/spapr_vscsi.c               2
-rw-r--r--  hw/timer/mc146818rtc.c              2
-rw-r--r--  hw/virtio/virtio.c                  2
-rw-r--r--  include/migration/migration.h       1
-rw-r--r--  include/migration/qemu-file.h       1
-rw-r--r--  include/migration/vmstate.h         3
-rw-r--r--  include/qjson.h                    29
-rw-r--r--  migration/qemu-file.c              16
-rw-r--r--  migration/rdma.c                  333
-rw-r--r--  migration/vmstate.c               217
-rw-r--r--  qjson.c                           129
-rw-r--r--  savevm.c                           92
-rwxr-xr-x  scripts/analyze-migration.py      592
-rwxr-xr-x  scripts/vmstate-static-checker.py   2
-rw-r--r--  tests/Makefile                      3
-rw-r--r--  tests/test-vmstate.c                6
-rw-r--r--  trace-events                       73
19 files changed, 1274 insertions, 232 deletions
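The centrepiece of this pull is the self-describing migration stream: qemu_savevm_state_complete() now appends a JSON document (new section type QEMU_VM_VMDESCRIPTION) listing every device section it wrote, and scripts/analyze-migration.py consumes it. A minimal illustration of the shape of that document, using hypothetical device and field names (the property keys are the ones emitted by the code in this series), looks roughly like:

    { "page_size" : 4096, "devices" : [
        { "name" : "timer", "instance_id" : 0,
          "vmsd_name" : "timer", "version" : 2,
          "fields" : [
            { "name" : "cpu_ticks_offset", "type" : "int64", "size" : 8 },
            { "name" : "cpu_clock_offset", "type" : "int64", "size" : 8 } ] } ] }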
diff --git a/Makefile.objs b/Makefile.objs
index abeb902b58..28999d39c4 100644
--- a/Makefile.objs
+++ b/Makefile.objs
@@ -51,6 +51,7 @@ common-obj-$(CONFIG_LINUX) += fsdev/
common-obj-y += migration/
common-obj-y += qemu-char.o #aio.o
common-obj-y += page_cache.o
+common-obj-y += qjson.o
common-obj-$(CONFIG_SPICE) += spice-qemu-char.o
diff --git a/hw/pci/pci.c b/hw/pci/pci.c
index d5e0e419c2..d50893002d 100644
--- a/hw/pci/pci.c
+++ b/hw/pci/pci.c
@@ -513,7 +513,7 @@ void pci_device_save(PCIDevice *s, QEMUFile *f)
* This makes us compatible with old devices
* which never set or clear this bit. */
s->config[PCI_STATUS] &= ~PCI_STATUS_INTERRUPT;
- vmstate_save_state(f, pci_get_vmstate(s), s);
+ vmstate_save_state(f, pci_get_vmstate(s), s, NULL);
/* Restore the interrupt status bit. */
pci_update_irq_status(s);
}
diff --git a/hw/scsi/spapr_vscsi.c b/hw/scsi/spapr_vscsi.c
index 20b20f0bae..36392359e3 100644
--- a/hw/scsi/spapr_vscsi.c
+++ b/hw/scsi/spapr_vscsi.c
@@ -630,7 +630,7 @@ static void vscsi_save_request(QEMUFile *f, SCSIRequest *sreq)
vscsi_req *req = sreq->hba_private;
assert(req->active);
- vmstate_save_state(f, &vmstate_spapr_vscsi_req, req);
+ vmstate_save_state(f, &vmstate_spapr_vscsi_req, req, NULL);
DPRINTF("VSCSI: saving tag=%u, current desc#%d, offset=%x\n",
req->qtag, req->cur_desc_num, req->cur_desc_offset);
diff --git a/hw/timer/mc146818rtc.c b/hw/timer/mc146818rtc.c
index 5a107fad5d..0600c9a1fa 100644
--- a/hw/timer/mc146818rtc.c
+++ b/hw/timer/mc146818rtc.c
@@ -734,7 +734,7 @@ static int rtc_post_load(void *opaque, int version_id)
}
static const VMStateDescription vmstate_rtc_irq_reinject_on_ack_count = {
- .name = "irq_reinject_on_ack_count",
+ .name = "mc146818rtc/irq_reinject_on_ack_count",
.version_id = 1,
.minimum_version_id = 1,
.fields = (VMStateField[]) {
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index 013979a6b8..d735343ca8 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -955,7 +955,7 @@ void virtio_save(VirtIODevice *vdev, QEMUFile *f)
}
/* Subsections */
- vmstate_save_state(f, &vmstate_virtio, vdev);
+ vmstate_save_state(f, &vmstate_virtio, vdev, NULL);
}
int virtio_set_features(VirtIODevice *vdev, uint32_t val)
diff --git a/include/migration/migration.h b/include/migration/migration.h
index 3cb5ba80c3..f37348b619 100644
--- a/include/migration/migration.h
+++ b/include/migration/migration.h
@@ -33,6 +33,7 @@
#define QEMU_VM_SECTION_END 0x03
#define QEMU_VM_SECTION_FULL 0x04
#define QEMU_VM_SUBSECTION 0x05
+#define QEMU_VM_VMDESCRIPTION 0x06
struct MigrationParams {
bool blk;
diff --git a/include/migration/qemu-file.h b/include/migration/qemu-file.h
index d843c0010c..a923cec2a6 100644
--- a/include/migration/qemu-file.h
+++ b/include/migration/qemu-file.h
@@ -121,6 +121,7 @@ QEMUFile *qemu_bufopen(const char *mode, QEMUSizedBuffer *input);
int qemu_get_fd(QEMUFile *f);
int qemu_fclose(QEMUFile *f);
int64_t qemu_ftell(QEMUFile *f);
+int64_t qemu_ftell_fast(QEMUFile *f);
void qemu_put_buffer(QEMUFile *f, const uint8_t *buf, int size);
void qemu_put_byte(QEMUFile *f, int v);
/*
diff --git a/include/migration/vmstate.h b/include/migration/vmstate.h
index fa307a6c0f..0b26bc68dd 100644
--- a/include/migration/vmstate.h
+++ b/include/migration/vmstate.h
@@ -29,6 +29,7 @@
#ifndef CONFIG_USER_ONLY
#include <migration/qemu-file.h>
#endif
+#include <qjson.h>
typedef void SaveStateHandler(QEMUFile *f, void *opaque);
typedef int LoadStateHandler(QEMUFile *f, void *opaque, int version_id);
@@ -801,7 +802,7 @@ extern const VMStateInfo vmstate_info_bitmap;
int vmstate_load_state(QEMUFile *f, const VMStateDescription *vmsd,
void *opaque, int version_id);
void vmstate_save_state(QEMUFile *f, const VMStateDescription *vmsd,
- void *opaque);
+ void *opaque, QJSON *vmdesc);
int vmstate_register_with_alias_id(DeviceState *dev, int instance_id,
const VMStateDescription *vmsd,
diff --git a/include/qjson.h b/include/qjson.h
new file mode 100644
index 0000000000..7c54fdf0ac
--- /dev/null
+++ b/include/qjson.h
@@ -0,0 +1,29 @@
+/*
+ * QEMU JSON writer
+ *
+ * Copyright Alexander Graf
+ *
+ * Authors:
+ * Alexander Graf <agraf@suse.de>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ *
+ */
+#ifndef QEMU_QJSON_H
+#define QEMU_QJSON_H
+
+#define TYPE_QJSON "QJSON"
+typedef struct QJSON QJSON;
+
+QJSON *qjson_new(void);
+void json_prop_str(QJSON *json, const char *name, const char *str);
+void json_prop_int(QJSON *json, const char *name, int64_t val);
+void json_end_array(QJSON *json);
+void json_start_array(QJSON *json, const char *name);
+void json_end_object(QJSON *json);
+void json_start_object(QJSON *json, const char *name);
+const char *qjson_get_str(QJSON *json);
+void qjson_finish(QJSON *json);
+
+#endif /* QEMU_QJSON_H */
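The header above only declares the writer. A usage sketch (not part of the patch, names made up for illustration): qjson_new() opens an implicit root object, qjson_finish() closes it, and since QJSON is a QOM object it is released with object_unref(), exactly as savevm.c does further down in this series.

    #include <stdio.h>
    #include "qjson.h"
    #include "qom/object.h"

    static void qjson_demo(void)
    {
        QJSON *vmdesc = qjson_new();                  /* emits the opening "{ " */

        json_prop_str(vmdesc, "vmsd_name", "example");    /* hypothetical values */
        json_prop_int(vmdesc, "version", 1);
        json_start_array(vmdesc, "fields");
        json_start_object(vmdesc, NULL);
        json_prop_str(vmdesc, "name", "data");
        json_prop_int(vmdesc, "size", 16);
        json_end_object(vmdesc);
        json_end_array(vmdesc);

        qjson_finish(vmdesc);                         /* closes the root object */
        printf("%s\n", qjson_get_str(vmdesc));
        /* prints roughly:
         * { "vmsd_name" : "example", "version" : 1,
         *   "fields" : [ { "name" : "data", "size" : 16 } ] } */
        object_unref(OBJECT(vmdesc));                 /* QJSON is a QOM object */
    }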
diff --git a/migration/qemu-file.c b/migration/qemu-file.c
index edc283073a..e66e55712f 100644
--- a/migration/qemu-file.c
+++ b/migration/qemu-file.c
@@ -452,6 +452,22 @@ int qemu_get_byte(QEMUFile *f)
return result;
}
+int64_t qemu_ftell_fast(QEMUFile *f)
+{
+ int64_t ret = f->pos;
+ int i;
+
+ if (f->ops->writev_buffer) {
+ for (i = 0; i < f->iovcnt; i++) {
+ ret += f->iov[i].iov_len;
+ }
+ } else {
+ ret += f->buf_index;
+ }
+
+ return ret;
+}
+
int64_t qemu_ftell(QEMUFile *f)
{
qemu_fflush(f);
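qemu_ftell_fast() returns the current logical stream position without the qemu_fflush() that qemu_ftell() performs: it adds the bytes still queued in the iovec array (for files with a writev_buffer backend) or the buffer index otherwise. That makes it cheap enough to call around every field write; the measurement pattern introduced in migration/vmstate.c later in this series is roughly:

    /* sketch of the per-field size measurement, see vmstate_save_state() below */
    int64_t old_offset = qemu_ftell_fast(f);
    field->info->put(f, addr, size);                  /* write one field */
    int64_t written_bytes = qemu_ftell_fast(f) - old_offset;
    /* written_bytes becomes the "size" property in the JSON vmdesc */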
diff --git a/migration/rdma.c b/migration/rdma.c
index b32dbdfccd..fc351eabf2 100644
--- a/migration/rdma.c
+++ b/migration/rdma.c
@@ -26,34 +26,7 @@
#include <arpa/inet.h>
#include <string.h>
#include <rdma/rdma_cma.h>
-
-//#define DEBUG_RDMA
-//#define DEBUG_RDMA_VERBOSE
-//#define DEBUG_RDMA_REALLY_VERBOSE
-
-#ifdef DEBUG_RDMA
-#define DPRINTF(fmt, ...) \
- do { printf("rdma: " fmt, ## __VA_ARGS__); } while (0)
-#else
-#define DPRINTF(fmt, ...) \
- do { } while (0)
-#endif
-
-#ifdef DEBUG_RDMA_VERBOSE
-#define DDPRINTF(fmt, ...) \
- do { printf("rdma: " fmt, ## __VA_ARGS__); } while (0)
-#else
-#define DDPRINTF(fmt, ...) \
- do { } while (0)
-#endif
-
-#ifdef DEBUG_RDMA_REALLY_VERBOSE
-#define DDDPRINTF(fmt, ...) \
- do { printf("rdma: " fmt, ## __VA_ARGS__); } while (0)
-#else
-#define DDDPRINTF(fmt, ...) \
- do { } while (0)
-#endif
+#include "trace.h"
/*
* Print and error on both the Monitor and the Log file.
@@ -104,8 +77,8 @@ static uint32_t known_capabilities = RDMA_CAPABILITY_PIN_ALL;
do { \
if (rdma->error_state) { \
if (!rdma->error_reported) { \
- fprintf(stderr, "RDMA is in an error state waiting migration" \
- " to abort!\n"); \
+ error_report("RDMA is in an error state waiting migration" \
+ " to abort!"); \
rdma->error_reported = 1; \
} \
return rdma->error_state; \
@@ -578,12 +551,13 @@ static int __qemu_rdma_add_block(RDMAContext *rdma, void *host_addr,
g_hash_table_insert(rdma->blockmap, (void *) block_offset, block);
- DDPRINTF("Added Block: %d, addr: %" PRIu64 ", offset: %" PRIu64
- " length: %" PRIu64 " end: %" PRIu64 " bits %" PRIu64 " chunks %d\n",
- local->nb_blocks, (uint64_t) block->local_host_addr, block->offset,
- block->length, (uint64_t) (block->local_host_addr + block->length),
- BITS_TO_LONGS(block->nb_chunks) *
- sizeof(unsigned long) * 8, block->nb_chunks);
+ trace___qemu_rdma_add_block(local->nb_blocks,
+ (uint64_t) block->local_host_addr, block->offset,
+ block->length,
+ (uint64_t) (block->local_host_addr + block->length),
+ BITS_TO_LONGS(block->nb_chunks) *
+ sizeof(unsigned long) * 8,
+ block->nb_chunks);
local->nb_blocks++;
@@ -614,7 +588,7 @@ static int qemu_rdma_init_ram_blocks(RDMAContext *rdma)
rdma->blockmap = g_hash_table_new(g_direct_hash, g_direct_equal);
memset(local, 0, sizeof *local);
qemu_ram_foreach_block(qemu_rdma_init_one_block, rdma);
- DPRINTF("Allocated %d local ram block structures\n", local->nb_blocks);
+ trace_qemu_rdma_init_ram_blocks(local->nb_blocks);
rdma->block = (RDMARemoteBlock *) g_malloc0(sizeof(RDMARemoteBlock) *
rdma->local_ram_blocks.nb_blocks);
local->init = true;
@@ -683,12 +657,12 @@ static int __qemu_rdma_delete_block(RDMAContext *rdma, ram_addr_t block_offset)
local->block = NULL;
}
- DDPRINTF("Deleted Block: %d, addr: %" PRIu64 ", offset: %" PRIu64
- " length: %" PRIu64 " end: %" PRIu64 " bits %" PRIu64 " chunks %d\n",
- local->nb_blocks, (uint64_t) block->local_host_addr, block->offset,
- block->length, (uint64_t) (block->local_host_addr + block->length),
- BITS_TO_LONGS(block->nb_chunks) *
- sizeof(unsigned long) * 8, block->nb_chunks);
+ trace___qemu_rdma_delete_block(local->nb_blocks,
+ (uint64_t)block->local_host_addr,
+ block->offset, block->length,
+ (uint64_t)(block->local_host_addr + block->length),
+ BITS_TO_LONGS(block->nb_chunks) *
+ sizeof(unsigned long) * 8, block->nb_chunks);
g_free(old);
@@ -713,7 +687,7 @@ static void qemu_rdma_dump_id(const char *who, struct ibv_context *verbs)
struct ibv_port_attr port;
if (ibv_query_port(verbs, 1, &port)) {
- fprintf(stderr, "FAILED TO QUERY PORT INFORMATION!\n");
+ error_report("Failed to query port information");
return;
}
@@ -744,7 +718,7 @@ static void qemu_rdma_dump_gid(const char *who, struct rdma_cm_id *id)
char dgid[33];
inet_ntop(AF_INET6, &id->route.addr.addr.ibaddr.sgid, sgid, sizeof sgid);
inet_ntop(AF_INET6, &id->route.addr.addr.ibaddr.dgid, dgid, sizeof dgid);
- DPRINTF("%s Source GID: %s, Dest GID: %s\n", who, sgid, dgid);
+ trace_qemu_rdma_dump_gid(who, sgid, dgid);
}
/*
@@ -918,7 +892,7 @@ static int qemu_rdma_resolve_host(RDMAContext *rdma, Error **errp)
for (e = res; e != NULL; e = e->ai_next) {
inet_ntop(e->ai_family,
&((struct sockaddr_in *) e->ai_dst_addr)->sin_addr, ip, sizeof ip);
- DPRINTF("Trying %s => %s\n", rdma->host, ip);
+ trace_qemu_rdma_resolve_host_trying(rdma->host, ip);
ret = rdma_resolve_addr(rdma->cm_id, NULL, e->ai_dst_addr,
RDMA_RESOLVE_TIMEOUT_MS);
@@ -997,14 +971,14 @@ static int qemu_rdma_alloc_pd_cq(RDMAContext *rdma)
/* allocate pd */
rdma->pd = ibv_alloc_pd(rdma->verbs);
if (!rdma->pd) {
- fprintf(stderr, "failed to allocate protection domain\n");
+ error_report("failed to allocate protection domain");
return -1;
}
/* create completion channel */
rdma->comp_channel = ibv_create_comp_channel(rdma->verbs);
if (!rdma->comp_channel) {
- fprintf(stderr, "failed to allocate completion channel\n");
+ error_report("failed to allocate completion channel");
goto err_alloc_pd_cq;
}
@@ -1015,7 +989,7 @@ static int qemu_rdma_alloc_pd_cq(RDMAContext *rdma)
rdma->cq = ibv_create_cq(rdma->verbs, (RDMA_SIGNALED_SEND_MAX * 3),
NULL, rdma->comp_channel, 0);
if (!rdma->cq) {
- fprintf(stderr, "failed to allocate completion queue\n");
+ error_report("failed to allocate completion queue");
goto err_alloc_pd_cq;
}
@@ -1160,8 +1134,7 @@ static int qemu_rdma_register_and_get_keys(RDMAContext *rdma,
if (!block->pmr[chunk]) {
uint64_t len = chunk_end - chunk_start;
- DDPRINTF("Registering %" PRIu64 " bytes @ %p\n",
- len, chunk_start);
+ trace_qemu_rdma_register_and_get_keys(len, chunk_start);
block->pmr[chunk] = ibv_reg_mr(rdma->pd,
chunk_start, len,
@@ -1204,7 +1177,7 @@ static int qemu_rdma_reg_control(RDMAContext *rdma, int idx)
rdma->total_registrations++;
return 0;
}
- fprintf(stderr, "qemu_rdma_reg_control failed!\n");
+ error_report("qemu_rdma_reg_control failed");
return -1;
}
@@ -1270,8 +1243,8 @@ static int qemu_rdma_unregister_waiting(RDMAContext *rdma)
.repeat = 1,
};
- DDPRINTF("Processing unregister for chunk: %" PRIu64
- " at position %d\n", chunk, rdma->unregister_current);
+ trace_qemu_rdma_unregister_waiting_proc(chunk,
+ rdma->unregister_current);
rdma->unregistrations[rdma->unregister_current] = 0;
rdma->unregister_current++;
@@ -1291,11 +1264,11 @@ static int qemu_rdma_unregister_waiting(RDMAContext *rdma)
clear_bit(chunk, block->unregister_bitmap);
if (test_bit(chunk, block->transit_bitmap)) {
- DDPRINTF("Cannot unregister inflight chunk: %" PRIu64 "\n", chunk);
+ trace_qemu_rdma_unregister_waiting_inflight(chunk);
continue;
}
- DDPRINTF("Sending unregister for chunk: %" PRIu64 "\n", chunk);
+ trace_qemu_rdma_unregister_waiting_send(chunk);
ret = ibv_dereg_mr(block->pmr[chunk]);
block->pmr[chunk] = NULL;
@@ -1315,7 +1288,7 @@ static int qemu_rdma_unregister_waiting(RDMAContext *rdma)
return ret;
}
- DDPRINTF("Unregister for chunk: %" PRIu64 " complete.\n", chunk);
+ trace_qemu_rdma_unregister_waiting_complete(chunk);
}
return 0;
@@ -1340,13 +1313,13 @@ static void qemu_rdma_signal_unregister(RDMAContext *rdma, uint64_t index,
uint64_t chunk, uint64_t wr_id)
{
if (rdma->unregistrations[rdma->unregister_next] != 0) {
- fprintf(stderr, "rdma migration: queue is full!\n");
+ error_report("rdma migration: queue is full");
} else {
RDMALocalBlock *block = &(rdma->local_ram_blocks.block[index]);
if (!test_and_set_bit(chunk, block->unregister_bitmap)) {
- DDPRINTF("Appending unregister chunk %" PRIu64
- " at position %d\n", chunk, rdma->unregister_next);
+ trace_qemu_rdma_signal_unregister_append(chunk,
+ rdma->unregister_next);
rdma->unregistrations[rdma->unregister_next++] =
qemu_rdma_make_wrid(wr_id, index, chunk);
@@ -1355,8 +1328,7 @@ static void qemu_rdma_signal_unregister(RDMAContext *rdma, uint64_t index,
rdma->unregister_next = 0;
}
} else {
- DDPRINTF("Unregister chunk %" PRIu64 " already in queue.\n",
- chunk);
+ trace_qemu_rdma_signal_unregister_already(chunk);
}
}
}
@@ -1381,7 +1353,7 @@ static uint64_t qemu_rdma_poll(RDMAContext *rdma, uint64_t *wr_id_out,
}
if (ret < 0) {
- fprintf(stderr, "ibv_poll_cq return %d!\n", ret);
+ error_report("ibv_poll_cq return %d", ret);
return ret;
}
@@ -1397,8 +1369,7 @@ static uint64_t qemu_rdma_poll(RDMAContext *rdma, uint64_t *wr_id_out,
if (rdma->control_ready_expected &&
(wr_id >= RDMA_WRID_RECV_CONTROL)) {
- DDDPRINTF("completion %s #%" PRId64 " received (%" PRId64 ")"
- " left %d\n", wrid_desc[RDMA_WRID_RECV_CONTROL],
+ trace_qemu_rdma_poll_recv(wrid_desc[RDMA_WRID_RECV_CONTROL],
wr_id - RDMA_WRID_RECV_CONTROL, wr_id, rdma->nb_sent);
rdma->control_ready_expected = 0;
}
@@ -1410,9 +1381,8 @@ static uint64_t qemu_rdma_poll(RDMAContext *rdma, uint64_t *wr_id_out,
(wc.wr_id & RDMA_WRID_BLOCK_MASK) >> RDMA_WRID_BLOCK_SHIFT;
RDMALocalBlock *block = &(rdma->local_ram_blocks.block[index]);
- DDDPRINTF("completions %s (%" PRId64 ") left %d, "
- "block %" PRIu64 ", chunk: %" PRIu64 " %p %p\n",
- print_wrid(wr_id), wr_id, rdma->nb_sent, index, chunk,
+ trace_qemu_rdma_poll_write(print_wrid(wr_id), wr_id, rdma->nb_sent,
+ index, chunk,
block->local_host_addr, (void *)block->remote_host_addr);
clear_bit(chunk, block->transit_bitmap);
@@ -1433,8 +1403,7 @@ static uint64_t qemu_rdma_poll(RDMAContext *rdma, uint64_t *wr_id_out,
#endif
}
} else {
- DDDPRINTF("other completion %s (%" PRId64 ") received left %d\n",
- print_wrid(wr_id), wr_id, rdma->nb_sent);
+ trace_qemu_rdma_poll_other(print_wrid(wr_id), wr_id, rdma->nb_sent);
}
*wr_id_out = wc.wr_id;
@@ -1482,9 +1451,8 @@ static int qemu_rdma_block_for_wrid(RDMAContext *rdma, int wrid_requested,
break;
}
if (wr_id != wrid_requested) {
- DDDPRINTF("A Wanted wrid %s (%d) but got %s (%" PRIu64 ")\n",
- print_wrid(wrid_requested),
- wrid_requested, print_wrid(wr_id), wr_id);
+ trace_qemu_rdma_block_for_wrid_miss(print_wrid(wrid_requested),
+ wrid_requested, print_wrid(wr_id), wr_id);
}
}
@@ -1524,9 +1492,8 @@ static int qemu_rdma_block_for_wrid(RDMAContext *rdma, int wrid_requested,
break;
}
if (wr_id != wrid_requested) {
- DDDPRINTF("B Wanted wrid %s (%d) but got %s (%" PRIu64 ")\n",
- print_wrid(wrid_requested), wrid_requested,
- print_wrid(wr_id), wr_id);
+ trace_qemu_rdma_block_for_wrid_miss(print_wrid(wrid_requested),
+ wrid_requested, print_wrid(wr_id), wr_id);
}
}
@@ -1571,7 +1538,7 @@ static int qemu_rdma_post_send_control(RDMAContext *rdma, uint8_t *buf,
.num_sge = 1,
};
- DDDPRINTF("CONTROL: sending %s..\n", control_desc[head->type]);
+ trace_qemu_rdma_post_send_control(control_desc[head->type]);
/*
* We don't actually need to do a memcpy() in here if we used
@@ -1593,13 +1560,13 @@ static int qemu_rdma_post_send_control(RDMAContext *rdma, uint8_t *buf,
ret = ibv_post_send(rdma->qp, &send_wr, &bad_wr);
if (ret > 0) {
- fprintf(stderr, "Failed to use post IB SEND for control!\n");
+ error_report("Failed to use post IB SEND for control");
return -ret;
}
ret = qemu_rdma_block_for_wrid(rdma, RDMA_WRID_SEND_CONTROL, NULL);
if (ret < 0) {
- fprintf(stderr, "rdma migration: send polling control error!\n");
+ error_report("rdma migration: send polling control error");
}
return ret;
@@ -1643,32 +1610,31 @@ static int qemu_rdma_exchange_get_response(RDMAContext *rdma,
&byte_len);
if (ret < 0) {
- fprintf(stderr, "rdma migration: recv polling control error!\n");
+ error_report("rdma migration: recv polling control error!");
return ret;
}
network_to_control((void *) rdma->wr_data[idx].control);
memcpy(head, rdma->wr_data[idx].control, sizeof(RDMAControlHeader));
- DDDPRINTF("CONTROL: %s receiving...\n", control_desc[expecting]);
+ trace_qemu_rdma_exchange_get_response_start(control_desc[expecting]);
if (expecting == RDMA_CONTROL_NONE) {
- DDDPRINTF("Surprise: got %s (%d)\n",
- control_desc[head->type], head->type);
+ trace_qemu_rdma_exchange_get_response_none(control_desc[head->type],
+ head->type);
} else if (head->type != expecting || head->type == RDMA_CONTROL_ERROR) {
- fprintf(stderr, "Was expecting a %s (%d) control message"
- ", but got: %s (%d), length: %d\n",
+ error_report("Was expecting a %s (%d) control message"
+ ", but got: %s (%d), length: %d",
control_desc[expecting], expecting,
control_desc[head->type], head->type, head->len);
return -EIO;
}
if (head->len > RDMA_CONTROL_MAX_BUFFER - sizeof(*head)) {
- fprintf(stderr, "too long length: %d\n", head->len);
+ error_report("too long length: %d\n", head->len);
return -EINVAL;
}
if (sizeof(*head) + head->len != byte_len) {
- fprintf(stderr, "Malformed length: %d byte_len %d\n",
- head->len, byte_len);
+ error_report("Malformed length: %d byte_len %d", head->len, byte_len);
return -EINVAL;
}
@@ -1730,7 +1696,7 @@ static int qemu_rdma_exchange_send(RDMAContext *rdma, RDMAControlHeader *head,
if (resp) {
ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_DATA);
if (ret) {
- fprintf(stderr, "rdma migration: error posting"
+ error_report("rdma migration: error posting"
" extra control recv for anticipated result!");
return ret;
}
@@ -1741,7 +1707,7 @@ static int qemu_rdma_exchange_send(RDMAContext *rdma, RDMAControlHeader *head,
*/
ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_READY);
if (ret) {
- fprintf(stderr, "rdma migration: error posting first control recv!");
+ error_report("rdma migration: error posting first control recv!");
return ret;
}
@@ -1751,7 +1717,7 @@ static int qemu_rdma_exchange_send(RDMAContext *rdma, RDMAControlHeader *head,
ret = qemu_rdma_post_send_control(rdma, data, head);
if (ret < 0) {
- fprintf(stderr, "Failed to send control buffer!\n");
+ error_report("Failed to send control buffer!");
return ret;
}
@@ -1760,14 +1726,14 @@ static int qemu_rdma_exchange_send(RDMAContext *rdma, RDMAControlHeader *head,
*/
if (resp) {
if (callback) {
- DDPRINTF("Issuing callback before receiving response...\n");
+ trace_qemu_rdma_exchange_send_issue_callback();
ret = callback(rdma);
if (ret < 0) {
return ret;
}
}
- DDPRINTF("Waiting for response %s\n", control_desc[resp->type]);
+ trace_qemu_rdma_exchange_send_waiting(control_desc[resp->type]);
ret = qemu_rdma_exchange_get_response(rdma, resp,
resp->type, RDMA_WRID_DATA);
@@ -1779,7 +1745,7 @@ static int qemu_rdma_exchange_send(RDMAContext *rdma, RDMAControlHeader *head,
if (resp_idx) {
*resp_idx = RDMA_WRID_DATA;
}
- DDPRINTF("Response %s received.\n", control_desc[resp->type]);
+ trace_qemu_rdma_exchange_send_received(control_desc[resp->type]);
}
rdma->control_ready_expected = 1;
@@ -1807,7 +1773,7 @@ static int qemu_rdma_exchange_recv(RDMAContext *rdma, RDMAControlHeader *head,
ret = qemu_rdma_post_send_control(rdma, NULL, &ready);
if (ret < 0) {
- fprintf(stderr, "Failed to send control buffer!\n");
+ error_report("Failed to send control buffer!");
return ret;
}
@@ -1828,7 +1794,7 @@ static int qemu_rdma_exchange_recv(RDMAContext *rdma, RDMAControlHeader *head,
*/
ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_READY);
if (ret) {
- fprintf(stderr, "rdma migration: error posting second control recv!");
+ error_report("rdma migration: error posting second control recv!");
return ret;
}
@@ -1882,8 +1848,9 @@ retry:
}
}
- DDPRINTF("Writing %" PRIu64 " chunks, (%" PRIu64 " MB)\n",
- chunks + 1, (chunks + 1) * (1UL << RDMA_REG_CHUNK_SHIFT) / 1024 / 1024);
+ trace_qemu_rdma_write_one_top(chunks + 1,
+ (chunks + 1) *
+ (1UL << RDMA_REG_CHUNK_SHIFT) / 1024 / 1024);
chunk_end = ram_chunk_end(block, chunk + chunks);
@@ -1895,17 +1862,15 @@ retry:
while (test_bit(chunk, block->transit_bitmap)) {
(void)count;
- DDPRINTF("(%d) Not clobbering: block: %d chunk %" PRIu64
- " current %" PRIu64 " len %" PRIu64 " %d %d\n",
- count++, current_index, chunk,
+ trace_qemu_rdma_write_one_block(count++, current_index, chunk,
sge.addr, length, rdma->nb_sent, block->nb_chunks);
ret = qemu_rdma_block_for_wrid(rdma, RDMA_WRID_RDMA_WRITE, NULL);
if (ret < 0) {
- fprintf(stderr, "Failed to Wait for previous write to complete "
+ error_report("Failed to Wait for previous write to complete "
"block %d chunk %" PRIu64
- " current %" PRIu64 " len %" PRIu64 " %d\n",
+ " current %" PRIu64 " len %" PRIu64 " %d",
current_index, chunk, sge.addr, length, rdma->nb_sent);
return ret;
}
@@ -1932,10 +1897,8 @@ retry:
head.len = sizeof(comp);
head.type = RDMA_CONTROL_COMPRESS;
- DDPRINTF("Entire chunk is zero, sending compress: %"
- PRIu64 " for %d "
- "bytes, index: %d, offset: %" PRId64 "...\n",
- chunk, sge.length, current_index, current_addr);
+ trace_qemu_rdma_write_one_zero(chunk, sge.length,
+ current_index, current_addr);
compress_to_network(&comp);
ret = qemu_rdma_exchange_send(rdma, &head,
@@ -1961,9 +1924,8 @@ retry:
}
reg.chunks = chunks;
- DDPRINTF("Sending registration request chunk %" PRIu64 " for %d "
- "bytes, index: %d, offset: %" PRId64 "...\n",
- chunk, sge.length, current_index, current_addr);
+ trace_qemu_rdma_write_one_sendreg(chunk, sge.length, current_index,
+ current_addr);
register_to_network(&reg);
ret = qemu_rdma_exchange_send(rdma, &head, (uint8_t *) &reg,
@@ -1977,7 +1939,7 @@ retry:
(uint8_t *) sge.addr,
&sge.lkey, NULL, chunk,
chunk_start, chunk_end)) {
- fprintf(stderr, "cannot get lkey!\n");
+ error_report("cannot get lkey");
return -EINVAL;
}
@@ -1986,9 +1948,8 @@ retry:
network_to_result(reg_result);
- DDPRINTF("Received registration result:"
- " my key: %x their key %x, chunk %" PRIu64 "\n",
- block->remote_keys[chunk], reg_result->rkey, chunk);
+ trace_qemu_rdma_write_one_recvregres(block->remote_keys[chunk],
+ reg_result->rkey, chunk);
block->remote_keys[chunk] = reg_result->rkey;
block->remote_host_addr = reg_result->host_addr;
@@ -1998,7 +1959,7 @@ retry:
(uint8_t *)sge.addr,
&sge.lkey, NULL, chunk,
chunk_start, chunk_end)) {
- fprintf(stderr, "cannot get lkey!\n");
+ error_report("cannot get lkey!");
return -EINVAL;
}
}
@@ -2010,7 +1971,7 @@ retry:
if (qemu_rdma_register_and_get_keys(rdma, block, (uint8_t *)sge.addr,
&sge.lkey, NULL, chunk,
chunk_start, chunk_end)) {
- fprintf(stderr, "cannot get lkey!\n");
+ error_report("cannot get lkey!");
return -EINVAL;
}
}
@@ -2031,10 +1992,8 @@ retry:
send_wr.wr.rdma.remote_addr = block->remote_host_addr +
(current_addr - block->offset);
- DDDPRINTF("Posting chunk: %" PRIu64 ", addr: %lx"
- " remote: %lx, bytes %" PRIu32 "\n",
- chunk, sge.addr, send_wr.wr.rdma.remote_addr,
- sge.length);
+ trace_qemu_rdma_write_one_post(chunk, sge.addr, send_wr.wr.rdma.remote_addr,
+ sge.length);
/*
* ibv_post_send() does not return negative error numbers,
@@ -2043,11 +2002,11 @@ retry:
ret = ibv_post_send(rdma->qp, &send_wr, &bad_wr);
if (ret == ENOMEM) {
- DDPRINTF("send queue is full. wait a little....\n");
+ trace_qemu_rdma_write_one_queue_full();
ret = qemu_rdma_block_for_wrid(rdma, RDMA_WRID_RDMA_WRITE, NULL);
if (ret < 0) {
- fprintf(stderr, "rdma migration: failed to make "
- "room in full send queue! %d\n", ret);
+ error_report("rdma migration: failed to make "
+ "room in full send queue! %d", ret);
return ret;
}
@@ -2088,7 +2047,7 @@ static int qemu_rdma_write_flush(QEMUFile *f, RDMAContext *rdma)
if (ret == 0) {
rdma->nb_sent++;
- DDDPRINTF("sent total: %d\n", rdma->nb_sent);
+ trace_qemu_rdma_write_flush(rdma->nb_sent);
}
rdma->current_length = 0;
@@ -2173,7 +2132,7 @@ static int qemu_rdma_write(QEMUFile *f, RDMAContext *rdma,
ret = qemu_rdma_search_ram_block(rdma, block_offset,
offset, len, &index, &chunk);
if (ret) {
- fprintf(stderr, "ram block search failed\n");
+ error_report("ram block search failed");
return ret;
}
rdma->current_index = index;
@@ -2202,19 +2161,19 @@ static void qemu_rdma_cleanup(RDMAContext *rdma)
.type = RDMA_CONTROL_ERROR,
.repeat = 1,
};
- fprintf(stderr, "Early error. Sending error.\n");
+ error_report("Early error. Sending error.");
qemu_rdma_post_send_control(rdma, NULL, &head);
}
ret = rdma_disconnect(rdma->cm_id);
if (!ret) {
- DDPRINTF("waiting for disconnect\n");
+ trace_qemu_rdma_cleanup_waiting_for_disconnect();
ret = rdma_get_cm_event(rdma->channel, &cm_event);
if (!ret) {
rdma_ack_cm_event(cm_event);
}
}
- DDPRINTF("Disconnected.\n");
+ trace_qemu_rdma_cleanup_disconnect();
rdma->connected = false;
}
@@ -2341,7 +2300,7 @@ static int qemu_rdma_connect(RDMAContext *rdma, Error **errp)
* on the source first requested the capability.
*/
if (rdma->pin_all) {
- DPRINTF("Server pin-all memory requested.\n");
+ trace_qemu_rdma_connect_pin_all_requested();
cap.flags |= RDMA_CAPABILITY_PIN_ALL;
}
@@ -2389,7 +2348,7 @@ static int qemu_rdma_connect(RDMAContext *rdma, Error **errp)
rdma->pin_all = false;
}
- DPRINTF("Pin all memory: %s\n", rdma->pin_all ? "enabled" : "disabled");
+ trace_qemu_rdma_connect_pin_all_outcome(rdma->pin_all);
rdma_ack_cm_event(cm_event);
@@ -2456,7 +2415,7 @@ static int qemu_rdma_dest_init(RDMAContext *rdma, Error **errp)
for (e = res; e != NULL; e = e->ai_next) {
inet_ntop(e->ai_family,
&((struct sockaddr_in *) e->ai_dst_addr)->sin_addr, ip, sizeof ip);
- DPRINTF("Trying %s => %s\n", rdma->host, ip);
+ trace_qemu_rdma_dest_init_trying(rdma->host, ip);
ret = rdma_bind_addr(listen_id, e->ai_dst_addr);
if (!ret) {
if (e->ai_family == AF_INET6) {
@@ -2575,8 +2534,7 @@ static size_t qemu_rdma_fill(RDMAContext *rdma, uint8_t *buf,
size_t len = 0;
if (rdma->wr_data[idx].control_len) {
- DDDPRINTF("RDMA %" PRId64 " of %d bytes already in buffer\n",
- rdma->wr_data[idx].control_len, size);
+ trace_qemu_rdma_fill(rdma->wr_data[idx].control_len, size);
len = MIN(size, rdma->wr_data[idx].control_len);
memcpy(buf, rdma->wr_data[idx].control_curr, len);
@@ -2643,7 +2601,7 @@ static int qemu_rdma_drain_cq(QEMUFile *f, RDMAContext *rdma)
while (rdma->nb_sent) {
ret = qemu_rdma_block_for_wrid(rdma, RDMA_WRID_RDMA_WRITE, NULL);
if (ret < 0) {
- fprintf(stderr, "rdma migration: complete polling error!\n");
+ error_report("rdma migration: complete polling error!");
return -EIO;
}
}
@@ -2655,7 +2613,7 @@ static int qemu_rdma_drain_cq(QEMUFile *f, RDMAContext *rdma)
static int qemu_rdma_close(void *opaque)
{
- DPRINTF("Shutting down connection.\n");
+ trace_qemu_rdma_close();
QEMUFileRDMA *r = opaque;
if (r->rdma) {
qemu_rdma_cleanup(r->rdma);
@@ -2719,7 +2677,7 @@ static size_t qemu_rdma_save_page(QEMUFile *f, void *opaque,
*/
ret = qemu_rdma_write(f, rdma, block_offset, offset, size);
if (ret < 0) {
- fprintf(stderr, "rdma migration: write error! %d\n", ret);
+ error_report("rdma migration: write error! %d", ret);
goto err;
}
@@ -2752,7 +2710,7 @@ static size_t qemu_rdma_save_page(QEMUFile *f, void *opaque,
offset, size, &index, &chunk);
if (ret) {
- fprintf(stderr, "ram block search failed\n");
+ error_report("ram block search failed");
goto err;
}
@@ -2779,7 +2737,7 @@ static size_t qemu_rdma_save_page(QEMUFile *f, void *opaque,
uint64_t wr_id, wr_id_in;
int ret = qemu_rdma_poll(rdma, &wr_id_in, NULL);
if (ret < 0) {
- fprintf(stderr, "rdma migration: polling error! %d\n", ret);
+ error_report("rdma migration: polling error! %d", ret);
goto err;
}
@@ -2824,7 +2782,7 @@ static int qemu_rdma_accept(RDMAContext *rdma)
network_to_caps(&cap);
if (cap.version < 1 || cap.version > RDMA_CONTROL_VERSION_CURRENT) {
- fprintf(stderr, "Unknown source RDMA version: %d, bailing...\n",
+ error_report("Unknown source RDMA version: %d, bailing...",
cap.version);
rdma_ack_cm_event(cm_event);
goto err_rdma_dest_wait;
@@ -2848,17 +2806,17 @@ static int qemu_rdma_accept(RDMAContext *rdma)
rdma_ack_cm_event(cm_event);
- DPRINTF("Memory pin all: %s\n", rdma->pin_all ? "enabled" : "disabled");
+ trace_qemu_rdma_accept_pin_state(rdma->pin_all);
caps_to_network(&cap);
- DPRINTF("verbs context after listen: %p\n", verbs);
+ trace_qemu_rdma_accept_pin_verbsc(verbs);
if (!rdma->verbs) {
rdma->verbs = verbs;
} else if (rdma->verbs != verbs) {
- fprintf(stderr, "ibv context not matching %p, %p!\n",
- rdma->verbs, verbs);
+ error_report("ibv context not matching %p, %p!", rdma->verbs,
+ verbs);
goto err_rdma_dest_wait;
}
@@ -2866,26 +2824,26 @@ static int qemu_rdma_accept(RDMAContext *rdma)
ret = qemu_rdma_alloc_pd_cq(rdma);
if (ret) {
- fprintf(stderr, "rdma migration: error allocating pd and cq!\n");
+ error_report("rdma migration: error allocating pd and cq!");
goto err_rdma_dest_wait;
}
ret = qemu_rdma_alloc_qp(rdma);
if (ret) {
- fprintf(stderr, "rdma migration: error allocating qp!\n");
+ error_report("rdma migration: error allocating qp!");
goto err_rdma_dest_wait;
}
ret = qemu_rdma_init_ram_blocks(rdma);
if (ret) {
- fprintf(stderr, "rdma migration: error initializing ram blocks!\n");
+ error_report("rdma migration: error initializing ram blocks!");
goto err_rdma_dest_wait;
}
for (idx = 0; idx < RDMA_WRID_MAX; idx++) {
ret = qemu_rdma_reg_control(rdma, idx);
if (ret) {
- fprintf(stderr, "rdma: error registering %d control!\n", idx);
+ error_report("rdma: error registering %d control", idx);
goto err_rdma_dest_wait;
}
}
@@ -2894,18 +2852,18 @@ static int qemu_rdma_accept(RDMAContext *rdma)
ret = rdma_accept(rdma->cm_id, &conn_param);
if (ret) {
- fprintf(stderr, "rdma_accept returns %d!\n", ret);
+ error_report("rdma_accept returns %d", ret);
goto err_rdma_dest_wait;
}
ret = rdma_get_cm_event(rdma->channel, &cm_event);
if (ret) {
- fprintf(stderr, "rdma_accept get_cm_event failed %d!\n", ret);
+ error_report("rdma_accept get_cm_event failed %d", ret);
goto err_rdma_dest_wait;
}
if (cm_event->event != RDMA_CM_EVENT_ESTABLISHED) {
- fprintf(stderr, "rdma_accept not event established!\n");
+ error_report("rdma_accept not event established");
rdma_ack_cm_event(cm_event);
goto err_rdma_dest_wait;
}
@@ -2915,7 +2873,7 @@ static int qemu_rdma_accept(RDMAContext *rdma)
ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_READY);
if (ret) {
- fprintf(stderr, "rdma migration: error posting second control recv!\n");
+ error_report("rdma migration: error posting second control recv");
goto err_rdma_dest_wait;
}
@@ -2969,7 +2927,7 @@ static int qemu_rdma_registration_handle(QEMUFile *f, void *opaque,
CHECK_ERROR_STATE();
do {
- DDDPRINTF("Waiting for next request %" PRIu64 "...\n", flags);
+ trace_qemu_rdma_registration_handle_wait(flags);
ret = qemu_rdma_exchange_recv(rdma, &head, RDMA_CONTROL_NONE);
@@ -2978,8 +2936,8 @@ static int qemu_rdma_registration_handle(QEMUFile *f, void *opaque,
}
if (head.repeat > RDMA_CONTROL_MAX_COMMANDS_PER_MESSAGE) {
- fprintf(stderr, "rdma: Too many requests in this message (%d)."
- "Bailing.\n", head.repeat);
+ error_report("rdma: Too many requests in this message (%d)."
+ "Bailing.", head.repeat);
ret = -EIO;
break;
}
@@ -2989,9 +2947,9 @@ static int qemu_rdma_registration_handle(QEMUFile *f, void *opaque,
comp = (RDMACompress *) rdma->wr_data[idx].control_curr;
network_to_compress(comp);
- DDPRINTF("Zapping zero chunk: %" PRId64
- " bytes, index %d, offset %" PRId64 "\n",
- comp->length, comp->block_idx, comp->offset);
+ trace_qemu_rdma_registration_handle_compress(comp->length,
+ comp->block_idx,
+ comp->offset);
block = &(rdma->local_ram_blocks.block[comp->block_idx]);
host_addr = block->local_host_addr +
@@ -3001,17 +2959,17 @@ static int qemu_rdma_registration_handle(QEMUFile *f, void *opaque,
break;
case RDMA_CONTROL_REGISTER_FINISHED:
- DDDPRINTF("Current registrations complete.\n");
+ trace_qemu_rdma_registration_handle_finished();
goto out;
case RDMA_CONTROL_RAM_BLOCKS_REQUEST:
- DPRINTF("Initial setup info requested.\n");
+ trace_qemu_rdma_registration_handle_ram_blocks();
if (rdma->pin_all) {
ret = qemu_rdma_reg_whole_ram_blocks(rdma);
if (ret) {
- fprintf(stderr, "rdma migration: error dest "
- "registering ram blocks!\n");
+ error_report("rdma migration: error dest "
+ "registering ram blocks");
goto out;
}
}
@@ -3044,13 +3002,13 @@ static int qemu_rdma_registration_handle(QEMUFile *f, void *opaque,
(uint8_t *) rdma->block, &blocks);
if (ret < 0) {
- fprintf(stderr, "rdma migration: error sending remote info!\n");
+ error_report("rdma migration: error sending remote info");
goto out;
}
break;
case RDMA_CONTROL_REGISTER_REQUEST:
- DDPRINTF("There are %d registration requests\n", head.repeat);
+ trace_qemu_rdma_registration_handle_register(head.repeat);
reg_resp.repeat = head.repeat;
registers = (RDMARegister *) rdma->wr_data[idx].control_curr;
@@ -3064,8 +3022,7 @@ static int qemu_rdma_registration_handle(QEMUFile *f, void *opaque,
reg_result = &results[count];
- DDPRINTF("Registration request (%d): index %d, current_addr %"
- PRIu64 " chunks: %" PRIu64 "\n", count,
+ trace_qemu_rdma_registration_handle_register_loop(count,
reg->current_index, reg->key.current_addr, reg->chunks);
block = &(rdma->local_ram_blocks.block[reg->current_index]);
@@ -3084,15 +3041,15 @@ static int qemu_rdma_registration_handle(QEMUFile *f, void *opaque,
if (qemu_rdma_register_and_get_keys(rdma, block,
(uint8_t *)host_addr, NULL, &reg_result->rkey,
chunk, chunk_start, chunk_end)) {
- fprintf(stderr, "cannot get rkey!\n");
+ error_report("cannot get rkey");
ret = -EINVAL;
goto out;
}
reg_result->host_addr = (uint64_t) block->local_host_addr;
- DDPRINTF("Registered rkey for this request: %x\n",
- reg_result->rkey);
+ trace_qemu_rdma_registration_handle_register_rkey(
+ reg_result->rkey);
result_to_network(reg_result);
}
@@ -3101,12 +3058,12 @@ static int qemu_rdma_registration_handle(QEMUFile *f, void *opaque,
(uint8_t *) results, &reg_resp);
if (ret < 0) {
- fprintf(stderr, "Failed to send control buffer!\n");
+ error_report("Failed to send control buffer");
goto out;
}
break;
case RDMA_CONTROL_UNREGISTER_REQUEST:
- DDPRINTF("There are %d unregistration requests\n", head.repeat);
+ trace_qemu_rdma_registration_handle_unregister(head.repeat);
unreg_resp.repeat = head.repeat;
registers = (RDMARegister *) rdma->wr_data[idx].control_curr;
@@ -3114,9 +3071,8 @@ static int qemu_rdma_registration_handle(QEMUFile *f, void *opaque,
reg = &registers[count];
network_to_register(reg);
- DDPRINTF("Unregistration request (%d): "
- " index %d, chunk %" PRIu64 "\n",
- count, reg->current_index, reg->key.chunk);
+ trace_qemu_rdma_registration_handle_unregister_loop(count,
+ reg->current_index, reg->key.chunk);
block = &(rdma->local_ram_blocks.block[reg->current_index]);
@@ -3131,24 +3087,23 @@ static int qemu_rdma_registration_handle(QEMUFile *f, void *opaque,
rdma->total_registrations--;
- DDPRINTF("Unregistered chunk %" PRIu64 " successfully.\n",
- reg->key.chunk);
+ trace_qemu_rdma_registration_handle_unregister_success(
+ reg->key.chunk);
}
ret = qemu_rdma_post_send_control(rdma, NULL, &unreg_resp);
if (ret < 0) {
- fprintf(stderr, "Failed to send control buffer!\n");
+ error_report("Failed to send control buffer");
goto out;
}
break;
case RDMA_CONTROL_REGISTER_RESULT:
- fprintf(stderr, "Invalid RESULT message at dest.\n");
+ error_report("Invalid RESULT message at dest.");
ret = -EIO;
goto out;
default:
- fprintf(stderr, "Unknown control message %s\n",
- control_desc[head.type]);
+ error_report("Unknown control message %s", control_desc[head.type]);
ret = -EIO;
goto out;
}
@@ -3168,7 +3123,7 @@ static int qemu_rdma_registration_start(QEMUFile *f, void *opaque,
CHECK_ERROR_STATE();
- DDDPRINTF("start section: %" PRIu64 "\n", flags);
+ trace_qemu_rdma_registration_start(flags);
qemu_put_be64(f, RAM_SAVE_FLAG_HOOK);
qemu_fflush(f);
@@ -3203,7 +3158,7 @@ static int qemu_rdma_registration_stop(QEMUFile *f, void *opaque,
int reg_result_idx, i, j, nb_remote_blocks;
head.type = RDMA_CONTROL_RAM_BLOCKS_REQUEST;
- DPRINTF("Sending registration setup for ram blocks...\n");
+ trace_qemu_rdma_registration_stop_ram();
/*
* Make sure that we parallelize the pinning on both sides.
@@ -3275,7 +3230,7 @@ static int qemu_rdma_registration_stop(QEMUFile *f, void *opaque,
}
}
- DDDPRINTF("Sending registration finish %" PRIu64 "...\n", flags);
+ trace_qemu_rdma_registration_stop(flags);
head.type = RDMA_CONTROL_REGISTER_FINISHED;
ret = qemu_rdma_exchange_send(rdma, &head, NULL, NULL, NULL, NULL);
@@ -3339,7 +3294,7 @@ static void rdma_accept_incoming_migration(void *opaque)
QEMUFile *f;
Error *local_err = NULL, **errp = &local_err;
- DPRINTF("Accepting rdma connection...\n");
+ trace_qemu_dma_accept_incoming_migration();
ret = qemu_rdma_accept(rdma);
if (ret) {
@@ -3347,7 +3302,7 @@ static void rdma_accept_incoming_migration(void *opaque)
return;
}
- DPRINTF("Accepted migration\n");
+ trace_qemu_dma_accept_incoming_migration_accepted();
f = qemu_fopen_rdma(rdma, "rb");
if (f == NULL) {
@@ -3366,7 +3321,7 @@ void rdma_start_incoming_migration(const char *host_port, Error **errp)
RDMAContext *rdma;
Error *local_err = NULL;
- DPRINTF("Starting RDMA-based incoming migration\n");
+ trace_rdma_start_incoming_migration();
rdma = qemu_rdma_data_init(host_port, &local_err);
if (rdma == NULL) {
@@ -3379,7 +3334,7 @@ void rdma_start_incoming_migration(const char *host_port, Error **errp)
goto err;
}
- DPRINTF("qemu_rdma_dest_init success\n");
+ trace_rdma_start_incoming_migration_after_dest_init();
ret = rdma_listen(rdma->listen_id, 5);
@@ -3388,7 +3343,7 @@ void rdma_start_incoming_migration(const char *host_port, Error **errp)
goto err;
}
- DPRINTF("rdma_listen success\n");
+ trace_rdma_start_incoming_migration_after_rdma_listen();
qemu_set_fd_handler2(rdma->channel->fd, NULL,
rdma_accept_incoming_migration, NULL,
@@ -3419,14 +3374,14 @@ void rdma_start_outgoing_migration(void *opaque,
goto err;
}
- DPRINTF("qemu_rdma_source_init success\n");
+ trace_rdma_start_outgoing_migration_after_rdma_source_init();
ret = qemu_rdma_connect(rdma, &local_err);
if (ret) {
goto err;
}
- DPRINTF("qemu_rdma_source_connect success\n");
+ trace_rdma_start_outgoing_migration_after_rdma_connect();
s->file = qemu_fopen_rdma(rdma, "wb");
migrate_fd_connect(s);
diff --git a/migration/vmstate.c b/migration/vmstate.c
index 3dde574c0f..e5388f0596 100644
--- a/migration/vmstate.c
+++ b/migration/vmstate.c
@@ -3,10 +3,12 @@
#include "migration/qemu-file.h"
#include "migration/vmstate.h"
#include "qemu/bitops.h"
+#include "qemu/error-report.h"
#include "trace.h"
+#include "qjson.h"
static void vmstate_subsection_save(QEMUFile *f, const VMStateDescription *vmsd,
- void *opaque);
+ void *opaque, QJSON *vmdesc);
static int vmstate_subsection_load(QEMUFile *f, const VMStateDescription *vmsd,
void *opaque);
@@ -72,16 +74,21 @@ int vmstate_load_state(QEMUFile *f, const VMStateDescription *vmsd,
void *opaque, int version_id)
{
VMStateField *field = vmsd->fields;
- int ret;
+ int ret = 0;
+ trace_vmstate_load_state(vmsd->name, version_id);
if (version_id > vmsd->version_id) {
+ trace_vmstate_load_state_end(vmsd->name, "too new", -EINVAL);
return -EINVAL;
}
if (version_id < vmsd->minimum_version_id) {
if (vmsd->load_state_old &&
version_id >= vmsd->minimum_version_id_old) {
- return vmsd->load_state_old(f, opaque, version_id);
+ ret = vmsd->load_state_old(f, opaque, version_id);
+ trace_vmstate_load_state_end(vmsd->name, "old path", ret);
+ return ret;
}
+ trace_vmstate_load_state_end(vmsd->name, "too old", -EINVAL);
return -EINVAL;
}
if (vmsd->pre_load) {
@@ -91,6 +98,7 @@ int vmstate_load_state(QEMUFile *f, const VMStateDescription *vmsd,
}
}
while (field->name) {
+ trace_vmstate_load_state_field(vmsd->name, field->name);
if ((field->field_exists &&
field->field_exists(opaque, version_id)) ||
(!field->field_exists &&
@@ -122,8 +130,8 @@ int vmstate_load_state(QEMUFile *f, const VMStateDescription *vmsd,
}
}
} else if (field->flags & VMS_MUST_EXIST) {
- fprintf(stderr, "Input validation failed: %s/%s\n",
- vmsd->name, field->name);
+ error_report("Input validation failed: %s/%s",
+ vmsd->name, field->name);
return -1;
}
field++;
@@ -133,48 +141,203 @@ int vmstate_load_state(QEMUFile *f, const VMStateDescription *vmsd,
return ret;
}
if (vmsd->post_load) {
- return vmsd->post_load(opaque, version_id);
+ ret = vmsd->post_load(opaque, version_id);
}
- return 0;
+ trace_vmstate_load_state_end(vmsd->name, "end", ret);
+ return ret;
+}
+
+static int vmfield_name_num(VMStateField *start, VMStateField *search)
+{
+ VMStateField *field;
+ int found = 0;
+
+ for (field = start; field->name; field++) {
+ if (!strcmp(field->name, search->name)) {
+ if (field == search) {
+ return found;
+ }
+ found++;
+ }
+ }
+
+ return -1;
+}
+
+static bool vmfield_name_is_unique(VMStateField *start, VMStateField *search)
+{
+ VMStateField *field;
+ int found = 0;
+
+ for (field = start; field->name; field++) {
+ if (!strcmp(field->name, search->name)) {
+ found++;
+ /* name found more than once, so it's not unique */
+ if (found > 1) {
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+static const char *vmfield_get_type_name(VMStateField *field)
+{
+ const char *type = "unknown";
+
+ if (field->flags & VMS_STRUCT) {
+ type = "struct";
+ } else if (field->info->name) {
+ type = field->info->name;
+ }
+
+ return type;
+}
+
+static bool vmsd_can_compress(VMStateField *field)
+{
+ if (field->field_exists) {
+ /* Dynamically existing fields mess up compression */
+ return false;
+ }
+
+ if (field->flags & VMS_STRUCT) {
+ VMStateField *sfield = field->vmsd->fields;
+ while (sfield->name) {
+ if (!vmsd_can_compress(sfield)) {
+ /* Child elements can't compress, so can't we */
+ return false;
+ }
+ sfield++;
+ }
+
+ if (field->vmsd->subsections) {
+ /* Subsections may come and go, better don't compress */
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static void vmsd_desc_field_start(const VMStateDescription *vmsd, QJSON *vmdesc,
+ VMStateField *field, int i, int max)
+{
+ char *name, *old_name;
+ bool is_array = max > 1;
+ bool can_compress = vmsd_can_compress(field);
+
+ if (!vmdesc) {
+ return;
+ }
+
+ name = g_strdup(field->name);
+
+ /* Field name is not unique, need to make it unique */
+ if (!vmfield_name_is_unique(vmsd->fields, field)) {
+ int num = vmfield_name_num(vmsd->fields, field);
+ old_name = name;
+ name = g_strdup_printf("%s[%d]", name, num);
+ g_free(old_name);
+ }
+
+ json_start_object(vmdesc, NULL);
+ json_prop_str(vmdesc, "name", name);
+ if (is_array) {
+ if (can_compress) {
+ json_prop_int(vmdesc, "array_len", max);
+ } else {
+ json_prop_int(vmdesc, "index", i);
+ }
+ }
+ json_prop_str(vmdesc, "type", vmfield_get_type_name(field));
+
+ if (field->flags & VMS_STRUCT) {
+ json_start_object(vmdesc, "struct");
+ }
+
+ g_free(name);
+}
+
+static void vmsd_desc_field_end(const VMStateDescription *vmsd, QJSON *vmdesc,
+ VMStateField *field, size_t size, int i)
+{
+ if (!vmdesc) {
+ return;
+ }
+
+ if (field->flags & VMS_STRUCT) {
+ /* We printed a struct in between, close its child object */
+ json_end_object(vmdesc);
+ }
+
+ json_prop_int(vmdesc, "size", size);
+ json_end_object(vmdesc);
}
void vmstate_save_state(QEMUFile *f, const VMStateDescription *vmsd,
- void *opaque)
+ void *opaque, QJSON *vmdesc)
{
VMStateField *field = vmsd->fields;
if (vmsd->pre_save) {
vmsd->pre_save(opaque);
}
+
+ if (vmdesc) {
+ json_prop_str(vmdesc, "vmsd_name", vmsd->name);
+ json_prop_int(vmdesc, "version", vmsd->version_id);
+ json_start_array(vmdesc, "fields");
+ }
+
while (field->name) {
if (!field->field_exists ||
field->field_exists(opaque, vmsd->version_id)) {
void *base_addr = vmstate_base_addr(opaque, field, false);
int i, n_elems = vmstate_n_elems(opaque, field);
int size = vmstate_size(opaque, field);
+ int64_t old_offset, written_bytes;
+ QJSON *vmdesc_loop = vmdesc;
for (i = 0; i < n_elems; i++) {
void *addr = base_addr + size * i;
+ vmsd_desc_field_start(vmsd, vmdesc_loop, field, i, n_elems);
+ old_offset = qemu_ftell_fast(f);
+
if (field->flags & VMS_ARRAY_OF_POINTER) {
addr = *(void **)addr;
}
if (field->flags & VMS_STRUCT) {
- vmstate_save_state(f, field->vmsd, addr);
+ vmstate_save_state(f, field->vmsd, addr, vmdesc_loop);
} else {
field->info->put(f, addr, size);
}
+
+ written_bytes = qemu_ftell_fast(f) - old_offset;
+ vmsd_desc_field_end(vmsd, vmdesc_loop, field, written_bytes, i);
+
+ /* Compressed arrays only care about the first element */
+ if (vmdesc_loop && vmsd_can_compress(field)) {
+ vmdesc_loop = NULL;
+ }
}
} else {
if (field->flags & VMS_MUST_EXIST) {
- fprintf(stderr, "Output state validation failed: %s/%s\n",
+ error_report("Output state validation failed: %s/%s",
vmsd->name, field->name);
assert(!(field->flags & VMS_MUST_EXIST));
}
}
field++;
}
- vmstate_subsection_save(f, vmsd, opaque);
+
+ if (vmdesc) {
+ json_end_array(vmdesc);
+ }
+
+ vmstate_subsection_save(f, vmsd, opaque, vmdesc);
}
static const VMStateDescription *
@@ -192,6 +355,8 @@ static const VMStateDescription *
static int vmstate_subsection_load(QEMUFile *f, const VMStateDescription *vmsd,
void *opaque)
{
+ trace_vmstate_subsection_load(vmsd->name);
+
while (qemu_peek_byte(f, 0) == QEMU_VM_SUBSECTION) {
char idstr[256];
int ret;
@@ -201,20 +366,24 @@ static int vmstate_subsection_load(QEMUFile *f, const VMStateDescription *vmsd,
len = qemu_peek_byte(f, 1);
if (len < strlen(vmsd->name) + 1) {
/* subsection name has be be "section_name/a" */
+ trace_vmstate_subsection_load_bad(vmsd->name, "(short)");
return 0;
}
size = qemu_peek_buffer(f, (uint8_t *)idstr, len, 2);
if (size != len) {
+ trace_vmstate_subsection_load_bad(vmsd->name, "(peek fail)");
return 0;
}
idstr[size] = 0;
if (strncmp(vmsd->name, idstr, strlen(vmsd->name)) != 0) {
+ trace_vmstate_subsection_load_bad(vmsd->name, idstr);
/* it don't have a valid subsection name */
return 0;
}
sub_vmsd = vmstate_get_subsection(vmsd->subsections, idstr);
if (sub_vmsd == NULL) {
+ trace_vmstate_subsection_load_bad(vmsd->name, "(lookup)");
return -ENOENT;
}
qemu_file_skip(f, 1); /* subsection */
@@ -224,31 +393,53 @@ static int vmstate_subsection_load(QEMUFile *f, const VMStateDescription *vmsd,
ret = vmstate_load_state(f, sub_vmsd, opaque, version_id);
if (ret) {
+ trace_vmstate_subsection_load_bad(vmsd->name, "(child)");
return ret;
}
}
+
+ trace_vmstate_subsection_load_good(vmsd->name);
return 0;
}
static void vmstate_subsection_save(QEMUFile *f, const VMStateDescription *vmsd,
- void *opaque)
+ void *opaque, QJSON *vmdesc)
{
const VMStateSubsection *sub = vmsd->subsections;
+ bool subsection_found = false;
while (sub && sub->needed) {
if (sub->needed(opaque)) {
const VMStateDescription *vmsd = sub->vmsd;
uint8_t len;
+ if (vmdesc) {
+ /* Only create subsection array when we have any */
+ if (!subsection_found) {
+ json_start_array(vmdesc, "subsections");
+ subsection_found = true;
+ }
+
+ json_start_object(vmdesc, NULL);
+ }
+
qemu_put_byte(f, QEMU_VM_SUBSECTION);
len = strlen(vmsd->name);
qemu_put_byte(f, len);
qemu_put_buffer(f, (uint8_t *)vmsd->name, len);
qemu_put_be32(f, vmsd->version_id);
- vmstate_save_state(f, vmsd, opaque);
+ vmstate_save_state(f, vmsd, opaque, vmdesc);
+
+ if (vmdesc) {
+ json_end_object(vmdesc);
+ }
}
sub++;
}
+
+ if (vmdesc && subsection_found) {
+ json_end_array(vmdesc);
+ }
}
/* bool */
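In the vmstate.c changes above, vmsd_can_compress() decides whether an array field can be described once with an "array_len" property (all elements identical in layout) or must be emitted per element with an "index"; fields whose name repeats inside a VMStateDescription additionally get a "[n]" suffix from vmfield_name_num(). An illustration of the two shapes, with hypothetical field names and sizes:

    { "name" : "cmos_data", "array_len" : 128, "type" : "uint8", "size" : 1 }

    { "name" : "ports", "index" : 0, "type" : "struct",
      "struct" : { "vmsd_name" : "...", "version" : 1, "fields" : [ ... ] },
      "size" : 24 }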
diff --git a/qjson.c b/qjson.c
new file mode 100644
index 0000000000..b242222a58
--- /dev/null
+++ b/qjson.c
@@ -0,0 +1,129 @@
+/*
+ * QEMU JSON writer
+ *
+ * Copyright Alexander Graf
+ *
+ * Authors:
+ * Alexander Graf <agraf@suse.de
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ *
+ */
+
+#include <qapi/qmp/qstring.h>
+#include <stdbool.h>
+#include <glib.h>
+#include <qjson.h>
+#include <qemu/module.h>
+#include <qom/object.h>
+
+struct QJSON {
+ Object obj;
+ QString *str;
+ bool omit_comma;
+};
+
+static void json_emit_element(QJSON *json, const char *name)
+{
+ /* Check whether we need to print a , before an element */
+ if (json->omit_comma) {
+ json->omit_comma = false;
+ } else {
+ qstring_append(json->str, ", ");
+ }
+
+ if (name) {
+ qstring_append(json->str, "\"");
+ qstring_append(json->str, name);
+ qstring_append(json->str, "\" : ");
+ }
+}
+
+void json_start_object(QJSON *json, const char *name)
+{
+ json_emit_element(json, name);
+ qstring_append(json->str, "{ ");
+ json->omit_comma = true;
+}
+
+void json_end_object(QJSON *json)
+{
+ qstring_append(json->str, " }");
+ json->omit_comma = false;
+}
+
+void json_start_array(QJSON *json, const char *name)
+{
+ json_emit_element(json, name);
+ qstring_append(json->str, "[ ");
+ json->omit_comma = true;
+}
+
+void json_end_array(QJSON *json)
+{
+ qstring_append(json->str, " ]");
+ json->omit_comma = false;
+}
+
+void json_prop_int(QJSON *json, const char *name, int64_t val)
+{
+ json_emit_element(json, name);
+ qstring_append_int(json->str, val);
+}
+
+void json_prop_str(QJSON *json, const char *name, const char *str)
+{
+ json_emit_element(json, name);
+ qstring_append_chr(json->str, '"');
+ qstring_append(json->str, str);
+ qstring_append_chr(json->str, '"');
+}
+
+const char *qjson_get_str(QJSON *json)
+{
+ return qstring_get_str(json->str);
+}
+
+QJSON *qjson_new(void)
+{
+ QJSON *json = (QJSON *)object_new(TYPE_QJSON);
+ return json;
+}
+
+void qjson_finish(QJSON *json)
+{
+ json_end_object(json);
+}
+
+static void qjson_initfn(Object *obj)
+{
+ QJSON *json = (QJSON *)object_dynamic_cast(obj, TYPE_QJSON);
+ assert(json);
+
+ json->str = qstring_from_str("{ ");
+ json->omit_comma = true;
+}
+
+static void qjson_finalizefn(Object *obj)
+{
+ QJSON *json = (QJSON *)object_dynamic_cast(obj, TYPE_QJSON);
+
+ assert(json);
+ qobject_decref(QOBJECT(json->str));
+}
+
+static const TypeInfo qjson_type_info = {
+ .name = TYPE_QJSON,
+ .parent = TYPE_OBJECT,
+ .instance_size = sizeof(QJSON),
+ .instance_init = qjson_initfn,
+ .instance_finalize = qjson_finalizefn,
+};
+
+static void qjson_register_types(void)
+{
+ type_register_static(&qjson_type_info);
+}
+
+type_init(qjson_register_types)
diff --git a/savevm.c b/savevm.c
index 08ec678ddc..80407662ad 100644
--- a/savevm.c
+++ b/savevm.c
@@ -572,14 +572,34 @@ static int vmstate_load(QEMUFile *f, SaveStateEntry *se, int version_id)
return vmstate_load_state(f, se->vmsd, se->opaque, version_id);
}
-static void vmstate_save(QEMUFile *f, SaveStateEntry *se)
+static void vmstate_save_old_style(QEMUFile *f, SaveStateEntry *se, QJSON *vmdesc)
+{
+ int64_t old_offset, size;
+
+ old_offset = qemu_ftell_fast(f);
+ se->ops->save_state(f, se->opaque);
+ size = qemu_ftell_fast(f) - old_offset;
+
+ if (vmdesc) {
+ json_prop_int(vmdesc, "size", size);
+ json_start_array(vmdesc, "fields");
+ json_start_object(vmdesc, NULL);
+ json_prop_str(vmdesc, "name", "data");
+ json_prop_int(vmdesc, "size", size);
+ json_prop_str(vmdesc, "type", "buffer");
+ json_end_object(vmdesc);
+ json_end_array(vmdesc);
+ }
+}
+
+static void vmstate_save(QEMUFile *f, SaveStateEntry *se, QJSON *vmdesc)
{
trace_vmstate_save(se->idstr, se->vmsd ? se->vmsd->name : "(old)");
- if (!se->vmsd) { /* Old style */
- se->ops->save_state(f, se->opaque);
+ if (!se->vmsd) {
+ vmstate_save_old_style(f, se, vmdesc);
return;
}
- vmstate_save_state(f, se->vmsd, se->opaque);
+ vmstate_save_state(f, se->vmsd, se->opaque, vmdesc);
}
bool qemu_savevm_state_blocked(Error **errp)
@@ -674,7 +694,7 @@ int qemu_savevm_state_iterate(QEMUFile *f)
qemu_put_be32(f, se->section_id);
ret = se->ops->save_live_iterate(f, se->opaque);
- trace_savevm_section_end(se->idstr, se->section_id);
+ trace_savevm_section_end(se->idstr, se->section_id, ret);
if (ret < 0) {
qemu_file_set_error(f, ret);
@@ -692,6 +712,8 @@ int qemu_savevm_state_iterate(QEMUFile *f)
void qemu_savevm_state_complete(QEMUFile *f)
{
+ QJSON *vmdesc;
+ int vmdesc_len;
SaveStateEntry *se;
int ret;
@@ -714,13 +736,16 @@ void qemu_savevm_state_complete(QEMUFile *f)
qemu_put_be32(f, se->section_id);
ret = se->ops->save_live_complete(f, se->opaque);
- trace_savevm_section_end(se->idstr, se->section_id);
+ trace_savevm_section_end(se->idstr, se->section_id, ret);
if (ret < 0) {
qemu_file_set_error(f, ret);
return;
}
}
+ vmdesc = qjson_new();
+ json_prop_int(vmdesc, "page_size", TARGET_PAGE_SIZE);
+ json_start_array(vmdesc, "devices");
QTAILQ_FOREACH(se, &savevm_handlers, entry) {
int len;
@@ -728,6 +753,11 @@ void qemu_savevm_state_complete(QEMUFile *f)
continue;
}
trace_savevm_section_start(se->idstr, se->section_id);
+
+ json_start_object(vmdesc, NULL);
+ json_prop_str(vmdesc, "name", se->idstr);
+ json_prop_int(vmdesc, "instance_id", se->instance_id);
+
/* Section type */
qemu_put_byte(f, QEMU_VM_SECTION_FULL);
qemu_put_be32(f, se->section_id);
@@ -740,11 +770,23 @@ void qemu_savevm_state_complete(QEMUFile *f)
qemu_put_be32(f, se->instance_id);
qemu_put_be32(f, se->version_id);
- vmstate_save(f, se);
- trace_savevm_section_end(se->idstr, se->section_id);
+ vmstate_save(f, se, vmdesc);
+
+ json_end_object(vmdesc);
+ trace_savevm_section_end(se->idstr, se->section_id, 0);
}
qemu_put_byte(f, QEMU_VM_EOF);
+
+ json_end_array(vmdesc);
+ qjson_finish(vmdesc);
+ vmdesc_len = strlen(qjson_get_str(vmdesc));
+
+ qemu_put_byte(f, QEMU_VM_VMDESCRIPTION);
+ qemu_put_be32(f, vmdesc_len);
+ qemu_put_buffer(f, (uint8_t *)qjson_get_str(vmdesc), vmdesc_len);
+ object_unref(OBJECT(vmdesc));
+
qemu_fflush(f);
}
@@ -843,7 +885,7 @@ static int qemu_save_device_state(QEMUFile *f)
qemu_put_be32(f, se->instance_id);
qemu_put_be32(f, se->version_id);
- vmstate_save(f, se);
+ vmstate_save(f, se, NULL);
}
qemu_put_byte(f, QEMU_VM_EOF);
@@ -883,25 +925,30 @@ int qemu_loadvm_state(QEMUFile *f)
QLIST_HEAD(, LoadStateEntry) loadvm_handlers =
QLIST_HEAD_INITIALIZER(loadvm_handlers);
LoadStateEntry *le, *new_le;
+ Error *local_err = NULL;
uint8_t section_type;
unsigned int v;
int ret;
- if (qemu_savevm_state_blocked(NULL)) {
+ if (qemu_savevm_state_blocked(&local_err)) {
+ error_report("%s", error_get_pretty(local_err));
+ error_free(local_err);
return -EINVAL;
}
v = qemu_get_be32(f);
if (v != QEMU_VM_FILE_MAGIC) {
+ error_report("Not a migration stream");
return -EINVAL;
}
v = qemu_get_be32(f);
if (v == QEMU_VM_FILE_VERSION_COMPAT) {
- fprintf(stderr, "SaveVM v2 format is obsolete and don't work anymore\n");
+ error_report("SaveVM v2 format is obsolete and don't work anymore");
return -ENOTSUP;
}
if (v != QEMU_VM_FILE_VERSION) {
+ error_report("Unsupported migration stream version");
return -ENOTSUP;
}
@@ -911,6 +958,7 @@ int qemu_loadvm_state(QEMUFile *f)
char idstr[257];
int len;
+ trace_qemu_loadvm_state_section(section_type);
switch (section_type) {
case QEMU_VM_SECTION_START:
case QEMU_VM_SECTION_FULL:
@@ -922,18 +970,21 @@ int qemu_loadvm_state(QEMUFile *f)
instance_id = qemu_get_be32(f);
version_id = qemu_get_be32(f);
+ trace_qemu_loadvm_state_section_startfull(section_id, idstr,
+ instance_id, version_id);
/* Find savevm section */
se = find_se(idstr, instance_id);
if (se == NULL) {
- fprintf(stderr, "Unknown savevm section or instance '%s' %d\n", idstr, instance_id);
+ error_report("Unknown savevm section or instance '%s' %d",
+ idstr, instance_id);
ret = -EINVAL;
goto out;
}
/* Validate version */
if (version_id > se->version_id) {
- fprintf(stderr, "savevm: unsupported version %d for '%s' v%d\n",
- version_id, idstr, se->version_id);
+ error_report("savevm: unsupported version %d for '%s' v%d",
+ version_id, idstr, se->version_id);
ret = -EINVAL;
goto out;
}
@@ -948,8 +999,8 @@ int qemu_loadvm_state(QEMUFile *f)
ret = vmstate_load(f, le->se, le->version_id);
if (ret < 0) {
- fprintf(stderr, "qemu: warning: error while loading state for instance 0x%x of device '%s'\n",
- instance_id, idstr);
+ error_report("error while loading state for instance 0x%x of"
+ " device '%s'", instance_id, idstr);
goto out;
}
break;
@@ -957,26 +1008,27 @@ int qemu_loadvm_state(QEMUFile *f)
case QEMU_VM_SECTION_END:
section_id = qemu_get_be32(f);
+ trace_qemu_loadvm_state_section_partend(section_id);
QLIST_FOREACH(le, &loadvm_handlers, entry) {
if (le->section_id == section_id) {
break;
}
}
if (le == NULL) {
- fprintf(stderr, "Unknown savevm section %d\n", section_id);
+ error_report("Unknown savevm section %d", section_id);
ret = -EINVAL;
goto out;
}
ret = vmstate_load(f, le->se, le->version_id);
if (ret < 0) {
- fprintf(stderr, "qemu: warning: error while loading state section id %d\n",
- section_id);
+ error_report("error while loading state section id %d(%s)",
+ section_id, le->se->idstr);
goto out;
}
break;
default:
- fprintf(stderr, "Unknown savevm section type %d\n", section_type);
+ error_report("Unknown savevm section type %d", section_type);
ret = -EINVAL;
goto out;
}
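
The hunks above extend qemu_savevm_state_complete() so that, after the QEMU_VM_EOF byte, the stream carries a self-describing trailer: a QEMU_VM_VMDESCRIPTION marker byte, a big-endian 32-bit length, and the JSON text produced by the new QJSON writer. A minimal sketch of reading that trailer back, assuming the reader is already positioned just past QEMU_VM_EOF (the analyzer script added below instead locates it by searching the tail of the file):

    # Sketch only: parse the JSON trailer emitted by qemu_savevm_state_complete().
    import json
    import struct

    QEMU_VM_VMDESCRIPTION = 0x06

    def read_vmdesc_trailer(f):
        marker = f.read(1)
        if len(marker) != 1 or ord(marker) != QEMU_VM_VMDESCRIPTION:
            return None                                # stream without a JSON description
        (length,) = struct.unpack(">I", f.read(4))     # big-endian 32-bit length
        return json.loads(f.read(length).decode("utf-8"))

    # The decoded dict mirrors what the code above builds: a "page_size"
    # property and a "devices" array whose entries carry at least
    # "name" and "instance_id".
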
diff --git a/scripts/analyze-migration.py b/scripts/analyze-migration.py
new file mode 100755
index 0000000000..b8b9968e00
--- /dev/null
+++ b/scripts/analyze-migration.py
@@ -0,0 +1,592 @@
+#!/usr/bin/env python
+#
+# Migration Stream Analyzer
+#
+# Copyright (c) 2015 Alexander Graf <agraf@suse.de>
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, see <http://www.gnu.org/licenses/>.
+
+import numpy as np
+import json
+import os
+import argparse
+import collections
+import pprint
+
+def mkdir_p(path):
+ try:
+ os.makedirs(path)
+ except OSError:
+ pass
+
+class MigrationFile(object):
+ def __init__(self, filename):
+ self.filename = filename
+ self.file = open(self.filename, "rb")
+
+ def read64(self):
+ return np.asscalar(np.fromfile(self.file, count=1, dtype='>i8')[0])
+
+ def read32(self):
+ return np.asscalar(np.fromfile(self.file, count=1, dtype='>i4')[0])
+
+ def read16(self):
+ return np.asscalar(np.fromfile(self.file, count=1, dtype='>i2')[0])
+
+ def read8(self):
+ return np.asscalar(np.fromfile(self.file, count=1, dtype='>i1')[0])
+
+ def readstr(self, len = None):
+ if len is None:
+ len = self.read8()
+ if len == 0:
+ return ""
+ return np.fromfile(self.file, count=1, dtype=('S%d' % len))[0]
+
+ def readvar(self, size = None):
+ if size is None:
+ size = self.read8()
+ if size == 0:
+ return ""
+ value = self.file.read(size)
+ if len(value) != size:
+ raise Exception("Unexpected end of %s at 0x%x" % (self.filename, self.file.tell()))
+ return value
+
+ def tell(self):
+ return self.file.tell()
+
+ # The VMSD description is at the end of the file, after EOF. Look for
+ # the last NULL byte, then for the beginning brace of JSON.
+ def read_migration_debug_json(self):
+ QEMU_VM_VMDESCRIPTION = 0x06
+
+ # Remember the offset in the file when we started
+ entrypos = self.file.tell()
+
+ # Read the last 10MB
+ self.file.seek(0, os.SEEK_END)
+ endpos = self.file.tell()
+ self.file.seek(max(-endpos, -10 * 1024 * 1024), os.SEEK_END)
+ datapos = self.file.tell()
+ data = self.file.read()
+ # We have read to the end of the file; reopen it to get a fresh handle
+ self.file = open(self.filename, "rb")
+
+ # Find the last NULL byte, then the first brace after that. This should
+ # be the beginning of our JSON data.
+ nulpos = data.rfind("\0")
+ jsonpos = data.find("{", nulpos)
+
+ # Check backwards from there and see whether we guessed right
+ self.file.seek(datapos + jsonpos - 5, 0)
+ if self.read8() != QEMU_VM_VMDESCRIPTION:
+ raise Exception("No Debug Migration device found")
+
+ jsonlen = self.read32()
+
+ # Seek back to where we were at the beginning
+ self.file.seek(entrypos, 0)
+
+ return data[jsonpos:jsonpos + jsonlen]
+
+ def close(self):
+ self.file.close()
+
+class RamSection(object):
+ RAM_SAVE_FLAG_COMPRESS = 0x02
+ RAM_SAVE_FLAG_MEM_SIZE = 0x04
+ RAM_SAVE_FLAG_PAGE = 0x08
+ RAM_SAVE_FLAG_EOS = 0x10
+ RAM_SAVE_FLAG_CONTINUE = 0x20
+ RAM_SAVE_FLAG_XBZRLE = 0x40
+ RAM_SAVE_FLAG_HOOK = 0x80
+
+ def __init__(self, file, version_id, ramargs, section_key):
+ if version_id != 4:
+ raise Exception("Unknown RAM version %d" % version_id)
+
+ self.file = file
+ self.section_key = section_key
+ self.TARGET_PAGE_SIZE = ramargs['page_size']
+ self.dump_memory = ramargs['dump_memory']
+ self.write_memory = ramargs['write_memory']
+ self.sizeinfo = collections.OrderedDict()
+ self.data = collections.OrderedDict()
+ self.data['section sizes'] = self.sizeinfo
+ self.name = ''
+ if self.write_memory:
+ self.files = { }
+ if self.dump_memory:
+ self.memory = collections.OrderedDict()
+ self.data['memory'] = self.memory
+
+ def __repr__(self):
+ return self.data.__repr__()
+
+ def __str__(self):
+ return self.data.__str__()
+
+ def getDict(self):
+ return self.data
+
+ def read(self):
+ # Read all RAM sections
+ while True:
+ addr = self.file.read64()
+ flags = addr & (self.TARGET_PAGE_SIZE - 1)
+ addr &= ~(self.TARGET_PAGE_SIZE - 1)
+
+ if flags & self.RAM_SAVE_FLAG_MEM_SIZE:
+ while True:
+ namelen = self.file.read8()
+ # We assume that no RAM chunk is big enough to ever
+ # hit the first byte of the address, so when we see
+ # a zero here we know it has to be an address, not the
+ # length of the next block.
+ if namelen == 0:
+ self.file.file.seek(-1, 1)
+ break
+ self.name = self.file.readstr(len = namelen)
+ len = self.file.read64()
+ self.sizeinfo[self.name] = '0x%016x' % len
+ if self.write_memory:
+ print self.name
+ mkdir_p('./' + os.path.dirname(self.name))
+ f = open('./' + self.name, "wb")
+ f.truncate(0)
+ f.truncate(len)
+ self.files[self.name] = f
+ flags &= ~self.RAM_SAVE_FLAG_MEM_SIZE
+
+ if flags & self.RAM_SAVE_FLAG_COMPRESS:
+ if flags & self.RAM_SAVE_FLAG_CONTINUE:
+ flags &= ~self.RAM_SAVE_FLAG_CONTINUE
+ else:
+ self.name = self.file.readstr()
+ fill_char = self.file.read8()
+ # The page in question is filled with fill_char now
+ if self.write_memory and fill_char != 0:
+ self.files[self.name].seek(addr, os.SEEK_SET)
+ self.files[self.name].write(chr(fill_char) * self.TARGET_PAGE_SIZE)
+ if self.dump_memory:
+ self.memory['%s (0x%016x)' % (self.name, addr)] = 'Filled with 0x%02x' % fill_char
+ flags &= ~self.RAM_SAVE_FLAG_COMPRESS
+ elif flags & self.RAM_SAVE_FLAG_PAGE:
+ if flags & self.RAM_SAVE_FLAG_CONTINUE:
+ flags &= ~self.RAM_SAVE_FLAG_CONTINUE
+ else:
+ self.name = self.file.readstr()
+
+ if self.write_memory or self.dump_memory:
+ data = self.file.readvar(size = self.TARGET_PAGE_SIZE)
+ else: # Just skip RAM data
+ self.file.file.seek(self.TARGET_PAGE_SIZE, 1)
+
+ if self.write_memory:
+ self.files[self.name].seek(addr, os.SEEK_SET)
+ self.files[self.name].write(data)
+ if self.dump_memory:
+ hexdata = " ".join("{0:02x}".format(ord(c)) for c in data)
+ self.memory['%s (0x%016x)' % (self.name, addr)] = hexdata
+
+ flags &= ~self.RAM_SAVE_FLAG_PAGE
+ elif flags & self.RAM_SAVE_FLAG_XBZRLE:
+ raise Exception("XBZRLE RAM compression is not supported yet")
+ elif flags & self.RAM_SAVE_FLAG_HOOK:
+ raise Exception("RAM hooks don't make sense with files")
+
+ # End of RAM section
+ if flags & self.RAM_SAVE_FLAG_EOS:
+ break
+
+ if flags != 0:
+ raise Exception("Unknown RAM flags: %x" % flags)
+
+ def __del__(self):
+ if self.write_memory:
+ for key in self.files:
+ self.files[key].close()
+
+
+class HTABSection(object):
+ HASH_PTE_SIZE_64 = 16
+
+ def __init__(self, file, version_id, device, section_key):
+ if version_id != 1:
+ raise Exception("Unknown HTAB version %d" % version_id)
+
+ self.file = file
+ self.section_key = section_key
+
+ def read(self):
+
+ header = self.file.read32()
+
+ if (header > 0):
+ # First section, just the hash shift
+ return
+
+ # Read until end marker
+ while True:
+ index = self.file.read32()
+ n_valid = self.file.read16()
+ n_invalid = self.file.read16()
+
+ if index == 0 and n_valid == 0 and n_invalid == 0:
+ break
+
+ self.file.readvar(n_valid * self.HASH_PTE_SIZE_64)
+
+ def getDict(self):
+ return ""
+
+class VMSDFieldGeneric(object):
+ def __init__(self, desc, file):
+ self.file = file
+ self.desc = desc
+ self.data = ""
+
+ def __repr__(self):
+ return str(self.__str__())
+
+ def __str__(self):
+ return " ".join("{0:02x}".format(ord(c)) for c in self.data)
+
+ def getDict(self):
+ return self.__str__()
+
+ def read(self):
+ size = int(self.desc['size'])
+ self.data = self.file.readvar(size)
+ return self.data
+
+class VMSDFieldInt(VMSDFieldGeneric):
+ def __init__(self, desc, file):
+ super(VMSDFieldInt, self).__init__(desc, file)
+ self.size = int(desc['size'])
+ self.format = '0x%%0%dx' % (self.size * 2)
+ self.sdtype = '>i%d' % self.size
+ self.udtype = '>u%d' % self.size
+
+ def __repr__(self):
+ if self.data < 0:
+ return ('%s (%d)' % ((self.format % self.udata), self.data))
+ else:
+ return self.format % self.data
+
+ def __str__(self):
+ return self.__repr__()
+
+ def getDict(self):
+ return self.__str__()
+
+ def read(self):
+ super(VMSDFieldInt, self).read()
+ self.sdata = np.fromstring(self.data, count=1, dtype=(self.sdtype))[0]
+ self.udata = np.fromstring(self.data, count=1, dtype=(self.udtype))[0]
+ self.data = self.sdata
+ return self.data
+
+class VMSDFieldUInt(VMSDFieldInt):
+ def __init__(self, desc, file):
+ super(VMSDFieldUInt, self).__init__(desc, file)
+
+ def read(self):
+ super(VMSDFieldUInt, self).read()
+ self.data = self.udata
+ return self.data
+
+class VMSDFieldIntLE(VMSDFieldInt):
+ def __init__(self, desc, file):
+ super(VMSDFieldIntLE, self).__init__(desc, file)
+ self.dtype = '<i%d' % self.size
+
+class VMSDFieldBool(VMSDFieldGeneric):
+ def __init__(self, desc, file):
+ super(VMSDFieldBool, self).__init__(desc, file)
+
+ def __repr__(self):
+ return self.data.__repr__()
+
+ def __str__(self):
+ return self.data.__str__()
+
+ def getDict(self):
+ return self.data
+
+ def read(self):
+ super(VMSDFieldBool, self).read()
+ if ord(self.data[0]) == 0:
+ self.data = False
+ else:
+ self.data = True
+ return self.data
+
+class VMSDFieldStruct(VMSDFieldGeneric):
+ QEMU_VM_SUBSECTION = 0x05
+
+ def __init__(self, desc, file):
+ super(VMSDFieldStruct, self).__init__(desc, file)
+ self.data = collections.OrderedDict()
+
+ # When we see compressed array elements, unfold them here
+ new_fields = []
+ for field in self.desc['struct']['fields']:
+ if not 'array_len' in field:
+ new_fields.append(field)
+ continue
+ array_len = field.pop('array_len')
+ field['index'] = 0
+ new_fields.append(field)
+ for i in xrange(1, array_len):
+ c = field.copy()
+ c['index'] = i
+ new_fields.append(c)
+
+ self.desc['struct']['fields'] = new_fields
+
+ def __repr__(self):
+ return self.data.__repr__()
+
+ def __str__(self):
+ return self.data.__str__()
+
+ def read(self):
+ for field in self.desc['struct']['fields']:
+ try:
+ reader = vmsd_field_readers[field['type']]
+ except:
+ reader = VMSDFieldGeneric
+
+ field['data'] = reader(field, self.file)
+ field['data'].read()
+
+ if 'index' in field:
+ if field['name'] not in self.data:
+ self.data[field['name']] = []
+ a = self.data[field['name']]
+ if len(a) != int(field['index']):
+ raise Exception("internal index of data field unmatched (%d/%d)" % (len(a), int(field['index'])))
+ a.append(field['data'])
+ else:
+ self.data[field['name']] = field['data']
+
+ if 'subsections' in self.desc['struct']:
+ for subsection in self.desc['struct']['subsections']:
+ if self.file.read8() != self.QEMU_VM_SUBSECTION:
+ raise Exception("Subsection %s not found at offset %x" % ( subsection['vmsd_name'], self.file.tell()))
+ name = self.file.readstr()
+ version_id = self.file.read32()
+ self.data[name] = VMSDSection(self.file, version_id, subsection, (name, 0))
+ self.data[name].read()
+
+ def getDictItem(self, value):
+ # Strings would fall into the array category, treat
+ # them specially
+ if value.__class__ is ''.__class__:
+ return value
+
+ try:
+ return self.getDictOrderedDict(value)
+ except:
+ try:
+ return self.getDictArray(value)
+ except:
+ try:
+ return value.getDict()
+ except:
+ return value
+
+ def getDictArray(self, array):
+ r = []
+ for value in array:
+ r.append(self.getDictItem(value))
+ return r
+
+ def getDictOrderedDict(self, dict):
+ r = collections.OrderedDict()
+ for (key, value) in dict.items():
+ r[key] = self.getDictItem(value)
+ return r
+
+ def getDict(self):
+ return self.getDictOrderedDict(self.data)
+
+vmsd_field_readers = {
+ "bool" : VMSDFieldBool,
+ "int8" : VMSDFieldInt,
+ "int16" : VMSDFieldInt,
+ "int32" : VMSDFieldInt,
+ "int32 equal" : VMSDFieldInt,
+ "int32 le" : VMSDFieldIntLE,
+ "int64" : VMSDFieldInt,
+ "uint8" : VMSDFieldUInt,
+ "uint16" : VMSDFieldUInt,
+ "uint32" : VMSDFieldUInt,
+ "uint32 equal" : VMSDFieldUInt,
+ "uint64" : VMSDFieldUInt,
+ "int64 equal" : VMSDFieldInt,
+ "uint8 equal" : VMSDFieldInt,
+ "uint16 equal" : VMSDFieldInt,
+ "float64" : VMSDFieldGeneric,
+ "timer" : VMSDFieldGeneric,
+ "buffer" : VMSDFieldGeneric,
+ "unused_buffer" : VMSDFieldGeneric,
+ "bitmap" : VMSDFieldGeneric,
+ "struct" : VMSDFieldStruct,
+ "unknown" : VMSDFieldGeneric,
+}
+
+class VMSDSection(VMSDFieldStruct):
+ def __init__(self, file, version_id, device, section_key):
+ self.file = file
+ self.data = ""
+ self.vmsd_name = ""
+ self.section_key = section_key
+ desc = device
+ if 'vmsd_name' in device:
+ self.vmsd_name = device['vmsd_name']
+
+ # A section really is nothing but a FieldStruct :)
+ super(VMSDSection, self).__init__({ 'struct' : desc }, file)
+
+###############################################################################
+
+class MigrationDump(object):
+ QEMU_VM_FILE_MAGIC = 0x5145564d
+ QEMU_VM_FILE_VERSION = 0x00000003
+ QEMU_VM_EOF = 0x00
+ QEMU_VM_SECTION_START = 0x01
+ QEMU_VM_SECTION_PART = 0x02
+ QEMU_VM_SECTION_END = 0x03
+ QEMU_VM_SECTION_FULL = 0x04
+ QEMU_VM_SUBSECTION = 0x05
+ QEMU_VM_VMDESCRIPTION = 0x06
+
+ def __init__(self, filename):
+ self.section_classes = { ( 'ram', 0 ) : [ RamSection, None ],
+ ( 'spapr/htab', 0) : ( HTABSection, None ) }
+ self.filename = filename
+ self.vmsd_desc = None
+
+ def read(self, desc_only = False, dump_memory = False, write_memory = False):
+ # Read in the whole file
+ file = MigrationFile(self.filename)
+
+ # File magic
+ data = file.read32()
+ if data != self.QEMU_VM_FILE_MAGIC:
+ raise Exception("Invalid file magic %x" % data)
+
+ # Version (has to be v3)
+ data = file.read32()
+ if data != self.QEMU_VM_FILE_VERSION:
+ raise Exception("Invalid version number %d" % data)
+
+ self.load_vmsd_json(file)
+
+ # Read sections
+ self.sections = collections.OrderedDict()
+
+ if desc_only:
+ return
+
+ ramargs = {}
+ ramargs['page_size'] = self.vmsd_desc['page_size']
+ ramargs['dump_memory'] = dump_memory
+ ramargs['write_memory'] = write_memory
+ self.section_classes[('ram',0)][1] = ramargs
+
+ while True:
+ section_type = file.read8()
+ if section_type == self.QEMU_VM_EOF:
+ break
+ elif section_type == self.QEMU_VM_SECTION_START or section_type == self.QEMU_VM_SECTION_FULL:
+ section_id = file.read32()
+ name = file.readstr()
+ instance_id = file.read32()
+ version_id = file.read32()
+ section_key = (name, instance_id)
+ classdesc = self.section_classes[section_key]
+ section = classdesc[0](file, version_id, classdesc[1], section_key)
+ self.sections[section_id] = section
+ section.read()
+ elif section_type == self.QEMU_VM_SECTION_PART or section_type == self.QEMU_VM_SECTION_END:
+ section_id = file.read32()
+ self.sections[section_id].read()
+ else:
+ raise Exception("Unknown section type: %d" % section_type)
+ file.close()
+
+ def load_vmsd_json(self, file):
+ vmsd_json = file.read_migration_debug_json()
+ self.vmsd_desc = json.loads(vmsd_json, object_pairs_hook=collections.OrderedDict)
+ for device in self.vmsd_desc['devices']:
+ key = (device['name'], device['instance_id'])
+ value = ( VMSDSection, device )
+ self.section_classes[key] = value
+
+ def getDict(self):
+ r = collections.OrderedDict()
+ for (key, value) in self.sections.items():
+ key = "%s (%d)" % ( value.section_key[0], key )
+ r[key] = value.getDict()
+ return r
+
+###############################################################################
+
+class JSONEncoder(json.JSONEncoder):
+ def default(self, o):
+ if isinstance(o, VMSDFieldGeneric):
+ return str(o)
+ return json.JSONEncoder.default(self, o)
+
+parser = argparse.ArgumentParser()
+parser.add_argument("-f", "--file", help='migration dump to read from', required=True)
+parser.add_argument("-m", "--memory", help='dump RAM contents as well', action='store_true')
+parser.add_argument("-d", "--dump", help='what to dump ("state" or "desc")', default='state')
+parser.add_argument("-x", "--extract", help='extract contents into individual files', action='store_true')
+args = parser.parse_args()
+
+jsonenc = JSONEncoder(indent=4, separators=(',', ': '))
+
+if args.extract:
+ dump = MigrationDump(args.file)
+
+ dump.read(desc_only = True)
+ print "desc.json"
+ f = open("desc.json", "wb")
+ f.truncate()
+ f.write(jsonenc.encode(dump.vmsd_desc))
+ f.close()
+
+ dump.read(write_memory = True)
+ dict = dump.getDict()
+ print "state.json"
+ f = open("state.json", "wb")
+ f.truncate()
+ f.write(jsonenc.encode(dict))
+ f.close()
+elif args.dump == "state":
+ dump = MigrationDump(args.file)
+ dump.read(dump_memory = args.memory)
+ dict = dump.getDict()
+ print jsonenc.encode(dict)
+elif args.dump == "desc":
+ dump = MigrationDump(args.file)
+ dump.read(desc_only = True)
+ print jsonenc.encode(dump.vmsd_desc)
+else:
+ raise Exception("Please specify either -x, -d state or -d dump")
diff --git a/scripts/vmstate-static-checker.py b/scripts/vmstate-static-checker.py
index f7ce3fc483..b6c0bbead9 100755
--- a/scripts/vmstate-static-checker.py
+++ b/scripts/vmstate-static-checker.py
@@ -53,6 +53,8 @@ def check_fields_match(name, s_field, d_field):
'parent_obj.parent_obj.parent_obj',
'port.br.dev.exp.aer_log',
'parent_obj.parent_obj.parent_obj.exp.aer_log'],
+ 'cirrus_vga': ['hw_cursor_x', 'vga.hw_cursor_x',
+ 'hw_cursor_y', 'vga.hw_cursor_y'],
'lsiscsi': ['dev', 'parent_obj'],
'mch': ['d', 'parent_obj'],
'pci_bridge': ['bridge.dev', 'parent_obj', 'bridge.dev.shpc', 'shpc'],
diff --git a/tests/Makefile b/tests/Makefile
index db5b3c3df1..5caccf765a 100644
--- a/tests/Makefile
+++ b/tests/Makefile
@@ -266,7 +266,8 @@ tests/test-qdev-global-props$(EXESUF): tests/test-qdev-global-props.o \
libqemuutil.a libqemustub.a
tests/test-vmstate$(EXESUF): tests/test-vmstate.o \
migration/vmstate.o migration/qemu-file.o migration/qemu-file-buf.o \
- migration/qemu-file-unix.o \
+ migration/qemu-file-unix.o qjson.o \
+ $(qom-core-obj) \
libqemuutil.a libqemustub.a
tests/test-qapi-types.c tests/test-qapi-types.h :\
diff --git a/tests/test-vmstate.c b/tests/test-vmstate.c
index 39b7b01734..1d620e04fb 100644
--- a/tests/test-vmstate.c
+++ b/tests/test-vmstate.c
@@ -85,7 +85,7 @@ static void save_vmstate(const VMStateDescription *desc, void *obj)
QEMUFile *f = open_test_file(true);
/* Save file with vmstate */
- vmstate_save_state(f, desc, obj);
+ vmstate_save_state(f, desc, obj, NULL);
qemu_put_byte(f, QEMU_VM_EOF);
g_assert(!qemu_file_get_error(f));
qemu_fclose(f);
@@ -394,7 +394,7 @@ static void test_save_noskip(void)
QEMUFile *fsave = qemu_bufopen("w", NULL);
TestStruct obj = { .a = 1, .b = 2, .c = 3, .d = 4, .e = 5, .f = 6,
.skip_c_e = false };
- vmstate_save_state(fsave, &vmstate_skipping, &obj);
+ vmstate_save_state(fsave, &vmstate_skipping, &obj, NULL);
g_assert(!qemu_file_get_error(fsave));
uint8_t expected[] = {
@@ -414,7 +414,7 @@ static void test_save_skip(void)
QEMUFile *fsave = qemu_bufopen("w", NULL);
TestStruct obj = { .a = 1, .b = 2, .c = 3, .d = 4, .e = 5, .f = 6,
.skip_c_e = true };
- vmstate_save_state(fsave, &vmstate_skipping, &obj);
+ vmstate_save_state(fsave, &vmstate_skipping, &obj, NULL);
g_assert(!qemu_file_get_error(fsave));
uint8_t expected[] = {
diff --git a/trace-events b/trace-events
index 04f5df2526..907da7ed8a 100644
--- a/trace-events
+++ b/trace-events
@@ -1142,8 +1142,11 @@ vmware_scratch_write(uint32_t index, uint32_t value) "index %d, value 0x%x"
vmware_setmode(uint32_t w, uint32_t h, uint32_t bpp) "%dx%d @ %d bpp"
# savevm.c
+qemu_loadvm_state_section(unsigned int section_type) "%d"
+qemu_loadvm_state_section_partend(uint32_t section_id) "%u"
+qemu_loadvm_state_section_startfull(uint32_t section_id, const char *idstr, uint32_t instance_id, uint32_t version_id) "%u(%s) %u %u"
savevm_section_start(const char *id, unsigned int section_id) "%s, section_id %u"
-savevm_section_end(const char *id, unsigned int section_id) "%s, section_id %u"
+savevm_section_end(const char *id, unsigned int section_id, int ret) "%s, section_id %u -> %d"
savevm_state_begin(void) ""
savevm_state_iterate(void) ""
savevm_state_complete(void) ""
@@ -1154,6 +1157,12 @@ qemu_announce_self_iter(const char *mac) "%s"
# vmstate.c
vmstate_load_field_error(const char *field, int ret) "field \"%s\" load failed, ret = %d"
+vmstate_load_state(const char *name, int version_id) "%s v%d"
+vmstate_load_state_end(const char *name, const char *reason, int val) "%s %s/%d"
+vmstate_load_state_field(const char *name, const char *field) "%s:%s"
+vmstate_subsection_load(const char *parent) "%s"
+vmstate_subsection_load_bad(const char *parent, const char *sub) "%s: %s"
+vmstate_subsection_load_good(const char *parent) "%s"
# qemu-file.c
qemu_file_fclose(void) ""
@@ -1326,6 +1335,68 @@ migrate_fd_cancel(void) ""
migrate_pending(uint64_t size, uint64_t max) "pending size %" PRIu64 " max %" PRIu64
migrate_transferred(uint64_t tranferred, uint64_t time_spent, double bandwidth, uint64_t size) "transferred %" PRIu64 " time_spent %" PRIu64 " bandwidth %g max_size %" PRId64
+# migration/rdma.c
+__qemu_rdma_add_block(int block, uint64_t addr, uint64_t offset, uint64_t len, uint64_t end, uint64_t bits, int chunks) "Added Block: %d, addr: %" PRIu64 ", offset: %" PRIu64 " length: %" PRIu64 " end: %" PRIu64 " bits %" PRIu64 " chunks %d"
+__qemu_rdma_delete_block(int block, uint64_t addr, uint64_t offset, uint64_t len, uint64_t end, uint64_t bits, int chunks) "Deleted Block: %d, addr: %" PRIu64 ", offset: %" PRIu64 " length: %" PRIu64 " end: %" PRIu64 " bits %" PRIu64 " chunks %d"
+qemu_dma_accept_incoming_migration(void) ""
+qemu_dma_accept_incoming_migration_accepted(void) ""
+qemu_rdma_accept_pin_state(bool pin) "%d"
+qemu_rdma_accept_pin_verbsc(void *verbs) "Verbs context after listen: %p"
+qemu_rdma_block_for_wrid_miss(const char *wcompstr, int wcomp, const char *gcompstr, uint64_t req) "A Wanted wrid %s (%d) but got %s (%" PRIu64 ")"
+qemu_rdma_block_for_wrid_miss_b(const char *wcompstr, int wcomp, const char *gcompstr, uint64_t req) "B Wanted wrid %s (%d) but got %s (%" PRIu64 ")"
+qemu_rdma_cleanup_disconnect(void) ""
+qemu_rdma_cleanup_waiting_for_disconnect(void) ""
+qemu_rdma_close(void) ""
+qemu_rdma_connect_pin_all_requested(void) ""
+qemu_rdma_connect_pin_all_outcome(bool pin) "%d"
+qemu_rdma_dest_init_trying(const char *host, const char *ip) "%s => %s"
+qemu_rdma_dump_gid(const char *who, const char *src, const char *dst) "%s Source GID: %s, Dest GID: %s"
+qemu_rdma_exchange_get_response_start(const char *desc) "CONTROL: %s receiving..."
+qemu_rdma_exchange_get_response_none(const char *desc, int type) "Surprise: got %s (%d)"
+qemu_rdma_exchange_send_issue_callback(void) ""
+qemu_rdma_exchange_send_waiting(const char *desc) "Waiting for response %s"
+qemu_rdma_exchange_send_received(const char *desc) "Response %s received."
+qemu_rdma_fill(int64_t control_len, int size) "RDMA %" PRId64 " of %d bytes already in buffer"
+qemu_rdma_init_ram_blocks(int blocks) "Allocated %d local ram block structures"
+qemu_rdma_poll_recv(const char *compstr, int64_t comp, int64_t id, int sent) "completion %s #%" PRId64 " received (%" PRId64 ") left %d"
+qemu_rdma_poll_write(const char *compstr, int64_t comp, int left, uint64_t block, uint64_t chunk, void *local, void *remote) "completions %s (%" PRId64 ") left %d, block %" PRIu64 ", chunk: %" PRIu64 " %p %p"
+qemu_rdma_poll_other(const char *compstr, int64_t comp, int left) "other completion %s (%" PRId64 ") received left %d"
+qemu_rdma_post_send_control(const char *desc) "CONTROL: sending %s.."
+qemu_rdma_register_and_get_keys(uint64_t len, void *start) "Registering %" PRIu64 " bytes @ %p"
+qemu_rdma_registration_handle_compress(int64_t length, int index, int64_t offset) "Zapping zero chunk: %" PRId64 " bytes, index %d, offset %" PRId64
+qemu_rdma_registration_handle_finished(void) ""
+qemu_rdma_registration_handle_ram_blocks(void) ""
+qemu_rdma_registration_handle_register(int requests) "%d requests"
+qemu_rdma_registration_handle_register_loop(int req, int index, uint64_t addr, uint64_t chunks) "Registration request (%d): index %d, current_addr %" PRIu64 " chunks: %" PRIu64
+qemu_rdma_registration_handle_register_rkey(int rkey) "%x"
+qemu_rdma_registration_handle_unregister(int requests) "%d requests"
+qemu_rdma_registration_handle_unregister_loop(int count, int index, uint64_t chunk) "Unregistration request (%d): index %d, chunk %" PRIu64
+qemu_rdma_registration_handle_unregister_success(uint64_t chunk) "%" PRIu64
+qemu_rdma_registration_handle_wait(uint64_t flags) "Waiting for next request %" PRIu64
+qemu_rdma_registration_start(uint64_t flags) "%" PRIu64
+qemu_rdma_registration_stop(uint64_t flags) "%" PRIu64
+qemu_rdma_registration_stop_ram(void) ""
+qemu_rdma_resolve_host_trying(const char *host, const char *ip) "Trying %s => %s"
+qemu_rdma_signal_unregister_append(uint64_t chunk, int pos) "Appending unregister chunk %" PRIu64 " at position %d"
+qemu_rdma_signal_unregister_already(uint64_t chunk) "Unregister chunk %" PRIu64 " already in queue"
+qemu_rdma_unregister_waiting_inflight(uint64_t chunk) "Cannot unregister inflight chunk: %" PRIu64
+qemu_rdma_unregister_waiting_proc(uint64_t chunk, int pos) "Processing unregister for chunk: %" PRIu64 " at position %d"
+qemu_rdma_unregister_waiting_send(uint64_t chunk) "Sending unregister for chunk: %" PRIu64
+qemu_rdma_unregister_waiting_complete(uint64_t chunk) "Unregister for chunk: %" PRIu64 " complete."
+qemu_rdma_write_flush(int sent) "sent total: %d"
+qemu_rdma_write_one_block(int count, int block, uint64_t chunk, uint64_t current, uint64_t len, int nb_sent, int nb_chunks) "(%d) Not clobbering: block: %d chunk %" PRIu64 " current %" PRIu64 " len %" PRIu64 " %d %d"
+qemu_rdma_write_one_post(uint64_t chunk, long addr, long remote, uint32_t len) "Posting chunk: %" PRIu64 ", addr: %lx remote: %lx, bytes %" PRIu32
+qemu_rdma_write_one_queue_full(void) ""
+qemu_rdma_write_one_recvregres(int mykey, int theirkey, uint64_t chunk) "Received registration result: my key: %x their key %x, chunk %" PRIu64
+qemu_rdma_write_one_sendreg(uint64_t chunk, int len, int index, int64_t offset) "Sending registration request chunk %" PRIu64 " for %d bytes, index: %d, offset: %" PRId64
+qemu_rdma_write_one_top(uint64_t chunks, uint64_t size) "Writing %" PRIu64 " chunks, (%" PRIu64 " MB)"
+qemu_rdma_write_one_zero(uint64_t chunk, int len, int index, int64_t offset) "Entire chunk is zero, sending compress: %" PRIu64 " for %d bytes, index: %d, offset: %" PRId64
+rdma_start_incoming_migration(void) ""
+rdma_start_incoming_migration_after_dest_init(void) ""
+rdma_start_incoming_migration_after_rdma_listen(void) ""
+rdma_start_outgoing_migration_after_rdma_connect(void) ""
+rdma_start_outgoing_migration_after_rdma_source_init(void) ""
+
# kvm-all.c
kvm_ioctl(int type, void *arg) "type 0x%x, arg %p"
kvm_vm_ioctl(int type, void *arg) "type 0x%x, arg %p"