path: root/migration-rdma.c
author    Chanho Park <chanho61.park@samsung.com>  2014-06-26 20:28:10 +0900
committer Chanho Park <chanho61.park@samsung.com>  2014-07-07 16:25:44 +0900
commit    a15119db2ff5c2fdfdeb913b297bf8aa3399132e (patch)
tree      7d6f779408bb772b11c029ab88000fc01856b599 /migration-rdma.c
parent    340f06c9eaee097e626c251bf7a013350649c091 (diff)
Imported Upstream version 2.0.0 (upstream/2.0.0)
Change-Id: I081766c4314e7893f54fec80b920b1638d15021f
Diffstat (limited to 'migration-rdma.c')
-rw-r--r--  migration-rdma.c  |  33
1 file changed, 20 insertions, 13 deletions
diff --git a/migration-rdma.c b/migration-rdma.c
index 3d1266f40..eeb430221 100644
--- a/migration-rdma.c
+++ b/migration-rdma.c
@@ -356,6 +356,7 @@ typedef struct RDMAContext {
*/
struct rdma_cm_id *cm_id; /* connection manager ID */
struct rdma_cm_id *listen_id;
+ bool connected;
struct ibv_context *verbs;
struct rdma_event_channel *channel;
@@ -510,19 +511,21 @@ static int qemu_rdma_exchange_send(RDMAContext *rdma, RDMAControlHeader *head,
int *resp_idx,
int (*callback)(RDMAContext *rdma));
-static inline uint64_t ram_chunk_index(uint8_t *start, uint8_t *host)
+static inline uint64_t ram_chunk_index(const uint8_t *start,
+ const uint8_t *host)
{
return ((uintptr_t) host - (uintptr_t) start) >> RDMA_REG_CHUNK_SHIFT;
}
-static inline uint8_t *ram_chunk_start(RDMALocalBlock *rdma_ram_block,
+static inline uint8_t *ram_chunk_start(const RDMALocalBlock *rdma_ram_block,
uint64_t i)
{
return (uint8_t *) (((uintptr_t) rdma_ram_block->local_host_addr)
+ (i << RDMA_REG_CHUNK_SHIFT));
}
-static inline uint8_t *ram_chunk_end(RDMALocalBlock *rdma_ram_block, uint64_t i)
+static inline uint8_t *ram_chunk_end(const RDMALocalBlock *rdma_ram_block,
+ uint64_t i)
{
uint8_t *result = ram_chunk_start(rdma_ram_block, i) +
(1UL << RDMA_REG_CHUNK_SHIFT);
@@ -756,7 +759,7 @@ static void qemu_rdma_dump_gid(const char *who, struct rdma_cm_id *id)
* connections (both IPv4 and IPv6) if the destination machine does not have
* a regular infiniband network available for use.
*
- * The only way to gaurantee that an error is thrown for broken kernels is
+ * The only way to guarantee that an error is thrown for broken kernels is
* for the management software to choose a *specific* interface at bind time
* and validate what time of hardware it is.
*
@@ -778,7 +781,7 @@ static void qemu_rdma_dump_gid(const char *who, struct rdma_cm_id *id)
* Infiniband.
*
* If we detect that we have a *pure* RoCE environment, then we can safely
- * thrown an error even if the management sofware has specified '[::]' as the
+ * thrown an error even if the management software has specified '[::]' as the
* bind address.
*
* However, if there is are multiple hetergeneous devices, then we cannot make
@@ -801,7 +804,7 @@ static int qemu_rdma_broken_ipv6_kernel(Error **errp, struct ibv_context *verbs)
* devices (non-ethernet).
*
* If not, then we can safely proceed with the migration.
- * Otherwise, there are no gaurantees until the bug is fixed in linux.
+ * Otherwise, there are no guarantees until the bug is fixed in linux.
*/
if (!verbs) {
int num_devices, x;
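
The comment above captures the policy implemented by qemu_rdma_broken_ipv6_kernel(): the wildcard '[::]' bind is only refused when the environment can be shown to be pure RoCE (Ethernet link layer); a mix of device types leaves it unable to decide. For reference, a minimal standalone probe of the per-port link layer using only libibverbs calls might look like the sketch below; the helper name any_infiniband_port() and its return convention are illustrative and not part of QEMU.

/* Illustrative sketch (not QEMU's code): report whether any verbs device
 * exposes a port with an InfiniBand link layer.  qemu_rdma_broken_ipv6_kernel()
 * performs a per-port check of this kind before deciding whether binding to
 * '[::]' can be allowed. */
#include <stdbool.h>
#include <stdint.h>
#include <infiniband/verbs.h>

static bool any_infiniband_port(void)
{
    int num_devices, i;
    struct ibv_device **devs = ibv_get_device_list(&num_devices);
    bool found = false;

    if (!devs) {
        return false;
    }

    for (i = 0; i < num_devices && !found; i++) {
        struct ibv_context *ctx = ibv_open_device(devs[i]);
        struct ibv_device_attr dev_attr;
        uint8_t port;

        if (!ctx) {
            continue;
        }
        if (ibv_query_device(ctx, &dev_attr)) {
            ibv_close_device(ctx);
            continue;
        }

        /* verbs ports are numbered starting at 1 */
        for (port = 1; port <= dev_attr.phys_port_cnt; port++) {
            struct ibv_port_attr port_attr;

            if (ibv_query_port(ctx, port, &port_attr)) {
                continue;
            }
            if (port_attr.link_layer == IBV_LINK_LAYER_INFINIBAND) {
                found = true;
                break;
            }
        }
        ibv_close_device(ctx);
    }

    ibv_free_device_list(devs);
    return found;
}
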
@@ -920,9 +923,11 @@ static int qemu_rdma_resolve_host(RDMAContext *rdma, Error **errp)
ret = rdma_resolve_addr(rdma->cm_id, NULL, e->ai_dst_addr,
RDMA_RESOLVE_TIMEOUT_MS);
if (!ret) {
- ret = qemu_rdma_broken_ipv6_kernel(errp, rdma->cm_id->verbs);
- if (ret) {
- continue;
+ if (e->ai_family == AF_INET6) {
+ ret = qemu_rdma_broken_ipv6_kernel(errp, rdma->cm_id->verbs);
+ if (ret) {
+ continue;
+ }
}
goto route;
}
@@ -2192,7 +2197,7 @@ static void qemu_rdma_cleanup(RDMAContext *rdma)
struct rdma_cm_event *cm_event;
int ret, idx;
- if (rdma->cm_id) {
+ if (rdma->cm_id && rdma->connected) {
if (rdma->error_state) {
RDMAControlHeader head = { .len = 0,
.type = RDMA_CONTROL_ERROR,
@@ -2211,7 +2216,7 @@ static void qemu_rdma_cleanup(RDMAContext *rdma)
}
}
DDPRINTF("Disconnected.\n");
- rdma->cm_id = NULL;
+ rdma->connected = false;
}
g_free(rdma->block);
@@ -2233,7 +2238,7 @@ static void qemu_rdma_cleanup(RDMAContext *rdma)
}
if (rdma->qp) {
- ibv_destroy_qp(rdma->qp);
+ rdma_destroy_qp(rdma->cm_id);
rdma->qp = NULL;
}
if (rdma->cq) {
@@ -2370,6 +2375,7 @@ static int qemu_rdma_connect(RDMAContext *rdma, Error **errp)
rdma->cm_id = NULL;
goto err_rdma_source_connect;
}
+ rdma->connected = true;
memcpy(&cap, cm_event->param.conn.private_data, sizeof(cap));
network_to_caps(&cap);
@@ -2904,6 +2910,7 @@ static int qemu_rdma_accept(RDMAContext *rdma)
}
rdma_ack_cm_event(cm_event);
+ rdma->connected = true;
ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_READY);
if (ret) {
@@ -3405,7 +3412,7 @@ void rdma_start_outgoing_migration(void *opaque,
}
ret = qemu_rdma_source_init(rdma, &local_err,
- s->enabled_capabilities[MIGRATION_CAPABILITY_X_RDMA_PIN_ALL]);
+ s->enabled_capabilities[MIGRATION_CAPABILITY_RDMA_PIN_ALL]);
if (ret) {
goto err;
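
Taken together, the hunks above introduce the 'connected' flag on RDMAContext so that qemu_rdma_cleanup() only calls rdma_disconnect() once a connection was actually established (the flag is set after connect on the source and after accept on the destination), and they release the queue pair with rdma_destroy_qp(), the librdmacm counterpart of rdma_create_qp(), instead of ibv_destroy_qp(). A condensed sketch of that teardown ordering is below, with a hypothetical MyContext struct standing in for RDMAContext; only the librdmacm calls are real.

/* Condensed sketch (not QEMU's code) of the teardown ordering above.
 * MyContext is a hypothetical stand-in for RDMAContext; only the
 * librdmacm calls (rdma_disconnect, rdma_destroy_qp, rdma_destroy_id)
 * are real API. */
#include <stdbool.h>
#include <stddef.h>
#include <rdma/rdma_cma.h>

struct MyContext {
    struct rdma_cm_id *cm_id;   /* connection manager ID */
    struct ibv_qp *qp;          /* created through rdma_create_qp(cm_id, ...) */
    bool connected;             /* set only once connect/accept succeeded */
};

static void my_cleanup(struct MyContext *ctx)
{
    /* Disconnect only if a connection was actually established; the
     * 'connected' flag records this without having to NULL out cm_id. */
    if (ctx->cm_id && ctx->connected) {
        rdma_disconnect(ctx->cm_id);
        ctx->connected = false;
    }

    /* A QP created with rdma_create_qp() is released with
     * rdma_destroy_qp(), which takes the cm_id rather than the raw QP. */
    if (ctx->cm_id && ctx->qp) {
        rdma_destroy_qp(ctx->cm_id);
        ctx->qp = NULL;
    }

    if (ctx->cm_id) {
        rdma_destroy_id(ctx->cm_id);
        ctx->cm_id = NULL;
    }
}
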