author     Fam Zheng <famz@redhat.com>           2015-03-16 17:03:37 +0800
committer  Paolo Bonzini <pbonzini@redhat.com>   2015-04-27 18:24:18 +0200
commit     e95205e1f9cd2c4262b7a7b1c992a94512c86d0e
tree       1c03e53eeb46e2d7c15b9c15a1f1c9414e254e6e /exec.c
parent     33b6c2edf6214f02b9beaea61b169506c01f90aa
dma-helpers: Fix race condition of continue_after_map_failure and dma_aio_cancel
If DMA's owning thread cancels the IO while the bounce buffer's owning thread
is notifying the "cpu client list", a use-after-free happens:
    continue_after_map_failure           dma_aio_cancel
    ------------------------------------------------------------------
    aio_bh_new
                                         qemu_bh_delete
    qemu_bh_schedule (use after free)
Also, the old code doesn't run the bh in the right AioContext.
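For illustration, here is an abbreviated sketch of the pre-patch caller pattern (modelled on the old dma-helpers.c; function bodies are trimmed). qemu_bh_new() attaches the BH to the main loop's AioContext regardless of where the DMA request runs, and nothing serializes the two threads' use of dbs->bh:

    /* Abbreviated sketch of the OLD, racy pattern. */

    /* Runs in the bounce buffer owner's thread, called via the
     * map-client list: */
    static void continue_after_map_failure(void *opaque)
    {
        DMAAIOCB *dbs = (DMAAIOCB *)opaque;

        dbs->bh = qemu_bh_new(reschedule_dma, dbs);
        /* dma_aio_cancel() may run qemu_bh_delete(dbs->bh) right here... */
        qemu_bh_schedule(dbs->bh);  /* ...turning this into a use-after-free */
    }

    /* Runs in the DMA owner's thread: */
    static void dma_aio_cancel(BlockAIOCB *acb)
    {
        DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);

        if (dbs->bh) {
            qemu_bh_delete(dbs->bh);  /* frees the BH under the notifier */
            dbs->bh = NULL;
        }
    }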
Fix both problems by passing a QEMUBH to cpu_register_map_client.
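On the caller side (the dma-helpers.c half of the series, which is outside the exec.c diffstat shown below), the fix amounts to allocating the BH once in the request's own AioContext and letting map_client_list_lock serialize registration against cancellation. A sketch:

    /* Sketch of the NEW caller pattern (dma-helpers.c side). */

    /* On bounce-buffer exhaustion: create the BH in the request's own
     * AioContext up front and hand it to the map-client list. */
    dbs->bh = aio_bh_new(dbs->ctx, reschedule_dma, dbs);
    cpu_register_map_client(dbs->bh);

    /* On cancellation: unregister first (this takes map_client_list_lock,
     * so it cannot race with cpu_notify_map_clients_locked()), after
     * which deleting the BH is safe. */
    if (dbs->bh) {
        cpu_unregister_map_client(dbs->bh);
        qemu_bh_delete(dbs->bh);
        dbs->bh = NULL;
    }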
Signed-off-by: Fam Zheng <famz@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <1426496617-10702-6-git-send-email-famz@redhat.com>
[Remove unnecessary forward declaration. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'exec.c')
-rw-r--r--  exec.c | 34
1 file changed, 21 insertions(+), 13 deletions(-)
@@ -2479,8 +2479,7 @@ typedef struct {
 static BounceBuffer bounce;
 
 typedef struct MapClient {
-    void *opaque;
-    void (*callback)(void *opaque);
+    QEMUBH *bh;
     QLIST_ENTRY(MapClient) link;
 } MapClient;
 
@@ -2488,31 +2487,34 @@ QemuMutex map_client_list_lock;
 static QLIST_HEAD(map_client_list, MapClient) map_client_list
     = QLIST_HEAD_INITIALIZER(map_client_list);
 
-static void cpu_unregister_map_client(void *_client);
+static void cpu_unregister_map_client_do(MapClient *client)
+{
+    QLIST_REMOVE(client, link);
+    g_free(client);
+}
+
 static void cpu_notify_map_clients_locked(void)
 {
     MapClient *client;
 
     while (!QLIST_EMPTY(&map_client_list)) {
         client = QLIST_FIRST(&map_client_list);
-        client->callback(client->opaque);
-        cpu_unregister_map_client(client);
+        qemu_bh_schedule(client->bh);
+        cpu_unregister_map_client_do(client);
     }
 }
 
-void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
+void cpu_register_map_client(QEMUBH *bh)
 {
     MapClient *client = g_malloc(sizeof(*client));
 
     qemu_mutex_lock(&map_client_list_lock);
-    client->opaque = opaque;
-    client->callback = callback;
+    client->bh = bh;
     QLIST_INSERT_HEAD(&map_client_list, client, link);
     if (!atomic_read(&bounce.in_use)) {
         cpu_notify_map_clients_locked();
     }
     qemu_mutex_unlock(&map_client_list_lock);
-    return client;
 }
 
 void cpu_exec_init_all(void)
@@ -2523,12 +2525,18 @@ void cpu_exec_init_all(void)
     qemu_mutex_init(&map_client_list_lock);
 }
 
-static void cpu_unregister_map_client(void *_client)
+void cpu_unregister_map_client(QEMUBH *bh)
 {
-    MapClient *client = (MapClient *)_client;
+    MapClient *client;
 
-    QLIST_REMOVE(client, link);
-    g_free(client);
+    qemu_mutex_lock(&map_client_list_lock);
+    QLIST_FOREACH(client, &map_client_list, link) {
+        if (client->bh == bh) {
+            cpu_unregister_map_client_do(client);
+            break;
+        }
+    }
+    qemu_mutex_unlock(&map_client_list_lock);
 }
 
 static void cpu_notify_map_clients(void)