author    | Stefan Hajnoczi <stefanha@redhat.com>    | 2013-09-11 16:42:35 +0200
committer | SeokYeon Hwang <syeon.hwang@samsung.com> | 2013-11-07 13:48:37 +0900
commit    | c3e7987208782b79c91432703a61bdfa53467556
tree      | 7682fd7a54fdfaece6abdcc06661f4e64f4168a2
parent    | 8d8f3a817f65bddef628a40045e1ab5333f1e0e9
coroutine: add ./configure --disable-coroutine-pool
The 'gthread' coroutine backend was written before the freelist (aka
pool) existed in qemu-coroutine.c.
In that backend every coroutine is backed by a dedicated thread, and the
thread is expected to exit when its coroutine terminates. It is therefore
not possible to reuse threads from a pool.
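(Editor's illustration, for readers new to the freelist: the pattern is
sketched below in plain C, with pthreads standing in for QEMU's
qemu_mutex/QSLIST primitives. pool_get/pool_put are hypothetical names,
not QEMU API. Up to POOL_MAX_SIZE terminated objects sit on a
lock-protected singly-linked list so the next create can skip allocation.)

```c
/* Hedged sketch of the freelist (pool) pattern; illustrative only,
 * not code from the QEMU tree. */
#include <pthread.h>
#include <stdlib.h>

#define POOL_MAX_SIZE 64

typedef struct PoolObject {
    struct PoolObject *next;   /* link used only while the object is pooled */
} PoolObject;

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static PoolObject *pool_head;
static unsigned int pool_size;

/* Reuse a pooled object if one is available, otherwise allocate fresh. */
PoolObject *pool_get(void)
{
    PoolObject *obj = NULL;

    pthread_mutex_lock(&pool_lock);
    if (pool_head) {
        obj = pool_head;
        pool_head = obj->next;
        pool_size--;
    }
    pthread_mutex_unlock(&pool_lock);

    return obj ? obj : calloc(1, sizeof(*obj));
}

/* Return an object to the pool, or free it when the pool is full. */
void pool_put(PoolObject *obj)
{
    pthread_mutex_lock(&pool_lock);
    if (pool_size < POOL_MAX_SIZE) {
        obj->next = pool_head;
        pool_head = obj;
        pool_size++;
        pthread_mutex_unlock(&pool_lock);
        return;
    }
    pthread_mutex_unlock(&pool_lock);

    free(obj);
}
```

Recycling only pays off when the stored object can be resumed later; a
gthread-backed coroutine's thread has already exited by the time it would
be pooled, which is why the pool must stay off for that backend.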
This patch automatically disables the pool when 'gthread' is used. This
allows the 'gthread' backend to work again (for example,
tests/test-coroutine completes successfully instead of hanging).
I considered implementing thread reuse but I don't want quirks like CPU
affinity differences due to coroutine threads being recycled. The
'gthread' backend is a reference backend and it's therefore okay to skip
the pool optimization.
Note this patch also makes it easy to toggle the pool for benchmarking
purposes:
  ./configure --with-coroutine=ucontext \
              --disable-coroutine-pool
Reported-by: Gabriel Kerneis <gabriel@kerneis.info>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Gabriel Kerneis <gabriel@kerneis.info>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
(cherry picked from commit 70c60c089fdc6bf8a79324e492c13e8c08d55942)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
-rwxr-xr-x | configure        | 24
-rw-r--r-- | qemu-coroutine.c | 34
2 files changed, 43 insertions(+), 15 deletions(-)
diff --git a/configure b/configure
--- a/configure
+++ b/configure
@@ -241,6 +241,7 @@ guest_agent=""
 want_tools="yes"
 libiscsi=""
 coroutine=""
+coroutine_pool=""
 seccomp=""
 gl="yes"
@@ -895,6 +896,10 @@ for opt do
   ;;
   --with-coroutine=*) coroutine="$optarg"
   ;;
+  --disable-coroutine-pool) coroutine_pool="no"
+  ;;
+  --enable-coroutine-pool) coroutine_pool="yes"
+  ;;
   --disable-docs) docs="no"
   ;;
   --enable-docs) docs="yes"
@@ -1218,6 +1223,8 @@ echo "  --disable-seccomp        disable seccomp support"
 echo "  --enable-seccomp         enables seccomp support"
 echo "  --with-coroutine=BACKEND coroutine backend. Supported options:"
 echo "                           gthread, ucontext, sigaltstack, windows"
+echo "  --disable-coroutine-pool disable coroutine freelist (worse performance)"
+echo "  --enable-coroutine-pool  enable coroutine freelist (better performance)"
 echo "  --enable-glusterfs       enable GlusterFS backend"
 echo "  --disable-glusterfs      disable GlusterFS backend"
 echo "  --enable-gcov            enable test coverage analysis with gcov"
@@ -3310,6 +3317,17 @@ else
   esac
 fi
 
+if test "$coroutine_pool" = ""; then
+  if test "$coroutine" = "gthread"; then
+    coroutine_pool=no
+  else
+    coroutine_pool=yes
+  fi
+fi
+if test "$coroutine" = "gthread" -a "$coroutine_pool" = "yes"; then
+  error_exit "'gthread' coroutine backend does not support pool (use --disable-coroutine-pool)"
+fi
+
 ##########################################
 # check if we have open_by_handle_at
@@ -3682,6 +3700,7 @@ echo "libiscsi support  $libiscsi"
 echo "build guest agent $guest_agent"
 echo "seccomp support   $seccomp"
 echo "coroutine backend $coroutine"
+echo "coroutine pool    $coroutine_pool"
 echo "GlusterFS support $glusterfs"
 echo "virtio-blk-data-plane $virtio_blk_data_plane"
 echo "gcov              $gcov_tool"
@@ -4065,6 +4084,11 @@ if test "$rbd" = "yes" ; then
 fi
 
 echo "CONFIG_COROUTINE_BACKEND=$coroutine" >> $config_host_mak
+if test "$coroutine_pool" = "yes" ; then
+  echo "CONFIG_COROUTINE_POOL=1" >> $config_host_mak
+else
+  echo "CONFIG_COROUTINE_POOL=0" >> $config_host_mak
+fi
 
 if test "$open_by_handle_at" = "yes" ; then
   echo "CONFIG_OPEN_BY_HANDLE=y" >> $config_host_mak
diff --git a/qemu-coroutine.c b/qemu-coroutine.c
index 423430d3a0..470852100a 100644
--- a/qemu-coroutine.c
+++ b/qemu-coroutine.c
@@ -30,15 +30,17 @@ static unsigned int pool_size;
 
 Coroutine *qemu_coroutine_create(CoroutineEntry *entry)
 {
-    Coroutine *co;
-
-    qemu_mutex_lock(&pool_lock);
-    co = QSLIST_FIRST(&pool);
-    if (co) {
-        QSLIST_REMOVE_HEAD(&pool, pool_next);
-        pool_size--;
+    Coroutine *co = NULL;
+
+    if (CONFIG_COROUTINE_POOL) {
+        qemu_mutex_lock(&pool_lock);
+        co = QSLIST_FIRST(&pool);
+        if (co) {
+            QSLIST_REMOVE_HEAD(&pool, pool_next);
+            pool_size--;
+        }
+        qemu_mutex_unlock(&pool_lock);
     }
-    qemu_mutex_unlock(&pool_lock);
 
     if (!co) {
         co = qemu_coroutine_new();
@@ -51,15 +53,17 @@ Coroutine *qemu_coroutine_create(CoroutineEntry *entry)
 
 static void coroutine_delete(Coroutine *co)
 {
-    qemu_mutex_lock(&pool_lock);
-    if (pool_size < POOL_MAX_SIZE) {
-        QSLIST_INSERT_HEAD(&pool, co, pool_next);
-        co->caller = NULL;
-        pool_size++;
+    if (CONFIG_COROUTINE_POOL) {
+        qemu_mutex_lock(&pool_lock);
+        if (pool_size < POOL_MAX_SIZE) {
+            QSLIST_INSERT_HEAD(&pool, co, pool_next);
+            co->caller = NULL;
+            pool_size++;
+            qemu_mutex_unlock(&pool_lock);
+            return;
+        }
         qemu_mutex_unlock(&pool_lock);
-        return;
     }
-    qemu_mutex_unlock(&pool_lock);
 
     qemu_coroutine_delete(co);
 }
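A note on the CONFIG_COROUTINE_POOL=1/0 lines above: emitting the macro as
a numeric value, rather than defining it only when the feature is on, is
what lets qemu-coroutine.c write a plain `if (CONFIG_COROUTINE_POOL)`. The
disabled branch is still parsed and type-checked, then compiled away. A
minimal sketch of the idiom (illustrative, not from the QEMU tree; in QEMU
the value reaches C code via the generated config header):

```c
/* Illustrative sketch of testing a 0/1 config macro with a plain `if',
 * so the dead branch is eliminated but never stops compiling. */
#include <stdio.h>

#ifndef CONFIG_COROUTINE_POOL
#define CONFIG_COROUTINE_POOL 1   /* stand-in for the generated config header */
#endif

int main(void)
{
    if (CONFIG_COROUTINE_POOL) {
        /* discarded by the compiler when CONFIG_COROUTINE_POOL is 0 */
        puts("pool enabled: create/delete recycle coroutines");
    } else {
        puts("pool disabled: every create allocates a fresh coroutine");
    }
    return 0;
}
```

Building with -DCONFIG_COROUTINE_POOL=0 selects the other branch at compile
time; both branches must always compile, which keeps the rarely used
configuration from bit-rotting, unlike #ifdef'd-out code.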