author    Anthony Liguori <aliguori@us.ibm.com>  2011-08-22 08:24:58 -0500
committer Anthony Liguori <aliguori@us.ibm.com>  2011-09-02 10:34:55 -0500
commit    75812e92f5533a5b243cc41ea2e2fbac2bbe8837 (patch)
tree      848fe9cb11b82145fae05ee05aace4f90a3564af /cpus.c
parent    655cb10ed9df20de7a5c1d9f69a797dc65d09907 (diff)
main: force enabling of I/O thread
Enabling the I/O thread by default seems like an important part of declaring 1.0. Besides allowing true SMP support with KVM, the I/O thread means that the TCG VCPU doesn't have to multiplex itself with the I/O dispatch routines, which currently requires a (racy) signal-based alarm system.

I know there have been concerns about performance. I think so far the ones that have come up (virtio-net) are most likely due to secondary effects such as decreased batching.

I think we ought to force-enable the I/O thread early in 1.0 development and commit to resolving any lingering issues.

Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
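For context, the pattern this patch keeps (and makes unconditional) is the SIG_IPI "kick": the kick signal is blocked process-wide, each VCPU thread unblocks it for itself and installs a handler that sets exit_request, so the execution loop breaks out at the next safe point. The following is a minimal, self-contained sketch of that blocking/unblocking dance, not QEMU source: SIGUSR1 stands in for SIG_IPI, and vcpu_thread/exit_request here are local placeholders loosely modeled on cpu_signal(), qemu_signal_init(), and qemu_tcg_init_cpu_signals() as they appear in the diff below.

/* Minimal sketch of the SIG_IPI "kick" pattern -- not QEMU code.
 * SIGUSR1 stands in for QEMU's SIG_IPI; all names are placeholders.
 * Build with: cc -pthread kick_sketch.c */
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static volatile sig_atomic_t exit_request;

/* Analogue of cpu_signal(): just ask the execution loop to stop. */
static void cpu_signal(int sig)
{
    (void)sig;
    exit_request = 1;
}

static void *vcpu_thread(void *arg)
{
    struct sigaction sa;
    sigset_t set;

    (void)arg;

    /* Install the handler and unblock the kick signal on this thread
     * only, mirroring what qemu_tcg_init_cpu_signals() does. */
    memset(&sa, 0, sizeof(sa));
    sa.sa_handler = cpu_signal;
    sigemptyset(&sa.sa_mask);
    sigaction(SIGUSR1, &sa, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIGUSR1);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);

    while (!exit_request) {
        /* stand-in for tcg_cpu_exec() */
    }
    puts("vcpu loop kicked out");
    return NULL;
}

int main(void)
{
    sigset_t set;
    pthread_t tid;

    /* Block the kick signal before spawning threads, as
     * qemu_signal_init() does, so only threads that explicitly
     * unblock it will ever receive it. */
    sigemptyset(&set);
    sigaddset(&set, SIGUSR1);
    pthread_sigmask(SIG_BLOCK, &set, NULL);

    pthread_create(&tid, NULL, vcpu_thread, NULL);
    sleep(1);
    pthread_kill(tid, SIGUSR1);   /* the qemu_cpu_kick() equivalent */
    pthread_join(tid, NULL);
    return 0;
}

Roughly speaking, with the I/O thread always on, this kick path (plus the global mutex) is how the TCG thread coordinates with I/O dispatch, in place of the SIGALRM-driven qemu_alarm_pending() polling that the removed #ifndef CONFIG_IOTHREAD code relied on.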
Diffstat (limited to 'cpus.c')
-rw-r--r--  cpus.c  143
1 file changed, 0 insertions(+), 143 deletions(-)
diff --git a/cpus.c b/cpus.c
index b163efe4a8..54c188cf5c 100644
--- a/cpus.c
+++ b/cpus.c
@@ -173,12 +173,9 @@ static void cpu_handle_guest_debug(CPUState *env)
{
gdb_set_stop_cpu(env);
qemu_system_debug_request();
-#ifdef CONFIG_IOTHREAD
env->stopped = 1;
-#endif
}
-#ifdef CONFIG_IOTHREAD
static void cpu_signal(int sig)
{
if (cpu_single_env) {
@@ -186,7 +183,6 @@ static void cpu_signal(int sig)
}
exit_request = 1;
}
-#endif
#ifdef CONFIG_LINUX
static void sigbus_reraise(void)
@@ -262,12 +258,6 @@ static void qemu_kvm_eat_signals(CPUState *env)
exit(1);
}
} while (sigismember(&chkset, SIG_IPI) || sigismember(&chkset, SIGBUS));
-
-#ifndef CONFIG_IOTHREAD
- if (sigismember(&chkset, SIGIO) || sigismember(&chkset, SIGALRM)) {
- qemu_notify_event();
- }
-#endif
}
#else /* !CONFIG_LINUX */
@@ -390,7 +380,6 @@ static int qemu_signal_init(void)
int sigfd;
sigset_t set;
-#ifdef CONFIG_IOTHREAD
/* SIGUSR2 used by posix-aio-compat.c */
sigemptyset(&set);
sigaddset(&set, SIGUSR2);
@@ -409,18 +398,6 @@ static int qemu_signal_init(void)
sigaddset(&set, SIGIO);
sigaddset(&set, SIGALRM);
sigaddset(&set, SIGBUS);
-#else
- sigemptyset(&set);
- sigaddset(&set, SIGBUS);
- if (kvm_enabled()) {
- /*
- * We need to process timer signals synchronously to avoid a race
- * between exit_request check and KVM vcpu entry.
- */
- sigaddset(&set, SIGIO);
- sigaddset(&set, SIGALRM);
- }
-#endif
pthread_sigmask(SIG_BLOCK, &set, NULL);
sigfd = qemu_signalfd(&set);
@@ -447,7 +424,6 @@ static void qemu_kvm_init_cpu_signals(CPUState *env)
sigact.sa_handler = dummy_signal;
sigaction(SIG_IPI, &sigact, NULL);
-#ifdef CONFIG_IOTHREAD
pthread_sigmask(SIG_BLOCK, NULL, &set);
sigdelset(&set, SIG_IPI);
sigdelset(&set, SIGBUS);
@@ -456,17 +432,7 @@ static void qemu_kvm_init_cpu_signals(CPUState *env)
fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
exit(1);
}
-#else
- sigemptyset(&set);
- sigaddset(&set, SIG_IPI);
- sigaddset(&set, SIGIO);
- sigaddset(&set, SIGALRM);
- pthread_sigmask(SIG_BLOCK, &set, NULL);
- pthread_sigmask(SIG_BLOCK, NULL, &set);
- sigdelset(&set, SIGIO);
- sigdelset(&set, SIGALRM);
-#endif
sigdelset(&set, SIG_IPI);
sigdelset(&set, SIGBUS);
r = kvm_set_signal_mask(env, &set);
@@ -478,7 +444,6 @@ static void qemu_kvm_init_cpu_signals(CPUState *env)
static void qemu_tcg_init_cpu_signals(void)
{
-#ifdef CONFIG_IOTHREAD
sigset_t set;
struct sigaction sigact;
@@ -489,7 +454,6 @@ static void qemu_tcg_init_cpu_signals(void)
sigemptyset(&set);
sigaddset(&set, SIG_IPI);
pthread_sigmask(SIG_UNBLOCK, &set, NULL);
-#endif
}
#else /* _WIN32 */
@@ -535,106 +499,6 @@ static void qemu_tcg_init_cpu_signals(void)
}
#endif /* _WIN32 */
-#ifndef CONFIG_IOTHREAD
-int qemu_init_main_loop(void)
-{
- int ret;
-
- ret = qemu_signal_init();
- if (ret) {
- return ret;
- }
-
- qemu_init_sigbus();
-
- return qemu_event_init();
-}
-
-void qemu_main_loop_start(void)
-{
-}
-
-void qemu_init_vcpu(void *_env)
-{
- CPUState *env = _env;
- int r;
-
- env->nr_cores = smp_cores;
- env->nr_threads = smp_threads;
-
- if (kvm_enabled()) {
- r = kvm_init_vcpu(env);
- if (r < 0) {
- fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
- exit(1);
- }
- qemu_kvm_init_cpu_signals(env);
- } else {
- qemu_tcg_init_cpu_signals();
- }
-}
-
-int qemu_cpu_is_self(void *env)
-{
- return 1;
-}
-
-void run_on_cpu(CPUState *env, void (*func)(void *data), void *data)
-{
- func(data);
-}
-
-void resume_all_vcpus(void)
-{
-}
-
-void pause_all_vcpus(void)
-{
-}
-
-void qemu_cpu_kick(void *env)
-{
-}
-
-void qemu_cpu_kick_self(void)
-{
-#ifndef _WIN32
- assert(cpu_single_env);
-
- raise(SIG_IPI);
-#else
- abort();
-#endif
-}
-
-void qemu_notify_event(void)
-{
- CPUState *env = cpu_single_env;
-
- qemu_event_increment ();
- if (env) {
- cpu_exit(env);
- }
- if (next_cpu && env != next_cpu) {
- cpu_exit(next_cpu);
- }
- exit_request = 1;
-}
-
-void qemu_mutex_lock_iothread(void) {}
-void qemu_mutex_unlock_iothread(void) {}
-
-void cpu_stop_current(void)
-{
-}
-
-void vm_stop(int reason)
-{
- do_vm_stop(reason);
-}
-
-#else /* CONFIG_IOTHREAD */
-
QemuMutex qemu_global_mutex;
static QemuCond qemu_io_proceeded_cond;
static bool iothread_requesting_mutex;
@@ -1028,8 +892,6 @@ void vm_stop(int reason)
do_vm_stop(reason);
}
-#endif
-
static int tcg_cpu_exec(CPUState *env)
{
int ret;
@@ -1084,11 +946,6 @@ bool cpu_exec_all(void)
qemu_clock_enable(vm_clock,
(env->singlestep_enabled & SSTEP_NOTIMER) == 0);
-#ifndef CONFIG_IOTHREAD
- if (qemu_alarm_pending()) {
- break;
- }
-#endif
if (cpu_can_run(env)) {
if (kvm_enabled()) {
r = kvm_cpu_exec(env);