author     Paolo Bonzini <pbonzini@redhat.com>    2015-03-21 16:32:05 +0100
committer  Paolo Bonzini <pbonzini@redhat.com>    2015-03-25 13:37:10 +0100
commit     8a5956ad6392f115521dad774055c737c49fb0dd (patch)
tree       7c9b30c6fefc1ee2a6d484b44fa2a863e54ebe44 /tests
parent     549c4e49788bbb16bdac3fb5480a2177019899e2 (diff)
rcu tests: fix compilation on 32-bit ppc
32-bit PPC cannot do atomic operations on long long.  Inside the loops,
we are already using local counters that are summed at the end of the
run---with some exceptions (rcu_stress_count for rcutorture, n_nodes for
test-rcu-list): fix them to use the same technique.  For test-rcu-list,
remove the mostly unused member "val" from the list.

Then, use a mutex to protect the global counts.  Performance does not
matter there because every thread will only enter the critical section
once.

Remaining uses of atomic instructions are for ints or pointers.

Reported-by: Andreas Faerber <afaerber@suse.de>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
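The counting pattern the patch applies can be seen in isolation below.  This
is a minimal standalone sketch, not code from the patch: it uses plain
pthreads in place of QEMU's QemuThread/QemuMutex wrappers, and the names
(worker, n_events, n_events_local, N_THREADS, N_ITERS) are illustrative only.
Each thread accumulates into a non-atomic local long long and folds it into
the global total exactly once, under a mutex, when it finishes, so no 64-bit
atomics are needed and the single critical section per thread costs nothing.

/*
 * Sketch of the local-counter-plus-mutex pattern: the hot path touches
 * only a per-thread counter; the shared total is updated once per thread
 * under a mutex.  pthreads stand in for QEMU's thread wrappers.
 */
#include <pthread.h>
#include <stdio.h>

#define N_THREADS 4
#define N_ITERS   1000000

static pthread_mutex_t counts_mutex = PTHREAD_MUTEX_INITIALIZER;
static long long n_events;                /* global count, mutex-protected */

static void *worker(void *arg)
{
    long long n_events_local = 0;         /* per-thread, no atomics needed */

    for (int i = 0; i < N_ITERS; i++) {
        n_events_local++;                 /* hot loop updates the local only */
    }

    /* Exactly one critical section per thread, so contention is irrelevant. */
    pthread_mutex_lock(&counts_mutex);
    n_events += n_events_local;
    pthread_mutex_unlock(&counts_mutex);
    return NULL;
}

int main(void)
{
    pthread_t threads[N_THREADS];

    for (int i = 0; i < N_THREADS; i++) {
        pthread_create(&threads[i], NULL, worker, NULL);
    }
    for (int i = 0; i < N_THREADS; i++) {
        pthread_join(threads[i], NULL);
    }
    printf("total events: %lld\n", n_events);   /* expect 4000000 */
    return 0;
}

Built with e.g. "cc -pthread sketch.c", this prints 4000000; the same shape is
what the hunks below introduce for n_reads, n_updates, rcu_stress_count and
the test-rcu-list counters.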
Diffstat (limited to 'tests')
-rw-r--r--  tests/rcutorture.c     20
-rw-r--r--  tests/test-rcu-list.c  50
2 files changed, 44 insertions, 26 deletions
diff --git a/tests/rcutorture.c b/tests/rcutorture.c
index 60a2ccfe2e..d6b304d000 100644
--- a/tests/rcutorture.c
+++ b/tests/rcutorture.c
@@ -82,6 +82,7 @@ static volatile int goflag = GOFLAG_INIT;
#define RCU_READ_RUN 1000
#define NR_THREADS 100
+static QemuMutex counts_mutex;
static QemuThread threads[NR_THREADS];
static struct rcu_reader_data *data[NR_THREADS];
static int n_threads;
@@ -130,7 +131,9 @@ static void *rcu_read_perf_test(void *arg)
}
n_reads_local += RCU_READ_RUN;
}
- atomic_add(&n_reads, n_reads_local);
+ qemu_mutex_lock(&counts_mutex);
+ n_reads += n_reads_local;
+ qemu_mutex_unlock(&counts_mutex);
rcu_unregister_thread();
return NULL;
@@ -151,7 +154,9 @@ static void *rcu_update_perf_test(void *arg)
synchronize_rcu();
n_updates_local++;
}
- atomic_add(&n_updates, n_updates_local);
+ qemu_mutex_lock(&counts_mutex);
+ n_updates += n_updates_local;
+ qemu_mutex_unlock(&counts_mutex);
rcu_unregister_thread();
return NULL;
@@ -241,6 +246,7 @@ static void *rcu_read_stress_test(void *arg)
struct rcu_stress *p;
int pc;
long long n_reads_local = 0;
+ long long rcu_stress_local[RCU_STRESS_PIPE_LEN + 1] = { 0 };
volatile int garbage = 0;
rcu_register_thread();
@@ -265,13 +271,18 @@ static void *rcu_read_stress_test(void *arg)
if ((pc > RCU_STRESS_PIPE_LEN) || (pc < 0)) {
pc = RCU_STRESS_PIPE_LEN;
}
- atomic_inc(&rcu_stress_count[pc]);
+ rcu_stress_local[pc]++;
n_reads_local++;
if ((++itercnt % 0x1000) == 0) {
synchronize_rcu();
}
}
- atomic_add(&n_reads, n_reads_local);
+ qemu_mutex_lock(&counts_mutex);
+ n_reads += n_reads_local;
+ for (i = 0; i <= RCU_STRESS_PIPE_LEN; i++) {
+ rcu_stress_count[i] += rcu_stress_local[i];
+ }
+ qemu_mutex_unlock(&counts_mutex);
rcu_unregister_thread();
return NULL;
@@ -419,6 +430,7 @@ int main(int argc, char *argv[])
int nreaders = 1;
int duration = 1;
+ qemu_mutex_init(&counts_mutex);
if (argc >= 2 && argv[1][0] == '-') {
g_test_init(&argc, &argv, NULL);
if (g_test_quick()) {
diff --git a/tests/test-rcu-list.c b/tests/test-rcu-list.c
index 46b5e263e5..4c5f62e996 100644
--- a/tests/test-rcu-list.c
+++ b/tests/test-rcu-list.c
@@ -35,16 +35,15 @@
* Test variables.
*/
-long long n_reads = 0LL;
-long long n_updates = 0LL;
-long long n_reclaims = 0LL;
-long long n_nodes_removed = 0LL;
-long long n_nodes = 0LL;
-int g_test_in_charge = 0;
+static QemuMutex counts_mutex;
+static long long n_reads = 0LL;
+static long long n_updates = 0LL;
+static long long n_reclaims = 0LL;
+static long long n_nodes_removed = 0LL;
+static long long n_nodes = 0LL;
+static int g_test_in_charge = 0;
-int nthreadsrunning;
-
-char argsbuf[64];
+static int nthreadsrunning;
#define GOFLAG_INIT 0
#define GOFLAG_RUN 1
@@ -92,21 +91,21 @@ static void wait_all_threads(void)
struct list_element {
QLIST_ENTRY(list_element) entry;
struct rcu_head rcu;
- long long val;
};
static void reclaim_list_el(struct rcu_head *prcu)
{
struct list_element *el = container_of(prcu, struct list_element, rcu);
g_free(el);
- atomic_add(&n_reclaims, 1);
+ /* Accessed only from call_rcu thread. */
+ n_reclaims++;
}
static QLIST_HEAD(q_list_head, list_element) Q_list_head;
static void *rcu_q_reader(void *arg)
{
- long long j, n_reads_local = 0;
+ long long n_reads_local = 0;
struct list_element *el;
*(struct rcu_reader_data **)arg = &rcu_reader;
@@ -118,8 +117,6 @@ static void *rcu_q_reader(void *arg)
while (goflag == GOFLAG_RUN) {
rcu_read_lock();
QLIST_FOREACH_RCU(el, &Q_list_head, entry) {
- j = atomic_read(&el->val);
- (void)j;
n_reads_local++;
if (goflag == GOFLAG_STOP) {
break;
@@ -129,7 +126,9 @@ static void *rcu_q_reader(void *arg)
g_usleep(100);
}
- atomic_add(&n_reads, n_reads_local);
+ qemu_mutex_lock(&counts_mutex);
+ n_reads += n_reads_local;
+ qemu_mutex_unlock(&counts_mutex);
return NULL;
}
@@ -137,6 +136,7 @@ static void *rcu_q_reader(void *arg)
static void *rcu_q_updater(void *arg)
{
int j, target_el;
+ long long n_nodes_local = 0;
long long n_updates_local = 0;
long long n_removed_local = 0;
struct list_element *el, *prev_el;
@@ -170,8 +170,7 @@ static void *rcu_q_updater(void *arg)
j++;
if (target_el == j) {
prev_el = g_new(struct list_element, 1);
- atomic_add(&n_nodes, 1);
- prev_el->val = atomic_read(&n_nodes);
+ n_nodes_local++;
QLIST_INSERT_BEFORE_RCU(el, prev_el, entry);
break;
}
@@ -181,8 +180,11 @@ static void *rcu_q_updater(void *arg)
synchronize_rcu();
}
synchronize_rcu();
- atomic_add(&n_updates, n_updates_local);
- atomic_add(&n_nodes_removed, n_removed_local);
+ qemu_mutex_lock(&counts_mutex);
+ n_nodes += n_nodes_local;
+ n_updates += n_updates_local;
+ n_nodes_removed += n_removed_local;
+ qemu_mutex_unlock(&counts_mutex);
return NULL;
}
@@ -194,10 +196,11 @@ static void rcu_qtest_init(void)
srand(time(0));
for (i = 0; i < RCU_Q_LEN; i++) {
new_el = g_new(struct list_element, 1);
- new_el->val = i;
QLIST_INSERT_HEAD_RCU(&Q_list_head, new_el, entry);
}
- atomic_add(&n_nodes, RCU_Q_LEN);
+ qemu_mutex_lock(&counts_mutex);
+ n_nodes += RCU_Q_LEN;
+ qemu_mutex_unlock(&counts_mutex);
}
static void rcu_qtest_run(int duration, int nreaders)
@@ -233,7 +236,9 @@ static void rcu_qtest(const char *test, int duration, int nreaders)
call_rcu1(&prev_el->rcu, reclaim_list_el);
n_removed_local++;
}
- atomic_add(&n_nodes_removed, n_removed_local);
+ qemu_mutex_lock(&counts_mutex);
+ n_nodes_removed += n_removed_local;
+ qemu_mutex_unlock(&counts_mutex);
synchronize_rcu();
while (n_nodes_removed > n_reclaims) {
g_usleep(100);
@@ -277,6 +282,7 @@ int main(int argc, char *argv[])
{
int duration = 0, readers = 0;
+ qemu_mutex_init(&counts_mutex);
if (argc >= 2) {
if (argv[1][0] == '-') {
g_test_init(&argc, &argv, NULL);