author     Paolo Bonzini <pbonzini@redhat.com>    2018-03-09 12:12:29 +0100
committer  Paolo Bonzini <pbonzini@redhat.com>    2018-03-12 16:12:47 +0100
commit     b9b758175424857a900c3253ffb8e55fa0a3fdd6 (patch)
tree       bbd765277510d62f1080278c1dc4b23bb4a37f7a /tests/rcutorture.c
parent     148b2ba1145af29a7afac349b8169790e06d6df4 (diff)
download   qemu-b9b758175424857a900c3253ffb8e55fa0a3fdd6.tar.gz
rcutorture: remove synchronize_rcu from readers

This gives much worse numbers for readers, especially if synchronize_rcu
is made more expensive as is the case with --enable-membarrier.

Before:

    $ tests/rcutorture 10 stress 10
    n_reads: 98304  n_updates: 529  n_mberror: 0
    rcu_stress_count: 98302 2 0 0 0 0 0 0 0 0 0

After:

    $ tests/rcutorture 10 stress 10
    n_reads: 165158482  n_updates: 429  n_mberror: 0
    rcu_stress_count: 165154364 4118 0 0 0 0 0 0 0 0 0

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
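The slowdown comes from synchronize_rcu() blocking its caller until a full
grace period has elapsed (and the commit message notes the wait gets even
more expensive with --enable-membarrier), so a reader that calls it every
0x1000 iterations spends most of its time waiting rather than reading.
Below is a minimal sketch of the pattern being removed, assuming QEMU's RCU
API from include/qemu/rcu.h; read_side_pass() is a hypothetical stand-in for
the body of rcu_read_stress_test(), not a quote of the file:

    /* Illustrative sketch only, not the actual tests/rcutorture.c code. */
    #include "qemu/osdep.h"
    #include "qemu/rcu.h"

    /* Hypothetical helper standing in for one read-side pass. */
    static void read_side_pass(void)
    {
        rcu_read_lock();
        /* ... dereference the current rcu_stress structure and bump
         * the per-thread counters ... */
        rcu_read_unlock();
    }

    /* Reader loop before the patch (simplified to an infinite loop;
     * the real test loops until its goflag tells it to stop). */
    static void reader_loop_before_patch(void)
    {
        int itercnt = 0;

        for (;;) {
            read_side_pass();
            /* Every 0x1000 reads the reader blocked here, waiting for a
             * full grace period; this is what the patch removes, leaving
             * the loop body as just read_side_pass(). */
            if ((++itercnt % 0x1000) == 0) {
                synchronize_rcu();
            }
        }
    }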
Diffstat (limited to 'tests/rcutorture.c')
-rw-r--r--  tests/rcutorture.c | 4 ----
1 file changed, 0 insertions(+), 4 deletions(-)
diff --git a/tests/rcutorture.c b/tests/rcutorture.c
index 4002ecf123..49311c82ea 100644
--- a/tests/rcutorture.c
+++ b/tests/rcutorture.c
@@ -238,7 +238,6 @@ long long rcu_stress_count[RCU_STRESS_PIPE_LEN + 1];
 static void *rcu_read_stress_test(void *arg)
 {
     int i;
-    int itercnt = 0;
     struct rcu_stress *p;
     int pc;
     long long n_reads_local = 0;
@@ -269,9 +268,6 @@ static void *rcu_read_stress_test(void *arg)
         }
         rcu_stress_local[pc]++;
         n_reads_local++;
-        if ((++itercnt % 0x1000) == 0) {
-            synchronize_rcu();
-        }
     }
     qemu_mutex_lock(&counts_mutex);
     n_reads += n_reads_local;
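For context on the counters visible in the last hunk: each reader accumulates
its results in rcu_stress_local[] and n_reads_local, then folds them into the
global totals under counts_mutex, which is where the n_reads and
rcu_stress_count figures quoted in the commit message come from
(rcu_stress_count[i] counts, roughly, reads that saw a structure i update
steps old). A hedged sketch of that final flush; the names come from the diff
context above, but the exact loop is an assumption, not a quote of the file:

    /* Sketch (assumption): fold the per-thread buckets into the globals
     * under counts_mutex at the end of rcu_read_stress_test(). */
    qemu_mutex_lock(&counts_mutex);
    n_reads += n_reads_local;
    for (i = 0; i <= RCU_STRESS_PIPE_LEN; i++) {
        rcu_stress_count[i] += rcu_stress_local[i];
    }
    qemu_mutex_unlock(&counts_mutex);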