author     Greg Kurz <gkurz@linux.vnet.ibm.com>        2016-03-11 19:48:47 +0100
committer  David Gibson <david@gibson.dropbear.id.au>  2016-03-16 09:55:06 +1100
commit     f1a6cf3ef734aab142d5f7ce52e219474ababf6b (patch)
tree       f9203f810a454c9c8a74b9262f1d7295e2eb96f2 /hw/ppc
parent     c18ad9a54b75495ce61e8b28d353f8eec51768fc (diff)
spapr_rng: fix race with main loop
Since commit "60253ed1e6ec rng: add request queue support to rng-random",
the use of a spapr_rng device may hang vCPU threads.

The following path is taken without holding the main loop mutex:

h_random()
  rng_backend_request_entropy()
    rng_random_request_entropy()
      qemu_set_fd_handler()

The consequence is that entropy_available() may be called before the vCPU
thread has even queued the request: depending on the scheduling, it may
happen that entropy_available() does not call random_recv()->qemu_sem_post().
The vCPU thread will then sleep forever in h_random()->qemu_sem_wait().

This could not happen before 60253ed1e6ec because entropy_available()
used to call random_recv() unconditionally.

This patch ensures the lock is held to avoid the race.

Signed-off-by: Greg Kurz <gkurz@linux.vnet.ibm.com>
Reviewed-by: Cédric Le Goater <clg@fr.ibm.com>
Reviewed-by: Thomas Huth <thuth@redhat.com>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
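For illustration only, below is a minimal, self-contained sketch of the
locking pattern the patch adopts. It uses plain pthreads and POSIX
semaphores rather than QEMU's iothread/rng APIs, and every name in it is
hypothetical: a requester thread registers its request while holding the
big lock and drops the lock only for the duration of the semaphore wait,
so the handler running in the other thread (the stand-in for
entropy_available()) can never observe a half-set-up request and the
wake-up cannot be lost.

/*
 * Standalone sketch of the locking pattern used by the fix (plain
 * pthreads + POSIX semaphores, not QEMU's APIs; all names are made up).
 */
#include <pthread.h>
#include <semaphore.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

static struct req {
    sem_t sem;
    uint64_t val;
    int received;   /* bytes delivered so far */
    int pending;    /* request visible to the "main loop" thread? */
} req;

/* Stand-in for the main loop: serves requests under big_lock. */
static void *main_loop(void *arg)
{
    int done = 0;

    while (!done) {
        pthread_mutex_lock(&big_lock);
        if (req.pending) {
            req.val = 0x1234567890abcdefULL;   /* pretend entropy */
            req.received = 8;
            req.pending = 0;
            sem_post(&req.sem);                /* wake the requester */
            done = 1;
        }
        pthread_mutex_unlock(&big_lock);
    }
    return NULL;
}

/* Mirrors the fixed loop: register under the lock, wait without it. */
static void *requester(void *arg)
{
    sem_init(&req.sem, 0, 0);

    pthread_mutex_lock(&big_lock);        /* entered with the lock held */
    while (req.received < 8) {
        req.pending = 1;                  /* queue the request while locked */
        pthread_mutex_unlock(&big_lock);  /* drop the lock only to sleep */
        sem_wait(&req.sem);
        pthread_mutex_lock(&big_lock);
    }
    pthread_mutex_unlock(&big_lock);

    printf("got 0x%016llx\n", (unsigned long long)req.val);
    sem_destroy(&req.sem);
    return NULL;
}

int main(void)
{
    pthread_t t1, t2;

    pthread_create(&t1, NULL, main_loop, NULL);
    pthread_create(&t2, NULL, requester, NULL);
    pthread_join(t2, NULL);
    pthread_join(t1, NULL);
    return 0;
}

The key point the sketch tries to show is ordering, not the semaphore
itself: because the request becomes visible to the serving thread only
while the lock is held, the serving thread cannot run in between setting
up the request and waiting for it.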
Diffstat (limited to 'hw/ppc')
-rw-r--r--  hw/ppc/spapr_rng.c  4
1 file changed, 2 insertions, 2 deletions
diff --git a/hw/ppc/spapr_rng.c b/hw/ppc/spapr_rng.c
index a39d472b66..02d6be49f5 100644
--- a/hw/ppc/spapr_rng.c
+++ b/hw/ppc/spapr_rng.c
@@ -77,13 +77,13 @@ static target_ulong h_random(PowerPCCPU *cpu, sPAPRMachineState *spapr,
     hrdata.val.v64 = 0;
     hrdata.received = 0;
 
-    qemu_mutex_unlock_iothread();
     while (hrdata.received < 8) {
         rng_backend_request_entropy(rngstate->backend, 8 - hrdata.received,
                                     random_recv, &hrdata);
+        qemu_mutex_unlock_iothread();
         qemu_sem_wait(&hrdata.sem);
+        qemu_mutex_lock_iothread();
     }
-    qemu_mutex_lock_iothread();
     qemu_sem_destroy(&hrdata.sem);
 
     args[0] = hrdata.val.v64;