author     Paolo Bonzini <pbonzini@redhat.com>    2012-12-17 18:20:00 +0100
committer  Paolo Bonzini <pbonzini@redhat.com>    2012-12-19 08:32:39 +0100
commit     1de7afc984b49af164e2619e6850b9732b173b34 (patch)
tree       60cd16f527440fcfcdb81d9bea1af5d9147604c4 /qemu-barrier.h
parent     14cccb618508a0aa70eb9ccf366703a019a45ff0 (diff)
download   qemu-1de7afc984b49af164e2619e6850b9732b173b34.tar.gz
misc: move include files to include/qemu/
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'qemu-barrier.h')
-rw-r--r--  qemu-barrier.h  67
1 file changed, 0 insertions(+), 67 deletions(-)
diff --git a/qemu-barrier.h b/qemu-barrier.h
deleted file mode 100644
index faa83d265e..0000000000
--- a/qemu-barrier.h
+++ /dev/null
@@ -1,67 +0,0 @@
-#ifndef __QEMU_BARRIER_H
-#define __QEMU_BARRIER_H 1
-
-/* Compiler barrier */
-#define barrier() asm volatile("" ::: "memory")
-
-#if defined(__i386__)
-
-#include "compiler.h" /* QEMU_GNUC_PREREQ */
-
-/*
- * Because of the strongly ordered x86 storage model, smp_wmb() and
- * smp_rmb() are no-ops on x86 (a compiler barrier only), at least as
- * long as QEMU does not access write-combining memory or issue
- * non-temporal load/stores from C code.
- */
-#define smp_wmb() barrier()
-#define smp_rmb() barrier()
-/*
- * Use the GCC builtin if it is available, as it can emit mfence on
- * 32-bit x86 as well, e.g. when built with -march=pentium-m; however,
- * on i386 there appear to be known bugs as recently as GCC 4.3.  The
- * fallback uses a locked no-op add, which is also a full barrier on x86.
- */
-#if QEMU_GNUC_PREREQ(4, 4)
-#define smp_mb() __sync_synchronize()
-#else
-#define smp_mb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
-#endif
-
-#elif defined(__x86_64__)
-
-#define smp_wmb() barrier()
-#define smp_rmb() barrier()
-#define smp_mb() asm volatile("mfence" ::: "memory")
-
-#elif defined(_ARCH_PPC)
-
-/*
- * We use eieio() for smp_wmb() on powerpc.  This assumes we do not
- * need to order cacheable and non-cacheable stores with respect to
- * each other.
- */
-#define smp_wmb() asm volatile("eieio" ::: "memory")
-
-#if defined(__powerpc64__)
-#define smp_rmb() asm volatile("lwsync" ::: "memory")
-#else
-#define smp_rmb() asm volatile("sync" ::: "memory")
-#endif
-
-#define smp_mb() asm volatile("sync" ::: "memory")
-
-#else
-
-/*
- * For host platforms without explicit barrier definitions above, use
- * the GCC __sync_synchronize() primitive to generate a full barrier.
- * This should be safe on all platforms, though it may be overkill
- * for smp_wmb() and smp_rmb().
- */
-#define smp_wmb() __sync_synchronize()
-#define smp_mb() __sync_synchronize()
-#define smp_rmb() __sync_synchronize()
-
-#endif
-
-#endif
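
Note: the barriers this file defines are only meaningful in matched pairs.
Below is a minimal, self-contained sketch of the publish/consume pattern
that smp_wmb()/smp_rmb() exist to support.  It is illustrative only: the
produce()/consume() helpers and the simplified macro selection are
assumptions for this sketch, not part of this commit or of QEMU.

#define barrier() asm volatile("" ::: "memory")

#if defined(__i386__) || defined(__x86_64__)
/* x86 stores and loads are ordered; a compiler barrier suffices here. */
#define smp_wmb() barrier()
#define smp_rmb() barrier()
#else
/* Portable fallback, mirroring the file's final #else branch. */
#define smp_wmb() __sync_synchronize()
#define smp_rmb() __sync_synchronize()
#endif

static int data;
static int ready;

/* Producer: make the payload visible before the flag announcing it. */
static void produce(int value)
{
    data = value;   /* write the payload */
    smp_wmb();      /* order the payload store before the flag store */
    ready = 1;      /* publish */
}

/* Consumer: check the flag, then read the payload it guards. */
static int consume(int *out)
{
    if (!ready) {
        return 0;   /* nothing published yet */
    }
    smp_rmb();      /* order the flag load before the payload load */
    *out = data;    /* sees the value written by produce() */
    return 1;
}

Drop either barrier and, on a weakly ordered host such as powerpc, the
consumer may observe ready == 1 while still reading a stale data.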