author     Benjamin Herrenschmidt <benh@kernel.crashing.org>    2012-06-27 14:50:47 +1000
committer  Anthony Liguori <aliguori@us.ibm.com>                2012-06-27 16:33:26 -0500
commit     7a0bac4da9c6a2e36d388412f3b4074b10429e8e (patch)
tree       314c04cb25abc8f6144f2f9036f5efbd758c2864
parent     edded454067bd9bda8a4a2a3ef54ae19474407da (diff)
download   qemu-7a0bac4da9c6a2e36d388412f3b4074b10429e8e.tar.gz
Add a memory barrier to DMA functions
The emulated devices can run simultaneously with the guest, so we need to be careful with the ordering of loads and stores they perform on guest system memory, which must be observed in the right order by the guest operating system.

This adds a barrier call to the basic DMA read/write ops. It is currently implemented as an smp_mb(), but could later be improved for more fine-grained control of barriers.

Additionally, a _relaxed() variant of the accessors is provided to easily convert devices that are performance sensitive and would be negatively impacted by the change.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
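To make the ordering problem described above concrete, here is a minimal, hypothetical sketch of a device model built on the accessors this patch introduces. The device, its fields and mydev_complete() are invented for illustration and are not part of the patch:

#include "dma.h"   /* DMAContext and the dma_memory_* accessors added here */

/* Invented device state, for illustration only. */
typedef struct MyDevState {
    DMAContext *dma;
    dma_addr_t  data_addr;    /* guest buffer the device fills in */
    dma_addr_t  status_addr;  /* guest word the driver polls for completion */
} MyDevState;

static void mydev_complete(MyDevState *s, const uint8_t *payload, dma_addr_t len)
{
    uint32_t done = cpu_to_le32(1);

    /* Hot-path store: the relaxed variant skips the barrier. */
    dma_memory_write_relaxed(s->dma, s->data_addr, payload, len);

    /*
     * Ordered store: dma_memory_write() goes through dma_barrier() first,
     * so a guest VCPU that sees the status word become 1 is also guaranteed
     * to see the payload written above.
     */
    dma_memory_write(s->dma, s->status_addr, &done, sizeof(done));
}

The point of the sketch is that the plain accessors get the barrier for free, while the _relaxed() forms leave any required ordering up to the device model.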
-rw-r--r--  dma-helpers.c |  2
-rw-r--r--  dma.h         | 53
2 files changed, 53 insertions, 2 deletions
diff --git a/dma-helpers.c b/dma-helpers.c
index 2e09ceb912..35cb500581 100644
--- a/dma-helpers.c
+++ b/dma-helpers.c
@@ -31,6 +31,8 @@ static void do_dma_memory_set(dma_addr_t addr, uint8_t c, dma_addr_t len)
 int dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c, dma_addr_t len)
 {
+    dma_barrier(dma, DMA_DIRECTION_FROM_DEVICE);
+
     if (dma_has_iommu(dma)) {
         return iommu_dma_memory_set(dma, addr, c, len);
     }
diff --git a/dma.h b/dma.h
index f52a656fd5..acacf1bf27 100644
--- a/dma.h
+++ b/dma.h
@@ -13,6 +13,7 @@
 #include <stdio.h>
 #include "hw/hw.h"
 #include "block.h"
+#include "kvm.h"
 
 typedef struct DMAContext DMAContext;
 typedef struct ScatterGatherEntry ScatterGatherEntry;
@@ -65,6 +66,31 @@ struct DMAContext {
     DMAUnmapFunc *unmap;
 };
 
+static inline void dma_barrier(DMAContext *dma, DMADirection dir)
+{
+    /*
+     * This is called before DMA read and write operations
+     * unless the _relaxed form is used and is responsible
+     * for providing some sane ordering of accesses vs
+     * concurrently running VCPUs.
+     *
+     * Users of map(), unmap() or lower level st/ld_*
+     * operations are responsible for providing their own
+     * ordering via barriers.
+     *
+     * This primitive implementation does a simple smp_mb()
+     * before each operation which provides pretty much full
+     * ordering.
+     *
+     * A smarter implementation can be devised if needed to
+     * use lighter barriers based on the direction of the
+     * transfer, the DMA context, etc...
+     */
+    if (kvm_enabled()) {
+        smp_mb();
+    }
+}
+
 static inline bool dma_has_iommu(DMAContext *dma)
 {
     return !!dma;
@@ -88,8 +114,9 @@ static inline bool dma_memory_valid(DMAContext *dma,
 int iommu_dma_memory_rw(DMAContext *dma, dma_addr_t addr,
                         void *buf, dma_addr_t len, DMADirection dir);
 
-static inline int dma_memory_rw(DMAContext *dma, dma_addr_t addr,
-                                void *buf, dma_addr_t len, DMADirection dir)
+static inline int dma_memory_rw_relaxed(DMAContext *dma, dma_addr_t addr,
+                                        void *buf, dma_addr_t len,
+                                        DMADirection dir)
 {
     if (!dma_has_iommu(dma)) {
         /* Fast-path for no IOMMU */
@@ -101,6 +128,28 @@ static inline int dma_memory_rw(DMAContext *dma, dma_addr_t addr,
     }
 }
 
+static inline int dma_memory_read_relaxed(DMAContext *dma, dma_addr_t addr,
+                                          void *buf, dma_addr_t len)
+{
+    return dma_memory_rw_relaxed(dma, addr, buf, len, DMA_DIRECTION_TO_DEVICE);
+}
+
+static inline int dma_memory_write_relaxed(DMAContext *dma, dma_addr_t addr,
+                                           const void *buf, dma_addr_t len)
+{
+    return dma_memory_rw_relaxed(dma, addr, (void *)buf, len,
+                                 DMA_DIRECTION_FROM_DEVICE);
+}
+
+static inline int dma_memory_rw(DMAContext *dma, dma_addr_t addr,
+                                void *buf, dma_addr_t len,
+                                DMADirection dir)
+{
+    dma_barrier(dma, dir);
+
+    return dma_memory_rw_relaxed(dma, addr, buf, len, dir);
+}
+
 static inline int dma_memory_read(DMAContext *dma, dma_addr_t addr,
                                   void *buf, dma_addr_t len)
 {
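The _relaxed() escape hatch introduced above is aimed at devices whose hot paths issue many back-to-back DMA accesses and would otherwise pay one smp_mb() per access through the ordered wrappers. A hedged sketch of what such a conversion might look like follows; the descriptor layout, field names and mydev_pull_ring() are invented for illustration and are not part of this patch:

#include "dma.h"

/* Invented descriptor and device layout, for illustration only. */
typedef struct MyDesc {
    uint64_t addr;
    uint32_t len;
    uint32_t flags;
} MyDesc;

typedef struct MyRingDev {
    DMAContext *dma;
    dma_addr_t  ring_base;   /* guest-physical base of the descriptor ring */
} MyRingDev;

/* Fetch a batch of descriptors from a guest ring; assumes count >= 1. */
static void mydev_pull_ring(MyRingDev *s, MyDesc *out, int count)
{
    int i;

    /*
     * The first access uses the ordered wrapper and therefore goes through
     * dma_barrier(); the remaining accesses in the batch use the relaxed
     * form, so the batch costs one smp_mb() instead of one per descriptor.
     * Any finer-grained ordering the device needs is then its own
     * responsibility, as the dma_barrier() comment above points out.
     */
    dma_memory_read(s->dma, s->ring_base, &out[0], sizeof(MyDesc));

    for (i = 1; i < count; i++) {
        dma_memory_read_relaxed(s->dma, s->ring_base + i * sizeof(MyDesc),
                                &out[i], sizeof(MyDesc));
    }
}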