From 4a2fdb78e794c1ad93aa9e160235d6a61a2125de Mon Sep 17 00:00:00 2001
From: Alistair Francis
Date: Tue, 15 Aug 2017 07:57:14 -0700
Subject: target/arm: Require alignment for load exclusive

According to the ARM ARM, exclusive loads require the same alignment as
exclusive stores.  Let's update the memops used for the load to match
that of the store.  This adds the alignment requirement to the memops.

Reviewed-by: Edgar E. Iglesias
Signed-off-by: Alistair Francis
Signed-off-by: Richard Henderson
Message-id: 20170815145714.17635-4-richard.henderson@linaro.org
[rth: Require 16-byte alignment for 64-bit LDXP.]
Signed-off-by: Richard Henderson
Signed-off-by: Peter Maydell
---
 target/arm/translate-a64.c | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index eac545e4f2..2200e25be0 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -1861,7 +1861,7 @@ static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
         g_assert(size >= 2);
         if (size == 2) {
             /* The pair must be single-copy atomic for the doubleword.  */
-            memop |= MO_64;
+            memop |= MO_64 | MO_ALIGN;
             tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx, memop);
             if (s->be_data == MO_LE) {
                 tcg_gen_extract_i64(cpu_reg(s, rt), cpu_exclusive_val, 0, 32);
@@ -1871,10 +1871,11 @@ static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                 tcg_gen_extract_i64(cpu_reg(s, rt2), cpu_exclusive_val, 0, 32);
             }
         } else {
-            /* The pair must be single-copy atomic for *each* doubleword,
-               but not the entire quadword.  */
+            /* The pair must be single-copy atomic for *each* doubleword, not
+               the entire quadword, however it must be quadword aligned.  */
             memop |= MO_64;
-            tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx, memop);
+            tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx,
+                                memop | MO_ALIGN_16);

             TCGv_i64 addr2 = tcg_temp_new_i64();
             tcg_gen_addi_i64(addr2, addr, 8);
@@ -1885,7 +1886,7 @@ static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
             tcg_gen_mov_i64(cpu_reg(s, rt2), cpu_exclusive_high);
         }
     } else {
-        memop |= size;
+        memop |= size | MO_ALIGN;
        tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx, memop);
         tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val);
     }
--
cgit v1.2.1
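
For reference, the following standalone sketch (not QEMU code) summarizes the
alignment the patched gen_load_exclusive() paths request, assuming MO_ALIGN
means "natural alignment of the access" and MO_ALIGN_16 means "16-byte
alignment", as used in the diff above.  The helper name required_alignment()
is hypothetical and exists only for illustration.

/* Illustrative only: alignment (in bytes) requested by the patched load paths. */
#include <stdio.h>

/* size is log2 of the register width in bytes (0..3), as in the translator. */
static unsigned required_alignment(int size, int is_pair)
{
    if (is_pair) {
        if (size == 2) {
            /* Pair of 32-bit registers: one 64-bit access, MO_64 | MO_ALIGN. */
            return 8;
        }
        /* Pair of 64-bit registers: two 64-bit accesses, the first with MO_ALIGN_16. */
        return 16;
    }
    /* Single register: memop |= size | MO_ALIGN, i.e. natural alignment. */
    return 1u << size;
}

int main(void)
{
    printf("byte exclusive load:        %u\n", required_alignment(0, 0)); /* 1  */
    printf("halfword exclusive load:    %u\n", required_alignment(1, 0)); /* 2  */
    printf("32-bit exclusive load:      %u\n", required_alignment(2, 0)); /* 4  */
    printf("64-bit exclusive load:      %u\n", required_alignment(3, 0)); /* 8  */
    printf("pair of 32-bit registers:   %u\n", required_alignment(2, 1)); /* 8  */
    printf("pair of 64-bit registers:   %u\n", required_alignment(3, 1)); /* 16 */
    return 0;
}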