summaryrefslogtreecommitdiff
path: root/target/arm/translate.c
diff options
context:
space:
mode:
author: Richard Henderson <richard.henderson@linaro.org> — 2018-01-25 11:45:28 +0000
committer: Peter Maydell <peter.maydell@linaro.org> — 2018-01-25 11:45:28 +0000
commit1a66ac61af45af04688d1d15896737310e366c06 (patch)
tree22288546b6028311bad12269bb49a40515bfa2cb /target/arm/translate.c
parentcf96a682481bbfb1e6b53d2436c3d51563d5dff8 (diff)
downloadqemu-1a66ac61af45af04688d1d15896737310e366c06.tar.gz
target/arm: Use pointers in crypto helpers
Rather than passing regnos to the helpers, pass pointers to the vector registers directly. This eliminates the need to pass in the environment pointer and reduces the number of places that directly access env->vfp.regs[].

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Message-id: 20180119045438.28582-3-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'target/arm/translate.c')
-rw-r--r--  target/arm/translate.c | 68
1 file changed, 38 insertions(+), 30 deletions(-)
diff --git a/target/arm/translate.c b/target/arm/translate.c
index 781be1e219..7b5db15861 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -1559,6 +1559,13 @@ static inline void neon_store_reg64(TCGv_i64 var, int reg)
tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
+static TCGv_ptr vfp_reg_ptr(bool dp, int reg)
+{
+ TCGv_ptr ret = tcg_temp_new_ptr();
+ tcg_gen_addi_ptr(ret, cpu_env, vfp_reg_offset(dp, reg));
+ return ret;
+}
+
#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
@@ -5597,6 +5604,7 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
int u;
uint32_t imm, mask;
TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
+ TCGv_ptr ptr1, ptr2, ptr3;
TCGv_i64 tmp64;
/* FIXME: this access check should not take precedence over UNDEF
@@ -5643,34 +5651,34 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
return 1;
}
- tmp = tcg_const_i32(rd);
- tmp2 = tcg_const_i32(rn);
- tmp3 = tcg_const_i32(rm);
+ ptr1 = vfp_reg_ptr(true, rd);
+ ptr2 = vfp_reg_ptr(true, rn);
+ ptr3 = vfp_reg_ptr(true, rm);
tmp4 = tcg_const_i32(size);
- gen_helper_crypto_sha1_3reg(cpu_env, tmp, tmp2, tmp3, tmp4);
+ gen_helper_crypto_sha1_3reg(ptr1, ptr2, ptr3, tmp4);
tcg_temp_free_i32(tmp4);
} else { /* SHA-256 */
if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256) || size == 3) {
return 1;
}
- tmp = tcg_const_i32(rd);
- tmp2 = tcg_const_i32(rn);
- tmp3 = tcg_const_i32(rm);
+ ptr1 = vfp_reg_ptr(true, rd);
+ ptr2 = vfp_reg_ptr(true, rn);
+ ptr3 = vfp_reg_ptr(true, rm);
switch (size) {
case 0:
- gen_helper_crypto_sha256h(cpu_env, tmp, tmp2, tmp3);
+ gen_helper_crypto_sha256h(ptr1, ptr2, ptr3);
break;
case 1:
- gen_helper_crypto_sha256h2(cpu_env, tmp, tmp2, tmp3);
+ gen_helper_crypto_sha256h2(ptr1, ptr2, ptr3);
break;
case 2:
- gen_helper_crypto_sha256su1(cpu_env, tmp, tmp2, tmp3);
+ gen_helper_crypto_sha256su1(ptr1, ptr2, ptr3);
break;
}
}
- tcg_temp_free_i32(tmp);
- tcg_temp_free_i32(tmp2);
- tcg_temp_free_i32(tmp3);
+ tcg_temp_free_ptr(ptr1);
+ tcg_temp_free_ptr(ptr2);
+ tcg_temp_free_ptr(ptr3);
return 0;
}
if (size == 3 && op != NEON_3R_LOGIC) {
@@ -7159,8 +7167,8 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
|| ((rm | rd) & 1)) {
return 1;
}
- tmp = tcg_const_i32(rd);
- tmp2 = tcg_const_i32(rm);
+ ptr1 = vfp_reg_ptr(true, rd);
+ ptr2 = vfp_reg_ptr(true, rm);
/* Bit 6 is the lowest opcode bit; it distinguishes between
* encryption (AESE/AESMC) and decryption (AESD/AESIMC)
@@ -7168,12 +7176,12 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
tmp3 = tcg_const_i32(extract32(insn, 6, 1));
if (op == NEON_2RM_AESE) {
- gen_helper_crypto_aese(cpu_env, tmp, tmp2, tmp3);
+ gen_helper_crypto_aese(ptr1, ptr2, tmp3);
} else {
- gen_helper_crypto_aesmc(cpu_env, tmp, tmp2, tmp3);
+ gen_helper_crypto_aesmc(ptr1, ptr2, tmp3);
}
- tcg_temp_free_i32(tmp);
- tcg_temp_free_i32(tmp2);
+ tcg_temp_free_ptr(ptr1);
+ tcg_temp_free_ptr(ptr2);
tcg_temp_free_i32(tmp3);
break;
case NEON_2RM_SHA1H:
@@ -7181,13 +7189,13 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
|| ((rm | rd) & 1)) {
return 1;
}
- tmp = tcg_const_i32(rd);
- tmp2 = tcg_const_i32(rm);
+ ptr1 = vfp_reg_ptr(true, rd);
+ ptr2 = vfp_reg_ptr(true, rm);
- gen_helper_crypto_sha1h(cpu_env, tmp, tmp2);
+ gen_helper_crypto_sha1h(ptr1, ptr2);
- tcg_temp_free_i32(tmp);
- tcg_temp_free_i32(tmp2);
+ tcg_temp_free_ptr(ptr1);
+ tcg_temp_free_ptr(ptr2);
break;
case NEON_2RM_SHA1SU1:
if ((rm | rd) & 1) {
@@ -7201,15 +7209,15 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
} else if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
return 1;
}
- tmp = tcg_const_i32(rd);
- tmp2 = tcg_const_i32(rm);
+ ptr1 = vfp_reg_ptr(true, rd);
+ ptr2 = vfp_reg_ptr(true, rm);
if (q) {
- gen_helper_crypto_sha256su0(cpu_env, tmp, tmp2);
+ gen_helper_crypto_sha256su0(ptr1, ptr2);
} else {
- gen_helper_crypto_sha1su1(cpu_env, tmp, tmp2);
+ gen_helper_crypto_sha1su1(ptr1, ptr2);
}
- tcg_temp_free_i32(tmp);
- tcg_temp_free_i32(tmp2);
+ tcg_temp_free_ptr(ptr1);
+ tcg_temp_free_ptr(ptr2);
break;
default:
elementwise: