path: root/target-ppc/translate.c
author     Tom Musta <tommusta@gmail.com>        2013-11-01 08:21:18 -0500
committer  Alexander Graf <agraf@suse.de>        2013-12-20 01:58:05 +0100
commit     be574920b1285c0505ad116795d3a646422a1b8e
tree       127e895ef8801b05270e73cf477fb71c130e3400  /target-ppc/translate.c
parent     df020ce07045413ab3205916a3cde64bb150694c
Add VSX Vector Move Instructions
This patch adds the vector move instructions:

- xvabsdp - Vector Absolute Value Double-Precision
- xvnabsdp - Vector Negative Absolute Value Double-Precision
- xvnegdp - Vector Negate Double-Precision
- xvcpsgndp - Vector Copy Sign Double-Precision
- xvabssp - Vector Absolute Value Single-Precision
- xvnabssp - Vector Negative Absolute Value Single-Precision
- xvnegsp - Vector Negate Single-Precision
- xvcpsgnsp - Vector Copy Sign Single-Precision

V3: Per Paolo Bonzini's suggestion, used a temporary for the sign mask and andc.

Signed-off-by: Tom Musta <tommusta@gmail.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
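
The four cases of the new macro reduce to sign-bit manipulation on each 64-bit half of VSR[XB] (xbh and xbl receive the same treatment). The stand-alone C sketch below mirrors those TCG sequences for the double-precision forms; it is illustrative only, and the SGN_MASK_DP value is an assumption here, since its definition lies outside this hunk.

#include <inttypes.h>
#include <stdio.h>

/* Assumed sign mask (defined elsewhere in translate.c, not in this hunk):
 * the sign bit of one double-precision value per 64-bit half. */
#define SGN_MASK_DP 0x8000000000000000ULL

/* Mirrors the TCG sequences emitted by VSX_VECTOR_MOVE for one half. */
static uint64_t do_abs(uint64_t xb, uint64_t sgm)  { return xb & ~sgm; } /* andc: clear sign */
static uint64_t do_nabs(uint64_t xb, uint64_t sgm) { return xb |  sgm; } /* or:   force sign */
static uint64_t do_neg(uint64_t xb, uint64_t sgm)  { return xb ^  sgm; } /* xor:  flip sign  */
static uint64_t do_cpsgn(uint64_t xa, uint64_t xb, uint64_t sgm)
{
    /* and + andc + or: sign taken from A, magnitude taken from B */
    return (xa & sgm) | (xb & ~sgm);
}

int main(void)
{
    uint64_t xb = 0xC000000000000000ULL;                                /* -2.0 as a double        */
    printf("abs  : %016" PRIx64 "\n", do_abs(xb, SGN_MASK_DP));         /* 4000000000000000 (+2.0) */
    printf("nabs : %016" PRIx64 "\n", do_nabs(xb, SGN_MASK_DP));        /* c000000000000000 (-2.0) */
    printf("neg  : %016" PRIx64 "\n", do_neg(xb, SGN_MASK_DP));         /* 4000000000000000 (+2.0) */
    printf("cpsgn: %016" PRIx64 "\n", do_cpsgn(0, xb, SGN_MASK_DP));    /* sign of +0.0, mag of -2.0 */
    return 0;
}

The cpsgn case also illustrates the V3 note above: the sign mask is loaded once into a temporary (sgm) and the magnitude is obtained with andc, so no second, inverted constant is needed.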
Diffstat (limited to 'target-ppc/translate.c')
-rw-r--r--  target-ppc/translate.c  71
1 file changed, 71 insertions, 0 deletions
diff --git a/target-ppc/translate.c b/target-ppc/translate.c
index 446484a5ba..d1e30ff34f 100644
--- a/target-ppc/translate.c
+++ b/target-ppc/translate.c
@@ -7205,6 +7205,69 @@ VSX_SCALAR_MOVE(xsnabsdp, OP_NABS, SGN_MASK_DP)
VSX_SCALAR_MOVE(xsnegdp, OP_NEG, SGN_MASK_DP)
VSX_SCALAR_MOVE(xscpsgndp, OP_CPSGN, SGN_MASK_DP)
+#define VSX_VECTOR_MOVE(name, op, sgn_mask) \
+static void glue(gen_, name)(DisasContext * ctx) \
+ { \
+ TCGv_i64 xbh, xbl, sgm; \
+ if (unlikely(!ctx->vsx_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VSXU); \
+ return; \
+ } \
+ xbh = tcg_temp_new_i64(); \
+ xbl = tcg_temp_new_i64(); \
+ sgm = tcg_temp_new_i64(); \
+ tcg_gen_mov_i64(xbh, cpu_vsrh(xB(ctx->opcode))); \
+ tcg_gen_mov_i64(xbl, cpu_vsrl(xB(ctx->opcode))); \
+ tcg_gen_movi_i64(sgm, sgn_mask); \
+ switch (op) { \
+ case OP_ABS: { \
+ tcg_gen_andc_i64(xbh, xbh, sgm); \
+ tcg_gen_andc_i64(xbl, xbl, sgm); \
+ break; \
+ } \
+ case OP_NABS: { \
+ tcg_gen_or_i64(xbh, xbh, sgm); \
+ tcg_gen_or_i64(xbl, xbl, sgm); \
+ break; \
+ } \
+ case OP_NEG: { \
+ tcg_gen_xor_i64(xbh, xbh, sgm); \
+ tcg_gen_xor_i64(xbl, xbl, sgm); \
+ break; \
+ } \
+ case OP_CPSGN: { \
+ TCGv_i64 xah = tcg_temp_new_i64(); \
+ TCGv_i64 xal = tcg_temp_new_i64(); \
+ tcg_gen_mov_i64(xah, cpu_vsrh(xA(ctx->opcode))); \
+ tcg_gen_mov_i64(xal, cpu_vsrl(xA(ctx->opcode))); \
+ tcg_gen_and_i64(xah, xah, sgm); \
+ tcg_gen_and_i64(xal, xal, sgm); \
+ tcg_gen_andc_i64(xbh, xbh, sgm); \
+ tcg_gen_andc_i64(xbl, xbl, sgm); \
+ tcg_gen_or_i64(xbh, xbh, xah); \
+ tcg_gen_or_i64(xbl, xbl, xal); \
+ tcg_temp_free_i64(xah); \
+ tcg_temp_free_i64(xal); \
+ break; \
+ } \
+ } \
+ tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), xbh); \
+ tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), xbl); \
+ tcg_temp_free_i64(xbh); \
+ tcg_temp_free_i64(xbl); \
+ tcg_temp_free_i64(sgm); \
+ }
+
+VSX_VECTOR_MOVE(xvabsdp, OP_ABS, SGN_MASK_DP)
+VSX_VECTOR_MOVE(xvnabsdp, OP_NABS, SGN_MASK_DP)
+VSX_VECTOR_MOVE(xvnegdp, OP_NEG, SGN_MASK_DP)
+VSX_VECTOR_MOVE(xvcpsgndp, OP_CPSGN, SGN_MASK_DP)
+VSX_VECTOR_MOVE(xvabssp, OP_ABS, SGN_MASK_SP)
+VSX_VECTOR_MOVE(xvnabssp, OP_NABS, SGN_MASK_SP)
+VSX_VECTOR_MOVE(xvnegsp, OP_NEG, SGN_MASK_SP)
+VSX_VECTOR_MOVE(xvcpsgnsp, OP_CPSGN, SGN_MASK_SP)
+
+
/*** SPE extension ***/
/* Register moves */
@@ -9699,6 +9762,14 @@ GEN_XX2FORM(xsnabsdp, 0x12, 0x16, PPC2_VSX),
GEN_XX2FORM(xsnegdp, 0x12, 0x17, PPC2_VSX),
GEN_XX3FORM(xscpsgndp, 0x00, 0x16, PPC2_VSX),
+GEN_XX2FORM(xvabsdp, 0x12, 0x1D, PPC2_VSX),
+GEN_XX2FORM(xvnabsdp, 0x12, 0x1E, PPC2_VSX),
+GEN_XX2FORM(xvnegdp, 0x12, 0x1F, PPC2_VSX),
+GEN_XX3FORM(xvcpsgndp, 0x00, 0x1E, PPC2_VSX),
+GEN_XX2FORM(xvabssp, 0x12, 0x19, PPC2_VSX),
+GEN_XX2FORM(xvnabssp, 0x12, 0x1A, PPC2_VSX),
+GEN_XX2FORM(xvnegsp, 0x12, 0x1B, PPC2_VSX),
+GEN_XX3FORM(xvcpsgnsp, 0x00, 0x1A, PPC2_VSX),
GEN_XX3FORM_DM(xxpermdi, 0x08, 0x01),
#undef GEN_SPE
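
A usage note on the single-precision forms added at the end of the opcode table: each 64-bit half of a VSR holds two 32-bit words, so one 64-bit op per half covers both words, provided the sign mask carries a bit per word. A minimal sketch for xvnegsp on one half, assuming SGN_MASK_SP is 0x8000000080000000ull (its definition is not part of this diff):

#include <inttypes.h>
#include <stdio.h>

#define SGN_MASK_SP 0x8000000080000000ULL   /* assumed: one sign bit per 32-bit word */

int main(void)
{
    /* One 64-bit half holding two single-precision values: 1.5f and -2.25f. */
    uint64_t xbh = 0x3FC00000C0100000ULL;

    /* xvnegsp semantics: a single xor flips the sign of both words at once. */
    uint64_t neg = xbh ^ SGN_MASK_SP;

    printf("before: %016" PRIx64 "\n", xbh);   /* 3fc00000c0100000 ( 1.5f, -2.25f) */
    printf("after : %016" PRIx64 "\n", neg);   /* bfc0000040100000 (-1.5f,  2.25f) */
    return 0;
}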