author     Tom Musta <tommusta@gmail.com>          2014-02-12 15:23:00 -0600
committer  Alexander Graf <agraf@suse.de>          2014-03-05 03:06:54 +0100
commit     aa9e930c8870d06a20b356785d3ec7d9a942a29f (patch)
tree       2bb553f66befa7e3d5809386847b53baf994d967 /target-ppc
parent     56eabc750862b985a6ddfc3905b534576eeee33e (diff)
download   qemu-aa9e930c8870d06a20b356785d3ec7d9a942a29f.tar.gz
target-ppc: Altivec 2.07: Change VMUL_DO to Support 64-bit Integers
The VMUL_DO macro provides support for the various vmule* and vmulo* instructions. These instructions multiply vector elements, producing products that are one size larger; e.g. vmuleub multiplies unsigned 8-bit elements and produces a 16-bit unsigned element.

The existing macro works correctly for the existing instructions (8-bit and 16-bit source elements) but does not work correctly for 32-bit source elements. This patch adds an explicit cast to the multiplicands, forcing them to be of the target element type. This is required for the forthcoming patches that add the vmul[eo][us]w instructions.

Signed-off-by: Tom Musta <tommusta@gmail.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
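Why the cast matters for 32-bit elements (a minimal standalone sketch, not part of the patch): for 8-bit and 16-bit multiplicands the usual C integer promotions already widen each operand to int, so the full product survives the assignment into the wider product element. For 32-bit multiplicands the multiplication is performed in 32-bit arithmetic and the high bits are discarded before the result is widened to 64 bits, unless one operand is cast first, which is exactly what the patched macro does.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t a = 0xFFFFFFFFu;
        uint32_t b = 2u;

        /* Without a cast: the product is computed as uint32_t * uint32_t,
         * wraps around, and is only then widened to 64 bits. */
        uint64_t truncated = a * b;                 /* 0x00000000FFFFFFFE */

        /* With a cast (as in the patched VMUL_DO): the operands are widened
         * first, so the multiplication itself happens in 64-bit arithmetic. */
        uint64_t full = (uint64_t)a * (uint64_t)b;  /* 0x00000001FFFFFFFE */

        printf("truncated=0x%016" PRIx64 " full=0x%016" PRIx64 "\n",
               truncated, full);
        return 0;
    }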
Diffstat (limited to 'target-ppc')
-rw-r--r--  target-ppc/int_helper.c | 26
1 file changed, 14 insertions, 12 deletions
diff --git a/target-ppc/int_helper.c b/target-ppc/int_helper.c
index 3e36c0aa3c..20d34e6231 100644
--- a/target-ppc/int_helper.c
+++ b/target-ppc/int_helper.c
@@ -983,28 +983,30 @@ void helper_vmsumuhs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
}
}
-#define VMUL_DO(name, mul_element, prod_element, evenp) \
+#define VMUL_DO(name, mul_element, prod_element, cast, evenp) \
void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
{ \
int i; \
\
VECTOR_FOR_INORDER_I(i, prod_element) { \
if (evenp) { \
- r->prod_element[i] = a->mul_element[i * 2 + HI_IDX] * \
- b->mul_element[i * 2 + HI_IDX]; \
+ r->prod_element[i] = \
+ (cast)a->mul_element[i * 2 + HI_IDX] * \
+ (cast)b->mul_element[i * 2 + HI_IDX]; \
} else { \
- r->prod_element[i] = a->mul_element[i * 2 + LO_IDX] * \
- b->mul_element[i * 2 + LO_IDX]; \
+ r->prod_element[i] = \
+ (cast)a->mul_element[i * 2 + LO_IDX] * \
+ (cast)b->mul_element[i * 2 + LO_IDX]; \
} \
} \
}
-#define VMUL(suffix, mul_element, prod_element) \
- VMUL_DO(mule##suffix, mul_element, prod_element, 1) \
- VMUL_DO(mulo##suffix, mul_element, prod_element, 0)
-VMUL(sb, s8, s16)
-VMUL(sh, s16, s32)
-VMUL(ub, u8, u16)
-VMUL(uh, u16, u32)
+#define VMUL(suffix, mul_element, prod_element, cast) \
+ VMUL_DO(mule##suffix, mul_element, prod_element, cast, 1) \
+ VMUL_DO(mulo##suffix, mul_element, prod_element, cast, 0)
+VMUL(sb, s8, s16, int16_t)
+VMUL(sh, s16, s32, int32_t)
+VMUL(ub, u8, u16, uint16_t)
+VMUL(uh, u16, u32, uint32_t)
#undef VMUL_DO
#undef VMUL
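With the cast parameter in place, the forthcoming vmul[eo][us]w instructions can reuse the same macro. A hypothetical instantiation (the exact form is up to the follow-on patches; shown here only as an assumption) would widen 32-bit source elements into 64-bit products:

    /* Hypothetical word-sized instantiations for vmul[eo][us]w (assumption,
     * not part of this patch): 32-bit elements, 64-bit products. */
    VMUL(sw, s32, s64, int64_t)
    VMUL(uw, u32, u64, uint64_t)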