author     Peter Maydell <peter.maydell@linaro.org>    2011-04-04 11:46:33 +0100
committer  Aurelien Jarno <aurelien@aurel32.net>       2011-04-04 20:18:06 +0200
commit     2a3f75b42ac255be09ec2939b96c549ec830efd3 (patch)
tree       817de30f8c8f20ce4248297931c7d6f1b6b5cddc /target-arm/neon_helper.c
parent     348883d4828d7434e1053407818598f7fb15e594 (diff)
download   qemu-2a3f75b42ac255be09ec2939b96c549ec830efd3.tar.gz
target-arm: Use global env in neon_helper.c helpers
Use the global 'env' variable in the helper functions in neon_helper.c.
This means we don't need to pass env as an argument to them any more.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Aurelien Jarno <aurelien@aurel32.net>
Diffstat (limited to 'target-arm/neon_helper.c')
-rw-r--r--  target-arm/neon_helper.c | 144
1 file changed, 70 insertions, 74 deletions
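Not part of the patch, but a minimal sketch of the pattern the diff applies throughout (the function body is taken from neon_qadd_u32 below; the names *_with_env / *_global_env are illustrative, not the real HELPER-generated symbols, and the AREG0 detail is an assumption about how exec.h declares the global env):

/* "Before" form: generated code passes the CPU state explicitly. */
uint32_t neon_qadd_u32_with_env(CPUState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (res < a) {
        SET_QC();   /* sets the saturation (QC) flag by writing through env */
        res = ~0;
    }
    return res;
}

/* "After" form: the body is unchanged; env is dropped from the signature
 * because exec.h (assumed here) declares a global 'env' pinned to a host
 * register (AREG0), so SET_QC() still resolves without the parameter. */
uint32_t neon_qadd_u32_global_env(uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (res < a) {
        SET_QC();
        res = ~0;
    }
    return res;
}

The same reasoning covers the zip/unzip helpers at the end of the diff: they keep reading and writing env->vfp.regs directly, and with the global env they no longer need CPUState passed in either.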
diff --git a/target-arm/neon_helper.c b/target-arm/neon_helper.c
index 71f1a7ead0..315e69337e 100644
--- a/target-arm/neon_helper.c
+++ b/target-arm/neon_helper.c
@@ -10,7 +10,7 @@
#include <stdio.h>
#include "cpu.h"
-#include "exec-all.h"
+#include "exec.h"
#include "helpers.h"
#define SIGNBIT (uint32_t)0x80000000
@@ -116,10 +116,6 @@ NEON_TYPE1(u32, uint32_t)
uint32_t HELPER(glue(neon_,name))(uint32_t arg1, uint32_t arg2) \
NEON_VOP_BODY(vtype, n)
-#define NEON_VOP_ENV(name, vtype, n) \
-uint32_t HELPER(glue(neon_,name))(CPUState *env, uint32_t arg1, uint32_t arg2) \
-NEON_VOP_BODY(vtype, n)
-
/* Pairwise operations. */
/* For 32-bit elements each segment only contains a single element, so
the elementwise and pairwise operations are the same. */
@@ -168,14 +164,14 @@ uint32_t HELPER(glue(neon_,name))(uint32_t arg) \
dest = tmp; \
}} while(0)
#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint8_t)
-NEON_VOP_ENV(qadd_u8, neon_u8, 4)
+NEON_VOP(qadd_u8, neon_u8, 4)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint16_t)
-NEON_VOP_ENV(qadd_u16, neon_u16, 2)
+NEON_VOP(qadd_u16, neon_u16, 2)
#undef NEON_FN
#undef NEON_USAT
-uint32_t HELPER(neon_qadd_u32)(CPUState *env, uint32_t a, uint32_t b)
+uint32_t HELPER(neon_qadd_u32)(uint32_t a, uint32_t b)
{
uint32_t res = a + b;
if (res < a) {
@@ -185,7 +181,7 @@ uint32_t HELPER(neon_qadd_u32)(CPUState *env, uint32_t a, uint32_t b)
return res;
}
-uint64_t HELPER(neon_qadd_u64)(CPUState *env, uint64_t src1, uint64_t src2)
+uint64_t HELPER(neon_qadd_u64)(uint64_t src1, uint64_t src2)
{
uint64_t res;
@@ -210,14 +206,14 @@ uint64_t HELPER(neon_qadd_u64)(CPUState *env, uint64_t src1, uint64_t src2)
dest = tmp; \
} while(0)
#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int8_t)
-NEON_VOP_ENV(qadd_s8, neon_s8, 4)
+NEON_VOP(qadd_s8, neon_s8, 4)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int16_t)
-NEON_VOP_ENV(qadd_s16, neon_s16, 2)
+NEON_VOP(qadd_s16, neon_s16, 2)
#undef NEON_FN
#undef NEON_SSAT
-uint32_t HELPER(neon_qadd_s32)(CPUState *env, uint32_t a, uint32_t b)
+uint32_t HELPER(neon_qadd_s32)(uint32_t a, uint32_t b)
{
uint32_t res = a + b;
if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
@@ -227,7 +223,7 @@ uint32_t HELPER(neon_qadd_s32)(CPUState *env, uint32_t a, uint32_t b)
return res;
}
-uint64_t HELPER(neon_qadd_s64)(CPUState *env, uint64_t src1, uint64_t src2)
+uint64_t HELPER(neon_qadd_s64)(uint64_t src1, uint64_t src2)
{
uint64_t res;
@@ -248,14 +244,14 @@ uint64_t HELPER(neon_qadd_s64)(CPUState *env, uint64_t src1, uint64_t src2)
dest = tmp; \
}} while(0)
#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint8_t)
-NEON_VOP_ENV(qsub_u8, neon_u8, 4)
+NEON_VOP(qsub_u8, neon_u8, 4)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint16_t)
-NEON_VOP_ENV(qsub_u16, neon_u16, 2)
+NEON_VOP(qsub_u16, neon_u16, 2)
#undef NEON_FN
#undef NEON_USAT
-uint32_t HELPER(neon_qsub_u32)(CPUState *env, uint32_t a, uint32_t b)
+uint32_t HELPER(neon_qsub_u32)(uint32_t a, uint32_t b)
{
uint32_t res = a - b;
if (res > a) {
@@ -265,7 +261,7 @@ uint32_t HELPER(neon_qsub_u32)(CPUState *env, uint32_t a, uint32_t b)
return res;
}
-uint64_t HELPER(neon_qsub_u64)(CPUState *env, uint64_t src1, uint64_t src2)
+uint64_t HELPER(neon_qsub_u64)(uint64_t src1, uint64_t src2)
{
uint64_t res;
@@ -291,14 +287,14 @@ uint64_t HELPER(neon_qsub_u64)(CPUState *env, uint64_t src1, uint64_t src2)
dest = tmp; \
} while(0)
#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int8_t)
-NEON_VOP_ENV(qsub_s8, neon_s8, 4)
+NEON_VOP(qsub_s8, neon_s8, 4)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int16_t)
-NEON_VOP_ENV(qsub_s16, neon_s16, 2)
+NEON_VOP(qsub_s16, neon_s16, 2)
#undef NEON_FN
#undef NEON_SSAT
-uint32_t HELPER(neon_qsub_s32)(CPUState *env, uint32_t a, uint32_t b)
+uint32_t HELPER(neon_qsub_s32)(uint32_t a, uint32_t b)
{
uint32_t res = a - b;
if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
@@ -308,7 +304,7 @@ uint32_t HELPER(neon_qsub_s32)(CPUState *env, uint32_t a, uint32_t b)
return res;
}
-uint64_t HELPER(neon_qsub_s64)(CPUState *env, uint64_t src1, uint64_t src2)
+uint64_t HELPER(neon_qsub_s64)(uint64_t src1, uint64_t src2)
{
uint64_t res;
@@ -659,12 +655,12 @@ uint64_t HELPER(neon_rshl_u64)(uint64_t val, uint64_t shiftop)
dest = ~0; \
} \
}} while (0)
-NEON_VOP_ENV(qshl_u8, neon_u8, 4)
-NEON_VOP_ENV(qshl_u16, neon_u16, 2)
-NEON_VOP_ENV(qshl_u32, neon_u32, 1)
+NEON_VOP(qshl_u8, neon_u8, 4)
+NEON_VOP(qshl_u16, neon_u16, 2)
+NEON_VOP(qshl_u32, neon_u32, 1)
#undef NEON_FN
-uint64_t HELPER(neon_qshl_u64)(CPUState *env, uint64_t val, uint64_t shiftop)
+uint64_t HELPER(neon_qshl_u64)(uint64_t val, uint64_t shiftop)
{
int8_t shift = (int8_t)shiftop;
if (shift >= 64) {
@@ -714,12 +710,12 @@ uint64_t HELPER(neon_qshl_u64)(CPUState *env, uint64_t val, uint64_t shiftop)
} \
} \
}} while (0)
-NEON_VOP_ENV(qshl_s8, neon_s8, 4)
-NEON_VOP_ENV(qshl_s16, neon_s16, 2)
-NEON_VOP_ENV(qshl_s32, neon_s32, 1)
+NEON_VOP(qshl_s8, neon_s8, 4)
+NEON_VOP(qshl_s16, neon_s16, 2)
+NEON_VOP(qshl_s32, neon_s32, 1)
#undef NEON_FN
-uint64_t HELPER(neon_qshl_s64)(CPUState *env, uint64_t valop, uint64_t shiftop)
+uint64_t HELPER(neon_qshl_s64)(uint64_t valop, uint64_t shiftop)
{
int8_t shift = (uint8_t)shiftop;
int64_t val = valop;
@@ -769,26 +765,26 @@ uint64_t HELPER(neon_qshl_s64)(CPUState *env, uint64_t valop, uint64_t shiftop)
} \
} \
}} while (0)
-NEON_VOP_ENV(qshlu_s8, neon_u8, 4)
-NEON_VOP_ENV(qshlu_s16, neon_u16, 2)
+NEON_VOP(qshlu_s8, neon_u8, 4)
+NEON_VOP(qshlu_s16, neon_u16, 2)
#undef NEON_FN
-uint32_t HELPER(neon_qshlu_s32)(CPUState *env, uint32_t valop, uint32_t shiftop)
+uint32_t HELPER(neon_qshlu_s32)(uint32_t valop, uint32_t shiftop)
{
if ((int32_t)valop < 0) {
SET_QC();
return 0;
}
- return helper_neon_qshl_u32(env, valop, shiftop);
+ return helper_neon_qshl_u32(valop, shiftop);
}
-uint64_t HELPER(neon_qshlu_s64)(CPUState *env, uint64_t valop, uint64_t shiftop)
+uint64_t HELPER(neon_qshlu_s64)(uint64_t valop, uint64_t shiftop)
{
if ((int64_t)valop < 0) {
SET_QC();
return 0;
}
- return helper_neon_qshl_u64(env, valop, shiftop);
+ return helper_neon_qshl_u64(valop, shiftop);
}
/* FIXME: This is wrong. */
@@ -815,13 +811,13 @@ uint64_t HELPER(neon_qshlu_s64)(CPUState *env, uint64_t valop, uint64_t shiftop)
dest = ~0; \
} \
}} while (0)
-NEON_VOP_ENV(qrshl_u8, neon_u8, 4)
-NEON_VOP_ENV(qrshl_u16, neon_u16, 2)
+NEON_VOP(qrshl_u8, neon_u8, 4)
+NEON_VOP(qrshl_u16, neon_u16, 2)
#undef NEON_FN
/* The addition of the rounding constant may overflow, so we use an
* intermediate 64 bits accumulator. */
-uint32_t HELPER(neon_qrshl_u32)(CPUState *env, uint32_t val, uint32_t shiftop)
+uint32_t HELPER(neon_qrshl_u32)(uint32_t val, uint32_t shiftop)
{
uint32_t dest;
int8_t shift = (int8_t)shiftop;
@@ -851,7 +847,7 @@ uint32_t HELPER(neon_qrshl_u32)(CPUState *env, uint32_t val, uint32_t shiftop)
/* Handling addition overflow with 64 bits inputs values is more
* tricky than with 32 bits values. */
-uint64_t HELPER(neon_qrshl_u64)(CPUState *env, uint64_t val, uint64_t shiftop)
+uint64_t HELPER(neon_qrshl_u64)(uint64_t val, uint64_t shiftop)
{
int8_t shift = (int8_t)shiftop;
if (shift >= 64) {
@@ -912,13 +908,13 @@ uint64_t HELPER(neon_qrshl_u64)(CPUState *env, uint64_t val, uint64_t shiftop)
} \
} \
}} while (0)
-NEON_VOP_ENV(qrshl_s8, neon_s8, 4)
-NEON_VOP_ENV(qrshl_s16, neon_s16, 2)
+NEON_VOP(qrshl_s8, neon_s8, 4)
+NEON_VOP(qrshl_s16, neon_s16, 2)
#undef NEON_FN
/* The addition of the rounding constant may overflow, so we use an
* intermediate 64 bits accumulator. */
-uint32_t HELPER(neon_qrshl_s32)(CPUState *env, uint32_t valop, uint32_t shiftop)
+uint32_t HELPER(neon_qrshl_s32)(uint32_t valop, uint32_t shiftop)
{
int32_t dest;
int32_t val = (int32_t)valop;
@@ -947,7 +943,7 @@ uint32_t HELPER(neon_qrshl_s32)(CPUState *env, uint32_t valop, uint32_t shiftop)
/* Handling addition overflow with 64 bits inputs values is more
* tricky than with 32 bits values. */
-uint64_t HELPER(neon_qrshl_s64)(CPUState *env, uint64_t valop, uint64_t shiftop)
+uint64_t HELPER(neon_qrshl_s64)(uint64_t valop, uint64_t shiftop)
{
int8_t shift = (uint8_t)shiftop;
int64_t val = valop;
@@ -1156,10 +1152,10 @@ uint32_t HELPER(neon_cnt_u8)(uint32_t x)
dest = tmp >> 16; \
} while(0)
#define NEON_FN(dest, src1, src2) NEON_QDMULH16(dest, src1, src2, 0)
-NEON_VOP_ENV(qdmulh_s16, neon_s16, 2)
+NEON_VOP(qdmulh_s16, neon_s16, 2)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) NEON_QDMULH16(dest, src1, src2, 1)
-NEON_VOP_ENV(qrdmulh_s16, neon_s16, 2)
+NEON_VOP(qrdmulh_s16, neon_s16, 2)
#undef NEON_FN
#undef NEON_QDMULH16
@@ -1182,10 +1178,10 @@ NEON_VOP_ENV(qrdmulh_s16, neon_s16, 2)
dest = tmp >> 32; \
} while(0)
#define NEON_FN(dest, src1, src2) NEON_QDMULH32(dest, src1, src2, 0)
-NEON_VOP_ENV(qdmulh_s32, neon_s32, 1)
+NEON_VOP(qdmulh_s32, neon_s32, 1)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) NEON_QDMULH32(dest, src1, src2, 1)
-NEON_VOP_ENV(qrdmulh_s32, neon_s32, 1)
+NEON_VOP(qrdmulh_s32, neon_s32, 1)
#undef NEON_FN
#undef NEON_QDMULH32
@@ -1226,7 +1222,7 @@ uint32_t HELPER(neon_narrow_round_high_u16)(uint64_t x)
return ((x >> 16) & 0xffff) | ((x >> 32) & 0xffff0000);
}
-uint32_t HELPER(neon_unarrow_sat8)(CPUState *env, uint64_t x)
+uint32_t HELPER(neon_unarrow_sat8)(uint64_t x)
{
uint16_t s;
uint8_t d;
@@ -1253,7 +1249,7 @@ uint32_t HELPER(neon_unarrow_sat8)(CPUState *env, uint64_t x)
return res;
}
-uint32_t HELPER(neon_narrow_sat_u8)(CPUState *env, uint64_t x)
+uint32_t HELPER(neon_narrow_sat_u8)(uint64_t x)
{
uint16_t s;
uint8_t d;
@@ -1276,7 +1272,7 @@ uint32_t HELPER(neon_narrow_sat_u8)(CPUState *env, uint64_t x)
return res;
}
-uint32_t HELPER(neon_narrow_sat_s8)(CPUState *env, uint64_t x)
+uint32_t HELPER(neon_narrow_sat_s8)(uint64_t x)
{
int16_t s;
uint8_t d;
@@ -1299,7 +1295,7 @@ uint32_t HELPER(neon_narrow_sat_s8)(CPUState *env, uint64_t x)
return res;
}
-uint32_t HELPER(neon_unarrow_sat16)(CPUState *env, uint64_t x)
+uint32_t HELPER(neon_unarrow_sat16)(uint64_t x)
{
uint32_t high;
uint32_t low;
@@ -1322,7 +1318,7 @@ uint32_t HELPER(neon_unarrow_sat16)(CPUState *env, uint64_t x)
return low | (high << 16);
}
-uint32_t HELPER(neon_narrow_sat_u16)(CPUState *env, uint64_t x)
+uint32_t HELPER(neon_narrow_sat_u16)(uint64_t x)
{
uint32_t high;
uint32_t low;
@@ -1339,7 +1335,7 @@ uint32_t HELPER(neon_narrow_sat_u16)(CPUState *env, uint64_t x)
return low | (high << 16);
}
-uint32_t HELPER(neon_narrow_sat_s16)(CPUState *env, uint64_t x)
+uint32_t HELPER(neon_narrow_sat_s16)(uint64_t x)
{
int32_t low;
int32_t high;
@@ -1356,7 +1352,7 @@ uint32_t HELPER(neon_narrow_sat_s16)(CPUState *env, uint64_t x)
return (uint16_t)low | (high << 16);
}
-uint32_t HELPER(neon_unarrow_sat32)(CPUState *env, uint64_t x)
+uint32_t HELPER(neon_unarrow_sat32)(uint64_t x)
{
if (x & 0x8000000000000000ull) {
SET_QC();
@@ -1369,7 +1365,7 @@ uint32_t HELPER(neon_unarrow_sat32)(CPUState *env, uint64_t x)
return x;
}
-uint32_t HELPER(neon_narrow_sat_u32)(CPUState *env, uint64_t x)
+uint32_t HELPER(neon_narrow_sat_u32)(uint64_t x)
{
if (x > 0xffffffffu) {
SET_QC();
@@ -1378,7 +1374,7 @@ uint32_t HELPER(neon_narrow_sat_u32)(CPUState *env, uint64_t x)
return x;
}
-uint32_t HELPER(neon_narrow_sat_s32)(CPUState *env, uint64_t x)
+uint32_t HELPER(neon_narrow_sat_s32)(uint64_t x)
{
if ((int64_t)x != (int32_t)x) {
SET_QC();
@@ -1485,7 +1481,7 @@ uint64_t HELPER(neon_subl_u32)(uint64_t a, uint64_t b)
return (a - b) ^ mask;
}
-uint64_t HELPER(neon_addl_saturate_s32)(CPUState *env, uint64_t a, uint64_t b)
+uint64_t HELPER(neon_addl_saturate_s32)(uint64_t a, uint64_t b)
{
uint32_t x, y;
uint32_t low, high;
@@ -1507,7 +1503,7 @@ uint64_t HELPER(neon_addl_saturate_s32)(CPUState *env, uint64_t a, uint64_t b)
return low | ((uint64_t)high << 32);
}
-uint64_t HELPER(neon_addl_saturate_s64)(CPUState *env, uint64_t a, uint64_t b)
+uint64_t HELPER(neon_addl_saturate_s64)(uint64_t a, uint64_t b)
{
uint64_t result;
@@ -1679,7 +1675,7 @@ uint64_t HELPER(neon_negl_u64)(uint64_t x)
} else if (x < 0) { \
x = -x; \
}} while (0)
-uint32_t HELPER(neon_qabs_s8)(CPUState *env, uint32_t x)
+uint32_t HELPER(neon_qabs_s8)(uint32_t x)
{
neon_s8 vec;
NEON_UNPACK(neon_s8, vec, x);
@@ -1699,7 +1695,7 @@ uint32_t HELPER(neon_qabs_s8)(CPUState *env, uint32_t x)
} else { \
x = -x; \
}} while (0)
-uint32_t HELPER(neon_qneg_s8)(CPUState *env, uint32_t x)
+uint32_t HELPER(neon_qneg_s8)(uint32_t x)
{
neon_s8 vec;
NEON_UNPACK(neon_s8, vec, x);
@@ -1719,7 +1715,7 @@ uint32_t HELPER(neon_qneg_s8)(CPUState *env, uint32_t x)
} else if (x < 0) { \
x = -x; \
}} while (0)
-uint32_t HELPER(neon_qabs_s16)(CPUState *env, uint32_t x)
+uint32_t HELPER(neon_qabs_s16)(uint32_t x)
{
neon_s16 vec;
NEON_UNPACK(neon_s16, vec, x);
@@ -1737,7 +1733,7 @@ uint32_t HELPER(neon_qabs_s16)(CPUState *env, uint32_t x)
} else { \
x = -x; \
}} while (0)
-uint32_t HELPER(neon_qneg_s16)(CPUState *env, uint32_t x)
+uint32_t HELPER(neon_qneg_s16)(uint32_t x)
{
neon_s16 vec;
NEON_UNPACK(neon_s16, vec, x);
@@ -1748,7 +1744,7 @@ uint32_t HELPER(neon_qneg_s16)(CPUState *env, uint32_t x)
}
#undef DO_QNEG16
-uint32_t HELPER(neon_qabs_s32)(CPUState *env, uint32_t x)
+uint32_t HELPER(neon_qabs_s32)(uint32_t x)
{
if (x == SIGNBIT) {
SET_QC();
@@ -1759,7 +1755,7 @@ uint32_t HELPER(neon_qabs_s32)(CPUState *env, uint32_t x)
return x;
}
-uint32_t HELPER(neon_qneg_s32)(CPUState *env, uint32_t x)
+uint32_t HELPER(neon_qneg_s32)(uint32_t x)
{
if (x == SIGNBIT) {
SET_QC();
@@ -1842,7 +1838,7 @@ uint32_t HELPER(neon_acgt_f32)(uint32_t a, uint32_t b)
#define ELEM(V, N, SIZE) (((V) >> ((N) * (SIZE))) & ((1ull << (SIZE)) - 1))
-void HELPER(neon_qunzip8)(CPUState *env, uint32_t rd, uint32_t rm)
+void HELPER(neon_qunzip8)(uint32_t rd, uint32_t rm)
{
uint64_t zm0 = float64_val(env->vfp.regs[rm]);
uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
@@ -1870,7 +1866,7 @@ void HELPER(neon_qunzip8)(CPUState *env, uint32_t rd, uint32_t rm)
env->vfp.regs[rd + 1] = make_float64(d1);
}
-void HELPER(neon_qunzip16)(CPUState *env, uint32_t rd, uint32_t rm)
+void HELPER(neon_qunzip16)(uint32_t rd, uint32_t rm)
{
uint64_t zm0 = float64_val(env->vfp.regs[rm]);
uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
@@ -1890,7 +1886,7 @@ void HELPER(neon_qunzip16)(CPUState *env, uint32_t rd, uint32_t rm)
env->vfp.regs[rd + 1] = make_float64(d1);
}
-void HELPER(neon_qunzip32)(CPUState *env, uint32_t rd, uint32_t rm)
+void HELPER(neon_qunzip32)(uint32_t rd, uint32_t rm)
{
uint64_t zm0 = float64_val(env->vfp.regs[rm]);
uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
@@ -1906,7 +1902,7 @@ void HELPER(neon_qunzip32)(CPUState *env, uint32_t rd, uint32_t rm)
env->vfp.regs[rd + 1] = make_float64(d1);
}
-void HELPER(neon_unzip8)(CPUState *env, uint32_t rd, uint32_t rm)
+void HELPER(neon_unzip8)(uint32_t rd, uint32_t rm)
{
uint64_t zm = float64_val(env->vfp.regs[rm]);
uint64_t zd = float64_val(env->vfp.regs[rd]);
@@ -1922,7 +1918,7 @@ void HELPER(neon_unzip8)(CPUState *env, uint32_t rd, uint32_t rm)
env->vfp.regs[rd] = make_float64(d0);
}
-void HELPER(neon_unzip16)(CPUState *env, uint32_t rd, uint32_t rm)
+void HELPER(neon_unzip16)(uint32_t rd, uint32_t rm)
{
uint64_t zm = float64_val(env->vfp.regs[rm]);
uint64_t zd = float64_val(env->vfp.regs[rd]);
@@ -1934,7 +1930,7 @@ void HELPER(neon_unzip16)(CPUState *env, uint32_t rd, uint32_t rm)
env->vfp.regs[rd] = make_float64(d0);
}
-void HELPER(neon_qzip8)(CPUState *env, uint32_t rd, uint32_t rm)
+void HELPER(neon_qzip8)(uint32_t rd, uint32_t rm)
{
uint64_t zm0 = float64_val(env->vfp.regs[rm]);
uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
@@ -1962,7 +1958,7 @@ void HELPER(neon_qzip8)(CPUState *env, uint32_t rd, uint32_t rm)
env->vfp.regs[rd + 1] = make_float64(d1);
}
-void HELPER(neon_qzip16)(CPUState *env, uint32_t rd, uint32_t rm)
+void HELPER(neon_qzip16)(uint32_t rd, uint32_t rm)
{
uint64_t zm0 = float64_val(env->vfp.regs[rm]);
uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
@@ -1982,7 +1978,7 @@ void HELPER(neon_qzip16)(CPUState *env, uint32_t rd, uint32_t rm)
env->vfp.regs[rd + 1] = make_float64(d1);
}
-void HELPER(neon_qzip32)(CPUState *env, uint32_t rd, uint32_t rm)
+void HELPER(neon_qzip32)(uint32_t rd, uint32_t rm)
{
uint64_t zm0 = float64_val(env->vfp.regs[rm]);
uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
@@ -1998,7 +1994,7 @@ void HELPER(neon_qzip32)(CPUState *env, uint32_t rd, uint32_t rm)
env->vfp.regs[rd + 1] = make_float64(d1);
}
-void HELPER(neon_zip8)(CPUState *env, uint32_t rd, uint32_t rm)
+void HELPER(neon_zip8)(uint32_t rd, uint32_t rm)
{
uint64_t zm = float64_val(env->vfp.regs[rm]);
uint64_t zd = float64_val(env->vfp.regs[rd]);
@@ -2014,7 +2010,7 @@ void HELPER(neon_zip8)(CPUState *env, uint32_t rd, uint32_t rm)
env->vfp.regs[rd] = make_float64(d0);
}
-void HELPER(neon_zip16)(CPUState *env, uint32_t rd, uint32_t rm)
+void HELPER(neon_zip16)(uint32_t rd, uint32_t rm)
{
uint64_t zm = float64_val(env->vfp.regs[rm]);
uint64_t zd = float64_val(env->vfp.regs[rd]);