diff options
author | Jussi Kivilinna <jussi.kivilinna@iki.fi> | 2013-12-03 13:57:02 +0200 |
---|---|---|
committer | Jussi Kivilinna <jussi.kivilinna@iki.fi> | 2013-12-03 13:57:02 +0200 |
commit | 80896bc8f5e6ed9a627374e34f040ad5f3617584 (patch) | |
tree | 4025da8b0b8be08e1c479c6f39696805e83353ba /mpi | |
parent | d4ce0cfe0d35d7ec69c115456848b5b735c928ea (diff) | |
download | libgcrypt-80896bc8f5e6ed9a627374e34f040ad5f3617584.tar.gz |
Add aarch64 (arm64) mpi assembly
* mpi/aarch64/mpi-asm-defs.h: New.
* mpi/aarch64/mpih-add1.S: New.
* mpi/aarch64/mpih-mul1.S: New.
* mpi/aarch64/mpih-mul2.S: New.
* mpi/aarch64/mpih-mul3.S: New.
* mpi/aarch64/mpih-sub1.S: New.
* mpi/config.links [host=aarch64-*-*]: Add configuration for aarch64
assembly.
* mpi/longlong.h [__aarch64__] (add_ssaaaa, sub_ddmmss, umul_ppmm)
(count_leading_zeros): New.
--
Add preliminary aarch64 assembly implementations for mpi.
Signed-off-by: Jussi Kivilinna <jussi.kivilinna@iki.fi>
Diffstat (limited to 'mpi')
-rw-r--r-- | mpi/aarch64/mpi-asm-defs.h | 4 | ||||
-rw-r--r-- | mpi/aarch64/mpih-add1.S | 71 | ||||
-rw-r--r-- | mpi/aarch64/mpih-mul1.S | 96 | ||||
-rw-r--r-- | mpi/aarch64/mpih-mul2.S | 108 | ||||
-rw-r--r-- | mpi/aarch64/mpih-mul3.S | 121 | ||||
-rw-r--r-- | mpi/aarch64/mpih-sub1.S | 71 | ||||
-rw-r--r-- | mpi/config.links | 5 | ||||
-rw-r--r-- | mpi/longlong.h | 38 |
8 files changed, 514 insertions, 0 deletions
diff --git a/mpi/aarch64/mpi-asm-defs.h b/mpi/aarch64/mpi-asm-defs.h new file mode 100644 index 00000000..65190653 --- /dev/null +++ b/mpi/aarch64/mpi-asm-defs.h @@ -0,0 +1,4 @@ +/* This file defines some basic constants for the MPI machinery. We + * need to define the types on a per-CPU basis, so it is done with + * this file here. */ +#define BYTES_PER_MPI_LIMB (SIZEOF_UNSIGNED_LONG_LONG) diff --git a/mpi/aarch64/mpih-add1.S b/mpi/aarch64/mpih-add1.S new file mode 100644 index 00000000..9f7e2e69 --- /dev/null +++ b/mpi/aarch64/mpih-add1.S @@ -0,0 +1,71 @@ +/* ARM64 add_n -- Add two limb vectors of the same length > 0 and store + * sum in a third limb vector. + * + * Copyright © 2013 Jussi Kivilinna <jussi.kivilinna@iki.fi> + * + * This file is part of Libgcrypt. + * + * Libgcrypt is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1 of + * the License, or (at your option) any later version. + * + * Libgcrypt is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, see <http://www.gnu.org/licenses/>. 
+ * + */ + +#include "sysdep.h" +#include "asm-syntax.h" + +/******************* + * mpi_limb_t + * _gcry_mpih_add_n( mpi_ptr_t res_ptr, x0 + * mpi_ptr_t s1_ptr, x1 + * mpi_ptr_t s2_ptr, x2 + * mpi_size_t size) x3 + */ + +.text + +.globl _gcry_mpih_add_n +.type _gcry_mpih_add_n,%function +_gcry_mpih_add_n: + and x5, x3, #3; + adds xzr, xzr, xzr; /* clear carry flag */ + + cbz x5, .Large_loop; + +.Loop: + ldr x4, [x1], #8; + sub x3, x3, #1; + ldr x11, [x2], #8; + and x5, x3, #3; + adcs x4, x4, x11; + str x4, [x0], #8; + cbz x3, .Lend; + cbnz x5, .Loop; + +.Large_loop: + ldp x4, x6, [x1], #16; + ldp x5, x7, [x2], #16; + ldp x8, x10, [x1], #16; + ldp x9, x11, [x2], #16; + sub x3, x3, #4; + adcs x4, x4, x5; + adcs x6, x6, x7; + adcs x8, x8, x9; + adcs x10, x10, x11; + stp x4, x6, [x0], #16; + stp x8, x10, [x0], #16; + cbnz x3, .Large_loop; + +.Lend: + adc x0, xzr, xzr; + ret; +.size _gcry_mpih_add_n,.-_gcry_mpih_add_n; diff --git a/mpi/aarch64/mpih-mul1.S b/mpi/aarch64/mpih-mul1.S new file mode 100644 index 00000000..cbb333ff --- /dev/null +++ b/mpi/aarch64/mpih-mul1.S @@ -0,0 +1,96 @@ +/* ARM64 mul_1 -- Multiply a limb vector with a limb and store the result in + * a second limb vector. + * + * Copyright © 2013 Jussi Kivilinna <jussi.kivilinna@iki.fi> + * + * This file is part of Libgcrypt. + * + * Libgcrypt is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1 of + * the License, or (at your option) any later version. + * + * Libgcrypt is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, see <http://www.gnu.org/licenses/>. 
+ * + */ + +#include "sysdep.h" +#include "asm-syntax.h" + +/******************* + * mpi_limb_t + * _gcry_mpih_mul_1( mpi_ptr_t res_ptr, x0 + * mpi_ptr_t s1_ptr, x1 + * mpi_size_t s1_size, x2 + * mpi_limb_t s2_limb) x3 + */ + +.text + +.globl _gcry_mpih_mul_1 +.type _gcry_mpih_mul_1,%function +_gcry_mpih_mul_1: + and x5, x2, #3; + mov x4, xzr; + + cbz x5, .Large_loop; + +.Loop: + ldr x5, [x1], #8; + sub x2, x2, #1; + mul x9, x5, x3; + umulh x10, x5, x3; + and x5, x2, #3; + adds x4, x4, x9; + str x4, [x0], #8; + adc x4, x10, xzr; + + cbz x2, .Lend; + cbnz x5, .Loop; + +.Large_loop: + ldp x5, x6, [x1]; + sub x2, x2, #4; + + mul x9, x5, x3; + ldp x7, x8, [x1, #16]; + umulh x10, x5, x3; + add x1, x1, #32; + + adds x4, x4, x9; + str x4, [x0], #8; + mul x11, x6, x3; + adc x4, x10, xzr; + + umulh x12, x6, x3; + + adds x4, x4, x11; + str x4, [x0], #8; + mul x13, x7, x3; + adc x4, x12, xzr; + + umulh x14, x7, x3; + + adds x4, x4, x13; + str x4, [x0], #8; + mul x15, x8, x3; + adc x4, x14, xzr; + + umulh x16, x8, x3; + + adds x4, x4, x15; + str x4, [x0], #8; + adc x4, x16, xzr; + + cbnz x2, .Large_loop; + +.Lend: + mov x0, x4; + ret; +.size _gcry_mpih_mul_1,.-_gcry_mpih_mul_1; diff --git a/mpi/aarch64/mpih-mul2.S b/mpi/aarch64/mpih-mul2.S new file mode 100644 index 00000000..bfb35718 --- /dev/null +++ b/mpi/aarch64/mpih-mul2.S @@ -0,0 +1,108 @@ +/* ARM64 mul_2 -- Multiply a limb vector with a limb and add the result to + * a second limb vector. + * + * Copyright © 2013 Jussi Kivilinna <jussi.kivilinna@iki.fi> + * + * This file is part of Libgcrypt. + * + * Libgcrypt is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1 of + * the License, or (at your option) any later version. 
+ * + * Libgcrypt is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, see <http://www.gnu.org/licenses/>. + * + */ + +#include "sysdep.h" +#include "asm-syntax.h" + +/******************* + * mpi_limb_t + * _gcry_mpih_addmul_1( mpi_ptr_t res_ptr, x0 + * mpi_ptr_t s1_ptr, x1 + * mpi_size_t s1_size, x2 + * mpi_limb_t s2_limb) x3 + */ + +.text + +.globl _gcry_mpih_addmul_1 +.type _gcry_mpih_addmul_1,%function +_gcry_mpih_addmul_1: + and x5, x2, #3; + mov x6, xzr; + mov x7, xzr; + + cbz x5, .Large_loop; + +.Loop: + ldr x5, [x1], #8; + + mul x12, x5, x3; + ldr x4, [x0]; + umulh x13, x5, x3; + sub x2, x2, #1; + + adds x12, x12, x4; + and x5, x2, #3; + adc x13, x13, x7; + adds x12, x12, x6; + str x12, [x0], #8; + adc x6, x7, x13; + + cbz x2, .Lend; + cbnz x5, .Loop; + +.Large_loop: + ldp x5, x9, [x1], #16; + sub x2, x2, #4; + ldp x4, x8, [x0]; + + mul x12, x5, x3; + umulh x13, x5, x3; + + adds x12, x12, x4; + mul x14, x9, x3; + adc x13, x13, x7; + adds x12, x12, x6; + umulh x15, x9, x3; + str x12, [x0], #8; + adc x6, x7, x13; + + adds x14, x14, x8; + ldp x5, x9, [x1], #16; + adc x15, x15, x7; + adds x14, x14, x6; + mul x12, x5, x3; + str x14, [x0], #8; + ldp x4, x8, [x0]; + umulh x13, x5, x3; + adc x6, x7, x15; + + adds x12, x12, x4; + mul x14, x9, x3; + adc x13, x13, x7; + adds x12, x12, x6; + umulh x15, x9, x3; + str x12, [x0], #8; + adc x6, x7, x13; + + adds x14, x14, x8; + adc x15, x15, x7; + adds x14, x14, x6; + str x14, [x0], #8; + adc x6, x7, x15; + + cbnz x2, .Large_loop; + +.Lend: + mov x0, x6; + ret; +.size _gcry_mpih_addmul_1,.-_gcry_mpih_addmul_1; diff --git a/mpi/aarch64/mpih-mul3.S b/mpi/aarch64/mpih-mul3.S new file mode 100644 index 00000000..6f12b7b1 --- 
/dev/null +++ b/mpi/aarch64/mpih-mul3.S @@ -0,0 +1,121 @@ +/* ARM mul_3 -- Multiply a limb vector with a limb and subtract the result + * from a second limb vector. + * + * Copyright © 2013 Jussi Kivilinna <jussi.kivilinna@iki.fi> + * + * This file is part of Libgcrypt. + * + * Libgcrypt is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1 of + * the License, or (at your option) any later version. + * + * Libgcrypt is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, see <http://www.gnu.org/licenses/>. + * + */ + +#include "sysdep.h" +#include "asm-syntax.h" + +/******************* + * mpi_limb_t + * _gcry_mpih_submul_1( mpi_ptr_t res_ptr, x0 + * mpi_ptr_t s1_ptr, x1 + * mpi_size_t s1_size, x2 + * mpi_limb_t s2_limb) x3 + */ + +.text + +.globl _gcry_mpih_submul_1 +.type _gcry_mpih_submul_1,%function +_gcry_mpih_submul_1: + and x5, x2, #3; + mov x7, xzr; + cbz x5, .Large_loop; + + subs xzr, xzr, xzr; + +.Loop: + ldr x4, [x1], #8; + cinc x7, x7, cc; + ldr x5, [x0]; + sub x2, x2, #1; + + mul x6, x4, x3; + subs x5, x5, x7; + umulh x4, x4, x3; + and x10, x2, #3; + + cset x7, cc; + subs x5, x5, x6; + add x7, x7, x4; + str x5, [x0], #8; + + cbz x2, .Loop_end; + cbnz x10, .Loop; + + cinc x7, x7, cc; + +.Large_loop: + ldp x4, x8, [x1], #16; + sub x2, x2, #4; + ldp x5, x9, [x0]; + + mul x6, x4, x3; + subs x5, x5, x7; + umulh x4, x4, x3; + + cset x7, cc; + subs x5, x5, x6; + mul x6, x8, x3; + add x7, x7, x4; + str x5, [x0], #8; + cinc x7, x7, cc; + + umulh x8, x8, x3; + + subs x9, x9, x7; + cset x7, cc; + subs x9, x9, x6; + ldp x4, x10, [x1], 
#16; + str x9, [x0], #8; + add x7, x7, x8; + ldp x5, x9, [x0]; + cinc x7, x7, cc; + + mul x6, x4, x3; + subs x5, x5, x7; + umulh x4, x4, x3; + + cset x7, cc; + subs x5, x5, x6; + mul x6, x10, x3; + add x7, x7, x4; + str x5, [x0], #8; + cinc x7, x7, cc; + + umulh x10, x10, x3; + + subs x9, x9, x7; + cset x7, cc; + subs x9, x9, x6; + add x7, x7, x10; + str x9, [x0], #8; + cinc x7, x7, cc; + + cbnz x2, .Large_loop; + + mov x0, x7; + ret; + +.Loop_end: + cinc x0, x7, cc; + ret; +.size _gcry_mpih_submul_1,.-_gcry_mpih_submul_1; diff --git a/mpi/aarch64/mpih-sub1.S b/mpi/aarch64/mpih-sub1.S new file mode 100644 index 00000000..f18b1cd7 --- /dev/null +++ b/mpi/aarch64/mpih-sub1.S @@ -0,0 +1,71 @@ +/* ARM64 sub_n -- Subtract two limb vectors of the same length > 0 and store + * sum in a third limb vector. + * + * Copyright © 2013 Jussi Kivilinna <jussi.kivilinna@iki.fi> + * + * This file is part of Libgcrypt. + * + * Libgcrypt is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1 of + * the License, or (at your option) any later version. + * + * Libgcrypt is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, see <http://www.gnu.org/licenses/>. 
+ * + */ + +#include "sysdep.h" +#include "asm-syntax.h" + +/******************* + * mpi_limb_t + * _gcry_mpih_sub_n( mpi_ptr_t res_ptr, x0 + * mpi_ptr_t s1_ptr, x1 + * mpi_ptr_t s2_ptr, x2 + * mpi_size_t size) x3 + */ + +.text + +.globl _gcry_mpih_sub_n +.type _gcry_mpih_sub_n,%function +_gcry_mpih_sub_n: + and x5, x3, #3; + subs xzr, xzr, xzr; /* prepare carry flag for sub */ + + cbz x5, .Large_loop; + +.Loop: + ldr x4, [x1], #8; + sub x3, x3, #1; + ldr x11, [x2], #8; + and x5, x3, #3; + sbcs x4, x4, x11; + str x4, [x0], #8; + cbz x3, .Lend; + cbnz x5, .Loop; + +.Large_loop: + ldp x4, x6, [x1], #16; + ldp x5, x7, [x2], #16; + ldp x8, x10, [x1], #16; + ldp x9, x11, [x2], #16; + sub x3, x3, #4; + sbcs x4, x4, x5; + sbcs x6, x6, x7; + sbcs x8, x8, x9; + sbcs x10, x10, x11; + stp x4, x6, [x0], #16; + stp x8, x10, [x0], #16; + cbnz x3, .Large_loop; + +.Lend: + cset x0, cc; + ret; +.size _gcry_mpih_sub_n,.-_gcry_mpih_sub_n; diff --git a/mpi/config.links b/mpi/config.links index 90d10770..a79b03ba 100644 --- a/mpi/config.links +++ b/mpi/config.links @@ -136,6 +136,11 @@ case "${host}" in mpi_extra_modules="udiv-qrnnd" mpi_cpu_arch="alpha" ;; + aarch64-*-*) + echo '/* configured for aarch64 */' >>./mpi/asm-syntax.h + path="aarch64" + mpi_cpu_arch="aarch64" + ;; arm*-*-*) if test "$gcry_cv_gcc_arm_platform_as_ok" = "yes" ; then echo '/* configured for arm */' >>./mpi/asm-syntax.h diff --git a/mpi/longlong.h b/mpi/longlong.h index 146830b0..8dd8fe8d 100644 --- a/mpi/longlong.h +++ b/mpi/longlong.h @@ -268,6 +268,44 @@ extern UDItype __udiv_qrnnd (); #endif /* __arm__ */ /*************************************** + ********** ARM64 / Aarch64 ********** + ***************************************/ +#if defined(__aarch64__) && W_TYPE_SIZE == 64 +#define add_ssaaaa(sh, sl, ah, al, bh, bl) \ + __asm__ ("adds %1, %4, %5\n" \ + "adc %0, %2, %3\n" \ + : "=r" ((sh)), \ + "=&r" ((sl)) \ + : "r" ((UDItype)(ah)), \ + "r" ((UDItype)(bh)), \ + "r" ((UDItype)(al)), \ + "r" ((UDItype)(bl)) 
__CLOBBER_CC) +#define sub_ddmmss(sh, sl, ah, al, bh, bl) \ + __asm__ ("subs %1, %4, %5\n" \ + "sbc %0, %2, %3\n" \ + : "=r" ((sh)), \ + "=&r" ((sl)) \ + : "r" ((UDItype)(ah)), \ + "r" ((UDItype)(bh)), \ + "r" ((UDItype)(al)), \ + "r" ((UDItype)(bl)) __CLOBBER_CC) +#define umul_ppmm(ph, pl, m0, m1) \ + do { \ + UDItype __m0 = (m0), __m1 = (m1), __ph; \ + (pl) = __m0 * __m1; \ + __asm__ ("umulh %0,%1,%2" \ + : "=r" (__ph) \ + : "r" (__m0), \ + "r" (__m1)); \ + (ph) = __ph; \ + } while (0) +#define count_leading_zeros(count, x) \ + __asm__ ("clz %0, %1\n" \ + : "=r" ((count)) \ + : "r" ((UDItype)(x))) +#endif /* __aarch64__ */ + +/*************************************** ************** CLIPPER ************** ***************************************/ #if defined (__clipper__) && W_TYPE_SIZE == 32 |