author    | Peter Maydell <peter.maydell@linaro.org> | 2021-05-02 16:23:05 +0100
committer | Peter Maydell <peter.maydell@linaro.org> | 2021-05-02 16:23:05 +0100
commit    | 15106f7dc3290ff3254611f265849a314a93eb0e (patch)
tree      | 85d40a24a95c1b70bb20f277b77896971ae47254 /tests
parent    | 53c5433e84e8935abed8e91d4a2eb813168a0ecf (diff)
parent    | e628c0156be74dd14a261bbd18674bacd1afcc7d (diff)
download  | qemu-15106f7dc3290ff3254611f265849a314a93eb0e.zip qemu-15106f7dc3290ff3254611f265849a314a93eb0e.tar.gz qemu-15106f7dc3290ff3254611f265849a314a93eb0e.tar.bz2
Merge remote-tracking branch 'remotes/rth-gitlab/tags/pull-hex-20210502' into staging
Minor cleanups.
Finish the rest of the Hexagon integer instructions.
# gpg: Signature made Sun 02 May 2021 15:38:17 BST
# gpg: using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg: issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [full]
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A 05C0 64DF 38E8 AF7E 215F
* remotes/rth-gitlab/tags/pull-hex-20210502: (31 commits)
Hexagon (target/hexagon) CABAC decode bin
Hexagon (target/hexagon) load into shifted register instructions
Hexagon (target/hexagon) load and unpack bytes instructions
Hexagon (target/hexagon) bit reverse (brev) addressing
Hexagon (target/hexagon) circular addressing
Hexagon (target/hexagon) add A4_addp_c/A4_subp_c
Hexagon (target/hexagon) add A6_vminub_RdP
Hexagon (target/hexagon) add A5_ACS (vacsh)
Hexagon (target/hexagon) add F2_sfinvsqrta
Hexagon (target/hexagon) add F2_sfrecipa instruction
Hexagon (target/hexagon) compile all debug code
Hexagon (target/hexagon) move QEMU_GENERATE to only be on during macros.h
Hexagon (target/hexagon) cleanup reg_field_info definition
Hexagon (target/hexagon) cleanup ternary operators in semantics
Hexagon (target/hexagon) use softfloat for float-to-int conversions
Hexagon (target/hexagon) replace float32_mul_pow2 with float32_scalbn
Hexagon (target/hexagon) use softfloat default NaN and tininess
Hexagon (target/hexagon) change type of softfloat_roundingmodes
Hexagon (target/hexagon) remove unused carry_from_add64 function
Hexagon (target/hexagon) change variables from int to bool when appropriate
...
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
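
Among the commits above, "bit reverse (brev) addressing" is exercised by the new
tests/tcg/hexagon/brev.c added below. As a minimal standalone sketch (not part of the
commit), the access order that test checks can be modelled with the same kind of helper
brev.c itself uses; the only assumption here is shrinking NBITS from the 8 used in
brev.c to 3 so the whole bit-reversed visiting order fits in one line of output:

#include <stdio.h>

#define NBITS 3   /* brev.c uses 8; 3 keeps the example short */

/*
 * Reverse the low NBITS bits of x.  This mirrors the bitreverse() helper
 * in brev.c, which the test uses to describe the order in which successive
 * ++Mu:brev post-increment accesses visit an aligned buffer.
 */
static int bitreverse(int x)
{
    int result = 0;
    for (int i = 0; i < NBITS; i++) {
        result = (result << 1) | (x & 1);
        x >>= 1;
    }
    return result;
}

int main(void)
{
    /* Sequential indices 0..7 visit the buffer as: 0 4 2 6 1 5 3 7 */
    for (int i = 0; i < (1 << NBITS); i++) {
        printf("%d ", bitreverse(i));
    }
    printf("\n");
    return 0;
}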
Diffstat (limited to 'tests')
-rw-r--r-- | tests/tcg/hexagon/Makefile.target |   6
-rw-r--r-- | tests/tcg/hexagon/brev.c          | 190
-rw-r--r-- | tests/tcg/hexagon/circ.c          | 486
-rw-r--r-- | tests/tcg/hexagon/fpstuff.c       | 242
-rw-r--r-- | tests/tcg/hexagon/load_align.c    | 415
-rw-r--r-- | tests/tcg/hexagon/load_unpack.c   | 474
-rw-r--r-- | tests/tcg/hexagon/misc.c          |  47
-rw-r--r-- | tests/tcg/hexagon/multi_result.c  | 282
8 files changed, 2142 insertions, 0 deletions
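
The largest new test, circ.c, covers circular addressing. Before the diff itself, a
minimal standalone sketch (not part of the commit) of the wrap-around arithmetic that
circ.c's check_load() expects: an element index advances by the increment and wraps back
into [0, size). The size of 10 elements and step of +1 below match circ_test_load_imm_b
in the diff; everything else is illustrative.

#include <stdio.h>

/*
 * Advance an element index by inc inside a circular buffer of size
 * elements, wrapping at both ends (inc may be negative).  This is the
 * same arithmetic check_load() in circ.c uses for its expected values.
 */
static int circ_advance(int idx, int inc, int size)
{
    idx += inc;
    while (idx >= size) {
        idx -= size;
    }
    while (idx < 0) {
        idx += size;
    }
    return idx;
}

int main(void)
{
    int idx = 0;
    /* With size = 10 and inc = +1, the 11th access (i = 10) has wrapped
     * back to element 0. */
    for (int i = 0; i < 12; i++) {
        printf("access %2d -> element %d\n", i, idx);
        idx = circ_advance(idx, 1, 10);
    }
    return 0;
}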
diff --git a/tests/tcg/hexagon/Makefile.target b/tests/tcg/hexagon/Makefile.target index 616af69..0992787 100644 --- a/tests/tcg/hexagon/Makefile.target +++ b/tests/tcg/hexagon/Makefile.target @@ -28,6 +28,7 @@ endif CFLAGS += -Wno-incompatible-pointer-types -Wno-undefined-internal +CFLAGS += -fno-unroll-loops HEX_SRC=$(SRC_PATH)/tests/tcg/hexagon VPATH += $(HEX_SRC) @@ -39,7 +40,12 @@ HEX_TESTS = first HEX_TESTS += misc HEX_TESTS += preg_alias HEX_TESTS += dual_stores +HEX_TESTS += multi_result HEX_TESTS += mem_noshuf +HEX_TESTS += circ +HEX_TESTS += brev +HEX_TESTS += load_unpack +HEX_TESTS += load_align HEX_TESTS += atomics HEX_TESTS += fpstuff diff --git a/tests/tcg/hexagon/brev.c b/tests/tcg/hexagon/brev.c new file mode 100644 index 0000000..9736a24 --- /dev/null +++ b/tests/tcg/hexagon/brev.c @@ -0,0 +1,190 @@ +/* + * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see <http://www.gnu.org/licenses/>. + */ + +#include <stdio.h> +#include <string.h> + +int err; + +#define NBITS 8 +#define SIZE (1 << NBITS) + +long long dbuf[SIZE] __attribute__((aligned(1 << 16))) = {0}; +int wbuf[SIZE] __attribute__((aligned(1 << 16))) = {0}; +short hbuf[SIZE] __attribute__((aligned(1 << 16))) = {0}; +unsigned char bbuf[SIZE] __attribute__((aligned(1 << 16))) = {0}; + +/* + * We use the C preporcessor to deal with the combinations of types + */ + +#define BREV_LOAD(SZ, RES, ADDR, INC) \ + __asm__( \ + "m0 = %2\n\t" \ + "%0 = mem" #SZ "(%1++m0:brev)\n\t" \ + : "=r"(RES), "+r"(ADDR) \ + : "r"(INC) \ + : "m0") + +#define BREV_LOAD_b(RES, ADDR, INC) \ + BREV_LOAD(b, RES, ADDR, INC) +#define BREV_LOAD_ub(RES, ADDR, INC) \ + BREV_LOAD(ub, RES, ADDR, INC) +#define BREV_LOAD_h(RES, ADDR, INC) \ + BREV_LOAD(h, RES, ADDR, INC) +#define BREV_LOAD_uh(RES, ADDR, INC) \ + BREV_LOAD(uh, RES, ADDR, INC) +#define BREV_LOAD_w(RES, ADDR, INC) \ + BREV_LOAD(w, RES, ADDR, INC) +#define BREV_LOAD_d(RES, ADDR, INC) \ + BREV_LOAD(d, RES, ADDR, INC) + +#define BREV_STORE(SZ, PART, ADDR, VAL, INC) \ + __asm__( \ + "m0 = %2\n\t" \ + "mem" #SZ "(%0++m0:brev) = %1" PART "\n\t" \ + : "+r"(ADDR) \ + : "r"(VAL), "r"(INC) \ + : "m0", "memory") + +#define BREV_STORE_b(ADDR, VAL, INC) \ + BREV_STORE(b, "", ADDR, VAL, INC) +#define BREV_STORE_h(ADDR, VAL, INC) \ + BREV_STORE(h, "", ADDR, VAL, INC) +#define BREV_STORE_f(ADDR, VAL, INC) \ + BREV_STORE(h, ".H", ADDR, VAL, INC) +#define BREV_STORE_w(ADDR, VAL, INC) \ + BREV_STORE(w, "", ADDR, VAL, INC) +#define BREV_STORE_d(ADDR, VAL, INC) \ + BREV_STORE(d, "", ADDR, VAL, INC) + +#define BREV_STORE_NEW(SZ, ADDR, VAL, INC) \ + __asm__( \ + "m0 = %2\n\t" \ + "{\n\t" \ + " r5 = %1\n\t" \ + " mem" #SZ "(%0++m0:brev) = r5.new\n\t" \ + "}\n\t" \ + : "+r"(ADDR) \ + : "r"(VAL), "r"(INC) \ + : "r5", "m0", "memory") + +#define BREV_STORE_bnew(ADDR, VAL, INC) \ + BREV_STORE_NEW(b, ADDR, VAL, INC) +#define BREV_STORE_hnew(ADDR, VAL, INC) \ + BREV_STORE_NEW(h, 
ADDR, VAL, INC) +#define BREV_STORE_wnew(ADDR, VAL, INC) \ + BREV_STORE_NEW(w, ADDR, VAL, INC) + +int bitreverse(int x) +{ + int result = 0; + int i; + for (i = 0; i < NBITS; i++) { + result <<= 1; + result |= x & 1; + x >>= 1; + } + return result; +} + +int sext8(int x) +{ + return (x << 24) >> 24; +} + +void check(int i, long long result, long long expect) +{ + if (result != expect) { + printf("ERROR(%d): 0x%04llx != 0x%04llx\n", i, result, expect); + err++; + } +} + +#define TEST_BREV_LOAD(SZ, TYPE, BUF, SHIFT, EXP) \ + do { \ + p = BUF; \ + for (i = 0; i < SIZE; i++) { \ + TYPE result; \ + BREV_LOAD_##SZ(result, p, 1 << (SHIFT - NBITS)); \ + check(i, result, EXP); \ + } \ + } while (0) + +#define TEST_BREV_STORE(SZ, TYPE, BUF, VAL, SHIFT) \ + do { \ + p = BUF; \ + memset(BUF, 0xff, sizeof(BUF)); \ + for (i = 0; i < SIZE; i++) { \ + BREV_STORE_##SZ(p, (TYPE)(VAL), 1 << (SHIFT - NBITS)); \ + } \ + for (i = 0; i < SIZE; i++) { \ + check(i, BUF[i], bitreverse(i)); \ + } \ + } while (0) + +#define TEST_BREV_STORE_NEW(SZ, BUF, SHIFT) \ + do { \ + p = BUF; \ + memset(BUF, 0xff, sizeof(BUF)); \ + for (i = 0; i < SIZE; i++) { \ + BREV_STORE_##SZ(p, i, 1 << (SHIFT - NBITS)); \ + } \ + for (i = 0; i < SIZE; i++) { \ + check(i, BUF[i], bitreverse(i)); \ + } \ + } while (0) + +/* + * We'll set high_half[i] = i << 16 for use in the .H form of store + * which stores from the high half of the word. + */ +int high_half[SIZE]; + +int main() +{ + void *p; + int i; + + for (i = 0; i < SIZE; i++) { + bbuf[i] = bitreverse(i); + hbuf[i] = bitreverse(i); + wbuf[i] = bitreverse(i); + dbuf[i] = bitreverse(i); + high_half[i] = i << 16; + } + + TEST_BREV_LOAD(b, int, bbuf, 16, sext8(i)); + TEST_BREV_LOAD(ub, int, bbuf, 16, i); + TEST_BREV_LOAD(h, int, hbuf, 15, i); + TEST_BREV_LOAD(uh, int, hbuf, 15, i); + TEST_BREV_LOAD(w, int, wbuf, 14, i); + TEST_BREV_LOAD(d, long long, dbuf, 13, i); + + TEST_BREV_STORE(b, int, bbuf, i, 16); + TEST_BREV_STORE(h, int, hbuf, i, 15); + TEST_BREV_STORE(f, int, hbuf, high_half[i], 15); + TEST_BREV_STORE(w, int, wbuf, i, 14); + TEST_BREV_STORE(d, long long, dbuf, i, 13); + + TEST_BREV_STORE_NEW(bnew, bbuf, 16); + TEST_BREV_STORE_NEW(hnew, hbuf, 15); + TEST_BREV_STORE_NEW(wnew, wbuf, 14); + + puts(err ? "FAIL" : "PASS"); + return err ? 1 : 0; +} diff --git a/tests/tcg/hexagon/circ.c b/tests/tcg/hexagon/circ.c new file mode 100644 index 0000000..67a1aa3 --- /dev/null +++ b/tests/tcg/hexagon/circ.c @@ -0,0 +1,486 @@ +/* + * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see <http://www.gnu.org/licenses/>. + */ + +#include <stdio.h> + +#define DEBUG 0 +#define DEBUG_PRINTF(...) 
\ + do { \ + if (DEBUG) { \ + printf(__VA_ARGS__); \ + } \ + } while (0) + + +#define NBYTES (1 << 8) +#define NHALFS (NBYTES / sizeof(short)) +#define NWORDS (NBYTES / sizeof(int)) +#define NDOBLS (NBYTES / sizeof(long long)) + +long long dbuf[NDOBLS] __attribute__((aligned(1 << 12))) = {0}; +int wbuf[NWORDS] __attribute__((aligned(1 << 12))) = {0}; +short hbuf[NHALFS] __attribute__((aligned(1 << 12))) = {0}; +unsigned char bbuf[NBYTES] __attribute__((aligned(1 << 12))) = {0}; + +/* + * We use the C preporcessor to deal with the combinations of types + */ + +#define INIT(BUF, N) \ + void init_##BUF(void) \ + { \ + int i; \ + for (i = 0; i < N; i++) { \ + BUF[i] = i; \ + } \ + } \ + +INIT(bbuf, NBYTES) +INIT(hbuf, NHALFS) +INIT(wbuf, NWORDS) +INIT(dbuf, NDOBLS) + +/* + * Macros for performing circular load + * RES result + * ADDR address + * START start address of buffer + * LEN length of buffer (in bytes) + * INC address increment (in bytes for IMM, elements for REG) + */ +#define CIRC_LOAD_IMM(SIZE, RES, ADDR, START, LEN, INC) \ + __asm__( \ + "r4 = %3\n\t" \ + "m0 = r4\n\t" \ + "cs0 = %2\n\t" \ + "%0 = mem" #SIZE "(%1++#" #INC ":circ(M0))\n\t" \ + : "=r"(RES), "+r"(ADDR) \ + : "r"(START), "r"(LEN) \ + : "r4", "m0", "cs0") +#define CIRC_LOAD_IMM_b(RES, ADDR, START, LEN, INC) \ + CIRC_LOAD_IMM(b, RES, ADDR, START, LEN, INC) +#define CIRC_LOAD_IMM_ub(RES, ADDR, START, LEN, INC) \ + CIRC_LOAD_IMM(ub, RES, ADDR, START, LEN, INC) +#define CIRC_LOAD_IMM_h(RES, ADDR, START, LEN, INC) \ + CIRC_LOAD_IMM(h, RES, ADDR, START, LEN, INC) +#define CIRC_LOAD_IMM_uh(RES, ADDR, START, LEN, INC) \ + CIRC_LOAD_IMM(uh, RES, ADDR, START, LEN, INC) +#define CIRC_LOAD_IMM_w(RES, ADDR, START, LEN, INC) \ + CIRC_LOAD_IMM(w, RES, ADDR, START, LEN, INC) +#define CIRC_LOAD_IMM_d(RES, ADDR, START, LEN, INC) \ + CIRC_LOAD_IMM(d, RES, ADDR, START, LEN, INC) + +/* + * The mreg has the following pieces + * mreg[31:28] increment[10:7] + * mreg[27:24] K value (used Hexagon v3 and earlier) + * mreg[23:17] increment[6:0] + * mreg[16:0] circular buffer length + */ +static int build_mreg(int inc, int K, int len) +{ + return ((inc & 0x780) << 21) | + ((K & 0xf) << 24) | + ((inc & 0x7f) << 17) | + (len & 0x1ffff); +} + +#define CIRC_LOAD_REG(SIZE, RES, ADDR, START, LEN, INC) \ + __asm__( \ + "r4 = %2\n\t" \ + "m1 = r4\n\t" \ + "cs1 = %3\n\t" \ + "%0 = mem" #SIZE "(%1++I:circ(M1))\n\t" \ + : "=r"(RES), "+r"(ADDR) \ + : "r"(build_mreg((INC), 0, (LEN))), \ + "r"(START) \ + : "r4", "m1", "cs1") +#define CIRC_LOAD_REG_b(RES, ADDR, START, LEN, INC) \ + CIRC_LOAD_REG(b, RES, ADDR, START, LEN, INC) +#define CIRC_LOAD_REG_ub(RES, ADDR, START, LEN, INC) \ + CIRC_LOAD_REG(ub, RES, ADDR, START, LEN, INC) +#define CIRC_LOAD_REG_h(RES, ADDR, START, LEN, INC) \ + CIRC_LOAD_REG(h, RES, ADDR, START, LEN, INC) +#define CIRC_LOAD_REG_uh(RES, ADDR, START, LEN, INC) \ + CIRC_LOAD_REG(uh, RES, ADDR, START, LEN, INC) +#define CIRC_LOAD_REG_w(RES, ADDR, START, LEN, INC) \ + CIRC_LOAD_REG(w, RES, ADDR, START, LEN, INC) +#define CIRC_LOAD_REG_d(RES, ADDR, START, LEN, INC) \ + CIRC_LOAD_REG(d, RES, ADDR, START, LEN, INC) + +/* + * Macros for performing circular store + * VAL value to store + * ADDR address + * START start address of buffer + * LEN length of buffer (in bytes) + * INC address increment (in bytes for IMM, elements for REG) + */ +#define CIRC_STORE_IMM(SIZE, PART, VAL, ADDR, START, LEN, INC) \ + __asm__( \ + "r4 = %3\n\t" \ + "m0 = r4\n\t" \ + "cs0 = %1\n\t" \ + "mem" #SIZE "(%0++#" #INC ":circ(M0)) = %2" PART "\n\t" \ + : "+r"(ADDR) \ + : 
"r"(START), "r"(VAL), "r"(LEN) \ + : "r4", "m0", "cs0", "memory") +#define CIRC_STORE_IMM_b(VAL, ADDR, START, LEN, INC) \ + CIRC_STORE_IMM(b, "", VAL, ADDR, START, LEN, INC) +#define CIRC_STORE_IMM_h(VAL, ADDR, START, LEN, INC) \ + CIRC_STORE_IMM(h, "", VAL, ADDR, START, LEN, INC) +#define CIRC_STORE_IMM_f(VAL, ADDR, START, LEN, INC) \ + CIRC_STORE_IMM(h, ".H", VAL, ADDR, START, LEN, INC) +#define CIRC_STORE_IMM_w(VAL, ADDR, START, LEN, INC) \ + CIRC_STORE_IMM(w, "", VAL, ADDR, START, LEN, INC) +#define CIRC_STORE_IMM_d(VAL, ADDR, START, LEN, INC) \ + CIRC_STORE_IMM(d, "", VAL, ADDR, START, LEN, INC) + +#define CIRC_STORE_NEW_IMM(SIZE, VAL, ADDR, START, LEN, INC) \ + __asm__( \ + "r4 = %3\n\t" \ + "m0 = r4\n\t" \ + "cs0 = %1\n\t" \ + "{\n\t" \ + " r5 = %2\n\t" \ + " mem" #SIZE "(%0++#" #INC ":circ(M0)) = r5.new\n\t" \ + "}\n\t" \ + : "+r"(ADDR) \ + : "r"(START), "r"(VAL), "r"(LEN) \ + : "r4", "r5", "m0", "cs0", "memory") +#define CIRC_STORE_IMM_bnew(VAL, ADDR, START, LEN, INC) \ + CIRC_STORE_NEW_IMM(b, VAL, ADDR, START, LEN, INC) +#define CIRC_STORE_IMM_hnew(VAL, ADDR, START, LEN, INC) \ + CIRC_STORE_NEW_IMM(h, VAL, ADDR, START, LEN, INC) +#define CIRC_STORE_IMM_wnew(VAL, ADDR, START, LEN, INC) \ + CIRC_STORE_NEW_IMM(w, VAL, ADDR, START, LEN, INC) + +#define CIRC_STORE_REG(SIZE, PART, VAL, ADDR, START, LEN, INC) \ + __asm__( \ + "r4 = %1\n\t" \ + "m1 = r4\n\t" \ + "cs1 = %2\n\t" \ + "mem" #SIZE "(%0++I:circ(M1)) = %3" PART "\n\t" \ + : "+r"(ADDR) \ + : "r"(build_mreg((INC), 0, (LEN))), \ + "r"(START), \ + "r"(VAL) \ + : "r4", "m1", "cs1", "memory") +#define CIRC_STORE_REG_b(VAL, ADDR, START, LEN, INC) \ + CIRC_STORE_REG(b, "", VAL, ADDR, START, LEN, INC) +#define CIRC_STORE_REG_h(VAL, ADDR, START, LEN, INC) \ + CIRC_STORE_REG(h, "", VAL, ADDR, START, LEN, INC) +#define CIRC_STORE_REG_f(VAL, ADDR, START, LEN, INC) \ + CIRC_STORE_REG(h, ".H", VAL, ADDR, START, LEN, INC) +#define CIRC_STORE_REG_w(VAL, ADDR, START, LEN, INC) \ + CIRC_STORE_REG(w, "", VAL, ADDR, START, LEN, INC) +#define CIRC_STORE_REG_d(VAL, ADDR, START, LEN, INC) \ + CIRC_STORE_REG(d, "", VAL, ADDR, START, LEN, INC) + +#define CIRC_STORE_NEW_REG(SIZE, VAL, ADDR, START, LEN, INC) \ + __asm__( \ + "r4 = %1\n\t" \ + "m1 = r4\n\t" \ + "cs1 = %2\n\t" \ + "{\n\t" \ + " r5 = %3\n\t" \ + " mem" #SIZE "(%0++I:circ(M1)) = r5.new\n\t" \ + "}\n\t" \ + : "+r"(ADDR) \ + : "r"(build_mreg((INC), 0, (LEN))), \ + "r"(START), \ + "r"(VAL) \ + : "r4", "r5", "m1", "cs1", "memory") +#define CIRC_STORE_REG_bnew(VAL, ADDR, START, LEN, INC) \ + CIRC_STORE_NEW_REG(b, VAL, ADDR, START, LEN, INC) +#define CIRC_STORE_REG_hnew(VAL, ADDR, START, LEN, INC) \ + CIRC_STORE_NEW_REG(h, VAL, ADDR, START, LEN, INC) +#define CIRC_STORE_REG_wnew(VAL, ADDR, START, LEN, INC) \ + CIRC_STORE_NEW_REG(w, VAL, ADDR, START, LEN, INC) + + +int err; + +/* We'll test increments +1 and -1 */ +void check_load(int i, long long result, int inc, int size) +{ + int expect = (i * inc); + while (expect >= size) { + expect -= size; + } + while (expect < 0) { + expect += size; + } + if (result != expect) { + printf("ERROR(%d): %lld != %d\n", i, result, expect); + err++; + } +} + +#define TEST_LOAD_IMM(SZ, TYPE, BUF, BUFSIZE, INC, FMT) \ +void circ_test_load_imm_##SZ(void) \ +{ \ + TYPE *p = (TYPE *)BUF; \ + int size = 10; \ + int i; \ + for (i = 0; i < BUFSIZE; i++) { \ + TYPE element; \ + CIRC_LOAD_IMM_##SZ(element, p, BUF, size * sizeof(TYPE), (INC)); \ + DEBUG_PRINTF("i = %2d, p = 0x%p, element = %2" #FMT "\n", \ + i, p, element); \ + check_load(i, element, ((INC) / 
(int)sizeof(TYPE)), size); \ + } \ + p = (TYPE *)BUF; \ + for (i = 0; i < BUFSIZE; i++) { \ + TYPE element; \ + CIRC_LOAD_IMM_##SZ(element, p, BUF, size * sizeof(TYPE), -(INC)); \ + DEBUG_PRINTF("i = %2d, p = 0x%p, element = %2" #FMT "\n", \ + i, p, element); \ + check_load(i, element, (-(INC) / (int)sizeof(TYPE)), size); \ + } \ +} + +TEST_LOAD_IMM(b, char, bbuf, NBYTES, 1, d) +TEST_LOAD_IMM(ub, unsigned char, bbuf, NBYTES, 1, d) +TEST_LOAD_IMM(h, short, hbuf, NHALFS, 2, d) +TEST_LOAD_IMM(uh, unsigned short, hbuf, NHALFS, 2, d) +TEST_LOAD_IMM(w, int, wbuf, NWORDS, 4, d) +TEST_LOAD_IMM(d, long long, dbuf, NDOBLS, 8, lld) + +#define TEST_LOAD_REG(SZ, TYPE, BUF, BUFSIZE, FMT) \ +void circ_test_load_reg_##SZ(void) \ +{ \ + TYPE *p = (TYPE *)BUF; \ + int size = 13; \ + int i; \ + for (i = 0; i < BUFSIZE; i++) { \ + TYPE element; \ + CIRC_LOAD_REG_##SZ(element, p, BUF, size * sizeof(TYPE), 1); \ + DEBUG_PRINTF("i = %2d, p = 0x%p, element = %2" #FMT "\n", \ + i, p, element); \ + check_load(i, element, 1, size); \ + } \ + p = (TYPE *)BUF; \ + for (i = 0; i < BUFSIZE; i++) { \ + TYPE element; \ + CIRC_LOAD_REG_##SZ(element, p, BUF, size * sizeof(TYPE), -1); \ + DEBUG_PRINTF("i = %2d, p = 0x%p, element = %2" #FMT "\n", \ + i, p, element); \ + check_load(i, element, -1, size); \ + } \ +} + +TEST_LOAD_REG(b, char, bbuf, NBYTES, d) +TEST_LOAD_REG(ub, unsigned char, bbuf, NBYTES, d) +TEST_LOAD_REG(h, short, hbuf, NHALFS, d) +TEST_LOAD_REG(uh, unsigned short, hbuf, NHALFS, d) +TEST_LOAD_REG(w, int, wbuf, NWORDS, d) +TEST_LOAD_REG(d, long long, dbuf, NDOBLS, lld) + +/* The circular stores will wrap around somewhere inside the buffer */ +#define CIRC_VAL(SZ, TYPE, BUFSIZE) \ +TYPE circ_val_##SZ(int i, int inc, int size) \ +{ \ + int mod = BUFSIZE % size; \ + int elem = i * inc; \ + if (elem < 0) { \ + if (-elem <= size - mod) { \ + return (elem + BUFSIZE - mod); \ + } else { \ + return (elem + BUFSIZE + size - mod); \ + } \ + } else if (elem < mod) {\ + return (elem + BUFSIZE - mod); \ + } else { \ + return (elem + BUFSIZE - size - mod); \ + } \ +} + +CIRC_VAL(b, unsigned char, NBYTES) +CIRC_VAL(h, short, NHALFS) +CIRC_VAL(w, int, NWORDS) +CIRC_VAL(d, long long, NDOBLS) + +/* + * Circular stores should only write to the first "size" elements of the buffer + * the remainder of the elements should have BUF[i] == i + */ +#define CHECK_STORE(SZ, BUF, BUFSIZE, FMT) \ +void check_store_##SZ(int inc, int size) \ +{ \ + int i; \ + for (i = 0; i < size; i++) { \ + DEBUG_PRINTF(#BUF "[%3d] = 0x%02" #FMT ", guess = 0x%02" #FMT "\n", \ + i, BUF[i], circ_val_##SZ(i, inc, size)); \ + if (BUF[i] != circ_val_##SZ(i, inc, size)) { \ + printf("ERROR(%3d): 0x%02" #FMT " != 0x%02" #FMT "\n", \ + i, BUF[i], circ_val_##SZ(i, inc, size)); \ + err++; \ + } \ + } \ + for (i = size; i < BUFSIZE; i++) { \ + if (BUF[i] != i) { \ + printf("ERROR(%3d): 0x%02" #FMT " != 0x%02x\n", i, BUF[i], i); \ + err++; \ + } \ + } \ +} + +CHECK_STORE(b, bbuf, NBYTES, x) +CHECK_STORE(h, hbuf, NHALFS, x) +CHECK_STORE(w, wbuf, NWORDS, x) +CHECK_STORE(d, dbuf, NDOBLS, llx) + +#define CIRC_TEST_STORE_IMM(SZ, CHK, TYPE, BUF, BUFSIZE, SHIFT, INC) \ +void circ_test_store_imm_##SZ(void) \ +{ \ + unsigned int size = 27; \ + TYPE *p = BUF; \ + TYPE val = 0; \ + int i; \ + init_##BUF(); \ + for (i = 0; i < BUFSIZE; i++) { \ + CIRC_STORE_IMM_##SZ(val << SHIFT, p, BUF, size * sizeof(TYPE), INC); \ + val++; \ + } \ + check_store_##CHK(((INC) / (int)sizeof(TYPE)), size); \ + p = BUF; \ + val = 0; \ + init_##BUF(); \ + for (i = 0; i < BUFSIZE; i++) { \ + 
CIRC_STORE_IMM_##SZ(val << SHIFT, p, BUF, size * sizeof(TYPE), \ + -(INC)); \ + val++; \ + } \ + check_store_##CHK((-(INC) / (int)sizeof(TYPE)), size); \ +} + +CIRC_TEST_STORE_IMM(b, b, unsigned char, bbuf, NBYTES, 0, 1) +CIRC_TEST_STORE_IMM(h, h, short, hbuf, NHALFS, 0, 2) +CIRC_TEST_STORE_IMM(f, h, short, hbuf, NHALFS, 16, 2) +CIRC_TEST_STORE_IMM(w, w, int, wbuf, NWORDS, 0, 4) +CIRC_TEST_STORE_IMM(d, d, long long, dbuf, NDOBLS, 0, 8) +CIRC_TEST_STORE_IMM(bnew, b, unsigned char, bbuf, NBYTES, 0, 1) +CIRC_TEST_STORE_IMM(hnew, h, short, hbuf, NHALFS, 0, 2) +CIRC_TEST_STORE_IMM(wnew, w, int, wbuf, NWORDS, 0, 4) + +#define CIRC_TEST_STORE_REG(SZ, CHK, TYPE, BUF, BUFSIZE, SHIFT) \ +void circ_test_store_reg_##SZ(void) \ +{ \ + TYPE *p = BUF; \ + unsigned int size = 19; \ + TYPE val = 0; \ + int i; \ + init_##BUF(); \ + for (i = 0; i < BUFSIZE; i++) { \ + CIRC_STORE_REG_##SZ(val << SHIFT, p, BUF, size * sizeof(TYPE), 1); \ + val++; \ + } \ + check_store_##CHK(1, size); \ + p = BUF; \ + val = 0; \ + init_##BUF(); \ + for (i = 0; i < BUFSIZE; i++) { \ + CIRC_STORE_REG_##SZ(val << SHIFT, p, BUF, size * sizeof(TYPE), -1); \ + val++; \ + } \ + check_store_##CHK(-1, size); \ +} + +CIRC_TEST_STORE_REG(b, b, unsigned char, bbuf, NBYTES, 0) +CIRC_TEST_STORE_REG(h, h, short, hbuf, NHALFS, 0) +CIRC_TEST_STORE_REG(f, h, short, hbuf, NHALFS, 16) +CIRC_TEST_STORE_REG(w, w, int, wbuf, NWORDS, 0) +CIRC_TEST_STORE_REG(d, d, long long, dbuf, NDOBLS, 0) +CIRC_TEST_STORE_REG(bnew, b, unsigned char, bbuf, NBYTES, 0) +CIRC_TEST_STORE_REG(hnew, h, short, hbuf, NHALFS, 0) +CIRC_TEST_STORE_REG(wnew, w, int, wbuf, NWORDS, 0) + +/* Test the old scheme used in Hexagon V3 */ +static void circ_test_v3(void) +{ + int *p = wbuf; + int size = 15; + int K = 4; /* 64 bytes */ + int element; + int i; + + init_wbuf(); + + for (i = 0; i < NWORDS; i++) { + __asm__( + "r4 = %2\n\t" + "m1 = r4\n\t" + "%0 = memw(%1++I:circ(M1))\n\t" + : "=r"(element), "+r"(p) + : "r"(build_mreg(1, K, size * sizeof(int))) + : "r4", "m1"); + DEBUG_PRINTF("i = %2d, p = 0x%p, element = %2d\n", i, p, element); + check_load(i, element, 1, size); + } +} + +int main() +{ + init_bbuf(); + init_hbuf(); + init_wbuf(); + init_dbuf(); + + DEBUG_PRINTF("NBYTES = %d\n", NBYTES); + DEBUG_PRINTF("Address of dbuf = 0x%p\n", dbuf); + DEBUG_PRINTF("Address of wbuf = 0x%p\n", wbuf); + DEBUG_PRINTF("Address of hbuf = 0x%p\n", hbuf); + DEBUG_PRINTF("Address of bbuf = 0x%p\n", bbuf); + + circ_test_load_imm_b(); + circ_test_load_imm_ub(); + circ_test_load_imm_h(); + circ_test_load_imm_uh(); + circ_test_load_imm_w(); + circ_test_load_imm_d(); + + circ_test_load_reg_b(); + circ_test_load_reg_ub(); + circ_test_load_reg_h(); + circ_test_load_reg_uh(); + circ_test_load_reg_w(); + circ_test_load_reg_d(); + + circ_test_store_imm_b(); + circ_test_store_imm_h(); + circ_test_store_imm_f(); + circ_test_store_imm_w(); + circ_test_store_imm_d(); + circ_test_store_imm_bnew(); + circ_test_store_imm_hnew(); + circ_test_store_imm_wnew(); + + circ_test_store_reg_b(); + circ_test_store_reg_h(); + circ_test_store_reg_f(); + circ_test_store_reg_w(); + circ_test_store_reg_d(); + circ_test_store_reg_bnew(); + circ_test_store_reg_hnew(); + circ_test_store_reg_wnew(); + + circ_test_v3(); + + puts(err ? "FAIL" : "PASS"); + return err ? 
1 : 0; +} diff --git a/tests/tcg/hexagon/fpstuff.c b/tests/tcg/hexagon/fpstuff.c index e4f1a0e..0dff429 100644 --- a/tests/tcg/hexagon/fpstuff.c +++ b/tests/tcg/hexagon/fpstuff.c @@ -37,10 +37,12 @@ const int SF_NaN = 0x7fc00000; const int SF_NaN_special = 0x7f800001; const int SF_ANY = 0x3f800000; const int SF_HEX_NAN = 0xffffffff; +const int SF_small_neg = 0xab98fba8; const long long DF_NaN = 0x7ff8000000000000ULL; const long long DF_ANY = 0x3f80000000000000ULL; const long long DF_HEX_NAN = 0xffffffffffffffffULL; +const long long DF_small_neg = 0xbd731f7500000000ULL; int err; @@ -248,6 +250,87 @@ static void check_dfminmax(void) check_fpstatus(usr, FPINVF); } +static void check_recip_exception(void) +{ + int result; + int usr; + + /* + * Check that sfrecipa doesn't set status bits when + * a NaN with bit 22 non-zero is passed + */ + asm (CLEAR_FPSTATUS + "%0,p0 = sfrecipa(%2, %3)\n\t" + "%1 = usr\n\t" + : "=r"(result), "=r"(usr) : "r"(SF_NaN), "r"(SF_ANY) + : "r2", "p0", "usr"); + check32(result, SF_HEX_NAN); + check_fpstatus(usr, 0); + + asm (CLEAR_FPSTATUS + "%0,p0 = sfrecipa(%2, %3)\n\t" + "%1 = usr\n\t" + : "=r"(result), "=r"(usr) : "r"(SF_ANY), "r"(SF_NaN) + : "r2", "p0", "usr"); + check32(result, SF_HEX_NAN); + check_fpstatus(usr, 0); + + asm (CLEAR_FPSTATUS + "%0,p0 = sfrecipa(%2, %2)\n\t" + "%1 = usr\n\t" + : "=r"(result), "=r"(usr) : "r"(SF_NaN) + : "r2", "p0", "usr"); + check32(result, SF_HEX_NAN); + check_fpstatus(usr, 0); + + /* + * Check that sfrecipa doesn't set status bits when + * a NaN with bit 22 zero is passed + */ + asm (CLEAR_FPSTATUS + "%0,p0 = sfrecipa(%2, %3)\n\t" + "%1 = usr\n\t" + : "=r"(result), "=r"(usr) : "r"(SF_NaN_special), "r"(SF_ANY) + : "r2", "p0", "usr"); + check32(result, SF_HEX_NAN); + check_fpstatus(usr, FPINVF); + + asm (CLEAR_FPSTATUS + "%0,p0 = sfrecipa(%2, %3)\n\t" + "%1 = usr\n\t" + : "=r"(result), "=r"(usr) : "r"(SF_ANY), "r"(SF_NaN_special) + : "r2", "p0", "usr"); + check32(result, SF_HEX_NAN); + check_fpstatus(usr, FPINVF); + + asm (CLEAR_FPSTATUS + "%0,p0 = sfrecipa(%2, %2)\n\t" + "%1 = usr\n\t" + : "=r"(result), "=r"(usr) : "r"(SF_NaN_special) + : "r2", "p0", "usr"); + check32(result, SF_HEX_NAN); + check_fpstatus(usr, FPINVF); + + /* + * Check that sfrecipa properly sets divid-by-zero + */ + asm (CLEAR_FPSTATUS + "%0,p0 = sfrecipa(%2, %3)\n\t" + "%1 = usr\n\t" + : "=r"(result), "=r"(usr) : "r"(0x885dc960), "r"(0x80000000) + : "r2", "p0", "usr"); + check32(result, 0x3f800000); + check_fpstatus(usr, FPDBZF); + + asm (CLEAR_FPSTATUS + "%0,p0 = sfrecipa(%2, %3)\n\t" + "%1 = usr\n\t" + : "=r"(result), "=r"(usr) : "r"(0x7f800000), "r"(SF_ZERO) + : "r2", "p0", "usr"); + check32(result, 0x3f800000); + check_fpstatus(usr, 0); +} + static void check_canonical_NaN(void) { int sf_result; @@ -358,12 +441,171 @@ static void check_canonical_NaN(void) check_fpstatus(usr, 0); } +static void check_invsqrta(void) +{ + int result; + int predval; + + asm volatile("%0,p0 = sfinvsqrta(%2)\n\t" + "%1 = p0\n\t" + : "+r"(result), "=r"(predval) + : "r"(0x7f800000) + : "p0"); + check32(result, 0xff800000); + check32(predval, 0x0); +} + +static void check_float2int_convs() +{ + int res32; + long long res64; + int usr; + + /* + * Check that the various forms of float-to-unsigned + * check sign before rounding + */ + asm(CLEAR_FPSTATUS + "%0 = convert_sf2uw(%2)\n\t" + "%1 = usr\n\t" + : "=r"(res32), "=r"(usr) : "r"(SF_small_neg) + : "r2", "usr"); + check32(res32, 0); + check_fpstatus(usr, FPINVF); + + asm(CLEAR_FPSTATUS + "%0 = convert_sf2uw(%2):chop\n\t" + "%1 = 
usr\n\t" + : "=r"(res32), "=r"(usr) : "r"(SF_small_neg) + : "r2", "usr"); + check32(res32, 0); + check_fpstatus(usr, FPINVF); + + asm(CLEAR_FPSTATUS + "%0 = convert_sf2ud(%2)\n\t" + "%1 = usr\n\t" + : "=r"(res64), "=r"(usr) : "r"(SF_small_neg) + : "r2", "usr"); + check64(res64, 0); + check_fpstatus(usr, FPINVF); + + asm(CLEAR_FPSTATUS + "%0 = convert_sf2ud(%2):chop\n\t" + "%1 = usr\n\t" + : "=r"(res64), "=r"(usr) : "r"(SF_small_neg) + : "r2", "usr"); + check64(res64, 0); + check_fpstatus(usr, FPINVF); + + asm(CLEAR_FPSTATUS + "%0 = convert_df2uw(%2)\n\t" + "%1 = usr\n\t" + : "=r"(res32), "=r"(usr) : "r"(DF_small_neg) + : "r2", "usr"); + check32(res32, 0); + check_fpstatus(usr, FPINVF); + + asm(CLEAR_FPSTATUS + "%0 = convert_df2uw(%2):chop\n\t" + "%1 = usr\n\t" + : "=r"(res32), "=r"(usr) : "r"(DF_small_neg) + : "r2", "usr"); + check32(res32, 0); + check_fpstatus(usr, FPINVF); + + asm(CLEAR_FPSTATUS + "%0 = convert_df2ud(%2)\n\t" + "%1 = usr\n\t" + : "=r"(res64), "=r"(usr) : "r"(DF_small_neg) + : "r2", "usr"); + check64(res64, 0); + check_fpstatus(usr, FPINVF); + + asm(CLEAR_FPSTATUS + "%0 = convert_df2ud(%2):chop\n\t" + "%1 = usr\n\t" + : "=r"(res64), "=r"(usr) : "r"(DF_small_neg) + : "r2", "usr"); + check64(res64, 0); + check_fpstatus(usr, FPINVF); + + /* + * Check that the various forms of float-to-signed return -1 for NaN + */ + asm(CLEAR_FPSTATUS + "%0 = convert_sf2w(%2)\n\t" + "%1 = usr\n\t" + : "=r"(res32), "=r"(usr) : "r"(SF_NaN) + : "r2", "usr"); + check32(res32, -1); + check_fpstatus(usr, FPINVF); + + asm(CLEAR_FPSTATUS + "%0 = convert_sf2w(%2):chop\n\t" + "%1 = usr\n\t" + : "=r"(res32), "=r"(usr) : "r"(SF_NaN) + : "r2", "usr"); + check32(res32, -1); + check_fpstatus(usr, FPINVF); + + asm(CLEAR_FPSTATUS + "%0 = convert_sf2d(%2)\n\t" + "%1 = usr\n\t" + : "=r"(res64), "=r"(usr) : "r"(SF_NaN) + : "r2", "usr"); + check64(res64, -1); + check_fpstatus(usr, FPINVF); + + asm(CLEAR_FPSTATUS + "%0 = convert_sf2d(%2):chop\n\t" + "%1 = usr\n\t" + : "=r"(res64), "=r"(usr) : "r"(SF_NaN) + : "r2", "usr"); + check64(res64, -1); + check_fpstatus(usr, FPINVF); + + asm(CLEAR_FPSTATUS + "%0 = convert_df2w(%2)\n\t" + "%1 = usr\n\t" + : "=r"(res32), "=r"(usr) : "r"(DF_NaN) + : "r2", "usr"); + check32(res32, -1); + check_fpstatus(usr, FPINVF); + + asm(CLEAR_FPSTATUS + "%0 = convert_df2w(%2):chop\n\t" + "%1 = usr\n\t" + : "=r"(res32), "=r"(usr) : "r"(DF_NaN) + : "r2", "usr"); + check32(res32, -1); + check_fpstatus(usr, FPINVF); + + asm(CLEAR_FPSTATUS + "%0 = convert_df2d(%2)\n\t" + "%1 = usr\n\t" + : "=r"(res64), "=r"(usr) : "r"(DF_NaN) + : "r2", "usr"); + check64(res64, -1); + check_fpstatus(usr, FPINVF); + + asm(CLEAR_FPSTATUS + "%0 = convert_df2d(%2):chop\n\t" + "%1 = usr\n\t" + : "=r"(res64), "=r"(usr) : "r"(DF_NaN) + : "r2", "usr"); + check64(res64, -1); + check_fpstatus(usr, FPINVF); +} + int main() { check_compare_exception(); check_sfminmax(); check_dfminmax(); + check_recip_exception(); check_canonical_NaN(); + check_invsqrta(); + check_float2int_convs(); puts(err ? "FAIL" : "PASS"); return err ? 1 : 0; diff --git a/tests/tcg/hexagon/load_align.c b/tests/tcg/hexagon/load_align.c new file mode 100644 index 0000000..12fc9cb --- /dev/null +++ b/tests/tcg/hexagon/load_align.c @@ -0,0 +1,415 @@ +/* + * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see <http://www.gnu.org/licenses/>. + */ + +/* + * Test load align instructions + * + * Example + * r1:0 = memh_fifo(r1+#0) + * loads a half word from memory, shifts the destination register + * right by one half word and inserts the loaded value into the high + * half word of the destination. + * + * There are 8 addressing modes and byte and half word variants, for a + * total of 16 instructions to test + */ + +#include <stdio.h> +#include <string.h> + +int err; + +char buf[16] __attribute__((aligned(1 << 16))); + +void init_buf(void) +{ + int i; + for (i = 0; i < 16; i++) { + buf[i] = i + 1; + } +} + +void __check(int line, long long result, long long expect) +{ + if (result != expect) { + printf("ERROR at line %d: 0x%016llx != 0x%016llx\n", + line, result, expect); + err++; + } +} + +#define check(RES, EXP) __check(__LINE__, RES, EXP) + +void __checkp(int line, void *p, void *expect) +{ + if (p != expect) { + printf("ERROR at line %d: 0x%p != 0x%p\n", line, p, expect); + err++; + } +} + +#define checkp(RES, EXP) __checkp(__LINE__, RES, EXP) + +/* + **************************************************************************** + * _io addressing mode (addr + offset) + */ +#define LOAD_io(SZ, RES, ADDR, OFF) \ + __asm__( \ + "%0 = mem" #SZ "_fifo(%1+#" #OFF ")\n\t" \ + : "+r"(RES) \ + : "r"(ADDR)) +#define LOAD_io_b(RES, ADDR, OFF) \ + LOAD_io(b, RES, ADDR, OFF) +#define LOAD_io_h(RES, ADDR, OFF) \ + LOAD_io(h, RES, ADDR, OFF) + +#define TEST_io(NAME, SZ, SIZE, EXP1, EXP2, EXP3, EXP4) \ +void test_##NAME(void) \ +{ \ + long long result = ~0LL; \ + LOAD_io_##SZ(result, buf, 0 * (SIZE)); \ + check(result, (EXP1)); \ + LOAD_io_##SZ(result, buf, 1 * (SIZE)); \ + check(result, (EXP2)); \ + LOAD_io_##SZ(result, buf, 2 * (SIZE)); \ + check(result, (EXP3)); \ + LOAD_io_##SZ(result, buf, 3 * (SIZE)); \ + check(result, (EXP4)); \ +} + +TEST_io(loadalignb_io, b, 1, + 0x01ffffffffffffffLL, 0x0201ffffffffffffLL, + 0x030201ffffffffffLL, 0x04030201ffffffffLL) +TEST_io(loadalignh_io, h, 2, + 0x0201ffffffffffffLL, 0x04030201ffffffffLL, + 0x060504030201ffffLL, 0x0807060504030201LL) + +/* + **************************************************************************** + * _ur addressing mode (index << offset + base) + */ +#define LOAD_ur(SZ, RES, SHIFT, IDX) \ + __asm__( \ + "%0 = mem" #SZ "_fifo(%1<<#" #SHIFT " + ##buf)\n\t" \ + : "+r"(RES) \ + : "r"(IDX)) +#define LOAD_ur_b(RES, SHIFT, IDX) \ + LOAD_ur(b, RES, SHIFT, IDX) +#define LOAD_ur_h(RES, SHIFT, IDX) \ + LOAD_ur(h, RES, SHIFT, IDX) + +#define TEST_ur(NAME, SZ, SHIFT, RES1, RES2, RES3, RES4) \ +void test_##NAME(void) \ +{ \ + long long result = ~0LL; \ + LOAD_ur_##SZ(result, (SHIFT), 0); \ + check(result, (RES1)); \ + LOAD_ur_##SZ(result, (SHIFT), 1); \ + check(result, (RES2)); \ + LOAD_ur_##SZ(result, (SHIFT), 2); \ + check(result, (RES3)); \ + LOAD_ur_##SZ(result, (SHIFT), 3); \ + check(result, (RES4)); \ +} + +TEST_ur(loadalignb_ur, b, 1, 
+ 0x01ffffffffffffffLL, 0x0301ffffffffffffLL, + 0x050301ffffffffffLL, 0x07050301ffffffffLL) +TEST_ur(loadalignh_ur, h, 1, + 0x0201ffffffffffffLL, 0x04030201ffffffffLL, + 0x060504030201ffffLL, 0x0807060504030201LL) + +/* + **************************************************************************** + * _ap addressing mode (addr = base) + */ +#define LOAD_ap(SZ, RES, PTR, ADDR) \ + __asm__( \ + "%0 = mem" #SZ "_fifo(%1 = ##" #ADDR ")\n\t" \ + : "+r"(RES), "=r"(PTR)) +#define LOAD_ap_b(RES, PTR, ADDR) \ + LOAD_ap(b, RES, PTR, ADDR) +#define LOAD_ap_h(RES, PTR, ADDR) \ + LOAD_ap(h, RES, PTR, ADDR) + +#define TEST_ap(NAME, SZ, SIZE, RES1, RES2, RES3, RES4) \ +void test_##NAME(void) \ +{ \ + long long result = ~0LL; \ + void *ptr; \ + LOAD_ap_##SZ(result, ptr, (buf + 0 * (SIZE))); \ + check(result, (RES1)); \ + checkp(ptr, &buf[0 * (SIZE)]); \ + LOAD_ap_##SZ(result, ptr, (buf + 1 * (SIZE))); \ + check(result, (RES2)); \ + checkp(ptr, &buf[1 * (SIZE)]); \ + LOAD_ap_##SZ(result, ptr, (buf + 2 * (SIZE))); \ + check(result, (RES3)); \ + checkp(ptr, &buf[2 * (SIZE)]); \ + LOAD_ap_##SZ(result, ptr, (buf + 3 * (SIZE))); \ + check(result, (RES4)); \ + checkp(ptr, &buf[3 * (SIZE)]); \ +} + +TEST_ap(loadalignb_ap, b, 1, + 0x01ffffffffffffffLL, 0x0201ffffffffffffLL, + 0x030201ffffffffffLL, 0x04030201ffffffffLL) +TEST_ap(loadalignh_ap, h, 2, + 0x0201ffffffffffffLL, 0x04030201ffffffffLL, + 0x060504030201ffffLL, 0x0807060504030201LL) + +/* + **************************************************************************** + * _rp addressing mode (addr ++ modifer-reg) + */ +#define LOAD_pr(SZ, RES, PTR, INC) \ + __asm__( \ + "m0 = %2\n\t" \ + "%0 = mem" #SZ "_fifo(%1++m0)\n\t" \ + : "+r"(RES), "+r"(PTR) \ + : "r"(INC) \ + : "m0") +#define LOAD_pr_b(RES, PTR, INC) \ + LOAD_pr(b, RES, PTR, INC) +#define LOAD_pr_h(RES, PTR, INC) \ + LOAD_pr(h, RES, PTR, INC) + +#define TEST_pr(NAME, SZ, SIZE, RES1, RES2, RES3, RES4) \ +void test_##NAME(void) \ +{ \ + long long result = ~0LL; \ + void *ptr = buf; \ + LOAD_pr_##SZ(result, ptr, (SIZE)); \ + check(result, (RES1)); \ + checkp(ptr, &buf[1 * (SIZE)]); \ + LOAD_pr_##SZ(result, ptr, (SIZE)); \ + check(result, (RES2)); \ + checkp(ptr, &buf[2 * (SIZE)]); \ + LOAD_pr_##SZ(result, ptr, (SIZE)); \ + check(result, (RES3)); \ + checkp(ptr, &buf[3 * (SIZE)]); \ + LOAD_pr_##SZ(result, ptr, (SIZE)); \ + check(result, (RES4)); \ + checkp(ptr, &buf[4 * (SIZE)]); \ +} + +TEST_pr(loadalignb_pr, b, 1, + 0x01ffffffffffffffLL, 0x0201ffffffffffffLL, + 0x030201ffffffffffLL, 0x04030201ffffffffLL) +TEST_pr(loadalignh_pr, h, 2, + 0x0201ffffffffffffLL, 0x04030201ffffffffLL, + 0x060504030201ffffLL, 0x0807060504030201LL) + +/* + **************************************************************************** + * _pbr addressing mode (addr ++ modifer-reg:brev) + */ +#define LOAD_pbr(SZ, RES, PTR) \ + __asm__( \ + "r4 = #(1 << (16 - 3))\n\t" \ + "m0 = r4\n\t" \ + "%0 = mem" #SZ "_fifo(%1++m0:brev)\n\t" \ + : "+r"(RES), "+r"(PTR) \ + : \ + : "r4", "m0") +#define LOAD_pbr_b(RES, PTR) \ + LOAD_pbr(b, RES, PTR) +#define LOAD_pbr_h(RES, PTR) \ + LOAD_pbr(h, RES, PTR) + +#define TEST_pbr(NAME, SZ, RES1, RES2, RES3, RES4) \ +void test_##NAME(void) \ +{ \ + long long result = ~0LL; \ + void *ptr = buf; \ + LOAD_pbr_##SZ(result, ptr); \ + check(result, (RES1)); \ + LOAD_pbr_##SZ(result, ptr); \ + check(result, (RES2)); \ + LOAD_pbr_##SZ(result, ptr); \ + check(result, (RES3)); \ + LOAD_pbr_##SZ(result, ptr); \ + check(result, (RES4)); \ +} + +TEST_pbr(loadalignb_pbr, b, + 0x01ffffffffffffffLL, 0x0501ffffffffffffLL, 
+ 0x030501ffffffffffLL, 0x07030501ffffffffLL) +TEST_pbr(loadalignh_pbr, h, + 0x0201ffffffffffffLL, 0x06050201ffffffffLL, + 0x040306050201ffffLL, 0x0807040306050201LL) + +/* + **************************************************************************** + * _pi addressing mode (addr ++ inc) + */ +#define LOAD_pi(SZ, RES, PTR, INC) \ + __asm__( \ + "%0 = mem" #SZ "_fifo(%1++#" #INC ")\n\t" \ + : "+r"(RES), "+r"(PTR)) +#define LOAD_pi_b(RES, PTR, INC) \ + LOAD_pi(b, RES, PTR, INC) +#define LOAD_pi_h(RES, PTR, INC) \ + LOAD_pi(h, RES, PTR, INC) + +#define TEST_pi(NAME, SZ, INC, RES1, RES2, RES3, RES4) \ +void test_##NAME(void) \ +{ \ + long long result = ~0LL; \ + void *ptr = buf; \ + LOAD_pi_##SZ(result, ptr, (INC)); \ + check(result, (RES1)); \ + checkp(ptr, &buf[1 * (INC)]); \ + LOAD_pi_##SZ(result, ptr, (INC)); \ + check(result, (RES2)); \ + checkp(ptr, &buf[2 * (INC)]); \ + LOAD_pi_##SZ(result, ptr, (INC)); \ + check(result, (RES3)); \ + checkp(ptr, &buf[3 * (INC)]); \ + LOAD_pi_##SZ(result, ptr, (INC)); \ + check(result, (RES4)); \ + checkp(ptr, &buf[4 * (INC)]); \ +} + +TEST_pi(loadalignb_pi, b, 1, + 0x01ffffffffffffffLL, 0x0201ffffffffffffLL, + 0x030201ffffffffffLL, 0x04030201ffffffffLL) +TEST_pi(loadalignh_pi, h, 2, + 0x0201ffffffffffffLL, 0x04030201ffffffffLL, + 0x060504030201ffffLL, 0x0807060504030201LL) + +/* + **************************************************************************** + * _pci addressing mode (addr ++ inc:circ) + */ +#define LOAD_pci(SZ, RES, PTR, START, LEN, INC) \ + __asm__( \ + "r4 = %3\n\t" \ + "m0 = r4\n\t" \ + "cs0 = %2\n\t" \ + "%0 = mem" #SZ "_fifo(%1++#" #INC ":circ(m0))\n\t" \ + : "+r"(RES), "+r"(PTR) \ + : "r"(START), "r"(LEN) \ + : "r4", "m0", "cs0") +#define LOAD_pci_b(RES, PTR, START, LEN, INC) \ + LOAD_pci(b, RES, PTR, START, LEN, INC) +#define LOAD_pci_h(RES, PTR, START, LEN, INC) \ + LOAD_pci(h, RES, PTR, START, LEN, INC) + +#define TEST_pci(NAME, SZ, LEN, INC, RES1, RES2, RES3, RES4) \ +void test_##NAME(void) \ +{ \ + long long result = ~0LL; \ + void *ptr = buf; \ + LOAD_pci_##SZ(result, ptr, buf, (LEN), (INC)); \ + check(result, (RES1)); \ + checkp(ptr, &buf[(1 * (INC)) % (LEN)]); \ + LOAD_pci_##SZ(result, ptr, buf, (LEN), (INC)); \ + check(result, (RES2)); \ + checkp(ptr, &buf[(2 * (INC)) % (LEN)]); \ + LOAD_pci_##SZ(result, ptr, buf, (LEN), (INC)); \ + check(result, (RES3)); \ + checkp(ptr, &buf[(3 * (INC)) % (LEN)]); \ + LOAD_pci_##SZ(result, ptr, buf, (LEN), (INC)); \ + check(result, (RES4)); \ + checkp(ptr, &buf[(4 * (INC)) % (LEN)]); \ +} + +TEST_pci(loadalignb_pci, b, 2, 1, + 0x01ffffffffffffffLL, 0x0201ffffffffffffLL, + 0x010201ffffffffffLL, 0x02010201ffffffffLL) +TEST_pci(loadalignh_pci, h, 4, 2, + 0x0201ffffffffffffLL, 0x04030201ffffffffLL, + 0x020104030201ffffLL, 0x0403020104030201LL) + +/* + **************************************************************************** + * _pcr addressing mode (addr ++ I:circ(modifier-reg)) + */ +#define LOAD_pcr(SZ, RES, PTR, START, LEN, INC) \ + __asm__( \ + "r4 = %2\n\t" \ + "m1 = r4\n\t" \ + "cs1 = %3\n\t" \ + "%0 = mem" #SZ "_fifo(%1++I:circ(m1))\n\t" \ + : "+r"(RES), "+r"(PTR) \ + : "r"((((INC) & 0x7f) << 17) | ((LEN) & 0x1ffff)), \ + "r"(START) \ + : "r4", "m1", "cs1") +#define LOAD_pcr_b(RES, PTR, START, LEN, INC) \ + LOAD_pcr(b, RES, PTR, START, LEN, INC) +#define LOAD_pcr_h(RES, PTR, START, LEN, INC) \ + LOAD_pcr(h, RES, PTR, START, LEN, INC) + +#define TEST_pcr(NAME, SZ, SIZE, LEN, INC, RES1, RES2, RES3, RES4) \ +void test_##NAME(void) \ +{ \ + long long result = ~0LL; \ + void *ptr = buf; 
\ + LOAD_pcr_##SZ(result, ptr, buf, (LEN), (INC)); \ + check(result, (RES1)); \ + checkp(ptr, &buf[(1 * (INC) * (SIZE)) % (LEN)]); \ + LOAD_pcr_##SZ(result, ptr, buf, (LEN), (INC)); \ + check(result, (RES2)); \ + checkp(ptr, &buf[(2 * (INC) * (SIZE)) % (LEN)]); \ + LOAD_pcr_##SZ(result, ptr, buf, (LEN), (INC)); \ + check(result, (RES3)); \ + checkp(ptr, &buf[(3 * (INC) * (SIZE)) % (LEN)]); \ + LOAD_pcr_##SZ(result, ptr, buf, (LEN), (INC)); \ + check(result, (RES4)); \ + checkp(ptr, &buf[(4 * (INC) * (SIZE)) % (LEN)]); \ +} + +TEST_pcr(loadalignb_pcr, b, 1, 2, 1, + 0x01ffffffffffffffLL, 0x0201ffffffffffffLL, + 0x010201ffffffffffLL, 0x02010201ffffffffLL) +TEST_pcr(loadalignh_pcr, h, 2, 4, 1, + 0x0201ffffffffffffLL, 0x04030201ffffffffLL, + 0x020104030201ffffLL, 0x0403020104030201LL) + +int main() +{ + init_buf(); + + test_loadalignb_io(); + test_loadalignh_io(); + + test_loadalignb_ur(); + test_loadalignh_ur(); + + test_loadalignb_ap(); + test_loadalignh_ap(); + + test_loadalignb_pr(); + test_loadalignh_pr(); + + test_loadalignb_pbr(); + test_loadalignh_pbr(); + + test_loadalignb_pi(); + test_loadalignh_pi(); + + test_loadalignb_pci(); + test_loadalignh_pci(); + + test_loadalignb_pcr(); + test_loadalignh_pcr(); + + puts(err ? "FAIL" : "PASS"); + return err ? 1 : 0; +} diff --git a/tests/tcg/hexagon/load_unpack.c b/tests/tcg/hexagon/load_unpack.c new file mode 100644 index 0000000..3575a37 --- /dev/null +++ b/tests/tcg/hexagon/load_unpack.c @@ -0,0 +1,474 @@ +/* + * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see <http://www.gnu.org/licenses/>. + */ + +/* + * Test load unpack instructions + * + * Example + * r0 = memubh(r1+#0) + * loads a half word from memory and zero-extends the 2 bytes to form a word + * + * For each addressing mode, there are 4 tests + * bzw2 unsigned 2 elements + * bsw2 signed 2 elements + * bzw4 unsigned 4 elements + * bsw4 signed 4 elements + * There are 8 addressing modes, for a total of 32 instructions to test + */ + +#include <stdio.h> +#include <string.h> + +int err; + +char buf[16] __attribute__((aligned(1 << 16))); + +void init_buf(void) +{ + int i; + for (i = 0; i < 16; i++) { + int sign = i % 2 == 0 ? 
0x80 : 0; + buf[i] = sign | (i + 1); + } +} + +void __check(int line, long long result, long long expect) +{ + if (result != expect) { + printf("ERROR at line %d: 0x%08llx != 0x%08llx\n", + line, result, expect); + err++; + } +} + +#define check(RES, EXP) __check(__LINE__, RES, EXP) + +void __checkp(int line, void *p, void *expect) +{ + if (p != expect) { + printf("ERROR at line %d: 0x%p != 0x%p\n", line, p, expect); + err++; + } +} + +#define checkp(RES, EXP) __checkp(__LINE__, RES, EXP) + +/* + **************************************************************************** + * _io addressing mode (addr + offset) + */ +#define BxW_LOAD_io(SZ, RES, ADDR, OFF) \ + __asm__( \ + "%0 = mem" #SZ "(%1+#" #OFF ")\n\t" \ + : "=r"(RES) \ + : "r"(ADDR)) +#define BxW_LOAD_io_Z(RES, ADDR, OFF) \ + BxW_LOAD_io(ubh, RES, ADDR, OFF) +#define BxW_LOAD_io_S(RES, ADDR, OFF) \ + BxW_LOAD_io(bh, RES, ADDR, OFF) + +#define TEST_io(NAME, TYPE, SIGN, SIZE, EXT, EXP1, EXP2, EXP3, EXP4) \ +void test_##NAME(void) \ +{ \ + TYPE result; \ + init_buf(); \ + BxW_LOAD_io_##SIGN(result, buf, 0 * (SIZE)); \ + check(result, (EXP1) | (EXT)); \ + BxW_LOAD_io_##SIGN(result, buf, 1 * (SIZE)); \ + check(result, (EXP2) | (EXT)); \ + BxW_LOAD_io_##SIGN(result, buf, 2 * (SIZE)); \ + check(result, (EXP3) | (EXT)); \ + BxW_LOAD_io_##SIGN(result, buf, 3 * (SIZE)); \ + check(result, (EXP4) | (EXT)); \ +} + + +TEST_io(loadbzw2_io, int, Z, 2, 0x00000000, + 0x00020081, 0x00040083, 0x00060085, 0x00080087) +TEST_io(loadbsw2_io, int, S, 2, 0x0000ff00, + 0x00020081, 0x00040083, 0x00060085, 0x00080087) +TEST_io(loadbzw4_io, long long, Z, 4, 0x0000000000000000LL, + 0x0004008300020081LL, 0x0008008700060085LL, + 0x000c008b000a0089LL, 0x0010008f000e008dLL) +TEST_io(loadbsw4_io, long long, S, 4, 0x0000ff000000ff00LL, + 0x0004008300020081LL, 0x0008008700060085LL, + 0x000c008b000a0089LL, 0x0010008f000e008dLL) + +/* + **************************************************************************** + * _ur addressing mode (index << offset + base) + */ +#define BxW_LOAD_ur(SZ, RES, SHIFT, IDX) \ + __asm__( \ + "%0 = mem" #SZ "(%1<<#" #SHIFT " + ##buf)\n\t" \ + : "=r"(RES) \ + : "r"(IDX)) +#define BxW_LOAD_ur_Z(RES, SHIFT, IDX) \ + BxW_LOAD_ur(ubh, RES, SHIFT, IDX) +#define BxW_LOAD_ur_S(RES, SHIFT, IDX) \ + BxW_LOAD_ur(bh, RES, SHIFT, IDX) + +#define TEST_ur(NAME, TYPE, SIGN, SHIFT, EXT, RES1, RES2, RES3, RES4) \ +void test_##NAME(void) \ +{ \ + TYPE result; \ + init_buf(); \ + BxW_LOAD_ur_##SIGN(result, (SHIFT), 0); \ + check(result, (RES1) | (EXT)); \ + BxW_LOAD_ur_##SIGN(result, (SHIFT), 1); \ + check(result, (RES2) | (EXT)); \ + BxW_LOAD_ur_##SIGN(result, (SHIFT), 2); \ + check(result, (RES3) | (EXT)); \ + BxW_LOAD_ur_##SIGN(result, (SHIFT), 3); \ + check(result, (RES4) | (EXT)); \ +} \ + +TEST_ur(loadbzw2_ur, int, Z, 1, 0x00000000, + 0x00020081, 0x00040083, 0x00060085, 0x00080087) +TEST_ur(loadbsw2_ur, int, S, 1, 0x0000ff00, + 0x00020081, 0x00040083, 0x00060085, 0x00080087) +TEST_ur(loadbzw4_ur, long long, Z, 2, 0x0000000000000000LL, + 0x0004008300020081LL, 0x0008008700060085LL, + 0x000c008b000a0089LL, 0x0010008f000e008dLL) +TEST_ur(loadbsw4_ur, long long, S, 2, 0x0000ff000000ff00LL, + 0x0004008300020081LL, 0x0008008700060085LL, + 0x000c008b000a0089LL, 0x0010008f000e008dLL) + +/* + **************************************************************************** + * _ap addressing mode (addr = base) + */ +#define BxW_LOAD_ap(SZ, RES, PTR, ADDR) \ + __asm__( \ + "%0 = mem" #SZ "(%1 = ##" #ADDR ")\n\t" \ + : "=r"(RES), "=r"(PTR)) +#define BxW_LOAD_ap_Z(RES, 
PTR, ADDR) \ + BxW_LOAD_ap(ubh, RES, PTR, ADDR) +#define BxW_LOAD_ap_S(RES, PTR, ADDR) \ + BxW_LOAD_ap(bh, RES, PTR, ADDR) + +#define TEST_ap(NAME, TYPE, SIGN, SIZE, EXT, RES1, RES2, RES3, RES4) \ +void test_##NAME(void) \ +{ \ + TYPE result; \ + void *ptr; \ + init_buf(); \ + BxW_LOAD_ap_##SIGN(result, ptr, (buf + 0 * (SIZE))); \ + check(result, (RES1) | (EXT)); \ + checkp(ptr, &buf[0 * (SIZE)]); \ + BxW_LOAD_ap_##SIGN(result, ptr, (buf + 1 * (SIZE))); \ + check(result, (RES2) | (EXT)); \ + checkp(ptr, &buf[1 * (SIZE)]); \ + BxW_LOAD_ap_##SIGN(result, ptr, (buf + 2 * (SIZE))); \ + check(result, (RES3) | (EXT)); \ + checkp(ptr, &buf[2 * (SIZE)]); \ + BxW_LOAD_ap_##SIGN(result, ptr, (buf + 3 * (SIZE))); \ + check(result, (RES4) | (EXT)); \ + checkp(ptr, &buf[3 * (SIZE)]); \ +} + +TEST_ap(loadbzw2_ap, int, Z, 2, 0x00000000, + 0x00020081, 0x00040083, 0x00060085, 0x00080087) +TEST_ap(loadbsw2_ap, int, S, 2, 0x0000ff00, + 0x00020081, 0x00040083, 0x00060085, 0x00080087) +TEST_ap(loadbzw4_ap, long long, Z, 4, 0x0000000000000000LL, + 0x0004008300020081LL, 0x0008008700060085LL, + 0x000c008b000a0089LL, 0x0010008f000e008dLL) +TEST_ap(loadbsw4_ap, long long, S, 4, 0x0000ff000000ff00LL, + 0x0004008300020081LL, 0x0008008700060085LL, + 0x000c008b000a0089LL, 0x0010008f000e008dLL) + +/* + **************************************************************************** + * _rp addressing mode (addr ++ modifer-reg) + */ +#define BxW_LOAD_pr(SZ, RES, PTR, INC) \ + __asm__( \ + "m0 = %2\n\t" \ + "%0 = mem" #SZ "(%1++m0)\n\t" \ + : "=r"(RES), "+r"(PTR) \ + : "r"(INC) \ + : "m0") +#define BxW_LOAD_pr_Z(RES, PTR, INC) \ + BxW_LOAD_pr(ubh, RES, PTR, INC) +#define BxW_LOAD_pr_S(RES, PTR, INC) \ + BxW_LOAD_pr(bh, RES, PTR, INC) + +#define TEST_pr(NAME, TYPE, SIGN, SIZE, EXT, RES1, RES2, RES3, RES4) \ +void test_##NAME(void) \ +{ \ + TYPE result; \ + void *ptr = buf; \ + init_buf(); \ + BxW_LOAD_pr_##SIGN(result, ptr, (SIZE)); \ + check(result, (RES1) | (EXT)); \ + checkp(ptr, &buf[1 * (SIZE)]); \ + BxW_LOAD_pr_##SIGN(result, ptr, (SIZE)); \ + check(result, (RES2) | (EXT)); \ + checkp(ptr, &buf[2 * (SIZE)]); \ + BxW_LOAD_pr_##SIGN(result, ptr, (SIZE)); \ + check(result, (RES3) | (EXT)); \ + checkp(ptr, &buf[3 * (SIZE)]); \ + BxW_LOAD_pr_##SIGN(result, ptr, (SIZE)); \ + check(result, (RES4) | (EXT)); \ + checkp(ptr, &buf[4 * (SIZE)]); \ +} + +TEST_pr(loadbzw2_pr, int, Z, 2, 0x00000000, + 0x00020081, 0x0040083, 0x00060085, 0x00080087) +TEST_pr(loadbsw2_pr, int, S, 2, 0x0000ff00, + 0x00020081, 0x0040083, 0x00060085, 0x00080087) +TEST_pr(loadbzw4_pr, long long, Z, 4, 0x0000000000000000LL, + 0x0004008300020081LL, 0x0008008700060085LL, + 0x000c008b000a0089LL, 0x0010008f000e008dLL) +TEST_pr(loadbsw4_pr, long long, S, 4, 0x0000ff000000ff00LL, + 0x0004008300020081LL, 0x0008008700060085LL, + 0x000c008b000a0089LL, 0x0010008f000e008dLL) + +/* + **************************************************************************** + * _pbr addressing mode (addr ++ modifer-reg:brev) + */ +#define BxW_LOAD_pbr(SZ, RES, PTR) \ + __asm__( \ + "r4 = #(1 << (16 - 3))\n\t" \ + "m0 = r4\n\t" \ + "%0 = mem" #SZ "(%1++m0:brev)\n\t" \ + : "=r"(RES), "+r"(PTR) \ + : \ + : "r4", "m0") +#define BxW_LOAD_pbr_Z(RES, PTR) \ + BxW_LOAD_pbr(ubh, RES, PTR) +#define BxW_LOAD_pbr_S(RES, PTR) \ + BxW_LOAD_pbr(bh, RES, PTR) + +#define TEST_pbr(NAME, TYPE, SIGN, EXT, RES1, RES2, RES3, RES4) \ +void test_##NAME(void) \ +{ \ + TYPE result; \ + void *ptr = buf; \ + init_buf(); \ + BxW_LOAD_pbr_##SIGN(result, ptr); \ + check(result, (RES1) | (EXT)); \ + 
BxW_LOAD_pbr_##SIGN(result, ptr); \ + check(result, (RES2) | (EXT)); \ + BxW_LOAD_pbr_##SIGN(result, ptr); \ + check(result, (RES3) | (EXT)); \ + BxW_LOAD_pbr_##SIGN(result, ptr); \ + check(result, (RES4) | (EXT)); \ +} + +TEST_pbr(loadbzw2_pbr, int, Z, 0x00000000, + 0x00020081, 0x00060085, 0x00040083, 0x00080087) +TEST_pbr(loadbsw2_pbr, int, S, 0x0000ff00, + 0x00020081, 0x00060085, 0x00040083, 0x00080087) +TEST_pbr(loadbzw4_pbr, long long, Z, 0x0000000000000000LL, + 0x0004008300020081LL, 0x0008008700060085LL, + 0x0006008500040083LL, 0x000a008900080087LL) +TEST_pbr(loadbsw4_pbr, long long, S, 0x0000ff000000ff00LL, + 0x0004008300020081LL, 0x0008008700060085LL, + 0x0006008500040083LL, 0x000a008900080087LL) + +/* + **************************************************************************** + * _pi addressing mode (addr ++ inc) + */ +#define BxW_LOAD_pi(SZ, RES, PTR, INC) \ + __asm__( \ + "%0 = mem" #SZ "(%1++#" #INC ")\n\t" \ + : "=r"(RES), "+r"(PTR)) +#define BxW_LOAD_pi_Z(RES, PTR, INC) \ + BxW_LOAD_pi(ubh, RES, PTR, INC) +#define BxW_LOAD_pi_S(RES, PTR, INC) \ + BxW_LOAD_pi(bh, RES, PTR, INC) + +#define TEST_pi(NAME, TYPE, SIGN, INC, EXT, RES1, RES2, RES3, RES4) \ +void test_##NAME(void) \ +{ \ + TYPE result; \ + void *ptr = buf; \ + init_buf(); \ + BxW_LOAD_pi_##SIGN(result, ptr, (INC)); \ + check(result, (RES1) | (EXT)); \ + checkp(ptr, &buf[1 * (INC)]); \ + BxW_LOAD_pi_##SIGN(result, ptr, (INC)); \ + check(result, (RES2) | (EXT)); \ + checkp(ptr, &buf[2 * (INC)]); \ + BxW_LOAD_pi_##SIGN(result, ptr, (INC)); \ + check(result, (RES3) | (EXT)); \ + checkp(ptr, &buf[3 * (INC)]); \ + BxW_LOAD_pi_##SIGN(result, ptr, (INC)); \ + check(result, (RES4) | (EXT)); \ + checkp(ptr, &buf[4 * (INC)]); \ +} + +TEST_pi(loadbzw2_pi, int, Z, 2, 0x00000000, + 0x00020081, 0x00040083, 0x00060085, 0x00080087) +TEST_pi(loadbsw2_pi, int, S, 2, 0x0000ff00, + 0x00020081, 0x00040083, 0x00060085, 0x00080087) +TEST_pi(loadbzw4_pi, long long, Z, 4, 0x0000000000000000LL, + 0x0004008300020081LL, 0x0008008700060085LL, + 0x000c008b000a0089LL, 0x0010008f000e008dLL) +TEST_pi(loadbsw4_pi, long long, S, 4, 0x0000ff000000ff00LL, + 0x0004008300020081LL, 0x0008008700060085LL, + 0x000c008b000a0089LL, 0x0010008f000e008dLL) + +/* + **************************************************************************** + * _pci addressing mode (addr ++ inc:circ) + */ +#define BxW_LOAD_pci(SZ, RES, PTR, START, LEN, INC) \ + __asm__( \ + "r4 = %3\n\t" \ + "m0 = r4\n\t" \ + "cs0 = %2\n\t" \ + "%0 = mem" #SZ "(%1++#" #INC ":circ(m0))\n\t" \ + : "=r"(RES), "+r"(PTR) \ + : "r"(START), "r"(LEN) \ + : "r4", "m0", "cs0") +#define BxW_LOAD_pci_Z(RES, PTR, START, LEN, INC) \ + BxW_LOAD_pci(ubh, RES, PTR, START, LEN, INC) +#define BxW_LOAD_pci_S(RES, PTR, START, LEN, INC) \ + BxW_LOAD_pci(bh, RES, PTR, START, LEN, INC) + +#define TEST_pci(NAME, TYPE, SIGN, LEN, INC, EXT, RES1, RES2, RES3, RES4) \ +void test_##NAME(void) \ +{ \ + TYPE result; \ + void *ptr = buf; \ + init_buf(); \ + BxW_LOAD_pci_##SIGN(result, ptr, buf, (LEN), (INC)); \ + check(result, (RES1) | (EXT)); \ + checkp(ptr, &buf[(1 * (INC)) % (LEN)]); \ + BxW_LOAD_pci_##SIGN(result, ptr, buf, (LEN), (INC)); \ + check(result, (RES2) | (EXT)); \ + checkp(ptr, &buf[(2 * (INC)) % (LEN)]); \ + BxW_LOAD_pci_##SIGN(result, ptr, buf, (LEN), (INC)); \ + check(result, (RES3) | (EXT)); \ + checkp(ptr, &buf[(3 * (INC)) % (LEN)]); \ + BxW_LOAD_pci_##SIGN(result, ptr, buf, (LEN), (INC)); \ + check(result, (RES4) | (EXT)); \ + checkp(ptr, &buf[(4 * (INC)) % (LEN)]); \ +} + +TEST_pci(loadbzw2_pci, int, Z, 6, 
2, 0x00000000, + 0x00020081, 0x00040083, 0x00060085, 0x00020081) +TEST_pci(loadbsw2_pci, int, S, 6, 2, 0x0000ff00, + 0x00020081, 0x00040083, 0x00060085, 0x00020081) +TEST_pci(loadbzw4_pci, long long, Z, 8, 4, 0x0000000000000000LL, + 0x0004008300020081LL, 0x0008008700060085LL, + 0x0004008300020081LL, 0x0008008700060085LL) +TEST_pci(loadbsw4_pci, long long, S, 8, 4, 0x0000ff000000ff00LL, + 0x0004008300020081LL, 0x0008008700060085LL, + 0x0004008300020081LL, 0x0008008700060085LL) + +/* + **************************************************************************** + * _pcr addressing mode (addr ++ I:circ(modifier-reg)) + */ +#define BxW_LOAD_pcr(SZ, RES, PTR, START, LEN, INC) \ + __asm__( \ + "r4 = %2\n\t" \ + "m1 = r4\n\t" \ + "cs1 = %3\n\t" \ + "%0 = mem" #SZ "(%1++I:circ(m1))\n\t" \ + : "=r"(RES), "+r"(PTR) \ + : "r"((((INC) & 0x7f) << 17) | ((LEN) & 0x1ffff)), \ + "r"(START) \ + : "r4", "m1", "cs1") +#define BxW_LOAD_pcr_Z(RES, PTR, START, LEN, INC) \ + BxW_LOAD_pcr(ubh, RES, PTR, START, LEN, INC) +#define BxW_LOAD_pcr_S(RES, PTR, START, LEN, INC) \ + BxW_LOAD_pcr(bh, RES, PTR, START, LEN, INC) + +#define TEST_pcr(NAME, TYPE, SIGN, SIZE, LEN, INC, \ + EXT, RES1, RES2, RES3, RES4) \ +void test_##NAME(void) \ +{ \ + TYPE result; \ + void *ptr = buf; \ + init_buf(); \ + BxW_LOAD_pcr_##SIGN(result, ptr, buf, (LEN), (INC)); \ + check(result, (RES1) | (EXT)); \ + checkp(ptr, &buf[(1 * (INC) * (SIZE)) % (LEN)]); \ + BxW_LOAD_pcr_##SIGN(result, ptr, buf, (LEN), (INC)); \ + check(result, (RES2) | (EXT)); \ + checkp(ptr, &buf[(2 * (INC) * (SIZE)) % (LEN)]); \ + BxW_LOAD_pcr_##SIGN(result, ptr, buf, (LEN), (INC)); \ + check(result, (RES3) | (EXT)); \ + checkp(ptr, &buf[(3 * (INC) * (SIZE)) % (LEN)]); \ + BxW_LOAD_pcr_##SIGN(result, ptr, buf, (LEN), (INC)); \ + check(result, (RES4) | (EXT)); \ + checkp(ptr, &buf[(4 * (INC) * (SIZE)) % (LEN)]); \ +} + +TEST_pcr(loadbzw2_pcr, int, Z, 2, 8, 2, 0x00000000, + 0x00020081, 0x00060085, 0x00020081, 0x00060085) +TEST_pcr(loadbsw2_pcr, int, S, 2, 8, 2, 0x0000ff00, + 0x00020081, 0x00060085, 0x00020081, 0x00060085) +TEST_pcr(loadbzw4_pcr, long long, Z, 4, 8, 1, 0x0000000000000000LL, + 0x0004008300020081LL, 0x0008008700060085LL, + 0x0004008300020081LL, 0x0008008700060085LL) +TEST_pcr(loadbsw4_pcr, long long, S, 4, 8, 1, 0x0000ff000000ff00LL, + 0x0004008300020081LL, 0x0008008700060085LL, + 0x0004008300020081LL, 0x0008008700060085LL) + +int main() +{ + test_loadbzw2_io(); + test_loadbsw2_io(); + test_loadbzw4_io(); + test_loadbsw4_io(); + + test_loadbzw2_ur(); + test_loadbsw2_ur(); + test_loadbzw4_ur(); + test_loadbsw4_ur(); + + test_loadbzw2_ap(); + test_loadbsw2_ap(); + test_loadbzw4_ap(); + test_loadbsw4_ap(); + + test_loadbzw2_pr(); + test_loadbsw2_pr(); + test_loadbzw4_pr(); + test_loadbsw4_pr(); + + test_loadbzw2_pbr(); + test_loadbsw2_pbr(); + test_loadbzw4_pbr(); + test_loadbsw4_pbr(); + + test_loadbzw2_pi(); + test_loadbsw2_pi(); + test_loadbzw4_pi(); + test_loadbsw4_pi(); + + test_loadbzw2_pci(); + test_loadbsw2_pci(); + test_loadbzw4_pci(); + test_loadbsw4_pci(); + + test_loadbzw2_pcr(); + test_loadbsw2_pcr(); + test_loadbzw4_pcr(); + test_loadbsw4_pcr(); + + puts(err ? "FAIL" : "PASS"); + return err ? 
diff --git a/tests/tcg/hexagon/misc.c b/tests/tcg/hexagon/misc.c
index 458759f..17c3919 100644
--- a/tests/tcg/hexagon/misc.c
+++ b/tests/tcg/hexagon/misc.c
@@ -231,6 +231,14 @@ static void check(int val, int expect)
     }
 }
 
+static void check64(long long val, long long expect)
+{
+    if (val != expect) {
+        printf("ERROR: 0x%016llx != 0x%016llx\n", val, expect);
+        err++;
+    }
+}
+
 uint32_t init[10] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 };
 uint32_t array[10];
 
@@ -264,8 +272,36 @@ static long long creg_pair(int x, int y)
     return retval;
 }
 
+static long long decbin(long long x, long long y, int *pred)
+{
+    long long retval;
+    asm ("%0 = decbin(%2, %3)\n\t"
+         "%1 = p0\n\t"
+         : "=r"(retval), "=r"(*pred)
+         : "r"(x), "r"(y));
+    return retval;
+}
+
+/* Check that predicates are auto-and'ed in a packet */
+static int auto_and(void)
+{
+    int retval;
+    asm ("r5 = #1\n\t"
+         "{\n\t"
+         "    p0 = cmp.eq(r1, #1)\n\t"
+         "    p0 = cmp.eq(r1, #2)\n\t"
+         "}\n\t"
+         "%0 = p0\n\t"
+         : "=r"(retval)
+         :
+         : "r5", "p0");
+    return retval;
+}
+
 int main()
 {
+    long long res64;
+    int pred;
     memcpy(array, init, sizeof(array));
     S4_storerhnew_rr(array, 4, 0xffff);
 
@@ -375,6 +411,17 @@ int main()
     res = test_clrtnew(2, 7);
     check(res, 7);
 
+    res64 = decbin(0xf0f1f2f3f4f5f6f7LL, 0x7f6f5f4f3f2f1f0fLL, &pred);
+    check64(res64, 0x357980003700010cLL);
+    check(pred, 0);
+
+    res64 = decbin(0xfLL, 0x1bLL, &pred);
+    check64(res64, 0x78000100LL);
+    check(pred, 1);
+
+    res = auto_and();
+    check(res, 0);
+
     puts(err ? "FAIL" : "PASS");
     return err;
 }
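The auto_and() test added to misc.c relies on the rule that two writes to the same predicate register within one packet are ANDed together. As a sanity check (not part of the patch), the packet in that test is equivalent to the following plain C, which is 0 for every r1, matching check(res, 0) (auto_and_model() is a hypothetical name):

    /* r1 cannot equal both 1 and 2, so the and'ed predicate is always false. */
    static int auto_and_model(int r1)
    {
        return (r1 == 1) && (r1 == 2);
    }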
diff --git a/tests/tcg/hexagon/multi_result.c b/tests/tcg/hexagon/multi_result.c
new file mode 100644
index 0000000..52997b3
--- /dev/null
+++ b/tests/tcg/hexagon/multi_result.c
@@ -0,0 +1,282 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <stdio.h>
+
+static int sfrecipa(int Rs, int Rt, int *pred_result)
+{
+    int result;
+    int predval;
+
+    asm volatile("%0,p0 = sfrecipa(%2, %3)\n\t"
+                 "%1 = p0\n\t"
+                 : "+r"(result), "=r"(predval)
+                 : "r"(Rs), "r"(Rt)
+                 : "p0");
+    *pred_result = predval;
+    return result;
+}
+
+static int sfinvsqrta(int Rs, int *pred_result)
+{
+    int result;
+    int predval;
+
+    asm volatile("%0,p0 = sfinvsqrta(%2)\n\t"
+                 "%1 = p0\n\t"
+                 : "+r"(result), "=r"(predval)
+                 : "r"(Rs)
+                 : "p0");
+    *pred_result = predval;
+    return result;
+}
+
+static long long vacsh(long long Rxx, long long Rss, long long Rtt,
+                       int *pred_result, int *ovf_result)
+{
+    long long result = Rxx;
+    int predval;
+    int usr;
+
+    /*
+     * This instruction can set bit 0 (OVF/overflow) in usr
+     * Clear the bit first, then return that bit to the caller
+     */
+    asm volatile("r2 = usr\n\t"
+                 "r2 = clrbit(r2, #0)\n\t"        /* clear overflow bit */
+                 "usr = r2\n\t"
+                 "%0,p0 = vacsh(%3, %4)\n\t"
+                 "%1 = p0\n\t"
+                 "%2 = usr\n\t"
+                 : "+r"(result), "=r"(predval), "=r"(usr)
+                 : "r"(Rss), "r"(Rtt)
+                 : "r2", "p0", "usr");
+    *pred_result = predval;
+    *ovf_result = (usr & 1);
+    return result;
+}
+
+static long long vminub(long long Rtt, long long Rss,
+                        int *pred_result)
+{
+    long long result;
+    int predval;
+
+    asm volatile("%0,p0 = vminub(%2, %3)\n\t"
+                 "%1 = p0\n\t"
+                 : "=r"(result), "=r"(predval)
+                 : "r"(Rtt), "r"(Rss)
+                 : "p0");
+    *pred_result = predval;
+    return result;
+}
+
+static long long add_carry(long long Rss, long long Rtt,
+                           int pred_in, int *pred_result)
+{
+    long long result;
+    int predval = pred_in;
+
+    asm volatile("p0 = %1\n\t"
+                 "%0 = add(%2, %3, p0):carry\n\t"
+                 "%1 = p0\n\t"
+                 : "=r"(result), "+r"(predval)
+                 : "r"(Rss), "r"(Rtt)
+                 : "p0");
+    *pred_result = predval;
+    return result;
+}
+
+static long long sub_carry(long long Rss, long long Rtt,
+                           int pred_in, int *pred_result)
+{
+    long long result;
+    int predval = pred_in;
+
+    asm volatile("p0 = !cmp.eq(%1, #0)\n\t"
+                 "%0 = sub(%2, %3, p0):carry\n\t"
+                 "%1 = p0\n\t"
+                 : "=r"(result), "+r"(predval)
+                 : "r"(Rss), "r"(Rtt)
+                 : "p0");
+    *pred_result = predval;
+    return result;
+}
+
+int err;
+
+static void check_ll(long long val, long long expect)
+{
+    if (val != expect) {
+        printf("ERROR: 0x%016llx != 0x%016llx\n", val, expect);
+        err++;
+    }
+}
+
+static void check(int val, int expect)
+{
+    if (val != expect) {
+        printf("ERROR: 0x%08x != 0x%08x\n", val, expect);
+        err++;
+    }
+}
+
+static void check_p(int val, int expect)
+{
+    if (val != expect) {
+        printf("ERROR: 0x%02x != 0x%02x\n", val, expect);
+        err++;
+    }
+}
+
+static void test_sfrecipa()
+{
+    int res;
+    int pred_result;
+
+    res = sfrecipa(0x04030201, 0x05060708, &pred_result);
+    check(res, 0x59f38001);
+    check_p(pred_result, 0x00);
+}
+
+static void test_sfinvsqrta()
+{
+    int res;
+    int pred_result;
+
+    res = sfinvsqrta(0x04030201, &pred_result);
+    check(res, 0x4d330000);
+    check_p(pred_result, 0xe0);
+
+    res = sfinvsqrta(0x0, &pred_result);
+    check(res, 0x3f800000);
+    check_p(pred_result, 0x0);
+}
+
+static void test_vacsh()
+{
+    long long res64;
+    int pred_result;
+    int ovf_result;
+
+    res64 = vacsh(0x0004000300020001LL,
+                  0x0001000200030004LL,
+                  0x0000000000000000LL, &pred_result, &ovf_result);
+    check_ll(res64, 0x0004000300030004LL);
+    check_p(pred_result, 0xf0);
+    check(ovf_result, 0);
+
+    res64 = vacsh(0x0004000300020001LL,
+                  0x0001000200030004LL,
+                  0x000affff000d0000LL, &pred_result, &ovf_result);
+    check_ll(res64, 0x000e0003000f0004LL);
+    check_p(pred_result, 0xcc);
+    check(ovf_result, 0);
+
+    res64 = vacsh(0x00047fff00020001LL,
+                  0x00017fff00030004LL,
+                  0x000a0fff000d0000LL, &pred_result, &ovf_result);
+    check_ll(res64, 0x000e7fff000f0004LL);
+    check_p(pred_result, 0xfc);
+    check(ovf_result, 1);
+
+    res64 = vacsh(0x0004000300020001LL,
+                  0x0001000200030009LL,
+                  0x000affff000d0001LL, &pred_result, &ovf_result);
+    check_ll(res64, 0x000e0003000f0008LL);
+    check_p(pred_result, 0xcc);
+    check(ovf_result, 0);
+}
+
+static void test_vminub()
+{
+    long long res64;
+    int pred_result;
+
+    res64 = vminub(0x0807060504030201LL,
+                   0x0102030405060708LL,
+                   &pred_result);
+    check_ll(res64, 0x0102030404030201LL);
+    check_p(pred_result, 0xf0);
+
+    res64 = vminub(0x0802060405030701LL,
+                   0x0107030504060208LL,
+                   &pred_result);
+    check_ll(res64, 0x0102030404030201LL);
+    check_p(pred_result, 0xaa);
+}
+
+static void test_add_carry()
+{
+    long long res64;
+    int pred_result;
+
+    res64 = add_carry(0x0000000000000000LL,
+                      0xffffffffffffffffLL,
+                      1, &pred_result);
+    check_ll(res64, 0x0000000000000000LL);
+    check_p(pred_result, 0xff);
+
+    res64 = add_carry(0x0000000100000000LL,
+                      0xffffffffffffffffLL,
+                      0, &pred_result);
+    check_ll(res64, 0x00000000ffffffffLL);
+    check_p(pred_result, 0xff);
+
+    res64 = add_carry(0x0000000100000000LL,
+                      0xffffffffffffffffLL,
+                      0, &pred_result);
+    check_ll(res64, 0x00000000ffffffffLL);
+    check_p(pred_result, 0xff);
+}
+
+static void test_sub_carry()
+{
+    long long res64;
+    int pred_result;
+
+    res64 = sub_carry(0x0000000000000000LL,
+                      0x0000000000000000LL,
+                      1, &pred_result);
+    check_ll(res64, 0x0000000000000000LL);
+    check_p(pred_result, 0xff);
+
+    res64 = sub_carry(0x0000000100000000LL,
+                      0x0000000000000000LL,
+                      0, &pred_result);
+    check_ll(res64, 0x00000000ffffffffLL);
+    check_p(pred_result, 0xff);
+
+    res64 = sub_carry(0x0000000100000000LL,
+                      0x0000000000000000LL,
+                      0, &pred_result);
+    check_ll(res64, 0x00000000ffffffffLL);
+    check_p(pred_result, 0xff);
+}
+
+int main()
+{
+    test_sfrecipa();
+    test_sfinvsqrta();
+    test_vacsh();
+    test_vminub();
+    test_add_carry();
+    test_sub_carry();
+
+    puts(err ? "FAIL" : "PASS");
+    return err;
+}
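The add_carry()/sub_carry() expectations in multi_result.c follow from plain 64-bit addition with a carry in and a carry out, with sub(...):carry acting like an add of the complemented second operand; the tests read the resulting predicate back as 0xff (carry set) or 0x00. A rough C model under those assumptions (the *_model() names are hypothetical, not part of the patch):

    #include <stdint.h>

    /* Model of add(Rss, Rtt, p0):carry - a 64-bit add plus carry in, with the
     * carry out returned encoded as 0xff/0x00 the way the tests read p0 back. */
    static uint64_t add_carry_model(uint64_t rss, uint64_t rtt, int carry_in,
                                    int *pred_out)
    {
        uint64_t partial = rss + rtt;
        uint64_t sum = partial + (carry_in ? 1 : 0);
        int carry_out = (partial < rss) || (sum < partial); /* wrapped past 2^64 */

        *pred_out = carry_out ? 0xff : 0x00;
        return sum;
    }

    /* sub(Rss, Rtt, p0):carry behaves like adding the complement of Rtt. */
    static uint64_t sub_carry_model(uint64_t rss, uint64_t rtt, int carry_in,
                                    int *pred_out)
    {
        return add_carry_model(rss, ~rtt, carry_in, pred_out);
    }

For example, add_carry_model(0, 0xffffffffffffffffULL, 1, &p) returns 0 with p == 0xff, matching the first test_add_carry() case above.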