From: Paolo Bonzini
Subject: [PULL 12/39] tests/tcg: i386: extend BMI test
Date: Thu, 1 Sep 2022 20:24:02 +0200

Cover all BMI1 and BMI2 instructions, both 32- and 64-bit.
Due to the use of inline functions, the test now has to be compiled with -O2.
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
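
For reference, each macro instantiation generates a small inline wrapper
around a single instruction; for example, insn2q(pext, src, "r", mask, "rm")
expands to roughly:

    static inline uint64_t pextq(uint64_t src, uint64_t mask)
    {
        uint64_t result64;
        /* AT&T operand order: pextq mask, src, dest */
        asm volatile ("pextq %2, %1, %0"
                      : "=r"(result64) : "r"(src), "rm"(mask));
        return result64;
    }

so the test body can write result64 = pextq(ehlo, mask) instead of
open-coding the asm for every case. The rorx wrappers pass the rotate
count through an "i" (immediate) constraint, which only compiles once
the wrapper has been inlined and the count folded to a constant, hence
the new CFLAGS=-O2. As before, the test is run with -cpu max so that
TCG advertises BMI1/BMI2.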
tests/tcg/i386/Makefile.target | 1 +
tests/tcg/i386/test-i386-bmi2.c | 169 ++++++++++++++++++++++++++++++--
2 files changed, 162 insertions(+), 8 deletions(-)
diff --git a/tests/tcg/i386/Makefile.target b/tests/tcg/i386/Makefile.target
index 5614838ffc..646b4ac13e 100644
--- a/tests/tcg/i386/Makefile.target
+++ b/tests/tcg/i386/Makefile.target
@@ -18,6 +18,7 @@ test-i386-pcmpistri: CFLAGS += -msse4.2
 run-test-i386-pcmpistri: QEMU_OPTS += -cpu max
 run-plugin-test-i386-pcmpistri-%: QEMU_OPTS += -cpu max
 
+test-i386-bmi2: CFLAGS=-O2
 run-test-i386-bmi2: QEMU_OPTS += -cpu max
 run-plugin-test-i386-bmi2-%: QEMU_OPTS += -cpu max
 
diff --git a/tests/tcg/i386/test-i386-bmi2.c b/tests/tcg/i386/test-i386-bmi2.c
index 935a4d2a73..5fadf47510 100644
--- a/tests/tcg/i386/test-i386-bmi2.c
+++ b/tests/tcg/i386/test-i386-bmi2.c
@@ -1,6 +1,66 @@
 /* See if various BMI2 instructions give expected results */
 #include <assert.h>
 #include <stdint.h>
+#include <stdio.h>
+
+#define insn1q(name, arg0)                                                \
+static inline uint64_t name##q(uint64_t arg0)                             \
+{                                                                         \
+    uint64_t result64;                                                    \
+    asm volatile (#name "q %1, %0" : "=r"(result64) : "rm"(arg0));        \
+    return result64;                                                      \
+}
+
+#define insn1l(name, arg0)                                                \
+static inline uint32_t name##l(uint32_t arg0)                             \
+{                                                                         \
+    uint32_t result32;                                                    \
+    asm volatile (#name "l %k1, %k0" : "=r"(result32) : "rm"(arg0));      \
+    return result32;                                                      \
+}
+
+#define insn2q(name, arg0, c0, arg1, c1)                                  \
+static inline uint64_t name##q(uint64_t arg0, uint64_t arg1)              \
+{                                                                         \
+    uint64_t result64;                                                    \
+    asm volatile (#name "q %2, %1, %0" : "=r"(result64) : c0(arg0), c1(arg1)); \
+    return result64;                                                      \
+}
+
+#define insn2l(name, arg0, c0, arg1, c1)                                  \
+static inline uint32_t name##l(uint32_t arg0, uint32_t arg1)              \
+{                                                                         \
+    uint32_t result32;                                                    \
+    asm volatile (#name "l %k2, %k1, %k0" : "=r"(result32) : c0(arg0), c1(arg1)); \
+    return result32;                                                      \
+}
+
+#ifdef __x86_64
+insn2q(pext, src, "r", mask, "rm")
+insn2q(pdep, src, "r", mask, "rm")
+insn2q(andn, clear, "rm", val, "r")
+insn2q(bextr, range, "rm", val, "r")
+insn2q(bzhi, pos, "rm", val, "r")
+insn2q(rorx, val, "r", n, "i")
+insn2q(sarx, val, "rm", n, "r")
+insn2q(shlx, val, "rm", n, "r")
+insn2q(shrx, val, "rm", n, "r")
+insn1q(blsi, src)
+insn1q(blsmsk, src)
+insn1q(blsr, src)
+#endif
+insn2l(pext, src, "r", mask, "rm")
+insn2l(pdep, src, "r", mask, "rm")
+insn2l(andn, clear, "rm", val, "r")
+insn2l(bextr, range, "rm", val, "r")
+insn2l(bzhi, pos, "rm", val, "r")
+insn2l(rorx, val, "r", n, "i")
+insn2l(sarx, val, "rm", n, "r")
+insn2l(shlx, val, "rm", n, "r")
+insn2l(shrx, val, "rm", n, "r")
+insn1l(blsi, src)
+insn1l(blsmsk, src)
+insn1l(blsr, src)
 
 int main(int argc, char *argv[]) {
     uint64_t ehlo = 0x202020204f4c4845ull;
@@ -11,32 +71,125 @@ int main(int argc, char *argv[]) {
     uint64_t result64;
 
     /* 64 bits */
-    asm volatile ("pextq %2, %1, %0" : "=r"(result64) : "r"(ehlo), "m"(mask));
+    result64 = andnq(mask, ehlo);
+    assert(result64 == 0x002020204d4c4844);
+
+    result64 = pextq(ehlo, mask);
     assert(result64 == 133);
 
-    asm volatile ("pdepq %2, %1, %0" : "=r"(result64) : "r"(result64), "m"(mask));
+    result64 = pdepq(result64, mask);
     assert(result64 == (ehlo & mask));
 
-    asm volatile ("pextq %2, %1, %0" : "=r"(result64) : "r"(-1ull), "m"(mask));
+    result64 = pextq(-1ull, mask);
     assert(result64 == 511); /* mask has 9 bits set */
 
-    asm volatile ("pdepq %2, %1, %0" : "=r"(result64) : "r"(-1ull), "m"(mask));
+    result64 = pdepq(-1ull, mask);
     assert(result64 == mask);
+
+    result64 = bextrq(mask, 0x3f00);
+    assert(result64 == (mask & ~INT64_MIN));
+
+    result64 = bextrq(mask, 0x1038);
+    assert(result64 == 0xa0);
+
+    result64 = bextrq(mask, 0x10f8);
+    assert(result64 == 0);
+
+    result64 = blsiq(0x30);
+    assert(result64 == 0x10);
+
+    result64 = blsiq(0x30ull << 32);
+    assert(result64 == 0x10ull << 32);
+
+    result64 = blsmskq(0x30);
+    assert(result64 == 0x1f);
+
+    result64 = blsrq(0x30);
+    assert(result64 == 0x20);
+
+    result64 = blsrq(0x30ull << 32);
+    assert(result64 == 0x20ull << 32);
+
+    result64 = bzhiq(mask, 0x3f);
+    assert(result64 == (mask & ~INT64_MIN));
+
+    result64 = bzhiq(mask, 0x1f);
+    assert(result64 == (mask & ~(-1 << 30)));
+
+    result64 = rorxq(0x2132435465768798, 8);
+    assert(result64 == 0x9821324354657687);
+
+    result64 = sarxq(0xffeeddccbbaa9988, 8);
+    assert(result64 == 0xffffeeddccbbaa99);
+
+    result64 = sarxq(0x77eeddccbbaa9988, 8 | 64);
+    assert(result64 == 0x0077eeddccbbaa99);
+
+    result64 = shrxq(0xffeeddccbbaa9988, 8);
+    assert(result64 == 0x00ffeeddccbbaa99);
+
+    result64 = shrxq(0x77eeddccbbaa9988, 8 | 192);
+    assert(result64 == 0x0077eeddccbbaa99);
+
+    result64 = shlxq(0xffeeddccbbaa9988, 8);
+    assert(result64 == 0xeeddccbbaa998800);
 
 #endif
 
     /* 32 bits */
-    asm volatile ("pextl %2, %k1, %k0" : "=r"(result32) : "r"((uint32_t) ehlo), "m"(mask));
+    result32 = andnl(mask, ehlo);
+    assert(result32 == 0x04d4c4844);
+
+    result32 = pextl((uint32_t) ehlo, mask);
     assert(result32 == 5);
 
-    asm volatile ("pdepl %2, %k1, %k0" : "=r"(result32) : "r"(result32), "m"(mask));
+    result32 = pdepl(result32, mask);
     assert(result32 == (uint32_t)(ehlo & mask));
 
-    asm volatile ("pextl %2, %k1, %k0" : "=r"(result32) : "r"(-1ull), "m"(mask));
+    result32 = pextl(-1u, mask);
     assert(result32 == 7); /* mask has 3 bits set */
 
-    asm volatile ("pdepl %2, %k1, %k0" : "=r"(result32) : "r"(-1ull), "m"(mask));
+    result32 = pdepl(-1u, mask);
     assert(result32 == (uint32_t)mask);
 
+    result32 = bextrl(mask, 0x1f00);
+    assert(result32 == (mask & ~INT32_MIN));
+
+    result32 = bextrl(ehlo, 0x1018);
+    assert(result32 == 0x4f);
+
+    result32 = bextrl(mask, 0x1038);
+    assert(result32 == 0);
+
+    result32 = blsil(0xffff);
+    assert(result32 == 1);
+
+    result32 = blsmskl(0x300);
+    assert(result32 == 0x1ff);
+
+    result32 = blsrl(0xffc);
+    assert(result32 == 0xff8);
+
+    result32 = bzhil(mask, 0xf);
+    assert(result32 == 1);
+
+    result32 = rorxl(0x65768798, 8);
+    assert(result32 == 0x98657687);
+
+    result32 = sarxl(0xffeeddcc, 8);
+    assert(result32 == 0xffffeedd);
+
+    result32 = sarxl(0x77eeddcc, 8 | 32);
+    assert(result32 == 0x0077eedd);
+
+    result32 = shrxl(0xffeeddcc, 8);
+    assert(result32 == 0x00ffeedd);
+
+    result32 = shrxl(0x77eeddcc, 8 | 128);
+    assert(result32 == 0x0077eedd);
+
+    result32 = shlxl(0xffeeddcc, 8);
+    assert(result32 == 0xeeddcc00);
+
     return 0;
 }
--
2.37.2