[PATCH 34/37] target/i386: implement VLDMXCSR/VSTMXCSR
From: Paolo Bonzini
Subject: [PATCH 34/37] target/i386: implement VLDMXCSR/VSTMXCSR
Date: Mon, 12 Sep 2022 01:04:14 +0200
These are exactly the same as the non-VEX versions; the only thing to be
careful about is that only VEX.L=0 is valid (VEX.L=1 must raise #UD).
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
target/i386/tcg/decode-new.c.inc | 25 +++++++++++++++++++++++++
target/i386/tcg/emit.c.inc | 20 ++++++++++++++++++++
2 files changed, 45 insertions(+)
diff --git a/target/i386/tcg/decode-new.c.inc b/target/i386/tcg/decode-new.c.inc
index 383a425ccd..e468a32787 100644
--- a/target/i386/tcg/decode-new.c.inc
+++ b/target/i386/tcg/decode-new.c.inc
@@ -80,6 +80,10 @@
#define X86_OP_ENTRY2(op, op0, s0, op1, s1, ...) \
X86_OP_ENTRY3(op, op0, s0, 2op, s0, op1, s1, ## __VA_ARGS__)
+#define X86_OP_ENTRYw(op, op0, s0, ...) \
+ X86_OP_ENTRY3(op, op0, s0, None, None, None, None, ## __VA_ARGS__)
+#define X86_OP_ENTRYr(op, op0, s0, ...) \
+ X86_OP_ENTRY3(op, None, None, None, None, op0, s0, ## __VA_ARGS__)
#define X86_OP_ENTRY0(op, ...) \
X86_OP_ENTRY3(op, None, None, None, None, None, None, ## __VA_ARGS__)
@@ -147,6 +151,25 @@ static inline const X86OpEntry *decode_by_prefix(DisasContext *s, const X86OpEnt
}
}
+static void decode_group15(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b)
+{
+ /* only includes ldmxcsr and stmxcsr, because they have AVX variants. */
+ static const X86OpEntry group15_reg[8] = {
+ };
+
+ static const X86OpEntry group15_mem[8] = {
+ [2] = X86_OP_ENTRYr(LDMXCSR, E,d, vex5),
+ [3] = X86_OP_ENTRYw(STMXCSR, E,d, vex5),
+ };
+
+ uint8_t modrm = get_modrm(s, env);
+ if ((modrm >> 6) == 3) {
+ *entry = group15_reg[(modrm >> 3) & 7];
+ } else {
+ *entry = group15_mem[(modrm >> 3) & 7];
+ }
+}
+
static void decode_group17(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b)
{
static const X86GenFunc group17_gen[8] = {
@@ -754,6 +777,8 @@ static const X86OpEntry opcodes_0F[256] = {
[0x7e] = X86_OP_GROUP0(0F7E),
[0x7f] = X86_OP_GROUP3(0F6F, W,x, None,None, V,x, vex5 mmx p_00_66_f3),
+ [0xae] = X86_OP_GROUP0(group15),
+
[0xc2] = X86_OP_ENTRY4(VCMP, V,x, H,x, W,x, vex2_rep3 p_00_66_f3_f2),
[0xc4] = X86_OP_ENTRY4(PINSRW, V,dq,H,dq,E,w, vex5 mmx p_00_66),
[0xc5] = X86_OP_ENTRY3(PEXTRW, G,d, U,dq,I,b, vex5 mmx p_00_66),
diff --git a/target/i386/tcg/emit.c.inc b/target/i386/tcg/emit.c.inc
index d61b43f21c..942766de0f 100644
--- a/target/i386/tcg/emit.c.inc
+++ b/target/i386/tcg/emit.c.inc
@@ -979,6 +979,16 @@ static void gen_LDDQU(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
gen_load_sse(s, s->T0, decode->op[0].ot, decode->op[0].offset);
}
+static void gen_LDMXCSR(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
+{
+ if (s->vex_l) {
+ gen_illegal_opcode(s);
+ return;
+ }
+ tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T1);
+ gen_helper_ldmxcsr(cpu_env, s->tmp2_i32);
+}
+
static void gen_MASKMOV(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
tcg_gen_mov_tl(s->A0, cpu_regs[R_EDI]);
@@ -1808,6 +1818,16 @@ static void gen_SSE4a_R(DisasContext *s, CPUX86State *env, X86DecodedInsn *decod
}
}
+static void gen_STMXCSR(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
+{
+ if (s->vex_l) {
+ gen_illegal_opcode(s);
+ return;
+ }
+ gen_helper_update_mxcsr(cpu_env);
+ tcg_gen_ld32u_tl(s->T0, cpu_env, offsetof(CPUX86State, mxcsr));
+}
+
static inline void gen_VAESIMC(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
assert(!s->vex_l);
--
2.37.2
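
[Editor's note, not part of the patch: for readers unfamiliar with the two
instructions being implemented here, the following minimal user-space sketch
shows what they do architecturally. STMXCSR stores the 32-bit MXCSR control
and status register to memory and LDMXCSR loads it back; the _mm_getcsr() and
_mm_setcsr() intrinsics typically compile to exactly these instructions, and
to their VEX-encoded forms VSTMXCSR/VLDMXCSR when building with -mavx. The
example is only an illustration of the guest-visible behaviour, not of the
QEMU code above.]

    #include <stdio.h>
    #include <xmmintrin.h>

    int main(void)
    {
        unsigned int csr = _mm_getcsr();      /* STMXCSR: read MXCSR */
        printf("MXCSR = %#x\n", csr);

        /* Round-to-nearest is the default rounding mode: 1.5 -> 2 */
        printf("cvtss2si(1.5f) default: %d\n",
               _mm_cvtss_si32(_mm_set_ss(1.5f)));

        /* Set rounding control (MXCSR bits 14:13) to round-toward-zero. */
        _mm_setcsr(csr | 0x6000);             /* LDMXCSR: write MXCSR */

        /* Now the same conversion truncates: 1.5 -> 1 */
        printf("cvtss2si(1.5f) toward zero: %d\n",
               _mm_cvtss_si32(_mm_set_ss(1.5f)));

        _mm_setcsr(csr);                      /* restore the original MXCSR */
        return 0;
    }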