qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Qemu-devel] [PATCH 1/6 v6] target-tilegx: Firstly add TILE-Gx with mini


From: Chen Gang
Subject: [Qemu-devel] [PATCH 1/6 v6] target-tilegx: Firstly add TILE-Gx with minimized features
Date: Thu, 19 Mar 2015 00:34:30 +0800
User-agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:31.0) Gecko/20100101 Thunderbird/31.5.0

It is the configure and build system support for TILE-Gx (tilegx will be
used in configure and real sub-directory name).

At present, it is linux-user only, and can finish the first system call
(uname) execution in __libc_start_main().

Signed-off-by: Chen Gang <address@hidden>
---
 configure                             |    3 +
 default-configs/tilegx-linux-user.mak |    1 +
 target-tilegx/Makefile.objs           |    1 +
 target-tilegx/cpu-qom.h               |   73 ++
 target-tilegx/cpu.c                   |  149 +++
 target-tilegx/cpu.h                   |   94 ++
 target-tilegx/helper.c                |   31 +
 target-tilegx/helper.h                |    1 +
 target-tilegx/opcode_tilegx.h         | 1406 ++++++++++++++++++++++++++
 target-tilegx/translate.c             | 1735 +++++++++++++++++++++++++++++++++
 10 files changed, 3494 insertions(+)
 create mode 100644 default-configs/tilegx-linux-user.mak
 create mode 100644 target-tilegx/Makefile.objs
 create mode 100644 target-tilegx/cpu-qom.h
 create mode 100644 target-tilegx/cpu.c
 create mode 100644 target-tilegx/cpu.h
 create mode 100644 target-tilegx/helper.c
 create mode 100644 target-tilegx/helper.h
 create mode 100644 target-tilegx/opcode_tilegx.h
 create mode 100644 target-tilegx/translate.c

diff --git a/configure b/configure
index f74a6fd..1121098 100755
--- a/configure
+++ b/configure
@@ -5215,6 +5215,9 @@ case "$target_name" in
   s390x)
     gdb_xml_files="s390x-core64.xml s390-acr.xml s390-fpr.xml"
   ;;
+  tilegx)
+    TARGET_ARCH=tilegx
+  ;;
   unicore32)
   ;;
   xtensa|xtensaeb)
diff --git a/default-configs/tilegx-linux-user.mak b/default-configs/tilegx-linux-user.mak
new file mode 100644
index 0000000..3e47493
--- /dev/null
+++ b/default-configs/tilegx-linux-user.mak
@@ -0,0 +1 @@
+# Default configuration for tilegx-linux-user
diff --git a/target-tilegx/Makefile.objs b/target-tilegx/Makefile.objs
new file mode 100644
index 0000000..8b3dc76
--- /dev/null
+++ b/target-tilegx/Makefile.objs
@@ -0,0 +1 @@
+obj-y += cpu.o translate.o helper.o
diff --git a/target-tilegx/cpu-qom.h b/target-tilegx/cpu-qom.h
new file mode 100644
index 0000000..5615c3b
--- /dev/null
+++ b/target-tilegx/cpu-qom.h
@@ -0,0 +1,73 @@
+/*
+ * QEMU TILE-Gx CPU
+ *
+ * Copyright (c) 2015 Chen Gang
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+#ifndef QEMU_TILEGX_CPU_QOM_H
+#define QEMU_TILEGX_CPU_QOM_H
+
+#include "qom/cpu.h"
+
+#define TYPE_TILEGX_CPU "tilegx-cpu"
+
+#define TILEGX_CPU_CLASS(klass) \
+    OBJECT_CLASS_CHECK(TileGXCPUClass, (klass), TYPE_TILEGX_CPU)
+#define TILEGX_CPU(obj) \
+    OBJECT_CHECK(TileGXCPU, (obj), TYPE_TILEGX_CPU)
+#define TILEGX_CPU_GET_CLASS(obj) \
+    OBJECT_GET_CLASS(TileGXCPUClass, (obj), TYPE_TILEGX_CPU)
+
+/**
+ * TileGXCPUClass:
+ * @parent_realize: The parent class' realize handler.
+ * @parent_reset: The parent class' reset handler.
+ *
+ * A Tile-Gx CPU model.
+ */
+typedef struct TileGXCPUClass {
+    /*< private >*/
+    CPUClass parent_class;
+    /*< public >*/
+
+    DeviceRealize parent_realize;
+    void (*parent_reset)(CPUState *cpu);
+} TileGXCPUClass;
+
+/**
+ * TileGXCPU:
+ * @env: #CPUTLGState
+ *
+ * A Tile-GX CPU.
+ */
+typedef struct TileGXCPU {
+    /*< private >*/
+    CPUState parent_obj;
+    /*< public >*/
+
+    CPUTLGState env;
+} TileGXCPU;
+
/* Map an embedded CPUTLGState back to its containing TileGXCPU object.  */
static inline TileGXCPU *tilegx_env_get_cpu(CPUTLGState *env)
{
    return container_of(env, TileGXCPU, env);
}
+
+#define ENV_GET_CPU(e) CPU(tilegx_env_get_cpu(e))
+
+#define ENV_OFFSET offsetof(TileGXCPU, env)
+
+#endif
diff --git a/target-tilegx/cpu.c b/target-tilegx/cpu.c
new file mode 100644
index 0000000..8255fdc
--- /dev/null
+++ b/target-tilegx/cpu.c
@@ -0,0 +1,149 @@
+/*
+ * QEMU TILE-Gx CPU
+ *
+ *  Copyright (c) 2015 Chen Gang
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+
+#include "cpu.h"
+#include "qemu-common.h"
+#include "hw/qdev-properties.h"
+#include "migration/vmstate.h"
+
/*
 * Create and realize a TILE-Gx CPU object.
 *
 * NOTE(review): the cpu_model argument is currently ignored -- every
 * caller gets the single TYPE_TILEGX_CPU model; confirm this is
 * intentional for the minimal port.
 * NOTE(review): realize errors are discarded (errp == NULL), so a
 * failed realize goes unnoticed by the caller.
 */
TileGXCPU *cpu_tilegx_init(const char *cpu_model)
{
    TileGXCPU *cpu;

    cpu = TILEGX_CPU(object_new(TYPE_TILEGX_CPU));

    /* Setting "realized" triggers tilegx_cpu_realizefn().  */
    object_property_set_bool(OBJECT(cpu), true, "realized", NULL);

    return cpu;
}
+
+static void tilegx_cpu_set_pc(CPUState *cs, vaddr value)
+{
+    TileGXCPU *cpu = TILEGX_CPU(cs);
+
+    cpu->env.pc = value;
+}
+
/*
 * CPUClass::has_work hook.  This linux-user-only port has no halt or
 * wait state, so the CPU is always considered runnable.
 */
static bool tilegx_cpu_has_work(CPUState *cs)
{
    return true;
}
+
/*
 * CPUClass::reset hook: chain to the parent reset, then zero the whole
 * architectural state and invalidate cached translations.
 *
 * NOTE(review): the memset also clears the CPU_COMMON bookkeeping
 * fields embedded in CPUTLGState -- confirm nothing there must
 * survive a reset.
 */
static void tilegx_cpu_reset(CPUState *s)
{
    TileGXCPU *cpu = TILEGX_CPU(s);
    TileGXCPUClass *tcc = TILEGX_CPU_GET_CLASS(cpu);
    CPUTLGState *env = &cpu->env;

    tcc->parent_reset(s);

    /* All registers and the pc start out as zero.  */
    memset(env, 0, sizeof(CPUTLGState));
    tlb_flush(s, 1);
}
+
/*
 * DeviceClass::realize hook: reset the CPU, initialise its vCPU
 * execution machinery, then chain to the parent class realize.
 */
static void tilegx_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    TileGXCPUClass *tcc = TILEGX_CPU_GET_CLASS(dev);

    cpu_reset(cs);
    qemu_init_vcpu(cs);

    tcc->parent_realize(dev, errp);
}
+
/*
 * Instance init: wire the env pointer into the CPUState, register the
 * CPU with the exec core, and do the one-time TCG translator setup.
 */
static void tilegx_cpu_initfn(Object *obj)
{
    CPUState *cs = CPU(obj);
    TileGXCPU *cpu = TILEGX_CPU(obj);
    CPUTLGState *env = &cpu->env;
    static bool tcg_initialized; /* guards the one-time tilegx_tcg_init() */

    cs->env_ptr = env;
    cpu_exec_init(env);

    if (tcg_enabled() && !tcg_initialized) {
        tcg_initialized = true;
        tilegx_tcg_init();
    }
}
+
/* Migration is not supported yet: mark the CPU state unmigratable.  */
static const VMStateDescription vmstate_tilegx_cpu = {
    .name = "cpu",
    .unmigratable = 1,
};
+
/*
 * CPUClass::do_interrupt hook: no real interrupt delivery is
 * implemented yet; just clear the pending exception index.
 */
static void tilegx_cpu_do_interrupt(CPUState *cs)
{
    cs->exception_index = -1;
}
+
/*
 * CPUClass::handle_mmu_fault hook.  There is no guest MMU to refill in
 * this linux-user-only port: dump CPU state for debugging and return 1
 * so the caller treats the access as a fault.
 */
static int tilegx_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
                                       int mmu_idx)
{
    cpu_dump_state(cs, stderr, fprintf, 0);
    return 1;
}
+
+static bool tilegx_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+    if (interrupt_request & CPU_INTERRUPT_HARD) {
+        tilegx_cpu_do_interrupt(cs);
+        return true;
+    }
+    return false;
+}
+
/*
 * Class init: install the TILE-Gx implementations of the device and
 * CPU class hooks, saving the parent realize/reset pointers so our
 * overrides can chain to them.
 */
static void tilegx_cpu_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    CPUClass *cc = CPU_CLASS(oc);
    TileGXCPUClass *tcc = TILEGX_CPU_CLASS(oc);

    tcc->parent_realize = dc->realize;
    dc->realize = tilegx_cpu_realizefn;

    tcc->parent_reset = cc->reset;
    cc->reset = tilegx_cpu_reset;

    cc->has_work = tilegx_cpu_has_work;
    cc->do_interrupt = tilegx_cpu_do_interrupt;
    cc->cpu_exec_interrupt = tilegx_cpu_exec_interrupt;
    cc->set_pc = tilegx_cpu_set_pc;
    cc->handle_mmu_fault = tilegx_cpu_handle_mmu_fault;
    dc->vmsd = &vmstate_tilegx_cpu;
    cc->gdb_num_core_regs = 0; /* no gdbstub register support yet */
}
+
/* QOM type registration data for the TILE-Gx CPU.  */
static const TypeInfo tilegx_cpu_type_info = {
    .name = TYPE_TILEGX_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(TileGXCPU),
    .instance_init = tilegx_cpu_initfn,
    .class_size = sizeof(TileGXCPUClass),
    .class_init = tilegx_cpu_class_init,
};
+
/* Register the TILE-Gx CPU type with QOM at module load time.  */
static void tilegx_cpu_register_types(void)
{
    type_register_static(&tilegx_cpu_type_info);
}

type_init(tilegx_cpu_register_types)
diff --git a/target-tilegx/cpu.h b/target-tilegx/cpu.h
new file mode 100644
index 0000000..bf074a6
--- /dev/null
+++ b/target-tilegx/cpu.h
@@ -0,0 +1,94 @@
+/*
+ *  TILE-Gx virtual CPU header
+ *
+ *  Copyright (c) 2015 Chen Gang
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+#ifndef CPU_TILEGX_H
+#define CPU_TILEGX_H
+
+#include "config.h"
+#include "qemu-common.h"
+
+#define TARGET_LONG_BITS 64
+
+#define CPUArchState struct CPUTLGState
+
+#include "exec/cpu-defs.h"
+#include "fpu/softfloat.h"
+
+/* TILE-Gx register alias */
+#define TILEGX_R_RE    0   /*  0 register, for function/syscall return value */
+#define TILEGX_R_NR    10  /* 10 register, for syscall number */
+#define TILEGX_R_BP    52  /* 52 register, optional frame pointer */
+#define TILEGX_R_TP    53  /* TP register, thread local storage data */
+#define TILEGX_R_SP    54  /* SP register, stack pointer */
+#define TILEGX_R_LR    55  /* LR register, may save pc, but it is not pc */
+#define TILEGX_R_ZERO  63  /* Zero register, always zero */
+#define TILEGX_R_COUNT 56  /* Only 56 registers are really useful */
+#define TILEGX_R_NOREG 255 /* Invalid register value */
+
+
/* Architectural CPU state for TILE-Gx.  */
typedef struct CPUTLGState {
    uint64_t regs[TILEGX_R_COUNT]; /* architectural registers r0..r55 */
    uint64_t pc;                   /* Current pc */

    CPU_COMMON                     /* fields shared by all QEMU targets */
} CPUTLGState;
+
+#include "cpu-qom.h"
+
+/* TILE-Gx memory attributes */
+#define TARGET_PAGE_BITS 16  /* TILE-Gx uses 64KB page size */
+#define MMAP_SHIFT TARGET_PAGE_BITS
+#define TARGET_PHYS_ADDR_SPACE_BITS 42 /* TILE-Gx is 42 bit physical address */
+#define TARGET_VIRT_ADDR_SPACE_BITS 64 /* TILE-Gx has 64 bit virtual address */
+#define MMU_USER_IDX    0  /* independent from both qemu and architecture */
+
+/* Exception numbers */
+enum {
+    TILEGX_EXCP_NONE = 0,
+    TILEGX_EXCP_SYSCALL = 1,
+    TILEGX_EXCP_OPCODE_UNKNOWN = 0x101,
+    TILEGX_EXCP_OPCODE_UNIMPLEMENTED = 0x102,
+    TILEGX_EXCP_REG_UNSUPPORTED = 0x181
+
+};
+
+#include "exec/cpu-all.h"
+
+void tilegx_tcg_init(void);
+int cpu_tilegx_exec(CPUTLGState *s);
+int cpu_tilegx_signal_handler(int host_signum, void *pinfo, void *puc);
+
+TileGXCPU *cpu_tilegx_init(const char *cpu_model);
+
+#define cpu_init(cpu_model) CPU(cpu_tilegx_init(cpu_model))
+
+#define cpu_exec cpu_tilegx_exec
+#define cpu_gen_code cpu_tilegx_gen_code
+#define cpu_signal_handler cpu_tilegx_signal_handler
+
/*
 * Capture the state that identifies a translation block: only the pc
 * matters on TILE-Gx; there is no code segment base and no flags.
 */
static inline void cpu_get_tb_cpu_state(CPUTLGState *env, target_ulong *pc,
                                        target_ulong *cs_base, int *flags)
{
    *pc = env->pc;
    *cs_base = 0;
    *flags = 0;
}
+
+#include "exec/exec-all.h"
+
+#endif
diff --git a/target-tilegx/helper.c b/target-tilegx/helper.c
new file mode 100644
index 0000000..ffac2b9
--- /dev/null
+++ b/target-tilegx/helper.c
@@ -0,0 +1,31 @@
+/*
+ * QEMU TILE-Gx helpers
+ *
+ *  Copyright (c) 2015 Chen Gang
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+
+#include "cpu.h"
+#include "qemu-common.h"
+#include "exec/helper-proto.h"
+
/*
 * Raise a guest exception: record the exception number on the CPUState
 * and jump back to the cpu exec loop (does not return).
 */
void helper_exception(CPUTLGState *env, uint32_t excp)
{
    CPUState *cs = CPU(tilegx_env_get_cpu(env));

    cs->exception_index = excp;
    cpu_loop_exit(cs);
}
diff --git a/target-tilegx/helper.h b/target-tilegx/helper.h
new file mode 100644
index 0000000..36d1cd9
--- /dev/null
+++ b/target-tilegx/helper.h
@@ -0,0 +1 @@
+DEF_HELPER_2(exception, noreturn, env, i32)
diff --git a/target-tilegx/opcode_tilegx.h b/target-tilegx/opcode_tilegx.h
new file mode 100644
index 0000000..d76ff2d
--- /dev/null
+++ b/target-tilegx/opcode_tilegx.h
@@ -0,0 +1,1406 @@
+/* TILE-Gx opcode information.
+ *
+ * Copyright 2011 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ *
+ *
+ *
+ *
+ *
+ */
+
+#ifndef __ARCH_OPCODE_H__
+#define __ARCH_OPCODE_H__
+
+#ifndef __ASSEMBLER__
+
+typedef unsigned long long tilegx_bundle_bits;
+
+/* These are the bits that determine if a bundle is in the X encoding. */
+#define TILEGX_BUNDLE_MODE_MASK ((tilegx_bundle_bits)3 << 62)
+
/* Bundle/encoding geometry constants for TILE-Gx.  */
enum
{
  /* Maximum number of instructions in a bundle (2 for X, 3 for Y). */
  TILEGX_MAX_INSTRUCTIONS_PER_BUNDLE = 3,

  /* How many different pipeline encodings are there? X0, X1, Y0, Y1, Y2. */
  TILEGX_NUM_PIPELINE_ENCODINGS = 5,

  /* Log base 2 of TILEGX_BUNDLE_SIZE_IN_BYTES. */
  TILEGX_LOG2_BUNDLE_SIZE_IN_BYTES = 3,

  /* Instructions take this many bytes. */
  TILEGX_BUNDLE_SIZE_IN_BYTES = 1 << TILEGX_LOG2_BUNDLE_SIZE_IN_BYTES,

  /* Log base 2 of TILEGX_BUNDLE_ALIGNMENT_IN_BYTES. */
  TILEGX_LOG2_BUNDLE_ALIGNMENT_IN_BYTES = 3,

  /* Bundles should be aligned modulo this number of bytes. */
  TILEGX_BUNDLE_ALIGNMENT_IN_BYTES =
    (1 << TILEGX_LOG2_BUNDLE_ALIGNMENT_IN_BYTES),

  /* Number of registers (some are magic, such as network I/O). */
  TILEGX_NUM_REGISTERS = 64,
};
+
+/* Make a few "tile_" variables to simplify common code between
+   architectures.  */
+
+typedef tilegx_bundle_bits tile_bundle_bits;
+#define TILE_BUNDLE_SIZE_IN_BYTES TILEGX_BUNDLE_SIZE_IN_BYTES
+#define TILE_BUNDLE_ALIGNMENT_IN_BYTES TILEGX_BUNDLE_ALIGNMENT_IN_BYTES
+#define TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES \
+  TILEGX_LOG2_BUNDLE_ALIGNMENT_IN_BYTES
+#define TILE_BPT_BUNDLE TILEGX_BPT_BUNDLE
+
+/* 64-bit pattern for a { bpt ; nop } bundle. */
+#define TILEGX_BPT_BUNDLE 0x286a44ae51485000ULL
+
+static __inline unsigned int
+get_BFEnd_X0(tilegx_bundle_bits num)
+{
+  const unsigned int n = (unsigned int)num;
+  return (((n >> 12)) & 0x3f);
+}
+
+static __inline unsigned int
+get_BFOpcodeExtension_X0(tilegx_bundle_bits num)
+{
+  const unsigned int n = (unsigned int)num;
+  return (((n >> 24)) & 0xf);
+}
+
+static __inline unsigned int
+get_BFStart_X0(tilegx_bundle_bits num)
+{
+  const unsigned int n = (unsigned int)num;
+  return (((n >> 18)) & 0x3f);
+}
+
+static __inline unsigned int
+get_BrOff_X1(tilegx_bundle_bits n)
+{
+  return (((unsigned int)(n >> 31)) & 0x0000003f) |
+         (((unsigned int)(n >> 37)) & 0x0001ffc0);
+}
+
+static __inline unsigned int
+get_BrType_X1(tilegx_bundle_bits n)
+{
+  return (((unsigned int)(n >> 54)) & 0x1f);
+}
+
+static __inline unsigned int
+get_Dest_Imm8_X1(tilegx_bundle_bits n)
+{
+  return (((unsigned int)(n >> 31)) & 0x0000003f) |
+         (((unsigned int)(n >> 43)) & 0x000000c0);
+}
+
+static __inline unsigned int
+get_Dest_X0(tilegx_bundle_bits num)
+{
+  const unsigned int n = (unsigned int)num;
+  return (((n >> 0)) & 0x3f);
+}
+
+static __inline unsigned int
+get_Dest_X1(tilegx_bundle_bits n)
+{
+  return (((unsigned int)(n >> 31)) & 0x3f);
+}
+
+static __inline unsigned int
+get_Dest_Y0(tilegx_bundle_bits num)
+{
+  const unsigned int n = (unsigned int)num;
+  return (((n >> 0)) & 0x3f);
+}
+
+static __inline unsigned int
+get_Dest_Y1(tilegx_bundle_bits n)
+{
+  return (((unsigned int)(n >> 31)) & 0x3f);
+}
+
+static __inline unsigned int
+get_Imm16_X0(tilegx_bundle_bits num)
+{
+  const unsigned int n = (unsigned int)num;
+  return (((n >> 12)) & 0xffff);
+}
+
+static __inline unsigned int
+get_Imm16_X1(tilegx_bundle_bits n)
+{
+  return (((unsigned int)(n >> 43)) & 0xffff);
+}
+
+static __inline unsigned int
+get_Imm8OpcodeExtension_X0(tilegx_bundle_bits num)
+{
+  const unsigned int n = (unsigned int)num;
+  return (((n >> 20)) & 0xff);
+}
+
+static __inline unsigned int
+get_Imm8OpcodeExtension_X1(tilegx_bundle_bits n)
+{
+  return (((unsigned int)(n >> 51)) & 0xff);
+}
+
+static __inline unsigned int
+get_Imm8_X0(tilegx_bundle_bits num)
+{
+  const unsigned int n = (unsigned int)num;
+  return (((n >> 12)) & 0xff);
+}
+
+static __inline unsigned int
+get_Imm8_X1(tilegx_bundle_bits n)
+{
+  return (((unsigned int)(n >> 43)) & 0xff);
+}
+
+static __inline unsigned int
+get_Imm8_Y0(tilegx_bundle_bits num)
+{
+  const unsigned int n = (unsigned int)num;
+  return (((n >> 12)) & 0xff);
+}
+
+static __inline unsigned int
+get_Imm8_Y1(tilegx_bundle_bits n)
+{
+  return (((unsigned int)(n >> 43)) & 0xff);
+}
+
+static __inline unsigned int
+get_JumpOff_X1(tilegx_bundle_bits n)
+{
+  return (((unsigned int)(n >> 31)) & 0x7ffffff);
+}
+
+static __inline unsigned int
+get_JumpOpcodeExtension_X1(tilegx_bundle_bits n)
+{
+  return (((unsigned int)(n >> 58)) & 0x1);
+}
+
+static __inline unsigned int
+get_MF_Imm14_X1(tilegx_bundle_bits n)
+{
+  return (((unsigned int)(n >> 37)) & 0x3fff);
+}
+
+static __inline unsigned int
+get_MT_Imm14_X1(tilegx_bundle_bits n)
+{
+  return (((unsigned int)(n >> 31)) & 0x0000003f) |
+         (((unsigned int)(n >> 37)) & 0x00003fc0);
+}
+
+static __inline unsigned int
+get_Mode(tilegx_bundle_bits n)
+{
+  return (((unsigned int)(n >> 62)) & 0x3);
+}
+
+static __inline unsigned int
+get_Opcode_X0(tilegx_bundle_bits num)
+{
+  const unsigned int n = (unsigned int)num;
+  return (((n >> 28)) & 0x7);
+}
+
+static __inline unsigned int
+get_Opcode_X1(tilegx_bundle_bits n)
+{
+  return (((unsigned int)(n >> 59)) & 0x7);
+}
+
+static __inline unsigned int
+get_Opcode_Y0(tilegx_bundle_bits num)
+{
+  const unsigned int n = (unsigned int)num;
+  return (((n >> 27)) & 0xf);
+}
+
+static __inline unsigned int
+get_Opcode_Y1(tilegx_bundle_bits n)
+{
+  return (((unsigned int)(n >> 58)) & 0xf);
+}
+
+static __inline unsigned int
+get_Opcode_Y2(tilegx_bundle_bits n)
+{
+  return (((n >> 26)) & 0x00000001) |
+         (((unsigned int)(n >> 56)) & 0x00000002);
+}
+
+static __inline unsigned int
+get_RRROpcodeExtension_X0(tilegx_bundle_bits num)
+{
+  const unsigned int n = (unsigned int)num;
+  return (((n >> 18)) & 0x3ff);
+}
+
+static __inline unsigned int
+get_RRROpcodeExtension_X1(tilegx_bundle_bits n)
+{
+  return (((unsigned int)(n >> 49)) & 0x3ff);
+}
+
+static __inline unsigned int
+get_RRROpcodeExtension_Y0(tilegx_bundle_bits num)
+{
+  const unsigned int n = (unsigned int)num;
+  return (((n >> 18)) & 0x3);
+}
+
+static __inline unsigned int
+get_RRROpcodeExtension_Y1(tilegx_bundle_bits n)
+{
+  return (((unsigned int)(n >> 49)) & 0x3);
+}
+
+static __inline unsigned int
+get_ShAmt_X0(tilegx_bundle_bits num)
+{
+  const unsigned int n = (unsigned int)num;
+  return (((n >> 12)) & 0x3f);
+}
+
+static __inline unsigned int
+get_ShAmt_X1(tilegx_bundle_bits n)
+{
+  return (((unsigned int)(n >> 43)) & 0x3f);
+}
+
+static __inline unsigned int
+get_ShAmt_Y0(tilegx_bundle_bits num)
+{
+  const unsigned int n = (unsigned int)num;
+  return (((n >> 12)) & 0x3f);
+}
+
+static __inline unsigned int
+get_ShAmt_Y1(tilegx_bundle_bits n)
+{
+  return (((unsigned int)(n >> 43)) & 0x3f);
+}
+
+static __inline unsigned int
+get_ShiftOpcodeExtension_X0(tilegx_bundle_bits num)
+{
+  const unsigned int n = (unsigned int)num;
+  return (((n >> 18)) & 0x3ff);
+}
+
+static __inline unsigned int
+get_ShiftOpcodeExtension_X1(tilegx_bundle_bits n)
+{
+  return (((unsigned int)(n >> 49)) & 0x3ff);
+}
+
+static __inline unsigned int
+get_ShiftOpcodeExtension_Y0(tilegx_bundle_bits num)
+{
+  const unsigned int n = (unsigned int)num;
+  return (((n >> 18)) & 0x3);
+}
+
+static __inline unsigned int
+get_ShiftOpcodeExtension_Y1(tilegx_bundle_bits n)
+{
+  return (((unsigned int)(n >> 49)) & 0x3);
+}
+
+static __inline unsigned int
+get_SrcA_X0(tilegx_bundle_bits num)
+{
+  const unsigned int n = (unsigned int)num;
+  return (((n >> 6)) & 0x3f);
+}
+
+static __inline unsigned int
+get_SrcA_X1(tilegx_bundle_bits n)
+{
+  return (((unsigned int)(n >> 37)) & 0x3f);
+}
+
+static __inline unsigned int
+get_SrcA_Y0(tilegx_bundle_bits num)
+{
+  const unsigned int n = (unsigned int)num;
+  return (((n >> 6)) & 0x3f);
+}
+
+static __inline unsigned int
+get_SrcA_Y1(tilegx_bundle_bits n)
+{
+  return (((unsigned int)(n >> 37)) & 0x3f);
+}
+
+static __inline unsigned int
+get_SrcA_Y2(tilegx_bundle_bits num)
+{
+  const unsigned int n = (unsigned int)num;
+  return (((n >> 20)) & 0x3f);
+}
+
+static __inline unsigned int
+get_SrcBDest_Y2(tilegx_bundle_bits n)
+{
+  return (((unsigned int)(n >> 51)) & 0x3f);
+}
+
+static __inline unsigned int
+get_SrcB_X0(tilegx_bundle_bits num)
+{
+  const unsigned int n = (unsigned int)num;
+  return (((n >> 12)) & 0x3f);
+}
+
+static __inline unsigned int
+get_SrcB_X1(tilegx_bundle_bits n)
+{
+  return (((unsigned int)(n >> 43)) & 0x3f);
+}
+
+static __inline unsigned int
+get_SrcB_Y0(tilegx_bundle_bits num)
+{
+  const unsigned int n = (unsigned int)num;
+  return (((n >> 12)) & 0x3f);
+}
+
+static __inline unsigned int
+get_SrcB_Y1(tilegx_bundle_bits n)
+{
+  return (((unsigned int)(n >> 43)) & 0x3f);
+}
+
+static __inline unsigned int
+get_UnaryOpcodeExtension_X0(tilegx_bundle_bits num)
+{
+  const unsigned int n = (unsigned int)num;
+  return (((n >> 12)) & 0x3f);
+}
+
+static __inline unsigned int
+get_UnaryOpcodeExtension_X1(tilegx_bundle_bits n)
+{
+  return (((unsigned int)(n >> 43)) & 0x3f);
+}
+
+static __inline unsigned int
+get_UnaryOpcodeExtension_Y0(tilegx_bundle_bits num)
+{
+  const unsigned int n = (unsigned int)num;
+  return (((n >> 12)) & 0x3f);
+}
+
+static __inline unsigned int
+get_UnaryOpcodeExtension_Y1(tilegx_bundle_bits n)
+{
+  return (((unsigned int)(n >> 43)) & 0x3f);
+}
+
+
/* Sign-extend the low num_bits bits of n to a full int.
 *
 * The previous implementation used (n << shift) >> shift, which is
 * undefined behavior in C whenever n is negative or a set bit is
 * shifted into the sign position.  Do the extension with well-defined
 * unsigned arithmetic instead: mask to num_bits, then replicate bit
 * (num_bits - 1) upward via the xor/subtract trick.
 *
 * num_bits must be in [1, 32].
 */
static __inline int
sign_extend(int n, int num_bits)
{
  const unsigned int sign = 1U << (num_bits - 1);
  /* Unsigned wraparound makes (sign << 1) - 1 an all-ones mask when
     num_bits == 32.  */
  const unsigned int low = (unsigned int)n & ((sign << 1) - 1U);
  return (int)((low ^ sign) - sign);
}
+
+
+
+static __inline tilegx_bundle_bits
+create_BFEnd_X0(int num)
+{
+  const unsigned int n = (unsigned int)num;
+  return ((n & 0x3f) << 12);
+}
+
+static __inline tilegx_bundle_bits
+create_BFOpcodeExtension_X0(int num)
+{
+  const unsigned int n = (unsigned int)num;
+  return ((n & 0xf) << 24);
+}
+
+static __inline tilegx_bundle_bits
+create_BFStart_X0(int num)
+{
+  const unsigned int n = (unsigned int)num;
+  return ((n & 0x3f) << 18);
+}
+
+static __inline tilegx_bundle_bits
+create_BrOff_X1(int num)
+{
+  const unsigned int n = (unsigned int)num;
+  return (((tilegx_bundle_bits)(n & 0x0000003f)) << 31) |
+         (((tilegx_bundle_bits)(n & 0x0001ffc0)) << 37);
+}
+
+static __inline tilegx_bundle_bits
+create_BrType_X1(int num)
+{
+  const unsigned int n = (unsigned int)num;
+  return (((tilegx_bundle_bits)(n & 0x1f)) << 54);
+}
+
+static __inline tilegx_bundle_bits
+create_Dest_Imm8_X1(int num)
+{
+  const unsigned int n = (unsigned int)num;
+  return (((tilegx_bundle_bits)(n & 0x0000003f)) << 31) |
+         (((tilegx_bundle_bits)(n & 0x000000c0)) << 43);
+}
+
+static __inline tilegx_bundle_bits
+create_Dest_X0(int num)
+{
+  const unsigned int n = (unsigned int)num;
+  return ((n & 0x3f) << 0);
+}
+
+static __inline tilegx_bundle_bits
+create_Dest_X1(int num)
+{
+  const unsigned int n = (unsigned int)num;
+  return (((tilegx_bundle_bits)(n & 0x3f)) << 31);
+}
+
+static __inline tilegx_bundle_bits
+create_Dest_Y0(int num)
+{
+  const unsigned int n = (unsigned int)num;
+  return ((n & 0x3f) << 0);
+}
+
+static __inline tilegx_bundle_bits
+create_Dest_Y1(int num)
+{
+  const unsigned int n = (unsigned int)num;
+  return (((tilegx_bundle_bits)(n & 0x3f)) << 31);
+}
+
+static __inline tilegx_bundle_bits
+create_Imm16_X0(int num)
+{
+  const unsigned int n = (unsigned int)num;
+  return ((n & 0xffff) << 12);
+}
+
+static __inline tilegx_bundle_bits
+create_Imm16_X1(int num)
+{
+  const unsigned int n = (unsigned int)num;
+  return (((tilegx_bundle_bits)(n & 0xffff)) << 43);
+}
+
+static __inline tilegx_bundle_bits
+create_Imm8OpcodeExtension_X0(int num)
+{
+  const unsigned int n = (unsigned int)num;
+  return ((n & 0xff) << 20);
+}
+
+static __inline tilegx_bundle_bits
+create_Imm8OpcodeExtension_X1(int num)
+{
+  const unsigned int n = (unsigned int)num;
+  return (((tilegx_bundle_bits)(n & 0xff)) << 51);
+}
+
+static __inline tilegx_bundle_bits
+create_Imm8_X0(int num)
+{
+  const unsigned int n = (unsigned int)num;
+  return ((n & 0xff) << 12);
+}
+
+static __inline tilegx_bundle_bits
+create_Imm8_X1(int num)
+{
+  const unsigned int n = (unsigned int)num;
+  return (((tilegx_bundle_bits)(n & 0xff)) << 43);
+}
+
+static __inline tilegx_bundle_bits
+create_Imm8_Y0(int num)
+{
+  const unsigned int n = (unsigned int)num;
+  return ((n & 0xff) << 12);
+}
+
+static __inline tilegx_bundle_bits
+create_Imm8_Y1(int num)
+{
+  const unsigned int n = (unsigned int)num;
+  return (((tilegx_bundle_bits)(n & 0xff)) << 43);
+}
+
+static __inline tilegx_bundle_bits
+create_JumpOff_X1(int num)
+{
+  const unsigned int n = (unsigned int)num;
+  return (((tilegx_bundle_bits)(n & 0x7ffffff)) << 31);
+}
+
+static __inline tilegx_bundle_bits
+create_JumpOpcodeExtension_X1(int num)
+{
+  const unsigned int n = (unsigned int)num;
+  return (((tilegx_bundle_bits)(n & 0x1)) << 58);
+}
+
+static __inline tilegx_bundle_bits
+create_MF_Imm14_X1(int num)
+{
+  const unsigned int n = (unsigned int)num;
+  return (((tilegx_bundle_bits)(n & 0x3fff)) << 37);
+}
+
+static __inline tilegx_bundle_bits
+create_MT_Imm14_X1(int num)
+{
+  const unsigned int n = (unsigned int)num;
+  return (((tilegx_bundle_bits)(n & 0x0000003f)) << 31) |
+         (((tilegx_bundle_bits)(n & 0x00003fc0)) << 37);
+}
+
+static __inline tilegx_bundle_bits
+create_Mode(int num)
+{
+  const unsigned int n = (unsigned int)num;
+  return (((tilegx_bundle_bits)(n & 0x3)) << 62);
+}
+
+static __inline tilegx_bundle_bits
+create_Opcode_X0(int num)
+{
+  const unsigned int n = (unsigned int)num;
+  return ((n & 0x7) << 28);
+}
+
+static __inline tilegx_bundle_bits
+create_Opcode_X1(int num)
+{
+  const unsigned int n = (unsigned int)num;
+  return (((tilegx_bundle_bits)(n & 0x7)) << 59);
+}
+
+static __inline tilegx_bundle_bits
+create_Opcode_Y0(int num)
+{
+  const unsigned int n = (unsigned int)num;
+  return ((n & 0xf) << 27);
+}
+
+static __inline tilegx_bundle_bits
+create_Opcode_Y1(int num)
+{
+  const unsigned int n = (unsigned int)num;
+  return (((tilegx_bundle_bits)(n & 0xf)) << 58);
+}
+
+static __inline tilegx_bundle_bits
+create_Opcode_Y2(int num)
+{
+  const unsigned int n = (unsigned int)num;
+  return ((n & 0x00000001) << 26) |
+         (((tilegx_bundle_bits)(n & 0x00000002)) << 56);
+}
+
+static __inline tilegx_bundle_bits
+create_RRROpcodeExtension_X0(int num)
+{
+  const unsigned int n = (unsigned int)num;
+  return ((n & 0x3ff) << 18);
+}
+
+static __inline tilegx_bundle_bits
+create_RRROpcodeExtension_X1(int num)
+{
+  const unsigned int n = (unsigned int)num;
+  return (((tilegx_bundle_bits)(n & 0x3ff)) << 49);
+}
+
+static __inline tilegx_bundle_bits
+create_RRROpcodeExtension_Y0(int num)
+{
+  const unsigned int n = (unsigned int)num;
+  return ((n & 0x3) << 18);
+}
+
+static __inline tilegx_bundle_bits
+create_RRROpcodeExtension_Y1(int num)
+{
+  const unsigned int n = (unsigned int)num;
+  return (((tilegx_bundle_bits)(n & 0x3)) << 49);
+}
+
+static __inline tilegx_bundle_bits
+create_ShAmt_X0(int num)
+{
+  const unsigned int n = (unsigned int)num;
+  return ((n & 0x3f) << 12);
+}
+
+static __inline tilegx_bundle_bits
+create_ShAmt_X1(int num)
+{
+  const unsigned int n = (unsigned int)num;
+  return (((tilegx_bundle_bits)(n & 0x3f)) << 43);
+}
+
+static __inline tilegx_bundle_bits
+create_ShAmt_Y0(int num)
+{
+  const unsigned int n = (unsigned int)num;
+  return ((n & 0x3f) << 12);
+}
+
+static __inline tilegx_bundle_bits
+create_ShAmt_Y1(int num)
+{
+  const unsigned int n = (unsigned int)num;
+  return (((tilegx_bundle_bits)(n & 0x3f)) << 43);
+}
+
+static __inline tilegx_bundle_bits
+create_ShiftOpcodeExtension_X0(int num)
+{
+  const unsigned int n = (unsigned int)num;
+  return ((n & 0x3ff) << 18);
+}
+
+static __inline tilegx_bundle_bits
+create_ShiftOpcodeExtension_X1(int num)
+{
+  const unsigned int n = (unsigned int)num;
+  return (((tilegx_bundle_bits)(n & 0x3ff)) << 49);
+}
+
+/*
+ * Bundle-field encoders.  Each create_<Field>_<Pipe>() masks the operand
+ * value down to the field's width and shifts it into the field's bit
+ * position inside a 64-bit instruction bundle.  Fields that live above
+ * bit 31 are widened to tilegx_bundle_bits before shifting, so a 32-bit
+ * unsigned value is never shifted past its own width.
+ */
+static __inline tilegx_bundle_bits
+create_ShiftOpcodeExtension_Y0(int num)
+{
+  const unsigned int n = (unsigned int)num;
+  return ((n & 0x3) << 18);
+}
+
+static __inline tilegx_bundle_bits
+create_ShiftOpcodeExtension_Y1(int num)
+{
+  const unsigned int n = (unsigned int)num;
+  return (((tilegx_bundle_bits)(n & 0x3)) << 49);
+}
+
+static __inline tilegx_bundle_bits
+create_SrcA_X0(int num)
+{
+  const unsigned int n = (unsigned int)num;
+  return ((n & 0x3f) << 6);
+}
+
+static __inline tilegx_bundle_bits
+create_SrcA_X1(int num)
+{
+  const unsigned int n = (unsigned int)num;
+  return (((tilegx_bundle_bits)(n & 0x3f)) << 37);
+}
+
+static __inline tilegx_bundle_bits
+create_SrcA_Y0(int num)
+{
+  const unsigned int n = (unsigned int)num;
+  return ((n & 0x3f) << 6);
+}
+
+static __inline tilegx_bundle_bits
+create_SrcA_Y1(int num)
+{
+  const unsigned int n = (unsigned int)num;
+  return (((tilegx_bundle_bits)(n & 0x3f)) << 37);
+}
+
+static __inline tilegx_bundle_bits
+create_SrcA_Y2(int num)
+{
+  const unsigned int n = (unsigned int)num;
+  return ((n & 0x3f) << 20);
+}
+
+static __inline tilegx_bundle_bits
+create_SrcBDest_Y2(int num)
+{
+  const unsigned int n = (unsigned int)num;
+  return (((tilegx_bundle_bits)(n & 0x3f)) << 51);
+}
+
+static __inline tilegx_bundle_bits
+create_SrcB_X0(int num)
+{
+  const unsigned int n = (unsigned int)num;
+  return ((n & 0x3f) << 12);
+}
+
+static __inline tilegx_bundle_bits
+create_SrcB_X1(int num)
+{
+  const unsigned int n = (unsigned int)num;
+  return (((tilegx_bundle_bits)(n & 0x3f)) << 43);
+}
+
+static __inline tilegx_bundle_bits
+create_SrcB_Y0(int num)
+{
+  const unsigned int n = (unsigned int)num;
+  return ((n & 0x3f) << 12);
+}
+
+static __inline tilegx_bundle_bits
+create_SrcB_Y1(int num)
+{
+  const unsigned int n = (unsigned int)num;
+  return (((tilegx_bundle_bits)(n & 0x3f)) << 43);
+}
+
+static __inline tilegx_bundle_bits
+create_UnaryOpcodeExtension_X0(int num)
+{
+  const unsigned int n = (unsigned int)num;
+  return ((n & 0x3f) << 12);
+}
+
+static __inline tilegx_bundle_bits
+create_UnaryOpcodeExtension_X1(int num)
+{
+  const unsigned int n = (unsigned int)num;
+  return (((tilegx_bundle_bits)(n & 0x3f)) << 43);
+}
+
+static __inline tilegx_bundle_bits
+create_UnaryOpcodeExtension_Y0(int num)
+{
+  const unsigned int n = (unsigned int)num;
+  return ((n & 0x3f) << 12);
+}
+
+static __inline tilegx_bundle_bits
+create_UnaryOpcodeExtension_Y1(int num)
+{
+  const unsigned int n = (unsigned int)num;
+  return (((tilegx_bundle_bits)(n & 0x3f)) << 43);
+}
+
+
+/*
+ * Opcode-field values for the TILE-Gx ISA, one enumerator per
+ * (instruction, execution pipe) pair.  The suffix names the field the
+ * value is decoded from (e.g. RRR_0, IMM8, UNARY, SHIFT, BRANCH) and the
+ * pipe (X0/X1/Y0/Y1/Y2).  Values are only unique within the same
+ * field+pipe namespace, so the same numeric value may legitimately
+ * appear many times.  NOTE(review): this table appears to be generated
+ * from the vendor's <arch/opcode.h>; do not hand-edit values.
+ */
+enum
+{
+  ADDI_IMM8_OPCODE_X0 = 1,
+  ADDI_IMM8_OPCODE_X1 = 1,
+  ADDI_OPCODE_Y0 = 0,
+  ADDI_OPCODE_Y1 = 1,
+  ADDLI_OPCODE_X0 = 1,
+  ADDLI_OPCODE_X1 = 0,
+  ADDXI_IMM8_OPCODE_X0 = 2,
+  ADDXI_IMM8_OPCODE_X1 = 2,
+  ADDXI_OPCODE_Y0 = 1,
+  ADDXI_OPCODE_Y1 = 2,
+  ADDXLI_OPCODE_X0 = 2,
+  ADDXLI_OPCODE_X1 = 1,
+  ADDXSC_RRR_0_OPCODE_X0 = 1,
+  ADDXSC_RRR_0_OPCODE_X1 = 1,
+  ADDX_RRR_0_OPCODE_X0 = 2,
+  ADDX_RRR_0_OPCODE_X1 = 2,
+  ADDX_RRR_0_OPCODE_Y0 = 0,
+  ADDX_SPECIAL_0_OPCODE_Y1 = 0,
+  ADD_RRR_0_OPCODE_X0 = 3,
+  ADD_RRR_0_OPCODE_X1 = 3,
+  ADD_RRR_0_OPCODE_Y0 = 1,
+  ADD_SPECIAL_0_OPCODE_Y1 = 1,
+  ANDI_IMM8_OPCODE_X0 = 3,
+  ANDI_IMM8_OPCODE_X1 = 3,
+  ANDI_OPCODE_Y0 = 2,
+  ANDI_OPCODE_Y1 = 3,
+  AND_RRR_0_OPCODE_X0 = 4,
+  AND_RRR_0_OPCODE_X1 = 4,
+  AND_RRR_5_OPCODE_Y0 = 0,
+  AND_RRR_5_OPCODE_Y1 = 0,
+  BEQZT_BRANCH_OPCODE_X1 = 16,
+  BEQZ_BRANCH_OPCODE_X1 = 17,
+  BFEXTS_BF_OPCODE_X0 = 4,
+  BFEXTU_BF_OPCODE_X0 = 5,
+  BFINS_BF_OPCODE_X0 = 6,
+  BF_OPCODE_X0 = 3,
+  BGEZT_BRANCH_OPCODE_X1 = 18,
+  BGEZ_BRANCH_OPCODE_X1 = 19,
+  BGTZT_BRANCH_OPCODE_X1 = 20,
+  BGTZ_BRANCH_OPCODE_X1 = 21,
+  BLBCT_BRANCH_OPCODE_X1 = 22,
+  BLBC_BRANCH_OPCODE_X1 = 23,
+  BLBST_BRANCH_OPCODE_X1 = 24,
+  BLBS_BRANCH_OPCODE_X1 = 25,
+  BLEZT_BRANCH_OPCODE_X1 = 26,
+  BLEZ_BRANCH_OPCODE_X1 = 27,
+  BLTZT_BRANCH_OPCODE_X1 = 28,
+  BLTZ_BRANCH_OPCODE_X1 = 29,
+  BNEZT_BRANCH_OPCODE_X1 = 30,
+  BNEZ_BRANCH_OPCODE_X1 = 31,
+  BRANCH_OPCODE_X1 = 2,
+  CMOVEQZ_RRR_0_OPCODE_X0 = 5,
+  CMOVEQZ_RRR_4_OPCODE_Y0 = 0,
+  CMOVNEZ_RRR_0_OPCODE_X0 = 6,
+  CMOVNEZ_RRR_4_OPCODE_Y0 = 1,
+  CMPEQI_IMM8_OPCODE_X0 = 4,
+  CMPEQI_IMM8_OPCODE_X1 = 4,
+  CMPEQI_OPCODE_Y0 = 3,
+  CMPEQI_OPCODE_Y1 = 4,
+  CMPEQ_RRR_0_OPCODE_X0 = 7,
+  CMPEQ_RRR_0_OPCODE_X1 = 5,
+  CMPEQ_RRR_3_OPCODE_Y0 = 0,
+  CMPEQ_RRR_3_OPCODE_Y1 = 2,
+  CMPEXCH4_RRR_0_OPCODE_X1 = 6,
+  CMPEXCH_RRR_0_OPCODE_X1 = 7,
+  CMPLES_RRR_0_OPCODE_X0 = 8,
+  CMPLES_RRR_0_OPCODE_X1 = 8,
+  CMPLES_RRR_2_OPCODE_Y0 = 0,
+  CMPLES_RRR_2_OPCODE_Y1 = 0,
+  CMPLEU_RRR_0_OPCODE_X0 = 9,
+  CMPLEU_RRR_0_OPCODE_X1 = 9,
+  CMPLEU_RRR_2_OPCODE_Y0 = 1,
+  CMPLEU_RRR_2_OPCODE_Y1 = 1,
+  CMPLTSI_IMM8_OPCODE_X0 = 5,
+  CMPLTSI_IMM8_OPCODE_X1 = 5,
+  CMPLTSI_OPCODE_Y0 = 4,
+  CMPLTSI_OPCODE_Y1 = 5,
+  CMPLTS_RRR_0_OPCODE_X0 = 10,
+  CMPLTS_RRR_0_OPCODE_X1 = 10,
+  CMPLTS_RRR_2_OPCODE_Y0 = 2,
+  CMPLTS_RRR_2_OPCODE_Y1 = 2,
+  CMPLTUI_IMM8_OPCODE_X0 = 6,
+  CMPLTUI_IMM8_OPCODE_X1 = 6,
+  CMPLTU_RRR_0_OPCODE_X0 = 11,
+  CMPLTU_RRR_0_OPCODE_X1 = 11,
+  CMPLTU_RRR_2_OPCODE_Y0 = 3,
+  CMPLTU_RRR_2_OPCODE_Y1 = 3,
+  CMPNE_RRR_0_OPCODE_X0 = 12,
+  CMPNE_RRR_0_OPCODE_X1 = 12,
+  CMPNE_RRR_3_OPCODE_Y0 = 1,
+  CMPNE_RRR_3_OPCODE_Y1 = 3,
+  CMULAF_RRR_0_OPCODE_X0 = 13,
+  CMULA_RRR_0_OPCODE_X0 = 14,
+  CMULFR_RRR_0_OPCODE_X0 = 15,
+  CMULF_RRR_0_OPCODE_X0 = 16,
+  CMULHR_RRR_0_OPCODE_X0 = 17,
+  CMULH_RRR_0_OPCODE_X0 = 18,
+  CMUL_RRR_0_OPCODE_X0 = 19,
+  CNTLZ_UNARY_OPCODE_X0 = 1,
+  CNTLZ_UNARY_OPCODE_Y0 = 1,
+  CNTTZ_UNARY_OPCODE_X0 = 2,
+  CNTTZ_UNARY_OPCODE_Y0 = 2,
+  CRC32_32_RRR_0_OPCODE_X0 = 20,
+  CRC32_8_RRR_0_OPCODE_X0 = 21,
+  DBLALIGN2_RRR_0_OPCODE_X0 = 22,
+  DBLALIGN2_RRR_0_OPCODE_X1 = 13,
+  DBLALIGN4_RRR_0_OPCODE_X0 = 23,
+  DBLALIGN4_RRR_0_OPCODE_X1 = 14,
+  DBLALIGN6_RRR_0_OPCODE_X0 = 24,
+  DBLALIGN6_RRR_0_OPCODE_X1 = 15,
+  DBLALIGN_RRR_0_OPCODE_X0 = 25,
+  DRAIN_UNARY_OPCODE_X1 = 1,
+  DTLBPR_UNARY_OPCODE_X1 = 2,
+  EXCH4_RRR_0_OPCODE_X1 = 16,
+  EXCH_RRR_0_OPCODE_X1 = 17,
+  FDOUBLE_ADDSUB_RRR_0_OPCODE_X0 = 26,
+  FDOUBLE_ADD_FLAGS_RRR_0_OPCODE_X0 = 27,
+  FDOUBLE_MUL_FLAGS_RRR_0_OPCODE_X0 = 28,
+  FDOUBLE_PACK1_RRR_0_OPCODE_X0 = 29,
+  FDOUBLE_PACK2_RRR_0_OPCODE_X0 = 30,
+  FDOUBLE_SUB_FLAGS_RRR_0_OPCODE_X0 = 31,
+  FDOUBLE_UNPACK_MAX_RRR_0_OPCODE_X0 = 32,
+  FDOUBLE_UNPACK_MIN_RRR_0_OPCODE_X0 = 33,
+  FETCHADD4_RRR_0_OPCODE_X1 = 18,
+  FETCHADDGEZ4_RRR_0_OPCODE_X1 = 19,
+  FETCHADDGEZ_RRR_0_OPCODE_X1 = 20,
+  FETCHADD_RRR_0_OPCODE_X1 = 21,
+  FETCHAND4_RRR_0_OPCODE_X1 = 22,
+  FETCHAND_RRR_0_OPCODE_X1 = 23,
+  FETCHOR4_RRR_0_OPCODE_X1 = 24,
+  FETCHOR_RRR_0_OPCODE_X1 = 25,
+  FINV_UNARY_OPCODE_X1 = 3,
+  FLUSHWB_UNARY_OPCODE_X1 = 4,
+  FLUSH_UNARY_OPCODE_X1 = 5,
+  FNOP_UNARY_OPCODE_X0 = 3,
+  FNOP_UNARY_OPCODE_X1 = 6,
+  FNOP_UNARY_OPCODE_Y0 = 3,
+  FNOP_UNARY_OPCODE_Y1 = 8,
+  FSINGLE_ADD1_RRR_0_OPCODE_X0 = 34,
+  FSINGLE_ADDSUB2_RRR_0_OPCODE_X0 = 35,
+  FSINGLE_MUL1_RRR_0_OPCODE_X0 = 36,
+  FSINGLE_MUL2_RRR_0_OPCODE_X0 = 37,
+  FSINGLE_PACK1_UNARY_OPCODE_X0 = 4,
+  FSINGLE_PACK1_UNARY_OPCODE_Y0 = 4,
+  FSINGLE_PACK2_RRR_0_OPCODE_X0 = 38,
+  FSINGLE_SUB1_RRR_0_OPCODE_X0 = 39,
+  ICOH_UNARY_OPCODE_X1 = 7,
+  ILL_UNARY_OPCODE_X1 = 8,
+  ILL_UNARY_OPCODE_Y1 = 9,
+  IMM8_OPCODE_X0 = 4,
+  IMM8_OPCODE_X1 = 3,
+  INV_UNARY_OPCODE_X1 = 9,
+  IRET_UNARY_OPCODE_X1 = 10,
+  JALRP_UNARY_OPCODE_X1 = 11,
+  JALRP_UNARY_OPCODE_Y1 = 10,
+  JALR_UNARY_OPCODE_X1 = 12,
+  JALR_UNARY_OPCODE_Y1 = 11,
+  JAL_JUMP_OPCODE_X1 = 0,
+  JRP_UNARY_OPCODE_X1 = 13,
+  JRP_UNARY_OPCODE_Y1 = 12,
+  JR_UNARY_OPCODE_X1 = 14,
+  JR_UNARY_OPCODE_Y1 = 13,
+  JUMP_OPCODE_X1 = 4,
+  J_JUMP_OPCODE_X1 = 1,
+  LD1S_ADD_IMM8_OPCODE_X1 = 7,
+  LD1S_OPCODE_Y2 = 0,
+  LD1S_UNARY_OPCODE_X1 = 15,
+  LD1U_ADD_IMM8_OPCODE_X1 = 8,
+  LD1U_OPCODE_Y2 = 1,
+  LD1U_UNARY_OPCODE_X1 = 16,
+  LD2S_ADD_IMM8_OPCODE_X1 = 9,
+  LD2S_OPCODE_Y2 = 2,
+  LD2S_UNARY_OPCODE_X1 = 17,
+  LD2U_ADD_IMM8_OPCODE_X1 = 10,
+  LD2U_OPCODE_Y2 = 3,
+  LD2U_UNARY_OPCODE_X1 = 18,
+  LD4S_ADD_IMM8_OPCODE_X1 = 11,
+  LD4S_OPCODE_Y2 = 1,
+  LD4S_UNARY_OPCODE_X1 = 19,
+  LD4U_ADD_IMM8_OPCODE_X1 = 12,
+  LD4U_OPCODE_Y2 = 2,
+  LD4U_UNARY_OPCODE_X1 = 20,
+  LDNA_UNARY_OPCODE_X1 = 21,
+  LDNT1S_ADD_IMM8_OPCODE_X1 = 13,
+  LDNT1S_UNARY_OPCODE_X1 = 22,
+  LDNT1U_ADD_IMM8_OPCODE_X1 = 14,
+  LDNT1U_UNARY_OPCODE_X1 = 23,
+  LDNT2S_ADD_IMM8_OPCODE_X1 = 15,
+  LDNT2S_UNARY_OPCODE_X1 = 24,
+  LDNT2U_ADD_IMM8_OPCODE_X1 = 16,
+  LDNT2U_UNARY_OPCODE_X1 = 25,
+  LDNT4S_ADD_IMM8_OPCODE_X1 = 17,
+  LDNT4S_UNARY_OPCODE_X1 = 26,
+  LDNT4U_ADD_IMM8_OPCODE_X1 = 18,
+  LDNT4U_UNARY_OPCODE_X1 = 27,
+  LDNT_ADD_IMM8_OPCODE_X1 = 19,
+  LDNT_UNARY_OPCODE_X1 = 28,
+  LD_ADD_IMM8_OPCODE_X1 = 20,
+  LD_OPCODE_Y2 = 3,
+  LD_UNARY_OPCODE_X1 = 29,
+  LNK_UNARY_OPCODE_X1 = 30,
+  LNK_UNARY_OPCODE_Y1 = 14,
+  LWNA_ADD_IMM8_OPCODE_X1 = 21,
+  MFSPR_IMM8_OPCODE_X1 = 22,
+  MF_UNARY_OPCODE_X1 = 31,
+  MM_BF_OPCODE_X0 = 7,
+  MNZ_RRR_0_OPCODE_X0 = 40,
+  MNZ_RRR_0_OPCODE_X1 = 26,
+  MNZ_RRR_4_OPCODE_Y0 = 2,
+  MNZ_RRR_4_OPCODE_Y1 = 2,
+  MODE_OPCODE_YA2 = 1,
+  MODE_OPCODE_YB2 = 2,
+  MODE_OPCODE_YC2 = 3,
+  MTSPR_IMM8_OPCODE_X1 = 23,
+  MULAX_RRR_0_OPCODE_X0 = 41,
+  MULAX_RRR_3_OPCODE_Y0 = 2,
+  MULA_HS_HS_RRR_0_OPCODE_X0 = 42,
+  MULA_HS_HS_RRR_9_OPCODE_Y0 = 0,
+  MULA_HS_HU_RRR_0_OPCODE_X0 = 43,
+  MULA_HS_LS_RRR_0_OPCODE_X0 = 44,
+  MULA_HS_LU_RRR_0_OPCODE_X0 = 45,
+  MULA_HU_HU_RRR_0_OPCODE_X0 = 46,
+  MULA_HU_HU_RRR_9_OPCODE_Y0 = 1,
+  MULA_HU_LS_RRR_0_OPCODE_X0 = 47,
+  MULA_HU_LU_RRR_0_OPCODE_X0 = 48,
+  MULA_LS_LS_RRR_0_OPCODE_X0 = 49,
+  MULA_LS_LS_RRR_9_OPCODE_Y0 = 2,
+  MULA_LS_LU_RRR_0_OPCODE_X0 = 50,
+  MULA_LU_LU_RRR_0_OPCODE_X0 = 51,
+  MULA_LU_LU_RRR_9_OPCODE_Y0 = 3,
+  MULX_RRR_0_OPCODE_X0 = 52,
+  MULX_RRR_3_OPCODE_Y0 = 3,
+  MUL_HS_HS_RRR_0_OPCODE_X0 = 53,
+  MUL_HS_HS_RRR_8_OPCODE_Y0 = 0,
+  MUL_HS_HU_RRR_0_OPCODE_X0 = 54,
+  MUL_HS_LS_RRR_0_OPCODE_X0 = 55,
+  MUL_HS_LU_RRR_0_OPCODE_X0 = 56,
+  MUL_HU_HU_RRR_0_OPCODE_X0 = 57,
+  MUL_HU_HU_RRR_8_OPCODE_Y0 = 1,
+  MUL_HU_LS_RRR_0_OPCODE_X0 = 58,
+  MUL_HU_LU_RRR_0_OPCODE_X0 = 59,
+  MUL_LS_LS_RRR_0_OPCODE_X0 = 60,
+  MUL_LS_LS_RRR_8_OPCODE_Y0 = 2,
+  MUL_LS_LU_RRR_0_OPCODE_X0 = 61,
+  MUL_LU_LU_RRR_0_OPCODE_X0 = 62,
+  MUL_LU_LU_RRR_8_OPCODE_Y0 = 3,
+  MZ_RRR_0_OPCODE_X0 = 63,
+  MZ_RRR_0_OPCODE_X1 = 27,
+  MZ_RRR_4_OPCODE_Y0 = 3,
+  MZ_RRR_4_OPCODE_Y1 = 3,
+  NAP_UNARY_OPCODE_X1 = 32,
+  NOP_UNARY_OPCODE_X0 = 5,
+  NOP_UNARY_OPCODE_X1 = 33,
+  NOP_UNARY_OPCODE_Y0 = 5,
+  NOP_UNARY_OPCODE_Y1 = 15,
+  NOR_RRR_0_OPCODE_X0 = 64,
+  NOR_RRR_0_OPCODE_X1 = 28,
+  NOR_RRR_5_OPCODE_Y0 = 1,
+  NOR_RRR_5_OPCODE_Y1 = 1,
+  ORI_IMM8_OPCODE_X0 = 7,
+  ORI_IMM8_OPCODE_X1 = 24,
+  OR_RRR_0_OPCODE_X0 = 65,
+  OR_RRR_0_OPCODE_X1 = 29,
+  OR_RRR_5_OPCODE_Y0 = 2,
+  OR_RRR_5_OPCODE_Y1 = 2,
+  PCNT_UNARY_OPCODE_X0 = 6,
+  PCNT_UNARY_OPCODE_Y0 = 6,
+  REVBITS_UNARY_OPCODE_X0 = 7,
+  REVBITS_UNARY_OPCODE_Y0 = 7,
+  REVBYTES_UNARY_OPCODE_X0 = 8,
+  REVBYTES_UNARY_OPCODE_Y0 = 8,
+  ROTLI_SHIFT_OPCODE_X0 = 1,
+  ROTLI_SHIFT_OPCODE_X1 = 1,
+  ROTLI_SHIFT_OPCODE_Y0 = 0,
+  ROTLI_SHIFT_OPCODE_Y1 = 0,
+  ROTL_RRR_0_OPCODE_X0 = 66,
+  ROTL_RRR_0_OPCODE_X1 = 30,
+  ROTL_RRR_6_OPCODE_Y0 = 0,
+  ROTL_RRR_6_OPCODE_Y1 = 0,
+  RRR_0_OPCODE_X0 = 5,
+  RRR_0_OPCODE_X1 = 5,
+  RRR_0_OPCODE_Y0 = 5,
+  RRR_0_OPCODE_Y1 = 6,
+  RRR_1_OPCODE_Y0 = 6,
+  RRR_1_OPCODE_Y1 = 7,
+  RRR_2_OPCODE_Y0 = 7,
+  RRR_2_OPCODE_Y1 = 8,
+  RRR_3_OPCODE_Y0 = 8,
+  RRR_3_OPCODE_Y1 = 9,
+  RRR_4_OPCODE_Y0 = 9,
+  RRR_4_OPCODE_Y1 = 10,
+  RRR_5_OPCODE_Y0 = 10,
+  RRR_5_OPCODE_Y1 = 11,
+  RRR_6_OPCODE_Y0 = 11,
+  RRR_6_OPCODE_Y1 = 12,
+  RRR_7_OPCODE_Y0 = 12,
+  RRR_7_OPCODE_Y1 = 13,
+  RRR_8_OPCODE_Y0 = 13,
+  RRR_9_OPCODE_Y0 = 14,
+  SHIFT_OPCODE_X0 = 6,
+  SHIFT_OPCODE_X1 = 6,
+  SHIFT_OPCODE_Y0 = 15,
+  SHIFT_OPCODE_Y1 = 14,
+  SHL16INSLI_OPCODE_X0 = 7,
+  SHL16INSLI_OPCODE_X1 = 7,
+  SHL1ADDX_RRR_0_OPCODE_X0 = 67,
+  SHL1ADDX_RRR_0_OPCODE_X1 = 31,
+  SHL1ADDX_RRR_7_OPCODE_Y0 = 1,
+  SHL1ADDX_RRR_7_OPCODE_Y1 = 1,
+  SHL1ADD_RRR_0_OPCODE_X0 = 68,
+  SHL1ADD_RRR_0_OPCODE_X1 = 32,
+  SHL1ADD_RRR_1_OPCODE_Y0 = 0,
+  SHL1ADD_RRR_1_OPCODE_Y1 = 0,
+  SHL2ADDX_RRR_0_OPCODE_X0 = 69,
+  SHL2ADDX_RRR_0_OPCODE_X1 = 33,
+  SHL2ADDX_RRR_7_OPCODE_Y0 = 2,
+  SHL2ADDX_RRR_7_OPCODE_Y1 = 2,
+  SHL2ADD_RRR_0_OPCODE_X0 = 70,
+  SHL2ADD_RRR_0_OPCODE_X1 = 34,
+  SHL2ADD_RRR_1_OPCODE_Y0 = 1,
+  SHL2ADD_RRR_1_OPCODE_Y1 = 1,
+  SHL3ADDX_RRR_0_OPCODE_X0 = 71,
+  SHL3ADDX_RRR_0_OPCODE_X1 = 35,
+  SHL3ADDX_RRR_7_OPCODE_Y0 = 3,
+  SHL3ADDX_RRR_7_OPCODE_Y1 = 3,
+  SHL3ADD_RRR_0_OPCODE_X0 = 72,
+  SHL3ADD_RRR_0_OPCODE_X1 = 36,
+  SHL3ADD_RRR_1_OPCODE_Y0 = 2,
+  SHL3ADD_RRR_1_OPCODE_Y1 = 2,
+  SHLI_SHIFT_OPCODE_X0 = 2,
+  SHLI_SHIFT_OPCODE_X1 = 2,
+  SHLI_SHIFT_OPCODE_Y0 = 1,
+  SHLI_SHIFT_OPCODE_Y1 = 1,
+  SHLXI_SHIFT_OPCODE_X0 = 3,
+  SHLXI_SHIFT_OPCODE_X1 = 3,
+  SHLX_RRR_0_OPCODE_X0 = 73,
+  SHLX_RRR_0_OPCODE_X1 = 37,
+  SHL_RRR_0_OPCODE_X0 = 74,
+  SHL_RRR_0_OPCODE_X1 = 38,
+  SHL_RRR_6_OPCODE_Y0 = 1,
+  SHL_RRR_6_OPCODE_Y1 = 1,
+  SHRSI_SHIFT_OPCODE_X0 = 4,
+  SHRSI_SHIFT_OPCODE_X1 = 4,
+  SHRSI_SHIFT_OPCODE_Y0 = 2,
+  SHRSI_SHIFT_OPCODE_Y1 = 2,
+  SHRS_RRR_0_OPCODE_X0 = 75,
+  SHRS_RRR_0_OPCODE_X1 = 39,
+  SHRS_RRR_6_OPCODE_Y0 = 2,
+  SHRS_RRR_6_OPCODE_Y1 = 2,
+  SHRUI_SHIFT_OPCODE_X0 = 5,
+  SHRUI_SHIFT_OPCODE_X1 = 5,
+  SHRUI_SHIFT_OPCODE_Y0 = 3,
+  SHRUI_SHIFT_OPCODE_Y1 = 3,
+  SHRUXI_SHIFT_OPCODE_X0 = 6,
+  SHRUXI_SHIFT_OPCODE_X1 = 6,
+  SHRUX_RRR_0_OPCODE_X0 = 76,
+  SHRUX_RRR_0_OPCODE_X1 = 40,
+  SHRU_RRR_0_OPCODE_X0 = 77,
+  SHRU_RRR_0_OPCODE_X1 = 41,
+  SHRU_RRR_6_OPCODE_Y0 = 3,
+  SHRU_RRR_6_OPCODE_Y1 = 3,
+  SHUFFLEBYTES_RRR_0_OPCODE_X0 = 78,
+  ST1_ADD_IMM8_OPCODE_X1 = 25,
+  ST1_OPCODE_Y2 = 0,
+  ST1_RRR_0_OPCODE_X1 = 42,
+  ST2_ADD_IMM8_OPCODE_X1 = 26,
+  ST2_OPCODE_Y2 = 1,
+  ST2_RRR_0_OPCODE_X1 = 43,
+  ST4_ADD_IMM8_OPCODE_X1 = 27,
+  ST4_OPCODE_Y2 = 2,
+  ST4_RRR_0_OPCODE_X1 = 44,
+  STNT1_ADD_IMM8_OPCODE_X1 = 28,
+  STNT1_RRR_0_OPCODE_X1 = 45,
+  STNT2_ADD_IMM8_OPCODE_X1 = 29,
+  STNT2_RRR_0_OPCODE_X1 = 46,
+  STNT4_ADD_IMM8_OPCODE_X1 = 30,
+  STNT4_RRR_0_OPCODE_X1 = 47,
+  STNT_ADD_IMM8_OPCODE_X1 = 31,
+  STNT_RRR_0_OPCODE_X1 = 48,
+  ST_ADD_IMM8_OPCODE_X1 = 32,
+  ST_OPCODE_Y2 = 3,
+  ST_RRR_0_OPCODE_X1 = 49,
+  SUBXSC_RRR_0_OPCODE_X0 = 79,
+  SUBXSC_RRR_0_OPCODE_X1 = 50,
+  SUBX_RRR_0_OPCODE_X0 = 80,
+  SUBX_RRR_0_OPCODE_X1 = 51,
+  SUBX_RRR_0_OPCODE_Y0 = 2,
+  SUBX_RRR_0_OPCODE_Y1 = 2,
+  SUB_RRR_0_OPCODE_X0 = 81,
+  SUB_RRR_0_OPCODE_X1 = 52,
+  SUB_RRR_0_OPCODE_Y0 = 3,
+  SUB_RRR_0_OPCODE_Y1 = 3,
+  SWINT0_UNARY_OPCODE_X1 = 34,
+  SWINT1_UNARY_OPCODE_X1 = 35,
+  SWINT2_UNARY_OPCODE_X1 = 36,
+  SWINT3_UNARY_OPCODE_X1 = 37,
+  TBLIDXB0_UNARY_OPCODE_X0 = 9,
+  TBLIDXB0_UNARY_OPCODE_Y0 = 9,
+  TBLIDXB1_UNARY_OPCODE_X0 = 10,
+  TBLIDXB1_UNARY_OPCODE_Y0 = 10,
+  TBLIDXB2_UNARY_OPCODE_X0 = 11,
+  TBLIDXB2_UNARY_OPCODE_Y0 = 11,
+  TBLIDXB3_UNARY_OPCODE_X0 = 12,
+  TBLIDXB3_UNARY_OPCODE_Y0 = 12,
+  UNARY_RRR_0_OPCODE_X0 = 82,
+  UNARY_RRR_0_OPCODE_X1 = 53,
+  UNARY_RRR_1_OPCODE_Y0 = 3,
+  UNARY_RRR_1_OPCODE_Y1 = 3,
+  V1ADDI_IMM8_OPCODE_X0 = 8,
+  V1ADDI_IMM8_OPCODE_X1 = 33,
+  V1ADDUC_RRR_0_OPCODE_X0 = 83,
+  V1ADDUC_RRR_0_OPCODE_X1 = 54,
+  V1ADD_RRR_0_OPCODE_X0 = 84,
+  V1ADD_RRR_0_OPCODE_X1 = 55,
+  V1ADIFFU_RRR_0_OPCODE_X0 = 85,
+  V1AVGU_RRR_0_OPCODE_X0 = 86,
+  V1CMPEQI_IMM8_OPCODE_X0 = 9,
+  V1CMPEQI_IMM8_OPCODE_X1 = 34,
+  V1CMPEQ_RRR_0_OPCODE_X0 = 87,
+  V1CMPEQ_RRR_0_OPCODE_X1 = 56,
+  V1CMPLES_RRR_0_OPCODE_X0 = 88,
+  V1CMPLES_RRR_0_OPCODE_X1 = 57,
+  V1CMPLEU_RRR_0_OPCODE_X0 = 89,
+  V1CMPLEU_RRR_0_OPCODE_X1 = 58,
+  V1CMPLTSI_IMM8_OPCODE_X0 = 10,
+  V1CMPLTSI_IMM8_OPCODE_X1 = 35,
+  V1CMPLTS_RRR_0_OPCODE_X0 = 90,
+  V1CMPLTS_RRR_0_OPCODE_X1 = 59,
+  V1CMPLTUI_IMM8_OPCODE_X0 = 11,
+  V1CMPLTUI_IMM8_OPCODE_X1 = 36,
+  V1CMPLTU_RRR_0_OPCODE_X0 = 91,
+  V1CMPLTU_RRR_0_OPCODE_X1 = 60,
+  V1CMPNE_RRR_0_OPCODE_X0 = 92,
+  V1CMPNE_RRR_0_OPCODE_X1 = 61,
+  V1DDOTPUA_RRR_0_OPCODE_X0 = 161,
+  V1DDOTPUSA_RRR_0_OPCODE_X0 = 93,
+  V1DDOTPUS_RRR_0_OPCODE_X0 = 94,
+  V1DDOTPU_RRR_0_OPCODE_X0 = 162,
+  V1DOTPA_RRR_0_OPCODE_X0 = 95,
+  V1DOTPUA_RRR_0_OPCODE_X0 = 163,
+  V1DOTPUSA_RRR_0_OPCODE_X0 = 96,
+  V1DOTPUS_RRR_0_OPCODE_X0 = 97,
+  V1DOTPU_RRR_0_OPCODE_X0 = 164,
+  V1DOTP_RRR_0_OPCODE_X0 = 98,
+  V1INT_H_RRR_0_OPCODE_X0 = 99,
+  V1INT_H_RRR_0_OPCODE_X1 = 62,
+  V1INT_L_RRR_0_OPCODE_X0 = 100,
+  V1INT_L_RRR_0_OPCODE_X1 = 63,
+  V1MAXUI_IMM8_OPCODE_X0 = 12,
+  V1MAXUI_IMM8_OPCODE_X1 = 37,
+  V1MAXU_RRR_0_OPCODE_X0 = 101,
+  V1MAXU_RRR_0_OPCODE_X1 = 64,
+  V1MINUI_IMM8_OPCODE_X0 = 13,
+  V1MINUI_IMM8_OPCODE_X1 = 38,
+  V1MINU_RRR_0_OPCODE_X0 = 102,
+  V1MINU_RRR_0_OPCODE_X1 = 65,
+  V1MNZ_RRR_0_OPCODE_X0 = 103,
+  V1MNZ_RRR_0_OPCODE_X1 = 66,
+  V1MULTU_RRR_0_OPCODE_X0 = 104,
+  V1MULUS_RRR_0_OPCODE_X0 = 105,
+  V1MULU_RRR_0_OPCODE_X0 = 106,
+  V1MZ_RRR_0_OPCODE_X0 = 107,
+  V1MZ_RRR_0_OPCODE_X1 = 67,
+  V1SADAU_RRR_0_OPCODE_X0 = 108,
+  V1SADU_RRR_0_OPCODE_X0 = 109,
+  V1SHLI_SHIFT_OPCODE_X0 = 7,
+  V1SHLI_SHIFT_OPCODE_X1 = 7,
+  V1SHL_RRR_0_OPCODE_X0 = 110,
+  V1SHL_RRR_0_OPCODE_X1 = 68,
+  V1SHRSI_SHIFT_OPCODE_X0 = 8,
+  V1SHRSI_SHIFT_OPCODE_X1 = 8,
+  V1SHRS_RRR_0_OPCODE_X0 = 111,
+  V1SHRS_RRR_0_OPCODE_X1 = 69,
+  V1SHRUI_SHIFT_OPCODE_X0 = 9,
+  V1SHRUI_SHIFT_OPCODE_X1 = 9,
+  V1SHRU_RRR_0_OPCODE_X0 = 112,
+  V1SHRU_RRR_0_OPCODE_X1 = 70,
+  V1SUBUC_RRR_0_OPCODE_X0 = 113,
+  V1SUBUC_RRR_0_OPCODE_X1 = 71,
+  V1SUB_RRR_0_OPCODE_X0 = 114,
+  V1SUB_RRR_0_OPCODE_X1 = 72,
+  V2ADDI_IMM8_OPCODE_X0 = 14,
+  V2ADDI_IMM8_OPCODE_X1 = 39,
+  V2ADDSC_RRR_0_OPCODE_X0 = 115,
+  V2ADDSC_RRR_0_OPCODE_X1 = 73,
+  V2ADD_RRR_0_OPCODE_X0 = 116,
+  V2ADD_RRR_0_OPCODE_X1 = 74,
+  V2ADIFFS_RRR_0_OPCODE_X0 = 117,
+  V2AVGS_RRR_0_OPCODE_X0 = 118,
+  V2CMPEQI_IMM8_OPCODE_X0 = 15,
+  V2CMPEQI_IMM8_OPCODE_X1 = 40,
+  V2CMPEQ_RRR_0_OPCODE_X0 = 119,
+  V2CMPEQ_RRR_0_OPCODE_X1 = 75,
+  V2CMPLES_RRR_0_OPCODE_X0 = 120,
+  V2CMPLES_RRR_0_OPCODE_X1 = 76,
+  V2CMPLEU_RRR_0_OPCODE_X0 = 121,
+  V2CMPLEU_RRR_0_OPCODE_X1 = 77,
+  V2CMPLTSI_IMM8_OPCODE_X0 = 16,
+  V2CMPLTSI_IMM8_OPCODE_X1 = 41,
+  V2CMPLTS_RRR_0_OPCODE_X0 = 122,
+  V2CMPLTS_RRR_0_OPCODE_X1 = 78,
+  V2CMPLTUI_IMM8_OPCODE_X0 = 17,
+  V2CMPLTUI_IMM8_OPCODE_X1 = 42,
+  V2CMPLTU_RRR_0_OPCODE_X0 = 123,
+  V2CMPLTU_RRR_0_OPCODE_X1 = 79,
+  V2CMPNE_RRR_0_OPCODE_X0 = 124,
+  V2CMPNE_RRR_0_OPCODE_X1 = 80,
+  V2DOTPA_RRR_0_OPCODE_X0 = 125,
+  V2DOTP_RRR_0_OPCODE_X0 = 126,
+  V2INT_H_RRR_0_OPCODE_X0 = 127,
+  V2INT_H_RRR_0_OPCODE_X1 = 81,
+  V2INT_L_RRR_0_OPCODE_X0 = 128,
+  V2INT_L_RRR_0_OPCODE_X1 = 82,
+  V2MAXSI_IMM8_OPCODE_X0 = 18,
+  V2MAXSI_IMM8_OPCODE_X1 = 43,
+  V2MAXS_RRR_0_OPCODE_X0 = 129,
+  V2MAXS_RRR_0_OPCODE_X1 = 83,
+  V2MINSI_IMM8_OPCODE_X0 = 19,
+  V2MINSI_IMM8_OPCODE_X1 = 44,
+  V2MINS_RRR_0_OPCODE_X0 = 130,
+  V2MINS_RRR_0_OPCODE_X1 = 84,
+  V2MNZ_RRR_0_OPCODE_X0 = 131,
+  V2MNZ_RRR_0_OPCODE_X1 = 85,
+  V2MULFSC_RRR_0_OPCODE_X0 = 132,
+  V2MULS_RRR_0_OPCODE_X0 = 133,
+  V2MULTS_RRR_0_OPCODE_X0 = 134,
+  V2MZ_RRR_0_OPCODE_X0 = 135,
+  V2MZ_RRR_0_OPCODE_X1 = 86,
+  V2PACKH_RRR_0_OPCODE_X0 = 136,
+  V2PACKH_RRR_0_OPCODE_X1 = 87,
+  V2PACKL_RRR_0_OPCODE_X0 = 137,
+  V2PACKL_RRR_0_OPCODE_X1 = 88,
+  V2PACKUC_RRR_0_OPCODE_X0 = 138,
+  V2PACKUC_RRR_0_OPCODE_X1 = 89,
+  V2SADAS_RRR_0_OPCODE_X0 = 139,
+  V2SADAU_RRR_0_OPCODE_X0 = 140,
+  V2SADS_RRR_0_OPCODE_X0 = 141,
+  V2SADU_RRR_0_OPCODE_X0 = 142,
+  V2SHLI_SHIFT_OPCODE_X0 = 10,
+  V2SHLI_SHIFT_OPCODE_X1 = 10,
+  V2SHLSC_RRR_0_OPCODE_X0 = 143,
+  V2SHLSC_RRR_0_OPCODE_X1 = 90,
+  V2SHL_RRR_0_OPCODE_X0 = 144,
+  V2SHL_RRR_0_OPCODE_X1 = 91,
+  V2SHRSI_SHIFT_OPCODE_X0 = 11,
+  V2SHRSI_SHIFT_OPCODE_X1 = 11,
+  V2SHRS_RRR_0_OPCODE_X0 = 145,
+  V2SHRS_RRR_0_OPCODE_X1 = 92,
+  V2SHRUI_SHIFT_OPCODE_X0 = 12,
+  V2SHRUI_SHIFT_OPCODE_X1 = 12,
+  V2SHRU_RRR_0_OPCODE_X0 = 146,
+  V2SHRU_RRR_0_OPCODE_X1 = 93,
+  V2SUBSC_RRR_0_OPCODE_X0 = 147,
+  V2SUBSC_RRR_0_OPCODE_X1 = 94,
+  V2SUB_RRR_0_OPCODE_X0 = 148,
+  V2SUB_RRR_0_OPCODE_X1 = 95,
+  V4ADDSC_RRR_0_OPCODE_X0 = 149,
+  V4ADDSC_RRR_0_OPCODE_X1 = 96,
+  V4ADD_RRR_0_OPCODE_X0 = 150,
+  V4ADD_RRR_0_OPCODE_X1 = 97,
+  V4INT_H_RRR_0_OPCODE_X0 = 151,
+  V4INT_H_RRR_0_OPCODE_X1 = 98,
+  V4INT_L_RRR_0_OPCODE_X0 = 152,
+  V4INT_L_RRR_0_OPCODE_X1 = 99,
+  V4PACKSC_RRR_0_OPCODE_X0 = 153,
+  V4PACKSC_RRR_0_OPCODE_X1 = 100,
+  V4SHLSC_RRR_0_OPCODE_X0 = 154,
+  V4SHLSC_RRR_0_OPCODE_X1 = 101,
+  V4SHL_RRR_0_OPCODE_X0 = 155,
+  V4SHL_RRR_0_OPCODE_X1 = 102,
+  V4SHRS_RRR_0_OPCODE_X0 = 156,
+  V4SHRS_RRR_0_OPCODE_X1 = 103,
+  V4SHRU_RRR_0_OPCODE_X0 = 157,
+  V4SHRU_RRR_0_OPCODE_X1 = 104,
+  V4SUBSC_RRR_0_OPCODE_X0 = 158,
+  V4SUBSC_RRR_0_OPCODE_X1 = 105,
+  V4SUB_RRR_0_OPCODE_X0 = 159,
+  V4SUB_RRR_0_OPCODE_X1 = 106,
+  WH64_UNARY_OPCODE_X1 = 38,
+  XORI_IMM8_OPCODE_X0 = 20,
+  XORI_IMM8_OPCODE_X1 = 45,
+  XOR_RRR_0_OPCODE_X0 = 160,
+  XOR_RRR_0_OPCODE_X1 = 107,
+  XOR_RRR_5_OPCODE_Y0 = 3,
+  XOR_RRR_5_OPCODE_Y1 = 3
+};
+
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* __ARCH_OPCODE_H__ */
diff --git a/target-tilegx/translate.c b/target-tilegx/translate.c
new file mode 100644
index 0000000..4fb0167
--- /dev/null
+++ b/target-tilegx/translate.c
@@ -0,0 +1,1735 @@
+/*
+ * QEMU TILE-Gx CPU
+ *
+ *  Copyright (c) 2015 Chen Gang
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+
+#include "cpu.h"
+#include "qemu/log.h"
+#include "disas/disas.h"
+#include "tcg-op.h"
+#include "exec/cpu_ldst.h"
+#include "opcode_tilegx.h"
+
+/* Largest opcode value decoded per pipe; the bound is inclusive. */
+#define TILEGX_OPCODE_MAX_X0            164  /* include 164 */
+#define TILEGX_OPCODE_MAX_X1            107  /* include 107 */
+#define TILEGX_OPCODE_MAX_Y0             15  /* include 15 */
+#define TILEGX_OPCODE_MAX_Y1             15  /* include 15 */
+#define TILEGX_OPCODE_MAX_Y2              3  /* include 3 */
+
+/* TCG globals mirroring the architectural CPU state. */
+static TCGv_ptr cpu_env;
+static TCGv cpu_pc;
+static TCGv cpu_regs[TILEGX_R_COUNT];
+
+/* Disassembly names; r52-r55 are shown under their ABI aliases. */
+static const char * const reg_names[] = {
+     "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
+     "r8",  "r9", "r10", "r11", "r12", "r13", "r14", "r15",
+    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
+    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
+    "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
+    "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
+    "r48", "r49", "r50", "r51",  "bp",  "tp",  "sp",  "lr"
+};
+
+/* One per-instruction temporary destination register. */
+typedef struct DisasContextTemp {
+    uint8_t idx;                   /* Target register number to write back */
+    TCGv val;                      /* Temporary holding the computed value */
+} DisasContextTemp;
+
+/* This is the state at translation time.  */
+typedef struct DisasContext {
+    uint64_t pc;                   /* Current pc */
+    uint64_t exception;            /* Current exception */
+
+    TCGv zero;                     /* For zero register */
+
+    DisasContextTemp *tmp_regcur;  /* Current temporary registers */
+    DisasContextTemp tmp_regs[TILEGX_MAX_INSTRUCTIONS_PER_BUNDLE];
+                                   /* All temporary registers */
+    struct {
+        TCGCond cond;              /* Branch condition */
+        TCGv dest;                 /* pc jump destination, if will jump */
+        TCGv val1;                 /* First value for condition comparing */
+        TCGv val2;                 /* Second value for condition comparing */
+    } jmp;                         /* Jump object, only once in each TB block */
+} DisasContext;
+
+#include "exec/gen-icount.h"
+
+/* Return a TCGv holding constant 0, allocated lazily once per context. */
+static TCGv load_zero(DisasContext *dc)
+{
+    if (TCGV_IS_UNUSED_I64(dc->zero)) {
+        dc->zero = tcg_const_i64(0);
+    }
+    return dc->zero;
+}
+
+/*
+ * Fetch the TCG global for source register 'reg'.  The zero register
+ * reads as 0; any other out-of-range number flags
+ * TILEGX_EXCP_REG_UNSUPPORTED on the context and also yields 0.
+ */
+static TCGv load_gr(DisasContext *dc, uint8_t reg)
+{
+    if (likely(reg < TILEGX_R_COUNT)) {
+        return cpu_regs[reg];
+    } else if (reg != TILEGX_R_ZERO) {
+        dc->exception = TILEGX_EXCP_REG_UNSUPPORTED;
+    }
+    return load_zero(dc);
+}
+
+/*
+ * Allocate a fresh temporary as the destination for register 'rdst' and
+ * record the (idx, val) pair in the current slot — presumably so the
+ * value is written back after the whole bundle executes (write-back code
+ * is outside this chunk; TODO confirm).
+ */
+static TCGv dest_gr(DisasContext *dc, uint8_t rdst)
+{
+    DisasContextTemp *tmp = dc->tmp_regcur;
+    tmp->idx = rdst;
+    tmp->val = tcg_temp_new_i64();
+    return tmp->val;
+}
+
+/* Emit a call to the exception helper raising exception number 'num'. */
+static void gen_exception(DisasContext *dc, int num)
+{
+    TCGv_i32 tmp = tcg_const_i32(num);
+
+    gen_helper_exception(cpu_env, tmp);
+    tcg_temp_free_i32(tmp);
+}
+
+/* nop/fnop: no code generated, only disassembly logging. */
+static void gen_fnop(void)
+{
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "(f)nop\n");
+}
+
+/* cmpltui: rdst = (rsrc <u imm8) ? 1 : 0; imm8 is sign-extended to 64 bits
+   first (int8_t -> uint64_t), then compared unsigned. */
+static void gen_cmpltui(struct DisasContext *dc,
+                        uint8_t rdst, uint8_t rsrc, int8_t imm8)
+{
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "cmpltui r%d, r%d, %d\n",
+                  rdst, rsrc, imm8);
+    tcg_gen_setcondi_i64(TCG_COND_LTU, dest_gr(dc, rdst), load_gr(dc, rsrc),
+                        (uint64_t)imm8);
+}
+
+/* cmpeqi: rdst = (rsrc == sign-extended imm8) ? 1 : 0. */
+static void gen_cmpeqi(struct DisasContext *dc,
+                       uint8_t rdst, uint8_t rsrc, int8_t imm8)
+{
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "cmpeqi r%d, r%d, %d\n", rdst, rsrc, 
imm8);
+    tcg_gen_setcondi_i64(TCG_COND_EQ, dest_gr(dc, rdst), load_gr(dc, rsrc),
+                        (uint64_t)imm8);
+}
+
+/* cmpne: rdst = (rsrc != rsrcb) ? 1 : 0. */
+static void gen_cmpne(struct DisasContext *dc,
+                      uint8_t rdst, uint8_t rsrc, uint8_t rsrcb)
+{
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "cmpne r%d, r%d, r%d\n",
+                  rdst, rsrc, rsrcb);
+    tcg_gen_setcond_i64(TCG_COND_NE, dest_gr(dc, rdst), load_gr(dc, rsrc),
+                        load_gr(dc, rsrcb));
+}
+
+/* cmoveqz: rdst = (rsrc == 0) ? rsrcb : rdst (old value kept otherwise). */
+static void gen_cmoveqz(struct DisasContext *dc,
+                        uint8_t rdst, uint8_t rsrc, uint8_t rsrcb)
+{
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "cmoveqz r%d, r%d, r%d\n",
+                  rdst, rsrc, rsrcb);
+    tcg_gen_movcond_i64(TCG_COND_EQ, dest_gr(dc, rdst), load_gr(dc, rsrc),
+                        load_zero(dc), load_gr(dc, rsrcb), load_gr(dc, rdst));
+}
+
+/* cmovnez: rdst = (rsrc != 0) ? rsrcb : rdst. */
+static void gen_cmovnez(struct DisasContext *dc,
+                        uint8_t rdst, uint8_t rsrc, uint8_t rsrcb)
+{
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "cmovnez r%d, r%d, r%d\n",
+                  rdst, rsrc, rsrcb);
+    tcg_gen_movcond_i64(TCG_COND_NE, dest_gr(dc, rdst), load_gr(dc, rsrc),
+                        load_zero(dc), load_gr(dc, rsrcb), load_gr(dc, rdst));
+}
+
+/* add: rdst = rsrc + rsrcb (full 64-bit). */
+static void gen_add(struct DisasContext *dc,
+                    uint8_t rdst, uint8_t rsrc, uint8_t rsrcb)
+{
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "add r%d, r%d, r%d\n",
+                  rdst, rsrc, rsrcb);
+    tcg_gen_add_i64(dest_gr(dc, rdst), load_gr(dc, rsrc), load_gr(dc, rsrcb));
+}
+
+/* Shared helper for addi/addli: rdst = rsrc + imm (64-bit). */
+static void gen_addimm(struct DisasContext *dc,
+                       uint8_t rdst, uint8_t rsrc, int64_t imm)
+{
+    tcg_gen_addi_i64(dest_gr(dc, rdst), load_gr(dc, rsrc), imm);
+}
+
+/* addi: 8-bit signed immediate add. */
+static void gen_addi(struct DisasContext *dc,
+                     uint8_t rdst, uint8_t rsrc, int8_t imm8)
+{
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "addi r%d, r%d, %d\n", rdst, rsrc, imm8);
+    gen_addimm(dc, rdst, rsrc, (int64_t)imm8);
+}
+
+/* addli: 16-bit signed immediate add. */
+static void gen_addli(struct DisasContext *dc,
+                      uint8_t rdst, uint8_t rsrc, int16_t im16)
+{
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "addli r%d, r%d, %d\n", rdst, rsrc, im16);
+    gen_addimm(dc, rdst, rsrc, (int64_t)im16);
+}
+
+/* addx: 32-bit add, result sign-extended to 64 bits. */
+static void gen_addx(struct DisasContext *dc,
+                     uint8_t rdst, uint8_t rsrc, uint8_t rsrcb)
+{
+    TCGv vdst = dest_gr(dc, rdst);
+
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "addx r%d, r%d, r%d\n", rdst, rsrc, 
rsrcb);
+    tcg_gen_add_i64(vdst, load_gr(dc, rsrc), load_gr(dc, rsrcb));
+    tcg_gen_ext32s_i64(vdst, vdst);
+}
+
+/* Shared helper for addxi: 32-bit immediate add, sign-extended result. */
+static void gen_addximm(struct DisasContext *dc,
+                        uint8_t rdst, uint8_t rsrc, int32_t imm)
+{
+    TCGv vdst = dest_gr(dc, rdst);
+
+    tcg_gen_addi_i64(vdst, load_gr(dc, rsrc), imm);
+    tcg_gen_ext32s_i64(vdst, vdst);
+}
+
+/* addxi: 8-bit signed immediate 32-bit add. */
+static void gen_addxi(struct DisasContext *dc,
+                     uint8_t rdst, uint8_t rsrc, int8_t imm8)
+{
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "addxi r%d, r%d, %d\n", rdst, rsrc, imm8);
+    gen_addximm(dc, rdst, rsrc, (int32_t)imm8);
+}
+
+/* subx: 32-bit subtract, result sign-extended to 64 bits. */
+static void gen_subx(struct DisasContext *dc,
+                     uint8_t rdst, uint8_t rsrc, uint8_t rsrcb)
+{
+    TCGv vdst = dest_gr(dc, rdst);
+
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "subx r%d, r%d, r%d\n", rdst, rsrc, 
rsrcb);
+    tcg_gen_sub_i64(vdst, load_gr(dc, rsrc), load_gr(dc, rsrcb));
+    tcg_gen_ext32s_i64(vdst, vdst);
+}
+
+/*
+ * The related functional description for bfextu in isa document:
+ *
+ * uint64_t mask = 0;
+ * mask = (-1ULL) ^ ((-1ULL << ((BFEnd - BFStart) & 63)) << 1);
+ * uint64_t rot_src = (((uint64_t) rf[SrcA]) >> BFStart)
+ *                    | (rf[SrcA] << (64 - BFStart));
+ * rf[Dest] = rot_src & mask;
+ */
+static void gen_bfextu(struct DisasContext *dc, uint8_t rdst, uint8_t rsrc,
+                       int8_t start, int8_t end)
+{
+    TCGv mask = tcg_temp_new_i64();
+    TCGv tmp = dest_gr(dc, rdst);
+
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "bfextu r%d, r%d, %d, %d\n",
+                  rdst, rsrc, start, end);
+
+    /* mask = ~((-1 << ((end - start) & 63)) << 1), built in-register.  */
+    tcg_gen_movi_i64(tmp, -1ULL);
+    tcg_gen_movi_i64(mask, end);
+
+    tcg_gen_subi_i64(mask, mask, start);
+    tcg_gen_andi_i64(mask, mask, 63);
+    tcg_gen_shl_i64(mask, tmp, mask);
+    tcg_gen_shli_i64(mask, mask, 1);
+    tcg_gen_xori_i64(mask, mask, -1ULL);
+
+    /* The spec above rotates SrcA *right* by BFStart:
+       (src >> start) | (src << (64 - start)).  rotli (rotate left) was
+       wrong here; use rotri.  */
+    tcg_gen_rotri_i64(tmp, load_gr(dc, rsrc), start);
+    tcg_gen_and_i64(tmp, tmp, mask);
+
+    tcg_temp_free_i64(mask);
+}
+
+/* or: rdst = rsrc | rsrcb. */
+static void gen_or(struct DisasContext *dc,
+                   uint8_t rdst, uint8_t rsrc, uint8_t rsrcb)
+{
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "or r%d, r%d, r%d\n", rdst, rsrc, rsrcb);
+    tcg_gen_or_i64(dest_gr(dc, rdst), load_gr(dc, rsrc), load_gr(dc, rsrcb));
+}
+
+/* Shared helper for ori: rdst = rsrc | imm. */
+static void gen_orimm(struct DisasContext *dc,
+                      uint8_t rdst, uint8_t rsrc, int64_t imm)
+{
+    tcg_gen_ori_i64(dest_gr(dc, rdst), load_gr(dc, rsrc), imm);
+}
+
+/* ori: 8-bit signed immediate (sign-extended) OR. */
+static void gen_ori(struct DisasContext *dc,
+                    uint8_t rdst, uint8_t rsrc, int8_t imm8)
+{
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "ori r%d, r%d, %d\n", rdst, rsrc, imm8);
+    gen_orimm(dc, rdst, rsrc, (int64_t)imm8);
+}
+
+/* xor: rdst = rsrc ^ rsrcb. */
+static void gen_xor(struct DisasContext *dc,
+                    uint8_t rdst, uint8_t rsrc, uint8_t rsrcb)
+{
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "xor r%d, r%d, r%d\n", rdst, rsrc, rsrcb);
+    tcg_gen_xor_i64(dest_gr(dc, rdst), load_gr(dc, rsrc), load_gr(dc, rsrcb));
+}
+
+/* and: rdst = rsrc & rsrcb. */
+static void gen_and(struct DisasContext *dc,
+                    uint8_t rdst, uint8_t rsrc, uint8_t rsrcb)
+{
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "and r%d, r%d, r%d\n", rdst, rsrc, rsrcb);
+    tcg_gen_and_i64(dest_gr(dc, rdst), load_gr(dc, rsrc), load_gr(dc, rsrcb));
+}
+
+/* Shared helper for andi: rdst = rsrc & imm. */
+static void gen_andimm(struct DisasContext *dc,
+                       uint8_t rdst, uint8_t rsrc, int64_t imm)
+{
+    tcg_gen_andi_i64(dest_gr(dc, rdst), load_gr(dc, rsrc), imm);
+}
+
+/* andi: 8-bit signed immediate (sign-extended) AND. */
+static void gen_andi(struct DisasContext *dc,
+                     uint8_t rdst, uint8_t rsrc, int8_t imm8)
+{
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "andi r%d, r%d, %d\n", rdst, rsrc, imm8);
+    gen_andimm(dc, rdst, rsrc, (int64_t)imm8);
+}
+
+/* mulx: multiply the sign-extended low 32 bits of both sources, then
+   sign-extend the low 32 bits of the product into rdst. */
+static void gen_mulx(struct DisasContext *dc,
+                     uint8_t rdst, uint8_t rsrc, uint8_t rsrcb)
+{
+    TCGv tmp = tcg_temp_new_i64();
+    TCGv vdst = dest_gr(dc, rdst);
+
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "mulx r%d, r%d, r%d\n", rdst, rsrc, 
rsrcb);
+
+    tcg_gen_ext32s_i64(vdst, load_gr(dc, rsrc));
+    tcg_gen_ext32s_i64(tmp, load_gr(dc, rsrcb));
+
+    tcg_gen_mul_i64(tmp, vdst, tmp);
+    tcg_gen_ext32s_i64(vdst, tmp);
+
+    tcg_temp_free_i64(tmp);
+}
+
+/* shlx: 32-bit shift left by (rsrcb mod 32), result sign-extended. */
+static void gen_shlx(struct DisasContext *dc,
+                     uint8_t rdst, uint8_t rsrc, uint8_t rsrcb)
+{
+    TCGv vdst = dest_gr(dc, rdst);
+
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "shlx r%d, r%d, r%d\n", rdst, rsrc, 
rsrcb);
+    tcg_gen_andi_i64(vdst, load_gr(dc, rsrcb), 31);
+    tcg_gen_shl_i64(vdst, load_gr(dc, rsrc), vdst);
+    tcg_gen_ext32s_i64(vdst, vdst);
+}
+
+/* shlxi: 32-bit shift left by immediate (shamt mod 32), result
+   sign-extended to 64 bits. */
+static void gen_shlxi(struct DisasContext *dc,
+                      uint8_t rdst, uint8_t rsrc, uint8_t shamt)
+{
+    TCGv vdst = dest_gr(dc, rdst);
+
+    /* shamt is an immediate, not a register; log it as "%d", not "r%d". */
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "shlxi r%d, r%d, %d\n",
+                  rdst, rsrc, shamt);
+    tcg_gen_shli_i64(vdst, load_gr(dc, rsrc), shamt & 31);
+    tcg_gen_ext32s_i64(vdst, vdst);
+}
+
+/* shl3add: rdst = (rsrc << 3) + rsrcb. */
+static void gen_shl3add(struct DisasContext *dc,
+                        uint8_t rdst, uint8_t rsrc, uint8_t rsrcb)
+{
+    TCGv vdst = dest_gr(dc, rdst);
+
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "shl3add r%d, r%d, r%d\n",
+                  rdst, rsrc, rsrcb);
+    tcg_gen_shli_i64(vdst, load_gr(dc, rsrc), 3);
+    tcg_gen_add_i64(vdst, vdst, load_gr(dc, rsrcb));
+}
+
+/* shl16insli: rdst = (rsrc << 16) | uimm16 — the OR fills the 16 zero
+   bits the shift just created. */
+static void gen_shl16insli(struct DisasContext *dc,
+                           uint8_t rdst, uint8_t rsrc, uint16_t uimm16)
+{
+    TCGv vdst = dest_gr(dc, rdst);
+
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "shl16insli r%d, r%d, %llx\n",
+                  rdst, rsrc, (long long)uimm16);
+    tcg_gen_shli_i64(vdst, load_gr(dc, rsrc), 16);
+    tcg_gen_ori_i64(vdst, vdst, uimm16);
+}
+
+/* lnk: rdst = address of the next bundle (pc + bundle size). */
+static void gen_lnk(struct DisasContext *dc, uint8_t rdst)
+{
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "lnk r%d\n", rdst);
+    tcg_gen_movi_i64(dest_gr(dc, rdst), dc->pc + TILEGX_BUNDLE_SIZE_IN_BYTES);
+}
+
+/* beqz / beqzt: record a pc-relative branch, taken when rsrc == 0.
+   The branch itself is emitted later from dc->jmp.  off counts bundles. */
+static int gen_beqz(struct DisasContext *dc, uint8_t rsrc, int32_t off)
+{
+    int64_t target = dc->pc + (int64_t)off * TILEGX_BUNDLE_SIZE_IN_BYTES;
+
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "beqz(t) r%d, %d\n", rsrc, off);
+
+    dc->jmp.cond = TCG_COND_EQ;
+    dc->jmp.dest = tcg_temp_new_i64();
+    dc->jmp.val1 = tcg_temp_new_i64();
+    dc->jmp.val2 = tcg_temp_new_i64();
+    tcg_gen_movi_i64(dc->jmp.dest, target);
+    tcg_gen_mov_i64(dc->jmp.val1, load_gr(dc, rsrc));
+    tcg_gen_movi_i64(dc->jmp.val2, 0);
+
+    return 0;
+}
+
+/* bnez / bnezt: record a pc-relative branch, taken when rsrc != 0.
+   off counts bundles. */
+static int gen_bnezt(struct DisasContext *dc, uint8_t rsrc, int32_t off)
+{
+    int64_t target = dc->pc + (int64_t)off * TILEGX_BUNDLE_SIZE_IN_BYTES;
+
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "bnez(t) r%d, %d\n", rsrc, off);
+
+    dc->jmp.cond = TCG_COND_NE;
+    dc->jmp.dest = tcg_temp_new_i64();
+    dc->jmp.val1 = tcg_temp_new_i64();
+    dc->jmp.val2 = tcg_temp_new_i64();
+    tcg_gen_movi_i64(dc->jmp.dest, target);
+    tcg_gen_mov_i64(dc->jmp.val1, load_gr(dc, rsrc));
+    tcg_gen_movi_i64(dc->jmp.val2, 0);
+
+    return 0;
+}
+
+/* blbc: branch when the low bit of rsrc is clear.  off counts bundles. */
+static int gen_blbc(struct DisasContext *dc, uint8_t rsrc, int32_t off)
+{
+    int64_t target = dc->pc + (int64_t)off * TILEGX_BUNDLE_SIZE_IN_BYTES;
+
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "blbc r%d, %d\n", rsrc, off);
+
+    dc->jmp.cond = TCG_COND_EQ;
+    dc->jmp.dest = tcg_temp_new_i64();
+    dc->jmp.val1 = tcg_temp_new_i64();
+    dc->jmp.val2 = tcg_temp_new_i64();
+    tcg_gen_movi_i64(dc->jmp.dest, target);
+    /* Compare only bit 0 of the source against zero.  */
+    tcg_gen_mov_i64(dc->jmp.val1, load_gr(dc, rsrc));
+    tcg_gen_andi_i64(dc->jmp.val1, dc->jmp.val1, 1ULL);
+    tcg_gen_movi_i64(dc->jmp.val2, 0);
+
+    return 0;
+}
+
+/* ld: 64-bit load from the address in rsrc. */
+static void gen_ld(struct DisasContext *dc, uint8_t rdst, uint8_t rsrc)
+{
+    TCGv addr;
+
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "ld r%d, r%d\n", rdst, rsrc);
+    addr = load_gr(dc, rsrc);
+    tcg_gen_qemu_ld_i64(dest_gr(dc, rdst), addr, MMU_USER_IDX, MO_LEQ);
+}
+
+/* ld1s: sign-extending 1-byte load from the address in rsrc. */
+static void gen_ld1s(struct DisasContext *dc, uint8_t rdst, uint8_t rsrc)
+{
+    TCGv addr;
+
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "ld1s r%d, r%d\n", rdst, rsrc);
+    addr = load_gr(dc, rsrc);
+    tcg_gen_qemu_ld_i64(dest_gr(dc, rdst), addr, MMU_USER_IDX, MO_SB);
+}
+
+/* ld4s: sign-extending 4-byte load from the address in rsrc. */
+static void gen_ld4s(struct DisasContext *dc, uint8_t rdst, uint8_t rsrc)
+{
+    TCGv addr;
+
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "ld4s r%d, r%d\n", rdst, rsrc);
+    addr = load_gr(dc, rsrc);
+    tcg_gen_qemu_ld_i64(dest_gr(dc, rdst), addr, MMU_USER_IDX, MO_LESL);
+}
+
+/* st: 64-bit store of rsrcb to the address in rsrc. */
+static void gen_st(struct DisasContext *dc, uint8_t rsrc, uint8_t rsrcb)
+{
+    TCGv val;
+
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "st r%d, r%d\n", rsrc, rsrcb);
+    val = load_gr(dc, rsrcb);
+    tcg_gen_qemu_st_i64(val, load_gr(dc, rsrc), MMU_USER_IDX, MO_LEQ);
+}
+
+/* st4: 4-byte store of rsrcb to the address in rsrc. */
+static void gen_st4(struct DisasContext *dc, uint8_t rsrc, uint8_t rsrcb)
+{
+    TCGv val;
+
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "st4 r%d, r%d\n", rsrc, rsrcb);
+    val = load_gr(dc, rsrcb);
+    tcg_gen_qemu_st_i64(val, load_gr(dc, rsrc), MMU_USER_IDX, MO_LEUL);
+}
+
+/* jr / jrp: unconditional indirect jump to the address in rsrc. */
+static void gen_jr(struct DisasContext *dc, uint8_t rsrc)
+{
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "jr(p) r%d\n", rsrc);
+
+    dc->jmp.cond = TCG_COND_ALWAYS;
+    dc->jmp.dest = tcg_temp_new_i64();
+    tcg_gen_mov_i64(dc->jmp.dest, load_gr(dc, rsrc));
+}
+
+/* j: unconditional pc-relative jump; off counts bundles. */
+static void gen_j(struct DisasContext *dc, int off)
+{
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "j %d\n", off);
+
+    dc->jmp.cond = TCG_COND_ALWAYS;
+    dc->jmp.dest = tcg_temp_new_i64();
+    tcg_gen_movi_i64(dc->jmp.dest, dc->pc + off * TILEGX_BUNDLE_SIZE_IN_BYTES);
+}
+
+/* jal: pc-relative call; the link register receives the address of the
+   following bundle.  off counts bundles. */
+static void gen_jal(struct DisasContext *dc, int off)
+{
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "jal %d\n", off);
+
+    dc->jmp.cond = TCG_COND_ALWAYS;
+    dc->jmp.dest = tcg_temp_new_i64();
+    tcg_gen_movi_i64(dest_gr(dc, TILEGX_R_LR),
+                     dc->pc + TILEGX_BUNDLE_SIZE_IN_BYTES);
+    tcg_gen_movi_i64(dc->jmp.dest, dc->pc + off * TILEGX_BUNDLE_SIZE_IN_BYTES);
+}
+
+/* swint1: software interrupt 1, used for system calls in linux-user mode.
+   The pc is advanced first so execution resumes after the bundle. */
+static void gen_swint1(struct DisasContext *dc)
+{
+    qemu_log_mask(CPU_LOG_TB_IN_ASM, "swint1\n");
+
+    tcg_gen_movi_i64(cpu_pc, dc->pc + TILEGX_BUNDLE_SIZE_IN_BYTES);
+    dc->exception = TILEGX_EXCP_SYSCALL;
+}
+
+/* Y0 pipe: addi rdst, rsrc, imm8. */
+static void decode_addi_opcode_y0(struct DisasContext *dc,
+                                  tilegx_bundle_bits bundle)
+{
+    uint8_t rdst = (uint8_t)get_Dest_Y0(bundle);
+    uint8_t rsrc = (uint8_t)get_SrcA_Y0(bundle);
+    int8_t imm8 = (int8_t)get_Imm8_Y0(bundle);
+
+    gen_addi(dc, rdst, rsrc, imm8);
+}
+
+/*
+ * Decode the rrr_1 group of the Y0 pipe.  Only nop/fnop (with zero
+ * SrcA and Dest fields) are implemented; any other extension, or a
+ * (f)nop with non-zero operand fields, falls through to the
+ * unimplemented-opcode exception at the bottom.
+ */
+static void decode_rrr_1_opcode_y0(struct DisasContext *dc,
+                                   tilegx_bundle_bits bundle)
+{
+    switch (get_RRROpcodeExtension_Y0(bundle)) {
+    case UNARY_RRR_1_OPCODE_Y0:
+        switch (get_UnaryOpcodeExtension_Y0(bundle)) {
+        case NOP_UNARY_OPCODE_Y0:
+        case  FNOP_UNARY_OPCODE_Y0:
+            if (!get_SrcA_Y0(bundle) && !get_Dest_Y0(bundle)) {
+                gen_fnop();
+                return;
+            }
+            break;
+        /* Remaining unary extensions: not yet implemented. */
+        case CNTLZ_UNARY_OPCODE_Y0:
+        case CNTTZ_UNARY_OPCODE_Y0:
+        case FSINGLE_PACK1_UNARY_OPCODE_Y0:
+        case PCNT_UNARY_OPCODE_Y0:
+        case REVBITS_UNARY_OPCODE_Y0:
+        case REVBYTES_UNARY_OPCODE_Y0:
+        case TBLIDXB0_UNARY_OPCODE_Y0:
+        case TBLIDXB1_UNARY_OPCODE_Y0:
+        case TBLIDXB2_UNARY_OPCODE_Y0:
+        case TBLIDXB3_UNARY_OPCODE_Y0:
+        default:
+            break;
+        }
+        break;
+    /* shlNadd and plain rrr forms: not yet implemented. */
+    case SHL1ADD_RRR_1_OPCODE_Y0:
+    case SHL2ADD_RRR_1_OPCODE_Y0:
+    case SHL3ADD_RRR_1_OPCODE_Y0:
+    case RRR_1_OPCODE_Y0:
+    default:
+        break;
+    }
+
+    qemu_log_mask(LOG_UNIMP, "UNIMP rrr_1_opcode_y0, %16.16llx\n", bundle);
+    dc->exception = TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
+}
+
+/* Decode the rrr_5 group of the Y0 pipe.  Only "or" is implemented. */
+static void decode_rrr_5_opcode_y0(struct DisasContext *dc,
+                                   tilegx_bundle_bits bundle)
+{
+    uint8_t rdst = (uint8_t)get_Dest_Y0(bundle);
+    uint8_t rsrc = (uint8_t)get_SrcA_Y0(bundle);
+    uint8_t rsrcb = (uint8_t)get_SrcB_Y0(bundle);
+
+    if (get_RRROpcodeExtension_Y0(bundle) == OR_RRR_5_OPCODE_Y0) {
+        gen_or(dc, rdst, rsrc, rsrcb);
+        return;
+    }
+
+    /* and, nor and xor are not yet implemented. */
+    qemu_log_mask(LOG_UNIMP, "UNIMP rrr_5_opcode_y0, %16.16llx\n", bundle);
+    dc->exception = TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
+}
+
+/* Y1 pipe: addi rdst, rsrc, imm8. */
+static void decode_addi_opcode_y1(struct DisasContext *dc,
+                                  tilegx_bundle_bits bundle)
+{
+    gen_addi(dc, (uint8_t)get_Dest_Y1(bundle),
+             (uint8_t)get_SrcA_Y1(bundle), (int8_t)get_Imm8_Y1(bundle));
+}
+
+/*
+ * Decode the rrr_1 group of the Y1 pipe.  Only nop/fnop (with zero
+ * SrcA and Dest fields) are implemented; everything else falls
+ * through to the unimplemented-opcode exception.
+ */
+static void decode_rrr_1_opcode_y1(struct DisasContext *dc,
+                                   tilegx_bundle_bits bundle)
+{
+    switch (get_RRROpcodeExtension_Y1(bundle)) {
+    case UNARY_RRR_1_OPCODE_Y1:
+        switch (get_UnaryOpcodeExtension_Y1(bundle)) {
+        case NOP_UNARY_OPCODE_Y1:
+        case FNOP_UNARY_OPCODE_Y1:
+            if (!get_SrcA_Y1(bundle) && !get_Dest_Y1(bundle)) {
+                gen_fnop();
+                return;
+            }
+            break;
+        /* Remaining unary extensions: not yet implemented. */
+        case ILL_UNARY_OPCODE_Y1:
+        case JALRP_UNARY_OPCODE_Y1:
+        case JALR_UNARY_OPCODE_Y1:
+        case JRP_UNARY_OPCODE_Y1:
+        case JR_UNARY_OPCODE_Y1:
+        case LNK_UNARY_OPCODE_Y1:
+        default:
+            break;
+        }
+        break;
+    /* shlNadd forms: not yet implemented. */
+    case SHL1ADD_RRR_1_OPCODE_Y1:
+    case SHL2ADD_RRR_1_OPCODE_Y1:
+    case SHL3ADD_RRR_1_OPCODE_Y1:
+    default:
+        break;
+    }
+    qemu_log_mask(LOG_UNIMP, "UNIMP rrr_1_opcode_y1, %16.16llx\n", bundle);
+    dc->exception = TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
+}
+
+/* Decode the rrr_5 group of the Y1 pipe.  Only "or" is implemented. */
+static void decode_rrr_5_opcode_y1(struct DisasContext *dc,
+                                   tilegx_bundle_bits bundle)
+{
+    uint8_t rdst = (uint8_t)get_Dest_Y1(bundle);
+    uint8_t rsrc = (uint8_t)get_SrcA_Y1(bundle);
+    uint8_t rsrcb = (uint8_t)get_SrcB_Y1(bundle);
+
+    if (get_RRROpcodeExtension_Y1(bundle) == OR_RRR_5_OPCODE_Y1) {
+        gen_or(dc, rdst, rsrc, rsrcb);
+        return;
+    }
+
+    /* and, nor and xor are not yet implemented. */
+    qemu_log_mask(LOG_UNIMP, "UNIMP rrr_5_opcode_y1, %16.16llx\n", bundle);
+    dc->exception = TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
+}
+
+/* Y2 pipe, ldst0 group: mode YA2 is ld1s; modes YB2/YC2 unimplemented. */
+static void decode_ldst0_opcode_y2(struct DisasContext *dc,
+                                   tilegx_bundle_bits bundle)
+{
+    uint8_t rsrca = (uint8_t)get_SrcA_Y2(bundle);
+    uint8_t rsrcbdst = (uint8_t)get_SrcBDest_Y2(bundle);
+
+    if (get_Mode(bundle) == MODE_OPCODE_YA2) {
+        gen_ld1s(dc, rsrcbdst, rsrca);
+        return;
+    }
+
+    qemu_log_mask(LOG_UNIMP, "UNIMP ldst_opcode_y2, %16.16llx\n", bundle);
+    dc->exception = TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
+}
+
+/* Y2 pipe, ldst1 group: mode YB2 is ld4s; modes YA2/YC2 unimplemented. */
+static void decode_ldst1_opcode_y2(struct DisasContext *dc,
+                                   tilegx_bundle_bits bundle)
+{
+    uint8_t rsrca = (uint8_t)get_SrcA_Y2(bundle);
+    uint8_t rsrcbdst = (uint8_t)get_SrcBDest_Y2(bundle);
+
+    if (get_Mode(bundle) == MODE_OPCODE_YB2) {
+        gen_ld4s(dc, rsrcbdst, rsrca);
+        return;
+    }
+
+    qemu_log_mask(LOG_UNIMP, "UNIMP ldst_opcode_y2, %16.16llx\n", bundle);
+    dc->exception = TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
+}
+
+/* Y2 pipe, ldst2 group: mode YC2 is st4; modes YA2/YB2 unimplemented. */
+static void decode_ldst2_opcode_y2(struct DisasContext *dc,
+                                   tilegx_bundle_bits bundle)
+{
+    uint8_t rsrca = (uint8_t)get_SrcA_Y2(bundle);
+    uint8_t rsrcbdst = (uint8_t)get_SrcBDest_Y2(bundle);
+
+    if (get_Mode(bundle) == MODE_OPCODE_YC2) {
+        gen_st4(dc, rsrca, rsrcbdst);
+        return;
+    }
+
+    qemu_log_mask(LOG_UNIMP, "UNIMP ldst_opcode_y2, %16.16llx\n", bundle);
+    dc->exception = TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
+}
+
+/* Y2 pipe, ldst3 group: YB2 is a 64-bit load, YC2 a 64-bit store. */
+static void decode_ldst3_opcode_y2(struct DisasContext *dc,
+                                   tilegx_bundle_bits bundle)
+{
+    uint8_t rsrca = (uint8_t)get_SrcA_Y2(bundle);
+    uint8_t rsrcbdst = (uint8_t)get_SrcBDest_Y2(bundle);
+
+    switch (get_Mode(bundle)) {
+    case MODE_OPCODE_YB2:
+        gen_ld(dc, rsrcbdst, rsrca);
+        return;
+    case MODE_OPCODE_YC2:
+        gen_st(dc, rsrca, rsrcbdst);
+        return;
+    default:  /* MODE_OPCODE_YA2 and anything else: unimplemented. */
+        break;
+    }
+    qemu_log_mask(LOG_UNIMP, "UNIMP ldst_opcode_y2, %16.16llx\n", bundle);
+    dc->exception = TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
+}
+
+/* X0 pipe: addli rdst, rsrc, imm16. */
+static void decode_addli_opcode_x0(struct DisasContext *dc,
+                                   tilegx_bundle_bits bundle)
+{
+    gen_addli(dc, (uint8_t)get_Dest_X0(bundle),
+              (uint8_t)get_SrcA_X0(bundle), (int16_t)get_Imm16_X0(bundle));
+}
+
+/* X0 pipe, bit-field group.  Only bfextu is implemented. */
+static void decode_bf_opcode_x0(struct DisasContext *dc,
+                                tilegx_bundle_bits bundle)
+{
+    uint8_t rdst = (uint8_t)get_Dest_X0(bundle);
+    uint8_t rsrc = (uint8_t)get_SrcA_X0(bundle);
+    int8_t start = (int8_t)get_BFStart_X0(bundle);
+    int8_t end = (int8_t)get_BFEnd_X0(bundle);
+
+    if (get_BFOpcodeExtension_X0(bundle) == BFEXTU_BF_OPCODE_X0) {
+        gen_bfextu(dc, rdst, rsrc, start, end);
+        return;
+    }
+
+    /* bfexts, bfins and mm are not yet implemented. */
+    qemu_log_mask(LOG_UNIMP, "UNIMP bf_opcode_x0, %16.16llx\n", bundle);
+    dc->exception = TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
+}
+
+/*
+ * Decode the imm8 group of the X0 pipe.  addi, addxi, andi, cmpeqi
+ * and ori are implemented; the remaining extensions fall through to
+ * the unimplemented-opcode exception.
+ */
+static void decode_imm8_opcode_x0(struct DisasContext *dc,
+                                  tilegx_bundle_bits bundle)
+{
+    uint8_t rsrc = (uint8_t)get_SrcA_X0(bundle);
+    uint8_t rdst = (uint8_t)get_Dest_X0(bundle);
+    int8_t imm8 = (int8_t)get_Imm8_X0(bundle);
+
+    switch (get_Imm8OpcodeExtension_X0(bundle)) {
+    case ADDI_IMM8_OPCODE_X0:
+        gen_addi(dc, rdst, rsrc, imm8);
+        return;
+    case ADDXI_IMM8_OPCODE_X0:
+        gen_addxi(dc, rdst, rsrc, imm8);
+        return;
+    case ANDI_IMM8_OPCODE_X0:
+        gen_andi(dc, rdst, rsrc, imm8);
+        return;
+    case CMPEQI_IMM8_OPCODE_X0:
+        gen_cmpeqi(dc, rdst, rsrc, imm8);
+        return;
+    case ORI_IMM8_OPCODE_X0:
+        gen_ori(dc, rdst, rsrc, imm8);
+        return;
+    /* Not yet implemented: */
+    case CMPLTSI_IMM8_OPCODE_X0:
+    case CMPLTUI_IMM8_OPCODE_X0:
+    case V1ADDI_IMM8_OPCODE_X0:
+    case V1CMPEQI_IMM8_OPCODE_X0:
+    case V1CMPLTSI_IMM8_OPCODE_X0:
+    case V1CMPLTUI_IMM8_OPCODE_X0:
+    case V1MAXUI_IMM8_OPCODE_X0:
+    case V1MINUI_IMM8_OPCODE_X0:
+    case V2ADDI_IMM8_OPCODE_X0:
+    case V2CMPEQI_IMM8_OPCODE_X0:
+    case V2CMPLTSI_IMM8_OPCODE_X0:
+    case V2CMPLTUI_IMM8_OPCODE_X0:
+    case V2MAXSI_IMM8_OPCODE_X0:
+    case V2MINSI_IMM8_OPCODE_X0:
+    case XORI_IMM8_OPCODE_X0:
+    default:
+        break;
+    }
+    qemu_log_mask(LOG_UNIMP, "UNIMP imm8_opcode_x0, %16.16llx\n", bundle);
+    dc->exception = TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
+}
+
+/*
+ * Decode the rrr_0 group of the X0 pipe.  Implemented: add, cmoveqz,
+ * cmovnez, cmpne, mulx, or, shl3add, shlx, subx, xor, and nop/fnop
+ * (when both operand fields are zero).  The long list of remaining
+ * extensions below documents what is still unimplemented; all of it
+ * falls through to the exception at the bottom.
+ */
+static void decode_rrr_0_opcode_x0(struct DisasContext *dc,
+                                   tilegx_bundle_bits bundle)
+{
+    uint8_t rsrc = (uint8_t)get_SrcA_X0(bundle);
+    uint8_t rsrcb = (uint8_t)get_SrcB_X0(bundle);
+    uint8_t rdst = (uint8_t)get_Dest_X0(bundle);
+
+    switch (get_RRROpcodeExtension_X0(bundle)) {
+    case ADD_RRR_0_OPCODE_X0:
+        gen_add(dc, rdst, rsrc, rsrcb);
+        return;
+    case CMOVEQZ_RRR_0_OPCODE_X0:
+        gen_cmoveqz(dc, rdst, rsrc, rsrcb);
+        return;
+    case CMOVNEZ_RRR_0_OPCODE_X0:
+        gen_cmovnez(dc, rdst, rsrc, rsrcb);
+        return;
+    case CMPNE_RRR_0_OPCODE_X0:
+        gen_cmpne(dc, rdst, rsrc, rsrcb);
+        return;
+    case MULX_RRR_0_OPCODE_X0:
+        gen_mulx(dc, rdst, rsrc, rsrcb);
+        return;
+    case OR_RRR_0_OPCODE_X0:
+        gen_or(dc, rdst, rsrc, rsrcb);
+        return;
+    case SHL3ADD_RRR_0_OPCODE_X0:
+        gen_shl3add(dc, rdst, rsrc, rsrcb);
+        return;
+    case SHLX_RRR_0_OPCODE_X0:
+        gen_shlx(dc, rdst, rsrc, rsrcb);
+        return;
+    case SUBX_RRR_0_OPCODE_X0:
+        gen_subx(dc, rdst, rsrc, rsrcb);
+        return;
+    case UNARY_RRR_0_OPCODE_X0:
+        switch (get_UnaryOpcodeExtension_X0(bundle)) {
+        case FNOP_UNARY_OPCODE_X0:
+        case NOP_UNARY_OPCODE_X0:
+            /* Only the canonical encoding with zero operands is a nop. */
+            if (!rsrc && !rdst) {
+                gen_fnop();
+                return;
+            }
+            break;
+        case CNTTZ_UNARY_OPCODE_X0:
+        case FSINGLE_PACK1_UNARY_OPCODE_X0:
+        case PCNT_UNARY_OPCODE_X0:
+        case REVBITS_UNARY_OPCODE_X0:
+        case REVBYTES_UNARY_OPCODE_X0:
+        case TBLIDXB0_UNARY_OPCODE_X0:
+        case TBLIDXB1_UNARY_OPCODE_X0:
+        case TBLIDXB2_UNARY_OPCODE_X0:
+        case TBLIDXB3_UNARY_OPCODE_X0:
+        default:
+            break;
+        }
+        break;
+    case XOR_RRR_0_OPCODE_X0:
+        gen_xor(dc, rdst, rsrc, rsrcb);
+        return;
+    /* Not yet implemented: */
+    case ADDXSC_RRR_0_OPCODE_X0:
+    case ADDX_RRR_0_OPCODE_X0:
+    case AND_RRR_0_OPCODE_X0:
+    case CMPEQ_RRR_0_OPCODE_X0:
+    case CMPLES_RRR_0_OPCODE_X0:
+    case CMPLEU_RRR_0_OPCODE_X0:
+    case CMPLTS_RRR_0_OPCODE_X0:
+    case CMPLTU_RRR_0_OPCODE_X0:
+    case CMULAF_RRR_0_OPCODE_X0:
+    case CMULA_RRR_0_OPCODE_X0:
+    case CMULFR_RRR_0_OPCODE_X0:
+    case CMULF_RRR_0_OPCODE_X0:
+    case CMULHR_RRR_0_OPCODE_X0:
+    case CMULH_RRR_0_OPCODE_X0:
+    case CMUL_RRR_0_OPCODE_X0:
+    case CRC32_32_RRR_0_OPCODE_X0:
+    case CRC32_8_RRR_0_OPCODE_X0:
+    case DBLALIGN2_RRR_0_OPCODE_X0:
+    case DBLALIGN4_RRR_0_OPCODE_X0:
+    case DBLALIGN6_RRR_0_OPCODE_X0:
+    case DBLALIGN_RRR_0_OPCODE_X0:
+    case FDOUBLE_ADDSUB_RRR_0_OPCODE_X0:
+    case FDOUBLE_ADD_FLAGS_RRR_0_OPCODE_X0:
+    case FDOUBLE_MUL_FLAGS_RRR_0_OPCODE_X0:
+    case FDOUBLE_PACK1_RRR_0_OPCODE_X0:
+    case FDOUBLE_PACK2_RRR_0_OPCODE_X0:
+    case FDOUBLE_SUB_FLAGS_RRR_0_OPCODE_X0:
+    case FDOUBLE_UNPACK_MAX_RRR_0_OPCODE_X0:
+    case FDOUBLE_UNPACK_MIN_RRR_0_OPCODE_X0:
+    case FSINGLE_ADD1_RRR_0_OPCODE_X0:
+    case FSINGLE_ADDSUB2_RRR_0_OPCODE_X0:
+    case FSINGLE_MUL1_RRR_0_OPCODE_X0:
+    case FSINGLE_MUL2_RRR_0_OPCODE_X0:
+    case FSINGLE_PACK2_RRR_0_OPCODE_X0:
+    case FSINGLE_SUB1_RRR_0_OPCODE_X0:
+    case MNZ_RRR_0_OPCODE_X0:
+    case MULAX_RRR_0_OPCODE_X0:
+    case MULA_HS_HS_RRR_0_OPCODE_X0:
+    case MULA_HS_HU_RRR_0_OPCODE_X0:
+    case MULA_HS_LS_RRR_0_OPCODE_X0:
+    case MULA_HS_LU_RRR_0_OPCODE_X0:
+    case MULA_HU_HU_RRR_0_OPCODE_X0:
+    case MULA_HU_LS_RRR_0_OPCODE_X0:
+    case MULA_HU_LU_RRR_0_OPCODE_X0:
+    case MULA_LS_LS_RRR_0_OPCODE_X0:
+    case MULA_LS_LU_RRR_0_OPCODE_X0:
+    case MULA_LU_LU_RRR_0_OPCODE_X0:
+    case MUL_HS_HS_RRR_0_OPCODE_X0:
+    case MUL_HS_HU_RRR_0_OPCODE_X0:
+    case MUL_HS_LS_RRR_0_OPCODE_X0:
+    case MUL_HS_LU_RRR_0_OPCODE_X0:
+    case MUL_HU_HU_RRR_0_OPCODE_X0:
+    case MUL_HU_LS_RRR_0_OPCODE_X0:
+    case MUL_HU_LU_RRR_0_OPCODE_X0:
+    case MUL_LS_LS_RRR_0_OPCODE_X0:
+    case MUL_LS_LU_RRR_0_OPCODE_X0:
+    case MUL_LU_LU_RRR_0_OPCODE_X0:
+    case MZ_RRR_0_OPCODE_X0:
+    case NOR_RRR_0_OPCODE_X0:
+    case ROTL_RRR_0_OPCODE_X0:
+    case SHL1ADDX_RRR_0_OPCODE_X0:
+    case SHL1ADD_RRR_0_OPCODE_X0:
+    case SHL2ADDX_RRR_0_OPCODE_X0:
+    case SHL2ADD_RRR_0_OPCODE_X0:
+    case SHL3ADDX_RRR_0_OPCODE_X0:
+    case SHL_RRR_0_OPCODE_X0:
+    case SHRS_RRR_0_OPCODE_X0:
+    case SHRUX_RRR_0_OPCODE_X0:
+    case SHRU_RRR_0_OPCODE_X0:
+    case SHUFFLEBYTES_RRR_0_OPCODE_X0:
+    case SUBXSC_RRR_0_OPCODE_X0:
+    case SUB_RRR_0_OPCODE_X0:
+    case V1ADDUC_RRR_0_OPCODE_X0:
+    case V1ADD_RRR_0_OPCODE_X0:
+    case V1ADIFFU_RRR_0_OPCODE_X0:
+    case V1AVGU_RRR_0_OPCODE_X0:
+    case V1CMPEQ_RRR_0_OPCODE_X0:
+    case V1CMPLES_RRR_0_OPCODE_X0:
+    case V1CMPLEU_RRR_0_OPCODE_X0:
+    case V1CMPLTS_RRR_0_OPCODE_X0:
+    case V1CMPLTU_RRR_0_OPCODE_X0:
+    case V1CMPNE_RRR_0_OPCODE_X0:
+    case V1DDOTPUSA_RRR_0_OPCODE_X0:
+    case V1DDOTPUS_RRR_0_OPCODE_X0:
+    case V1DOTPA_RRR_0_OPCODE_X0:
+    case V1DOTPUSA_RRR_0_OPCODE_X0:
+    case V1DOTPUS_RRR_0_OPCODE_X0:
+    case V1DOTP_RRR_0_OPCODE_X0:
+    case V1INT_H_RRR_0_OPCODE_X0:
+    case V1INT_L_RRR_0_OPCODE_X0:
+    case V1MAXU_RRR_0_OPCODE_X0:
+    case V1MINU_RRR_0_OPCODE_X0:
+    case V1MNZ_RRR_0_OPCODE_X0:
+    case V1MULTU_RRR_0_OPCODE_X0:
+    case V1MULUS_RRR_0_OPCODE_X0:
+    case V1MULU_RRR_0_OPCODE_X0:
+    case V1MZ_RRR_0_OPCODE_X0:
+    case V1SADAU_RRR_0_OPCODE_X0:
+    case V1SADU_RRR_0_OPCODE_X0:
+    case V1SHL_RRR_0_OPCODE_X0:
+    case V1SHRS_RRR_0_OPCODE_X0:
+    case V1SHRU_RRR_0_OPCODE_X0:
+    case V1SUBUC_RRR_0_OPCODE_X0:
+    case V1SUB_RRR_0_OPCODE_X0:
+    case V2ADDSC_RRR_0_OPCODE_X0:
+    case V2ADD_RRR_0_OPCODE_X0:
+    case V2ADIFFS_RRR_0_OPCODE_X0:
+    case V2AVGS_RRR_0_OPCODE_X0:
+    case V2CMPEQ_RRR_0_OPCODE_X0:
+    case V2CMPLES_RRR_0_OPCODE_X0:
+    case V2CMPLEU_RRR_0_OPCODE_X0:
+    case V2CMPLTS_RRR_0_OPCODE_X0:
+    case V2CMPLTU_RRR_0_OPCODE_X0:
+    case V2CMPNE_RRR_0_OPCODE_X0:
+    case V2DOTPA_RRR_0_OPCODE_X0:
+    case V2DOTP_RRR_0_OPCODE_X0:
+    case V2INT_H_RRR_0_OPCODE_X0:
+    case V2INT_L_RRR_0_OPCODE_X0:
+    case V2MAXS_RRR_0_OPCODE_X0:
+    case V2MINS_RRR_0_OPCODE_X0:
+    case V2MNZ_RRR_0_OPCODE_X0:
+    case V2MULFSC_RRR_0_OPCODE_X0:
+    case V2MULS_RRR_0_OPCODE_X0:
+    case V2MULTS_RRR_0_OPCODE_X0:
+    case V2MZ_RRR_0_OPCODE_X0:
+    case V2PACKH_RRR_0_OPCODE_X0:
+    case V2PACKL_RRR_0_OPCODE_X0:
+    case V2PACKUC_RRR_0_OPCODE_X0:
+    case V2SADAS_RRR_0_OPCODE_X0:
+    case V2SADAU_RRR_0_OPCODE_X0:
+    case V2SADS_RRR_0_OPCODE_X0:
+    case V2SADU_RRR_0_OPCODE_X0:
+    case V2SHLSC_RRR_0_OPCODE_X0:
+    case V2SHL_RRR_0_OPCODE_X0:
+    case V2SHRS_RRR_0_OPCODE_X0:
+    case V2SHRU_RRR_0_OPCODE_X0:
+    case V2SUBSC_RRR_0_OPCODE_X0:
+    case V2SUB_RRR_0_OPCODE_X0:
+    case V4ADDSC_RRR_0_OPCODE_X0:
+    case V4ADD_RRR_0_OPCODE_X0:
+    case V4INT_H_RRR_0_OPCODE_X0:
+    case V4INT_L_RRR_0_OPCODE_X0:
+    case V4PACKSC_RRR_0_OPCODE_X0:
+    case V4SHLSC_RRR_0_OPCODE_X0:
+    case V4SHL_RRR_0_OPCODE_X0:
+    case V4SHRS_RRR_0_OPCODE_X0:
+    case V4SHRU_RRR_0_OPCODE_X0:
+    case V4SUBSC_RRR_0_OPCODE_X0:
+    case V4SUB_RRR_0_OPCODE_X0:
+    case V1DDOTPUA_RRR_0_OPCODE_X0:
+    case V1DDOTPU_RRR_0_OPCODE_X0:
+    case V1DOTPUA_RRR_0_OPCODE_X0:
+    case V1DOTPU_RRR_0_OPCODE_X0:
+    default:
+        break;
+    }
+    qemu_log_mask(LOG_UNIMP, "UNIMP rrr_0_opcode_x0, %16.16llx\n", bundle);
+    dc->exception = TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
+}
+
+/*
+ * Decode the shift-immediate group of the X0 pipe.  Only shlxi is
+ * implemented; the other shift forms fall through to the
+ * unimplemented-opcode exception.
+ */
+static void decode_shift_opcode_x0(struct DisasContext *dc,
+                                   tilegx_bundle_bits bundle)
+{
+    uint8_t rsrc = (uint8_t)get_SrcA_X0(bundle);
+    uint8_t rdst = (uint8_t)get_Dest_X0(bundle);
+    uint8_t shamt = (uint8_t)get_ShAmt_X0(bundle);
+
+    switch (get_ShiftOpcodeExtension_X0(bundle)) {
+    case SHLXI_SHIFT_OPCODE_X0:
+        gen_shlxi(dc, rdst, rsrc, shamt);
+        return;
+    /* Not yet implemented: */
+    case ROTLI_SHIFT_OPCODE_X0:
+    case SHLI_SHIFT_OPCODE_X0:
+    case SHRSI_SHIFT_OPCODE_X0:
+    case SHRUI_SHIFT_OPCODE_X0:
+    case SHRUXI_SHIFT_OPCODE_X0:
+    case V1SHLI_SHIFT_OPCODE_X0:
+    case V1SHRSI_SHIFT_OPCODE_X0:
+    case V1SHRUI_SHIFT_OPCODE_X0:
+    case V2SHLI_SHIFT_OPCODE_X0:
+    case V2SHRSI_SHIFT_OPCODE_X0:
+    case V2SHRUI_SHIFT_OPCODE_X0:
+    default:
+        break;
+    }
+    qemu_log_mask(LOG_UNIMP, "UNIMP shift_opcode_x0, %16.16llx\n", bundle);
+    dc->exception = TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
+}
+
+/* X0 pipe: shl16insli rdst, rsrc, uimm16. */
+static void decode_shl16insli_opcode_x0(struct DisasContext *dc,
+                                        tilegx_bundle_bits bundle)
+{
+    gen_shl16insli(dc, (uint8_t)get_Dest_X0(bundle),
+                   (uint8_t)get_SrcA_X0(bundle),
+                   (uint16_t)get_Imm16_X0(bundle));
+}
+
+/* X1 pipe: addli rdst, rsrc, imm16. */
+static void decode_addli_opcode_x1(struct DisasContext *dc,
+                                   tilegx_bundle_bits bundle)
+{
+    gen_addli(dc, (uint8_t)get_Dest_X1(bundle),
+              (uint8_t)get_SrcA_X1(bundle), (int16_t)get_Imm16_X1(bundle));
+}
+
+/*
+ * Decode the conditional-branch group of the X1 pipe.  The 17-bit
+ * branch offset is sign-extended and counted in bundles.  The
+ * taken/not-taken prediction variants (the ...T forms) are handled
+ * identically to their plain counterparts, since prediction hints do
+ * not affect emulation.  beqz, bnez and blbc are implemented; the
+ * rest fall through to the unimplemented-opcode exception.
+ */
+static void decode_branch_opcode_x1(struct DisasContext *dc,
+                                    tilegx_bundle_bits bundle)
+{
+    uint8_t src = (uint8_t)get_SrcA_X1(bundle);
+    int32_t off = get_BrOff_X1(bundle);
+
+    switch (get_BrType_X1(bundle)) {
+    case BEQZT_BRANCH_OPCODE_X1:
+    case BEQZ_BRANCH_OPCODE_X1:
+        gen_beqz(dc, src, sign_extend(off, 17));
+        return;
+    case BNEZT_BRANCH_OPCODE_X1:
+    case BNEZ_BRANCH_OPCODE_X1:
+        gen_bnezt(dc, src, sign_extend(off, 17));
+        return;
+    case BLBC_BRANCH_OPCODE_X1:
+        gen_blbc(dc, src, sign_extend(off, 17));
+        return;
+    /* Not yet implemented: */
+    case BGEZT_BRANCH_OPCODE_X1:
+    case BGEZ_BRANCH_OPCODE_X1:
+    case BGTZT_BRANCH_OPCODE_X1:
+    case BGTZ_BRANCH_OPCODE_X1:
+    case BLBCT_BRANCH_OPCODE_X1:
+    case BLBST_BRANCH_OPCODE_X1:
+    case BLBS_BRANCH_OPCODE_X1:
+    case BLEZT_BRANCH_OPCODE_X1:
+    case BLEZ_BRANCH_OPCODE_X1:
+    case BLTZT_BRANCH_OPCODE_X1:
+    case BLTZ_BRANCH_OPCODE_X1:
+    default:
+        break;
+    }
+    qemu_log_mask(LOG_UNIMP, "UNIMP branch_opcode_x1, %16.16llx\n", bundle);
+    dc->exception = TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
+}
+
+/*
+ * Decode the imm8 group of the X1 pipe.  addi, addxi, cmpeqi and
+ * cmpltui are implemented; the remaining extensions (including all
+ * the ld*_add / st*_add post-increment memory forms) fall through to
+ * the unimplemented-opcode exception.
+ */
+static void decode_imm8_opcode_x1(struct DisasContext *dc,
+                                  tilegx_bundle_bits bundle)
+{
+    uint8_t rsrc = (uint8_t)get_SrcA_X1(bundle);
+    uint8_t rdst = (uint8_t)get_Dest_X1(bundle);
+    int8_t imm8 = (int8_t)get_Imm8_X1(bundle);
+
+    switch (get_Imm8OpcodeExtension_X1(bundle)) {
+    case ADDI_IMM8_OPCODE_X1:
+        gen_addi(dc, rdst, rsrc, imm8);
+        return;
+    case ADDXI_IMM8_OPCODE_X1:
+        gen_addxi(dc, rdst, rsrc, imm8);
+        return;
+    case CMPEQI_IMM8_OPCODE_X1:
+        gen_cmpeqi(dc, rdst, rsrc, imm8);
+        return;
+    case CMPLTUI_IMM8_OPCODE_X1:
+        gen_cmpltui(dc, rdst, rsrc, imm8);
+        return;
+    /* Not yet implemented: */
+    case ANDI_IMM8_OPCODE_X1:
+    case CMPLTSI_IMM8_OPCODE_X1:
+    case LD1S_ADD_IMM8_OPCODE_X1:
+    case LD1U_ADD_IMM8_OPCODE_X1:
+    case LD2S_ADD_IMM8_OPCODE_X1:
+    case LD2U_ADD_IMM8_OPCODE_X1:
+    case LD4S_ADD_IMM8_OPCODE_X1:
+    case LD4U_ADD_IMM8_OPCODE_X1:
+    case LDNT1S_ADD_IMM8_OPCODE_X1:
+    case LDNT1U_ADD_IMM8_OPCODE_X1:
+    case LDNT2S_ADD_IMM8_OPCODE_X1:
+    case LDNT2U_ADD_IMM8_OPCODE_X1:
+    case LDNT4S_ADD_IMM8_OPCODE_X1:
+    case LDNT4U_ADD_IMM8_OPCODE_X1:
+    case LDNT_ADD_IMM8_OPCODE_X1:
+    case LD_ADD_IMM8_OPCODE_X1:
+    case LWNA_ADD_IMM8_OPCODE_X1:
+    case MFSPR_IMM8_OPCODE_X1:
+    case MTSPR_IMM8_OPCODE_X1:
+    case ORI_IMM8_OPCODE_X1:
+    case ST1_ADD_IMM8_OPCODE_X1:
+    case ST2_ADD_IMM8_OPCODE_X1:
+    case ST4_ADD_IMM8_OPCODE_X1:
+    case STNT1_ADD_IMM8_OPCODE_X1:
+    case STNT2_ADD_IMM8_OPCODE_X1:
+    case STNT4_ADD_IMM8_OPCODE_X1:
+    case STNT_ADD_IMM8_OPCODE_X1:
+    case ST_ADD_IMM8_OPCODE_X1:
+    case V1ADDI_IMM8_OPCODE_X1:
+    case V1CMPEQI_IMM8_OPCODE_X1:
+    case V1CMPLTSI_IMM8_OPCODE_X1:
+    case V1CMPLTUI_IMM8_OPCODE_X1:
+    case V1MAXUI_IMM8_OPCODE_X1:
+    case V1MINUI_IMM8_OPCODE_X1:
+    case V2ADDI_IMM8_OPCODE_X1:
+    case V2CMPEQI_IMM8_OPCODE_X1:
+    case V2CMPLTSI_IMM8_OPCODE_X1:
+    case V2CMPLTUI_IMM8_OPCODE_X1:
+    case V2MAXSI_IMM8_OPCODE_X1:
+    case V2MINSI_IMM8_OPCODE_X1:
+    case XORI_IMM8_OPCODE_X1:
+    default:
+        break;
+    }
+    qemu_log_mask(LOG_UNIMP, "UNIMP imm8_opcode_x1, %16.16llx\n", bundle);
+    dc->exception = TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
+}
+
+/* X1 pipe: j and jal, with a 27-bit sign-extended bundle offset. */
+static void decode_jump_opcode_x1(struct DisasContext *dc,
+                                  tilegx_bundle_bits bundle)
+{
+    int off = sign_extend(get_JumpOff_X1(bundle), 27);
+
+    switch (get_JumpOpcodeExtension_X1(bundle)) {
+    case J_JUMP_OPCODE_X1:
+        gen_j(dc, off);
+        return;
+    case JAL_JUMP_OPCODE_X1:
+        gen_jal(dc, off);
+        return;
+    default:
+        break;
+    }
+    qemu_log_mask(LOG_UNIMP, "UNIMP jump_opcode_x1, %16.16llx\n", bundle);
+    dc->exception = TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
+}
+
+/*
+ * Decode the rrr_0 group of the X1 pipe.  Implemented: addx, add,
+ * and, or, cmpne, shl3add, st4/st (when the Dest field is zero, as
+ * required for the store encodings), nop/fnop, jr/jrp, ld4s, ld, lnk
+ * and swint1.  Everything else falls through to the
+ * unimplemented-opcode exception at the bottom.
+ */
+static void decode_rrr_0_opcode_x1(struct DisasContext *dc,
+                                   tilegx_bundle_bits bundle)
+{
+    uint8_t rsrc = (uint8_t)get_SrcA_X1(bundle);
+    uint8_t rsrcb = (uint8_t)get_SrcB_X1(bundle);
+    uint8_t rdst = (uint8_t)get_Dest_X1(bundle);
+
+    switch (get_RRROpcodeExtension_X1(bundle)) {
+    case ADDX_RRR_0_OPCODE_X1:
+        gen_addx(dc, rdst, rsrc, rsrcb);
+        return;
+    case ADD_RRR_0_OPCODE_X1:
+        gen_add(dc, rdst, rsrc, rsrcb);
+        return;
+    case AND_RRR_0_OPCODE_X1:
+        gen_and(dc, rdst, rsrc, rsrcb);
+        return;
+    case OR_RRR_0_OPCODE_X1:
+        gen_or(dc, rdst, rsrc, rsrcb);
+        return;
+    case CMPNE_RRR_0_OPCODE_X1:
+        gen_cmpne(dc, rdst, rsrc, rsrcb);
+        return;
+    case SHL3ADD_RRR_0_OPCODE_X1:
+        gen_shl3add(dc, rdst, rsrc, rsrcb);
+        return;
+    case ST4_RRR_0_OPCODE_X1:
+        /* Stores encode no destination; a non-zero Dest field is invalid. */
+        if (!rdst) {
+            gen_st4(dc, rsrc, rsrcb);
+            return;
+        }
+        break;
+    case ST_RRR_0_OPCODE_X1:
+        if (!rdst) {
+            gen_st(dc, rsrc, rsrcb);
+            return;
+        }
+        break;
+    case UNARY_RRR_0_OPCODE_X1:
+        switch (get_UnaryOpcodeExtension_X1(bundle)) {
+        case NOP_UNARY_OPCODE_X1:
+        case FNOP_UNARY_OPCODE_X1:
+            if (!rdst && !rsrc) {
+                gen_fnop();
+                return;
+            }
+            break;
+        case JRP_UNARY_OPCODE_X1:
+        case JR_UNARY_OPCODE_X1:
+            if (!rdst) {
+                gen_jr(dc, rsrc);
+                return;
+            }
+            break;
+        case LD4S_UNARY_OPCODE_X1:
+            gen_ld4s(dc, rdst, rsrc);
+            return;
+        case LD_UNARY_OPCODE_X1:
+            gen_ld(dc, rdst, rsrc);
+            return;
+        case LNK_UNARY_OPCODE_X1:
+            if (!rsrc) {
+                gen_lnk(dc, (uint8_t) get_Dest_X1(bundle));
+                return;
+            }
+            break;
+        case SWINT1_UNARY_OPCODE_X1:
+            if (!rsrc && !rdst) {
+                gen_swint1(dc);
+                return;
+            }
+            break;
+        /* Remaining unary extensions: not yet implemented. */
+        case DRAIN_UNARY_OPCODE_X1:
+        case DTLBPR_UNARY_OPCODE_X1:
+        case FINV_UNARY_OPCODE_X1:
+        case FLUSHWB_UNARY_OPCODE_X1:
+        case FLUSH_UNARY_OPCODE_X1:
+        case ICOH_UNARY_OPCODE_X1:
+        case ILL_UNARY_OPCODE_X1:
+        case INV_UNARY_OPCODE_X1:
+        case IRET_UNARY_OPCODE_X1:
+        case JALRP_UNARY_OPCODE_X1:
+        case JALR_UNARY_OPCODE_X1:
+        case LD1S_UNARY_OPCODE_X1:
+        case LD1U_UNARY_OPCODE_X1:
+        case LD2S_UNARY_OPCODE_X1:
+        case LD2U_UNARY_OPCODE_X1:
+        case LD4U_UNARY_OPCODE_X1:
+        case LDNA_UNARY_OPCODE_X1:
+        case LDNT1S_UNARY_OPCODE_X1:
+        case LDNT1U_UNARY_OPCODE_X1:
+        case LDNT2S_UNARY_OPCODE_X1:
+        case LDNT2U_UNARY_OPCODE_X1:
+        case LDNT4S_UNARY_OPCODE_X1:
+        case LDNT4U_UNARY_OPCODE_X1:
+        case LDNT_UNARY_OPCODE_X1:
+        case MF_UNARY_OPCODE_X1:
+        case NAP_UNARY_OPCODE_X1:
+        case SWINT0_UNARY_OPCODE_X1:
+        case SWINT2_UNARY_OPCODE_X1:
+        case SWINT3_UNARY_OPCODE_X1:
+        case WH64_UNARY_OPCODE_X1:
+        default:
+            break;
+        }
+        break;
+    /* Not yet implemented: */
+    case ADDXSC_RRR_0_OPCODE_X1:
+    case CMPEQ_RRR_0_OPCODE_X1:
+    case CMPEXCH4_RRR_0_OPCODE_X1:
+    case CMPEXCH_RRR_0_OPCODE_X1:
+    case CMPLES_RRR_0_OPCODE_X1:
+    case CMPLEU_RRR_0_OPCODE_X1:
+    case CMPLTS_RRR_0_OPCODE_X1:
+    case CMPLTU_RRR_0_OPCODE_X1:
+    case DBLALIGN2_RRR_0_OPCODE_X1:
+    case DBLALIGN4_RRR_0_OPCODE_X1:
+    case DBLALIGN6_RRR_0_OPCODE_X1:
+    case EXCH4_RRR_0_OPCODE_X1:
+    case EXCH_RRR_0_OPCODE_X1:
+    case FETCHADD4_RRR_0_OPCODE_X1:
+    case FETCHADDGEZ4_RRR_0_OPCODE_X1:
+    case FETCHADDGEZ_RRR_0_OPCODE_X1:
+    case FETCHADD_RRR_0_OPCODE_X1:
+    case FETCHAND4_RRR_0_OPCODE_X1:
+    case FETCHAND_RRR_0_OPCODE_X1:
+    case FETCHOR4_RRR_0_OPCODE_X1:
+    case FETCHOR_RRR_0_OPCODE_X1:
+    case MNZ_RRR_0_OPCODE_X1:
+    case MZ_RRR_0_OPCODE_X1:
+    case NOR_RRR_0_OPCODE_X1:
+    case ROTL_RRR_0_OPCODE_X1:
+    case SHL1ADDX_RRR_0_OPCODE_X1:
+    case SHL1ADD_RRR_0_OPCODE_X1:
+    case SHL2ADDX_RRR_0_OPCODE_X1:
+    case SHL2ADD_RRR_0_OPCODE_X1:
+    case SHL3ADDX_RRR_0_OPCODE_X1:
+    case SHLX_RRR_0_OPCODE_X1:
+    case SHL_RRR_0_OPCODE_X1:
+    case SHRS_RRR_0_OPCODE_X1:
+    case SHRUX_RRR_0_OPCODE_X1:
+    case SHRU_RRR_0_OPCODE_X1:
+    case ST1_RRR_0_OPCODE_X1:
+    case ST2_RRR_0_OPCODE_X1:
+    case STNT1_RRR_0_OPCODE_X1:
+    case STNT2_RRR_0_OPCODE_X1:
+    case STNT4_RRR_0_OPCODE_X1:
+    case STNT_RRR_0_OPCODE_X1:
+    case SUBXSC_RRR_0_OPCODE_X1:
+    case SUBX_RRR_0_OPCODE_X1:
+    case SUB_RRR_0_OPCODE_X1:
+    case V1ADDUC_RRR_0_OPCODE_X1:
+    case V1ADD_RRR_0_OPCODE_X1:
+    case V1CMPEQ_RRR_0_OPCODE_X1:
+    case V1CMPLES_RRR_0_OPCODE_X1:
+    case V1CMPLEU_RRR_0_OPCODE_X1:
+    case V1CMPLTS_RRR_0_OPCODE_X1:
+    case V1CMPLTU_RRR_0_OPCODE_X1:
+    case V1CMPNE_RRR_0_OPCODE_X1:
+    case V1INT_H_RRR_0_OPCODE_X1:
+    case V1INT_L_RRR_0_OPCODE_X1:
+    case V1MAXU_RRR_0_OPCODE_X1:
+    case V1MINU_RRR_0_OPCODE_X1:
+    case V1MNZ_RRR_0_OPCODE_X1:
+    case V1MZ_RRR_0_OPCODE_X1:
+    case V1SHL_RRR_0_OPCODE_X1:
+    case V1SHRS_RRR_0_OPCODE_X1:
+    case V1SHRU_RRR_0_OPCODE_X1:
+    case V1SUBUC_RRR_0_OPCODE_X1:
+    case V1SUB_RRR_0_OPCODE_X1:
+    case V2ADDSC_RRR_0_OPCODE_X1:
+    case V2ADD_RRR_0_OPCODE_X1:
+    case V2CMPEQ_RRR_0_OPCODE_X1:
+    case V2CMPLES_RRR_0_OPCODE_X1:
+    case V2CMPLEU_RRR_0_OPCODE_X1:
+    case V2CMPLTS_RRR_0_OPCODE_X1:
+    case V2CMPLTU_RRR_0_OPCODE_X1:
+    case V2CMPNE_RRR_0_OPCODE_X1:
+    case V2INT_H_RRR_0_OPCODE_X1:
+    case V2INT_L_RRR_0_OPCODE_X1:
+    case V2MAXS_RRR_0_OPCODE_X1:
+    case V2MINS_RRR_0_OPCODE_X1:
+    case V2MNZ_RRR_0_OPCODE_X1:
+    case V2MZ_RRR_0_OPCODE_X1:
+    case V2PACKH_RRR_0_OPCODE_X1:
+    case V2PACKL_RRR_0_OPCODE_X1:
+    case V2PACKUC_RRR_0_OPCODE_X1:
+    case V2SHLSC_RRR_0_OPCODE_X1:
+    case V2SHL_RRR_0_OPCODE_X1:
+    case V2SHRS_RRR_0_OPCODE_X1:
+    case V2SHRU_RRR_0_OPCODE_X1:
+    case V2SUBSC_RRR_0_OPCODE_X1:
+    case V2SUB_RRR_0_OPCODE_X1:
+    case V4ADDSC_RRR_0_OPCODE_X1:
+    case V4ADD_RRR_0_OPCODE_X1:
+    case V4INT_H_RRR_0_OPCODE_X1:
+    case V4INT_L_RRR_0_OPCODE_X1:
+    case V4PACKSC_RRR_0_OPCODE_X1:
+    case V4SHLSC_RRR_0_OPCODE_X1:
+    case V4SHL_RRR_0_OPCODE_X1:
+    case V4SHRS_RRR_0_OPCODE_X1:
+    case V4SHRU_RRR_0_OPCODE_X1:
+    case V4SUBSC_RRR_0_OPCODE_X1:
+    case V4SUB_RRR_0_OPCODE_X1:
+    case XOR_RRR_0_OPCODE_X1:
+    default:
+        break;
+    }
+    qemu_log_mask(LOG_UNIMP, "UNIMP rrr_0_opcode_x1, %16.16llx\n", bundle);
+    dc->exception = TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
+}
+
+/*
+ * Decode a SHIFT-group instruction in the X1 pipe.  Only SHLXI
+ * (shift-left immediate, 32-bit "x" form) is implemented so far; every
+ * other shift sub-opcode falls through to the unimplemented path and
+ * raises TILEGX_EXCP_OPCODE_UNIMPLEMENTED.
+ */
+static void decode_shift_opcode_x1(struct DisasContext *dc,
+                                   tilegx_bundle_bits bundle)
+{
+    uint8_t rsrc = (uint8_t)get_SrcA_X1(bundle);
+    uint8_t rdst = (uint8_t)get_Dest_X1(bundle);
+    uint8_t shamt = (uint8_t)get_ShAmt_X1(bundle);
+
+    switch (get_ShiftOpcodeExtension_X1(bundle)) {
+    case SHLXI_SHIFT_OPCODE_X1:
+        gen_shlxi(dc, rdst, rsrc, shamt);
+        return;
+    /* Not yet implemented: fall through to the UNIMP report below.  */
+    case ROTLI_SHIFT_OPCODE_X1:
+    case SHLI_SHIFT_OPCODE_X1:
+    case SHRSI_SHIFT_OPCODE_X1:
+    case SHRUI_SHIFT_OPCODE_X1:
+    case SHRUXI_SHIFT_OPCODE_X1:
+    case V1SHLI_SHIFT_OPCODE_X1:
+    case V1SHRSI_SHIFT_OPCODE_X1:
+    case V1SHRUI_SHIFT_OPCODE_X1:
+    case V2SHLI_SHIFT_OPCODE_X1:
+    case V2SHRSI_SHIFT_OPCODE_X1:
+    case V2SHRUI_SHIFT_OPCODE_X1:
+    default:
+        break;
+    }
+    /* NOTE(review): "%llx" assumes tilegx_bundle_bits is unsigned long
+     * long; "%" PRIx64 " with a cast would be portable — confirm.  */
+    qemu_log_mask(LOG_UNIMP, "UNIMP shift_opcode_x1, %16.16llx\n", bundle);
+    dc->exception = TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
+}
+
+/*
+ * Decode SHL16INSLI in the X1 slot (shift left 16 and insert the 16-bit
+ * immediate — used to build wide constants) and emit it via
+ * gen_shl16insli().
+ */
+static void decode_shl16insli_opcode_x1(struct DisasContext *dc,
+                                        tilegx_bundle_bits bundle)
+{
+    uint8_t rsrc = (uint8_t)get_SrcA_X1(bundle);
+    uint8_t rdst = (uint8_t)get_Dest_X1(bundle);
+    uint16_t uimm16 = (uint16_t)get_Imm16_X1(bundle);
+
+    gen_shl16insli(dc, rdst, rsrc, uimm16);
+}
+
+/*
+ * Decode the Y0 slot of a Y-mode bundle.  Register writes for this slot
+ * are staged in dc->tmp_regs[0] and committed after the whole bundle
+ * has been translated (see translate_one_bundle).  Only the ADDI,
+ * RRR_1 and RRR_5 sub-opcode groups are handled so far.
+ */
+static void decode_y0(struct DisasContext *dc, tilegx_bundle_bits bundle)
+{
+    unsigned int opcode = get_Opcode_Y0(bundle);
+
+    dc->tmp_regcur = dc->tmp_regs + 0;
+
+    switch (opcode) {
+    case ADDI_OPCODE_Y0:
+        decode_addi_opcode_y0(dc, bundle);
+        return;
+    case RRR_1_OPCODE_Y0:
+        decode_rrr_1_opcode_y0(dc, bundle);
+        return;
+    case RRR_5_OPCODE_Y0:
+        decode_rrr_5_opcode_y0(dc, bundle);
+        return;
+    /* Not yet implemented: report UNIMP and raise an exception.  */
+    case ADDXI_OPCODE_Y0:
+    case ANDI_OPCODE_Y0:
+    case CMPEQI_OPCODE_Y0:
+    case CMPLTSI_OPCODE_Y0:
+    case RRR_0_OPCODE_Y0:
+    case RRR_2_OPCODE_Y0:
+    case RRR_3_OPCODE_Y0:
+    case RRR_4_OPCODE_Y0:
+    case RRR_6_OPCODE_Y0:
+    case RRR_7_OPCODE_Y0:
+    case RRR_8_OPCODE_Y0:
+    case RRR_9_OPCODE_Y0:
+    case SHIFT_OPCODE_Y0:
+    default:
+        qemu_log_mask(LOG_UNIMP, "UNIMP y0, opcode %d, bundle %16.16llx\n",
+                      opcode, bundle);
+        dc->exception = TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
+        return;
+    }
+}
+
+/*
+ * Decode the Y1 slot of a Y-mode bundle.  Register writes for this slot
+ * are staged in dc->tmp_regs[1].  Only the ADDI, RRR_1 and RRR_5
+ * sub-opcode groups are handled so far.
+ */
+static void decode_y1(struct DisasContext *dc, tilegx_bundle_bits bundle)
+{
+    unsigned int opcode = get_Opcode_Y1(bundle);
+
+    dc->tmp_regcur = dc->tmp_regs + 1;
+
+    switch (opcode) {
+    case ADDI_OPCODE_Y1:
+        decode_addi_opcode_y1(dc, bundle);
+        return;
+    case RRR_1_OPCODE_Y1:
+        decode_rrr_1_opcode_y1(dc, bundle);
+        return;
+    case RRR_5_OPCODE_Y1:
+        decode_rrr_5_opcode_y1(dc, bundle);
+        return;
+    /* Not yet implemented: report UNIMP and raise an exception.  */
+    case ADDXI_OPCODE_Y1:
+    case ANDI_OPCODE_Y1:
+    case CMPEQI_OPCODE_Y1:
+    case CMPLTSI_OPCODE_Y1:
+    case RRR_0_OPCODE_Y1:
+    case RRR_2_OPCODE_Y1:
+    case RRR_3_OPCODE_Y1:
+    case RRR_4_OPCODE_Y1:
+    case RRR_6_OPCODE_Y1:
+    case RRR_7_OPCODE_Y1:
+    case SHIFT_OPCODE_Y1:
+    default:
+        qemu_log_mask(LOG_UNIMP, "UNIMP y1, opcode %d, bundle %16.16llx\n",
+                      opcode, bundle);
+        dc->exception = TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
+        return;
+    }
+}
+
+/*
+ * Decode the Y2 slot of a Y-mode bundle.  Register writes for this slot
+ * are staged in dc->tmp_regs[2].  The opcode selects one of four
+ * load/store groups (the opcode constants for the individual
+ * instructions within a group share the same value, hence the numeric
+ * case labels); anything else is unimplemented.
+ */
+static void decode_y2(struct DisasContext *dc, tilegx_bundle_bits bundle)
+{
+    unsigned int opcode = get_Opcode_Y2(bundle);
+
+    dc->tmp_regcur = dc->tmp_regs + 2;
+
+    switch (opcode) {
+    case 0: /* LD1S_OPCODE_Y2, ST1_OPCODE_Y2 */
+        decode_ldst0_opcode_y2(dc, bundle);
+        return;
+    case 1: /* LD4S_OPCODE_Y2, LD1U_OPCODE_Y2, ST2_OPCODE_Y2 */
+        decode_ldst1_opcode_y2(dc, bundle);
+        return;
+    case 2: /* LD2S_OPCODE_Y2, LD4U_OPCODE_Y2, ST4_OPCODE_Y2 */
+        decode_ldst2_opcode_y2(dc, bundle);
+        return;
+    case 3: /* LD_OPCODE_Y2, ST_OPCODE_Y2, LD2U_OPCODE_Y2 */
+        decode_ldst3_opcode_y2(dc, bundle);
+        return;
+    default:
+        qemu_log_mask(LOG_UNIMP, "UNIMP y2, opcode %d, bundle %16.16llx\n",
+                      opcode, bundle);
+        dc->exception = TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
+        return;
+    }
+}
+
+/*
+ * Decode the X0 slot of an X-mode bundle.  Register writes for this
+ * slot are staged in dc->tmp_regs[0].  All X0 opcode groups except
+ * ADDXLI are dispatched to their group decoders.
+ */
+static void decode_x0(struct DisasContext *dc, tilegx_bundle_bits bundle)
+{
+    unsigned int opcode = get_Opcode_X0(bundle);
+
+    dc->tmp_regcur = dc->tmp_regs + 0;
+
+    switch (opcode) {
+    case ADDLI_OPCODE_X0:
+        decode_addli_opcode_x0(dc, bundle);
+        return;
+    case BF_OPCODE_X0:
+        decode_bf_opcode_x0(dc, bundle);
+        return;
+    case IMM8_OPCODE_X0:
+        decode_imm8_opcode_x0(dc, bundle);
+        return;
+    case RRR_0_OPCODE_X0:
+        decode_rrr_0_opcode_x0(dc, bundle);
+        return;
+    case SHIFT_OPCODE_X0:
+        decode_shift_opcode_x0(dc, bundle);
+        return;
+    case SHL16INSLI_OPCODE_X0:
+        decode_shl16insli_opcode_x0(dc, bundle);
+        return;
+    case ADDXLI_OPCODE_X0: /* not yet implemented */
+    default:
+        qemu_log_mask(LOG_UNIMP, "UNIMP x0, opcode %d, bundle %16.16llx\n",
+                      opcode, bundle);
+        dc->exception = TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
+        return;
+    }
+}
+
+/*
+ * Decode the X1 slot of an X-mode bundle.  Register writes for this
+ * slot are staged in dc->tmp_regs[1].  X1 is the slot that carries
+ * control flow (BRANCH, JUMP); all groups except ADDXLI are dispatched
+ * to their group decoders.
+ */
+static void decode_x1(struct DisasContext *dc, tilegx_bundle_bits bundle)
+{
+    unsigned int opcode = get_Opcode_X1(bundle);
+
+    dc->tmp_regcur = dc->tmp_regs + 1;
+
+    switch (opcode) {
+    case ADDLI_OPCODE_X1:
+        decode_addli_opcode_x1(dc, bundle);
+        return;
+    case BRANCH_OPCODE_X1:
+        decode_branch_opcode_x1(dc, bundle);
+        return;
+    case IMM8_OPCODE_X1:
+        decode_imm8_opcode_x1(dc, bundle);
+        return;
+    case JUMP_OPCODE_X1:
+        decode_jump_opcode_x1(dc, bundle);
+        return;
+    case RRR_0_OPCODE_X1:
+        decode_rrr_0_opcode_x1(dc, bundle);
+        return;
+    case SHIFT_OPCODE_X1:
+        decode_shift_opcode_x1(dc, bundle);
+        return;
+    case SHL16INSLI_OPCODE_X1:
+        decode_shl16insli_opcode_x1(dc, bundle);
+        return;
+    case ADDXLI_OPCODE_X1: /* not yet implemented */
+    default:
+        qemu_log_mask(LOG_UNIMP, "UNIMP x1, opcode %d, bundle %16.16llx\n",
+                      opcode, bundle);
+        dc->exception = TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
+        return;
+    }
+}
+
+/*
+ * Translate one 64-bit instruction bundle.
+ *
+ * The per-slot decoders stage their register writes in dc->tmp_regs[]
+ * so every slot of the bundle reads the pre-bundle register state; the
+ * staged values are committed afterwards.  A branch queued in dc->jmp
+ * is resolved last and ends the translation block.
+ *
+ * (Fixed here: the tcg_gen_mov_i64 call had been soft-wrapped by the
+ * mailer — continuation line missing its '+' prefix plus trailing
+ * whitespace — which made the patch unappliable.)
+ */
+static void translate_one_bundle(struct DisasContext *dc, uint64_t bundle)
+{
+    int i;
+    TCGv tmp;
+
+    for (i = 0; i < TILEGX_MAX_INSTRUCTIONS_PER_BUNDLE; i++) {
+        dc->tmp_regs[i].idx = TILEGX_R_NOREG;
+        TCGV_UNUSED_I64(dc->tmp_regs[i].val);
+    }
+
+    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
+        tcg_gen_debug_insn_start(dc->pc);
+    }
+
+    if (get_Mode(bundle)) {
+        decode_y0(dc, bundle);
+        decode_y1(dc, bundle);
+        decode_y2(dc, bundle);
+    } else {
+        decode_x0(dc, bundle);
+        decode_x1(dc, bundle);
+    }
+
+    /* Commit staged writes.  Indexes at or above TILEGX_R_COUNT (the
+     * special/zero registers, presumably — confirm) are discarded, but
+     * their temporaries are still freed.  */
+    for (i = 0; i < TILEGX_MAX_INSTRUCTIONS_PER_BUNDLE; i++) {
+        if (dc->tmp_regs[i].idx == TILEGX_R_NOREG) {
+            continue;
+        }
+        if (dc->tmp_regs[i].idx < TILEGX_R_COUNT) {
+            tcg_gen_mov_i64(cpu_regs[dc->tmp_regs[i].idx],
+                            dc->tmp_regs[i].val);
+        }
+        tcg_temp_free_i64(dc->tmp_regs[i].val);
+    }
+
+    /* Resolve a queued branch: unconditional jumps set pc directly;
+     * conditional ones select between target and fall-through.  Either
+     * way the TB ends here.  */
+    if (dc->jmp.cond != TCG_COND_NEVER) {
+        if (dc->jmp.cond == TCG_COND_ALWAYS) {
+            tcg_gen_mov_i64(cpu_pc, dc->jmp.dest);
+        } else {
+            tmp = tcg_const_i64(dc->pc + TILEGX_BUNDLE_SIZE_IN_BYTES);
+            tcg_gen_movcond_i64(dc->jmp.cond, cpu_pc,
+                                dc->jmp.val1, dc->jmp.val2,
+                                dc->jmp.dest, tmp);
+            tcg_temp_free_i64(dc->jmp.val1);
+            tcg_temp_free_i64(dc->jmp.val2);
+            tcg_temp_free_i64(tmp);
+        }
+        tcg_temp_free_i64(dc->jmp.dest);
+        tcg_gen_exit_tb(0);
+    }
+}
+
+/*
+ * Translate guest bundles starting at tb->pc into TCG ops.
+ *
+ * With @search_pc set, the tcg_ctx.gen_opc_* side tables are filled so
+ * a generated-op index can later be mapped back to a guest pc;
+ * otherwise tb->size and tb->icount are recorded.  Translation stops at
+ * a branch or exception, the end of the guest page, the insn budget, or
+ * a full TCG op buffer.
+ *
+ * (Fixed here: the next_page_start initializer had been soft-wrapped by
+ * the mailer — continuation line missing its '+' prefix — which made
+ * the patch unappliable; also dropped a redundant trailing return.)
+ */
+static inline void gen_intermediate_code_internal(TileGXCPU *cpu,
+                                                  TranslationBlock *tb,
+                                                  bool search_pc)
+{
+    DisasContext ctx;
+    DisasContext *dc = &ctx;
+
+    CPUTLGState *env = &cpu->env;
+    uint64_t pc_start = tb->pc;
+    uint64_t next_page_start = (pc_start & TARGET_PAGE_MASK) +
+                               TARGET_PAGE_SIZE;
+    int j, lj = -1;
+    int num_insns = 0;
+    int max_insns = tb->cflags & CF_COUNT_MASK;
+
+    dc->pc = pc_start;
+    dc->exception = TILEGX_EXCP_NONE;
+    dc->jmp.cond = TCG_COND_NEVER;
+    TCGV_UNUSED_I64(dc->jmp.dest);
+    TCGV_UNUSED_I64(dc->jmp.val1);
+    TCGV_UNUSED_I64(dc->jmp.val2);
+
+    if (!max_insns) {
+        max_insns = CF_COUNT_MASK;
+    }
+    gen_tb_start(tb);
+
+    do {
+        /* NOTE(review): dc->zero is reset per bundle but never freed
+         * here — confirm the decoders release it.  */
+        TCGV_UNUSED_I64(dc->zero);
+        if (search_pc) {
+            j = tcg_op_buf_count();
+            if (lj < j) {
+                lj++;
+                while (lj < j) {
+                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
+                }
+            }
+            tcg_ctx.gen_opc_pc[lj] = dc->pc;
+            tcg_ctx.gen_opc_instr_start[lj] = 1;
+            tcg_ctx.gen_opc_icount[lj] = num_insns;
+        }
+        translate_one_bundle(dc, cpu_ldq_data(env, dc->pc));
+        num_insns++;
+        dc->pc += TILEGX_BUNDLE_SIZE_IN_BYTES;
+        if (dc->exception != TILEGX_EXCP_NONE) {
+            gen_exception(dc, dc->exception);
+            break;
+        }
+    } while (dc->jmp.cond == TCG_COND_NEVER && dc->pc < next_page_start
+             && num_insns < max_insns && !tcg_op_buf_full());
+
+    gen_tb_end(tb, num_insns);
+    if (search_pc) {
+        /* Pad the remainder of the side table with "no insn starts
+         * here" markers.  */
+        j = tcg_op_buf_count();
+        lj++;
+        while (lj <= j) {
+            tcg_ctx.gen_opc_instr_start[lj++] = 0;
+        }
+    } else {
+        tb->size = dc->pc - pc_start;
+        tb->icount = num_insns;
+    }
+}
+
+/* Public entry point: translate a TB without pc-search side tables.  */
+void gen_intermediate_code(CPUTLGState *env, struct TranslationBlock *tb)
+{
+    gen_intermediate_code_internal(tilegx_env_get_cpu(env), tb, false);
+}
+
+/* Public entry point: retranslate a TB filling the gen_opc_* side
+ * tables so an op index can be mapped back to a guest pc.  */
+void gen_intermediate_code_pc(CPUTLGState *env, struct TranslationBlock *tb)
+{
+    gen_intermediate_code_internal(tilegx_env_get_cpu(env), tb, true);
+}
+
+/* Restore the guest pc recorded for generated-op index @pc_pos
+ * (filled in by gen_intermediate_code_pc).  */
+void restore_state_to_opc(CPUTLGState *env, TranslationBlock *tb, int pc_pos)
+{
+    env->pc = tcg_ctx.gen_opc_pc[pc_pos];
+}
+
+/*
+ * Allocate the global TCG values backing the CPU state: the env
+ * pointer, the pc, and the TILEGX_R_COUNT general registers.
+ *
+ * (Fixed here: the cpu_pc assignment had been soft-wrapped by the
+ * mailer — continuation line missing its '+' prefix plus trailing
+ * whitespace — which made the patch unappliable.)
+ */
+void tilegx_tcg_init(void)
+{
+    int i;
+
+    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
+    cpu_pc = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUTLGState, pc),
+                                    "pc");
+    for (i = 0; i < TILEGX_R_COUNT; i++) {
+        cpu_regs[i] = tcg_global_mem_new_i64(TCG_AREG0,
+                                             offsetof(CPUTLGState, regs[i]),
+                                             reg_names[i]);
+    }
+}
-- 
1.9.3



reply via email to

[Prev in Thread] Current Thread [Next in Thread]