qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Qemu-devel] [RFC 1/1] riscv: Add full-system emulation support for the RISC-V Instruction Set Architecture (RV64G)


From: Sagar Karandikar
Subject: [Qemu-devel] [RFC 1/1] riscv: Add full-system emulation support for the RISC-V Instruction Set Architecture (RV64G)
Date: Thu, 18 Feb 2016 17:02:05 -0800

Signed-off-by: Sagar Karandikar <address@hidden>
---
 arch_init.c                                        |    2 +
 configure                                          |   13 +
 cpus.c                                             |    6 +
 default-configs/riscv-linux-user.mak               |    1 +
 default-configs/riscv-softmmu.mak                  |   38 +
 disas.c                                            |    2 +
 disas/Makefile.objs                                |    1 +
 disas/riscv.c                                      |   46 +
 hw/riscv/Makefile.objs                             |    5 +
 hw/riscv/cputimer.c                                |  170 ++
 hw/riscv/htif/frontend.c                           |  215 ++
 hw/riscv/htif/htif.c                               |  459 +++++
 hw/riscv/riscv_board.c                             |  330 +++
 hw/riscv/riscv_int.c                               |   84 +
 hw/riscv/softint.c                                 |  121 ++
 include/disas/bfd.h                                |    1 +
 include/elf.h                                      |    2 +
 include/exec/poison.h                              |    1 +
 include/exec/user/thunk.h                          |    2 +-
 include/hw/riscv/bios.h                            |    4 +
 include/hw/riscv/cpudevs.h                         |   14 +
 include/hw/riscv/cputimer.h                        |    4 +
 include/hw/riscv/htif/frontend.h                   |   30 +
 include/hw/riscv/htif/htif.h                       |   76 +
 include/hw/riscv/riscv.h                           |   10 +
 include/hw/riscv/softint.h                         |   50 +
 include/sysemu/arch_init.h                         |    1 +
 target-riscv/Makefile.objs                         |  114 ++
 target-riscv/TODO                                  |   17 +
 target-riscv/cpu-qom.h                             |   86 +
 target-riscv/cpu.c                                 |  143 ++
 target-riscv/cpu.h                                 |  449 ++++
 .../fpu-custom-riscv/8086/OLD-specialize.c         |   40 +
 .../fpu-custom-riscv/8086/OLD-specialize.h         |  379 ++++
 target-riscv/fpu-custom-riscv/8086/platform.h      |   38 +
 .../fpu-custom-riscv/8086/s_commonNaNToF32UI.c     |   17 +
 .../fpu-custom-riscv/8086/s_commonNaNToF64UI.c     |   19 +
 .../fpu-custom-riscv/8086/s_f32UIToCommonNaN.c     |   25 +
 .../fpu-custom-riscv/8086/s_f64UIToCommonNaN.c     |   25 +
 .../fpu-custom-riscv/8086/s_isSigNaNF32UI.c        |   13 +
 .../fpu-custom-riscv/8086/s_isSigNaNF64UI.c        |   15 +
 .../fpu-custom-riscv/8086/s_propagateNaNF32UI.c    |   55 +
 .../fpu-custom-riscv/8086/s_propagateNaNF64UI.c    |   55 +
 .../fpu-custom-riscv/8086/softfloat_raiseFlags.c   |   51 +
 .../fpu-custom-riscv/8086/softfloat_types.h        |   16 +
 target-riscv/fpu-custom-riscv/8086/specialize.h    |  113 +
 target-riscv/fpu-custom-riscv/f32_add.c            |   29 +
 target-riscv/fpu-custom-riscv/f32_classify.c       |   33 +
 target-riscv/fpu-custom-riscv/f32_div.c            |   96 +
 target-riscv/fpu-custom-riscv/f32_eq.c             |   34 +
 target-riscv/fpu-custom-riscv/f32_eq_signaling.c   |   29 +
 target-riscv/fpu-custom-riscv/f32_isSignalingNaN.c |   16 +
 target-riscv/fpu-custom-riscv/f32_le.c             |   34 +
 target-riscv/fpu-custom-riscv/f32_le_quiet.c       |   39 +
 target-riscv/fpu-custom-riscv/f32_lt.c             |   34 +
 target-riscv/fpu-custom-riscv/f32_lt_quiet.c       |   39 +
 target-riscv/fpu-custom-riscv/f32_mul.c            |   89 +
 target-riscv/fpu-custom-riscv/f32_mulAdd.c         |   25 +
 target-riscv/fpu-custom-riscv/f32_rem.c            |  124 ++
 target-riscv/fpu-custom-riscv/f32_roundToInt.c     |   78 +
 target-riscv/fpu-custom-riscv/f32_sqrt.c           |   74 +
 target-riscv/fpu-custom-riscv/f32_sub.c            |   29 +
 target-riscv/fpu-custom-riscv/f32_to_f64.c         |   47 +
 target-riscv/fpu-custom-riscv/f32_to_i32.c         |   34 +
 .../fpu-custom-riscv/f32_to_i32_r_minMag.c         |   45 +
 target-riscv/fpu-custom-riscv/f32_to_i64.c         |   44 +
 .../fpu-custom-riscv/f32_to_i64_r_minMag.c         |   52 +
 target-riscv/fpu-custom-riscv/f32_to_ui32.c        |   33 +
 .../fpu-custom-riscv/f32_to_ui32_r_minMag.c        |   41 +
 target-riscv/fpu-custom-riscv/f32_to_ui64.c        |   42 +
 .../fpu-custom-riscv/f32_to_ui64_r_minMag.c        |   45 +
 target-riscv/fpu-custom-riscv/f64_add.c            |   29 +
 target-riscv/fpu-custom-riscv/f64_classify.c       |   33 +
 target-riscv/fpu-custom-riscv/f64_div.c            |  104 +
 target-riscv/fpu-custom-riscv/f64_eq.c             |   35 +
 target-riscv/fpu-custom-riscv/f64_eq_signaling.c   |   30 +
 target-riscv/fpu-custom-riscv/f64_isSignalingNaN.c |   16 +
 target-riscv/fpu-custom-riscv/f64_le.c             |   35 +
 target-riscv/fpu-custom-riscv/f64_le_quiet.c       |   40 +
 target-riscv/fpu-custom-riscv/f64_lt.c             |   35 +
 target-riscv/fpu-custom-riscv/f64_lt_quiet.c       |   40 +
 target-riscv/fpu-custom-riscv/f64_mul.c            |   91 +
 target-riscv/fpu-custom-riscv/f64_mulAdd.c         |   25 +
 target-riscv/fpu-custom-riscv/f64_rem.c            |  113 +
 target-riscv/fpu-custom-riscv/f64_roundToInt.c     |   80 +
 target-riscv/fpu-custom-riscv/f64_sqrt.c           |   74 +
 target-riscv/fpu-custom-riscv/f64_sub.c            |   29 +
 target-riscv/fpu-custom-riscv/f64_to_f32.c         |   43 +
 target-riscv/fpu-custom-riscv/f64_to_i32.c         |   30 +
 .../fpu-custom-riscv/f64_to_i32_r_minMag.c         |   50 +
 target-riscv/fpu-custom-riscv/f64_to_i64.c         |   46 +
 .../fpu-custom-riscv/f64_to_i64_r_minMag.c         |   52 +
 target-riscv/fpu-custom-riscv/f64_to_ui32.c        |   29 +
 .../fpu-custom-riscv/f64_to_ui32_r_minMag.c        |   40 +
 target-riscv/fpu-custom-riscv/f64_to_ui64.c        |   41 +
 .../fpu-custom-riscv/f64_to_ui64_r_minMag.c        |   45 +
 target-riscv/fpu-custom-riscv/i32_to_f32.c         |   21 +
 target-riscv/fpu-custom-riscv/i32_to_f64.c         |   31 +
 target-riscv/fpu-custom-riscv/i64_to_f32.c         |   36 +
 target-riscv/fpu-custom-riscv/i64_to_f64.c         |   21 +
 target-riscv/fpu-custom-riscv/internals.h          |  232 +++
 target-riscv/fpu-custom-riscv/platform.h           |   42 +
 target-riscv/fpu-custom-riscv/primitives.h         |  628 ++++++
 target-riscv/fpu-custom-riscv/s_add128.c           |   17 +
 target-riscv/fpu-custom-riscv/s_add192.c           |   30 +
 target-riscv/fpu-custom-riscv/s_addMagsF32.c       |   75 +
 target-riscv/fpu-custom-riscv/s_addMagsF64.c       |   77 +
 target-riscv/fpu-custom-riscv/s_commonNaNToF32UI.c |   17 +
 target-riscv/fpu-custom-riscv/s_commonNaNToF64UI.c |   17 +
 .../fpu-custom-riscv/s_countLeadingZeros32.c       |   22 +
 .../fpu-custom-riscv/s_countLeadingZeros64.c       |   32 +
 .../fpu-custom-riscv/s_countLeadingZeros8.c        |   24 +
 target-riscv/fpu-custom-riscv/s_eq128.c            |   13 +
 .../fpu-custom-riscv/s_estimateDiv128To64.c        |   28 +
 target-riscv/fpu-custom-riscv/s_estimateSqrt32.c   |   37 +
 target-riscv/fpu-custom-riscv/s_f32UIToCommonNaN.c |   25 +
 target-riscv/fpu-custom-riscv/s_f64UIToCommonNaN.c |   25 +
 target-riscv/fpu-custom-riscv/s_isSigNaNF32UI.c    |   13 +
 target-riscv/fpu-custom-riscv/s_isSigNaNF64UI.c    |   15 +
 target-riscv/fpu-custom-riscv/s_le128.c            |   13 +
 target-riscv/fpu-custom-riscv/s_lt128.c            |   13 +
 target-riscv/fpu-custom-riscv/s_mul128By64To192.c  |   20 +
 target-riscv/fpu-custom-riscv/s_mul128To256.c      |   28 +
 target-riscv/fpu-custom-riscv/s_mul64To128.c       |   28 +
 target-riscv/fpu-custom-riscv/s_mulAddF32.c        |  171 ++
 target-riscv/fpu-custom-riscv/s_mulAddF64.c        |  188 ++
 .../fpu-custom-riscv/s_normRoundPackToF32.c        |   24 +
 .../fpu-custom-riscv/s_normRoundPackToF64.c        |   24 +
 .../fpu-custom-riscv/s_normSubnormalF32Sig.c       |   18 +
 .../fpu-custom-riscv/s_normSubnormalF64Sig.c       |   18 +
 .../fpu-custom-riscv/s_propagateNaNF32UI.c         |   25 +
 .../fpu-custom-riscv/s_propagateNaNF64UI.c         |   25 +
 target-riscv/fpu-custom-riscv/s_roundPackToF32.c   |   65 +
 target-riscv/fpu-custom-riscv/s_roundPackToF64.c   |   66 +
 target-riscv/fpu-custom-riscv/s_roundPackToI32.c   |   48 +
 target-riscv/fpu-custom-riscv/s_roundPackToI64.c   |   52 +
 target-riscv/fpu-custom-riscv/s_roundPackToUI32.c  |   44 +
 target-riscv/fpu-custom-riscv/s_roundPackToUI64.c  |   46 +
 .../fpu-custom-riscv/s_shift128ExtraRightJam.c     |   38 +
 target-riscv/fpu-custom-riscv/s_shift128RightJam.c |   31 +
 target-riscv/fpu-custom-riscv/s_shift32RightJam.c  |   15 +
 .../fpu-custom-riscv/s_shift64ExtraRightJam.c      |   23 +
 target-riscv/fpu-custom-riscv/s_shift64RightJam.c  |   15 +
 .../s_shortShift128ExtraRightJam.c                 |   20 +
 .../fpu-custom-riscv/s_shortShift128Left.c         |   16 +
 .../fpu-custom-riscv/s_shortShift128Right.c        |   16 +
 .../fpu-custom-riscv/s_shortShift192Left.c         |   20 +
 .../fpu-custom-riscv/s_shortShift32Right1Jam.c     |   12 +
 .../fpu-custom-riscv/s_shortShift64ExtraRightJam.c |   17 +
 .../fpu-custom-riscv/s_shortShift64RightJam.c      |   12 +
 target-riscv/fpu-custom-riscv/s_sub128.c           |   17 +
 target-riscv/fpu-custom-riscv/s_sub192.c           |   30 +
 target-riscv/fpu-custom-riscv/s_subMagsF32.c       |   81 +
 target-riscv/fpu-custom-riscv/s_subMagsF64.c       |   81 +
 target-riscv/fpu-custom-riscv/softfloat.ac         |    0
 target-riscv/fpu-custom-riscv/softfloat.h          |  235 +++
 target-riscv/fpu-custom-riscv/softfloat.mk.in      |  126 ++
 .../fpu-custom-riscv/softfloat_raiseFlags.c        |   51 +
 target-riscv/fpu-custom-riscv/softfloat_state.c    |   19 +
 target-riscv/fpu-custom-riscv/softfloat_types.h    |   16 +
 target-riscv/fpu-custom-riscv/specialize.h         |  113 +
 target-riscv/fpu-custom-riscv/ui32_to_f32.c        |   25 +
 target-riscv/fpu-custom-riscv/ui32_to_f64.c        |   26 +
 target-riscv/fpu-custom-riscv/ui64_to_f32.c        |   31 +
 target-riscv/fpu-custom-riscv/ui64_to_f64.c        |   25 +
 target-riscv/gdbstub.c                             |  177 ++
 target-riscv/helper.c                              |  356 ++++
 target-riscv/helper.h                              |   82 +
 target-riscv/instmap.h                             |  311 +++
 target-riscv/machine.c                             |   91 +
 target-riscv/op_helper.c                           | 1037 ++++++++++
 target-riscv/riscv-defs.h                          |   14 +
 target-riscv/translate.c                           | 2155 ++++++++++++++++++++
 target-riscv/translate_init.c                      |   63 +
 174 files changed, 13518 insertions(+), 1 deletion(-)
 create mode 100644 default-configs/riscv-linux-user.mak
 create mode 100644 default-configs/riscv-softmmu.mak
 create mode 100644 disas/riscv.c
 create mode 100644 hw/riscv/Makefile.objs
 create mode 100644 hw/riscv/cputimer.c
 create mode 100644 hw/riscv/htif/frontend.c
 create mode 100644 hw/riscv/htif/htif.c
 create mode 100644 hw/riscv/riscv_board.c
 create mode 100644 hw/riscv/riscv_int.c
 create mode 100644 hw/riscv/softint.c
 create mode 100644 include/hw/riscv/bios.h
 create mode 100644 include/hw/riscv/cpudevs.h
 create mode 100644 include/hw/riscv/cputimer.h
 create mode 100644 include/hw/riscv/htif/frontend.h
 create mode 100644 include/hw/riscv/htif/htif.h
 create mode 100644 include/hw/riscv/riscv.h
 create mode 100644 include/hw/riscv/softint.h
 create mode 100644 target-riscv/Makefile.objs
 create mode 100644 target-riscv/TODO
 create mode 100644 target-riscv/cpu-qom.h
 create mode 100644 target-riscv/cpu.c
 create mode 100644 target-riscv/cpu.h
 create mode 100755 target-riscv/fpu-custom-riscv/8086/OLD-specialize.c
 create mode 100755 target-riscv/fpu-custom-riscv/8086/OLD-specialize.h
 create mode 100755 target-riscv/fpu-custom-riscv/8086/platform.h
 create mode 100755 target-riscv/fpu-custom-riscv/8086/s_commonNaNToF32UI.c
 create mode 100755 target-riscv/fpu-custom-riscv/8086/s_commonNaNToF64UI.c
 create mode 100755 target-riscv/fpu-custom-riscv/8086/s_f32UIToCommonNaN.c
 create mode 100755 target-riscv/fpu-custom-riscv/8086/s_f64UIToCommonNaN.c
 create mode 100755 target-riscv/fpu-custom-riscv/8086/s_isSigNaNF32UI.c
 create mode 100755 target-riscv/fpu-custom-riscv/8086/s_isSigNaNF64UI.c
 create mode 100755 target-riscv/fpu-custom-riscv/8086/s_propagateNaNF32UI.c
 create mode 100755 target-riscv/fpu-custom-riscv/8086/s_propagateNaNF64UI.c
 create mode 100755 target-riscv/fpu-custom-riscv/8086/softfloat_raiseFlags.c
 create mode 100755 target-riscv/fpu-custom-riscv/8086/softfloat_types.h
 create mode 100755 target-riscv/fpu-custom-riscv/8086/specialize.h
 create mode 100755 target-riscv/fpu-custom-riscv/f32_add.c
 create mode 100755 target-riscv/fpu-custom-riscv/f32_classify.c
 create mode 100755 target-riscv/fpu-custom-riscv/f32_div.c
 create mode 100755 target-riscv/fpu-custom-riscv/f32_eq.c
 create mode 100755 target-riscv/fpu-custom-riscv/f32_eq_signaling.c
 create mode 100755 target-riscv/fpu-custom-riscv/f32_isSignalingNaN.c
 create mode 100755 target-riscv/fpu-custom-riscv/f32_le.c
 create mode 100755 target-riscv/fpu-custom-riscv/f32_le_quiet.c
 create mode 100755 target-riscv/fpu-custom-riscv/f32_lt.c
 create mode 100755 target-riscv/fpu-custom-riscv/f32_lt_quiet.c
 create mode 100755 target-riscv/fpu-custom-riscv/f32_mul.c
 create mode 100755 target-riscv/fpu-custom-riscv/f32_mulAdd.c
 create mode 100755 target-riscv/fpu-custom-riscv/f32_rem.c
 create mode 100755 target-riscv/fpu-custom-riscv/f32_roundToInt.c
 create mode 100755 target-riscv/fpu-custom-riscv/f32_sqrt.c
 create mode 100755 target-riscv/fpu-custom-riscv/f32_sub.c
 create mode 100755 target-riscv/fpu-custom-riscv/f32_to_f64.c
 create mode 100755 target-riscv/fpu-custom-riscv/f32_to_i32.c
 create mode 100755 target-riscv/fpu-custom-riscv/f32_to_i32_r_minMag.c
 create mode 100755 target-riscv/fpu-custom-riscv/f32_to_i64.c
 create mode 100755 target-riscv/fpu-custom-riscv/f32_to_i64_r_minMag.c
 create mode 100755 target-riscv/fpu-custom-riscv/f32_to_ui32.c
 create mode 100755 target-riscv/fpu-custom-riscv/f32_to_ui32_r_minMag.c
 create mode 100755 target-riscv/fpu-custom-riscv/f32_to_ui64.c
 create mode 100755 target-riscv/fpu-custom-riscv/f32_to_ui64_r_minMag.c
 create mode 100755 target-riscv/fpu-custom-riscv/f64_add.c
 create mode 100755 target-riscv/fpu-custom-riscv/f64_classify.c
 create mode 100755 target-riscv/fpu-custom-riscv/f64_div.c
 create mode 100755 target-riscv/fpu-custom-riscv/f64_eq.c
 create mode 100755 target-riscv/fpu-custom-riscv/f64_eq_signaling.c
 create mode 100755 target-riscv/fpu-custom-riscv/f64_isSignalingNaN.c
 create mode 100755 target-riscv/fpu-custom-riscv/f64_le.c
 create mode 100755 target-riscv/fpu-custom-riscv/f64_le_quiet.c
 create mode 100755 target-riscv/fpu-custom-riscv/f64_lt.c
 create mode 100755 target-riscv/fpu-custom-riscv/f64_lt_quiet.c
 create mode 100755 target-riscv/fpu-custom-riscv/f64_mul.c
 create mode 100755 target-riscv/fpu-custom-riscv/f64_mulAdd.c
 create mode 100755 target-riscv/fpu-custom-riscv/f64_rem.c
 create mode 100755 target-riscv/fpu-custom-riscv/f64_roundToInt.c
 create mode 100755 target-riscv/fpu-custom-riscv/f64_sqrt.c
 create mode 100755 target-riscv/fpu-custom-riscv/f64_sub.c
 create mode 100755 target-riscv/fpu-custom-riscv/f64_to_f32.c
 create mode 100755 target-riscv/fpu-custom-riscv/f64_to_i32.c
 create mode 100755 target-riscv/fpu-custom-riscv/f64_to_i32_r_minMag.c
 create mode 100755 target-riscv/fpu-custom-riscv/f64_to_i64.c
 create mode 100755 target-riscv/fpu-custom-riscv/f64_to_i64_r_minMag.c
 create mode 100755 target-riscv/fpu-custom-riscv/f64_to_ui32.c
 create mode 100755 target-riscv/fpu-custom-riscv/f64_to_ui32_r_minMag.c
 create mode 100755 target-riscv/fpu-custom-riscv/f64_to_ui64.c
 create mode 100755 target-riscv/fpu-custom-riscv/f64_to_ui64_r_minMag.c
 create mode 100755 target-riscv/fpu-custom-riscv/i32_to_f32.c
 create mode 100755 target-riscv/fpu-custom-riscv/i32_to_f64.c
 create mode 100755 target-riscv/fpu-custom-riscv/i64_to_f32.c
 create mode 100755 target-riscv/fpu-custom-riscv/i64_to_f64.c
 create mode 100755 target-riscv/fpu-custom-riscv/internals.h
 create mode 100755 target-riscv/fpu-custom-riscv/platform.h
 create mode 100755 target-riscv/fpu-custom-riscv/primitives.h
 create mode 100755 target-riscv/fpu-custom-riscv/s_add128.c
 create mode 100755 target-riscv/fpu-custom-riscv/s_add192.c
 create mode 100755 target-riscv/fpu-custom-riscv/s_addMagsF32.c
 create mode 100755 target-riscv/fpu-custom-riscv/s_addMagsF64.c
 create mode 100755 target-riscv/fpu-custom-riscv/s_commonNaNToF32UI.c
 create mode 100755 target-riscv/fpu-custom-riscv/s_commonNaNToF64UI.c
 create mode 100755 target-riscv/fpu-custom-riscv/s_countLeadingZeros32.c
 create mode 100755 target-riscv/fpu-custom-riscv/s_countLeadingZeros64.c
 create mode 100755 target-riscv/fpu-custom-riscv/s_countLeadingZeros8.c
 create mode 100755 target-riscv/fpu-custom-riscv/s_eq128.c
 create mode 100755 target-riscv/fpu-custom-riscv/s_estimateDiv128To64.c
 create mode 100755 target-riscv/fpu-custom-riscv/s_estimateSqrt32.c
 create mode 100755 target-riscv/fpu-custom-riscv/s_f32UIToCommonNaN.c
 create mode 100755 target-riscv/fpu-custom-riscv/s_f64UIToCommonNaN.c
 create mode 100755 target-riscv/fpu-custom-riscv/s_isSigNaNF32UI.c
 create mode 100755 target-riscv/fpu-custom-riscv/s_isSigNaNF64UI.c
 create mode 100755 target-riscv/fpu-custom-riscv/s_le128.c
 create mode 100755 target-riscv/fpu-custom-riscv/s_lt128.c
 create mode 100755 target-riscv/fpu-custom-riscv/s_mul128By64To192.c
 create mode 100755 target-riscv/fpu-custom-riscv/s_mul128To256.c
 create mode 100755 target-riscv/fpu-custom-riscv/s_mul64To128.c
 create mode 100755 target-riscv/fpu-custom-riscv/s_mulAddF32.c
 create mode 100755 target-riscv/fpu-custom-riscv/s_mulAddF64.c
 create mode 100755 target-riscv/fpu-custom-riscv/s_normRoundPackToF32.c
 create mode 100755 target-riscv/fpu-custom-riscv/s_normRoundPackToF64.c
 create mode 100755 target-riscv/fpu-custom-riscv/s_normSubnormalF32Sig.c
 create mode 100755 target-riscv/fpu-custom-riscv/s_normSubnormalF64Sig.c
 create mode 100755 target-riscv/fpu-custom-riscv/s_propagateNaNF32UI.c
 create mode 100755 target-riscv/fpu-custom-riscv/s_propagateNaNF64UI.c
 create mode 100755 target-riscv/fpu-custom-riscv/s_roundPackToF32.c
 create mode 100755 target-riscv/fpu-custom-riscv/s_roundPackToF64.c
 create mode 100755 target-riscv/fpu-custom-riscv/s_roundPackToI32.c
 create mode 100755 target-riscv/fpu-custom-riscv/s_roundPackToI64.c
 create mode 100755 target-riscv/fpu-custom-riscv/s_roundPackToUI32.c
 create mode 100755 target-riscv/fpu-custom-riscv/s_roundPackToUI64.c
 create mode 100755 target-riscv/fpu-custom-riscv/s_shift128ExtraRightJam.c
 create mode 100755 target-riscv/fpu-custom-riscv/s_shift128RightJam.c
 create mode 100755 target-riscv/fpu-custom-riscv/s_shift32RightJam.c
 create mode 100755 target-riscv/fpu-custom-riscv/s_shift64ExtraRightJam.c
 create mode 100755 target-riscv/fpu-custom-riscv/s_shift64RightJam.c
 create mode 100755 target-riscv/fpu-custom-riscv/s_shortShift128ExtraRightJam.c
 create mode 100755 target-riscv/fpu-custom-riscv/s_shortShift128Left.c
 create mode 100755 target-riscv/fpu-custom-riscv/s_shortShift128Right.c
 create mode 100755 target-riscv/fpu-custom-riscv/s_shortShift192Left.c
 create mode 100755 target-riscv/fpu-custom-riscv/s_shortShift32Right1Jam.c
 create mode 100755 target-riscv/fpu-custom-riscv/s_shortShift64ExtraRightJam.c
 create mode 100755 target-riscv/fpu-custom-riscv/s_shortShift64RightJam.c
 create mode 100755 target-riscv/fpu-custom-riscv/s_sub128.c
 create mode 100755 target-riscv/fpu-custom-riscv/s_sub192.c
 create mode 100755 target-riscv/fpu-custom-riscv/s_subMagsF32.c
 create mode 100755 target-riscv/fpu-custom-riscv/s_subMagsF64.c
 create mode 100644 target-riscv/fpu-custom-riscv/softfloat.ac
 create mode 100755 target-riscv/fpu-custom-riscv/softfloat.h
 create mode 100644 target-riscv/fpu-custom-riscv/softfloat.mk.in
 create mode 100755 target-riscv/fpu-custom-riscv/softfloat_raiseFlags.c
 create mode 100755 target-riscv/fpu-custom-riscv/softfloat_state.c
 create mode 100755 target-riscv/fpu-custom-riscv/softfloat_types.h
 create mode 100755 target-riscv/fpu-custom-riscv/specialize.h
 create mode 100755 target-riscv/fpu-custom-riscv/ui32_to_f32.c
 create mode 100755 target-riscv/fpu-custom-riscv/ui32_to_f64.c
 create mode 100755 target-riscv/fpu-custom-riscv/ui64_to_f32.c
 create mode 100755 target-riscv/fpu-custom-riscv/ui64_to_f64.c
 create mode 100644 target-riscv/gdbstub.c
 create mode 100644 target-riscv/helper.c
 create mode 100644 target-riscv/helper.h
 create mode 100644 target-riscv/instmap.h
 create mode 100644 target-riscv/machine.c
 create mode 100644 target-riscv/op_helper.c
 create mode 100644 target-riscv/riscv-defs.h
 create mode 100644 target-riscv/translate.c
 create mode 100644 target-riscv/translate_init.c

diff --git a/arch_init.c b/arch_init.c
index 38f5fb9..4608ce4 100644
--- a/arch_init.c
+++ b/arch_init.c
@@ -59,6 +59,8 @@ int graphic_depth = 32;
 #define QEMU_ARCH QEMU_ARCH_MICROBLAZE
 #elif defined(TARGET_MIPS)
 #define QEMU_ARCH QEMU_ARCH_MIPS
+#elif defined(TARGET_RISCV)
+#define QEMU_ARCH QEMU_ARCH_RISCV
 #elif defined(TARGET_MOXIE)
 #define QEMU_ARCH QEMU_ARCH_MOXIE
 #elif defined(TARGET_OPENRISC)
diff --git a/configure b/configure
index b9552fd..00541ca 100755
--- a/configure
+++ b/configure
@@ -518,6 +518,8 @@ elif check_define _ARCH_PPC ; then
   fi
 elif check_define __mips__ ; then
   cpu="mips"
+elif check_define __riscv__ ; then
+  cpu="riscv"
 elif check_define __ia64__ ; then
   cpu="ia64"
 elif check_define __s390__ ; then
@@ -558,6 +560,9 @@ case "$cpu" in
   mips*)
     cpu="mips"
   ;;
+  riscv*)
+    cpu="riscv"
+  ;;
   sparc|sun4[cdmuv])
     cpu="sparc"
   ;;
@@ -5600,6 +5605,10 @@ case "$target_name" in
     TARGET_BASE_ARCH=mips
     echo "TARGET_ABI_MIPSN64=y" >> $config_target_mak
   ;;
+  riscv)
+    TARGET_ARCH=riscv
+    echo "TARGET_ABI_RISCV=y" >> $config_target_mak
+  ;;
   moxie)
   ;;
   or32)
@@ -5804,6 +5813,10 @@ for i in $ARCH $TARGET_BASE_ARCH ; do
   ppc*)
     disas_config "PPC"
   ;;
+  riscv*)
+    echo "CONFIG_RISCV_DIS=y"  >> $config_target_mak
+    echo "CONFIG_RISCV_DIS=y"  >> config-all-disas.mak
+  ;;
   s390*)
     disas_config "S390"
   ;;
diff --git a/cpus.c b/cpus.c
index 43676fa..e38e8dd 100644
--- a/cpus.c
+++ b/cpus.c
@@ -1540,6 +1540,9 @@ CpuInfoList *qmp_query_cpus(Error **errp)
 #elif defined(TARGET_SPARC)
         SPARCCPU *sparc_cpu = SPARC_CPU(cpu);
         CPUSPARCState *env = &sparc_cpu->env;
+#elif defined(TARGET_RISCV)
+        RISCVCPU *riscv_cpu = RISCV_CPU(cpu);
+        CPURISCVState *env = &riscv_cpu->env;
 #elif defined(TARGET_MIPS)
         MIPSCPU *mips_cpu = MIPS_CPU(cpu);
         CPUMIPSState *env = &mips_cpu->env;
@@ -1574,6 +1577,9 @@ CpuInfoList *qmp_query_cpus(Error **errp)
 #elif defined(TARGET_TRICORE)
         info->value->has_PC = true;
         info->value->PC = env->PC;
+#elif defined(TARGET_RISCV)
+        info->value->has_PC = true;
+        info->value->PC = env->active_tc.PC;
 #endif
 
         /* XXX: waiting for the qapi to support GSList */
diff --git a/default-configs/riscv-linux-user.mak b/default-configs/riscv-linux-user.mak
new file mode 100644
index 0000000..865b362
--- /dev/null
+++ b/default-configs/riscv-linux-user.mak
@@ -0,0 +1 @@
+# Default configuration for riscv-linux-user
diff --git a/default-configs/riscv-softmmu.mak b/default-configs/riscv-softmmu.mak
new file mode 100644
index 0000000..c8b7fa1
--- /dev/null
+++ b/default-configs/riscv-softmmu.mak
@@ -0,0 +1,38 @@
+# Default configuration for riscv-softmmu
+
+#include pci.mak
+#include sound.mak
+#include usb.mak
+#CONFIG_ESP=y
+#CONFIG_VGA=y
+#CONFIG_VGA_PCI=y
+#CONFIG_VGA_ISA=y
+#CONFIG_VGA_ISA_MM=y
+#CONFIG_VGA_CIRRUS=y
+#CONFIG_VMWARE_VGA=y
+CONFIG_SERIAL=y
+#CONFIG_PARALLEL=y
+#CONFIG_I8254=y
+#CONFIG_PCSPK=y
+#CONFIG_PCKBD=y
+#CONFIG_FDC=y
+#CONFIG_ACPI=y
+#CONFIG_APM=y
+#CONFIG_I8257=y
+#CONFIG_PIIX4=y
+#CONFIG_IDE_ISA=y
+#CONFIG_IDE_PIIX=y
+#CONFIG_NE2000_ISA=y
+#CONFIG_RC4030=y
+#CONFIG_DP8393X=y
+#CONFIG_DS1225Y=y
+#CONFIG_MIPSNET=y
+#CONFIG_PFLASH_CFI01=y
+#CONFIG_G364FB=y
+CONFIG_I8259=y
+#CONFIG_JAZZ_LED=y
+#CONFIG_MC146818RTC=y
+#CONFIG_VT82C686=y
+#CONFIG_ISA_TESTDEV=y
+#CONFIG_EMPTY_SLOT=y
+CONFIG_VIRTIO=y
diff --git a/disas.c b/disas.c
index 4e11944..85d0fb9 100644
--- a/disas.c
+++ b/disas.c
@@ -315,6 +315,8 @@ void disas(FILE *out, void *code, unsigned long size)
     print_insn = print_insn_hppa;
 #elif defined(__ia64__)
     print_insn = print_insn_ia64;
+#elif defined(__riscv__)
+    print_insn = print_insn_riscv;
 #endif
     if (print_insn == NULL) {
         print_insn = print_insn_od_host;
diff --git a/disas/Makefile.objs b/disas/Makefile.objs
index 8dae4da..152a11b 100644
--- a/disas/Makefile.objs
+++ b/disas/Makefile.objs
@@ -18,6 +18,7 @@ common-obj-$(CONFIG_S390_DIS) += s390.o
 common-obj-$(CONFIG_SH4_DIS) += sh4.o
 common-obj-$(CONFIG_SPARC_DIS) += sparc.o
 common-obj-$(CONFIG_LM32_DIS) += lm32.o
+common-obj-$(CONFIG_RISCV_DIS) += riscv.o
 
 # TODO: As long as the TCG interpreter and its generated code depend
 # on the QEMU target, we cannot compile the disassembler here.
diff --git a/disas/riscv.c b/disas/riscv.c
new file mode 100644
index 0000000..fe27082
--- /dev/null
+++ b/disas/riscv.c
@@ -0,0 +1,46 @@
+#include "disas/bfd.h"
+
+#define USE_SPIKE_DASM
+
+/*
+ * Print RISC-V instructions from the disassembly buffer, 4 bytes at a time.
+ * Emits each word as a little-endian hex dump; when USE_SPIKE_DASM is
+ * defined, additionally pipes each word through the external spike-dasm
+ * tool for a textual disassembly.
+ * Returns the number of bytes in the buffer, per disassemble_info contract.
+ */
+int print_insn_riscv(bfd_vma pc, disassemble_info *info)
+{
+    int i, n = info->buffer_length;
+    int j;
+    uint8_t *buf = g_malloc(n);
+
+    #ifdef USE_SPIKE_DASM
+    int buflen = 100;
+    char * runbuf = g_malloc(buflen); // TODO len
+    #endif
+
+    info->read_memory_func(pc, buf, n, info);
+
+    const char *prefix = "RISC-V Inst";
+
+    // stop at the last complete 4-byte word: the inner loop reads buf[j+3],
+    // so "j < n" would read past the end of buf when n % 4 != 0
+    for (j = 0; j + 4 <= n; j += 4) {
+        info->fprintf_func(info->stream, "\n%s: 0x", prefix);
+
+        // little-endian
+        for (i = 3; i >= 0; --i) {
+            info->fprintf_func(info->stream, "%02x", buf[j+i]);
+        }
+
+        // use spike-dasm
+        #ifdef USE_SPIKE_DASM
+        info->fprintf_func(info->stream, "\n");
+        snprintf(runbuf, buflen,
+                "echo 'DASM(%02x%02x%02x%02x)\n' | spike-dasm 1>&2",
+                buf[j+3], buf[j+2], buf[j+1], buf[j+0]);
+        // NOTE(review): building a shell command from instruction bytes and
+        // running it via system() is debug-only scaffolding; keep it behind
+        // USE_SPIKE_DASM and never enable it for untrusted guests.
+        int res = system(runbuf);
+        if (res) {
+            printf("spike-dasm error\n");
+            exit(1);
+        }
+        #endif
+    }
+
+    // runbuf only exists when USE_SPIKE_DASM is defined; freeing it
+    // unconditionally was a compile error in the !USE_SPIKE_DASM build
+    #ifdef USE_SPIKE_DASM
+    g_free(runbuf);
+    #endif
+    g_free(buf);
+    return n;
+}
+
+
diff --git a/hw/riscv/Makefile.objs b/hw/riscv/Makefile.objs
new file mode 100644
index 0000000..8b57734
--- /dev/null
+++ b/hw/riscv/Makefile.objs
@@ -0,0 +1,5 @@
+obj-y += riscv_board.o
+obj-y += cputimer.o riscv_int.o
+obj-y += htif/htif.o
+obj-y += htif/frontend.o
+obj-y += softint.o
diff --git a/hw/riscv/cputimer.c b/hw/riscv/cputimer.c
new file mode 100644
index 0000000..76cc0d9
--- /dev/null
+++ b/hw/riscv/cputimer.c
@@ -0,0 +1,170 @@
+/*
+ * QEMU RISC-V timer, instret counter support
+ *
+ * Author: Sagar Karandikar, address@hidden
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to 
deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 
FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "hw/hw.h"
+#include "hw/riscv/cpudevs.h"
+#include "hw/riscv/cputimer.h"
+#include "qemu/timer.h"
+
+//#define TIMER_DEBUGGING_RISCV
+
+static uint64_t written_delta;
+static uint64_t instret_delta;
+
+// this is the "right value" for defaults in pk/linux
+// see pk/sbi_entry.S and arch/riscv/kernel/time.c call to
+// clockevents_config_and_register
+#define TIMER_FREQ     10 * 1000 * 1000
+// CPU_FREQ is for instret approximation - say we're running at 1 BIPS
+#define CPU_FREQ    1000 * 1000 * 1000
+
+// Raw timer value: virtual-clock nanoseconds rescaled to TIMER_FREQ ticks.
+// Does NOT include the guest-written offset (written_delta).
+inline uint64_t rtc_read(CPURISCVState *env) {
+    return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), TIMER_FREQ, 
get_ticks_per_sec());
+}
+
+// Timer value as the guest sees it: raw timer plus the offset recorded by
+// cpu_riscv_store_timew().
+inline uint64_t rtc_read_with_delta(CPURISCVState *env) {
+    return rtc_read(env) + written_delta;
+}
+
+// Approximate instructions-retired counter: virtual-clock nanoseconds
+// rescaled to CPU_FREQ (i.e. assumes ~1 instruction per cycle at 1 BIPS,
+// per the CPU_FREQ comment above).
+inline uint64_t instret_read(CPURISCVState *env) {
+    return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), CPU_FREQ, 
get_ticks_per_sec());
+}
+
+// Instret counter as the guest sees it: raw estimate plus instret_delta.
+inline uint64_t instret_read_with_delta(CPURISCVState *env) {
+    return instret_read(env) + instret_delta;
+}
+
+/*
+ * Called when mtimecmp is written to update the QEMU timer or immediately
+ * trigger timer interrupt if mtimecmp <= current timer value.
+ */
+static inline void cpu_riscv_timer_update(CPURISCVState *env)
+{
+    uint64_t next;
+    uint64_t diff;
+
+    uint64_t rtc_r = rtc_read_with_delta(env);
+
+    #ifdef TIMER_DEBUGGING_RISCV
+    printf("timer update: mtimecmp %016lx, timew %016lx\n",
+            env->csr[NEW_CSR_MTIMECMP], rtc_r);
+    #endif
+
+    if (env->csr[NEW_CSR_MTIMECMP] <= rtc_r) {
+        // if we're setting an MTIMECMP value in the "past", immediately raise
+        // the timer interrupt
+        // NOTE(review): irq[7] is presumably the machine timer interrupt
+        // line (mirroring MIP_MTIP) -- confirm against the board wiring.
+        env->csr[NEW_CSR_MIP] |= MIP_MTIP;
+        qemu_irq_raise(env->irq[7]);
+        return;
+    }
+
+    // otherwise, set up the future timer interrupt
+    diff = env->csr[NEW_CSR_MTIMECMP] - rtc_r;
+    // back to ns (note args switched in muldiv64)
+    next = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
+        muldiv64(diff, get_ticks_per_sec(), TIMER_FREQ);
+    timer_mod(env->timer, next);
+}
+
+/*
+ * Called by the callback used when the timer set using timer_mod expires.
+ * Should raise the timer interrupt line
+ */
+static inline void cpu_riscv_timer_expire(CPURISCVState *env)
+{
+    // do not call update here -- on expiry we only set the pending bit and
+    // raise the line; re-arming happens on the next mtimecmp write
+    env->csr[NEW_CSR_MIP] |= MIP_MTIP;
+    qemu_irq_raise(env->irq[7]);
+}
+
+
+// Guest write to timew: record the offset between the requested value and
+// the raw timer, so rtc_read_with_delta() returns written value + elapsed.
+inline void cpu_riscv_store_timew(CPURISCVState *env, uint64_t val_to_write) {
+    #ifdef TIMER_DEBUGGING_RISCV
+    printf("write timew: 0x%016lx\n", val_to_write);
+    #endif
+
+    written_delta = val_to_write - rtc_read(env);
+}
+
+// Guest write to instretw: record the offset between the requested value
+// and the raw instret estimate, so instret_read_with_delta() returns
+// written value + instructions retired since.
+inline void cpu_riscv_store_instretw(CPURISCVState *env, uint64_t val_to_write) {
+    #ifdef TIMER_DEBUGGING_RISCV
+    printf("write instretw: 0x%016lx\n", val_to_write);
+    #endif
+
+    // Fix: update instret_delta, not written_delta. The original stored
+    // into written_delta, which (a) silently corrupted the timer offset and
+    // (b) was never read back by instret_read_with_delta().
+    instret_delta = val_to_write - instret_read(env);
+}
+
+// Read the instret counter, including any guest-written offset.
+inline uint64_t cpu_riscv_read_instretw(CPURISCVState *env) {
+    uint64_t retval = instret_read_with_delta(env);
+    return retval;
+}
+
+// Read mtime. NOTE(review): returns the raw timer without written_delta,
+// unlike cpu_riscv_read_time() below -- confirm this asymmetry is intended.
+inline uint64_t cpu_riscv_read_mtime(CPURISCVState *env) {
+    uint64_t retval = rtc_read(env);
+    return retval;
+}
+
+// Read stime; same raw (delta-free) value as mtime.
+inline uint64_t cpu_riscv_read_stime(CPURISCVState *env) {
+    uint64_t retval = rtc_read(env);
+    return retval;
+}
+
+// Read the time CSR as the guest sees it (raw timer + written_delta).
+inline uint64_t cpu_riscv_read_time(CPURISCVState *env) {
+    uint64_t retval = rtc_read_with_delta(env);
+    return retval;
+}
+
+// Guest write to mtimecmp: store the compare value, clear any pending
+// timer interrupt, then re-arm (or immediately re-raise) via
+// cpu_riscv_timer_update().
+inline void cpu_riscv_store_compare (CPURISCVState *env, uint64_t value)
+{
+    #ifdef TIMER_DEBUGGING_RISCV
+    uint64_t rtc_r = rtc_read_with_delta(env);
+    printf("wrote mtimecmp %016lx, timew %016lx\n", value, rtc_r);
+    #endif
+
+    env->csr[NEW_CSR_MTIMECMP] = value;
+    // writing mtimecmp acknowledges the pending timer interrupt
+    env->csr[NEW_CSR_MIP] &= ~MIP_MTIP;
+    cpu_riscv_timer_update(env);
+}
+
+/*
+ * Callback used when the timer set using timer_mod expires.
+ * opaque is the CPURISCVState registered in cpu_riscv_clock_init().
+ */
+static void riscv_timer_cb (void *opaque)
+{
+    CPURISCVState *env;
+    env = opaque;
+    cpu_riscv_timer_expire(env);
+}
+
+/*
+ * Initialize clock mechanism.
+ */
+void cpu_riscv_clock_init (CPURISCVState *env)
+{
+    env->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &riscv_timer_cb, env);
+    env->csr[NEW_CSR_MTIMECMP] = 0;
+    cpu_riscv_store_timew(env, 1);
+    // NOTE(review): store_timew(env, 1) above sets written_delta, which the
+    // next line immediately overwrites with 0, making that call a no-op --
+    // confirm which initial offset is intended.
+    written_delta = 0;
+    instret_delta = 0;
+}
diff --git a/hw/riscv/htif/frontend.c b/hw/riscv/htif/frontend.c
new file mode 100644
index 0000000..f8c7ac0
--- /dev/null
+++ b/hw/riscv/htif/frontend.c
@@ -0,0 +1,215 @@
+/*
+ * QEMU RISC-V Syscall Proxy Emulation
+ *
+ * Author: Sagar Karandikar, address@hidden
+ *
+ * This provides a set of functions used by the HTIF Syscall Proxy device.
+ * This is used by bbl and pk. Currently, only syscalls needed by bbl to
+ * boot Linux are supported.
+ *
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to 
deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 
FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "hw/riscv/htif/htif.h"
+#include "hw/riscv/htif/frontend.h"
+#include <inttypes.h>
+#include <fcntl.h>
+#include <unistd.h>
+
+//#define DEBUG_FRONTEND_RISCV
+
+// only supports one fd right now, for the kernel we load
+int real_kernelfd = -1;
+
+#define BBL_AT_FDCWD (-100)
+
+/*
+ * HTIF frontend openat: copy the pathname (len bytes, assumed to include
+ * the NUL terminator -- TODO confirm against bbl's calling convention)
+ * from guest RAM at physical address pname, then open it on the host.
+ * Returns the fixed guest-visible fd 3 on success (only one open file is
+ * tracked, see real_kernelfd above), or -1 on failure.
+ * NOTE(review): "%ld" on uint64_t assumes an LP64 host; PRIu64 would be
+ * portable.
+ */
+uint64_t sys_openat(HTIFState *htifstate, uint64_t dirfd, uint64_t pname,
+        uint64_t len, uint64_t flags, uint64_t mode) {
+
+    void * base = htifstate->main_mem_ram_ptr + (uintptr_t)pname;
+
+    char name[len];
+    int i;
+    for (i = 0; i < len; i++) {
+        name[i] = ldub_p((void*)(base + i));
+    }
+
+    // in case host OS has different val for AT_FDCWD, e.g. OS X
+    dirfd = dirfd == BBL_AT_FDCWD ? AT_FDCWD : dirfd;
+
+    #ifdef DEBUG_FRONTEND_RISCV
+    fprintf(stderr, "openat: %s\n"
+           "dirfd %ld, flags %ld, mode %ld\n", name, dirfd, flags, mode);
+    #endif
+
+    real_kernelfd = openat(dirfd, name, flags, mode);
+
+    #ifdef DEBUG_FRONTEND_RISCV
+    fprintf(stderr, "got real fd: %d\n", real_kernelfd);
+    #endif
+
+    if (real_kernelfd != -1) {
+        // always give fd 3 to bbl, until we have a better tracking mechanism
+        return 3;
+    }
+    return -1;
+}
+
+
+/*
+ * HTIF frontend close: only the fixed fd 3 handed out by sys_openat is
+ * valid. Closes the backing host fd and marks the slot free.
+ * Returns 0 on success, -1 if the host close() fails.
+ */
+uint64_t sys_close(HTIFState *htifstate, uint64_t fd) {
+    if (fd != 3) {
+        fprintf(stderr, "INVALID close fd: %ld. only 3 allowed\n", fd);
+        fprintf(stderr, "Did you supply the right kernel using -append?\n");
+        exit(1);
+    }
+
+    if (close(real_kernelfd) < 0) {
+        return -1;
+    }
+    real_kernelfd = -1;
+    return 0;
+}
+
+/*
+ * Used by bbl to print: copy len bytes from guest RAM at pbuf and write
+ * them to the host's stdout. Only fds 1 and 2 are accepted; anything
+ * else aborts QEMU. Returns len.
+ * NOTE(review): malloc result is unchecked, and printf("%s") stops at an
+ * embedded NUL even though len bytes were copied.
+ */
+uint64_t sys_write(HTIFState *htifstate, uint64_t fd, uint64_t pbuf, uint64_t 
len) {
+
+    int i;
+    char * printbuf = malloc(sizeof(char)*(len+1));
+    printbuf[len] = '\0'; // null term for easy printing
+    void * base = htifstate->main_mem_ram_ptr + (uintptr_t)pbuf;
+    for (i = 0; i < len; i++) {
+        printbuf[i] = ldub_p((void*)(base + i));
+    }
+
+    switch(fd) {
+        case 1:
+        case 2:
+            printf("%s", printbuf);
+            break;
+        default:
+            fprintf(stderr, "INVALID SYS_WRITE\n");
+            exit(1);
+    }
+    free(printbuf);
+    return len;
+}
+
+/*
+ * HTIF frontend pread on the kernel fd: read len bytes at offset off
+ * from the host file and copy them into guest RAM at pbuf. Returns the
+ * number of bytes actually read.
+ * NOTE(review): pread() returns ssize_t; assigning it to size_t turns a
+ * -1 error into a huge count, and the copy loop below would then read
+ * far past buf. Check for < 0 before copying.
+ */
+uint64_t sys_pread(HTIFState *htifstate, uint64_t fd, uint64_t pbuf, uint64_t 
len, uint64_t off) {
+    #ifdef DEBUG_FRONTEND_RISCV
+    fprintf(stderr, "read fd: %ld, len: %ld, off: %ld\n", fd, len, off);
+    #endif
+    if (fd != 3) {
+        fprintf(stderr, "INVALID pread fd: %ld. only 3 allowed\n", fd);
+        exit(1);
+    }
+
+    char * buf = malloc(sizeof(char)*len);
+    size_t bytes_read = pread(real_kernelfd, buf, len, off);
+
+    void * base = htifstate->main_mem_ram_ptr + (uintptr_t)pbuf;
+    int i;
+    for (i = 0; i < bytes_read; i++) {
+      stb_p((void*)(base + i), buf[i]);
+    }
+    free(buf);
+    return bytes_read;
+}
+
+/*
+ * Terminate QEMU with the guest's exit code, encoded spike-style as
+ * (code << 1) | 1. Never returns.
+ */
+uint64_t sys_exit(HTIFState *htifstate, uint64_t code) {
+    printf("sys_exit. code: %ld\n", code << 1 | 1);
+    exit(code << 1 | 1);
+}
+
+/*
+ * HTIF frontend getmainvars: build the argc/argv block bbl expects at
+ * guest physical address pbuf -- five 64-bit words (argc = 2, pointers
+ * to arg0/arg1, two zeros) followed by the strings "bbl" and the kernel
+ * command line. Always returns 0.
+ * NOTE(review): the limit argument is never checked before writing.
+ */
+uint64_t sys_getmainvars(HTIFState * htifstate, uint64_t pbuf, uint64_t limit) 
{
+    #ifdef DEBUG_FRONTEND_RISCV
+    fprintf(stderr, "%s\n", htifstate->kernel_cmdline);
+    #endif
+
+    void * base = htifstate->main_mem_ram_ptr + (uintptr_t)pbuf;
+
+    // assume args are bbl + some kernel for now
+    // later, do the right thing
+    const char * arg0 = "bbl";
+    const char * arg1 = htifstate->kernel_cmdline;
+
+    #define WORDS_LEN 5
+    #define START_ARGS (WORDS_LEN*8)
+    uint64_t words[WORDS_LEN] = {2, START_ARGS+pbuf, START_ARGS+pbuf+4, 0, 0};
+    int i;
+    for (i = 0; i < WORDS_LEN; i++) {
+        stq_p((void*)(base+i*8), words[i]);
+    }
+    for (i = 0; i < 4; i++) {
+        stb_p((void*)(base + START_ARGS + i), arg0[i]);
+    }
+    for (i = 0; i < strlen(arg1)+1; i++) {
+        stb_p((void*)(base + START_ARGS+4 + i), arg1[i]);
+    }
+    // currently no support for > 2 args
+    return 0;
+}
+
+
+/*
+ * Dispatch one HTIF frontend syscall. payload is the guest physical
+ * address of an 8 x 64-bit "magic memory" frame: mm[0] is the syscall
+ * number, mm[1..7] are arguments. The handler's return value is written
+ * back to mm[0]. Always returns 1 (used as the tohost response);
+ * unknown syscall numbers abort QEMU.
+ */
+int handle_frontend_syscall(HTIFState *htifstate, uint64_t payload) {
+
+    uint64_t mm[8];
+    int i;
+    void * base = htifstate->main_mem_ram_ptr + (uintptr_t)payload;
+    for (i = 0; i < 8; i++) {
+        mm[i] = ldq_p((void*)(base + i*8));
+    }
+
+    #ifdef DEBUG_FRONTEND_RISCV
+    for (i = 0; i < 8; i++) {
+        fprintf(stderr, "elem %d, val 0x%016lx\n", i, mm[i]);
+    }
+    #endif
+
+    uint64_t retval = -1;
+    switch(mm[0]) {
+        case RV_FSYSCALL_sys_openat:
+            retval = sys_openat(htifstate, mm[1], mm[2], mm[3], mm[4], mm[5]);
+            break;
+        case RV_FSYSCALL_sys_close:
+            retval = sys_close(htifstate, mm[1]);
+            break;
+        case RV_FSYSCALL_sys_write:
+            retval = sys_write(htifstate, mm[1], mm[2], mm[3]);
+            break;
+        case RV_FSYSCALL_sys_pread:
+            retval = sys_pread(htifstate, mm[1], mm[2], mm[3], mm[4]);
+            break;
+        case RV_FSYSCALL_sys_exit:
+            retval = sys_exit(htifstate, mm[1]);
+            break;
+        case RV_FSYSCALL_sys_getmainvars:
+            retval = sys_getmainvars(htifstate, mm[1], mm[2]);
+            break;
+        default:
+            fprintf(stderr, "FRONTEND SYSCALL %ld NOT IMPLEMENTED\n", mm[0]);
+            exit(1);
+    }
+
+    // write retval to mm
+    stq_p((void*)base, retval);
+    return 1;
+}
diff --git a/hw/riscv/htif/htif.c b/hw/riscv/htif/htif.c
new file mode 100644
index 0000000..09d050f
--- /dev/null
+++ b/hw/riscv/htif/htif.c
@@ -0,0 +1,459 @@
+/*
+ * QEMU RISC-V Host Target Interface (HTIF) Emulation
+ *
+ * Author: Sagar Karandikar, address@hidden
+ *
+ * This provides HTIF device emulation for QEMU. At the moment this allows
+ * for identical copies of bbl/linux to run on both spike and QEMU.
+ *
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to 
deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 
FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "hw/char/serial.h"
+#include "sysemu/char.h"
+#include "hw/riscv/htif/htif.h"
+#include "hw/riscv/htif/frontend.h"
+#include "qemu/timer.h"
+#include "exec/address-spaces.h"
+#include "qemu/error-report.h"
+#include <fcntl.h>
+#include <unistd.h>
+#include <sys/stat.h>
+#include <inttypes.h>
+
+
+#define ENABLE_CHARDEV
+//#define DEBUG_CHARDEV
+//#define DEBUG_BLKDEV
+//#define DEBUG_HTIF
+
+
+#ifdef ENABLE_CHARDEV
+/*
+ * Called by the char dev to see if HTIF is ready to accept input.
+ * Always reports ready (1); flow control is not implemented.
+ */
+static int htif_can_recv(void *opaque)
+{
+    return 1;
+}
+
+/*
+ * Called by the char dev to supply input to HTIF console.
+ * We assume that we will receive one character at a time.
+ * Builds a fromhost response: the device/cmd bits (top 16) are taken
+ * from the pending console-read request, the low bits carry
+ * 0x100 | char, then the HTIF interrupt is raised.
+ */
+static void htif_recv(void *opaque, const uint8_t *buf, int size)
+{
+    if (size != 1) {
+        return;
+    }
+
+    HTIFState *htifstate = opaque;
+
+    #ifdef DEBUG_CHARDEV
+    if (htifstate->env->csr[NEW_CSR_MFROMHOST] != 0x0) {
+        fprintf(stderr, "recv handler: fromhost was not ready to accept 
input\n");
+        fprintf(stderr, "recv handler: prev value was: %016lx\n", 
htifstate->env->csr[NEW_CSR_MFROMHOST]);
+    }
+    #endif
+
+    uint64_t val_written = htifstate->pending_read;
+    uint64_t resp = 0x100 | *buf;
+
+    htifstate->env->csr[NEW_CSR_MFROMHOST] = (val_written >> 48 << 48) | (resp 
<< 16 >> 16);
+    qemu_irq_raise(htifstate->irq);
+}
+
+/*
+ * Called by the char dev to supply special events to the HTIF console.
+ * Not used for HTIF; only logs the event when chardev debugging is on.
+ */
+static void htif_event(void *opaque, int event)
+{
+    #ifdef DEBUG_CHARDEV
+    fprintf(stderr, "GOT EVENT: %d\n", event);
+    #endif
+}
+#endif
+
+/* VMState pre-save hook: nothing to prepare before migration. */
+static void htif_pre_save(void *opaque)
+{
+    return;
+}
+
+/* VMState post-load hook: nothing to restore beyond the plain fields. */
+static int htif_post_load(void *opaque, int version_id)
+{
+    return 0;
+}
+
+/* Migration state: only the tohost/fromhost MMIO base addresses are saved. */
+const VMStateDescription vmstate_htif = {
+    .name = "htif",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .pre_save = htif_pre_save,
+    .post_load = htif_post_load,
+    .fields      = (VMStateField []) {
+        VMSTATE_UINT64(tohost_addr, HTIFState),
+        VMSTATE_UINT64(fromhost_addr, HTIFState),
+        VMSTATE_END_OF_LIST()
+    },
+};
+
+/*
+ * Copy a NUL-terminated host string into guest RAM at phys_addr,
+ * including the terminator. No bounds check against guest RAM size.
+ */
+static void dma_strcopy(HTIFState *htifstate, char *str, hwaddr phys_addr) {
+    int i = 0;
+    void* base_copy_addr = htifstate->main_mem_ram_ptr+phys_addr;
+    while(*(str+i)) {
+        stb_p((void*)(base_copy_addr + i), *(str + i));
+        i++;
+    }
+    stb_p((void*)(base_copy_addr + i), 0); // store null term
+}
+
+/*
+ * Service a block-device read: fetch the request_t descriptor from guest
+ * RAM at payload, pread() req.size bytes at req.offset from the backing
+ * file, and copy them into guest RAM at req.addr. Returns req.tag so
+ * the caller can complete the request.
+ * NOTE(review): malloc result unchecked; a short read aborts QEMU
+ * rather than retrying.
+ */
+static int htif_block_device_read(HTIFState *htifstate, uint64_t payload) {
+    request_t req;
+    int i;
+    uint8_t* reqptr = (uint8_t*)&req;
+    void *base = htifstate->main_mem_ram_ptr+payload;
+    for (i = 0; i < sizeof(req); i++) {
+        *(reqptr + i) = ldub_p((void*)(base + i));
+    }
+
+    #ifdef DEBUG_BLKDEV
+    fprintf(stderr, "HTIF Block device read:\n");
+    fprintf(stderr, "-addr: %016lx\n", req.addr);
+    fprintf(stderr, "-offset: %016lx\n", req.offset);
+    fprintf(stderr, "-size: %016lx\n", req.size);
+    fprintf(stderr, "-tag: %016lx\n", req.tag);
+    #endif
+
+    uint8_t * copybuf = malloc(req.size * sizeof(uint8_t));
+    if (pread(htifstate->block_fd, copybuf, req.size, req.offset) != req.size) 
{
+        printf("FAILED READ\n");
+        exit(1);
+    }
+
+    base = htifstate->main_mem_ram_ptr + req.addr;
+
+    for (i = 0; i < req.size; i++) {
+        stb_p((void*)(base + i), copybuf[i]);
+    }
+    free(copybuf);
+    return req.tag;
+}
+
+/*
+ * Service a block-device write: fetch the request_t descriptor from
+ * guest RAM at payload, copy req.size bytes out of guest RAM at
+ * req.addr, and pwrite() them at req.offset in the backing file.
+ * Returns req.tag. Same unchecked-malloc / abort-on-short-IO caveats
+ * as the read path.
+ */
+static int htif_block_device_write(HTIFState *htifstate, uint64_t payload) {
+    request_t req;
+    int i;
+    uint8_t* reqptr = (uint8_t*)&req;
+    void* base = htifstate->main_mem_ram_ptr + payload;
+    for (i = 0; i < sizeof(req); i++) {
+        *(reqptr + i) = ldub_p((void*)(base + i));
+    }
+
+    uint8_t * copybuf = malloc(req.size * sizeof(uint8_t));
+
+    base = htifstate->main_mem_ram_ptr + req.addr;
+    for (i = 0; i < req.size; i++) {
+        copybuf[i] = ldub_p((void*)(base + i));
+    }
+
+    if (pwrite(htifstate->block_fd, copybuf, req.size, req.offset) != 
req.size) {
+        printf("FAILED WRITE\n");
+        exit(1);
+    }
+
+    free(copybuf);
+    return req.tag;
+}
+
+/*
+ * Decode and service a completed 64-bit write to mtohost.
+ * Layout: bits 63..56 = device, 55..48 = cmd, 47..0 = payload; for the
+ * 0xFF "identify" command the payload is further split into an address
+ * (payload >> 8) and a selector byte `what`.
+ * Fixed device map: 0 = syscall proxy, 1 = console, 2 = block device,
+ * 3 = end-of-device-list marker. The response is placed in mfromhost
+ * (preserving the device/cmd bits of the request) and the HTIF
+ * interrupt is raised when it is nonzero.
+ */
+static void htif_handle_tohost_write(HTIFState *htifstate, uint64_t 
val_written) {
+
+    #ifdef DEBUG_HTIF
+    fprintf(stderr, "TOHOST WRITE WITH val 0x%016lx\n", val_written);
+    #endif
+
+    uint8_t device = val_written >> 56;
+    uint8_t cmd = val_written >> 48;
+    uint64_t payload = val_written & 0xFFFFFFFFFFFFULL;
+
+    uint64_t addr = payload >> 8;
+    hwaddr real_addr = (hwaddr)addr;
+    uint8_t what = payload & 0xFF;
+    int resp;
+
+    resp = 0; // stop gcc complaining
+    #ifdef DEBUG_HTIF
+    fprintf(stderr, "mtohost write:\n-device: %d\n-cmd: %d\n-what: 
%02lx\n-payload: %016lx\n", device, cmd, payload & 0xFF, payload);
+    #endif
+
+    /*
+     * Currently, there is a fixed mapping of devices:
+     * 0: Syscall Proxy
+     * 1: Console
+     * 2: Block Device
+     */
+    if (unlikely(device == 0x0)) {
+        // frontend syscall handler
+        if (cmd == 0x0) {
+            #ifdef DEBUG_HTIF
+            fprintf(stderr, "frontend syscall handler\n");
+            #endif
+            if (payload & 0x1) {
+                // test result
+                if (payload >> 1) {
+                    printf("*** FAILED *** (exitcode = %016lx)\n", payload >> 
1);
+                } else {
+                    printf("TEST PASSED\n");
+                }
+                exit(payload >> 1);
+            }
+            resp = handle_frontend_syscall(htifstate, payload);
+        } else if (cmd == 0xFF) {
+            // use what
+            if (what == 0xFF) {
+                #ifdef DEBUG_HTIF
+                fprintf(stderr, "registering name\n");
+                #endif
+                dma_strcopy(htifstate, (char*)"syscall_proxy", real_addr);
+            } else if (what == 0x0) {
+                #ifdef DEBUG_HTIF
+                fprintf(stderr, "registering syscall cmd\n");
+                #endif
+                dma_strcopy(htifstate, (char*)"syscall", real_addr);
+            } else {
+                #ifdef DEBUG_HTIF
+                fprintf(stderr, "registering end of cmds list\n");
+                #endif
+                dma_strcopy(htifstate, (char*)"", real_addr);
+            }
+            resp = 0x1; // write to indicate device name placed
+        } else {
+            fprintf(stderr, "HTIF device %d: UNKNOWN COMMAND\n", device);
+            exit(1);
+        }
+    } else if (likely(device == 0x1)) {
+        // HTIF Console
+        if (cmd == 0x0) {
+            // this should be a queue, but not yet implemented as such
+            htifstate->pending_read = val_written;
+            htifstate->env->csr[NEW_CSR_MTOHOST] = 0; // clear to indicate we 
read
+            return;
+        } else if (cmd == 0x1) {
+            #ifdef ENABLE_CHARDEV
+            qemu_chr_fe_write(htifstate->chr, (uint8_t*)&payload, 1);
+            #endif
+            resp = 0x100 | (uint8_t)payload;
+        } else if (cmd == 0xFF) {
+            // use what
+            if (what == 0xFF) {
+                #ifdef DEBUG_HTIF
+                fprintf(stderr, "registering name\n");
+                #endif
+                dma_strcopy(htifstate, (char*)"bcd", real_addr);
+            } else if (what == 0x0) {
+                #ifdef DEBUG_HTIF
+                fprintf(stderr, "registering read cmd\n");
+                #endif
+                dma_strcopy(htifstate, (char*)"read", real_addr);
+            } else if (what == 0x1) {
+                #ifdef DEBUG_HTIF
+                fprintf(stderr, "registering write cmd\n");
+                #endif
+                dma_strcopy(htifstate, (char*)"write", real_addr);
+            } else {
+                #ifdef DEBUG_HTIF
+                fprintf(stderr, "registering end of cmds list\n");
+                #endif
+                dma_strcopy(htifstate, (char*)"", real_addr);
+            }
+            resp = 0x1; // write to indicate device name placed
+        } else {
+            fprintf(stderr, "HTIF device %d: UNKNOWN COMMAND\n", device);
+            exit(1);
+        }
+    } else if (device == 0x2 && htifstate->block_dev_present) {
+        // HTIF Block Device
+        if (unlikely(cmd == 0xFF)) {
+            if (what == 0xFF) { // register
+                dma_strcopy(htifstate, htifstate->real_name, real_addr);
+            } else if (what == 0x0) {
+                dma_strcopy(htifstate, (char*)"read", real_addr);
+            } else if (what == 0x1) {
+                dma_strcopy(htifstate, (char*)"write", real_addr);
+            } else {
+                dma_strcopy(htifstate, (char*)"", real_addr);
+            }
+            resp = 0x1; // write to indicate device name placed
+        } else if (cmd == 0x0) {
+            #ifdef DEBUG_HTIF
+            fprintf(stderr, "DOING DISK READ\n");
+            #endif
+            resp = htif_block_device_read(htifstate, payload);
+        } else if (cmd == 0x1) {
+            #ifdef DEBUG_HTIF
+            fprintf(stderr, "DOING DISK WRITE\n");
+            #endif
+            resp = htif_block_device_write(htifstate, payload);
+        } else {
+            fprintf(stderr, "HTIF device %d: UNKNOWN COMMAND\n", device);
+            exit(1);
+        }
+    } else if (device == 0x3 && cmd == 0xFF && what == 0xFF) { // all other 
devices
+        #ifdef DEBUG_HTIF
+        fprintf(stderr, "registering no device as last\n");
+        #endif
+        stb_p((void*)(htifstate->main_mem_ram_ptr+real_addr), 0);
+        resp = 0x1; // write to indicate device name placed
+    } else {
+        fprintf(stderr, "HTIF UNKNOWN DEVICE OR COMMAND!\n");
+        fprintf(stderr, "-device: %d\n-cmd: %d\n-what: %02lx\n-payload: 
%016lx\n", device, cmd, payload & 0xFF, payload);
+        exit(1);
+    }
+    // NOTE(review): busy-wait on CSR state with no synchronization or
+    // yield -- in single-threaded TCG this loop can never make progress
+    // once entered; confirm the intended semantics.
+    while (!htifstate->fromhost_inprogress && 
htifstate->env->csr[NEW_CSR_MFROMHOST] != 0x0) {
+        // wait
+    }
+    htifstate->env->csr[NEW_CSR_MFROMHOST] = (val_written >> 48 << 48) | (resp 
<< 16 >> 16);
+    htifstate->env->csr[NEW_CSR_MTOHOST] = 0; // clear to indicate we read
+    if (htifstate->env->csr[NEW_CSR_MFROMHOST] != 0) {
+        // raise HTIF interrupt
+        qemu_irq_raise(htifstate->irq);
+    }
+}
+
+// CPU wants to read an HTIF register: 32-bit accesses to the two 64-bit
+// CSR-backed registers -- offsets 0/4 are tohost lo/hi, 8/0xc are
+// fromhost lo/hi. Any other offset aborts QEMU.
+static uint64_t htif_mm_read(void *opaque, hwaddr addr, unsigned size)
+{
+    HTIFState *htifstate = opaque;
+    if (addr == 0x0) {
+        return htifstate->env->csr[NEW_CSR_MTOHOST] & 0xFFFFFFFF;
+    } else if (addr == 0x4) {
+        return (htifstate->env->csr[NEW_CSR_MTOHOST] >> 32) & 0xFFFFFFFF;
+    } else if (addr == 0x8) {
+        return htifstate->env->csr[NEW_CSR_MFROMHOST] & 0xFFFFFFFF;
+    } else if (addr == 0xc) {
+        return (htifstate->env->csr[NEW_CSR_MFROMHOST] >> 32) & 0xFFFFFFFF;
+    } else {
+        printf("Invalid htif register address %016lx\n", (uint64_t)addr);
+        exit(1);
+    }
+}
+
+// CPU wrote to an HTIF register (32-bit halves). A tohost write only
+// takes effect when the high half arrives (offset 4), and only if the
+// low-half write (offset 0) found tohost empty (allow_tohost latch).
+// Fromhost writes are bracketed by fromhost_inprogress; clearing
+// fromhost to zero lowers the HTIF interrupt.
+static void htif_mm_write(void *opaque, hwaddr addr,
+                            uint64_t value, unsigned size)
+{
+    HTIFState *htifstate = opaque;
+    if (addr == 0x0) {
+        if (htifstate->env->csr[NEW_CSR_MTOHOST] == 0x0) {
+            htifstate->allow_tohost = 1;
+            htifstate->env->csr[NEW_CSR_MTOHOST] = value & 0xFFFFFFFF;
+        } else {
+            htifstate->allow_tohost = 0;
+        }
+    } else if (addr == 0x4) {
+        if (htifstate->allow_tohost) {
+            htifstate->env->csr[NEW_CSR_MTOHOST] |= value << 32;
+            htif_handle_tohost_write(htifstate, 
htifstate->env->csr[NEW_CSR_MTOHOST]);
+        }
+    } else if (addr == 0x8) {
+        htifstate->fromhost_inprogress = 1;
+        htifstate->env->csr[NEW_CSR_MFROMHOST] = value & 0xFFFFFFFF;
+    } else if (addr == 0xc) {
+        htifstate->env->csr[NEW_CSR_MFROMHOST] |= value << 32;
+        if (htifstate->env->csr[NEW_CSR_MFROMHOST] == 0x0) {
+            qemu_irq_lower(htifstate->irq);
+        }
+        htifstate->fromhost_inprogress = 0;
+    } else {
+        printf("Invalid htif register address %016lx\n", (uint64_t)addr);
+        exit(1);
+    }
+}
+
+/* MMIO ops table, indexed by endianness constant (only LE is populated). */
+static const MemoryRegionOps htif_mm_ops[3] = {
+    [DEVICE_LITTLE_ENDIAN] = {
+        .read = htif_mm_read,
+        .write = htif_mm_write,
+        .endianness = DEVICE_LITTLE_ENDIAN,
+    },
+};
+
+/*
+ * Create and map the HTIF device: allocate HTIFState, hook up the
+ * chardev handlers, register migration state, and map the two 64-bit
+ * tohost/fromhost registers at `base`. If htifbd_fname is non-NULL
+ * (-hda given), open it as the backing file for the HTIF block device
+ * and build its "disk size=N" identity string.
+ * NOTE(review): open() result is not checked directly (the fstat below
+ * catches a bad fd), and rname/size_str_buf use fixed magic sizes.
+ */
+HTIFState *htif_mm_init(MemoryRegion *address_space, hwaddr base, qemu_irq irq,
+                        MemoryRegion *main_mem, const char* htifbd_fname,
+                                            const char *kernel_cmdline,
+                                            CPURISCVState *env,
+                                            CharDriverState *chr)
+{
+    // TODO: cleanup the constant buffer sizes
+    HTIFState *htifstate;
+    size_t size;
+    char *rname;
+    char size_str_buf[400];
+
+    htifstate = g_malloc0(sizeof(HTIFState));
+    rname = g_malloc0(sizeof(char)*500);
+    htifstate->tohost_addr = base;
+    htifstate->fromhost_addr = base + 0x8;
+    htifstate->irq = irq;
+    htifstate->address_space = address_space;
+    htifstate->main_mem = main_mem;
+    htifstate->main_mem_ram_ptr = memory_region_get_ram_ptr(main_mem);
+    htifstate->env = env;
+    htifstate->chr = chr;
+    htifstate->pending_read = 0;
+    htifstate->allow_tohost = 0;
+    htifstate->fromhost_inprogress = 0;
+
+#ifdef ENABLE_CHARDEV
+    qemu_chr_add_handlers(htifstate->chr, htif_can_recv, htif_recv, 
htif_event, htifstate);
+#endif
+
+    vmstate_register(NULL, base, &vmstate_htif, htifstate);
+
+    memory_region_init_io(&htifstate->io, NULL, 
&htif_mm_ops[DEVICE_LITTLE_ENDIAN],
+            htifstate, "htif", 16 /* 2 64-bit registers */);
+    memory_region_add_subregion(address_space, base, &htifstate->io);
+
+    // save kernel_cmdline for sys_getmainvars
+    htifstate->kernel_cmdline = malloc(strlen(kernel_cmdline)+1);
+    strcpy(htifstate->kernel_cmdline, kernel_cmdline);
+
+    if (NULL == htifbd_fname) { // NULL means no -hda specified
+        htifstate->block_dev_present = 0;
+        return htifstate;
+    }
+
+    htifstate->block_fname = htifbd_fname;
+    htifstate->block_fd = open(htifstate->block_fname, O_RDWR);
+
+    struct stat st;
+    if (fstat(htifstate->block_fd, &st) < 0) {
+        fprintf(stderr, "WARN: Could not stat %s, continuing without block 
device.\n",
+                htifstate->block_fname);
+        htifstate->block_dev_present = 0;
+        return htifstate;
+    }
+    size = st.st_size;
+    strcpy(rname, "disk size=");
+    snprintf(size_str_buf, sizeof(size_str_buf), "%zu", size);
+    strcat(rname, size_str_buf);
+    htifstate->real_name = rname;
+    htifstate->block_dev_present = 1;
+    return htifstate;
+}
diff --git a/hw/riscv/riscv_board.c b/hw/riscv/riscv_board.c
new file mode 100644
index 0000000..9c8c9cd
--- /dev/null
+++ b/hw/riscv/riscv_board.c
@@ -0,0 +1,330 @@
+/*
+ * QEMU RISC-V Generic Board Support
+ *
+ * Author: Sagar Karandikar, address@hidden
+ *
+ * This provides a RISC-V Board with the following devices:
+ *
+ * 0) HTIF Syscall Proxy
+ * 1) HTIF Console
+ * 2) HTIF Block Device
+ *
+ * These are created by htif_mm_init below.
+ *
+ * The following "Shim" devices allow support for interrupts triggered by the
+ * processor itself (writes to the MIP/SIP CSRs):
+ *
+ * softint0 - SSIP
+ * softint1 - STIP
+ * softint2 - MSIP
+ *
+ * These are created by softint_mm_init below.
+ *
+ * This board currently uses a hardcoded devicetree that indicates one hart
+ * and 2048 MB of memory.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to 
deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 
FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "hw/hw.h"
+#include "hw/i386/pc.h"
+#include "hw/char/serial.h"
+#include "hw/riscv/softint.h"
+#include "hw/riscv/htif/htif.h"
+#include "hw/riscv/htif/frontend.h"
+#include "hw/block/fdc.h"
+#include "net/net.h"
+#include "hw/boards.h"
+#include "hw/i2c/smbus.h"
+#include "block/block.h"
+#include "hw/block/flash.h"
+#include "block/block_int.h" // move later
+#include "hw/riscv/riscv.h"
+#include "hw/riscv/cpudevs.h"
+#include "hw/pci/pci.h"
+#include "sysemu/char.h"
+#include "sysemu/sysemu.h"
+#include "sysemu/arch_init.h"
+#include "qemu/log.h"
+#include "hw/riscv/bios.h"
+#include "hw/ide.h"
+#include "hw/loader.h"
+#include "elf.h"
+#include "hw/timer/mc146818rtc.h"
+#include "hw/timer/i8254.h"
+#include "sysemu/blockdev.h"
+#include "exec/address-spaces.h"
+#include "hw/sysbus.h"             /* SysBusDevice */
+#include "qemu/host-utils.h"
+#include "sysemu/qtest.h"
+#include "qemu/error-report.h"
+#include "hw/empty_slot.h"
+#include "qemu/error-report.h"
+#include "sysemu/block-backend.h"
+
+#define TYPE_RISCV_BOARD "riscv-board"
+#define RISCV_BOARD(obj) OBJECT_CHECK(BoardState, (obj), TYPE_RISCV_BOARD)
+
+// TODO: once device tree format is finalized, don't hardcode
+// Flattened device tree blob (FDT magic 0xd00dfeed) describing one hart
+// and a memory node; the memory size/address fields are patched at
+// runtime by set_devtree_memsize().
+// NOTE(review): DEVTREE_LEN (65944) is far larger than the ~410 bytes
+// initialized below; the remainder of the array is zero-filled.
+#define DEVTREE_LEN 65944
+char devtree[DEVTREE_LEN] = {
+0xd0,
+0x0d, 0xfe, 0xed, 0x00, 0x00, 0x01, 0x98, 0x00, 0x00, 0x00,
+0x38, 0x00, 0x00, 0x01, 0x58, 0x00, 0x00, 0x00, 0x28, 0x00,
+0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x01, 0x20, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00,
+0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00,
+0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00,
+0x0f, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x03, 0x00,
+0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x1b, 0x53, 0x70, 0x69,
+0x6b, 0x65, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x6d,
+0x65, 0x6d, 0x6f, 0x72, 0x79, 0x40, 0x30, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x07, 0x00,
+0x00, 0x00, 0x21, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x10, 0x00,
+0x00, 0x00, 0x2d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x01, 0x63, 0x70, 0x75,
+0x73, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00,
+0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x02, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x04, 0x00,
+0x00, 0x00, 0x0f, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
+0x01, 0x63, 0x70, 0x75, 0x40, 0x38, 0x30, 0x30, 0x30, 0x31,
+0x30, 0x30, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x03, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x21, 0x63,
+0x70, 0x75, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00,
+0x06, 0x00, 0x00, 0x00, 0x31, 0x72, 0x69, 0x73, 0x63, 0x76,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00,
+0x0b, 0x00, 0x00, 0x00, 0x3c, 0x72, 0x76, 0x36, 0x34, 0x69,
+0x6d, 0x61, 0x66, 0x64, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x03, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x2d, 0x00,
+0x00, 0x00, 0x00, 0x80, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00,
+0x02, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x02, 0x00,
+0x00, 0x00, 0x09, 0x23, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73,
+0x73, 0x2d, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x00, 0x23, 0x73,
+0x69, 0x7a, 0x65, 0x2d, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x00,
+0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x00, 0x64, 0x65, 0x76, 0x69,
+0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x00, 0x72, 0x65,
+0x67, 0x00, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x74, 0x69, 0x62,
+0x6c, 0x65, 0x00, 0x69, 0x73, 0x61, 0x00,
+};
+
+
+
+/* Placeholder sysbus device representing the board itself. */
+typedef struct {
+    SysBusDevice parent_obj;
+} BoardState;
+
+/* Kernel-loading parameters stashed by riscv_board_init for load_kernel(). */
+static struct _loaderparams {
+    int ram_size;
+    const char *kernel_filename;
+    const char *kernel_cmdline;
+    const char *initrd_filename;
+} loaderparams;
+
+/* ELF load-address translation hook: guest physical == ELF address. */
+uint64_t identity_translate(void *opaque, uint64_t addr)
+{
+    return addr;
+}
+
+/*
+ * Load the ELF named in loaderparams into guest memory (little-endian,
+ * identity-mapped). Exits QEMU on failure; returns the ELF entry point.
+ */
+static int64_t load_kernel (void)
+{
+    int64_t kernel_entry, kernel_high;
+    int big_endian;
+    big_endian = 0;
+
+    if (load_elf(loaderparams.kernel_filename, identity_translate, NULL,
+                 (uint64_t *)&kernel_entry, NULL, (uint64_t *)&kernel_high,
+                 big_endian, ELF_MACHINE, 1) < 0) {
+        fprintf(stderr, "qemu: could not load kernel '%s'\n",
+                loaderparams.kernel_filename);
+        exit(1);
+    }
+    return kernel_entry;
+}
+
+/* System-reset callback: reset the registered CPU. */
+static void main_cpu_reset(void *opaque)
+{
+    RISCVCPU *cpu = opaque;
+    cpu_reset(CPU(cpu));
+}
+
+/* hack for now to set memory size without implementing devicetree generation
+ * modifies memory size and addresses: patches two big-endian 64-bit
+ * fields in the devtree blob -- memsize at bytes [172..179] and
+ * addr1 = memsize | 0x1000 at bytes [320..327]. The fixed offsets must
+ * stay in sync with the hardcoded blob above. */
+static void set_devtree_memsize(uint64_t memsize)
+{
+
+    uint64_t addr1 = memsize | 0x1000;
+
+    if (memsize <= 0x1000) {
+        fprintf(stderr, "Warning: Insufficient memsize for bbl. If you are not 
"
+                        "using bbl, you may disregard this message\n");
+    }
+
+    int i;
+    for (i = 0; i < 8; i++) {
+        devtree[179-i] = (memsize >> (i*8)) & 0xFF;
+        devtree[327-i] = (addr1 >> (i*8)) & 0xFF;
+    }
+}
+
+/*
+ * Machine init: create the CPU(s) with their timer/irq devices, allocate
+ * RAM plus DEVTREE_LEN extra bytes for the device tree appended after
+ * RAM, load the kernel ELF if given, patch and copy in the hardcoded
+ * device tree, then instantiate the HTIF devices and the three softint
+ * shim devices at fixed high addresses.
+ */
+static void riscv_board_init(MachineState *args)
+{
+    ram_addr_t ram_size = args->ram_size;
+    const char *cpu_model = args->cpu_model;
+    const char *kernel_filename = args->kernel_filename;
+    const char *kernel_cmdline = args->kernel_cmdline;
+    const char *initrd_filename = args->initrd_filename;
+    MemoryRegion *system_memory = get_system_memory();
+    MemoryRegion *main_mem = g_new(MemoryRegion, 1);
+    RISCVCPU *cpu;
+    CPURISCVState *env;
+    int i;
+
+    DriveInfo *htifbd_drive;
+    const char *htifbd_fname; // htif block device filename
+
+    DeviceState *dev = qdev_create(NULL, TYPE_RISCV_BOARD);
+
+    object_property_set_bool(OBJECT(dev), true, "realized", NULL);
+
+    /* Make sure the first 3 serial ports are associated with a device. */
+    for(i = 0; i < 3; i++) {
+        if (!serial_hds[i]) {
+            char label[32];
+            snprintf(label, sizeof(label), "serial%d", i);
+            serial_hds[i] = qemu_chr_new(label, "null", NULL);
+        }
+    }
+
+    /* init CPUs */
+    if (cpu_model == NULL) {
+        cpu_model = "riscv-generic";
+    }
+
+    for (i = 0; i < smp_cpus; i++) {
+        cpu = cpu_riscv_init(cpu_model);
+        if (cpu == NULL) {
+            fprintf(stderr, "Unable to find CPU definition\n");
+            exit(1);
+        }
+        env = &cpu->env;
+
+        /* Init internal devices */
+        cpu_riscv_irq_init_cpu(env);
+        cpu_riscv_clock_init(env);
+        qemu_register_reset(main_cpu_reset, cpu);
+    }
+    cpu = RISCV_CPU(first_cpu);
+    env = &cpu->env;
+
+    /* register system main memory (actual RAM) */
+    memory_region_init_ram(main_mem, NULL, "riscv_board.ram", ram_size + 
DEVTREE_LEN, &error_fatal);
+    // for CSR_MIOBASE
+    env->memsize = ram_size;
+    vmstate_register_ram_global(main_mem);
+    memory_region_add_subregion(system_memory, 0x0, main_mem);
+
+    if (kernel_filename) {
+        loaderparams.ram_size = ram_size;
+        loaderparams.kernel_filename = kernel_filename;
+        loaderparams.kernel_cmdline = kernel_cmdline;
+        loaderparams.initrd_filename = initrd_filename;
+        load_kernel();
+    }
+
+    // TODO: still necessary?
+    // write memory amount in MiB to 0x0
+    //stl_p(memory_region_get_ram_ptr(main_mem), ram_size >> 20);
+
+    // set memory size in devicetree
+    set_devtree_memsize(ram_size);
+
+    // copy in the devtree
+    int q;
+    for (q = 0; q < DEVTREE_LEN; q++) {
+        stb_p(memory_region_get_ram_ptr(main_mem)+ram_size+q, devtree[q]);
+    }
+
+    // add serial device 0x3f8-0x3ff
+    // serial_mm_init(system_memory, 0xF0000400, 0, env->irq[5], 1843200/16,
+    //         serial_hds[0], DEVICE_NATIVE_ENDIAN);
+
+    // setup HTIF Block Device if one is specified as -hda FILENAME
+    htifbd_drive = drive_get_by_index(IF_IDE, 0);
+    if (NULL == htifbd_drive) {
+        htifbd_fname = NULL;
+    } else {
+        htifbd_fname = blk_bs(blk_by_legacy_dinfo(htifbd_drive))->filename;
+        // get rid of orphaned drive warning, until htif uses the real blockdev
+        htifbd_drive->is_default = true;
+    }
+
+    // add htif device at 0xFFFFFFFFF0000000
+    htif_mm_init(system_memory, 0xFFFFFFFFF0000000L, env->irq[4], main_mem,
+            htifbd_fname, kernel_cmdline, env, serial_hds[0]);
+
+    // Softint "devices" for cleaner handling of CPU-triggered interrupts
+    softint_mm_init(system_memory, 0xFFFFFFFFF0000020L, env->irq[1], main_mem,
+            env, "SSIP");
+    softint_mm_init(system_memory, 0xFFFFFFFFF0000040L, env->irq[2], main_mem,
+            env, "STIP");
+    softint_mm_init(system_memory, 0xFFFFFFFFF0000060L, env->irq[3], main_mem,
+            env, "MSIP");
+
+    // TODO: VIRTIO
+}
+
+/* Sysbus init for the placeholder board device: nothing to do. */
+static int riscv_board_sysbus_device_init(SysBusDevice *sysbusdev)
+{
+    return 0;
+}
+
+/* QOM class init: wire up the (no-op) sysbus init hook. */
+static void riscv_board_class_init(ObjectClass *klass, void *data)
+{
+    SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
+    k->init = riscv_board_sysbus_device_init;
+}
+
+/* QOM type registration record for the placeholder board device. */
+static const TypeInfo riscv_board_device = {
+    .name          = TYPE_RISCV_BOARD,
+    .parent        = TYPE_SYS_BUS_DEVICE,
+    .instance_size = sizeof(BoardState),
+    .class_init    = riscv_board_class_init,
+};
+
+/*
+ * Register the "riscv" machine type (single hart, default machine)
+ * with riscv_board_init as its init hook.
+ */
+static void riscv_board_machine_init(MachineClass *mc)
+{
+    mc->desc = "RISC-V Generic Board";
+    mc->init = riscv_board_init;
+    mc->max_cpus = 1;
+    mc->is_default = 1;
+}
+
+DEFINE_MACHINE("riscv", riscv_board_machine_init)
+
+/* QOM type-registration entry point for this file. */
+static void riscv_board_register_types(void)
+{
+    type_register_static(&riscv_board_device);
+}
+
+type_init(riscv_board_register_types)
diff --git a/hw/riscv/riscv_int.c b/hw/riscv/riscv_int.c
new file mode 100644
index 0000000..1d2cacc
--- /dev/null
+++ b/hw/riscv/riscv_int.c
@@ -0,0 +1,84 @@
+/*
+ * QEMU RISC-V - QEMU IRQ Support
+ *
+ * Author: Sagar Karandikar, address@hidden
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "hw/hw.h"
+#include "hw/riscv/cpudevs.h"
+#include "cpu.h"
+
+/* irq request function, called in hw/irq.h by qemu_irq_raise (level = 1),
+ * qemu_irq_lower (level = 0), qemu_irq_pulse (level = 1, then 0)
+ *
+ * The device will call this once to raise the interrupt line and once to
+ * lower the interrupt line for level-trigerring
+ *
+ */
+static void cpu_riscv_irq_request(void *opaque, int irq, int level)
+{
+    // This "irq" number is not a real irq number, just some set of numbers
+    // we choose. These are not the same irq numbers visible to the processor.
+
+    RISCVCPU *cpu = opaque;
+    CPURISCVState *env = &cpu->env;
+    CPUState *cs = CPU(cpu);
+
+    // current irqs:
+    // 7: Machine Timer. MIP_MTIP should have already been set
+    // 4: Host Interrupt. mfromhost should have a nonzero value
+    // 3, 2, 1: Interrupts triggered by the CPU. At least one of
+    //    MIP_STIP, MIP_SSIP, MIP_MSIP should already be set
+    if (unlikely(irq != 7 && !(irq < 5 && irq > 0))) {
+        fprintf(stderr, "Unused IRQ was raised.\n");
+        exit(1);
+    }
+
+    if (level) {
+        cpu_interrupt(cs, CPU_INTERRUPT_HARD);
+    } else {
+        if (!env->csr[NEW_CSR_MIP] && !env->csr[NEW_CSR_MFROMHOST]) {
+            // no interrupts pending, no host interrupt for HTIF, reset
+            cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
+        }
+    }
+}
+
+void cpu_riscv_irq_init_cpu(CPURISCVState *env)
+{
+    qemu_irq *qi;
+    int i;
+
+    qi = qemu_allocate_irqs(cpu_riscv_irq_request, riscv_env_get_cpu(env), 8);
+    for (i = 0; i < 8; i++) {
+        env->irq[i] = qi[i];
+    }
+}
+
+void cpu_riscv_soft_irq(CPURISCVState *env, int irq, int level)
+{
+    printf("NOT USED for RISC-V\n");
+    exit(1);
+    if (irq != 0) {
+        return;
+    }
+    qemu_set_irq(env->irq[irq], level);
+}
diff --git a/hw/riscv/softint.c b/hw/riscv/softint.c
new file mode 100644
index 0000000..0d50fad
--- /dev/null
+++ b/hw/riscv/softint.c
@@ -0,0 +1,121 @@
+/*
+ * QEMU RISC-V Soft Interrupt Emulation
+ *
+ * Author: Sagar Karandikar, address@hidden
+ *
+ * This module provides shim devices that allow support for interrupts
+ * triggered by the RISC-V processor itself (writes to the MIP/SIP CSRs):
+ *
+ * The following instantiations are enabled by default in riscv_board:
+ *
+ * softint0 - SSIP
+ * softint1 - STIP
+ * softint2 - MSIP
+ *
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "hw/riscv/softint.h"
+#include "exec/address-spaces.h"
+#include "qemu/error-report.h"
+#include <fcntl.h>
+#include <unistd.h>
+#include <sys/stat.h>
+#include <inttypes.h>
+
+
+static void softint_pre_save(void *opaque)
+{
+    return;
+}
+
+static int softint_post_load(void *opaque, int version_id)
+{
+    return 0;
+}
+
+const VMStateDescription vmstate_softint = {
+    .name = "softint",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .pre_save = softint_pre_save,
+    .post_load = softint_post_load,
+    .fields      = (VMStateField []) {
+        VMSTATE_END_OF_LIST()
+    },
+};
+
+// CPU wants to read an Softint register. Should not happen.
+static uint64_t softint_mm_read(void *opaque, hwaddr addr, unsigned size)
+{
+    fprintf(stderr, "Unimplemented read softint\n");
+    exit(1);
+}
+
+// CPU wrote to an Softint register
+static void softint_mm_write(void *opaque, hwaddr addr,
+                            uint64_t value, unsigned size)
+{
+    SoftintState *softintstate = opaque;
+
+    if (addr == 0x0) {
+        if (value != 0) {
+            qemu_irq_raise(softintstate->irq);
+        } else {
+            qemu_irq_lower(softintstate->irq);
+        }
+    } else {
+        fprintf(stderr, "Invalid softint register address %016lx\n", (uint64_t)addr);
+        exit(1);
+    }
+}
+
+static const MemoryRegionOps softint_mm_ops[3] = {
+    [DEVICE_LITTLE_ENDIAN] = {
+        .read = softint_mm_read,
+        .write = softint_mm_write,
+        .endianness = DEVICE_LITTLE_ENDIAN,
+    },
+};
+
+SoftintState *softint_mm_init(MemoryRegion *address_space, hwaddr base, qemu_irq irq,
+                        MemoryRegion *main_mem, CPURISCVState *env, const char * name)
+{
+    // TODO: cleanup the constant buffer sizes
+    SoftintState *softintstate;
+
+    softintstate = g_malloc0(sizeof(SoftintState));
+    softintstate->irq = irq;
+    softintstate->address_space = address_space;
+    softintstate->env = env;
+
+    char * badbuf = g_malloc0(sizeof(char)*100);
+    sprintf(badbuf, "%s%s", "softint", name);
+    softintstate->name = badbuf;
+
+    vmstate_register(NULL, base, &vmstate_softint, softintstate);
+
+    memory_region_init_io(&softintstate->io, NULL,
+            &softint_mm_ops[DEVICE_LITTLE_ENDIAN],
+            softintstate, badbuf, 4 /* 1 32-bit register */);
+    memory_region_add_subregion(address_space, base, &softintstate->io);
+
+    return softintstate;
+}
diff --git a/include/disas/bfd.h b/include/disas/bfd.h
index a112e9c..ae37e8c 100644
--- a/include/disas/bfd.h
+++ b/include/disas/bfd.h
@@ -415,6 +415,7 @@ int print_insn_crisv10          (bfd_vma, disassemble_info*);
 int print_insn_microblaze       (bfd_vma, disassemble_info*);
 int print_insn_ia64             (bfd_vma, disassemble_info*);
 int print_insn_lm32             (bfd_vma, disassemble_info*);
+int print_insn_riscv            (bfd_vma, disassemble_info*);
 
 #if 0
 /* Fetch the disassembler for a given BFD, if that support is available.  */
diff --git a/include/elf.h b/include/elf.h
index 66add81..d24836d 100644
--- a/include/elf.h
+++ b/include/elf.h
@@ -112,6 +112,8 @@ typedef int64_t  Elf64_Sxword;
 
 #define EM_UNICORE32    110     /* UniCore32 */
 
+#define EM_RISCV       243     /* RISC-V */
+
 /*
  * This is an interim value that we will use until the committee comes
  * up with a final number.
diff --git a/include/exec/poison.h b/include/exec/poison.h
index a4b1eca..7012c51 100644
--- a/include/exec/poison.h
+++ b/include/exec/poison.h
@@ -19,6 +19,7 @@
 #pragma GCC poison TARGET_PPCEMB
 #pragma GCC poison TARGET_PPC64
 #pragma GCC poison TARGET_ABI32
+#pragma GCC poison TARGET_RISCV
 #pragma GCC poison TARGET_SH4
 #pragma GCC poison TARGET_SPARC
 #pragma GCC poison TARGET_SPARC64
diff --git a/include/exec/user/thunk.h b/include/exec/user/thunk.h
index 3b67462..d13f468 100644
--- a/include/exec/user/thunk.h
+++ b/include/exec/user/thunk.h
@@ -121,7 +121,7 @@ static inline int thunk_type_size(const argtype *type_ptr, int is_host)
 #if defined(TARGET_X86_64)
             return 8;
 #elif defined(TARGET_ALPHA) || defined(TARGET_IA64) || defined(TARGET_MIPS) || \
-      defined(TARGET_PARISC) || defined(TARGET_SPARC64)
+      defined(TARGET_PARISC) || defined(TARGET_SPARC64) || defined(TARGET_RISCV)
             return 4;
 #elif defined(TARGET_PPC)
             return TARGET_ABI_BITS / 8;
diff --git a/include/hw/riscv/bios.h b/include/hw/riscv/bios.h
new file mode 100644
index 0000000..4e64818
--- /dev/null
+++ b/include/hw/riscv/bios.h
@@ -0,0 +1,4 @@
+#include "cpu.h"
+
+#define BIOS_SIZE (4 * 1024 * 1024)
+#define BIOS_FILENAME "riscv_bios.bin"
diff --git a/include/hw/riscv/cpudevs.h b/include/hw/riscv/cpudevs.h
new file mode 100644
index 0000000..54cee6d
--- /dev/null
+++ b/include/hw/riscv/cpudevs.h
@@ -0,0 +1,14 @@
+#ifndef HW_RISCV_CPUDEVS_H
+#define HW_RISCV_CPUDEVS_H
+/* Definitions for RISCV CPU internal devices.  */
+
+/* riscv_board.c */
+uint64_t identity_translate(void *opaque, uint64_t addr);
+
+/* riscv_int.c */
+void cpu_riscv_irq_init_cpu(CPURISCVState *env);
+
+/* cputimer.c */
+void cpu_riscv_clock_init(CPURISCVState *);
+
+#endif
diff --git a/include/hw/riscv/cputimer.h b/include/hw/riscv/cputimer.h
new file mode 100644
index 0000000..5ea57a3
--- /dev/null
+++ b/include/hw/riscv/cputimer.h
@@ -0,0 +1,4 @@
+uint64_t rtc_read(CPURISCVState *env);
+uint64_t rtc_read_with_delta(CPURISCVState *env);
+uint64_t instret_read(CPURISCVState *env);
+inline uint64_t instret_read_with_delta(CPURISCVState *env);
diff --git a/include/hw/riscv/htif/frontend.h b/include/hw/riscv/htif/frontend.h
new file mode 100644
index 0000000..7c26ac6
--- /dev/null
+++ b/include/hw/riscv/htif/frontend.h
@@ -0,0 +1,30 @@
+#ifndef HW_RISCV_FRONTEND_H
+#define HW_RISCV_FRONTEND_H 1
+
+#include "hw/hw.h"
+#include "sysemu/sysemu.h"
+#include "exec/memory.h"
+
+
+#define RV_FSYSCALL_sys_openat 56
+#define RV_FSYSCALL_sys_close 57
+#define RV_FSYSCALL_sys_write 64
+#define RV_FSYSCALL_sys_pread 67
+#define RV_FSYSCALL_sys_exit  93
+#define RV_FSYSCALL_sys_getmainvars 2011
+
+uint64_t sys_openat(HTIFState *htifstate, uint64_t dirfd, uint64_t pname, uint64_t len, uint64_t flags, uint64_t mode);
+
+uint64_t sys_close(HTIFState *htifstate, uint64_t fd);
+
+uint64_t sys_write(HTIFState *htifstate, uint64_t fd, uint64_t pbuf, uint64_t len);
+
+uint64_t sys_pread(HTIFState *htifstate, uint64_t fd, uint64_t pbuf, uint64_t len, uint64_t off);
+
+uint64_t sys_exit(HTIFState *htifstate, uint64_t code);
+
+int handle_frontend_syscall(HTIFState * htifstate, uint64_t payload);
+
+uint64_t sys_getmainvars(HTIFState * htifstate, uint64_t pbuf, uint64_t limit);
+
+#endif
diff --git a/include/hw/riscv/htif/htif.h b/include/hw/riscv/htif/htif.h
new file mode 100644
index 0000000..6acdcf5
--- /dev/null
+++ b/include/hw/riscv/htif/htif.h
@@ -0,0 +1,76 @@
+/*
+ * QEMU RISCV Host Target Interface (HTIF) Emulation
+ *
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef HW_RISCV_HTIF_H
+#define HW_RISCV_HTIF_H 1
+
+#include "hw/hw.h"
+#include "sysemu/sysemu.h"
+#include "exec/memory.h"
+
+typedef struct HTIFState HTIFState;
+
+struct HTIFState {
+    int allow_tohost;
+    int fromhost_inprogress;
+
+    hwaddr tohost_addr;
+    hwaddr fromhost_addr;
+    qemu_irq irq; // host interrupt line
+    MemoryRegion io;
+    MemoryRegion* address_space;
+    MemoryRegion* main_mem;
+    void* main_mem_ram_ptr;
+
+    CPURISCVState *env;
+    CharDriverState *chr;
+    uint64_t pending_read;
+
+
+    int block_dev_present;
+    // TODO: eventually move the following to a separate HTIF block device driver
+    const char *block_fname;
+    int block_fd;
+    char *real_name;
+    char *kernel_cmdline; // for sys_getmainvars
+};
+
+typedef struct request_t request_t;
+
+struct request_t
+{ 
+    uint64_t addr;
+    uint64_t offset;
+    uint64_t size;
+    uint64_t tag;
+};
+
+extern const VMStateDescription vmstate_htif;
+extern const MemoryRegionOps htif_io_ops;
+
+/* legacy pre qom */
+HTIFState *htif_mm_init(MemoryRegion *address_space, hwaddr base, 
+                    qemu_irq irq, MemoryRegion *main_mem, const char *htifbd_fname,
+                    const char *kernel_cmdline, CPURISCVState *env,
+                    CharDriverState *chr);
+
+#endif
diff --git a/include/hw/riscv/riscv.h b/include/hw/riscv/riscv.h
new file mode 100644
index 0000000..31adb20
--- /dev/null
+++ b/include/hw/riscv/riscv.h
@@ -0,0 +1,10 @@
+#ifndef HW_RISCV_H
+#define HW_RISCV_H
+/* Definitions for riscv board emulation.  */
+
+/* Kernels can be configured with 64KB pages */
+#define INITRD_PAGE_MASK (~((1 << 16) - 1))
+
+#include "exec/memory.h"
+
+#endif
diff --git a/include/hw/riscv/softint.h b/include/hw/riscv/softint.h
new file mode 100644
index 0000000..4c76f6b
--- /dev/null
+++ b/include/hw/riscv/softint.h
@@ -0,0 +1,50 @@
+/*
+ * QEMU RISCV Soft Interrupt Emulation
+ *
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef HW_RISCV_SOFTINT_H
+#define HW_RISCV_SOFTINT_H 1
+
+#include "hw/hw.h"
+#include "sysemu/sysemu.h"
+#include "exec/memory.h"
+
+typedef struct SoftintState SoftintState;
+
+struct SoftintState {
+    qemu_irq irq; // host interrupt line
+    MemoryRegion io;
+    MemoryRegion* address_space;
+
+    CPURISCVState *env;
+    CharDriverState *chr;
+    char * name;
+};
+
+extern const VMStateDescription vmstate_softint;
+extern const MemoryRegionOps softint_io_ops;
+
+/* legacy pre qom */
+SoftintState *softint_mm_init(MemoryRegion *address_space, hwaddr base, 
+                    qemu_irq irq, MemoryRegion *main_mem, CPURISCVState *env,
+                    const char * name);
+
+#endif
diff --git a/include/sysemu/arch_init.h b/include/sysemu/arch_init.h
index c38892f..4d8c83a 100644
--- a/include/sysemu/arch_init.h
+++ b/include/sysemu/arch_init.h
@@ -23,6 +23,7 @@ enum {
     QEMU_ARCH_UNICORE32 = (1 << 14),
     QEMU_ARCH_MOXIE = (1 << 15),
     QEMU_ARCH_TRICORE = (1 << 16),
+    QEMU_ARCH_RISCV = (1 << 17),
 };
 
 extern const uint32_t arch_type;
diff --git a/target-riscv/Makefile.objs b/target-riscv/Makefile.objs
new file mode 100644
index 0000000..6219b71
--- /dev/null
+++ b/target-riscv/Makefile.objs
@@ -0,0 +1,114 @@
+obj-y += fpu-custom-riscv/f32_add.o
+obj-y += fpu-custom-riscv/f32_classify.o
+obj-y += fpu-custom-riscv/f32_div.o
+obj-y += fpu-custom-riscv/f32_eq.o
+obj-y += fpu-custom-riscv/f32_eq_signaling.o
+obj-y += fpu-custom-riscv/f32_isSignalingNaN.o
+obj-y += fpu-custom-riscv/f32_le.o
+obj-y += fpu-custom-riscv/f32_le_quiet.o
+obj-y += fpu-custom-riscv/f32_lt.o
+obj-y += fpu-custom-riscv/f32_lt_quiet.o
+obj-y += fpu-custom-riscv/f32_mulAdd.o
+obj-y += fpu-custom-riscv/f32_mul.o
+obj-y += fpu-custom-riscv/f32_rem.o
+obj-y += fpu-custom-riscv/f32_roundToInt.o
+obj-y += fpu-custom-riscv/f32_sqrt.o
+obj-y += fpu-custom-riscv/f32_sub.o
+obj-y += fpu-custom-riscv/f32_to_f64.o
+obj-y += fpu-custom-riscv/f32_to_i32.o
+obj-y += fpu-custom-riscv/f32_to_i32_r_minMag.o
+obj-y += fpu-custom-riscv/f32_to_i64.o
+obj-y += fpu-custom-riscv/f32_to_i64_r_minMag.o
+obj-y += fpu-custom-riscv/f32_to_ui32.o
+obj-y += fpu-custom-riscv/f32_to_ui32_r_minMag.o
+obj-y += fpu-custom-riscv/f32_to_ui64.o
+obj-y += fpu-custom-riscv/f32_to_ui64_r_minMag.o
+obj-y += fpu-custom-riscv/f64_add.o
+obj-y += fpu-custom-riscv/f64_classify.o
+obj-y += fpu-custom-riscv/f64_div.o
+obj-y += fpu-custom-riscv/f64_eq.o
+obj-y += fpu-custom-riscv/f64_eq_signaling.o
+obj-y += fpu-custom-riscv/f64_isSignalingNaN.o
+obj-y += fpu-custom-riscv/f64_le.o
+obj-y += fpu-custom-riscv/f64_le_quiet.o
+obj-y += fpu-custom-riscv/f64_lt.o
+obj-y += fpu-custom-riscv/f64_lt_quiet.o
+obj-y += fpu-custom-riscv/f64_mulAdd.o
+obj-y += fpu-custom-riscv/f64_mul.o
+obj-y += fpu-custom-riscv/f64_rem.o
+obj-y += fpu-custom-riscv/f64_roundToInt.o
+obj-y += fpu-custom-riscv/f64_sqrt.o
+obj-y += fpu-custom-riscv/f64_sub.o
+obj-y += fpu-custom-riscv/f64_to_f32.o
+obj-y += fpu-custom-riscv/f64_to_i32.o
+obj-y += fpu-custom-riscv/f64_to_i32_r_minMag.o
+obj-y += fpu-custom-riscv/f64_to_i64.o
+obj-y += fpu-custom-riscv/f64_to_i64_r_minMag.o
+obj-y += fpu-custom-riscv/f64_to_ui32.o
+obj-y += fpu-custom-riscv/f64_to_ui32_r_minMag.o
+obj-y += fpu-custom-riscv/f64_to_ui64.o
+obj-y += fpu-custom-riscv/f64_to_ui64_r_minMag.o
+obj-y += fpu-custom-riscv/i32_to_f32.o
+obj-y += fpu-custom-riscv/i32_to_f64.o
+obj-y += fpu-custom-riscv/i64_to_f32.o
+obj-y += fpu-custom-riscv/i64_to_f64.o
+obj-y += fpu-custom-riscv/s_add128.o
+obj-y += fpu-custom-riscv/s_add192.o
+obj-y += fpu-custom-riscv/s_addMagsF32.o
+obj-y += fpu-custom-riscv/s_addMagsF64.o
+obj-y += fpu-custom-riscv/s_commonNaNToF32UI.o
+obj-y += fpu-custom-riscv/s_commonNaNToF64UI.o
+obj-y += fpu-custom-riscv/s_countLeadingZeros32.o
+obj-y += fpu-custom-riscv/s_countLeadingZeros64.o
+obj-y += fpu-custom-riscv/s_countLeadingZeros8.o
+obj-y += fpu-custom-riscv/s_eq128.o
+obj-y += fpu-custom-riscv/s_estimateDiv128To64.o
+obj-y += fpu-custom-riscv/s_estimateSqrt32.o
+obj-y += fpu-custom-riscv/s_f32UIToCommonNaN.o
+obj-y += fpu-custom-riscv/s_f64UIToCommonNaN.o
+obj-y += fpu-custom-riscv/s_isSigNaNF32UI.o
+obj-y += fpu-custom-riscv/s_isSigNaNF64UI.o
+obj-y += fpu-custom-riscv/s_le128.o
+obj-y += fpu-custom-riscv/s_lt128.o
+obj-y += fpu-custom-riscv/s_mul128By64To192.o
+obj-y += fpu-custom-riscv/s_mul128To256.o
+obj-y += fpu-custom-riscv/s_mul64To128.o
+obj-y += fpu-custom-riscv/s_mulAddF32.o
+obj-y += fpu-custom-riscv/s_mulAddF64.o
+obj-y += fpu-custom-riscv/s_normRoundPackToF32.o
+obj-y += fpu-custom-riscv/s_normRoundPackToF64.o
+obj-y += fpu-custom-riscv/s_normSubnormalF32Sig.o
+obj-y += fpu-custom-riscv/s_normSubnormalF64Sig.o
+obj-y += fpu-custom-riscv/softfloat_raiseFlags.o
+obj-y += fpu-custom-riscv/softfloat_state.o
+obj-y += fpu-custom-riscv/s_propagateNaNF32UI.o
+obj-y += fpu-custom-riscv/s_propagateNaNF64UI.o
+obj-y += fpu-custom-riscv/s_roundPackToF32.o
+obj-y += fpu-custom-riscv/s_roundPackToF64.o
+obj-y += fpu-custom-riscv/s_roundPackToI32.o
+obj-y += fpu-custom-riscv/s_roundPackToI64.o
+obj-y += fpu-custom-riscv/s_roundPackToUI32.o
+obj-y += fpu-custom-riscv/s_roundPackToUI64.o
+obj-y += fpu-custom-riscv/s_shift128ExtraRightJam.o
+obj-y += fpu-custom-riscv/s_shift128RightJam.o
+obj-y += fpu-custom-riscv/s_shift32RightJam.o
+obj-y += fpu-custom-riscv/s_shift64ExtraRightJam.o
+obj-y += fpu-custom-riscv/s_shift64RightJam.o
+obj-y += fpu-custom-riscv/s_shortShift128ExtraRightJam.o
+obj-y += fpu-custom-riscv/s_shortShift128Left.o
+obj-y += fpu-custom-riscv/s_shortShift128Right.o
+obj-y += fpu-custom-riscv/s_shortShift192Left.o
+obj-y += fpu-custom-riscv/s_shortShift32Right1Jam.o
+obj-y += fpu-custom-riscv/s_shortShift64ExtraRightJam.o
+obj-y += fpu-custom-riscv/s_shortShift64RightJam.o
+obj-y += fpu-custom-riscv/s_sub128.o
+obj-y += fpu-custom-riscv/s_sub192.o
+obj-y += fpu-custom-riscv/s_subMagsF32.o
+obj-y += fpu-custom-riscv/s_subMagsF64.o
+obj-y += fpu-custom-riscv/ui32_to_f32.o
+obj-y += fpu-custom-riscv/ui32_to_f64.o
+obj-y += fpu-custom-riscv/ui64_to_f32.o
+obj-y += fpu-custom-riscv/ui64_to_f64.o
+obj-y += translate.o op_helper.o helper.o cpu.o
+obj-y += gdbstub.o
+obj-$(CONFIG_SOFTMMU) += machine.o
diff --git a/target-riscv/TODO b/target-riscv/TODO
new file mode 100644
index 0000000..84a2b86
--- /dev/null
+++ b/target-riscv/TODO
@@ -0,0 +1,17 @@
+Notes for RISCV, updated priv spec version
+-----------------------------------------------
+
+General
+-------
+- fpu-custom-riscv aka softfloat is taken directly from spike (riscv-isa-sim)
+  the current version is from 853391c2bb814451ad88b8dbff2aec8616fc6a12 of
+  spike
+
+
+OLD, left as notes:
+-------------------
+- In include/exec/cpu-defs.h, CPU_TLB_BITS has been set to zero for
+  debugging purposes, increase for performance
+    - this sets the TLBs to only hold one entry
+
+
diff --git a/target-riscv/cpu-qom.h b/target-riscv/cpu-qom.h
new file mode 100644
index 0000000..595bc19
--- /dev/null
+++ b/target-riscv/cpu-qom.h
@@ -0,0 +1,86 @@
+/*
+ * QEMU RISC-V CPU
+ *
+ * Author: Sagar Karandikar, address@hidden
+ *
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+#ifndef QEMU_RISCV_CPU_QOM_H
+#define QEMU_RISCV_CPU_QOM_H
+
+#include "qom/cpu.h"
+
+#define TYPE_RISCV_CPU "riscv-cpu"
+
+#define RISCV_CPU_CLASS(klass) \
+    OBJECT_CLASS_CHECK(RISCVCPUClass, (klass), TYPE_RISCV_CPU)
+#define RISCV_CPU(obj) \
+    OBJECT_CHECK(RISCVCPU, (obj), TYPE_RISCV_CPU)
+#define RISCV_CPU_GET_CLASS(obj) \
+    OBJECT_GET_CLASS(RISCVCPUClass, (obj), TYPE_RISCV_CPU)
+
+/**
+ * RISCVCPUClass:
+ * @parent_realize: The parent class' realize handler.
+ * @parent_reset: The parent class' reset handler.
+ *
+ * A RISCV CPU model.
+ */
+typedef struct RISCVCPUClass {
+    /*< private >*/
+    CPUClass parent_class;
+    /*< public >*/
+
+    DeviceRealize parent_realize;
+    void (*parent_reset)(CPUState *cpu);
+} RISCVCPUClass;
+
+/**
+ * RISCVCPU:
+ * @env: #CPURISCVState
+ *
+ * A RISCV CPU.
+ */
+typedef struct RISCVCPU {
+    /*< private >*/
+    CPUState parent_obj;
+    /*< public >*/
+
+    CPURISCVState env;
+} RISCVCPU;
+
+static inline RISCVCPU *riscv_env_get_cpu(CPURISCVState *env)
+{
+    return container_of(env, RISCVCPU, env);
+}
+
+#define ENV_GET_CPU(e) CPU(riscv_env_get_cpu(e))
+
+#define ENV_OFFSET offsetof(RISCVCPU, env)
+
+void riscv_cpu_do_interrupt(CPUState *cpu);
+void riscv_cpu_dump_state(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
+                         int flags);
+hwaddr riscv_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
+int riscv_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
+int riscv_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
+bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request);
+void  riscv_cpu_do_unaligned_access(CPUState *cs,
+                                              target_ulong addr, int rw,
+                                              int is_user, uintptr_t retaddr);
+
+
+#endif
diff --git a/target-riscv/cpu.c b/target-riscv/cpu.c
new file mode 100644
index 0000000..a78ac42
--- /dev/null
+++ b/target-riscv/cpu.c
@@ -0,0 +1,143 @@
+/*
+ * QEMU RISC-V CPU
+ *
+ * Author: Sagar Karandikar, address@hidden
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+
+#include "cpu.h"
+#include "qemu-common.h"
+
+static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
+{
+    RISCVCPU *cpu = RISCV_CPU(cs);
+    CPURISCVState *env = &cpu->env;
+    env->active_tc.PC = value;
+}
+
+static void riscv_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
+{
+    RISCVCPU *cpu = RISCV_CPU(cs);
+    CPURISCVState *env = &cpu->env;
+    env->active_tc.PC = tb->pc;
+}
+
+static bool riscv_cpu_has_work(CPUState *cs)
+{
+    RISCVCPU *cpu = RISCV_CPU(cs);
+    CPURISCVState *env = &cpu->env;
+    bool has_work = false;
+
+    if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
+        int interruptno = cpu_riscv_hw_interrupts_pending(env);
+        if (interruptno + 1) {
+            has_work = true;
+        }
+    }
+
+    return has_work;
+}
+
+static void riscv_cpu_reset(CPUState *s)
+{
+    RISCVCPU *cpu = RISCV_CPU(s);
+    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
+    CPURISCVState *env = &cpu->env;
+
+    mcc->parent_reset(s);
+    tlb_flush(s, 1);
+    cpu_state_reset(env);
+}
+
+static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info) {
+    info->print_insn = print_insn_riscv;
+}
+
+static void riscv_cpu_realizefn(DeviceState *dev, Error **errp)
+{
+    CPUState *cs = CPU(dev);
+    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
+
+    cpu_reset(cs);
+    qemu_init_vcpu(cs);
+
+    mcc->parent_realize(dev, errp);
+}
+
+static void riscv_cpu_initfn(Object *obj)
+{
+    CPUState *cs = CPU(obj);
+    RISCVCPU *cpu = RISCV_CPU(obj);
+    CPURISCVState *env = &cpu->env;
+
+    cs->env_ptr = env;
+    cpu_exec_init(cs, &error_abort);
+
+    if (tcg_enabled()) {
+        riscv_tcg_init();
+    }
+}
+
+static void riscv_cpu_class_init(ObjectClass *c, void *data)
+{
+    RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
+    CPUClass *cc = CPU_CLASS(c);
+    DeviceClass *dc = DEVICE_CLASS(c);
+
+    mcc->parent_realize = dc->realize;
+    dc->realize = riscv_cpu_realizefn;
+
+    mcc->parent_reset = cc->reset;
+    cc->reset = riscv_cpu_reset;
+
+    cc->has_work = riscv_cpu_has_work;
+    cc->do_interrupt = riscv_cpu_do_interrupt;
+    cc->cpu_exec_interrupt = riscv_cpu_exec_interrupt;
+    cc->dump_state = riscv_cpu_dump_state;
+    cc->set_pc = riscv_cpu_set_pc;
+    cc->synchronize_from_tb = riscv_cpu_synchronize_from_tb;
+    cc->gdb_read_register = riscv_cpu_gdb_read_register;
+    cc->gdb_write_register = riscv_cpu_gdb_write_register;
+#ifdef CONFIG_USER_ONLY
+    cc->handle_mmu_fault = riscv_cpu_handle_mmu_fault;
+#else
+    cc->do_unassigned_access = riscv_cpu_unassigned_access;
+    cc->do_unaligned_access = riscv_cpu_do_unaligned_access;
+    cc->get_phys_page_debug = riscv_cpu_get_phys_page_debug;
+    // cc->vmsd = &vmstate_riscv_cpu;
+#endif
+
+    cc->disas_set_info = riscv_cpu_disas_set_info;
+    cc->gdb_num_core_regs = 132;
+    cc->gdb_stop_before_watchpoint = true;
+}
+
+static const TypeInfo riscv_cpu_type_info = {
+    .name = TYPE_RISCV_CPU,
+    .parent = TYPE_CPU,
+    .instance_size = sizeof(RISCVCPU),
+    .instance_init = riscv_cpu_initfn,
+    .abstract = false,
+    .class_size = sizeof(RISCVCPUClass),
+    .class_init = riscv_cpu_class_init,
+};
+
+static void riscv_cpu_register_types(void)
+{
+    type_register_static(&riscv_cpu_type_info);
+}
+
+type_init(riscv_cpu_register_types)
diff --git a/target-riscv/cpu.h b/target-riscv/cpu.h
new file mode 100644
index 0000000..3f89bc3
--- /dev/null
+++ b/target-riscv/cpu.h
@@ -0,0 +1,449 @@
+// Target-wide definitions for the RISC-V CPU model (RV64G).
+// NOTE(review): identifiers starting with a double underscore are reserved
+// for the implementation (C11 7.1.3); consider renaming the include guard.
+#if !defined (__RISCV_CPU_H__)
+#define __RISCV_CPU_H__
+
+//#define DEBUG_OP
+
+#define TARGET_HAS_ICE 1
+
+#define ELF_MACHINE    EM_RISCV
+
+#define CPUArchState struct CPURISCVState
+
+#define RISCV_START_PC 0x200
+
+#define ALIGNED_ONLY
+
+#include "config.h"
+#include "qemu-common.h"
+#include "riscv-defs.h"
+#include "exec/cpu-defs.h"
+
+// Return codes for the software page-table walker.
+#define TRANSLATE_FAIL -1
+#define TRANSLATE_SUCCESS 0
+
+#define NB_MMU_MODES 4
+
+struct CPURISCVState;
+
+// Page size is 4 KiB (1 << PGSHIFT).
+#define PGSHIFT 12
+
+// uncomment for lots of debug printing
+//#define RISCV_DEBUG_PRINT
+
+// Extract / deposit a bit-field described by a contiguous mask.  Dividing
+// (multiplying) by the mask's lowest set bit -- computed as
+// ((mask) & ~((mask) << 1)) -- shifts the field down (up) without needing a
+// separate shift-amount argument.
+// (Rejoined: these one-line macros were hard-wrapped in transit, which
+// split them mid-expression and broke preprocessing.)
+#define get_field(reg, mask) (((reg) & (target_ulong)(mask)) / ((mask) & ~((mask) << 1)))
+#define set_field(reg, mask, val) (((reg) & ~(target_ulong)(mask)) | (((target_ulong)(val) * ((mask) & ~((mask) << 1))) & (target_ulong)(mask)))
+
+
+// Floating-point rounding modes (RISC-V frm encoding).
+#define FP_RD_NE  0
+#define FP_RD_0   1
+#define FP_RD_DN  2
+#define FP_RD_UP  3
+#define FP_RD_NMM 4
+
+#define FSR_RD_SHIFT 5
+#define FSR_RD   (0x7 << FSR_RD_SHIFT)
+
+// Accrued floating-point exception flag bits (fflags).
+#define FPEXC_NX 0x01
+#define FPEXC_UF 0x02
+#define FPEXC_OF 0x04
+#define FPEXC_DZ 0x08
+#define FPEXC_NV 0x10
+
+#define FSR_AEXC_SHIFT 0
+#define FSR_NVA  (FPEXC_NV << FSR_AEXC_SHIFT)
+#define FSR_OFA  (FPEXC_OF << FSR_AEXC_SHIFT)
+#define FSR_UFA  (FPEXC_UF << FSR_AEXC_SHIFT)
+#define FSR_DZA  (FPEXC_DZ << FSR_AEXC_SHIFT)
+#define FSR_NXA  (FPEXC_NX << FSR_AEXC_SHIFT)
+#define FSR_AEXC (FSR_NVA | FSR_OFA | FSR_UFA | FSR_DZA | FSR_NXA)
+
+// CSR numbers, privileged-spec v1.7 era layout.  These index env->csr[].
+// NOTE(review): NEW_CSR_STATS 0xc0 looks out of pattern next to the 0xcXX
+// user counters -- confirm against the spike/pk source this was taken from.
+#define NEW_CSR_FFLAGS 0x1
+#define NEW_CSR_FRM 0x2
+#define NEW_CSR_FCSR 0x3
+#define NEW_CSR_CYCLE 0xc00
+#define NEW_CSR_TIME 0xc01
+#define NEW_CSR_INSTRET 0xc02
+#define NEW_CSR_STATS 0xc0
+#define NEW_CSR_UARCH0 0xcc0
+#define NEW_CSR_UARCH1 0xcc1
+#define NEW_CSR_UARCH2 0xcc2
+#define NEW_CSR_UARCH3 0xcc3
+#define NEW_CSR_UARCH4 0xcc4
+#define NEW_CSR_UARCH5 0xcc5
+#define NEW_CSR_UARCH6 0xcc6
+#define NEW_CSR_UARCH7 0xcc7
+#define NEW_CSR_UARCH8 0xcc8
+#define NEW_CSR_UARCH9 0xcc9
+#define NEW_CSR_UARCH10 0xcca
+#define NEW_CSR_UARCH11 0xccb
+#define NEW_CSR_UARCH12 0xccc
+#define NEW_CSR_UARCH13 0xccd
+#define NEW_CSR_UARCH14 0xcce
+#define NEW_CSR_UARCH15 0xccf
+#define NEW_CSR_SSTATUS 0x100
+#define NEW_CSR_STVEC 0x101
+#define NEW_CSR_SIE 0x104
+#define NEW_CSR_SSCRATCH 0x140
+#define NEW_CSR_SEPC 0x141
+#define NEW_CSR_SIP 0x144
+#define NEW_CSR_SPTBR 0x180
+#define NEW_CSR_SASID 0x181
+#define NEW_CSR_CYCLEW 0x900
+#define NEW_CSR_TIMEW 0x901
+#define NEW_CSR_INSTRETW 0x902
+#define NEW_CSR_STIME 0xd01
+#define NEW_CSR_SCAUSE 0xd42
+#define NEW_CSR_SBADADDR 0xd43
+#define NEW_CSR_STIMEW 0xa01
+#define NEW_CSR_MSTATUS 0x300
+#define NEW_CSR_MTVEC 0x301
+#define NEW_CSR_MTDELEG 0x302
+#define NEW_CSR_MIE 0x304
+#define NEW_CSR_MTIMECMP 0x321
+#define NEW_CSR_MSCRATCH 0x340
+#define NEW_CSR_MEPC 0x341
+#define NEW_CSR_MCAUSE 0x342
+#define NEW_CSR_MBADADDR 0x343
+#define NEW_CSR_MIP 0x344
+#define NEW_CSR_MTIME 0x701
+#define NEW_CSR_MCPUID 0xf00
+#define NEW_CSR_MIMPID 0xf01
+#define NEW_CSR_MHARTID 0xf10
+#define NEW_CSR_MTOHOST 0x780
+#define NEW_CSR_MFROMHOST 0x781
+#define NEW_CSR_MRESET 0x782
+#define NEW_CSR_MIPI 0x783
+#define NEW_CSR_MIOBASE 0x784
+#define NEW_CSR_CYCLEH 0xc80
+#define NEW_CSR_TIMEH 0xc81
+#define NEW_CSR_INSTRETH 0xc82
+#define NEW_CSR_CYCLEHW 0x980
+#define NEW_CSR_TIMEHW 0x981
+#define NEW_CSR_INSTRETHW 0x982
+#define NEW_CSR_STIMEH 0xd81
+#define NEW_CSR_STIMEHW 0xa81
+#define NEW_CSR_MTIMECMPH 0x361
+#define NEW_CSR_MTIMEH 0x741
+
+
+// RISCV Exception Codes
+#define EXCP_NONE                       -1   // not a real RISCV exception code
+#define NEW_RISCV_EXCP_INST_ADDR_MIS           0x0
+#define NEW_RISCV_EXCP_INST_ACCESS_FAULT       0x1
+#define NEW_RISCV_EXCP_ILLEGAL_INST            0x2
+#define NEW_RISCV_EXCP_BREAKPOINT              0x3
+#define NEW_RISCV_EXCP_LOAD_ADDR_MIS           0x4
+#define NEW_RISCV_EXCP_LOAD_ACCESS_FAULT       0x5
+#define NEW_RISCV_EXCP_STORE_AMO_ADDR_MIS      0x6
+#define NEW_RISCV_EXCP_STORE_AMO_ACCESS_FAULT  0x7
+#define NEW_RISCV_EXCP_U_ECALL                 0x8 // for convenience, report all
+                                                   // ECALLs as this, handler fixes
+#define NEW_RISCV_EXCP_S_ECALL                 0x9
+#define NEW_RISCV_EXCP_H_ECALL                 0xa
+#define NEW_RISCV_EXCP_M_ECALL                 0xb
+// >= 12 reserved
+// interrupts not listed here
+
+// True when a cause value encodes an interrupt (top bit set).  Use an
+// unsigned constant: (0x1 << 31) shifts into the sign bit of a signed int,
+// which is undefined behavior.  The argument is parenthesized against
+// operator-precedence surprises.
+#define IS_RV_INTERRUPT(ival) ((ival) & (0x1U << 31))
+
+// mstatus fields, privileged-spec v1.7 layout: a stack of nested
+// interrupt-enable / privilege pairs (IE/PRV, IE1/PRV1, ...).
+#define MSTATUS_IE          0x00000001
+#define MSTATUS_PRV         0x00000006
+#define MSTATUS_IE1         0x00000008
+#define MSTATUS_PRV1        0x00000030
+#define MSTATUS_IE2         0x00000040
+#define MSTATUS_PRV2        0x00000180
+#define MSTATUS_IE3         0x00000200
+#define MSTATUS_PRV3        0x00000C00
+#define MSTATUS_FS          0x00003000
+#define MSTATUS_XS          0x0000C000
+#define MSTATUS_MPRV        0x00010000
+#define MSTATUS_VM          0x003E0000
+#define MSTATUS32_SD        0x80000000
+#define MSTATUS64_SD        0x8000000000000000
+
+// sstatus: the supervisor-visible restriction of mstatus.
+#define SSTATUS_IE          0x00000001
+#define SSTATUS_PIE         0x00000008
+#define SSTATUS_PS          0x00000010
+#define SSTATUS_FS          0x00003000
+#define SSTATUS_XS          0x0000C000
+#define SSTATUS_MPRV        0x00010000
+#define SSTATUS_TIE         0x01000000
+#define SSTATUS32_SD        0x80000000
+#define SSTATUS64_SD        0x8000000000000000
+
+// mip/mie: software (SIP) and timer (TIP) interrupt-pending bits per level.
+#define MIP_SSIP            0x00000002
+#define MIP_HSIP            0x00000004
+#define MIP_MSIP            0x00000008
+#define MIP_STIP            0x00000020
+#define MIP_HTIP            0x00000040
+#define MIP_MTIP            0x00000080
+
+#define SIP_SSIP MIP_SSIP
+#define SIP_STIP MIP_STIP
+
+// Privilege levels, as encoded in MSTATUS_PRV.
+#define PRV_U 0
+#define PRV_S 1
+#define PRV_H 2
+#define PRV_M 3
+
+// Virtual-memory schemes selectable in MSTATUS_VM.
+#define VM_MBARE 0
+#define VM_MBB   1
+#define VM_MBBID 2
+#define VM_SV32  8
+#define VM_SV39  9
+#define VM_SV48  10
+
+#define UA_RV32  0
+#define UA_RV64  4
+#define UA_RV128 8
+
+// Interrupt cause numbers reported to the guest.
+#define IRQ_SOFT   0
+#define IRQ_TIMER  1
+#define IRQ_HOST   2
+#define IRQ_COP    3
+
+#define IMPL_ROCKET 1
+
+#define DEFAULT_MTVEC 0x100
+
+// page table entry (PTE) fields
+#define PTE_V     0x001 // Valid
+#define PTE_TYPE  0x01E // Type
+#define PTE_R     0x020 // Referenced
+#define PTE_D     0x040 // Dirty
+#define PTE_SOFT  0x380 // Reserved for Software
+
+#define PTE_TYPE_TABLE        0x00
+#define PTE_TYPE_TABLE_GLOBAL 0x02
+#define PTE_TYPE_URX_SR       0x04
+#define PTE_TYPE_URWX_SRW     0x06
+#define PTE_TYPE_UR_SR        0x08
+#define PTE_TYPE_URW_SRW      0x0A
+#define PTE_TYPE_URX_SRX      0x0C
+#define PTE_TYPE_URWX_SRWX    0x0E
+#define PTE_TYPE_SR           0x10
+#define PTE_TYPE_SRW          0x12
+#define PTE_TYPE_SRX          0x14
+#define PTE_TYPE_SRWX         0x16
+#define PTE_TYPE_SR_GLOBAL    0x18
+#define PTE_TYPE_SRW_GLOBAL   0x1A
+#define PTE_TYPE_SRX_GLOBAL   0x1C
+#define PTE_TYPE_SRWX_GLOBAL  0x1E
+
+#define PTE_PPN_SHIFT 10
+
+// Permission predicates: each constant is a 32-entry bitmap indexed by the
+// PTE's low 5 bits (valid+type), one bit per PTE type above.
+#define PTE_TABLE(PTE) ((0x0000000AU >> ((PTE) & 0x1F)) & 1)
+#define PTE_UR(PTE)    ((0x0000AAA0U >> ((PTE) & 0x1F)) & 1)
+#define PTE_UW(PTE)    ((0x00008880U >> ((PTE) & 0x1F)) & 1)
+#define PTE_UX(PTE)    ((0x0000A0A0U >> ((PTE) & 0x1F)) & 1)
+#define PTE_SR(PTE)    ((0xAAAAAAA0U >> ((PTE) & 0x1F)) & 1)
+#define PTE_SW(PTE)    ((0x88888880U >> ((PTE) & 0x1F)) & 1)
+#define PTE_SX(PTE)    ((0xA0A0A000U >> ((PTE) & 0x1F)) & 1)
+
+// Select the right predicate for an access: store beats fetch beats load.
+#define PTE_CHECK_PERM(PTE, SUPERVISOR, STORE, FETCH) \
+  ((STORE) ? ((SUPERVISOR) ? PTE_SW(PTE) : PTE_UW(PTE)) : \
+   (FETCH) ? ((SUPERVISOR) ? PTE_SX(PTE) : PTE_UX(PTE)) : \
+             ((SUPERVISOR) ? PTE_SR(PTE) : PTE_UR(PTE)))
+
+typedef struct riscv_def_t riscv_def_t;
+
+// Per-hart architectural register state.
+typedef struct TCState TCState;
+struct TCState {
+    target_ulong gpr[32];            // integer registers x0..x31
+    target_ulong fpr[32];            // floating-point registers f0..f31
+    target_ulong PC;
+    target_ulong load_reservation;   // address held by LR, checked by SC
+};
+
+typedef struct CPURISCVState CPURISCVState;
+struct CPURISCVState {
+    TCState active_tc;
+    uint32_t current_tc;
+    uint32_t SEGBITS;
+    uint32_t PABITS;
+
+    uint64_t csr[4096]; // RISCV CSR registers
+
+    /* QEMU */
+    CPU_COMMON
+
+    /* Fields from here on are preserved across CPU reset. */
+    const riscv_def_t *cpu_model;
+    size_t memsize;
+    void *irq[8];  // NOTE(review): qemu_irq lines; indices 4 (host) and 7 (timer) are used below -- confirm board wiring
+    QEMUTimer *timer; /* Internal timer */
+};
+
+#include "cpu-qom.h"
+
+#if !defined(CONFIG_USER_ONLY)
+void riscv_cpu_unassigned_access(CPUState *cpu, hwaddr addr, bool is_write,
+        bool is_exec, int unused, unsigned size);
+#endif
+
+void riscv_cpu_list (FILE *f, fprintf_function cpu_fprintf);
+
+// Map the generic QEMU entry points onto the riscv implementations.
+#define cpu_exec cpu_riscv_exec
+#define cpu_signal_handler cpu_riscv_signal_handler
+#define cpu_list riscv_cpu_list
+
+#define CPU_SAVE_VERSION 3
+
+// Select the MMU index (translation regime) for the current access:
+// normally the current privilege level, forced to PRV_M (no translation)
+// when the VM field selects Mbare.
+static inline int cpu_mmu_index (CPURISCVState *env, bool ifetch)
+{
+    int mode = get_field(env->csr[NEW_CSR_MSTATUS], MSTATUS_PRV);
+
+    // This should only apply to non-instruction fetches
+    // FIXED, but untested, so leave the print/exit
+    // NOTE(review): deliberately aborts so the MPRV (modified-privilege)
+    // path gets noticed the first time it is exercised; drop the
+    // printf/exit once verified.
+    if (!ifetch && get_field(env->csr[NEW_CSR_MSTATUS], MSTATUS_MPRV)) {
+        printf("USED 0 cpu_mmu_index\n");
+        exit(1);
+        mode = get_field(env->csr[NEW_CSR_MSTATUS], MSTATUS_PRV1);
+    }
+    if (get_field(env->csr[NEW_CSR_MSTATUS], MSTATUS_VM) == VM_MBARE) {
+        mode = PRV_M;
+    }
+
+    return mode;
+}
+
+/*
+ * Return RISC-V IRQ number if an interrupt should be taken, else -1.
+ * Used in cpu-exec.c
+ *
+ * An interrupt for level X is deliverable when running strictly below X,
+ * or at X with MSTATUS.IE set.  Machine-level sources are checked before
+ * supervisor-level ones.
+ */
+static inline int cpu_riscv_hw_interrupts_pending(CPURISCVState *env)
+{
+
+    int priv = get_field(env->csr[NEW_CSR_MSTATUS], MSTATUS_PRV);
+    int ie = get_field(env->csr[NEW_CSR_MSTATUS], MSTATUS_IE);
+    target_ulong interrupts = env->csr[NEW_CSR_MIE] & env->csr[NEW_CSR_MIP];
+
+    #ifdef RISCV_DEBUG_PRINT
+    // NOTE(review): %ld assumes target_ulong is 64-bit (long) -- would
+    // warn on a 32-bit build.
+    printf("checking interrupts: ie %d, priv %d, interrupts %ld\n", ie, priv,
+            interrupts);
+    #endif
+
+    if (priv < PRV_M || (priv == PRV_M && ie)) {
+        if (interrupts & MIP_MSIP) {
+            #ifdef RISCV_DEBUG_PRINT
+            fprintf(stderr, "taking soft interrupt M\n");
+            #endif
+
+            // no irq to lower, that is done by the CPU
+            return IRQ_SOFT;
+        }
+
+        if (interrupts & MIP_MTIP) {
+            #ifdef RISCV_DEBUG_PRINT
+            fprintf(stderr, "taking timer interrupt M\n");
+            #endif
+
+            // we're handing it to the cpu now, so get rid of the qemu irq
+            // NOTE(review): irq[7] == machine timer line -- confirm against
+            // the board/irq wiring in riscv_board.c / riscv_int.c.
+            qemu_irq_lower(env->irq[7]); // get rid of the irq request
+            return IRQ_TIMER;
+        }
+
+        if (env->csr[NEW_CSR_MFROMHOST]) {
+            #ifdef RISCV_DEBUG_PRINT
+            fprintf(stderr, "taking host interrupt\n");
+            #endif
+
+            // we're handing it to the cpu now, so get rid of the qemu irq
+            // NOTE(review): irq[4] == HTIF host line -- confirm wiring.
+            qemu_irq_lower(env->irq[4]); // get rid of the irq request
+            return IRQ_HOST;
+        }
+
+    }
+
+    if (priv < PRV_S || (priv == PRV_S && ie)) {
+        if (interrupts & MIP_SSIP) {
+            #ifdef RISCV_DEBUG_PRINT
+            fprintf(stderr, "taking soft interrupt S\n");
+            #endif
+
+            // no irq to lower, that is done by the CPU
+            return IRQ_SOFT;
+        }
+
+        if (interrupts & MIP_STIP) {
+            #ifdef RISCV_DEBUG_PRINT
+            fprintf(stderr, "taking timer interrupt S\n");
+            #endif
+
+            // no irq to lower, that is done by the CPU
+            return IRQ_TIMER;
+        }
+    }
+
+    // indicates no pending interrupt to handler in cpu-exec.c
+    return -1;
+}
+
+#include "exec/cpu-all.h"
+
+int cpu_riscv_exec(CPUState *cpu);
+void riscv_tcg_init(void);
+RISCVCPU *cpu_riscv_init(const char *cpu_model);
+int cpu_riscv_signal_handler(int host_signum, void *pinfo, void *puc);
+
+#define cpu_init(cpu_model) CPU(cpu_riscv_init(cpu_model))
+
+void cpu_state_reset(CPURISCVState *s);
+
+/* hw/riscv/cputimer.c */
+uint64_t cpu_riscv_get_cycle (CPURISCVState *env);
+uint32_t cpu_riscv_get_random (CPURISCVState *env);
+void cpu_riscv_store_compare (CPURISCVState *env, uint64_t value);
+void cpu_riscv_start_count(CPURISCVState *env);
+
+
+void cpu_riscv_store_timew(CPURISCVState *env, uint64_t val_to_write);
+uint64_t cpu_riscv_read_mtime(CPURISCVState *env);
+uint64_t cpu_riscv_read_stime(CPURISCVState *env);
+uint64_t cpu_riscv_read_time(CPURISCVState *env);
+
+void cpu_riscv_store_instretw(CPURISCVState *env, uint64_t val_to_write);
+uint64_t cpu_riscv_read_instretw(CPURISCVState *env);
+
+/* hw/riscv/riscv_int.c */
+void cpu_riscv_soft_irq(CPURISCVState *env, int irq, int level);
+
+/* helper.c */
+int riscv_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
+                              int mmu_idx);
+#if !defined(CONFIG_USER_ONLY)
+hwaddr cpu_riscv_translate_address (CPURISCVState *env, target_ulong address,
+                                              int rw);
+#endif
+
+/* Capture the state identifying a translation block.  Only the PC matters
+ * on RISC-V; cs_base and flags are unused here but must still be written
+ * because generic code reads them. */
+static inline void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc,
+                                        target_ulong *cs_base, int *flags)
+{
+    *cs_base = 0;
+    *flags = 0; /* keep the compiler from warning about an unset out-param */
+    *pc = env->active_tc.PC;
+}
+
+// Push/pop the mstatus IE/PRV privilege stack on trap entry/return.
+target_ulong push_priv_stack(target_ulong start_mstatus);
+target_ulong pop_priv_stack(target_ulong start_mstatus);
+
+#ifndef CONFIG_USER_ONLY
+void csr_write_helper(CPURISCVState *env, target_ulong val_to_write,
+        target_ulong csrno);
+target_ulong csr_read_helper(CPURISCVState *env, target_ulong csrno);
+// NOTE(review): a bare `inline' declaration with no visible definition has
+// fragile C99 semantics; confirm the definition site matches.
+inline void cpu_riscv_tlb_flush (CPURISCVState *env, int flush_global);
+#endif
+
+/* Resolve the rounding mode: rm == 7 means "dynamic", i.e. take the mode
+ * from the FRM CSR.  Relies on `rm' and `env' being in scope at the use
+ * site (GCC statement-expression). */
+#define RISCV_RM ({ if(rm == 7) rm = env->csr[NEW_CSR_FRM]; \
+                    /* TODO: throw trap for rm > 4 */ \
+                    rm; })
+
+/* Accumulate pending softfloat exception flags into FFLAGS, then clear
+ * them.  (Rejoined: this macro was hard-wrapped in transit, which broke
+ * the backslash continuation and would not preprocess.) */
+#define set_fp_exceptions ({ env->csr[NEW_CSR_FFLAGS] |= softfloat_exceptionFlags;\
+                             softfloat_exceptionFlags = 0; })
+
+void validate_csr(CPURISCVState *env, uint64_t which, uint64_t write, uint64_t
+        new_pc);
+
+#include "exec/exec-all.h"
+
+#endif /* !defined (__RISCV_CPU_H__) */
diff --git a/target-riscv/fpu-custom-riscv/8086/OLD-specialize.c 
b/target-riscv/fpu-custom-riscv/8086/OLD-specialize.c
new file mode 100755
index 0000000..ffb306d
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/8086/OLD-specialize.c
@@ -0,0 +1,40 @@
+
+/*============================================================================
+
+*** FIX.
+
+This C source fragment is part of the SoftFloat IEC/IEEE Floating-point
+Arithmetic Package, Release 2b.
+
+Written by John R. Hauser.  This work was made possible in part by the
+International Computer Science Institute, located at Suite 600, 1947 Center
+Street, Berkeley, California 94704.  Funding was partially provided by the
+National Science Foundation under grant MIP-9311980.  The original version
+of this code was written as part of a project to build a fixed-point vector
+processor in collaboration with the University of California at Berkeley,
+overseen by Profs. Nelson Morgan and John Wawrzynek.  More information
+is available through the Web page `http://www.cs.berkeley.edu/~jhauser/
+arithmetic/SoftFloat.html'.
+
+THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE.  Although reasonable effort has
+been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT TIMES
+RESULT IN INCORRECT BEHAVIOR.  USE OF THIS SOFTWARE IS RESTRICTED TO PERSONS
+AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ALL LOSSES,
+COSTS, OR OTHER PROBLEMS THEY INCUR DUE TO THE SOFTWARE, AND WHO FURTHERMORE
+EFFECTIVELY INDEMNIFY JOHN HAUSER AND THE INTERNATIONAL COMPUTER SCIENCE
+INSTITUTE (possibly via similar legal warning) AGAINST ALL LOSSES, COSTS, OR
+OTHER PROBLEMS INCURRED BY THEIR CUSTOMERS AND CLIENTS DUE TO THE SOFTWARE.
+
+Derivative works are acceptable, even for commercial purposes, so long as
+(1) the source code for the derivative work includes prominent notice that
+the work is derivative, and (2) the source code includes prominent notice with
+these four paragraphs for those parts of this code that are retained.
+
+=============================================================================*/
+
+/*----------------------------------------------------------------------------
+| Underflow tininess-detection mode, statically initialized to default value.
+| (The declaration in `softfloat.h' must match the `int8' type here.)
+*----------------------------------------------------------------------------*/
+// NOTE(review): the comment above says `int8' but the definition uses
+// `bool'; confirm the declaration in softfloat.h agrees with this type.
+bool float_detectTininess = float_tininess_afterRounding;
+
diff --git a/target-riscv/fpu-custom-riscv/8086/OLD-specialize.h 
b/target-riscv/fpu-custom-riscv/8086/OLD-specialize.h
new file mode 100755
index 0000000..9e4461c
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/8086/OLD-specialize.h
@@ -0,0 +1,379 @@
+
+/*============================================================================
+
+*** FIX.
+
+This C source fragment is part of the SoftFloat IEC/IEEE Floating-point
+Arithmetic Package, Release 2b.
+
+Written by John R. Hauser.  This work was made possible in part by the
+International Computer Science Institute, located at Suite 600, 1947 Center
+Street, Berkeley, California 94704.  Funding was partially provided by the
+National Science Foundation under grant MIP-9311980.  The original version
+of this code was written as part of a project to build a fixed-point vector
+processor in collaboration with the University of California at Berkeley,
+overseen by Profs. Nelson Morgan and John Wawrzynek.  More information
+is available through the Web page `http://www.cs.berkeley.edu/~jhauser/
+arithmetic/SoftFloat.html'.
+
+THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE.  Although reasonable effort has
+been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT TIMES
+RESULT IN INCORRECT BEHAVIOR.  USE OF THIS SOFTWARE IS RESTRICTED TO PERSONS
+AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ALL LOSSES,
+COSTS, OR OTHER PROBLEMS THEY INCUR DUE TO THE SOFTWARE, AND WHO FURTHERMORE
+EFFECTIVELY INDEMNIFY JOHN HAUSER AND THE INTERNATIONAL COMPUTER SCIENCE
+INSTITUTE (possibly via similar legal warning) AGAINST ALL LOSSES, COSTS, OR
+OTHER PROBLEMS INCURRED BY THEIR CUSTOMERS AND CLIENTS DUE TO THE SOFTWARE.
+
+Derivative works are acceptable, even for commercial purposes, so long as
+(1) the source code for the derivative work includes prominent notice that
+the work is derivative, and (2) the source code includes prominent notice with
+these four paragraphs for those parts of this code that are retained.
+
+=============================================================================*/
+
+/*----------------------------------------------------------------------------
+| Internal canonical NaN format.
+*----------------------------------------------------------------------------*/
+*** COMMON
+// NOTE(review): functions later in this OLD-/"*** FIX"-marked file access
+// `.low'/`.high' members that this struct does not declare (it has
+// `sign'/`bits'); the file does not build as-is.
+typedef struct {
+    flag sign;
+    uint128_t bits;
+} commonNaNT;
+
+/*----------------------------------------------------------------------------
+| The pattern for a default generated single-precision NaN.
+*----------------------------------------------------------------------------*/
+#define float32Bits_defaultNaN 0xFFC00000
+
+/*----------------------------------------------------------------------------
+| Returns 1 if the single-precision floating-point value `a' is a NaN;
+| otherwise, returns 0.
+*----------------------------------------------------------------------------*/
+*** COMMON
+#define softfloat_isNaNFloat32Bits( a ) ( 0xFF000000 < (uint32_t) ( a )<<1 )
+
+/*----------------------------------------------------------------------------
+| Returns 1 if the single-precision floating-point value `a' is a signaling
+| NaN; otherwise, returns 0.
+*----------------------------------------------------------------------------*/
+// Signaling NaN: exponent all ones, quiet bit clear, nonzero payload.
+inline bool softfloat_isSigNaNFloat32Bits( uint32_t a )
+    { return ( ( a>>22 & 0x1FF ) == 0x1FE ) && ( a & 0x003FFFFF ); }
+
+/*----------------------------------------------------------------------------
+| Conversions between raw float32 bits and the canonical NaN form.
+*----------------------------------------------------------------------------*/
+commonNaNT softfloat_NaNFromFloat32Bits( uint32_t );
+uint32_t softfloat_float32BitsFromNaN( commonNaNT );
+uint32_t softfloat_propNaNFloat32Bits( uint32_t, uint32_t );
+
+/*----------------------------------------------------------------------------
+| The pattern for a default generated double-precision NaN.
+*----------------------------------------------------------------------------*/
+#define float64Bits_defaultNaN 0xFFF8000000000000
+
+/*----------------------------------------------------------------------------
+| Returns 1 if the double-precision floating-point value `a' is a NaN;
+| otherwise, returns 0.
+*----------------------------------------------------------------------------*/
+*** COMMON
+/* Rejoined onto one line: the macro was hard-wrapped in transit, splitting
+   the expression mid-token and breaking preprocessing. */
+#define softfloat_isNaNFloat64Bits( a ) ( 0xFFE0000000000000 < (uint64_t) ( a )<<1 )
+
+
+
+
+
+
+/*----------------------------------------------------------------------------
+| Returns 1 if the double-precision floating-point value `a' is a signaling
+| NaN; otherwise, returns 0.
+*----------------------------------------------------------------------------*/
+
+flag float64_is_signaling_nan( float64 a )
+{
+
+    return
+           ( ( ( a>>51 ) & 0xFFF ) == 0xFFE )
+        && ( a & LIT64( 0x0007FFFFFFFFFFFF ) );
+
+}
+
+/*----------------------------------------------------------------------------
+| Returns the result of converting the double-precision floating-point NaN
+| `a' to the canonical NaN format.  If `a' is a signaling NaN, the invalid
+| exception is raised.
+*----------------------------------------------------------------------------*/
+
+// NOTE(review): writes z.low/z.high, which the commonNaNT declared above
+// does not have (sign/bits), and calls the one-argument SoftFloat-2 style
+// float_raise(); this OLD file does not build as-is.
+static commonNaNT float64ToCommonNaN( float64 a )
+{
+    commonNaNT z;
+
+    if ( float64_is_signaling_nan( a ) ) float_raise( float_flag_invalid );
+    z.sign = a>>63;
+    z.low = 0;
+    z.high = a<<12;
+    return z;
+
+}
+
+/*----------------------------------------------------------------------------
+| Returns the result of converting the canonical NaN `a' to the double-
+| precision floating-point format.
+*----------------------------------------------------------------------------*/
+
+static float64 commonNaNToFloat64( commonNaNT a )
+{
+
+    return
+          ( ( (bits64) a.sign )<<63 )
+        | LIT64( 0x7FF8000000000000 )
+        | ( a.high>>12 );
+
+}
+
+/*----------------------------------------------------------------------------
+| Takes two double-precision floating-point values `a' and `b', one of which
+| is a NaN, and returns the appropriate NaN result.  If either `a' or `b' is a
+| signaling NaN, the invalid exception is raised.
+*----------------------------------------------------------------------------*/
+
+static float64 propagateFloat64NaN( float64 a, float64 b )
+{
+    flag aIsNaN, aIsSignalingNaN, bIsNaN, bIsSignalingNaN;
+
+    aIsNaN = float64_is_nan( a );
+    aIsSignalingNaN = float64_is_signaling_nan( a );
+    bIsNaN = float64_is_nan( b );
+    bIsSignalingNaN = float64_is_signaling_nan( b );
+    // Quiet both operands up front; the selected one is returned quieted.
+    a |= LIT64( 0x0008000000000000 );
+    b |= LIT64( 0x0008000000000000 );
+    if ( aIsSignalingNaN | bIsSignalingNaN ) float_raise( float_flag_invalid );
+    if ( aIsSignalingNaN ) {
+        if ( bIsSignalingNaN ) goto returnLargerSignificand;
+        return bIsNaN ? b : a;
+    }
+    else if ( aIsNaN ) {
+        if ( bIsSignalingNaN | ! bIsNaN ) return a;
+ returnLargerSignificand:
+        // Tie-break between two NaNs: prefer the larger significand.
+        if ( (bits64) ( a<<1 ) < (bits64) ( b<<1 ) ) return b;
+        if ( (bits64) ( b<<1 ) < (bits64) ( a<<1 ) ) return a;
+        return ( a < b ) ? a : b;
+    }
+    else {
+        return b;
+    }
+
+}
+
+#ifdef FLOATX80
+
+/*----------------------------------------------------------------------------
+| The pattern for a default generated extended double-precision NaN.  The
+| `high' and `low' values hold the most- and least-significant bits,
+| respectively.
+*----------------------------------------------------------------------------*/
+#define floatx80_default_nan_high 0xFFFF
+#define floatx80_default_nan_low  LIT64( 0xC000000000000000 )
+
+/*----------------------------------------------------------------------------
+| Returns 1 if the extended double-precision floating-point value `a' is a
+| NaN; otherwise, returns 0.
+*----------------------------------------------------------------------------*/
+
+flag floatx80_is_nan( floatx80 a )
+{
+
+    return ( ( a.high & 0x7FFF ) == 0x7FFF ) && (bits64) ( a.low<<1 );
+
+}
+
+/*----------------------------------------------------------------------------
+| Returns 1 if the extended double-precision floating-point value `a' is a
+| signaling NaN; otherwise, returns 0.
+*----------------------------------------------------------------------------*/
+
+flag floatx80_is_signaling_nan( floatx80 a )
+{
+    bits64 aLow;
+
+    // Mask off the explicit-quiet bit; signaling means exponent all ones,
+    // quiet bit clear, nonzero remaining fraction.
+    aLow = a.low & ~ LIT64( 0x4000000000000000 );
+    return
+           ( ( a.high & 0x7FFF ) == 0x7FFF )
+        && (bits64) ( aLow<<1 )
+        && ( a.low == aLow );
+
+}
+
+/*----------------------------------------------------------------------------
+| Returns the result of converting the extended double-precision floating-
+| point NaN `a' to the canonical NaN format.  If `a' is a signaling NaN, the
+| invalid exception is raised.
+*----------------------------------------------------------------------------*/
+
+// NOTE(review): uses z.low/z.high, absent from the commonNaNT declared
+// above -- this OLD file does not build as-is.
+static commonNaNT floatx80ToCommonNaN( floatx80 a )
+{
+    commonNaNT z;
+
+    if ( floatx80_is_signaling_nan( a ) ) float_raise( float_flag_invalid );
+    z.sign = a.high>>15;
+    z.low = 0;
+    z.high = a.low<<1;
+    return z;
+
+}
+
+/*----------------------------------------------------------------------------
+| Returns the result of converting the canonical NaN `a' to the extended
+| double-precision floating-point format.
+*----------------------------------------------------------------------------*/
+
+static floatx80 commonNaNToFloatx80( commonNaNT a )
+{
+    floatx80 z;
+
+    z.low = LIT64( 0xC000000000000000 ) | ( a.high>>1 );
+    z.high = ( ( (bits16) a.sign )<<15 ) | 0x7FFF;
+    return z;
+
+}
+
+/*----------------------------------------------------------------------------
+| Takes two extended double-precision floating-point values `a' and `b', one
+| of which is a NaN, and returns the appropriate NaN result.  If either `a' or
+| `b' is a signaling NaN, the invalid exception is raised.
+*----------------------------------------------------------------------------*/
+
+static floatx80 propagateFloatx80NaN( floatx80 a, floatx80 b )
+{
+    flag aIsNaN, aIsSignalingNaN, bIsNaN, bIsSignalingNaN;
+
+    aIsNaN = floatx80_is_nan( a );
+    aIsSignalingNaN = floatx80_is_signaling_nan( a );
+    bIsNaN = floatx80_is_nan( b );
+    bIsSignalingNaN = floatx80_is_signaling_nan( b );
+    a.low |= LIT64( 0xC000000000000000 );
+    b.low |= LIT64( 0xC000000000000000 );
+    if ( aIsSignalingNaN | bIsSignalingNaN ) float_raise( float_flag_invalid );
+    if ( aIsSignalingNaN ) {
+        if ( bIsSignalingNaN ) goto returnLargerSignificand;
+        return bIsNaN ? b : a;
+    }
+    else if ( aIsNaN ) {
+        if ( bIsSignalingNaN | ! bIsNaN ) return a;
+ returnLargerSignificand:
+        // Tie-break between two NaNs: prefer the larger significand.
+        if ( a.low < b.low ) return b;
+        if ( b.low < a.low ) return a;
+        return ( a.high < b.high ) ? a : b;
+    }
+    else {
+        return b;
+    }
+
+}
+
+#endif
+
+#ifdef FLOAT128
+
+/*----------------------------------------------------------------------------
+| The pattern for a default generated quadruple-precision NaN.  The `high' and
+| `low' values hold the most- and least-significant bits, respectively.
+*----------------------------------------------------------------------------*/
+#define float128_default_nan_high LIT64( 0xFFFF800000000000 )
+#define float128_default_nan_low  LIT64( 0x0000000000000000 )
+
+/*----------------------------------------------------------------------------
+| Returns 1 if the quadruple-precision floating-point value `a' is a NaN;
+| otherwise, returns 0.
+*----------------------------------------------------------------------------*/
+
+flag float128_is_nan( float128 a )
+{
+
+    return
+           ( LIT64( 0xFFFE000000000000 ) <= (bits64) ( a.high<<1 ) )
+        && ( a.low || ( a.high & LIT64( 0x0000FFFFFFFFFFFF ) ) );
+
+}
+
+/*----------------------------------------------------------------------------
+| Returns 1 if the quadruple-precision floating-point value `a' is a
+| signaling NaN; otherwise, returns 0.
+*----------------------------------------------------------------------------*/
+
+flag float128_is_signaling_nan( float128 a )
+{
+
+    return
+           ( ( ( a.high>>47 ) & 0xFFFF ) == 0xFFFE )
+        && ( a.low || ( a.high & LIT64( 0x00007FFFFFFFFFFF ) ) );
+
+}
+
+/*----------------------------------------------------------------------------
+| Returns the result of converting the quadruple-precision floating-point NaN
+| `a' to the canonical NaN format.  If `a' is a signaling NaN, the invalid
+| exception is raised.
+*----------------------------------------------------------------------------*/
+
+// NOTE(review): writes z.high/z.low, absent from the commonNaNT declared
+// above -- this OLD file does not build as-is.
+static commonNaNT float128ToCommonNaN( float128 a )
+{
+    commonNaNT z;
+
+    if ( float128_is_signaling_nan( a ) ) float_raise( float_flag_invalid );
+    z.sign = a.high>>63;
+    shortShift128Left( a.high, a.low, 16, &z.high, &z.low );
+    return z;
+
+}
+
+/*----------------------------------------------------------------------------
+| Returns the result of converting the canonical NaN `a' to the quadruple-
+| precision floating-point format.
+*----------------------------------------------------------------------------*/
+
+static float128 commonNaNToFloat128( commonNaNT a )
+{
+    float128 z;
+
+    shift128Right( a.high, a.low, 16, &z.high, &z.low );
+    z.high |= ( ( (bits64) a.sign )<<63 ) | LIT64( 0x7FFF800000000000 );
+    return z;
+
+}
+
+/*----------------------------------------------------------------------------
+| Takes two quadruple-precision floating-point values `a' and `b', one of
+| which is a NaN, and returns the appropriate NaN result.  If either `a' or
+| `b' is a signaling NaN, the invalid exception is raised.
+*----------------------------------------------------------------------------*/
+
+static float128 propagateFloat128NaN( float128 a, float128 b )
+{
+    flag aIsNaN, aIsSignalingNaN, bIsNaN, bIsSignalingNaN;
+
+    aIsNaN = float128_is_nan( a );
+    aIsSignalingNaN = float128_is_signaling_nan( a );
+    bIsNaN = float128_is_nan( b );
+    bIsSignalingNaN = float128_is_signaling_nan( b );
+    a.high |= LIT64( 0x0000800000000000 );
+    b.high |= LIT64( 0x0000800000000000 );
+    if ( aIsSignalingNaN | bIsSignalingNaN ) float_raise( float_flag_invalid );
+    if ( aIsSignalingNaN ) {
+        if ( bIsSignalingNaN ) goto returnLargerSignificand;
+        return bIsNaN ? b : a;
+    }
+    else if ( aIsNaN ) {
+        if ( bIsSignalingNaN | ! bIsNaN ) return a;
+ returnLargerSignificand:
+        // Tie-break between two NaNs: prefer the larger significand.
+        if ( lt128( a.high<<1, a.low, b.high<<1, b.low ) ) return b;
+        if ( lt128( b.high<<1, b.low, a.high<<1, a.low ) ) return a;
+        return ( a.high < b.high ) ? a : b;
+    }
+    else {
+        return b;
+    }
+
+}
+
+#endif
+
diff --git a/target-riscv/fpu-custom-riscv/8086/platform.h 
b/target-riscv/fpu-custom-riscv/8086/platform.h
new file mode 100755
index 0000000..9355edf
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/8086/platform.h
@@ -0,0 +1,38 @@
+
+/*============================================================================
+
+*** FIX.
+
+This C source fragment is part of the SoftFloat IEC/IEEE Floating-point
+Arithmetic Package, Release 2b.
+
+Written by John R. Hauser.  This work was made possible in part by the
+International Computer Science Institute, located at Suite 600, 1947 Center
+Street, Berkeley, California 94704.  Funding was partially provided by the
+National Science Foundation under grant MIP-9311980.  The original version
+of this code was written as part of a project to build a fixed-point vector
+processor in collaboration with the University of California at Berkeley,
+overseen by Profs. Nelson Morgan and John Wawrzynek.  More information
+is available through the Web page `http://www.cs.berkeley.edu/~jhauser/
+arithmetic/SoftFloat.html'.
+
+THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE.  Although reasonable effort has
+been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT TIMES
+RESULT IN INCORRECT BEHAVIOR.  USE OF THIS SOFTWARE IS RESTRICTED TO PERSONS
+AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ALL LOSSES,
+COSTS, OR OTHER PROBLEMS THEY INCUR DUE TO THE SOFTWARE, AND WHO FURTHERMORE
+EFFECTIVELY INDEMNIFY JOHN HAUSER AND THE INTERNATIONAL COMPUTER SCIENCE
+INSTITUTE (possibly via similar legal warning) AGAINST ALL LOSSES, COSTS, OR
+OTHER PROBLEMS INCURRED BY THEIR CUSTOMERS AND CLIENTS DUE TO THE SOFTWARE.
+
+Derivative works are acceptable, even for commercial purposes, so long as
+(1) the source code for the derivative work includes prominent notice that
+the work is derivative, and (2) the source code includes prominent notice with
+these four paragraphs for those parts of this code that are retained.
+
+=============================================================================*/
+
+/*----------------------------------------------------------------------------
+*----------------------------------------------------------------------------*/
+#define LITTLEENDIAN
+
diff --git a/target-riscv/fpu-custom-riscv/8086/s_commonNaNToF32UI.c 
b/target-riscv/fpu-custom-riscv/8086/s_commonNaNToF32UI.c
new file mode 100755
index 0000000..3b96c41
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/8086/s_commonNaNToF32UI.c
@@ -0,0 +1,17 @@
+
+#include <stdint.h>
+#include "platform.h"
+#include "specialize.h"
+
+/*----------------------------------------------------------------------------
+| Returns the result of converting the canonical NaN `a' to the single-
+| precision floating-point format.
+*----------------------------------------------------------------------------*/
+
+uint_fast32_t softfloat_commonNaNToF32UI( struct commonNaN a )
+{
+
+    /* Sign | quiet-NaN exponent and quiet bit (0x7FC00000) | the top
+       23 bits of the 64-bit common payload (64-41 = 23) as fraction. */
+    return (uint_fast32_t) a.sign<<31 | 0x7FC00000 | a.v64>>41;
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/8086/s_commonNaNToF64UI.c 
b/target-riscv/fpu-custom-riscv/8086/s_commonNaNToF64UI.c
new file mode 100755
index 0000000..474ceee
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/8086/s_commonNaNToF64UI.c
@@ -0,0 +1,19 @@
+
+#include <stdint.h>
+#include "platform.h"
+#include "specialize.h"
+
+/*----------------------------------------------------------------------------
+| Returns the result of converting the canonical NaN `a' to the double-
+| precision floating-point format.
+*----------------------------------------------------------------------------*/
+
+uint_fast64_t softfloat_commonNaNToF64UI( struct commonNaN a )
+{
+
+    /* Sign | quiet-NaN exponent and quiet bit | the top 52 bits of the
+       64-bit common payload (64-12 = 52) as fraction. */
+    return
+        (uint_fast64_t) a.sign<<63 | UINT64_C( 0x7FF8000000000000 )
+            | a.v64>>12;
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/8086/s_f32UIToCommonNaN.c 
b/target-riscv/fpu-custom-riscv/8086/s_f32UIToCommonNaN.c
new file mode 100755
index 0000000..067e8da
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/8086/s_f32UIToCommonNaN.c
@@ -0,0 +1,25 @@
+
+#include <stdint.h>
+#include "platform.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Returns the result of converting the single-precision floating-point NaN
+| `a' to the canonical NaN format.  If `a' is a signaling NaN, the invalid
+| exception is raised.
+*----------------------------------------------------------------------------*/
+struct commonNaN softfloat_f32UIToCommonNaN( uint_fast32_t uiA )
+{
+    struct commonNaN z;
+
+    /* Converting a signaling NaN raises the invalid exception. */
+    if ( softfloat_isSigNaNF32UI( uiA ) ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+    }
+    z.sign = uiA>>31;
+    /* Left-justify the 23-bit fraction into the 64-bit common payload. */
+    z.v64 = (uint_fast64_t) uiA<<41;
+    z.v0 = 0;
+    return z;
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/8086/s_f64UIToCommonNaN.c 
b/target-riscv/fpu-custom-riscv/8086/s_f64UIToCommonNaN.c
new file mode 100755
index 0000000..f933ded
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/8086/s_f64UIToCommonNaN.c
@@ -0,0 +1,25 @@
+
+#include <stdint.h>
+#include "platform.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Returns the result of converting the double-precision floating-point NaN
+| `a' to the canonical NaN format.  If `a' is a signaling NaN, the invalid
+| exception is raised.
+*----------------------------------------------------------------------------*/
+struct commonNaN softfloat_f64UIToCommonNaN( uint_fast64_t uiA )
+{
+    struct commonNaN z;
+
+    /* Converting a signaling NaN raises the invalid exception. */
+    if ( softfloat_isSigNaNF64UI( uiA ) ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+    }
+    z.sign = uiA>>63;
+    /* Left-justify the 52-bit fraction into the 64-bit common payload. */
+    z.v64 = uiA<<12;
+    z.v0 = 0;
+    return z;
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/8086/s_isSigNaNF32UI.c 
b/target-riscv/fpu-custom-riscv/8086/s_isSigNaNF32UI.c
new file mode 100755
index 0000000..0a9c33f
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/8086/s_isSigNaNF32UI.c
@@ -0,0 +1,13 @@
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "specialize.h"
+
+bool softfloat_isSigNaNF32UI( uint_fast32_t ui )
+{
+
+    /* Signaling NaN: exponent all ones with the quiet bit (bit 22)
+       clear (bits 30..22 == 0x1FE), and a nonzero remaining fraction. */
+    return ( ( ui>>22 & 0x1FF ) == 0x1FE ) && ( ui & 0x003FFFFF );
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/8086/s_isSigNaNF64UI.c 
b/target-riscv/fpu-custom-riscv/8086/s_isSigNaNF64UI.c
new file mode 100755
index 0000000..d255213
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/8086/s_isSigNaNF64UI.c
@@ -0,0 +1,15 @@
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "specialize.h"
+
+bool softfloat_isSigNaNF64UI( uint_fast64_t ui )
+{
+
+    /* Signaling NaN: exponent all ones with the quiet bit (bit 51)
+       clear (bits 62..51 == 0xFFE), and a nonzero remaining fraction. */
+    return
+        ( ( ui>>51 & 0xFFF ) == 0xFFE )
+            && ( ui & UINT64_C( 0x0007FFFFFFFFFFFF ) );
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/8086/s_propagateNaNF32UI.c 
b/target-riscv/fpu-custom-riscv/8086/s_propagateNaNF32UI.c
new file mode 100755
index 0000000..07774e8
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/8086/s_propagateNaNF32UI.c
@@ -0,0 +1,55 @@
+
+/*** UPDATE COMMENTS. ***/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Takes two single-precision floating-point values `a' and `b', one of which
+| is a NaN, and returns the appropriate NaN result.  If either `a' or `b' is a
+| signaling NaN, the invalid exception is raised.
+*----------------------------------------------------------------------------*/
+
+uint_fast32_t
+ softfloat_propagateNaNF32UI( uint_fast32_t uiA, uint_fast32_t uiB )
+{
+    bool isNaNA, isSigNaNA, isNaNB, isSigNaNB;
+    uint_fast32_t uiMagA, uiMagB;
+
+    /*------------------------------------------------------------------------
+    | Classify both operands before they are quieted below.
+    *------------------------------------------------------------------------*/
+    isNaNA = isNaNF32UI( uiA );
+    isSigNaNA = softfloat_isSigNaNF32UI( uiA );
+    isNaNB = isNaNF32UI( uiB );
+    isSigNaNB = softfloat_isSigNaNF32UI( uiB );
+    /*------------------------------------------------------------------------
+    | Make NaNs non-signaling.
+    *------------------------------------------------------------------------*/
+    uiA |= 0x00400000;
+    uiB |= 0x00400000;
+    /*------------------------------------------------------------------------
+    | A signaling NaN on either side raises the invalid exception.
+    *------------------------------------------------------------------------*/
+    if ( isSigNaNA | isSigNaNB ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+    }
+    if ( isSigNaNA ) {
+        if ( isSigNaNB ) goto returnLargerSignificand;
+        return isNaNB ? uiB : uiA;
+    } else if ( isNaNA ) {
+        if ( isSigNaNB || ! isNaNB ) return uiA;
+ returnLargerSignificand:
+        /*--------------------------------------------------------------------
+        | Both inputs are NaNs: return the one with the larger significand,
+        | breaking ties in favor of the smaller raw bit pattern.  The sign
+        | bit is removed with an explicit mask rather than `<<1': since
+        | uint_fast32_t may be wider than 32 bits, a left shift would keep
+        | the sign bit in the comparison.  This also matches the mask used
+        | by softfloat_propagateNaNF64UI.
+        *--------------------------------------------------------------------*/
+        uiMagA = uiA & 0x7FFFFFFF;
+        uiMagB = uiB & 0x7FFFFFFF;
+        if ( uiMagA < uiMagB ) return uiB;
+        if ( uiMagB < uiMagA ) return uiA;
+        return ( uiA < uiB ) ? uiA : uiB;
+    } else {
+        return uiB;
+    }
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/8086/s_propagateNaNF64UI.c 
b/target-riscv/fpu-custom-riscv/8086/s_propagateNaNF64UI.c
new file mode 100755
index 0000000..0ff6446
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/8086/s_propagateNaNF64UI.c
@@ -0,0 +1,55 @@
+
+/*** UPDATE COMMENTS. ***/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Takes two double-precision floating-point values `a' and `b', one of which
+| is a NaN, and returns the appropriate NaN result.  If either `a' or `b' is a
+| signaling NaN, the invalid exception is raised.
+*----------------------------------------------------------------------------*/
+
+uint_fast64_t
+ softfloat_propagateNaNF64UI( uint_fast64_t uiA, uint_fast64_t uiB )
+{
+    bool isNaNA, isSigNaNA, isNaNB, isSigNaNB;
+    uint_fast64_t uiMagA, uiMagB;
+
+    /*------------------------------------------------------------------------
+    | Classify both operands before they are quieted below.
+    *------------------------------------------------------------------------*/
+    isNaNA = isNaNF64UI( uiA );
+    isSigNaNA = softfloat_isSigNaNF64UI( uiA );
+    isNaNB = isNaNF64UI( uiB );
+    isSigNaNB = softfloat_isSigNaNF64UI( uiB );
+    /*------------------------------------------------------------------------
+    | Make NaNs non-signaling.
+    *------------------------------------------------------------------------*/
+    uiA |= UINT64_C( 0x0008000000000000 );
+    uiB |= UINT64_C( 0x0008000000000000 );
+    /*------------------------------------------------------------------------
+    | A signaling NaN on either side raises the invalid exception.
+    *------------------------------------------------------------------------*/
+    if ( isSigNaNA | isSigNaNB ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+    }
+    if ( isSigNaNA ) {
+        if ( isSigNaNB ) goto returnLargerSignificand;
+        return isNaNB ? uiB : uiA;
+    } else if ( isNaNA ) {
+        if ( isSigNaNB || ! isNaNB ) return uiA;
+ returnLargerSignificand:
+        /* Both are NaN: return the one with the larger significand (sign
+           bit masked off), breaking ties toward the smaller bit pattern. */
+        uiMagA = uiA & UINT64_C( 0x7FFFFFFFFFFFFFFF );
+        uiMagB = uiB & UINT64_C( 0x7FFFFFFFFFFFFFFF );
+        if ( uiMagA < uiMagB ) return uiB;
+        if ( uiMagB < uiMagA ) return uiA;
+        return ( uiA < uiB ) ? uiA : uiB;
+    } else {
+        return uiB;
+    }
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/8086/softfloat_raiseFlags.c 
b/target-riscv/fpu-custom-riscv/8086/softfloat_raiseFlags.c
new file mode 100755
index 0000000..c0c0dc8
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/8086/softfloat_raiseFlags.c
@@ -0,0 +1,51 @@
+
+/*============================================================================
+
+*** FIX.
+
+This C source fragment is part of the SoftFloat IEC/IEEE Floating-point
+Arithmetic Package, Release 2b.
+
+Written by John R. Hauser.  This work was made possible in part by the
+International Computer Science Institute, located at Suite 600, 1947 Center
+Street, Berkeley, California 94704.  Funding was partially provided by the
+National Science Foundation under grant MIP-9311980.  The original version
+of this code was written as part of a project to build a fixed-point vector
+processor in collaboration with the University of California at Berkeley,
+overseen by Profs. Nelson Morgan and John Wawrzynek.  More information
+is available through the Web page `http://www.cs.berkeley.edu/~jhauser/
+arithmetic/SoftFloat.html'.
+
+THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE.  Although reasonable effort has
+been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT TIMES
+RESULT IN INCORRECT BEHAVIOR.  USE OF THIS SOFTWARE IS RESTRICTED TO PERSONS
+AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ALL LOSSES,
+COSTS, OR OTHER PROBLEMS THEY INCUR DUE TO THE SOFTWARE, AND WHO FURTHERMORE
+EFFECTIVELY INDEMNIFY JOHN HAUSER AND THE INTERNATIONAL COMPUTER SCIENCE
+INSTITUTE (possibly via similar legal warning) AGAINST ALL LOSSES, COSTS, OR
+OTHER PROBLEMS INCURRED BY THEIR CUSTOMERS AND CLIENTS DUE TO THE SOFTWARE.
+
+Derivative works are acceptable, even for commercial purposes, so long as
+(1) the source code for the derivative work includes prominent notice that
+the work is derivative, and (2) the source code includes prominent notice with
+these four paragraphs for those parts of this code that are retained.
+
+=============================================================================*/
+
+#include "platform.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Raises the exceptions specified by `flags'.  Floating-point traps can be
+| defined here if desired.  It is currently not possible for such a trap
+| to substitute a result value.  If traps are not implemented, this routine
+| should be simply `float_exception_flags |= flags;'.
+*----------------------------------------------------------------------------*/
+
+void softfloat_raiseFlags( int_fast8_t flags )
+{
+
+    /* No trap support here: just accumulate into the sticky flag set. */
+    softfloat_exceptionFlags |= flags;
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/8086/softfloat_types.h 
b/target-riscv/fpu-custom-riscv/8086/softfloat_types.h
new file mode 100755
index 0000000..b5c1828
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/8086/softfloat_types.h
@@ -0,0 +1,16 @@
+
+#ifndef softfloat_types_h
+#define softfloat_types_h
+
+/*** COMMENTS. ***/
+
+#include <stdbool.h>
+#include <stdint.h>
+
+/* Each format wraps its raw IEEE bit pattern in a struct so the encodings
+   cannot be mixed with ordinary integer arithmetic by accident. */
+typedef struct { uint32_t v; } float32_t;
+typedef struct { uint64_t v; } float64_t;
+/* NOTE(review): for floatx80_t, `v' appears to be the 64-bit significand
+   and `x' the sign/exponent half — confirm against the users of this type. */
+typedef struct { uint64_t v; uint16_t x; } floatx80_t;
+typedef struct { uint64_t v[ 2 ]; } float128_t;
+
+#endif
+
diff --git a/target-riscv/fpu-custom-riscv/8086/specialize.h 
b/target-riscv/fpu-custom-riscv/8086/specialize.h
new file mode 100755
index 0000000..ca0bb1d
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/8086/specialize.h
@@ -0,0 +1,113 @@
+
+/*============================================================================
+
+*** FIX.
+
+This C source fragment is part of the SoftFloat IEC/IEEE Floating-point
+Arithmetic Package, Release 2b.
+
+Written by John R. Hauser.  This work was made possible in part by the
+International Computer Science Institute, located at Suite 600, 1947 Center
+Street, Berkeley, California 94704.  Funding was partially provided by the
+National Science Foundation under grant MIP-9311980.  The original version
+of this code was written as part of a project to build a fixed-point vector
+processor in collaboration with the University of California at Berkeley,
+overseen by Profs. Nelson Morgan and John Wawrzynek.  More information
+is available through the Web page `http://www.cs.berkeley.edu/~jhauser/
+arithmetic/SoftFloat.html'.
+
+THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE.  Although reasonable effort has
+been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT TIMES
+RESULT IN INCORRECT BEHAVIOR.  USE OF THIS SOFTWARE IS RESTRICTED TO PERSONS
+AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ALL LOSSES,
+COSTS, OR OTHER PROBLEMS THEY INCUR DUE TO THE SOFTWARE, AND WHO FURTHERMORE
+EFFECTIVELY INDEMNIFY JOHN HAUSER AND THE INTERNATIONAL COMPUTER SCIENCE
+INSTITUTE (possibly via similar legal warning) AGAINST ALL LOSSES, COSTS, OR
+OTHER PROBLEMS INCURRED BY THEIR CUSTOMERS AND CLIENTS DUE TO THE SOFTWARE.
+
+Derivative works are acceptable, even for commercial purposes, so long as
+(1) the source code for the derivative work includes prominent notice that
+the work is derivative, and (2) the source code includes prominent notice with
+these four paragraphs for those parts of this code that are retained.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+
+/*----------------------------------------------------------------------------
+*----------------------------------------------------------------------------*/
+#define init_detectTininess softfloat_tininess_afterRounding;
+
+/*----------------------------------------------------------------------------
+| Structure used to transfer NaN representations from one format to another.
+*----------------------------------------------------------------------------*/
+struct commonNaN {
+    bool sign;
+    uint64_t v64, v0;
+};
+
+/*----------------------------------------------------------------------------
+| The pattern for a default generated single-precision NaN.
+*----------------------------------------------------------------------------*/
+#define defaultNaNF32UI 0xFFC00000
+
+/*----------------------------------------------------------------------------
+| Returns 1 if the single-precision floating-point value `a' is a signaling
+| NaN; otherwise, returns 0.
+*----------------------------------------------------------------------------*/
+#if defined INLINE_LEVEL && ( 1 <= INLINE_LEVEL )
+INLINE bool softfloat_isSigNaNF32UI( uint_fast32_t ui )
+    { return ( ( ui>>22 & 0x1FF ) == 0x1FE ) && ( ui & 0x003FFFFF ); }
+#else
+bool softfloat_isSigNaNF32UI( uint_fast32_t );
+#endif
+
+/*----------------------------------------------------------------------------
+*----------------------------------------------------------------------------*/
+struct commonNaN softfloat_f32UIToCommonNaN( uint_fast32_t );
+#if defined INLINE_LEVEL && ( 1 <= INLINE_LEVEL )
+INLINE uint_fast32_t softfloat_commonNaNToF32UI( struct commonNaN a )
+    { return (uint_fast32_t) a.sign<<31 | 0x7FC00000 | a.v64>>41; }
+#else
+uint_fast32_t softfloat_commonNaNToF32UI( struct commonNaN );
+#endif
+
+/*----------------------------------------------------------------------------
+| Takes two single-precision floating-point values `a' and `b', one of which
+| is a NaN, and returns the appropriate NaN result.  If either `a' or `b' is a
+| signaling NaN, the invalid exception is raised.
+*----------------------------------------------------------------------------*/
+uint_fast32_t softfloat_propagateNaNF32UI( uint_fast32_t, uint_fast32_t );
+
+/*----------------------------------------------------------------------------
+| The pattern for a default generated double-precision NaN.
+*----------------------------------------------------------------------------*/
+#define defaultNaNF64UI UINT64_C(0xFFF8000000000000)
+
+/*----------------------------------------------------------------------------
+*----------------------------------------------------------------------------*/
+#if defined INLINE_LEVEL && ( 1 <= INLINE_LEVEL )
+INLINE bool softfloat_isSigNaNF64UI( uint_fast64_t ui )
+{
+    return
+        ( ( ui>>51 & 0xFFF ) == 0xFFE )
+            && ( ui & UINT64_C( 0x0007FFFFFFFFFFFF ) );
+}
+#else
+bool softfloat_isSigNaNF64UI( uint_fast64_t );
+#endif
+
+/*----------------------------------------------------------------------------
+*----------------------------------------------------------------------------*/
+/*** MIGHT BE INLINE'D. ***/
+struct commonNaN softfloat_f64UIToCommonNaN( uint_fast64_t );
+uint_fast64_t softfloat_commonNaNToF64UI( struct commonNaN );
+
+/*----------------------------------------------------------------------------
+| Takes two double-precision floating-point values `a' and `b', one of which
+| is a NaN, and returns the appropriate NaN result.  If either `a' or `b' is a
+| signaling NaN, the invalid exception is raised.
+*----------------------------------------------------------------------------*/
+uint_fast64_t softfloat_propagateNaNF64UI( uint_fast64_t, uint_fast64_t );
+
diff --git a/target-riscv/fpu-custom-riscv/f32_add.c 
b/target-riscv/fpu-custom-riscv/f32_add.c
new file mode 100755
index 0000000..dc53d68
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/f32_add.c
@@ -0,0 +1,29 @@
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+float32_t f32_add( float32_t a, float32_t b )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    bool signA;
+    union ui32_f32 uB;
+    uint_fast32_t uiB;
+    bool signB;
+    float32_t ( *magsRoutine )( uint_fast32_t, uint_fast32_t, bool );
+
+    uA.f = a;
+    uiA = uA.ui;
+    signA = signF32UI( uiA );
+    uB.f = b;
+    uiB = uB.ui;
+    signB = signF32UI( uiB );
+    /* Same signs: add the magnitudes; different signs: subtract them.
+       The sign of `a' is passed along as the tentative result sign. */
+    magsRoutine =
+        ( signA == signB ) ? softfloat_addMagsF32 : softfloat_subMagsF32;
+    return magsRoutine( uiA, uiB, signA );
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/f32_classify.c 
b/target-riscv/fpu-custom-riscv/f32_classify.c
new file mode 100755
index 0000000..d16aa25
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/f32_classify.c
@@ -0,0 +1,33 @@
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+uint_fast16_t f32_classify( float32_t a )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+
+    uA.f = a;
+    uiA = uA.ui;
+
+    uint_fast16_t infOrNaN = expF32UI( uiA ) == 0xFF;
+    uint_fast16_t subnormalOrZero = expF32UI( uiA ) == 0;
+    bool sign = signF32UI( uiA );
+
+    /* One-hot result: bit 0 = -inf, 1 = negative normal, 2 = negative
+       subnormal, 3 = -0, 4 = +0, 5 = positive subnormal, 6 = positive
+       normal, 7 = +inf, 8 = signaling NaN, 9 = quiet NaN (the RISC-V
+       FCLASS layout). */
+    return
+        (  sign && infOrNaN && fracF32UI( uiA ) == 0 )          << 0 |
+        (  sign && !infOrNaN && !subnormalOrZero )              << 1 |
+        (  sign && subnormalOrZero && fracF32UI( uiA ) )        << 2 |
+        (  sign && subnormalOrZero && fracF32UI( uiA ) == 0 )   << 3 |
+        ( !sign && infOrNaN && fracF32UI( uiA ) == 0 )          << 7 |
+        ( !sign && !infOrNaN && !subnormalOrZero )              << 6 |
+        ( !sign && subnormalOrZero && fracF32UI( uiA ) )        << 5 |
+        ( !sign && subnormalOrZero && fracF32UI( uiA ) == 0 )   << 4 |
+        ( isNaNF32UI( uiA ) &&  softfloat_isSigNaNF32UI( uiA )) << 8 |
+        ( isNaNF32UI( uiA ) && !softfloat_isSigNaNF32UI( uiA )) << 9;
+}
+
diff --git a/target-riscv/fpu-custom-riscv/f32_div.c 
b/target-riscv/fpu-custom-riscv/f32_div.c
new file mode 100755
index 0000000..958b140
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/f32_div.c
@@ -0,0 +1,96 @@
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+float32_t f32_div( float32_t a, float32_t b )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    bool signA;
+    int_fast16_t expA;
+    uint_fast32_t sigA;
+    union ui32_f32 uB;
+    uint_fast32_t uiB;
+    bool signB;
+    int_fast16_t expB;
+    uint_fast32_t sigB;
+    bool signZ;
+    struct exp16_sig32 normExpSig;
+    int_fast16_t expZ;
+    uint_fast32_t sigZ;
+    uint_fast32_t uiZ;
+    union ui32_f32 uZ;
+
+    /* Unpack both operands into sign / biased exponent / fraction. */
+    uA.f = a;
+    uiA = uA.ui;
+    signA = signF32UI( uiA );
+    expA = expF32UI( uiA );
+    sigA = fracF32UI( uiA );
+    uB.f = b;
+    uiB = uB.ui;
+    signB = signF32UI( uiB );
+    expB = expF32UI( uiB );
+    sigB = fracF32UI( uiB );
+    signZ = signA ^ signB;
+    /* Special cases: NaN operands, inf/inf, inf/x, x/inf. */
+    if ( expA == 0xFF ) {
+        if ( sigA ) goto propagateNaN;
+        if ( expB == 0xFF ) {
+            if ( sigB ) goto propagateNaN;
+            /* inf / inf is invalid. */
+            goto invalid;
+        }
+        goto infinity;
+    }
+    if ( expB == 0xFF ) {
+        if ( sigB ) goto propagateNaN;
+        goto zero;
+    }
+    if ( ! expB ) {
+        if ( ! sigB ) {
+            /* 0/0 is invalid; finite/0 signals and returns infinity. */
+            if ( ! ( expA | sigA ) ) goto invalid;
+            softfloat_raiseFlags( softfloat_flag_infinity );
+            goto infinity;
+        }
+        /* Subnormal divisor: normalize it. */
+        normExpSig = softfloat_normSubnormalF32Sig( sigB );
+        expB = normExpSig.exp;
+        sigB = normExpSig.sig;
+    }
+    if ( ! expA ) {
+        if ( ! sigA ) goto zero;
+        /* Subnormal dividend: normalize it. */
+        normExpSig = softfloat_normSubnormalF32Sig( sigA );
+        expA = normExpSig.exp;
+        sigA = normExpSig.sig;
+    }
+    expZ = expA - expB + 0x7D;
+    sigA = ( sigA | 0x00800000 )<<7;
+    sigB = ( sigB | 0x00800000 )<<8;
+    /* Keep the dividend strictly below the divisor so the quotient's
+       leading bit lands in a fixed position. */
+    if ( sigB <= ( sigA + sigA ) ) {
+        ++expZ;
+        sigA >>= 1;
+    }
+    sigZ = ( (uint_fast64_t) sigA<<32 ) / sigB;
+    /* If the low quotient bits are all zero, fold an inexact-remainder
+       sticky bit into the quotient so rounding can see it. */
+    if ( ! ( sigZ & 0x3F ) ) {
+        sigZ |= ( (uint_fast64_t) sigB * sigZ != (uint_fast64_t) sigA<<32 );
+    }
+    return softfloat_roundPackToF32( signZ, expZ, sigZ );
+ propagateNaN:
+    uiZ = softfloat_propagateNaNF32UI( uiA, uiB );
+    goto uiZ;
+ invalid:
+    softfloat_raiseFlags( softfloat_flag_invalid );
+    uiZ = defaultNaNF32UI;
+    goto uiZ;
+ infinity:
+    uiZ = packToF32UI( signZ, 0xFF, 0 );
+    goto uiZ;
+ zero:
+    uiZ = packToF32UI( signZ, 0, 0 );
+ uiZ:
+    uZ.ui = uiZ;
+    return uZ.f;
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/f32_eq.c 
b/target-riscv/fpu-custom-riscv/f32_eq.c
new file mode 100755
index 0000000..8f2306b
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/f32_eq.c
@@ -0,0 +1,34 @@
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+bool f32_eq( float32_t a, float32_t b )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    union ui32_f32 uB;
+    uint_fast32_t uiB;
+
+    uA.f = a;
+    uiA = uA.ui;
+    uB.f = b;
+    uiB = uB.ui;
+    /* Quiet equality: a NaN operand makes the result false, and only a
+       signaling NaN raises the invalid exception. */
+    if (
+           ( ( expF32UI( uiA ) == 0xFF ) && fracF32UI( uiA ) )
+        || ( ( expF32UI( uiB ) == 0xFF ) && fracF32UI( uiB ) )
+    ) {
+        if (
+            softfloat_isSigNaNF32UI( uiA ) || softfloat_isSigNaNF32UI( uiB )
+        ) {
+            softfloat_raiseFlags( softfloat_flag_invalid );
+        }
+        return false;
+    }
+    /* Equal bit patterns, or +0 == -0 (all bits below the sign zero). */
+    return ( uiA == uiB ) || ! (uint32_t) ( ( uiA | uiB )<<1 );
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/f32_eq_signaling.c 
b/target-riscv/fpu-custom-riscv/f32_eq_signaling.c
new file mode 100755
index 0000000..bfba48a
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/f32_eq_signaling.c
@@ -0,0 +1,29 @@
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+bool f32_eq_signaling( float32_t a, float32_t b )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    union ui32_f32 uB;
+    uint_fast32_t uiB;
+
+    uA.f = a;
+    uiA = uA.ui;
+    uB.f = b;
+    uiB = uB.ui;
+    /* Signaling equality: any NaN operand (quiet or signaling) raises
+       the invalid exception and compares unequal. */
+    if (
+           ( ( expF32UI( uiA ) == 0xFF ) && fracF32UI( uiA ) )
+        || ( ( expF32UI( uiB ) == 0xFF ) && fracF32UI( uiB ) )
+    ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        return false;
+    }
+    /* Equal bit patterns, or +0 == -0 (all bits below the sign zero). */
+    return ( uiA == uiB ) || ! (uint32_t) ( ( uiA | uiB )<<1 );
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/f32_isSignalingNaN.c 
b/target-riscv/fpu-custom-riscv/f32_isSignalingNaN.c
new file mode 100755
index 0000000..09aaa82
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/f32_isSignalingNaN.c
@@ -0,0 +1,16 @@
+
+#include <stdbool.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+bool f32_isSignalingNaN( float32_t a )
+{
+    union ui32_f32 uA;
+
+    /* Reinterpret the wrapped float and test its bit pattern. */
+    uA.f = a;
+    return softfloat_isSigNaNF32UI( uA.ui );
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/f32_le.c 
b/target-riscv/fpu-custom-riscv/f32_le.c
new file mode 100755
index 0000000..5f47be5
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/f32_le.c
@@ -0,0 +1,34 @@
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+bool f32_le( float32_t a, float32_t b )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    union ui32_f32 uB;
+    uint_fast32_t uiB;
+    bool signA, signB;
+
+    uA.f = a;
+    uiA = uA.ui;
+    uB.f = b;
+    uiB = uB.ui;
+    /* Signaling comparison: any NaN operand raises the invalid
+       exception and the result is false. */
+    if (
+           ( ( expF32UI( uiA ) == 0xFF ) && fracF32UI( uiA ) )
+        || ( ( expF32UI( uiB ) == 0xFF ) && fracF32UI( uiB ) )
+    ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        return false;
+    }
+    signA = signF32UI( uiA );
+    signB = signF32UI( uiB );
+    /* Different signs: a <= b iff a is the negative one or both are
+       zero (+0 <= -0).  Same sign: compare the encodings, with the
+       direction flipped when both are negative. */
+    return
+        ( signA != signB ) ? signA || ! (uint32_t) ( ( uiA | uiB )<<1 )
+            : ( uiA == uiB ) || ( signA ^ ( uiA < uiB ) );
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/f32_le_quiet.c 
b/target-riscv/fpu-custom-riscv/f32_le_quiet.c
new file mode 100755
index 0000000..2b541da
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/f32_le_quiet.c
@@ -0,0 +1,39 @@
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+bool f32_le_quiet( float32_t a, float32_t b )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    union ui32_f32 uB;
+    uint_fast32_t uiB;
+    bool signA, signB;
+
+    uA.f = a;
+    uiA = uA.ui;
+    uB.f = b;
+    uiB = uB.ui;
+    /* Quiet comparison: NaN operands yield false; only a signaling NaN
+       raises the invalid exception. */
+    if (
+           ( ( expF32UI( uiA ) == 0xFF ) && fracF32UI( uiA ) )
+        || ( ( expF32UI( uiB ) == 0xFF ) && fracF32UI( uiB ) )
+    ) {
+        if (
+            softfloat_isSigNaNF32UI( uiA ) || softfloat_isSigNaNF32UI( uiB )
+        ) {
+            softfloat_raiseFlags( softfloat_flag_invalid );
+        }
+        return false;
+    }
+    signA = signF32UI( uiA );
+    signB = signF32UI( uiB );
+    /* Different signs: a <= b iff a is the negative one or both are
+       zero (+0 <= -0).  Same sign: compare the encodings, with the
+       direction flipped when both are negative. */
+    return
+        ( signA != signB ) ? signA || ! (uint32_t) ( ( uiA | uiB )<<1 )
+            : ( uiA == uiB ) || ( signA ^ ( uiA < uiB ) );
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/f32_lt.c 
b/target-riscv/fpu-custom-riscv/f32_lt.c
new file mode 100755
index 0000000..753b28a
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/f32_lt.c
@@ -0,0 +1,34 @@
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+bool f32_lt( float32_t a, float32_t b )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    union ui32_f32 uB;
+    uint_fast32_t uiB;
+    bool signA, signB;
+
+    uA.f = a;
+    uiA = uA.ui;
+    uB.f = b;
+    uiB = uB.ui;
+    /* Signaling comparison: any NaN operand raises the invalid
+       exception and the result is false. */
+    if (
+           ( ( expF32UI( uiA ) == 0xFF ) && fracF32UI( uiA ) )
+        || ( ( expF32UI( uiB ) == 0xFF ) && fracF32UI( uiB ) )
+    ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        return false;
+    }
+    signA = signF32UI( uiA );
+    signB = signF32UI( uiB );
+    /* Different signs: a < b iff a is the negative one and they are not
+       both zero.  Same sign: strict encoding comparison, direction
+       flipped when both are negative. */
+    return
+        ( signA != signB ) ? signA && ( (uint32_t) ( ( uiA | uiB )<<1 ) != 0 )
+            : ( uiA != uiB ) && ( signA ^ ( uiA < uiB ) );
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/f32_lt_quiet.c 
b/target-riscv/fpu-custom-riscv/f32_lt_quiet.c
new file mode 100755
index 0000000..ecd90bf
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/f32_lt_quiet.c
@@ -0,0 +1,39 @@
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+bool f32_lt_quiet( float32_t a, float32_t b )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    union ui32_f32 uB;
+    uint_fast32_t uiB;
+    bool signA, signB;
+
+    uA.f = a;
+    uiA = uA.ui;
+    uB.f = b;
+    uiB = uB.ui;
+    /* Quiet comparison: NaN operands yield false; only a signaling NaN
+       raises the invalid exception. */
+    if (
+           ( ( expF32UI( uiA ) == 0xFF ) && fracF32UI( uiA ) )
+        || ( ( expF32UI( uiB ) == 0xFF ) && fracF32UI( uiB ) )
+    ) {
+        if (
+            softfloat_isSigNaNF32UI( uiA ) || softfloat_isSigNaNF32UI( uiB )
+        ) {
+            softfloat_raiseFlags( softfloat_flag_invalid );
+        }
+        return false;
+    }
+    signA = signF32UI( uiA );
+    signB = signF32UI( uiB );
+    /* Different signs: a < b iff a is the negative one and they are not
+       both zero.  Same sign: strict encoding comparison, direction
+       flipped when both are negative. */
+    return
+        ( signA != signB ) ? signA && ( (uint32_t) ( ( uiA | uiB )<<1 ) != 0 )
+            : ( uiA != uiB ) && ( signA ^ ( uiA < uiB ) );
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/f32_mul.c 
b/target-riscv/fpu-custom-riscv/f32_mul.c
new file mode 100755
index 0000000..d49c1dd
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/f32_mul.c
@@ -0,0 +1,89 @@
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+/*
+ * IEEE single-precision multiplication, rounded per the current rounding
+ * mode.  Special cases: NaN operands propagate; infinity * zero raises
+ * invalid and returns the default NaN; subnormals are normalized first.
+ */
+float32_t f32_mul( float32_t a, float32_t b )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    bool signA;
+    int_fast16_t expA;
+    uint_fast32_t sigA;
+    union ui32_f32 uB;
+    uint_fast32_t uiB;
+    bool signB;
+    int_fast16_t expB;
+    uint_fast32_t sigB;
+    bool signZ;
+    uint_fast32_t magBits;
+    struct exp16_sig32 normExpSig;
+    int_fast16_t expZ;
+    uint_fast32_t sigZ, uiZ;
+    union ui32_f32 uZ;
+
+    uA.f = a;
+    uiA = uA.ui;
+    signA = signF32UI( uiA );
+    expA = expF32UI( uiA );
+    sigA = fracF32UI( uiA );
+    uB.f = b;
+    uiB = uB.ui;
+    signB = signF32UI( uiB );
+    expB = expF32UI( uiB );
+    sigB = fracF32UI( uiB );
+    signZ = signA ^ signB;
+    /* NaN / infinity handling.  `magBits` records whether the other operand
+       is nonzero; infinity * 0 is the invalid case. */
+    if ( expA == 0xFF ) {
+        if ( sigA || ( ( expB == 0xFF ) && sigB ) ) goto propagateNaN;
+        magBits = expB | sigB;
+        goto infArg;
+    }
+    if ( expB == 0xFF ) {
+        if ( sigB ) goto propagateNaN;
+        magBits = expA | sigA;
+        goto infArg;
+    }
+    if ( ! expA ) {
+        if ( ! sigA ) goto zero;
+        normExpSig = softfloat_normSubnormalF32Sig( sigA );
+        expA = normExpSig.exp;
+        sigA = normExpSig.sig;
+    }
+    if ( ! expB ) {
+        if ( ! sigB ) goto zero;
+        normExpSig = softfloat_normSubnormalF32Sig( sigB );
+        expB = normExpSig.exp;
+        sigB = normExpSig.sig;
+    }
+    /* Align the two 24-bit significands (shifts of 7 and 8) so the 64-bit
+       product's leading bit lands where roundPack expects it; the right-jam
+       shift preserves discarded bits as a sticky bit for rounding. */
+    expZ = expA + expB - 0x7F;
+    sigA = ( sigA | 0x00800000 )<<7;
+    sigB = ( sigB | 0x00800000 )<<8;
+    sigZ = softfloat_shortShift64RightJam( (uint_fast64_t) sigA * sigB, 32 );
+    if ( sigZ < 0x40000000 ) {
+        --expZ;
+        sigZ <<= 1;
+    }
+    return softfloat_roundPackToF32( signZ, expZ, sigZ );
+ propagateNaN:
+    uiZ = softfloat_propagateNaNF32UI( uiA, uiB );
+    goto uiZ;
+ infArg:
+    if ( ! magBits ) {
+        /* infinity * zero -> invalid operation, default NaN. */
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        uiZ = defaultNaNF32UI;
+    } else {
+        uiZ = packToF32UI( signZ, 0xFF, 0 );
+    }
+    goto uiZ;
+ zero:
+    uiZ = packToF32UI( signZ, 0, 0 );
+ uiZ:
+    uZ.ui = uiZ;
+    return uZ.f;
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/f32_mulAdd.c 
b/target-riscv/fpu-custom-riscv/f32_mulAdd.c
new file mode 100755
index 0000000..3d4cee9
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/f32_mulAdd.c
@@ -0,0 +1,25 @@
+
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+/*
+ * Fused multiply-add: a * b + c with a single rounding at the end.
+ * Thin wrapper that unpacks the raw bit patterns and delegates to
+ * softfloat_mulAddF32 (the 0 argument selects the plain, non-negated op).
+ */
+float32_t f32_mulAdd( float32_t a, float32_t b, float32_t c )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    union ui32_f32 uB;
+    uint_fast32_t uiB;
+    union ui32_f32 uC;
+    uint_fast32_t uiC;
+
+    uA.f = a;
+    uiA = uA.ui;
+    uB.f = b;
+    uiB = uB.ui;
+    uC.f = c;
+    uiC = uC.ui;
+    return softfloat_mulAddF32( 0, uiA, uiB, uiC );
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/f32_rem.c 
b/target-riscv/fpu-custom-riscv/f32_rem.c
new file mode 100755
index 0000000..7172da8
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/f32_rem.c
@@ -0,0 +1,124 @@
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+/*
+ * IEEE remainder: a - n*b where n is the integer nearest a/b (ties to
+ * even), so the result's magnitude is at most |b|/2.  NaNs propagate;
+ * inf REM x and x REM 0 are invalid.  The result takes a's sign.
+ */
+float32_t f32_rem( float32_t a, float32_t b )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    bool signA;
+    int_fast16_t expA;
+    uint_fast32_t sigA;
+    union ui32_f32 uB;
+    uint_fast32_t uiB;
+//    bool signB;
+    int_fast16_t expB;
+    uint_fast32_t sigB;
+    struct exp16_sig32 normExpSig;
+    int_fast16_t expDiff;
+    uint_fast32_t q;
+    uint_fast64_t sigA64, sigB64, q64;
+    uint_fast32_t alternateSigA;
+    uint32_t sigMean;
+    bool signZ;
+    uint_fast32_t uiZ;
+    union ui32_f32 uZ;
+
+    uA.f = a;
+    uiA = uA.ui;
+    signA = signF32UI( uiA );
+    expA = expF32UI( uiA );
+    sigA = fracF32UI( uiA );
+    uB.f = b;
+    uiB = uB.ui;
+//    signB = signF32UI( uiB );
+    expB = expF32UI( uiB );
+    sigB = fracF32UI( uiB );
+    if ( expA == 0xFF ) {
+        if ( sigA || ( ( expB == 0xFF ) && sigB ) ) goto propagateNaN;
+        goto invalid;
+    }
+    if ( expB == 0xFF ) {
+        if ( sigB ) goto propagateNaN;
+        /* finite REM infinity == a, exact. */
+        return a;
+    }
+    if ( ! expB ) {
+        if ( ! sigB ) goto invalid;
+        normExpSig = softfloat_normSubnormalF32Sig( sigB );
+        expB = normExpSig.exp;
+        sigB = normExpSig.sig;
+    }
+    if ( ! expA ) {
+        if ( ! sigA ) return a;
+        normExpSig = softfloat_normSubnormalF32Sig( sigA );
+        expA = normExpSig.exp;
+        sigA = normExpSig.sig;
+    }
+    /* Two paths depending on how many quotient bits are needed: a direct
+       32/64-bit division when expDiff < 32, otherwise an iterative
+       estimate-and-subtract loop peeling ~62 quotient bits per pass. */
+    expDiff = expA - expB;
+    sigA |= 0x00800000;
+    sigB |= 0x00800000;
+    if ( expDiff < 32 ) {
+        sigA <<= 8;
+        sigB <<= 8;
+        if ( expDiff < 0 ) {
+            if ( expDiff < -1 ) return a;
+            sigA >>= 1;
+        }
+        q = ( sigB <= sigA );
+        if ( q ) sigA -= sigB;
+        if ( 0 < expDiff ) {
+            q = ( (uint_fast64_t) sigA<<32 ) / sigB;
+            q >>= 32 - expDiff;
+            sigB >>= 2;
+            sigA = ( ( sigA>>1 )<<( expDiff - 1 ) ) - sigB * q;
+        } else {
+            sigA >>= 2;
+            sigB >>= 2;
+        }
+    } else {
+        if ( sigB <= sigA ) sigA -= sigB;
+        sigA64 = (uint_fast64_t) sigA<<40;
+        sigB64 = (uint_fast64_t) sigB<<40;
+        expDiff -= 64;
+        while ( 0 < expDiff ) {
+            q64 = softfloat_estimateDiv128To64( sigA64, 0, sigB64 );
+            /* Bias the estimate low by 2 so the partial remainder stays
+               non-negative. */
+            q64 = ( 2 < q64 ) ? q64 - 2 : 0;
+            sigA64 = - ( ( sigB * q64 )<<38 );
+            expDiff -= 62;
+        }
+        expDiff += 64;
+        q64 = softfloat_estimateDiv128To64( sigA64, 0, sigB64 );
+        q64 = ( 2 < q64 ) ? q64 - 2 : 0;
+        q = q64>>( 64 - expDiff );
+        sigB <<= 6;
+        sigA = ( ( sigA64>>33 )<<( expDiff - 1 ) ) - sigB * q;
+    }
+    /* Step past the true remainder (loop exits on unsigned "underflow",
+       i.e. when sigA wraps above 0x80000000), then pick whichever of the
+       last two candidates is nearer, ties going to the even quotient. */
+    do {
+        alternateSigA = sigA;
+        ++q;
+        sigA -= sigB;
+    } while ( sigA < 0x80000000 );
+    sigMean = sigA + alternateSigA;
+    if ( ( 0x80000000 <= sigMean ) || ( ! sigMean && ( q & 1 ) ) ) {
+        sigA = alternateSigA;
+    }
+    signZ = ( 0x80000000 <= sigA );
+    if ( signZ ) sigA = - sigA;
+    return softfloat_normRoundPackToF32( signA ^ signZ, expB, sigA );
+ propagateNaN:
+    uiZ = softfloat_propagateNaNF32UI( uiA, uiB );
+    goto uiZ;
+ invalid:
+    softfloat_raiseFlags( softfloat_flag_invalid );
+    uiZ = defaultNaNF32UI;
+ uiZ:
+    uZ.ui = uiZ;
+    return uZ.f;
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/f32_roundToInt.c 
b/target-riscv/fpu-custom-riscv/f32_roundToInt.c
new file mode 100755
index 0000000..f8f9114
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/f32_roundToInt.c
@@ -0,0 +1,78 @@
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+/*
+ * Rounds `a` to an integral value, kept in float32 format, using the given
+ * rounding mode.  When `exact` is set, the inexact flag is raised if the
+ * result differs from the input.  NaN inputs propagate.
+ */
+float32_t f32_roundToInt( float32_t a, int_fast8_t roundingMode, bool exact )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    int_fast16_t expA;
+    uint_fast32_t uiZ;
+    bool signA;
+    uint_fast32_t lastBitMask, roundBitsMask;
+    union ui32_f32 uZ;
+
+    uA.f = a;
+    uiA = uA.ui;
+    expA = expF32UI( uiA );
+    /* exp >= 0x96 (unbiased >= 23): every fraction bit already represents
+       an integral part, so the value is an integer (or inf/NaN). */
+    if ( 0x96 <= expA ) {
+        if ( ( expA == 0xFF ) && fracF32UI( uiA ) ) {
+            uiZ = softfloat_propagateNaNF32UI( uiA, 0 );
+            goto uiZ;
+        }
+        return a;
+    }
+    /* exp <= 0x7E: |a| < 1, so the result is 0 or +-1 depending on mode. */
+    if ( expA <= 0x7E ) {
+        if ( ! (uint32_t) ( uiA<<1 ) ) return a;
+        if ( exact ) softfloat_exceptionFlags |= softfloat_flag_inexact;
+        signA = signF32UI( uiA );
+        switch ( roundingMode ) {
+         case softfloat_round_nearest_even:
+            if ( ( expA == 0x7E ) && fracF32UI( uiA ) ) {
+                uiZ = packToF32UI( signA, 0x7F, 0 );
+                goto uiZ;
+            }
+            break;
+         case softfloat_round_min:
+            uiZ = signA ? 0xBF800000 : 0;
+            goto uiZ;
+         case softfloat_round_max:
+            uiZ = signA ? 0x80000000 : 0x3F800000;
+            goto uiZ;
+         case softfloat_round_nearest_maxMag:
+            if ( expA == 0x7E ) {
+                uiZ = packToF32UI( signA, 0x7F, 0 );
+                goto uiZ;
+            }
+            break;
+        }
+        uiZ = packToF32UI( signA, 0, 0 );
+        goto uiZ;
+    }
+    /* General case: mask off the fraction bits below the units place,
+       adding the appropriate rounding increment first. */
+    lastBitMask = (uint_fast32_t) 1<<( 0x96 - expA );
+    roundBitsMask = lastBitMask - 1;
+    uiZ = uiA;
+    if ( roundingMode == softfloat_round_nearest_maxMag ) {
+        uiZ += lastBitMask>>1;
+    } else if ( roundingMode == softfloat_round_nearest_even ) {
+        uiZ += lastBitMask>>1;
+        /* Tie case: clear the last kept bit to round to even. */
+        if ( ! ( uiZ & roundBitsMask ) ) uiZ &= ~ lastBitMask;
+    } else if ( roundingMode != softfloat_round_minMag ) {
+        if ( signF32UI( uiZ ) ^ ( roundingMode == softfloat_round_max ) ) {
+            uiZ += roundBitsMask;
+        }
+    }
+    uiZ &= ~ roundBitsMask;
+    if ( exact && ( uiZ != uiA ) ) {
+        softfloat_exceptionFlags |= softfloat_flag_inexact;
+    }
+ uiZ:
+    uZ.ui = uiZ;
+    return uZ.f;
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/f32_sqrt.c 
b/target-riscv/fpu-custom-riscv/f32_sqrt.c
new file mode 100755
index 0000000..c9eb907
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/f32_sqrt.c
@@ -0,0 +1,74 @@
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+/*
+ * Correctly-rounded single-precision square root.  sqrt of a negative
+ * non-zero value (including -inf) raises invalid and returns the default
+ * NaN; +-0 and +inf pass through; NaNs propagate.
+ */
+float32_t f32_sqrt( float32_t a )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    bool signA;
+    int_fast16_t expA;
+    uint_fast32_t sigA, uiZ;
+    struct exp16_sig32 normExpSig;
+    int_fast16_t expZ;
+    uint_fast32_t sigZ;
+    uint_fast64_t term, rem;
+    union ui32_f32 uZ;
+
+    uA.f = a;
+    uiA = uA.ui;
+    signA = signF32UI( uiA );
+    expA = expF32UI( uiA );
+    sigA = fracF32UI( uiA );
+    if ( expA == 0xFF ) {
+        if ( sigA ) {
+            uiZ = softfloat_propagateNaNF32UI( uiA, 0 );
+            goto uiZ;
+        }
+        if ( ! signA ) return a;
+        goto invalid;
+    }
+    if ( signA ) {
+        /* -0 is returned unchanged; any other negative input is invalid. */
+        if ( ! ( expA | sigA ) ) return a;
+        goto invalid;
+    }
+    if ( ! expA ) {
+        if ( ! sigA ) return a;
+        normExpSig = softfloat_normSubnormalF32Sig( sigA );
+        expA = normExpSig.exp;
+        sigA = normExpSig.sig;
+    }
+    /* Result exponent is half the unbiased input exponent.  Start from an
+       estimate (+2 to bias it high), then correct by checking sigZ^2
+       against the input when the low bits suggest the estimate may be off,
+       folding any nonzero remainder into a sticky bit. */
+    expZ = ( ( expA - 0x7F )>>1 ) + 0x7E;
+    sigA = ( sigA | 0x00800000 )<<8;
+    sigZ = softfloat_estimateSqrt32( expA, sigA ) + 2;
+    if ( ( sigZ & 0x7F ) <= 5 ) {
+        if ( sigZ < 2 ) {
+            sigZ = 0x7FFFFFFF;
+            goto roundPack;
+        }
+        sigA >>= expA & 1;
+        term = (uint_fast64_t) sigZ * sigZ;
+        rem = ( (uint_fast64_t) sigA<<32 ) - term;
+        while ( UINT64_C( 0x8000000000000000 ) <= rem ) {
+            --sigZ;
+            rem += ( (uint_fast64_t) sigZ<<1 ) | 1;
+        }
+        sigZ |= ( rem != 0 );
+    }
+    sigZ = softfloat_shortShift32Right1Jam( sigZ );
+ roundPack:
+    return softfloat_roundPackToF32( 0, expZ, sigZ );
+ invalid:
+    softfloat_raiseFlags( softfloat_flag_invalid );
+    uiZ = defaultNaNF32UI;
+ uiZ:
+    uZ.ui = uiZ;
+    return uZ.f;
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/f32_sub.c 
b/target-riscv/fpu-custom-riscv/f32_sub.c
new file mode 100755
index 0000000..c64df8e
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/f32_sub.c
@@ -0,0 +1,29 @@
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+/*
+ * Single-precision subtraction.  Implemented as a + (-b): b's sign bit is
+ * toggled and the work is dispatched to the shared magnitude-add or
+ * magnitude-subtract helper depending on whether the operand signs match.
+ */
+float32_t f32_sub( float32_t a, float32_t b )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    bool signA;
+    union ui32_f32 uB;
+    uint_fast32_t uiB;
+    bool signB;
+    float32_t ( *magsRoutine )( uint_fast32_t, uint_fast32_t, bool );
+
+    uA.f = a;
+    uiA = uA.ui;
+    signA = signF32UI( uiA );
+    uB.f = b;
+    uiB = uB.ui;
+    signB = signF32UI( uiB );
+    /* Same signs -> magnitudes subtract; opposite signs -> magnitudes add. */
+    magsRoutine =
+        ( signA == signB ) ? softfloat_subMagsF32 : softfloat_addMagsF32;
+    return magsRoutine( uiA, uiB ^ 0x80000000, signA );
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/f32_to_f64.c 
b/target-riscv/fpu-custom-riscv/f32_to_f64.c
new file mode 100755
index 0000000..9f0ae5c
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/f32_to_f64.c
@@ -0,0 +1,47 @@
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+/*
+ * Widening conversion float32 -> float64.  Always exact for finite inputs;
+ * NaNs are converted through the common-NaN representation (signaling NaNs
+ * may raise invalid inside that helper).
+ */
+float64_t f32_to_f64( float32_t a )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    bool sign;
+    int_fast16_t exp;
+    uint_fast32_t sig;
+    uint_fast64_t uiZ;
+    struct exp16_sig32 normExpSig;
+    union ui64_f64 uZ;
+
+    uA.f = a;
+    uiA = uA.ui;
+    sign = signF32UI( uiA );
+    exp = expF32UI( uiA );
+    sig = fracF32UI( uiA );
+    if ( exp == 0xFF ) {
+        uiZ =
+            sig ? softfloat_commonNaNToF64UI(
+                      softfloat_f32UIToCommonNaN( uiA ) )
+                : packToF64UI( sign, 0x7FF, 0 );
+        goto uiZ;
+    }
+    if ( ! exp ) {
+        if ( ! sig ) {
+            uiZ = packToF64UI( sign, 0, 0 );
+            goto uiZ;
+        }
+        /* Subnormal float32 becomes a normal float64; the extra -1 offsets
+           the implicit leading bit restored by normalization. */
+        normExpSig = softfloat_normSubnormalF32Sig( sig );
+        exp = normExpSig.exp - 1;
+        sig = normExpSig.sig;
+    }
+    /* Rebias the exponent (0x380 = 0x3FF - 0x7F) and align the 23-bit
+       fraction into the 52-bit field (<<29). */
+    uiZ = packToF64UI( sign, exp + 0x380, (uint_fast64_t) sig<<29 );
+ uiZ:
+    uZ.ui = uiZ;
+    return uZ.f;
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/f32_to_i32.c 
b/target-riscv/fpu-custom-riscv/f32_to_i32.c
new file mode 100755
index 0000000..bbbaee0
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/f32_to_i32.c
@@ -0,0 +1,34 @@
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+#include "internals.h"
+#include "softfloat.h"
+
+/*
+ * Converts float32 to int32 using the given rounding mode.  Out-of-range
+ * values and NaN are resolved by softfloat_roundPackToI32 (which raises
+ * invalid and saturates); when `exact` is set, inexact conversions raise
+ * the inexact flag.
+ */
+int_fast32_t f32_to_i32( float32_t a, int_fast8_t roundingMode, bool exact )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    bool sign;
+    int_fast16_t exp;
+    uint_fast32_t sig;
+    uint_fast64_t sig64;
+    int_fast16_t shiftCount;
+
+    uA.f = a;
+    uiA = uA.ui;
+    sign = signF32UI( uiA );
+    exp = expF32UI( uiA );
+    sig = fracF32UI( uiA );
+    /* NaN is treated as positive so it saturates to the maximum integer. */
+    if ( ( exp == 0xFF ) && sig ) sign = 0;
+    if ( exp ) sig |= 0x00800000;
+    sig64 = (uint_fast64_t) sig<<32;
+    shiftCount = 0xAF - exp;
+    if ( 0 < shiftCount ) {
+        /* Right-jam keeps shifted-out bits as a sticky bit for rounding. */
+        sig64 = softfloat_shift64RightJam( sig64, shiftCount );
+    }
+    return softfloat_roundPackToI32( sign, sig64, roundingMode, exact );
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/f32_to_i32_r_minMag.c 
b/target-riscv/fpu-custom-riscv/f32_to_i32_r_minMag.c
new file mode 100755
index 0000000..63ff1e2
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/f32_to_i32_r_minMag.c
@@ -0,0 +1,45 @@
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+/*
+ * Converts float32 to int32, rounding toward zero (truncation).
+ * |a| < 1 yields 0 (inexact flag if requested); out-of-range magnitudes
+ * raise invalid and saturate, except exactly -2^31 which is representable.
+ */
+int_fast32_t f32_to_i32_r_minMag( float32_t a, bool exact )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    int_fast16_t exp;
+    uint_fast32_t sig;
+    bool sign;
+    int_fast16_t shiftCount;
+    int_fast32_t absZ;
+
+    uA.f = a;
+    uiA = uA.ui;
+    exp = expF32UI( uiA );
+    sig = fracF32UI( uiA );
+    if ( exp < 0x7F ) {
+        if ( exact && ( exp | sig ) ) {
+            softfloat_exceptionFlags |= softfloat_flag_inexact;
+        }
+        return 0;
+    }
+    sign = signF32UI( uiA );
+    shiftCount = 0x9E - exp;
+    if ( shiftCount <= 0 ) {
+        /* |a| >= 2^31: only exactly -2^31 converts cleanly; NaN and
+           positive overflow saturate to INT32_MAX. */
+        if ( uiA != packToF32UI( 1, 0x9E, 0 ) ) {
+            softfloat_raiseFlags( softfloat_flag_invalid );
+            if ( ! sign || ( ( exp == 0xFF ) && sig ) ) return 0x7FFFFFFF;
+        }
+        return -0x7FFFFFFF - 1;
+    }
+    sig = ( sig | 0x00800000 )<<8;
+    absZ = sig>>shiftCount;
+    /* Any bits shifted out below the integer make the result inexact. */
+    if ( exact && (uint32_t) ( sig<<( ( - shiftCount ) & 31 ) ) ) {
+        softfloat_exceptionFlags |= softfloat_flag_inexact;
+    }
+    return sign ? - absZ : absZ;
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/f32_to_i64.c 
b/target-riscv/fpu-custom-riscv/f32_to_i64.c
new file mode 100755
index 0000000..c0b8981
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/f32_to_i64.c
@@ -0,0 +1,44 @@
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+#include "internals.h"
+#include "softfloat.h"
+
+/*
+ * Converts float32 to int64 using the given rounding mode.  Magnitudes at
+ * or above 2^63 (and NaN) raise invalid and saturate; otherwise the value
+ * is shifted into a 64-bit + extra-sticky-word pair and rounded.
+ */
+int_fast64_t f32_to_i64( float32_t a, int_fast8_t roundingMode, bool exact )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    bool sign;
+    int_fast16_t exp;
+    uint_fast32_t sig;
+    int_fast16_t shiftCount;
+    uint_fast64_t sig64, extra;
+    struct uint64_extra sig64Extra;
+
+    uA.f = a;
+    uiA = uA.ui;
+    sign = signF32UI( uiA );
+    exp = expF32UI( uiA );
+    sig = fracF32UI( uiA );
+    shiftCount = 0xBE - exp;
+    if ( shiftCount < 0 ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        /* NaN (exp==0xFF with nonzero sig) saturates positive. */
+        if ( ! sign || ( ( exp == 0xFF ) && sig ) ) {
+            return INT64_C( 0x7FFFFFFFFFFFFFFF );
+        }
+        return - INT64_C( 0x7FFFFFFFFFFFFFFF ) - 1;
+    }
+    if ( exp ) sig |= 0x00800000;
+    sig64 = (uint_fast64_t) sig<<40;
+    extra = 0;
+    if ( shiftCount ) {
+        sig64Extra = softfloat_shift64ExtraRightJam( sig64, 0, shiftCount );
+        sig64 = sig64Extra.v;
+        extra = sig64Extra.extra;
+    }
+    return softfloat_roundPackToI64( sign, sig64, extra, roundingMode, exact );
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/f32_to_i64_r_minMag.c 
b/target-riscv/fpu-custom-riscv/f32_to_i64_r_minMag.c
new file mode 100755
index 0000000..33bff93
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/f32_to_i64_r_minMag.c
@@ -0,0 +1,52 @@
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+/*
+ * Converts float32 to int64, rounding toward zero (truncation).
+ * |a| < 1 yields 0; out-of-range magnitudes raise invalid and saturate,
+ * except exactly -2^63 which is representable.
+ */
+int_fast64_t f32_to_i64_r_minMag( float32_t a, bool exact )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    int_fast16_t exp;
+    uint_fast32_t sig;
+    bool sign;
+    int_fast16_t shiftCount;
+    uint_fast64_t sig64;
+    int_fast64_t absZ;
+
+    uA.f = a;
+    uiA = uA.ui;
+    exp = expF32UI( uiA );
+    sig = fracF32UI( uiA );
+    if ( exp < 0x7F ) {
+        if ( exact && ( exp | sig ) ) {
+            softfloat_exceptionFlags |= softfloat_flag_inexact;
+        }
+        return 0;
+    }
+    sign = signF32UI( uiA );
+    shiftCount = 0xBE - exp;
+    if ( shiftCount <= 0 ) {
+        /* |a| >= 2^63: only exactly -2^63 converts cleanly; NaN and
+           positive overflow saturate to INT64_MAX. */
+        if ( uiA != packToF32UI( 1, 0xBE, 0 ) ) {
+            softfloat_raiseFlags( softfloat_flag_invalid );
+            if ( ! sign || ( ( exp == 0xFF ) && sig ) ) {
+                return INT64_C( 0x7FFFFFFFFFFFFFFF );
+            }
+        }
+        return - INT64_C( 0x7FFFFFFFFFFFFFFF ) - 1;
+    }
+    sig |= 0x00800000;
+    sig64 = (uint_fast64_t) sig<<40;
+    absZ = sig64>>shiftCount;
+    /* shiftCount > 40 means fraction bits were truncated -> inexact. */
+    shiftCount = 40 - shiftCount;
+    if (
+        exact && ( shiftCount < 0 ) && (uint32_t) ( sig<<( shiftCount & 31 ) )
+    ) {
+        softfloat_exceptionFlags |= softfloat_flag_inexact;
+    }
+    return sign ? - absZ : absZ;
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/f32_to_ui32.c 
b/target-riscv/fpu-custom-riscv/f32_to_ui32.c
new file mode 100755
index 0000000..3501db8
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/f32_to_ui32.c
@@ -0,0 +1,33 @@
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+#include "internals.h"
+#include "softfloat.h"
+
+/*
+ * Converts float32 to uint32 using the given rounding mode.  Negative
+ * values, NaN, and out-of-range magnitudes are resolved inside
+ * softfloat_roundPackToUI32 (invalid flag + saturation).
+ */
+uint_fast32_t f32_to_ui32( float32_t a, int_fast8_t roundingMode, bool exact )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    bool sign;
+    int_fast16_t exp;
+    uint_fast32_t sig;
+    uint_fast64_t sig64;
+    int_fast16_t shiftCount;
+
+    uA.f = a;
+    uiA = uA.ui;
+    sign = signF32UI( uiA );
+    exp = expF32UI( uiA );
+    sig = fracF32UI( uiA );
+    if ( exp ) sig |= 0x00800000;
+    sig64 = (uint_fast64_t) sig<<32;
+    shiftCount = 0xAF - exp;
+    if ( 0 < shiftCount ) {
+        /* Right-jam keeps shifted-out bits as a sticky bit for rounding. */
+        sig64 = softfloat_shift64RightJam( sig64, shiftCount );
+    }
+    return softfloat_roundPackToUI32( sign, sig64, roundingMode, exact );
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/f32_to_ui32_r_minMag.c 
b/target-riscv/fpu-custom-riscv/f32_to_ui32_r_minMag.c
new file mode 100755
index 0000000..edd858d
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/f32_to_ui32_r_minMag.c
@@ -0,0 +1,41 @@
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+/*
+ * Converts float32 to uint32, rounding toward zero (truncation).
+ * |a| < 1 yields 0; negative inputs and magnitudes >= 2^32 (including
+ * NaN/inf) raise invalid and return 0xFFFFFFFF.
+ */
+uint_fast32_t f32_to_ui32_r_minMag( float32_t a, bool exact )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    int_fast16_t exp;
+    uint_fast32_t sig;
+    int_fast16_t shiftCount;
+    uint_fast32_t z;
+
+    uA.f = a;
+    uiA = uA.ui;
+    exp = expF32UI( uiA );
+    sig = fracF32UI( uiA );
+    if ( exp < 0x7F ) {
+        if ( exact && ( exp | sig ) ) {
+            softfloat_exceptionFlags |= softfloat_flag_inexact;
+        }
+        return 0;
+    }
+    if ( signF32UI( uiA ) ) goto invalid;
+    shiftCount = 0x9E - exp;
+    if ( shiftCount < 0 ) goto invalid;
+    sig = ( sig | 0x00800000 )<<8;
+    z = sig>>shiftCount;
+    /* Any truncated fraction bits make the conversion inexact. */
+    if ( exact && ( sig & ( ( (uint_fast32_t) 1<<shiftCount ) - 1 ) ) ) {
+        softfloat_exceptionFlags |= softfloat_flag_inexact;
+    }
+    return z;
+ invalid:
+    softfloat_raiseFlags( softfloat_flag_invalid );
+    return 0xFFFFFFFF;
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/f32_to_ui64.c 
b/target-riscv/fpu-custom-riscv/f32_to_ui64.c
new file mode 100755
index 0000000..6cdcf74
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/f32_to_ui64.c
@@ -0,0 +1,42 @@
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+#include "internals.h"
+#include "softfloat.h"
+
+/*
+ * Converts float32 to uint64 using the given rounding mode.  Magnitudes at
+ * or above 2^63's alignment limit raise invalid and saturate to all-ones;
+ * negative values are resolved inside softfloat_roundPackToUI64.
+ */
+uint_fast64_t f32_to_ui64( float32_t a, int_fast8_t roundingMode, bool exact )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    bool sign;
+    int_fast16_t exp;
+    uint_fast32_t sig;
+    int_fast16_t shiftCount;
+    uint_fast64_t sig64, extra;
+    struct uint64_extra sig64Extra;
+
+    uA.f = a;
+    uiA = uA.ui;
+    sign = signF32UI( uiA );
+    exp = expF32UI( uiA );
+    sig = fracF32UI( uiA );
+    shiftCount = 0xBE - exp;
+    if ( shiftCount < 0 ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        return UINT64_C( 0xFFFFFFFFFFFFFFFF );
+    }
+    if ( exp ) sig |= 0x00800000;
+    sig64 = (uint_fast64_t) sig<<40;
+    extra = 0;
+    if ( shiftCount ) {
+        sig64Extra = softfloat_shift64ExtraRightJam( sig64, 0, shiftCount );
+        sig64 = sig64Extra.v;
+        extra = sig64Extra.extra;
+    }
+    return
+        softfloat_roundPackToUI64( sign, sig64, extra, roundingMode, exact );
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/f32_to_ui64_r_minMag.c 
b/target-riscv/fpu-custom-riscv/f32_to_ui64_r_minMag.c
new file mode 100755
index 0000000..738d6b1
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/f32_to_ui64_r_minMag.c
@@ -0,0 +1,45 @@
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+/*
+ * Converts float32 to uint64, rounding toward zero (truncation).
+ * |a| < 1 yields 0; negative inputs and magnitudes >= 2^64 (including
+ * NaN/inf) raise invalid and return all-ones.
+ */
+uint_fast64_t f32_to_ui64_r_minMag( float32_t a, bool exact )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    int_fast16_t exp;
+    uint_fast32_t sig;
+    int_fast16_t shiftCount;
+    uint_fast64_t sig64, z;
+
+    uA.f = a;
+    uiA = uA.ui;
+    exp = expF32UI( uiA );
+    sig = fracF32UI( uiA );
+    if ( exp < 0x7F ) {
+        if ( exact && ( exp | sig ) ) {
+            softfloat_exceptionFlags |= softfloat_flag_inexact;
+        }
+        return 0;
+    }
+    if ( signF32UI( uiA ) ) goto invalid;
+    shiftCount = 0xBE - exp;
+    if ( shiftCount < 0 ) goto invalid;
+    sig |= 0x00800000;
+    sig64 = (uint_fast64_t) sig<<40;
+    z = sig64>>shiftCount;
+    /* shiftCount > 40 means fraction bits were truncated -> inexact. */
+    shiftCount = 40 - shiftCount;
+    if (
+        exact && ( shiftCount < 0 ) && (uint32_t) ( sig<<( shiftCount & 31 ) )
+    ) {
+        softfloat_exceptionFlags |= softfloat_flag_inexact;
+    }
+    return z;
+ invalid:
+    softfloat_raiseFlags( softfloat_flag_invalid );
+    return UINT64_C( 0xFFFFFFFFFFFFFFFF );
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/f64_add.c 
b/target-riscv/fpu-custom-riscv/f64_add.c
new file mode 100755
index 0000000..9ec4b5f
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/f64_add.c
@@ -0,0 +1,29 @@
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+/*
+ * Double-precision addition.  Dispatches to the shared magnitude-add
+ * helper when the signs match, and to magnitude-subtract otherwise; the
+ * helper performs alignment, rounding and special-case handling.
+ */
+float64_t f64_add( float64_t a, float64_t b )
+{
+    union ui64_f64 uA;
+    uint_fast64_t uiA;
+    bool signA;
+    union ui64_f64 uB;
+    uint_fast64_t uiB;
+    bool signB;
+    float64_t ( *magsRoutine )( uint_fast64_t, uint_fast64_t, bool );
+
+    uA.f = a;
+    uiA = uA.ui;
+    signA = signF64UI( uiA );
+    uB.f = b;
+    uiB = uB.ui;
+    signB = signF64UI( uiB );
+    magsRoutine =
+        ( signA == signB ) ? softfloat_addMagsF64 : softfloat_subMagsF64;
+    return magsRoutine( uiA, uiB, signA );
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/f64_classify.c 
b/target-riscv/fpu-custom-riscv/f64_classify.c
new file mode 100755
index 0000000..2ec124b
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/f64_classify.c
@@ -0,0 +1,33 @@
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+/*
+ * Classifies a float64 into a 10-bit one-hot mask:
+ *   bit 0: -infinity        bit 5: +subnormal
+ *   bit 1: -normal          bit 6: +normal
+ *   bit 2: -subnormal       bit 7: +infinity
+ *   bit 3: -zero            bit 8: signaling NaN
+ *   bit 4: +zero            bit 9: quiet NaN
+ * (Layout matches the RISC-V FCLASS.D result format — this file is part
+ * of a RISC-V port; confirm against the ISA spec.)
+ */
+uint_fast16_t f64_classify( float64_t a )
+{
+    union ui64_f64 uA;
+    uint_fast64_t uiA;
+
+    uA.f = a;
+    uiA = uA.ui;
+
+    uint_fast16_t infOrNaN = expF64UI( uiA ) == 0x7FF;
+    uint_fast16_t subnormalOrZero = expF64UI( uiA ) == 0;
+    bool sign = signF64UI( uiA );
+
+    return
+        (  sign && infOrNaN && fracF64UI( uiA ) == 0 )          << 0 |
+        (  sign && !infOrNaN && !subnormalOrZero )              << 1 |
+        (  sign && subnormalOrZero && fracF64UI( uiA ) )        << 2 |
+        (  sign && subnormalOrZero && fracF64UI( uiA ) == 0 )   << 3 |
+        ( !sign && infOrNaN && fracF64UI( uiA ) == 0 )          << 7 |
+        ( !sign && !infOrNaN && !subnormalOrZero )              << 6 |
+        ( !sign && subnormalOrZero && fracF64UI( uiA ) )        << 5 |
+        ( !sign && subnormalOrZero && fracF64UI( uiA ) == 0 )   << 4 |
+        ( isNaNF64UI( uiA ) &&  softfloat_isSigNaNF64UI( uiA )) << 8 |
+        ( isNaNF64UI( uiA ) && !softfloat_isSigNaNF64UI( uiA )) << 9;
+}
+
diff --git a/target-riscv/fpu-custom-riscv/f64_div.c 
b/target-riscv/fpu-custom-riscv/f64_div.c
new file mode 100755
index 0000000..9bc72b3
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/f64_div.c
@@ -0,0 +1,104 @@
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+/*
+ * IEEE double-precision division.  Special cases: NaNs propagate;
+ * inf/inf and 0/0 are invalid (default NaN); finite/0 raises the
+ * divide-by-zero flag (named softfloat_flag_infinity in this port) and
+ * returns a signed infinity.
+ */
+float64_t f64_div( float64_t a, float64_t b )
+{
+    union ui64_f64 uA;
+    uint_fast64_t uiA;
+    bool signA;
+    int_fast16_t expA;
+    uint_fast64_t sigA;
+    union ui64_f64 uB;
+    uint_fast64_t uiB;
+    bool signB;
+    int_fast16_t expB;
+    uint_fast64_t sigB;
+    bool signZ;
+    struct exp16_sig64 normExpSig;
+    int_fast16_t expZ;
+    uint_fast64_t sigZ;
+    struct uint128 term, rem;
+    uint_fast64_t uiZ;
+    union ui64_f64 uZ;
+
+    uA.f = a;
+    uiA = uA.ui;
+    signA = signF64UI( uiA );
+    expA = expF64UI( uiA );
+    sigA = fracF64UI( uiA );
+    uB.f = b;
+    uiB = uB.ui;
+    signB = signF64UI( uiB );
+    expB = expF64UI( uiB );
+    sigB = fracF64UI( uiB );
+    signZ = signA ^ signB;
+    if ( expA == 0x7FF ) {
+        if ( sigA ) goto propagateNaN;
+        if ( expB == 0x7FF ) {
+            if ( sigB ) goto propagateNaN;
+            /* infinity / infinity -> invalid. */
+            goto invalid;
+        }
+        goto infinity;
+    }
+    if ( expB == 0x7FF ) {
+        if ( sigB ) goto propagateNaN;
+        goto zero;
+    }
+    if ( ! expB ) {
+        if ( ! sigB ) {
+            /* Division by zero: 0/0 is invalid, otherwise signal and
+               return infinity. */
+            if ( ! ( expA | sigA ) ) goto invalid;
+            softfloat_raiseFlags( softfloat_flag_infinity );
+            goto infinity;
+        }
+        normExpSig = softfloat_normSubnormalF64Sig( sigB );
+        expB = normExpSig.exp;
+        sigB = normExpSig.sig;
+    }
+    if ( ! expA ) {
+        if ( ! sigA ) goto zero;
+        normExpSig = softfloat_normSubnormalF64Sig( sigA );
+        expA = normExpSig.exp;
+        sigA = normExpSig.sig;
+    }
+    /* Pre-scale so the quotient estimate lands in [1,2); if sigA would
+       make the quotient >= 2, halve it and bump the exponent. */
+    expZ = expA - expB + 0x3FD;
+    sigA = ( sigA | UINT64_C( 0x0010000000000000 ) )<<10;
+    sigB = ( sigB | UINT64_C( 0x0010000000000000 ) )<<11;
+    if ( sigB <= ( sigA + sigA ) ) {
+        ++expZ;
+        sigA >>= 1;
+    }
+    sigZ = softfloat_estimateDiv128To64( sigA, 0, sigB );
+    /* When the estimate's low bits are near a rounding boundary, compute
+       the exact remainder and correct, folding any residue into a sticky
+       bit. */
+    if ( ( sigZ & 0x1FF ) <= 2 ) {
+        term = softfloat_mul64To128( sigB, sigZ );
+        rem = softfloat_sub128( sigA, 0, term.v64, term.v0 );
+        while ( UINT64_C( 0x8000000000000000 ) <= rem.v64 ) {
+            --sigZ;
+            rem = softfloat_add128( rem.v64, rem.v0, 0, sigB );
+        }
+        sigZ |= ( rem.v0 != 0 );
+    }
+    return softfloat_roundPackToF64( signZ, expZ, sigZ );
+ propagateNaN:
+    uiZ = softfloat_propagateNaNF64UI( uiA, uiB );
+    goto uiZ;
+ invalid:
+    softfloat_raiseFlags( softfloat_flag_invalid );
+    uiZ = defaultNaNF64UI;
+    goto uiZ;
+ infinity:
+    uiZ = packToF64UI( signZ, 0x7FF, 0 );
+    goto uiZ;
+ zero:
+    uiZ = packToF64UI( signZ, 0, 0 );
+ uiZ:
+    uZ.ui = uiZ;
+    return uZ.f;
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/f64_eq.c 
b/target-riscv/fpu-custom-riscv/f64_eq.c
new file mode 100755
index 0000000..925aabc
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/f64_eq.c
@@ -0,0 +1,35 @@
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+/*
+ * Quiet equality comparison for float64.  NaN operands compare unequal;
+ * only signaling NaNs raise the invalid flag.  +0 and -0 compare equal.
+ */
+bool f64_eq( float64_t a, float64_t b )
+{
+    union ui64_f64 uA;
+    uint_fast64_t uiA;
+    union ui64_f64 uB;
+    uint_fast64_t uiB;
+
+    uA.f = a;
+    uiA = uA.ui;
+    uB.f = b;
+    uiB = uB.ui;
+    if (
+           ( ( expF64UI( uiA ) == 0x7FF ) && fracF64UI( uiA ) )
+        || ( ( expF64UI( uiB ) == 0x7FF ) && fracF64UI( uiB ) )
+    ) {
+        if (
+            softfloat_isSigNaNF64UI( uiA ) || softfloat_isSigNaNF64UI( uiB )
+        ) {
+            softfloat_raiseFlags( softfloat_flag_invalid );
+        }
+        return false;
+    }
+    /* Bit-equal, or both zero (masking the sign bit leaves nothing). */
+    return
+        ( uiA == uiB ) || ! ( ( uiA | uiB ) & UINT64_C( 0x7FFFFFFFFFFFFFFF ) );
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/f64_eq_signaling.c 
b/target-riscv/fpu-custom-riscv/f64_eq_signaling.c
new file mode 100755
index 0000000..7a54dc1
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/f64_eq_signaling.c
@@ -0,0 +1,30 @@
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+/*
+ * Signaling equality comparison for float64: like f64_eq, but *any* NaN
+ * operand (quiet or signaling) raises the invalid flag before returning
+ * false.
+ */
+bool f64_eq_signaling( float64_t a, float64_t b )
+{
+    union ui64_f64 uA;
+    uint_fast64_t uiA;
+    union ui64_f64 uB;
+    uint_fast64_t uiB;
+
+    uA.f = a;
+    uiA = uA.ui;
+    uB.f = b;
+    uiB = uB.ui;
+    if (
+           ( ( expF64UI( uiA ) == 0x7FF ) && fracF64UI( uiA ) )
+        || ( ( expF64UI( uiB ) == 0x7FF ) && fracF64UI( uiB ) )
+    ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        return false;
+    }
+    /* Bit-equal, or both zero (+0 == -0). */
+    return
+        ( uiA == uiB ) || ! ( ( uiA | uiB ) & UINT64_C( 0x7FFFFFFFFFFFFFFF ) );
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/f64_isSignalingNaN.c 
b/target-riscv/fpu-custom-riscv/f64_isSignalingNaN.c
new file mode 100755
index 0000000..d720ac1
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/f64_isSignalingNaN.c
@@ -0,0 +1,16 @@
+
+#include <stdbool.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+/*
+ * Returns true iff `a` is a signaling NaN.  Raises no flags.
+ */
+bool f64_isSignalingNaN( float64_t a )
+{
+    union ui64_f64 uA;
+
+    uA.f = a;
+    return softfloat_isSigNaNF64UI( uA.ui );
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/f64_le.c 
b/target-riscv/fpu-custom-riscv/f64_le.c
new file mode 100755
index 0000000..e6c5caf
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/f64_le.c
@@ -0,0 +1,35 @@
+/* f64_le.c: IEEE 754 compareSignalingLessEqual for binary64. */
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+bool f64_le( float64_t a, float64_t b )
+{
+    union ui64_f64 uA;
+    uint_fast64_t uiA;
+    union ui64_f64 uB;
+    uint_fast64_t uiB;
+    bool signA, signB;
+    /* Reinterpret both operands as raw 64-bit patterns. */
+    uA.f = a;
+    uiA = uA.ui;
+    uB.f = b;
+    uiB = uB.ui;
+    if (
+           ( ( expF64UI( uiA ) == 0x7FF ) && fracF64UI( uiA ) )  /* a is NaN */
+        || ( ( expF64UI( uiB ) == 0x7FF ) && fracF64UI( uiB ) )  /* b is NaN */
+    ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );  /* unordered: any NaN is invalid here */
+        return false;
+    }
+    signA = signF64UI( uiA );
+    signB = signF64UI( uiB );
+    return
+        ( signA != signB )
+            ? signA || ! ( ( uiA | uiB ) & UINT64_C( 0x7FFFFFFFFFFFFFFF ) )  /* -x <= +y, or both zero */
+            : ( uiA == uiB ) || ( signA ^ ( uiA < uiB ) );  /* same sign: magnitude order flips when negative */
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/f64_le_quiet.c b/target-riscv/fpu-custom-riscv/f64_le_quiet.c
new file mode 100755
index 0000000..e9b7ede
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/f64_le_quiet.c
@@ -0,0 +1,40 @@
+/* f64_le_quiet.c: IEEE 754 compareQuietLessEqual for binary64. */
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+bool f64_le_quiet( float64_t a, float64_t b )
+{
+    union ui64_f64 uA;
+    uint_fast64_t uiA;
+    union ui64_f64 uB;
+    uint_fast64_t uiB;
+    bool signA, signB;
+    /* Reinterpret both operands as raw 64-bit patterns. */
+    uA.f = a;
+    uiA = uA.ui;
+    uB.f = b;
+    uiB = uB.ui;
+    if (
+           ( ( expF64UI( uiA ) == 0x7FF ) && fracF64UI( uiA ) )  /* a is NaN */
+        || ( ( expF64UI( uiB ) == 0x7FF ) && fracF64UI( uiB ) )  /* b is NaN */
+    ) {
+        if (
+            softfloat_isSigNaNF64UI( uiA ) || softfloat_isSigNaNF64UI( uiB )
+        ) {
+            softfloat_raiseFlags( softfloat_flag_invalid );  /* quiet compare: only signaling NaNs raise invalid */
+        }
+        return false;
+    }
+    signA = signF64UI( uiA );
+    signB = signF64UI( uiB );
+    return
+        ( signA != signB )
+            ? signA || ! ( ( uiA | uiB ) & UINT64_C( 0x7FFFFFFFFFFFFFFF ) )  /* -x <= +y, or both zero */
+            : ( uiA == uiB ) || ( signA ^ ( uiA < uiB ) );  /* same sign: magnitude order flips when negative */
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/f64_lt.c b/target-riscv/fpu-custom-riscv/f64_lt.c
new file mode 100755
index 0000000..1b2f696
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/f64_lt.c
@@ -0,0 +1,35 @@
+/* f64_lt.c: IEEE 754 compareSignalingLess for binary64. */
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+bool f64_lt( float64_t a, float64_t b )
+{
+    union ui64_f64 uA;
+    uint_fast64_t uiA;
+    union ui64_f64 uB;
+    uint_fast64_t uiB;
+    bool signA, signB;
+    /* Reinterpret both operands as raw 64-bit patterns. */
+    uA.f = a;
+    uiA = uA.ui;
+    uB.f = b;
+    uiB = uB.ui;
+    if (
+           ( ( expF64UI( uiA ) == 0x7FF ) && fracF64UI( uiA ) )  /* a is NaN */
+        || ( ( expF64UI( uiB ) == 0x7FF ) && fracF64UI( uiB ) )  /* b is NaN */
+    ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );  /* unordered: any NaN is invalid here */
+        return false;
+    }
+    signA = signF64UI( uiA );
+    signB = signF64UI( uiB );
+    return
+        ( signA != signB )
+            ? signA && ( ( uiA | uiB ) & UINT64_C( 0x7FFFFFFFFFFFFFFF ) )  /* -x < +y unless both are zero */
+            : ( uiA != uiB ) && ( signA ^ ( uiA < uiB ) );  /* same sign: strict magnitude order, flipped when negative */
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/f64_lt_quiet.c b/target-riscv/fpu-custom-riscv/f64_lt_quiet.c
new file mode 100755
index 0000000..f27e6da
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/f64_lt_quiet.c
@@ -0,0 +1,40 @@
+/* f64_lt_quiet.c: IEEE 754 compareQuietLess for binary64. */
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+bool f64_lt_quiet( float64_t a, float64_t b )
+{
+    union ui64_f64 uA;
+    uint_fast64_t uiA;
+    union ui64_f64 uB;
+    uint_fast64_t uiB;
+    bool signA, signB;
+    /* Reinterpret both operands as raw 64-bit patterns. */
+    uA.f = a;
+    uiA = uA.ui;
+    uB.f = b;
+    uiB = uB.ui;
+    if (
+           ( ( expF64UI( uiA ) == 0x7FF ) && fracF64UI( uiA ) )  /* a is NaN */
+        || ( ( expF64UI( uiB ) == 0x7FF ) && fracF64UI( uiB ) )  /* b is NaN */
+    ) {
+        if (
+            softfloat_isSigNaNF64UI( uiA ) || softfloat_isSigNaNF64UI( uiB )
+        ) {
+            softfloat_raiseFlags( softfloat_flag_invalid );  /* quiet compare: only signaling NaNs raise invalid */
+        }
+        return false;
+    }
+    signA = signF64UI( uiA );
+    signB = signF64UI( uiB );
+    return
+        ( signA != signB )
+            ? signA && ( ( uiA | uiB ) & UINT64_C( 0x7FFFFFFFFFFFFFFF ) )  /* -x < +y unless both are zero */
+            : ( uiA != uiB ) && ( signA ^ ( uiA < uiB ) );  /* same sign: strict magnitude order, flipped when negative */
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/f64_mul.c b/target-riscv/fpu-custom-riscv/f64_mul.c
new file mode 100755
index 0000000..4b5dc4e
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/f64_mul.c
@@ -0,0 +1,91 @@
+/* f64_mul.c: correctly-rounded binary64 multiplication (SoftFloat-3 style). */
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+float64_t f64_mul( float64_t a, float64_t b )
+{
+    union ui64_f64 uA;
+    uint_fast64_t uiA;
+    bool signA;
+    int_fast16_t expA;
+    uint_fast64_t sigA;
+    union ui64_f64 uB;
+    uint_fast64_t uiB;
+    bool signB;
+    int_fast16_t expB;
+    uint_fast64_t sigB;
+    bool signZ;
+    uint_fast64_t magBits;
+    struct exp16_sig64 normExpSig;
+    int_fast16_t expZ;
+    struct uint128 sigZ128;
+    uint_fast64_t sigZ, uiZ;
+    union ui64_f64 uZ;
+    /* Unpack sign, biased exponent, and fraction of both operands. */
+    uA.f = a;
+    uiA = uA.ui;
+    signA = signF64UI( uiA );
+    expA = expF64UI( uiA );
+    sigA = fracF64UI( uiA );
+    uB.f = b;
+    uiB = uB.ui;
+    signB = signF64UI( uiB );
+    expB = expF64UI( uiB );
+    sigB = fracF64UI( uiB );
+    signZ = signA ^ signB;
+    if ( expA == 0x7FF ) {  /* a is inf or NaN */
+        if ( sigA || ( ( expB == 0x7FF ) && sigB ) ) goto propagateNaN;
+        magBits = expB | sigB;  /* zero iff b is a zero (inf * 0 case) */
+        goto infArg;
+    }
+    if ( expB == 0x7FF ) {  /* b is inf or NaN */
+        if ( sigB ) goto propagateNaN;
+        magBits = expA | sigA;
+        goto infArg;
+    }
+    if ( ! expA ) {  /* a is zero or subnormal */
+        if ( ! sigA ) goto zero;
+        normExpSig = softfloat_normSubnormalF64Sig( sigA );
+        expA = normExpSig.exp;
+        sigA = normExpSig.sig;
+    }
+    if ( ! expB ) {  /* b is zero or subnormal */
+        if ( ! sigB ) goto zero;
+        normExpSig = softfloat_normSubnormalF64Sig( sigB );
+        expB = normExpSig.exp;
+        sigB = normExpSig.sig;
+    }
+    expZ = expA + expB - 0x3FF;  /* remove one bias from the exponent sum */
+    sigA = ( sigA | UINT64_C( 0x0010000000000000 ) )<<10;  /* restore implicit bit, pre-align */
+    sigB = ( sigB | UINT64_C( 0x0010000000000000 ) )<<11;
+    sigZ128 = softfloat_mul64To128( sigA, sigB );
+    sigZ = sigZ128.v64 | ( sigZ128.v0 != 0 );  /* fold low 64 bits into a sticky bit */
+    if ( sigZ < UINT64_C( 0x4000000000000000 ) ) {  /* normalize when product lost a bit */
+        --expZ;
+        sigZ <<= 1;
+    }
+    return softfloat_roundPackToF64( signZ, expZ, sigZ );
+ propagateNaN:
+    uiZ = softfloat_propagateNaNF64UI( uiA, uiB );
+    goto uiZ;
+ infArg:
+    if ( ! magBits ) {  /* inf * 0: invalid operation, default NaN */
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        uiZ = defaultNaNF64UI;
+    } else {
+        uiZ = packToF64UI( signZ, 0x7FF, 0 );
+    }
+    goto uiZ;
+ zero:
+    uiZ = packToF64UI( signZ, 0, 0 );
+ uiZ:
+    uZ.ui = uiZ;
+    return uZ.f;
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/f64_mulAdd.c b/target-riscv/fpu-custom-riscv/f64_mulAdd.c
new file mode 100755
index 0000000..fa1669a
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/f64_mulAdd.c
@@ -0,0 +1,25 @@
+/* f64_mulAdd.c: fused multiply-add (a*b)+c for binary64, single rounding. */
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+float64_t f64_mulAdd( float64_t a, float64_t b, float64_t c )
+{
+    union ui64_f64 uA;
+    uint_fast64_t uiA;
+    union ui64_f64 uB;
+    uint_fast64_t uiB;
+    union ui64_f64 uC;
+    uint_fast64_t uiC;
+    /* Reinterpret all three operands as raw bits and delegate. */
+    uA.f = a;
+    uiA = uA.ui;
+    uB.f = b;
+    uiB = uB.ui;
+    uC.f = c;
+    uiC = uC.ui;
+    return softfloat_mulAddF64( 0, uiA, uiB, uiC );  /* op 0: plain (a*b)+c, no negation */
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/f64_rem.c b/target-riscv/fpu-custom-riscv/f64_rem.c
new file mode 100755
index 0000000..ffb031f
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/f64_rem.c
@@ -0,0 +1,113 @@
+/* f64_rem.c: IEEE 754 remainder for binary64 (quotient rounded to nearest). */
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+float64_t f64_rem( float64_t a, float64_t b )
+{
+    union ui64_f64 uA;
+    uint_fast64_t uiA;
+    bool signA;
+    int_fast16_t expA;
+    uint_fast64_t sigA;
+    union ui64_f64 uB;
+    uint_fast64_t uiB;
+//    bool signB;
+    int_fast16_t expB;
+    uint_fast64_t sigB;
+    struct exp16_sig64 normExpSig;
+    int_fast16_t expDiff;
+    uint_fast64_t q, alternateSigA;
+    uint64_t sigMean;
+    bool signZ;
+    uint_fast64_t uiZ;
+    union ui64_f64 uZ;
+    /* Unpack operands; the sign of b does not affect the remainder. */
+    uA.f = a;
+    uiA = uA.ui;
+    signA = signF64UI( uiA );
+    expA = expF64UI( uiA );
+    sigA = fracF64UI( uiA );
+    uB.f = b;
+    uiB = uB.ui;
+//    signB = signF64UI( uiB );
+    expB = expF64UI( uiB );
+    sigB = fracF64UI( uiB );
+    if ( expA == 0x7FF ) {  /* a is inf or NaN */
+        if ( sigA || ( ( expB == 0x7FF ) && sigB ) ) goto propagateNaN;
+        goto invalid;  /* inf rem anything is invalid */
+    }
+    if ( expB == 0x7FF ) {  /* b is inf or NaN */
+        if ( sigB ) goto propagateNaN;
+        return a;  /* finite rem inf == a, exactly */
+    }
+    if ( ! expB ) {  /* b is zero or subnormal */
+        if ( ! sigB ) goto invalid;  /* rem by zero is invalid */
+        normExpSig = softfloat_normSubnormalF64Sig( sigB );
+        expB = normExpSig.exp;
+        sigB = normExpSig.sig;
+    }
+    if ( ! expA ) {  /* a is zero or subnormal */
+        if ( ! sigA ) return a;
+        normExpSig = softfloat_normSubnormalF64Sig( sigA );
+        expA = normExpSig.exp;
+        sigA = normExpSig.sig;
+    }
+    expDiff = expA - expB;
+    sigA = ( sigA | UINT64_C( 0x0010000000000000 ) )<<11;  /* restore implicit bits */
+    sigB = ( sigB | UINT64_C( 0x0010000000000000 ) )<<11;
+    if ( expDiff < 0 ) {
+        if ( expDiff < -1 ) return a;  /* |a| far smaller than |b|: a is the remainder */
+        sigA >>= 1;
+    }
+    q = ( sigB <= sigA );  /* first quotient bit */
+    if ( q ) sigA -= sigB;
+    expDiff -= 64;
+    while ( 0 < expDiff ) {  /* reduce the exponent gap ~62 bits per iteration */
+        q = softfloat_estimateDiv128To64( sigA, 0, sigB );
+        q = ( 2 < q ) ? q - 2 : 0;  /* under-estimate so the partial remainder stays nonnegative */
+        sigA = - ( ( sigB>>2 ) * q );
+        expDiff -= 62;
+    }
+    expDiff += 64;
+    if ( 0 < expDiff ) {  /* final partial division for the leftover bits */
+        q = softfloat_estimateDiv128To64( sigA, 0, sigB );
+        q = ( 2 < q ) ? q - 2 : 0;
+        q >>= 64 - expDiff;
+        sigB >>= 2;
+        sigA = ( ( sigA>>1 )<<( expDiff - 1 ) ) - sigB * q;
+    } else {
+        sigA >>= 2;
+        sigB >>= 2;
+    }
+    do {  /* step quotient until remainder goes negative (wraps past 2^63) */
+        alternateSigA = sigA;
+        ++q;
+        sigA -= sigB;
+    } while ( sigA < UINT64_C( 0x8000000000000000 ) );
+    sigMean = sigA + alternateSigA;
+    if (
+        ( UINT64_C( 0x8000000000000000 ) <= sigMean )
+            || ( ! sigMean && ( q & 1 ) )  /* tie: round quotient to even */
+    ) {
+        sigA = alternateSigA;
+    }
+    signZ = ( UINT64_C( 0x8000000000000000 ) <= sigA );  /* remainder may come out negative */
+    if ( signZ ) sigA = - sigA;
+    return softfloat_normRoundPackToF64( signA ^ signZ, expB, sigA );
+ propagateNaN:
+    uiZ = softfloat_propagateNaNF64UI( uiA, uiB );
+    goto uiZ;
+ invalid:
+    softfloat_raiseFlags( softfloat_flag_invalid );
+    uiZ = defaultNaNF64UI;
+ uiZ:
+    uZ.ui = uiZ;
+    return uZ.f;
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/f64_roundToInt.c b/target-riscv/fpu-custom-riscv/f64_roundToInt.c
new file mode 100755
index 0000000..ef16dfa
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/f64_roundToInt.c
@@ -0,0 +1,80 @@
+/* f64_roundToInt.c: round a binary64 value to an integral binary64 value. */
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+float64_t f64_roundToInt( float64_t a, int_fast8_t roundingMode, bool exact )
+{
+    union ui64_f64 uA;
+    uint_fast64_t uiA;
+    int_fast16_t expA;
+    uint_fast64_t uiZ;
+    bool signA;
+    uint_fast64_t lastBitMask, roundBitsMask;
+    union ui64_f64 uZ;
+    /* exact==true requests the inexact flag when rounding discards bits */
+    uA.f = a;
+    uiA = uA.ui;
+    expA = expF64UI( uiA );
+    if ( 0x433 <= expA ) {  /* |a| >= 2^52: already integral, or inf/NaN */
+        if ( ( expA == 0x7FF ) && fracF64UI( uiA ) ) {
+            uiZ = softfloat_propagateNaNF64UI( uiA, 0 );
+            goto uiZ;
+        }
+        return a;
+    }
+    if ( expA <= 0x3FE ) {  /* |a| < 1: result is +-0 or +-1 */
+        if ( ! ( uiA & UINT64_C( 0x7FFFFFFFFFFFFFFF ) ) ) return a;  /* +-0 unchanged */
+        if ( exact ) softfloat_exceptionFlags |= softfloat_flag_inexact;
+        signA = signF64UI( uiA );
+        switch ( roundingMode ) {
+         case softfloat_round_nearest_even:
+            if ( ( expA == 0x3FE ) && fracF64UI( uiA ) ) {  /* strictly between 0.5 and 1 */
+                uiZ = packToF64UI( signA, 0x3FF, 0 );
+                goto uiZ;
+            }
+            break;
+         case softfloat_round_min:
+            uiZ = signA ? UINT64_C( 0xBFF0000000000000 ) : 0;  /* toward -inf: -1 or +0 */
+            goto uiZ;
+         case softfloat_round_max:
+            uiZ =
+                signA ? UINT64_C( 0x8000000000000000 )  /* toward +inf: -0 or +1 */
+                    : UINT64_C( 0x3FF0000000000000 );
+            goto uiZ;
+         case softfloat_round_nearest_maxMag:
+            if ( expA == 0x3FE ) {  /* |a| >= 0.5 rounds away to +-1 */
+                uiZ = packToF64UI( signA, 0x3FF, 0 );
+                goto uiZ;
+            }
+            break;
+        }
+        uiZ = packToF64UI( signA, 0, 0 );  /* everything else truncates to +-0 */
+        goto uiZ;
+    }
+    lastBitMask = (uint_fast64_t) 1<<( 0x433 - expA );  /* position of the integer LSB */
+    roundBitsMask = lastBitMask - 1;  /* fraction bits to be discarded */
+    uiZ = uiA;
+    if ( roundingMode == softfloat_round_nearest_maxMag ) {
+        uiZ += lastBitMask>>1;
+    } else if ( roundingMode == softfloat_round_nearest_even ) {
+        uiZ += lastBitMask>>1;
+        if ( ! ( uiZ & roundBitsMask ) ) uiZ &= ~ lastBitMask;  /* tie: clear LSB (round to even) */
+    } else if ( roundingMode != softfloat_round_minMag ) {
+        if ( signF64UI( uiZ ) ^ ( roundingMode == softfloat_round_max ) ) {  /* rounding away from zero? */
+            uiZ += roundBitsMask;
+        }
+    }
+    uiZ &= ~ roundBitsMask;  /* truncate fraction bits */
+    if ( exact && ( uiZ != uiA ) ) {
+        softfloat_exceptionFlags |= softfloat_flag_inexact;
+    }
+ uiZ:
+    uZ.ui = uiZ;
+    return uZ.f;
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/f64_sqrt.c b/target-riscv/fpu-custom-riscv/f64_sqrt.c
new file mode 100755
index 0000000..cd91010
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/f64_sqrt.c
@@ -0,0 +1,74 @@
+/* f64_sqrt.c: correctly-rounded binary64 square root. */
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+float64_t f64_sqrt( float64_t a )
+{
+    union ui64_f64 uA;
+    uint_fast64_t uiA;
+    bool signA;
+    int_fast16_t expA;
+    uint_fast64_t sigA, uiZ;
+    struct exp16_sig64 normExpSig;
+    int_fast16_t expZ;
+    uint_fast32_t sigZ32;
+    uint_fast64_t sigZ;
+    struct uint128 term, rem;
+    union ui64_f64 uZ;
+    /* Unpack the operand. */
+    uA.f = a;
+    uiA = uA.ui;
+    signA = signF64UI( uiA );
+    expA = expF64UI( uiA );
+    sigA = fracF64UI( uiA );
+    if ( expA == 0x7FF ) {  /* inf or NaN */
+        if ( sigA ) {
+            uiZ = softfloat_propagateNaNF64UI( uiA, 0 );
+            goto uiZ;
+        }
+        if ( ! signA ) return a;  /* sqrt(+inf) == +inf */
+        goto invalid;
+    }
+    if ( signA ) {  /* negative: only -0 is legal (returns -0) */
+        if ( ! ( expA | sigA ) ) return a;
+        goto invalid;
+    }
+    if ( ! expA ) {  /* zero or subnormal */
+        if ( ! sigA ) return a;
+        normExpSig = softfloat_normSubnormalF64Sig( sigA );
+        expA = normExpSig.exp;
+        sigA = normExpSig.sig;
+    }
+    expZ = ( ( expA - 0x3FF )>>1 ) + 0x3FE;  /* halve the unbiased exponent */
+    sigA |= UINT64_C( 0x0010000000000000 );  /* restore implicit bit */
+    sigZ32 = softfloat_estimateSqrt32( expA, sigA>>21 );  /* 32-bit initial estimate */
+    sigA <<= 9 - ( expA & 1 );  /* align by exponent parity */
+    sigZ =
+        softfloat_estimateDiv128To64( sigA, 0, (uint_fast64_t) sigZ32<<32 )
+            + ( (uint_fast64_t) sigZ32<<30 );  /* one Newton-style refinement step */
+    if ( ( sigZ & 0x1FF ) <= 5 ) {  /* near a rounding boundary: compute exact remainder */
+        term = softfloat_mul64To128( sigZ, sigZ );
+        rem = softfloat_sub128( sigA, 0, term.v64, term.v0 );
+        while ( UINT64_C( 0x8000000000000000 ) <= rem.v64 ) {  /* estimate too high: step down */
+            --sigZ;
+            rem =
+                softfloat_add128(
+                    rem.v64, rem.v0, sigZ>>63, (uint64_t) ( sigZ<<1 ) );
+        }
+        sigZ |= ( ( rem.v64 | rem.v0 ) != 0 );  /* sticky bit from nonzero remainder */
+    }
+    return softfloat_roundPackToF64( 0, expZ, sigZ );
+ invalid:
+    softfloat_raiseFlags( softfloat_flag_invalid );
+    uiZ = defaultNaNF64UI;
+ uiZ:
+    uZ.ui = uiZ;
+    return uZ.f;
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/f64_sub.c b/target-riscv/fpu-custom-riscv/f64_sub.c
new file mode 100755
index 0000000..38bd574
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/f64_sub.c
@@ -0,0 +1,29 @@
+/* f64_sub.c: binary64 subtraction, dispatching on operand signs. */
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+float64_t f64_sub( float64_t a, float64_t b )
+{
+    union ui64_f64 uA;
+    uint_fast64_t uiA;
+    bool signA;
+    union ui64_f64 uB;
+    uint_fast64_t uiB;
+    bool signB;
+    float64_t ( *magsRoutine )( uint_fast64_t, uint_fast64_t, bool );
+    /* Reinterpret operands as raw bits and read their signs. */
+    uA.f = a;
+    uiA = uA.ui;
+    signA = signF64UI( uiA );
+    uB.f = b;
+    uiB = uB.ui;
+    signB = signF64UI( uiB );
+    magsRoutine =
+        ( signA == signB ) ? softfloat_subMagsF64 : softfloat_addMagsF64;  /* same signs: magnitudes subtract; different: add */
+    return magsRoutine( uiA, uiB, signA );
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/f64_to_f32.c b/target-riscv/fpu-custom-riscv/f64_to_f32.c
new file mode 100755
index 0000000..395d6c6
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/f64_to_f32.c
@@ -0,0 +1,43 @@
+/* f64_to_f32.c: narrow a binary64 value to binary32 with correct rounding. */
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+float32_t f64_to_f32( float64_t a )
+{
+    union ui64_f64 uA;
+    uint_fast64_t uiA;
+    bool sign;
+    int_fast16_t exp;
+    uint_fast64_t sig;
+    uint_fast32_t uiZ, sig32;
+    union ui32_f32 uZ;
+    /* Unpack the binary64 operand. */
+    uA.f = a;
+    uiA = uA.ui;
+    sign = signF64UI( uiA );
+    exp = expF64UI( uiA );
+    sig = fracF64UI( uiA );
+    if ( exp == 0x7FF ) {  /* inf or NaN */
+        uiZ =
+            sig ? softfloat_commonNaNToF32UI(
+                      softfloat_f64UIToCommonNaN( uiA ) )
+                : packToF32UI( sign, 0xFF, 0 );
+        goto uiZ;
+    }
+    sig32 = softfloat_shortShift64RightJam( sig, 22 );  /* narrow fraction, keep a sticky bit */
+    if ( ! ( exp | sig32 ) ) {  /* zero (or fraction jammed to nothing at exp 0) */
+        uiZ = packToF32UI( sign, 0, 0 );
+        goto uiZ;
+    }
+    return softfloat_roundPackToF32( sign, exp - 0x381, sig32 | 0x40000000 );  /* rebias and restore implicit bit */
+ uiZ:
+    uZ.ui = uiZ;
+    return uZ.f;
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/f64_to_i32.c b/target-riscv/fpu-custom-riscv/f64_to_i32.c
new file mode 100755
index 0000000..0778a86
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/f64_to_i32.c
@@ -0,0 +1,30 @@
+/* f64_to_i32.c: convert binary64 to int32 under a given rounding mode. */
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+#include "internals.h"
+#include "softfloat.h"
+
+int_fast32_t f64_to_i32( float64_t a, int_fast8_t roundingMode, bool exact )
+{
+    union ui64_f64 uA;
+    uint_fast64_t uiA;
+    bool sign;
+    int_fast16_t exp;
+    uint_fast64_t sig;
+    int_fast16_t shiftCount;
+    /* Unpack the operand. */
+    uA.f = a;
+    uiA = uA.ui;
+    sign = signF64UI( uiA );
+    exp = expF64UI( uiA );
+    sig = fracF64UI( uiA );
+    if ( ( exp == 0x7FF ) && sig ) sign = 0;  /* NaN: treat as positive so it saturates high */
+    if ( exp ) sig |= UINT64_C( 0x0010000000000000 );  /* restore implicit bit */
+    shiftCount = 0x42C - exp;
+    if ( 0 < shiftCount ) sig = softfloat_shift64RightJam( sig, shiftCount );  /* align; jam sticky */
+    return softfloat_roundPackToI32( sign, sig, roundingMode, exact );
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/f64_to_i32_r_minMag.c b/target-riscv/fpu-custom-riscv/f64_to_i32_r_minMag.c
new file mode 100755
index 0000000..39246c2
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/f64_to_i32_r_minMag.c
@@ -0,0 +1,50 @@
+/* f64_to_i32_r_minMag.c: convert binary64 to int32, rounding toward zero. */
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+int_fast32_t f64_to_i32_r_minMag( float64_t a, bool exact )
+{
+    union ui64_f64 uA;
+    uint_fast64_t uiA;
+    int_fast16_t exp;
+    uint_fast64_t sig;
+    bool sign;
+    int_fast16_t shiftCount;
+    uint_fast32_t absZ;
+    union { uint32_t ui; int32_t i; } uZ;
+    int_fast32_t z;
+    /* Unpack the operand. */
+    uA.f = a;
+    uiA = uA.ui;
+    exp = expF64UI( uiA );
+    sig = fracF64UI( uiA );
+    if ( exp < 0x3FF ) {  /* |a| < 1 truncates to 0 */
+        if ( exact && ( exp | sig ) ) {
+            softfloat_exceptionFlags |= softfloat_flag_inexact;
+        }
+        return 0;
+    }
+    sign = signF64UI( uiA );
+    if ( 0x41E < exp ) {  /* |a| >= 2^31: out of int32 range */
+        if ( ( exp == 0x7FF ) && sig ) sign = 0;  /* NaN saturates positive */
+        goto invalid;
+    }
+    sig |= UINT64_C( 0x0010000000000000 );  /* restore implicit bit */
+    shiftCount = 0x433 - exp;
+    absZ = sig>>shiftCount;  /* truncating shift */
+    uZ.ui = sign ? - absZ : absZ;
+    z = uZ.i;
+    if ( ( z < 0 ) != sign ) goto invalid;  /* sign mismatch means overflow (e.g. 2^31) */
+    if ( exact && ( (uint_fast64_t) absZ<<shiftCount != sig ) ) {  /* bits were discarded */
+        softfloat_exceptionFlags |= softfloat_flag_inexact;
+    }
+    return z;
+ invalid:
+    softfloat_raiseFlags( softfloat_flag_invalid );
+    return sign ? -0x7FFFFFFF - 1 : 0x7FFFFFFF;  /* saturate to INT32_MIN / INT32_MAX */
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/f64_to_i64.c b/target-riscv/fpu-custom-riscv/f64_to_i64.c
new file mode 100755
index 0000000..676e944
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/f64_to_i64.c
@@ -0,0 +1,46 @@
+/* f64_to_i64.c: convert binary64 to int64 under a given rounding mode. */
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+#include "internals.h"
+#include "softfloat.h"
+
+int_fast64_t f64_to_i64( float64_t a, int_fast8_t roundingMode, bool exact )
+{
+    union ui64_f64 uA;
+    uint_fast64_t uiA;
+    bool sign;
+    int_fast16_t exp;
+    uint_fast64_t sig;
+    int_fast16_t shiftCount;
+    struct uint64_extra sigExtra;
+    /* Unpack the operand. */
+    uA.f = a;
+    uiA = uA.ui;
+    sign = signF64UI( uiA );
+    exp = expF64UI( uiA );
+    sig = fracF64UI( uiA );
+    if ( exp ) sig |= UINT64_C( 0x0010000000000000 );  /* restore implicit bit */
+    shiftCount = 0x433 - exp;
+    if ( shiftCount <= 0 ) {  /* large magnitude: shift left instead */
+        if ( 0x43E < exp ) {  /* beyond int64 range (or inf/NaN) */
+            softfloat_raiseFlags( softfloat_flag_invalid );
+            return
+                ! sign
+                    || ( ( exp == 0x7FF )
+                             && fracF64UI( uiA ) )  /* NaN saturates positive */
+                    ? INT64_C( 0x7FFFFFFFFFFFFFFF )
+                    : - INT64_C( 0x7FFFFFFFFFFFFFFF ) - 1;
+        }
+        sigExtra.v = sig<<( - shiftCount );
+        sigExtra.extra = 0;  /* no fraction bits remain */
+    } else {
+        sigExtra = softfloat_shift64ExtraRightJam( sig, 0, shiftCount );  /* keep discarded bits for rounding */
+    }
+    return
+        softfloat_roundPackToI64(
+            sign, sigExtra.v, sigExtra.extra, roundingMode, exact );
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/f64_to_i64_r_minMag.c b/target-riscv/fpu-custom-riscv/f64_to_i64_r_minMag.c
new file mode 100755
index 0000000..525705b
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/f64_to_i64_r_minMag.c
@@ -0,0 +1,52 @@
+/* f64_to_i64_r_minMag.c: convert binary64 to int64, rounding toward zero. */
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+int_fast64_t f64_to_i64_r_minMag( float64_t a, bool exact )
+{
+    union ui64_f64 uA;
+    uint_fast64_t uiA;
+    bool sign;
+    int_fast16_t exp;
+    uint_fast64_t sig;
+    int_fast16_t shiftCount;
+    int_fast64_t absZ;
+    /* Unpack the operand. */
+    uA.f = a;
+    uiA = uA.ui;
+    sign = signF64UI( uiA );
+    exp = expF64UI( uiA );
+    sig = fracF64UI( uiA );
+    shiftCount = exp - 0x433;
+    if ( 0 <= shiftCount ) {  /* value is >= 2^52: left-shift, watch for overflow */
+        if ( 0x43E <= exp ) {  /* at or beyond the int64 boundary */
+            if ( uiA != packToF64UI( 1, 0x43E, 0 ) ) {  /* the only exact in-range case: INT64_MIN */
+                softfloat_raiseFlags( softfloat_flag_invalid );
+                if ( ! sign || ( ( exp == 0x7FF ) && sig ) ) {  /* positive overflow or NaN */
+                    return INT64_C( 0x7FFFFFFFFFFFFFFF );
+                }
+            }
+            return - INT64_C( 0x7FFFFFFFFFFFFFFF ) - 1;
+        }
+        sig |= UINT64_C( 0x0010000000000000 );  /* restore implicit bit */
+        absZ = sig<<shiftCount;
+    } else {
+        if ( exp < 0x3FF ) {  /* |a| < 1 truncates to 0 */
+            if ( exact && ( exp | sig ) ) {
+                softfloat_exceptionFlags |= softfloat_flag_inexact;
+            }
+            return 0;
+        }
+        sig |= UINT64_C( 0x0010000000000000 );
+        absZ = sig>>( - shiftCount );  /* truncating shift */
+        if ( exact && (uint64_t) ( sig<<( shiftCount & 63 ) ) ) {  /* any discarded bit set? */
+            softfloat_exceptionFlags |= softfloat_flag_inexact;
+        }
+    }
+    return sign ? - absZ : absZ;
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/f64_to_ui32.c b/target-riscv/fpu-custom-riscv/f64_to_ui32.c
new file mode 100755
index 0000000..b186605
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/f64_to_ui32.c
@@ -0,0 +1,29 @@
+/* f64_to_ui32.c: convert binary64 to uint32 under a given rounding mode. */
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+#include "internals.h"
+#include "softfloat.h"
+
+uint_fast32_t f64_to_ui32( float64_t a, int_fast8_t roundingMode, bool exact )
+{
+    union ui64_f64 uA;
+    uint_fast64_t uiA;
+    bool sign;
+    int_fast16_t exp;
+    uint_fast64_t sig;
+    int_fast16_t shiftCount;
+    /* Unpack the operand; negative/range handling is in roundPackToUI32. */
+    uA.f = a;
+    uiA = uA.ui;
+    sign = signF64UI( uiA );
+    exp = expF64UI( uiA );
+    sig = fracF64UI( uiA );
+    if ( exp ) sig |= UINT64_C( 0x0010000000000000 );  /* restore implicit bit */
+    shiftCount = 0x42C - exp;
+    if ( 0 < shiftCount ) sig = softfloat_shift64RightJam( sig, shiftCount );  /* align; jam sticky */
+    return softfloat_roundPackToUI32( sign, sig, roundingMode, exact );
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/f64_to_ui32_r_minMag.c b/target-riscv/fpu-custom-riscv/f64_to_ui32_r_minMag.c
new file mode 100755
index 0000000..9f1dd4d
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/f64_to_ui32_r_minMag.c
@@ -0,0 +1,40 @@
+/* f64_to_ui32_r_minMag.c: convert binary64 to uint32, rounding toward zero. */
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+uint_fast32_t f64_to_ui32_r_minMag( float64_t a, bool exact )
+{
+    union ui64_f64 uA;
+    uint_fast64_t uiA;
+    int_fast16_t exp;
+    uint_fast64_t sig;
+    int_fast16_t shiftCount;
+    uint_fast32_t z;
+    /* Unpack the operand. */
+    uA.f = a;
+    uiA = uA.ui;
+    exp = expF64UI( uiA );
+    sig = fracF64UI( uiA );
+    if ( exp < 0x3FF ) {  /* |a| < 1 truncates to 0 */
+        if ( exact && ( exp | sig ) ) {
+            softfloat_exceptionFlags |= softfloat_flag_inexact;
+        }
+        return 0;
+    }
+    if ( signF64UI( uiA ) || ( 0x41E < exp ) ) {  /* negative, or >= 2^31... too large for this path */
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        return 0xFFFFFFFF;  /* saturate */
+    }
+    sig |= UINT64_C( 0x0010000000000000 );  /* restore implicit bit */
+    shiftCount = 0x433 - exp;
+    z = sig>>shiftCount;  /* truncating shift */
+    if ( exact && ( (uint_fast64_t) z<<shiftCount != sig ) ) {  /* bits were discarded */
+        softfloat_exceptionFlags |= softfloat_flag_inexact;
+    }
+    return z;
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/f64_to_ui64.c b/target-riscv/fpu-custom-riscv/f64_to_ui64.c
new file mode 100755
index 0000000..9afebd7
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/f64_to_ui64.c
@@ -0,0 +1,41 @@
+/* f64_to_ui64.c: convert binary64 to uint64 under a given rounding mode. */
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+#include "internals.h"
+#include "softfloat.h"
+
+uint_fast64_t f64_to_ui64( float64_t a, int_fast8_t roundingMode, bool exact )
+{
+    union ui64_f64 uA;
+    uint_fast64_t uiA;
+    bool sign;
+    int_fast16_t exp;
+    uint_fast64_t sig;
+    int_fast16_t shiftCount;
+    struct uint64_extra sigExtra;
+    /* Unpack the operand. */
+    uA.f = a;
+    uiA = uA.ui;
+    sign = signF64UI( uiA );
+    exp = expF64UI( uiA );
+    sig = fracF64UI( uiA );
+    if ( exp ) sig |= UINT64_C( 0x0010000000000000 );  /* restore implicit bit */
+    shiftCount = 0x433 - exp;
+    if ( shiftCount <= 0 ) {  /* large magnitude: shift left instead */
+        if ( 0x43E < exp ) {  /* beyond uint64 range (or inf/NaN) */
+            softfloat_raiseFlags( softfloat_flag_invalid );
+            return UINT64_C( 0xFFFFFFFFFFFFFFFF );  /* saturate */
+        }
+        sigExtra.v = sig<<( - shiftCount );
+        sigExtra.extra = 0;  /* no fraction bits remain */
+    } else {
+        sigExtra = softfloat_shift64ExtraRightJam( sig, 0, shiftCount );  /* keep discarded bits for rounding */
+    }
+    return
+        softfloat_roundPackToUI64(
+            sign, sigExtra.v, sigExtra.extra, roundingMode, exact );
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/f64_to_ui64_r_minMag.c b/target-riscv/fpu-custom-riscv/f64_to_ui64_r_minMag.c
new file mode 100755
index 0000000..a66d3ff
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/f64_to_ui64_r_minMag.c
@@ -0,0 +1,45 @@
+/* f64_to_ui64_r_minMag.c: convert binary64 to uint64, rounding toward zero. */
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+uint_fast64_t f64_to_ui64_r_minMag( float64_t a, bool exact )
+{
+    union ui64_f64 uA;
+    uint_fast64_t uiA;
+    int_fast16_t exp;
+    uint_fast64_t sig;
+    int_fast16_t shiftCount;
+    uint_fast64_t z;
+    /* Unpack the operand. */
+    uA.f = a;
+    uiA = uA.ui;
+    exp = expF64UI( uiA );
+    sig = fracF64UI( uiA );
+    if ( exp < 0x3FF ) {  /* |a| < 1 truncates to 0 */
+        if ( exact && ( exp | sig ) ) {
+            softfloat_exceptionFlags |= softfloat_flag_inexact;
+        }
+        return 0;
+    }
+    if ( signF64UI( uiA ) ) goto invalid;  /* negative input */
+    shiftCount = exp - 0x433;
+    if ( 0 <= shiftCount ) {  /* value is >= 2^52: shift left */
+        if ( 0x43E < exp ) goto invalid;  /* beyond uint64 range (or inf/NaN) */
+        z = ( sig | UINT64_C( 0x0010000000000000 ) )<<shiftCount;
+    } else {
+        sig |= UINT64_C( 0x0010000000000000 );  /* restore implicit bit */
+        z = sig>>( - shiftCount );  /* truncating shift */
+        if ( exact && (uint64_t) ( sig<<( shiftCount & 63 ) ) ) {  /* any discarded bit set? */
+            softfloat_exceptionFlags |= softfloat_flag_inexact;
+        }
+    }
+    return z;
+ invalid:
+    softfloat_raiseFlags( softfloat_flag_invalid );
+    return UINT64_C( 0xFFFFFFFFFFFFFFFF );  /* saturate */
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/i32_to_f32.c b/target-riscv/fpu-custom-riscv/i32_to_f32.c
new file mode 100755
index 0000000..f51facd
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/i32_to_f32.c
@@ -0,0 +1,21 @@
+/* i32_to_f32.c: convert int32 to binary32 with rounding. */
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+float32_t i32_to_f32( int_fast32_t a )
+{
+    bool sign;
+    union ui32_f32 uZ;
+    /* Fast path for the two values whose negation needs care. */
+    sign = ( a < 0 );
+    if ( ! ( a & 0x7FFFFFFF ) ) {  /* a is 0 or INT32_MIN */
+        uZ.ui = sign ? packToF32UI( 1, 0x9E, 0 ) : 0;  /* INT32_MIN == -2^31, exact */
+        return uZ.f;
+    }
+    return softfloat_normRoundPackToF32( sign, 0x9C, sign ? - a : a );
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/i32_to_f64.c b/target-riscv/fpu-custom-riscv/i32_to_f64.c
new file mode 100755
index 0000000..d42cbe8
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/i32_to_f64.c
@@ -0,0 +1,31 @@
+/* i32_to_f64.c: convert int32 to binary64 (always exact, no rounding). */
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+#include "internals.h"
+#include "softfloat.h"
+
+float64_t i32_to_f64( int_fast32_t a )
+{
+    uint_fast64_t uiZ;
+    bool sign;
+    uint_fast32_t absA;
+    int shiftCount;
+    union ui64_f64 uZ;
+    /* Every int32 is exactly representable in binary64's 53-bit significand. */
+    if ( ! a ) {
+        uiZ = 0;  /* +0 */
+    } else {
+        sign = ( a < 0 );
+        absA = sign ? - a : a;
+        shiftCount = softfloat_countLeadingZeros32( absA ) + 21;  /* place MSB at significand bit 52 */
+        uiZ =
+            packToF64UI(
+                sign, 0x432 - shiftCount, (uint_fast64_t) absA<<shiftCount );
+    }
+    uZ.ui = uiZ;
+    return uZ.f;
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/i64_to_f32.c b/target-riscv/fpu-custom-riscv/i64_to_f32.c
new file mode 100755
index 0000000..4fecbb9
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/i64_to_f32.c
@@ -0,0 +1,36 @@
+/* i64_to_f32.c: convert int64 to binary32 with rounding. */
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+#include "internals.h"
+#include "softfloat.h"
+
+float32_t i64_to_f32( int_fast64_t a )
+{
+    bool sign;
+    uint_fast64_t absA;
+    int shiftCount;
+    union ui32_f32 u;
+    uint_fast32_t sig;
+    /* Negate in unsigned arithmetic so INT64_MIN is handled without overflow. */
+    sign = ( a < 0 );
+    absA = sign ? - (uint_fast64_t) a : a;
+    shiftCount = softfloat_countLeadingZeros64( absA ) - 40;
+    if ( 0 <= shiftCount ) {  /* magnitude fits in 24 bits: conversion is exact */
+        u.ui =
+            a ? packToF32UI(
+                    sign, 0x95 - shiftCount, (uint_fast32_t) absA<<shiftCount )
+                : 0;
+        return u.f;
+    } else {
+        shiftCount += 7;
+        sig =
+            ( shiftCount < 0 )
+                ? softfloat_shortShift64RightJam( absA, - shiftCount )  /* keep a sticky bit */
+                : (uint_fast32_t) absA<<shiftCount;
+        return softfloat_roundPackToF32( sign, 0x9C - shiftCount, sig );
+    }
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/i64_to_f64.c b/target-riscv/fpu-custom-riscv/i64_to_f64.c
new file mode 100755
index 0000000..1add960
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/i64_to_f64.c
@@ -0,0 +1,21 @@
+/* i64_to_f64.c: convert int64 to binary64 with rounding. */
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+float64_t i64_to_f64( int_fast64_t a )
+{
+    bool sign;
+    union ui64_f64 uZ;
+    /* Fast path for the two values whose negation needs care. */
+    sign = ( a < 0 );
+    if ( ! ( a & UINT64_C( 0x7FFFFFFFFFFFFFFF ) ) ) {  /* a is 0 or INT64_MIN */
+        uZ.ui = sign ? packToF64UI( 1, 0x43E, 0 ) : 0;  /* INT64_MIN == -2^63, exact */
+        return uZ.f;
+    }
+    return softfloat_normRoundPackToF64( sign, 0x43C, sign ? - a : a );
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/internals.h b/target-riscv/fpu-custom-riscv/internals.h
new file mode 100755
index 0000000..5e6fd76
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/internals.h
@@ -0,0 +1,232 @@
+
+/* internals.h: internal SoftFloat types, format-packing macros, and helper
+   function declarations for the RISC-V custom SoftFloat port. */
+
#include "softfloat_types.h"

/* Punning unions used to move values between their raw bit patterns and the
   SoftFloat float types without pointer casts. */
union ui32_f32 { uint32_t ui; float32_t f; };
union ui64_f64 { uint64_t ui; float64_t f; };
#ifdef LITTLEENDIAN
union ui128_f128 { uint64_t ui0, ui64; float128_t f; };
#else
union ui128_f128 { uint64_t ui64, ui0; float128_t f; };
#endif

/* Flag bits passed to the softfloat_mulAdd* kernels.  NOTE(review): the
   names suggest subC negates the addend and subProd negates the product;
   confirm against softfloat_mulAddF32/softfloat_mulAddF64. */
enum {
    softfloat_mulAdd_subC    = 1,
    softfloat_mulAdd_subProd = 2
};
+
+uint_fast32_t
+ softfloat_roundPackToUI32( bool, uint_fast64_t, int_fast8_t, bool );
+uint_fast64_t
+ softfloat_roundPackToUI64(
+     bool, uint_fast64_t, uint_fast64_t, int_fast8_t, bool );
+/*----------------------------------------------------------------------------
+| Takes a 64-bit fixed-point value `absZ' with binary point between bits 6
+| and 7, and returns the properly rounded 32-bit integer corresponding to the
+| input.  If `zSign' is 1, the input is negated before being converted to an
+| integer.  Bit 63 of `absZ' must be zero.  Ordinarily, the fixed-point input
+| is simply rounded to an integer, with the inexact exception raised if the
+| input cannot be represented exactly as an integer.  However, if the fixed-
+| point input is too large, the invalid exception is raised and the largest
+| positive or negative integer is returned.
+*----------------------------------------------------------------------------*/
+int_fast32_t
+ softfloat_roundPackToI32( bool, uint_fast64_t, int_fast8_t, bool );
+/*----------------------------------------------------------------------------
+| Takes the 128-bit fixed-point value formed by concatenating `absZ0' and
+| `absZ1', with binary point between bits 63 and 64 (between the input words),
+| and returns the properly rounded 64-bit integer corresponding to the input.
+| If `zSign' is 1, the input is negated before being converted to an integer.
+| Ordinarily, the fixed-point input is simply rounded to an integer, with
+| the inexact exception raised if the input cannot be represented exactly as
+| an integer.  However, if the fixed-point input is too large, the invalid
+| exception is raised and the largest positive or negative integer is
+| returned.
+*----------------------------------------------------------------------------*/
+int_fast64_t
+ softfloat_roundPackToI64(
+     bool, uint_fast64_t, uint_fast64_t, int_fast8_t, bool );
+
/*----------------------------------------------------------------------------
| Returns 1 if the single-precision floating-point value `a' is a NaN;
| otherwise, returns 0.
*----------------------------------------------------------------------------*/
#define isNaNF32UI( ui ) (0xFF000000<(uint32_t)((uint_fast32_t)(ui)<<1))
/*----------------------------------------------------------------------------
| Returns the sign bit of the single-precision floating-point value `a'.
*----------------------------------------------------------------------------*/
#define signF32UI( a ) ((bool)((uint32_t)(a)>>31))
/*----------------------------------------------------------------------------
| Returns the exponent bits of the single-precision floating-point value `a'.
*----------------------------------------------------------------------------*/
#define expF32UI( a ) ((int_fast16_t)((a)>>23)&0xFF)
/*----------------------------------------------------------------------------
| Returns the fraction bits of the single-precision floating-point value `a'.
*----------------------------------------------------------------------------*/
#define fracF32UI( a ) ((a)&0x007FFFFF)
/*----------------------------------------------------------------------------
| Packs the sign `zSign', exponent `zExp', and significand `zSig' into a
| single-precision floating-point value, returning the result.  After being
| shifted into the proper positions, the three fields are simply added
| together to form the result.  This means that any integer portion of `zSig'
| will be added into the exponent.  Since a properly normalized significand
| will have an integer portion equal to 1, the `zExp' input should be 1 less
| than the desired result exponent whenever `zSig' is a complete, normalized
| significand.
| (Fix: the macro body below had been mail-wrapped onto its own line with no
| backslash continuation, leaving the #define empty; rejoined on one line.)
*----------------------------------------------------------------------------*/
#define packToF32UI( sign, exp, sig ) (((uint32_t)(sign)<<31)+((uint32_t)(exp)<<23)+(sig))
+
+/*----------------------------------------------------------------------------
+| Normalizes the subnormal single-precision floating-point value represented
+| by the denormalized significand `aSig'.  The normalized exponent and
+| significand are stored at the locations pointed to by `zExpPtr' and
+| `zSigPtr', respectively.
+*----------------------------------------------------------------------------*/
+struct exp16_sig32 { int_fast16_t exp; uint_fast32_t sig; };
+struct exp16_sig32 softfloat_normSubnormalF32Sig( uint_fast32_t );
+
+/*----------------------------------------------------------------------------
+| Takes an abstract floating-point value having sign `zSign', exponent `zExp',
+| and significand `zSig', and returns the proper single-precision floating-
+| point value corresponding to the abstract input.  Ordinarily, the abstract
+| value is simply rounded and packed into the single-precision format, with
+| the inexact exception raised if the abstract input cannot be represented
+| exactly.  However, if the abstract value is too large, the overflow and
+| inexact exceptions are raised and an infinity or maximal finite value is
+| returned.  If the abstract value is too small, the input value is rounded to
+| a subnormal number, and the underflow and inexact exceptions are raised if
+| the abstract input cannot be represented exactly as a subnormal single-
+| precision floating-point number.
+|     The input significand `zSig' has its binary point between bits 30
+| and 29, which is 7 bits to the left of the usual location.  This shifted
+| significand must be normalized or smaller.  If `zSig' is not normalized,
+| `zExp' must be 0; in that case, the result returned is a subnormal number,
+| and it must not require rounding.  In the usual case that `zSig' is
+| normalized, `zExp' must be 1 less than the ``true'' floating-point exponent.
+| The handling of underflow and overflow follows the IEC/IEEE Standard for
+| Binary Floating-Point Arithmetic.
+*----------------------------------------------------------------------------*/
+float32_t softfloat_roundPackToF32( bool, int_fast16_t, uint_fast32_t );
+/*----------------------------------------------------------------------------
+| Takes an abstract floating-point value having sign `zSign', exponent `zExp',
+| and significand `zSig', and returns the proper single-precision floating-
+| point value corresponding to the abstract input.  This routine is just like
+| `roundAndPackFloat32' except that `zSig' does not have to be normalized.
+| Bit 31 of `zSig' must be zero, and `zExp' must be 1 less than the ``true''
+| floating-point exponent.
+*----------------------------------------------------------------------------*/
+float32_t softfloat_normRoundPackToF32( bool, int_fast16_t, uint_fast32_t );
+
+/*----------------------------------------------------------------------------
+| Returns the result of adding the absolute values of the single-precision
+| floating-point values `a' and `b'.  If `zSign' is 1, the sum is negated
+| before being returned.  `zSign' is ignored if the result is a NaN.
+| The addition is performed according to the IEC/IEEE Standard for Binary
+| Floating-Point Arithmetic.
+*----------------------------------------------------------------------------*/
+float32_t softfloat_addMagsF32( uint_fast32_t, uint_fast32_t, bool );
+/*----------------------------------------------------------------------------
+| Returns the result of subtracting the absolute values of the single-
+| precision floating-point values `a' and `b'.  If `zSign' is 1, the
+| difference is negated before being returned.  `zSign' is ignored if the
+| result is a NaN.  The subtraction is performed according to the IEC/IEEE
+| Standard for Binary Floating-Point Arithmetic.
+*----------------------------------------------------------------------------*/
+float32_t softfloat_subMagsF32( uint_fast32_t, uint_fast32_t, bool );
+/*----------------------------------------------------------------------------
+*----------------------------------------------------------------------------*/
+float32_t
+ softfloat_mulAddF32( int, uint_fast32_t, uint_fast32_t, uint_fast32_t );
+
/*----------------------------------------------------------------------------
| Returns 1 if the double-precision floating-point value `a' is a NaN;
| otherwise, returns 0.
| (Fix: this macro and packToF64UI below had their bodies mail-wrapped onto
| separate lines with no backslash continuation, leaving both #defines
| empty; each is rejoined on one line.)
*----------------------------------------------------------------------------*/
#define isNaNF64UI( ui ) (UINT64_C(0xFFE0000000000000)<(uint64_t)((uint_fast64_t)(ui)<<1))
/*----------------------------------------------------------------------------
| Returns the sign bit of the double-precision floating-point value `a'.
*----------------------------------------------------------------------------*/
#define signF64UI( a ) ((bool)((uint64_t)(a)>>63))
/*----------------------------------------------------------------------------
| Returns the exponent bits of the double-precision floating-point value `a'.
*----------------------------------------------------------------------------*/
#define expF64UI( a ) ((int_fast16_t)((a)>>52)&0x7FF)
/*----------------------------------------------------------------------------
| Returns the fraction bits of the double-precision floating-point value `a'.
*----------------------------------------------------------------------------*/
#define fracF64UI( a ) ((a)&UINT64_C(0x000FFFFFFFFFFFFF))
/*----------------------------------------------------------------------------
| Packs the sign `zSign', exponent `zExp', and significand `zSig' into a
| double-precision floating-point value, returning the result.  After being
| shifted into the proper positions, the three fields are simply added
| together to form the result.  This means that any integer portion of `zSig'
| will be added into the exponent.  Since a properly normalized significand
| will have an integer portion equal to 1, the `zExp' input should be 1 less
| than the desired result exponent whenever `zSig' is a complete, normalized
| significand.
*----------------------------------------------------------------------------*/
#define packToF64UI( sign, exp, sig ) (((uint64_t)(sign)<<63)+((uint64_t)(exp)<<52)+(sig))
+
+/*----------------------------------------------------------------------------
+| Normalizes the subnormal double-precision floating-point value represented
+| by the denormalized significand `aSig'.  The normalized exponent and
+| significand are stored at the locations pointed to by `zExpPtr' and
+| `zSigPtr', respectively.
+*----------------------------------------------------------------------------*/
+struct exp16_sig64 { int_fast16_t exp; uint_fast64_t sig; };
+struct exp16_sig64 softfloat_normSubnormalF64Sig( uint_fast64_t );
+
+/*----------------------------------------------------------------------------
+| Takes an abstract floating-point value having sign `zSign', exponent `zExp',
+| and significand `zSig', and returns the proper double-precision floating-
+| point value corresponding to the abstract input.  Ordinarily, the abstract
+| value is simply rounded and packed into the double-precision format, with
+| the inexact exception raised if the abstract input cannot be represented
+| exactly.  However, if the abstract value is too large, the overflow and
+| inexact exceptions are raised and an infinity or maximal finite value is
+| returned.  If the abstract value is too small, the input value is rounded
+| to a subnormal number, and the underflow and inexact exceptions are raised
+| if the abstract input cannot be represented exactly as a subnormal double-
+| precision floating-point number.
+|     The input significand `zSig' has its binary point between bits 62
+| and 61, which is 10 bits to the left of the usual location.  This shifted
+| significand must be normalized or smaller.  If `zSig' is not normalized,
+| `zExp' must be 0; in that case, the result returned is a subnormal number,
+| and it must not require rounding.  In the usual case that `zSig' is
+| normalized, `zExp' must be 1 less than the ``true'' floating-point exponent.
+| The handling of underflow and overflow follows the IEC/IEEE Standard for
+| Binary Floating-Point Arithmetic.
+*----------------------------------------------------------------------------*/
+float64_t softfloat_roundPackToF64( bool, int_fast16_t, uint_fast64_t );
+/*----------------------------------------------------------------------------
+| Takes an abstract floating-point value having sign `zSign', exponent `zExp',
+| and significand `zSig', and returns the proper double-precision floating-
+| point value corresponding to the abstract input.  This routine is just like
+| `roundAndPackFloat64' except that `zSig' does not have to be normalized.
+| Bit 63 of `zSig' must be zero, and `zExp' must be 1 less than the ``true''
+| floating-point exponent.
+*----------------------------------------------------------------------------*/
+float64_t softfloat_normRoundPackToF64( bool, int_fast16_t, uint_fast64_t );
+
+/*----------------------------------------------------------------------------
+| Returns the result of adding the absolute values of the double-precision
+| floating-point values `a' and `b'.  If `zSign' is 1, the sum is negated
+| before being returned.  `zSign' is ignored if the result is a NaN.
+| The addition is performed according to the IEC/IEEE Standard for Binary
+| Floating-Point Arithmetic.
+*----------------------------------------------------------------------------*/
+float64_t softfloat_addMagsF64( uint_fast64_t, uint_fast64_t, bool );
+/*----------------------------------------------------------------------------
+| Returns the result of subtracting the absolute values of the double-
+| precision floating-point values `a' and `b'.  If `zSign' is 1, the
+| difference is negated before being returned.  `zSign' is ignored if the
+| result is a NaN.  The subtraction is performed according to the IEC/IEEE
+| Standard for Binary Floating-Point Arithmetic.
+*----------------------------------------------------------------------------*/
+float64_t softfloat_subMagsF64( uint_fast64_t, uint_fast64_t, bool );
+/*----------------------------------------------------------------------------
+*----------------------------------------------------------------------------*/
+float64_t
+ softfloat_mulAddF64( int, uint_fast64_t, uint_fast64_t, uint_fast64_t );
+
diff --git a/target-riscv/fpu-custom-riscv/platform.h b/target-riscv/fpu-custom-riscv/platform.h
new file mode 100755
index 0000000..6c54313
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/platform.h
@@ -0,0 +1,42 @@
+
+/*============================================================================
+
+*** FIX.
+
+This C source fragment is part of the SoftFloat IEC/IEEE Floating-point
+Arithmetic Package, Release 2b.
+
+Written by John R. Hauser.  This work was made possible in part by the
+International Computer Science Institute, located at Suite 600, 1947 Center
+Street, Berkeley, California 94704.  Funding was partially provided by the
+National Science Foundation under grant MIP-9311980.  The original version
+of this code was written as part of a project to build a fixed-point vector
+processor in collaboration with the University of California at Berkeley,
+overseen by Profs. Nelson Morgan and John Wawrzynek.  More information
+is available through the Web page `http://www.cs.berkeley.edu/~jhauser/
+arithmetic/SoftFloat.html'.
+
+THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE.  Although reasonable effort has
+been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT TIMES
+RESULT IN INCORRECT BEHAVIOR.  USE OF THIS SOFTWARE IS RESTRICTED TO PERSONS
+AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ALL LOSSES,
+COSTS, OR OTHER PROBLEMS THEY INCUR DUE TO THE SOFTWARE, AND WHO FURTHERMORE
+EFFECTIVELY INDEMNIFY JOHN HAUSER AND THE INTERNATIONAL COMPUTER SCIENCE
+INSTITUTE (possibly via similar legal warning) AGAINST ALL LOSSES, COSTS, OR
+OTHER PROBLEMS INCURRED BY THEIR CUSTOMERS AND CLIENTS DUE TO THE SOFTWARE.
+
+Derivative works are acceptable, even for commercial purposes, so long as
+(1) the source code for the derivative work includes prominent notice that
+the work is derivative, and (2) the source code includes prominent notice with
+these four paragraphs for those parts of this code that are retained.
+
+=============================================================================*/
+
/*----------------------------------------------------------------------------
| Host properties.  The SoftFloat code selects multi-word struct layouts
| with the LITTLEENDIAN macro; previously it was defined unconditionally,
| which silently produced wrong layouts on big-endian hosts.  Define it only
| when the compiler does not report a big-endian byte order (no change of
| behavior on little-endian builds, where __BYTE_ORDER__ matches
| __ORDER_LITTLE_ENDIAN__ or is not provided at all).
*----------------------------------------------------------------------------*/
#if !defined(__BYTE_ORDER__) || (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
#define LITTLEENDIAN
#endif

/* Fallback for pre-C99 toolchains that lack the <stdint.h> constant macros. */
#ifndef UINT64_C
# define UINT64_C(x) (x ## ULL)
# define INT64_C(x) (x ## LL)
#endif
diff --git a/target-riscv/fpu-custom-riscv/primitives.h b/target-riscv/fpu-custom-riscv/primitives.h
new file mode 100755
index 0000000..71038ea
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/primitives.h
@@ -0,0 +1,628 @@
+
+/*============================================================================
+
+This C source fragment is part of the SoftFloat IEC/IEEE Floating-point
+Arithmetic Package, Release 3.
+
+*** UPDATE
+
+Written by John R. Hauser.  This work was made possible in part by the
+International Computer Science Institute, located at Suite 600, 1947 Center
+Street, Berkeley, California 94704.  Funding was partially provided by the
+National Science Foundation under grant MIP-9311980.  The original version
+of this code was written as part of a project to build a fixed-point vector
+processor in collaboration with the University of California at Berkeley,
+overseen by Profs. Nelson Morgan and John Wawrzynek.  More information
+is available through the Web page `http://www.cs.berkeley.edu/~jhauser/
+arithmetic/SoftFloat.html'.
+
+THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE.  Although reasonable effort has
+been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT TIMES
+RESULT IN INCORRECT BEHAVIOR.  USE OF THIS SOFTWARE IS RESTRICTED TO PERSONS
+AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ALL LOSSES,
+COSTS, OR OTHER PROBLEMS THEY INCUR DUE TO THE SOFTWARE, AND WHO FURTHERMORE
+EFFECTIVELY INDEMNIFY JOHN HAUSER AND THE INTERNATIONAL COMPUTER SCIENCE
+INSTITUTE (possibly via similar legal notice) AGAINST ALL LOSSES, COSTS, OR
+OTHER PROBLEMS INCURRED BY THEIR CUSTOMERS AND CLIENTS DUE TO THE SOFTWARE.
+
+Derivative works are acceptable, even for commercial purposes, so long as
+(1) the source code for the derivative work includes prominent notice that
+the work is derivative, and (2) the source code includes prominent notice with
+these four paragraphs for those parts of this code that are retained.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+
+/*** CHANGE TO USE `fast' INTEGER TYPES? ***/
+/*** ADD 80-BIT FUNCTIONS? ***/
+
/* Multi-word unsigned integers, split into 64-bit limbs named by the bit
   position of their least significant bit (v0 = bits 0..63, v64 = bits
   64..127, ...).  Member order follows host endianness so the in-memory
   layout matches a contiguous little/big-endian integer. */
#ifdef LITTLEENDIAN
struct uintx80 { uint64_t v0; uint16_t v64; };
struct uint128 { uint64_t v0, v64; };
struct uint192 { uint64_t v0, v64, v128; };
struct uint256 { uint64_t v0, v64, v128, v192; };
#else
struct uintx80 { uint16_t v64; uint64_t v0; };
struct uint128 { uint64_t v64, v0; };
struct uint192 { uint64_t v128, v64, v0; };
/* Fix: top limb was misnamed `v256'; a 256-bit value's top limb starts at
   bit 192, matching the little-endian declaration above. */
struct uint256 { uint64_t v192, v128, v64, v0; };
#endif

/* A value plus the "extra" (sticky) bits shifted out below it. */
struct uint64_extra { uint64_t v, extra; };
struct uint128_extra { uint64_t v64; uint64_t v0; uint64_t extra; };
+
+
+/*** SHIFT COUNTS CANNOT BE ZERO.  MUST CHECK BEFORE CALLING! ***/
+
+
/*----------------------------------------------------------------------------
| 128-bit equality: true iff the value a64:a0 equals b64:b0.
*----------------------------------------------------------------------------*/
#if defined INLINE_LEVEL && ( 1 <= INLINE_LEVEL )
INLINE bool
 softfloat_eq128( uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0 )
{
    return ( a64 == b64 ) && ( a0 == b0 );
}
#else
bool softfloat_eq128( uint64_t, uint64_t, uint64_t, uint64_t );
#endif

/*----------------------------------------------------------------------------
| 128-bit unsigned comparison: true iff a64:a0 <= b64:b0.
*----------------------------------------------------------------------------*/
#if defined INLINE_LEVEL && ( 1 <= INLINE_LEVEL )
INLINE bool
 softfloat_le128( uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0 )
{
    if ( a64 != b64 ) return ( a64 < b64 );
    return ( a0 <= b0 );
}
#else
bool softfloat_le128( uint64_t, uint64_t, uint64_t, uint64_t );
#endif

/*----------------------------------------------------------------------------
| 128-bit unsigned comparison: true iff a64:a0 < b64:b0.
*----------------------------------------------------------------------------*/
#if defined INLINE_LEVEL && ( 1 <= INLINE_LEVEL )
INLINE bool
 softfloat_lt128( uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0 )
{
    if ( a64 != b64 ) return ( a64 < b64 );
    return ( a0 < b0 );
}
#else
bool softfloat_lt128( uint64_t, uint64_t, uint64_t, uint64_t );
#endif
+
/*----------------------------------------------------------------------------
| Shifts the 128-bit value a64:a0 left by `count' bits (bits shifted off the
| top are lost) and returns the result as a struct uint128.  `count' must be
| in 1..63: a count of 0 would make the `(-count) & 63' carry term shift by
| 0 instead of 64 (see the shift-count note at the top of this header).
*----------------------------------------------------------------------------*/
#if defined INLINE_LEVEL && ( 2 <= INLINE_LEVEL )
INLINE struct uint128
 softfloat_shortShift128Left( uint64_t a64, uint64_t a0, unsigned int count )
{
    struct uint128 r;
    r.v0 = a0<<count;
    r.v64 = a64<<count | a0>>( ( - count ) & 63 );
    return r;
}
#else
struct uint128 softfloat_shortShift128Left( uint64_t, uint64_t, unsigned int );
#endif

/*----------------------------------------------------------------------------
| Shifts the 192-bit value a128:a64:a0 left by `count' bits (bits shifted
| off the top are lost) and returns the result as a struct uint192.  As
| above, `count' must be in 1..63.
*----------------------------------------------------------------------------*/
#if defined INLINE_LEVEL && ( 3 <= INLINE_LEVEL )
INLINE struct uint192
 softfloat_shortShift192Left(
     uint64_t a128, uint64_t a64, uint64_t a0, unsigned int count )
{
    unsigned int carryShift = ( - count ) & 63;
    struct uint192 r;
    r.v0 = a0<<count;
    r.v64 = a64<<count | a0>>carryShift;
    r.v128 = a128<<count | a64>>carryShift;
    return r;
}
#else
struct uint192
 softfloat_shortShift192Left( uint64_t, uint64_t, uint64_t, unsigned int );
#endif
+
/*----------------------------------------------------------------------------
| Shifts `a' right by `count' bits; any nonzero bits shifted off are
| ``jammed'' into the least significant (sticky) bit of the result.  `count'
| may be arbitrarily large: for count >= 32 the result is simply 0 or 1
| depending on whether `a' is nonzero.
*----------------------------------------------------------------------------*/
#if defined INLINE_LEVEL && ( 2 <= INLINE_LEVEL )
INLINE uint32_t softfloat_shift32RightJam( uint32_t a, unsigned int count )
{
    if ( count < 32 ) {
        uint32_t sticky = ( (uint32_t) ( a<<( ( - count ) & 31 ) ) != 0 );
        return a>>count | sticky;
    }
    return ( a != 0 );
}
#else
uint32_t softfloat_shift32RightJam( uint32_t, unsigned int );
#endif

/*----------------------------------------------------------------------------
| Single-bit right shift with jamming: bit 0 of `a' is folded into the
| sticky bit of the result.
| NOTE(review): this guard tests INLINE rather than INLINE_LEVEL like its
| neighbors -- confirm whether that difference is intentional.
*----------------------------------------------------------------------------*/
#if defined INLINE
INLINE uint32_t softfloat_shortShift32Right1Jam( uint32_t a )
{
    return a>>1 | ( a & 1 );
}
#else
uint32_t softfloat_shortShift32Right1Jam( uint32_t );
#endif
+
/*----------------------------------------------------------------------------
| Shifts `a' right by `count' bits; any nonzero bits shifted off are
| ``jammed'' into the least significant (sticky) bit of the result.  `count'
| may be arbitrarily large: for count >= 64 the result is simply 0 or 1
| depending on whether `a' is nonzero.
*----------------------------------------------------------------------------*/
#if defined INLINE_LEVEL && ( 3 <= INLINE_LEVEL )
INLINE uint64_t softfloat_shift64RightJam( uint64_t a, unsigned int count )
{
    if ( count < 64 ) {
        uint64_t sticky = ( (uint64_t) ( a<<( ( - count ) & 63 ) ) != 0 );
        return a>>count | sticky;
    }
    return ( a != 0 );
}
#else
uint64_t softfloat_shift64RightJam( uint64_t, unsigned int );
#endif

/*----------------------------------------------------------------------------
| Same as above, but `count' must be in 1..63 (short form, no large-count
| handling).
*----------------------------------------------------------------------------*/
#if defined INLINE_LEVEL && ( 2 <= INLINE_LEVEL )
INLINE uint64_t
 softfloat_shortShift64RightJam( uint64_t a, unsigned int count )
{
    uint64_t droppedMask = ( (uint64_t) 1<<count ) - 1;
    return a>>count | ( ( a & droppedMask ) != 0 );
}
#else
uint64_t softfloat_shortShift64RightJam( uint64_t, unsigned int );
#endif
+
/*----------------------------------------------------------------------------
| Shifts the 128-bit value a:extra right by 64 plus `count' bits.  The
| integer part of the result (at most 64 nonzero bits) is returned in `v';
| the bits shifted off form the `extra' word: its most significant bit is
| the last bit shifted off, and its low 63 bits are zero only if all the
| other shifted-off bits (including the incoming `extra') were zero.
| `count' can be arbitrarily large.  (Think of a:extra as a fixed-point
| value with the binary point between the two words; `v' is its integer
| part after the shift and `extra' a sticky summary of the fraction.)
*----------------------------------------------------------------------------*/
#if defined INLINE_LEVEL && ( 3 <= INLINE_LEVEL )
INLINE struct uint64_extra
 softfloat_shift64ExtraRightJam(
     uint64_t a, uint64_t extra, unsigned int count )
{
    struct uint64_extra r;
    if ( 64 <= count ) {
        r.v = 0;
        r.extra = ( count == 64 ) ? a : ( a != 0 );
    } else {
        r.v = a>>count;
        r.extra = a<<( ( - count ) & 63 );
    }
    /* Fold the incoming sticky word into the sticky bit. */
    if ( extra ) r.extra |= 1;
    return r;
}
#else
struct uint64_extra
 softfloat_shift64ExtraRightJam( uint64_t, uint64_t, unsigned int );
#endif

/*----------------------------------------------------------------------------
| Same as above, but `count' must be in 1..63.
*----------------------------------------------------------------------------*/
#if defined INLINE_LEVEL && ( 2 <= INLINE_LEVEL )
INLINE struct uint64_extra
 softfloat_shortShift64ExtraRightJam(
     uint64_t a, uint64_t extra, unsigned int count )
{
    struct uint64_extra r;
    r.v = a>>count;
    r.extra = a<<( ( - count ) & 63 );
    if ( extra ) r.extra |= 1;
    return r;
}
#else
struct uint64_extra
 softfloat_shortShift64ExtraRightJam( uint64_t, uint64_t, unsigned int );
#endif
+
/*----------------------------------------------------------------------------
| Shifts the 128-bit value formed by concatenating `a0' and `a1' right by the
| number of bits given in `count'.  Any bits shifted off are lost.  The value
| of `count' can be arbitrarily large; in particular, if `count' is greater
| than 128, the result will be 0.  The result is broken into two 64-bit pieces
| which are stored at the locations pointed to by `z0Ptr' and `z1Ptr'.
| NOTE(review): this paragraph looks stale -- the function below takes
| (a64, a0, count) and returns a struct uint128 rather than storing through
| pointers; confirm against the Release-2 original it was adapted from.
*----------------------------------------------------------------------------*/
/*----------------------------------------------------------------------------
| Shift count is less than 64.
*----------------------------------------------------------------------------*/
#if defined INLINE_LEVEL && ( 2 <= INLINE_LEVEL )
INLINE struct uint128
 softfloat_shortShift128Right( uint64_t a64, uint64_t a0, unsigned int count )
{
    struct uint128 z;
    /* High word shifts directly; the low word receives the bits that cross
       the 64-bit boundary.  `count' must be in 1..63: a count of 0 would
       make the `(-count) & 63' term shift by 0 rather than 64. */
    z.v64 = a64>>count;
    z.v0 = a64<<( ( - count ) & 63 ) | a0>>count;
    return z;
}
#else
struct uint128
 softfloat_shortShift128Right( uint64_t, uint64_t, unsigned int );
#endif

/*----------------------------------------------------------------------------
| Shifts the 128-bit value formed by concatenating `a0' and `a1' right by the
| number of bits given in `count'.  If any nonzero bits are shifted off, they
| are ``jammed'' into the least significant bit of the result by setting the
| least significant bit to 1.  The value of `count' can be arbitrarily large;
| in particular, if `count' is greater than 128, the result will be either
| 0 or 1, depending on whether the concatenation of `a0' and `a1' is zero or
| nonzero.  The result is broken into two 64-bit pieces which are stored at
| the locations pointed to by `z0Ptr' and `z1Ptr'.
*----------------------------------------------------------------------------*/
#if defined INLINE_LEVEL && ( 4 <= INLINE_LEVEL )
INLINE struct uint128
 softfloat_shift128RightJam( uint64_t a64, uint64_t a0, unsigned int count )
{
    unsigned int negCount;
    struct uint128 z;
    if ( count < 64 ) {
        negCount = - count;
        /* Bits shifted out of `a0' are ORed into the sticky (lsb) bit. */
        z.v64 = a64>>( count & 63 );
        z.v0 =
            a64<<( negCount & 63 ) | a0>>count
                | ( (uint64_t) ( a0<<( negCount & 63 ) ) != 0 );
    } else {
        z.v64 = 0;
        /* For 64 <= count < 128 the surviving bits come from `a64'; every
           bit shifted out of `a64' plus all of `a0' feeds the sticky bit.
           For count >= 128 only the sticky summary remains. */
        z.v0 =
            ( count < 128 )
                ? a64>>( count & 63 )
                      | ( ( ( a64 & ( ( (uint64_t) 1<<( count & 63 ) ) - 1 ) )
                                | a0 )
                              != 0 )
                : ( ( a64 | a0 ) != 0 );
    }
    return z;
}
#else
struct uint128
 softfloat_shift128RightJam( uint64_t, uint64_t, unsigned int );
#endif
+
+/*----------------------------------------------------------------------------
+| Shifts the 192-bit value formed by concatenating `a0', `a1', and `a2' right
+| by 64 _plus_ the number of bits given in `count'.  The shifted result is
+| at most 128 nonzero bits; these are broken into two 64-bit pieces which are
+| stored at the locations pointed to by `z0Ptr' and `z1Ptr'.  The bits shifted
+| off form a third 64-bit result as follows:  The _last_ bit shifted off is
+| the most-significant bit of the extra result, and the other 63 bits of the
+| extra result are all zero if and only if _all_but_the_last_ bits shifted off
+| were all zero.  This extra result is stored in the location pointed to by
+| `z2Ptr'.  The value of `count' can be arbitrarily large.
+|     (This routine makes more sense if `a0', `a1', and `a2' are considered
+| to form a fixed-point value with binary point between `a1' and `a2'.  This
+| fixed-point value is shifted right by the number of bits given in `count',
+| and the integer part of the result is returned at the locations pointed to
+| by `z0Ptr' and `z1Ptr'.  The fractional part of the result may be slightly
+| corrupted as described above, and is returned at the location pointed to by
+| `z2Ptr'.)
+*----------------------------------------------------------------------------*/
+#if defined INLINE_LEVEL && ( 5 <= INLINE_LEVEL )
+INLINE struct uint128_extra
+ softfloat_shift128ExtraRightJam(
+     uint64_t a64, uint64_t a0, uint64_t extra, unsigned int count )
+{
+    unsigned int negCount = - count;
+    struct uint128_extra z;
+    if ( count < 64 ) {
+        /* Short shift: the bits that fall out of `a0' become the new
+           `extra' word.  (NOTE(review): as in shift128RightJam, a zero
+           count would misbehave here -- presumably never passed.)  */
+        z.v64 = a64>>count;
+        z.v0 = a64<<( negCount & 63 ) | a0>>count;
+        z.extra = a0<<( negCount & 63 );
+    } else {
+        z.v64 = 0;
+        if ( count == 64 ) {
+            /* Whole-word move: `a64' becomes the low word, `a0' the extra.  */
+            z.v0 = a64;
+            z.extra = a0;
+        } else {
+            /* `a0' is shifted out entirely; fold it into the sticky OR.  */
+            extra |= a0;
+            if ( count < 128 ) {
+                z.v0 = a64>>( count & 63 );
+                z.extra = a64<<( negCount & 63 );
+            } else {
+                z.v0 = 0;
+                z.extra = ( count == 128 ) ? a64 : ( a64 != 0 );
+            }
+        }
+    }
+    /* Any bits discarded past the `extra' word survive only as a sticky
+       bit in its least-significant position.  */
+    z.extra |= ( extra != 0 );
+    return z;
+}
+#else
+struct uint128_extra
+ softfloat_shift128ExtraRightJam( uint64_t, uint64_t, uint64_t, unsigned int );
+#endif
+
+/*----------------------------------------------------------------------------
+| Shift count is less than 64.
+*----------------------------------------------------------------------------*/
+#if defined INLINE_LEVEL && ( 3 <= INLINE_LEVEL )
+INLINE struct uint128_extra
+ softfloat_shortShift128ExtraRightJam(
+     uint64_t a64, uint64_t a0, uint64_t extra, unsigned int count )
+{
+    unsigned int negCount = - count;
+    struct uint128_extra z;
+    /* Fast path of shift128ExtraRightJam for counts known to be below 64;
+       the old `extra' collapses into a sticky bit of the new one.
+       NOTE(review): a count of 0 would corrupt z.v0/z.extra via the
+       `negCount & 63' trick -- assumes count is 1..63, TODO confirm.  */
+    z.v64 = a64>>count;
+    z.v0 = a64<<( negCount & 63 ) | a0>>count;
+    z.extra = a0<<( negCount & 63 ) | ( extra != 0 );
+    return z;
+}
+#else
+struct uint128_extra
+ softfloat_shortShift128ExtraRightJam(
+     uint64_t, uint64_t, uint64_t, unsigned int );
+#endif
+
+extern const uint8_t softfloat_countLeadingZeros8[ 256 ];
+
+/*----------------------------------------------------------------------------
+| Returns the number of leading 0 bits before the most-significant 1 bit of
+| `a'.  If `a' is zero, 32 is returned.
+*----------------------------------------------------------------------------*/
+#if defined INLINE_LEVEL && ( 2 <= INLINE_LEVEL )
+INLINE int softfloat_countLeadingZeros32( uint32_t a )
+{
+    int count = 0;
+    /* Narrow in halves (16, then 8), leaving the most-significant set bit
+       somewhere in the top byte; finish with the 256-entry byte table.
+       A zero input yields 16 + 8 + table[0] == 32.  */
+    if ( a < 0x10000 ) {
+        count = 16;
+        a <<= 16;
+    }
+    if ( a < 0x1000000 ) {
+        count += 8;
+        a <<= 8;
+    }
+    count += softfloat_countLeadingZeros8[ a>>24 ];
+    return count;
+}
+#else
+int softfloat_countLeadingZeros32( uint32_t );
+#endif
+
+/*----------------------------------------------------------------------------
+| Returns the number of leading 0 bits before the most-significant 1 bit of
+| `a'.  If `a' is zero, 64 is returned.
+*----------------------------------------------------------------------------*/
+#if defined INLINE_LEVEL && ( 4 <= INLINE_LEVEL )
+INLINE int softfloat_countLeadingZeros64( uint64_t a )
+{
+    /* Reduce to the 32-bit problem: pick whichever half holds the
+       most-significant set bit and count its position from there.  */
+    int count = 32;
+    uint32_t a32 = a;
+    if ( UINT64_C( 0x100000000 ) <= a ) {
+        count = 0;
+        a32 = a>>32;
+    }
+    /*------------------------------------------------------------------------
+    | From here, result is current count + count leading zeros of `a32'.
+    *------------------------------------------------------------------------*/
+    if ( a32 < 0x10000 ) {
+        count += 16;
+        a32 <<= 16;
+    }
+    if ( a32 < 0x1000000 ) {
+        count += 8;
+        a32 <<= 8;
+    }
+    count += softfloat_countLeadingZeros8[ a32>>24 ];
+    return count;
+}
+#else
+int softfloat_countLeadingZeros64( uint64_t );
+#endif
+
+/*----------------------------------------------------------------------------
+| Adds the 128-bit value formed by concatenating `a0' and `a1' to the 128-bit
+| value formed by concatenating `b0' and `b1'.  Addition is modulo 2^128, so
+| any carry out is lost.  The result is returned as a `struct uint128'
+| holding the two 64-bit halves of the sum.
+*----------------------------------------------------------------------------*/
+#if defined INLINE_LEVEL && ( 2 <= INLINE_LEVEL )
+INLINE struct uint128
+ softfloat_add128( uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0 )
+{
+    struct uint128 z;
+    z.v0 = a0 + b0;
+    z.v64 = a64 + b64;
+    /* The low-word sum wrapped (carried out) exactly when it ends up
+       smaller than one of its addends.  */
+    z.v64 += ( z.v0 < a0 );
+    return z;
+}
+#else
+struct uint128 softfloat_add128( uint64_t, uint64_t, uint64_t, uint64_t );
+#endif
+
+/*----------------------------------------------------------------------------
+| Adds the 192-bit value formed by concatenating `a0', `a1', and `a2' to the
+| 192-bit value formed by concatenating `b0', `b1', and `b2'.  Addition is
+| modulo 2^192, so any carry out is lost.  The result is broken into three
+| 64-bit pieces which are stored at the locations pointed to by `z0Ptr',
+| `z1Ptr', and `z2Ptr'.
+*----------------------------------------------------------------------------*/
+#if defined INLINE_LEVEL && ( 3 <= INLINE_LEVEL )
+INLINE struct uint192
+ softfloat_add192(
+     uint64_t a128,
+     uint64_t a64,
+     uint64_t a0,
+     uint64_t b128,
+     uint64_t b64,
+     uint64_t b0
+ )
+{
+    struct uint192 z;
+    unsigned int carry64, carry128;
+    /* Wrap-detection carries: a sum smaller than its addend overflowed.  */
+    z.v0 = a0 + b0;
+    carry64 = ( z.v0 < a0 );
+    z.v64 = a64 + b64;
+    carry128 = ( z.v64 < a64 );
+    z.v128 = a128 + b128;
+    /* Adding the carry into the middle word can itself wrap; fold that
+       secondary carry into carry128 before applying it.  */
+    z.v64 += carry64;
+    carry128 += ( z.v64 < carry64 );
+    z.v128 += carry128;
+    return z;
+}
+#else
+struct uint192
+ softfloat_add192(
+     uint64_t, uint64_t, uint64_t, uint64_t, uint64_t, uint64_t );
+#endif
+
+/*----------------------------------------------------------------------------
+| Subtracts the 128-bit value formed by concatenating `b0' and `b1' from the
+| 128-bit value formed by concatenating `a0' and `a1'.  Subtraction is modulo
+| 2^128, so any borrow out (carry out) is lost.  The result is broken into two
+| 64-bit pieces which are stored at the locations pointed to by `z0Ptr' and
+| `z1Ptr'.
+*----------------------------------------------------------------------------*/
+#if defined INLINE_LEVEL && ( 2 <= INLINE_LEVEL )
+INLINE struct uint128
+ softfloat_sub128( uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0 )
+{
+    struct uint128 z;
+    z.v0 = a0 - b0;
+    z.v64 = a64 - b64;
+    /* Borrow out of the low word exactly when the unsigned minuend was
+       smaller than the subtrahend.  */
+    z.v64 -= ( a0 < b0 );
+    return z;
+}
+#else
+struct uint128 softfloat_sub128( uint64_t, uint64_t, uint64_t, uint64_t );
+#endif
+
+/*----------------------------------------------------------------------------
+| Subtracts the 192-bit value formed by concatenating `b0', `b1', and `b2'
+| from the 192-bit value formed by concatenating `a0', `a1', and `a2'.
+| Subtraction is modulo 2^192, so any borrow out (carry out) is lost.  The
+| result is broken into three 64-bit pieces which are stored at the locations
+| pointed to by `z0Ptr', `z1Ptr', and `z2Ptr'.
+*----------------------------------------------------------------------------*/
+#if defined INLINE_LEVEL && ( 3 <= INLINE_LEVEL )
+INLINE struct uint192
+ softfloat_sub192(
+     uint64_t a128,
+     uint64_t a64,
+     uint64_t a0,
+     uint64_t b128,
+     uint64_t b64,
+     uint64_t b0
+ )
+{
+    struct uint192 z;
+    unsigned int borrow64, borrow128;
+    z.v0 = a0 - b0;
+    borrow64 = ( a0 < b0 );
+    z.v64 = a64 - b64;
+    borrow128 = ( a64 < b64 );
+    z.v128 = a128 - b128;
+    /* Order matters: the secondary borrow test uses z.v64 BEFORE borrow64
+       is subtracted from it.  */
+    borrow128 += ( z.v64 < borrow64 );
+    z.v64 -= borrow64;
+    z.v128 -= borrow128;
+    return z;
+}
+#else
+struct uint192
+ softfloat_sub192(
+     uint64_t, uint64_t, uint64_t, uint64_t, uint64_t, uint64_t );
+#endif
+
+/*----------------------------------------------------------------------------
+| Multiplies `a' by `b' to obtain a 128-bit product.  The product is broken
+| into two 64-bit halves and returned as a `struct uint128'.
+*----------------------------------------------------------------------------*/
+#if defined INLINE_LEVEL && ( 4 <= INLINE_LEVEL )
+INLINE struct uint128 softfloat_mul64To128( uint64_t a, uint64_t b )
+{
+    /* Schoolbook multiply on 32-bit halves: four partial products, with
+       the two "middle" products summed and split across both result words.  */
+    uint32_t a32 = a>>32;
+    uint32_t a0 = a;
+    uint32_t b32 = b>>32;
+    uint32_t b0 = b;
+    struct uint128 z;
+    uint64_t mid1, mid2, mid;
+    z.v0 = (uint64_t) a0 * b0;
+    mid1 = (uint64_t) a32 * b0;
+    mid2 = (uint64_t) a0 * b32;
+    z.v64 = (uint64_t) a32 * b32;
+    mid = mid1 + mid2;
+    /* `mid < mid1' detects the carry out of the middle-product sum.  */
+    z.v64 += ( (uint64_t) ( mid < mid1 ) )<<32 | mid>>32;
+    mid <<= 32;
+    z.v0 += mid;
+    z.v64 += ( z.v0 < mid );
+    return z;
+}
+#else
+struct uint128 softfloat_mul64To128( uint64_t, uint64_t );
+#endif
+
+/*----------------------------------------------------------------------------
+| Multiplies the 128-bit value formed by concatenating `a0' and `a1' by
+| `b' to obtain a 192-bit product.  The product is broken into three 64-bit
+| pieces which are stored at the locations pointed to by `z0Ptr', `z1Ptr', and
+| `z2Ptr'.
+*----------------------------------------------------------------------------*/
+struct uint192 softfloat_mul128By64To192( uint64_t, uint64_t, uint64_t );
+/*----------------------------------------------------------------------------
+| Multiplies the 128-bit value formed by concatenating `a0' and `a1' to the
+| 128-bit value formed by concatenating `b0' and `b1' to obtain a 256-bit
+| product.  The product is broken into four 64-bit pieces which are stored at
+| the locations pointed to by `z0Ptr', `z1Ptr', `z2Ptr', and `z3Ptr'.
+*----------------------------------------------------------------------------*/
+struct uint256 softfloat_mul128To256( uint64_t, uint64_t, uint64_t, uint64_t );
+
+/*----------------------------------------------------------------------------
+| Returns an approximation to the 64-bit integer quotient obtained by dividing
+| `b' into the 128-bit value formed by concatenating `a0' and `a1'.  The
+| divisor `b' must be at least 2^63.  If q is the exact quotient truncated
+| toward zero, the approximation returned lies between q and q + 2 inclusive.
+| If the exact quotient q is larger than 64 bits, the maximum positive 64-bit
+| unsigned integer is returned.
+*----------------------------------------------------------------------------*/
+uint64_t softfloat_estimateDiv128To64( uint64_t, uint64_t, uint64_t );
+
+/*----------------------------------------------------------------------------
+| Returns an approximation to the square root of the 32-bit significand given
+| by `a'.  Considered as an integer, `a' must be at least 2^31.  If bit 0 of
+| `aExp' (the least significant bit) is 1, the integer returned approximates
+| 2^31*sqrt(`a'/2^31), where `a' is considered an integer.  If bit 0 of `aExp'
+| is 0, the integer returned approximates 2^31*sqrt(`a'/2^30).  In either
+| case, the approximation returned lies strictly within +/-2 of the exact
+| value.
+*----------------------------------------------------------------------------*/
+uint32_t softfloat_estimateSqrt32( unsigned int, uint32_t );
+
diff --git a/target-riscv/fpu-custom-riscv/s_add128.c 
b/target-riscv/fpu-custom-riscv/s_add128.c
new file mode 100755
index 0000000..59c0348
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/s_add128.c
@@ -0,0 +1,17 @@
+
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+
+struct uint128
+ softfloat_add128( uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0 )
+{
+    struct uint128 sum;
+
+    /* Add the low words first; an unsigned wrap there is detected by the
+       result being smaller than an addend, and is carried into the high
+       word.  Carry out of the high word is deliberately discarded
+       (addition modulo 2^128).  */
+    sum.v0 = a0 + b0;
+    sum.v64 = a64 + b64 + ( sum.v0 < b0 );
+    return sum;
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/s_add192.c 
b/target-riscv/fpu-custom-riscv/s_add192.c
new file mode 100755
index 0000000..543eb5d
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/s_add192.c
@@ -0,0 +1,30 @@
+
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+
+struct uint192
+ softfloat_add192(
+     uint64_t a128,
+     uint64_t a64,
+     uint64_t a0,
+     uint64_t b128,
+     uint64_t b64,
+     uint64_t b0
+ )
+{
+    struct uint192 z;
+    unsigned int carry64, carry128;
+
+    /* 192-bit add modulo 2^192; carries are detected by wrapped sums
+       being smaller than an addend (same scheme as softfloat_add128).  */
+    z.v0 = a0 + b0;
+    carry64 = ( z.v0 < a0 );
+    z.v64 = a64 + b64;
+    carry128 = ( z.v64 < a64 );
+    z.v128 = a128 + b128;
+    /* Folding carry64 into the middle word can wrap again; accumulate
+       that secondary carry before applying carry128.  */
+    z.v64 += carry64;
+    carry128 += ( z.v64 < carry64 );
+    z.v128 += carry128;
+    return z;
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/s_addMagsF32.c 
b/target-riscv/fpu-custom-riscv/s_addMagsF32.c
new file mode 100755
index 0000000..f361e2b
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/s_addMagsF32.c
@@ -0,0 +1,75 @@
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+#include "internals.h"
+#include "specialize.h"
+
+/*----------------------------------------------------------------------------
+| Adds the magnitudes of the single-precision values encoded by `uiA' and
+| `uiB' and returns the rounded sum carrying sign `signZ'.  NaN operands
+| are propagated; an infinite operand yields infinity.
+*----------------------------------------------------------------------------*/
+float32_t
+ softfloat_addMagsF32( uint_fast32_t uiA, uint_fast32_t uiB, bool signZ )
+{
+    int_fast16_t expA;
+    uint_fast32_t sigA;
+    int_fast16_t expB;
+    uint_fast32_t sigB;
+    int_fast16_t expDiff;
+    uint_fast32_t uiZ;
+    int_fast16_t expZ;
+    uint_fast32_t sigZ;
+    union ui32_f32 uZ;
+
+    expA = expF32UI( uiA );
+    sigA = fracF32UI( uiA );
+    expB = expF32UI( uiB );
+    sigB = fracF32UI( uiB );
+    expDiff = expA - expB;
+    /* Shift significands left 6 so guard/round/sticky bits fit below the
+       integer bit during alignment and addition.  */
+    sigA <<= 6;
+    sigB <<= 6;
+    if ( ! expDiff ) {
+        /* Equal exponents: no alignment shift needed.  */
+        if ( expA == 0xFF ) {
+            /* Inf + Inf = Inf; any NaN significand propagates.  */
+            if ( sigA | sigB ) goto propagateNaN;
+            uiZ = uiA;
+            goto uiZ;
+        }
+        if ( ! expA ) {
+            /* Both subnormal (or zero): adding the raw encodings is exact,
+               and a carry naturally bumps the result into exponent 1.  */
+            uiZ = packToF32UI( signZ, 0, ( uiA + uiB ) & 0x7FFFFFFF );
+            goto uiZ;
+        }
+        expZ = expA;
+        /* 0x40000000 supplies both implicit leading bits (0x20000000
+           each after the <<6 above).  */
+        sigZ = 0x40000000 + sigA + sigB;
+    } else {
+        if ( expDiff < 0 ) {
+            if ( expB == 0xFF ) {
+                if ( sigB ) goto propagateNaN;
+                uiZ = packToF32UI( signZ, 0xFF, 0 );
+                goto uiZ;
+            }
+            expZ = expB;
+            /* Restore A's implicit bit if normal; if subnormal, doubling
+               compensates for its effective exponent being expA + 1.  */
+            sigA += expA ? 0x20000000 : sigA;
+            sigA = softfloat_shift32RightJam( sigA, - expDiff );
+        } else {
+            if ( expA == 0xFF ) {
+                if ( sigA ) goto propagateNaN;
+                uiZ = uiA;
+                goto uiZ;
+            }
+            expZ = expA;
+            sigB += expB ? 0x20000000 : sigB;
+            sigB = softfloat_shift32RightJam( sigB, expDiff );
+        }
+        sigZ = 0x20000000 + sigA + sigB;
+        /* If the sum did not carry past the integer bit, normalize by one
+           position.  */
+        if ( sigZ < 0x40000000 ) {
+            --expZ;
+            sigZ <<= 1;
+        }
+    }
+    return softfloat_roundPackToF32( signZ, expZ, sigZ );
+ propagateNaN:
+    uiZ = softfloat_propagateNaNF32UI( uiA, uiB );
+ uiZ:
+    uZ.ui = uiZ;
+    return uZ.f;
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/s_addMagsF64.c 
b/target-riscv/fpu-custom-riscv/s_addMagsF64.c
new file mode 100755
index 0000000..a81c3e4
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/s_addMagsF64.c
@@ -0,0 +1,77 @@
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+#include "internals.h"
+#include "specialize.h"
+
+/*----------------------------------------------------------------------------
+| Adds the magnitudes of the double-precision values encoded by `uiA' and
+| `uiB' and returns the rounded sum carrying sign `signZ'.  NaN operands
+| are propagated; an infinite operand yields infinity.
+*----------------------------------------------------------------------------*/
+float64_t
+ softfloat_addMagsF64( uint_fast64_t uiA, uint_fast64_t uiB, bool signZ )
+{
+    int_fast16_t expA;
+    uint_fast64_t sigA;
+    int_fast16_t expB;
+    uint_fast64_t sigB;
+    int_fast16_t expDiff;
+    uint_fast64_t uiZ;
+    int_fast16_t expZ;
+    uint_fast64_t sigZ;
+    union ui64_f64 uZ;
+
+    expA = expF64UI( uiA );
+    sigA = fracF64UI( uiA );
+    expB = expF64UI( uiB );
+    sigB = fracF64UI( uiB );
+    expDiff = expA - expB;
+    /* Shift significands left 9 so guard/round/sticky bits fit below the
+       integer bit during alignment and addition.  */
+    sigA <<= 9;
+    sigB <<= 9;
+    if ( ! expDiff ) {
+        /* Equal exponents: no alignment shift needed.  */
+        if ( expA == 0x7FF ) {
+            /* Inf + Inf = Inf; any NaN significand propagates.  */
+            if ( sigA | sigB ) goto propagateNaN;
+            uiZ = uiA;
+            goto uiZ;
+        }
+        if ( ! expA ) {
+            /* Both subnormal (or zero): adding the raw encodings is exact,
+               and a carry naturally bumps the result into exponent 1.  */
+            uiZ =
+                packToF64UI(
+                    signZ, 0, ( uiA + uiB ) & UINT64_C( 0x7FFFFFFFFFFFFFFF ) );
+            goto uiZ;
+        }
+        expZ = expA;
+        /* 0x4000... supplies both implicit leading bits (0x2000... each
+           after the <<9 above).  */
+        sigZ = UINT64_C( 0x4000000000000000 ) + sigA + sigB;
+    } else {
+        if ( expDiff < 0 ) {
+            if ( expB == 0x7FF ) {
+                if ( sigB ) goto propagateNaN;
+                uiZ = packToF64UI( signZ, 0x7FF, 0 );
+                goto uiZ;
+            }
+            expZ = expB;
+            /* Restore A's implicit bit if normal; if subnormal, doubling
+               compensates for its effective exponent being expA + 1.  */
+            sigA += expA ? UINT64_C( 0x2000000000000000 ) : sigA;
+            sigA = softfloat_shift64RightJam( sigA, - expDiff );
+        } else {
+            if ( expA == 0x7FF ) {
+                if ( sigA ) goto propagateNaN;
+                uiZ = uiA;
+                goto uiZ;
+            }
+            expZ = expA;
+            sigB += expB ? UINT64_C( 0x2000000000000000 ) : sigB;
+            sigB = softfloat_shift64RightJam( sigB, expDiff );
+        }
+        sigZ = UINT64_C( 0x2000000000000000 ) + sigA + sigB;
+        /* If the sum did not carry past the integer bit, normalize by one
+           position.  */
+        if ( sigZ < UINT64_C( 0x4000000000000000 ) ) {
+            --expZ;
+            sigZ <<= 1;
+        }
+    }
+    return softfloat_roundPackToF64( signZ, expZ, sigZ );
+ propagateNaN:
+    uiZ = softfloat_propagateNaNF64UI( uiA, uiB );
+ uiZ:
+    uZ.ui = uiZ;
+    return uZ.f;
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/s_commonNaNToF32UI.c 
b/target-riscv/fpu-custom-riscv/s_commonNaNToF32UI.c
new file mode 100755
index 0000000..e16950c
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/s_commonNaNToF32UI.c
@@ -0,0 +1,17 @@
+
+#include <stdint.h>
+#include "platform.h"
+#include "specialize.h"
+
+/*----------------------------------------------------------------------------
+| Returns the result of converting the canonical NaN `a' to the single-
+| precision floating-point format.
+*----------------------------------------------------------------------------*/
+
+uint_fast32_t softfloat_commonNaNToF32UI( struct commonNaN a )
+{
+
+    /* This port canonicalizes NaNs: the payload and sign carried in `a'
+       are discarded and the default quiet-NaN encoding is returned.  */
+    return defaultNaNF32UI;
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/s_commonNaNToF64UI.c 
b/target-riscv/fpu-custom-riscv/s_commonNaNToF64UI.c
new file mode 100755
index 0000000..0555e1f
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/s_commonNaNToF64UI.c
@@ -0,0 +1,17 @@
+
+#include <stdint.h>
+#include "platform.h"
+#include "specialize.h"
+
+/*----------------------------------------------------------------------------
+| Returns the result of converting the canonical NaN `a' to the double-
+| precision floating-point format.
+*----------------------------------------------------------------------------*/
+
+uint_fast64_t softfloat_commonNaNToF64UI( struct commonNaN a )
+{
+
+    /* This port canonicalizes NaNs: the payload and sign carried in `a'
+       are discarded and the default quiet-NaN encoding is returned.  */
+    return defaultNaNF64UI;
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/s_countLeadingZeros32.c 
b/target-riscv/fpu-custom-riscv/s_countLeadingZeros32.c
new file mode 100755
index 0000000..0bd17e1
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/s_countLeadingZeros32.c
@@ -0,0 +1,22 @@
+
+#include <stdint.h>
+#include "primitives.h"
+
+int softfloat_countLeadingZeros32( uint32_t a )
+{
+    int count;
+
+    /* Binary-narrowing search for the most-significant set bit.  Behaves
+       exactly like the table-driven form, including returning 32 for a
+       zero input.  */
+    if ( ! a ) return 32;
+    count = 0;
+    if ( ! ( a & 0xFFFF0000 ) ) { count += 16; a <<= 16; }
+    if ( ! ( a & 0xFF000000 ) ) { count += 8;  a <<= 8;  }
+    if ( ! ( a & 0xF0000000 ) ) { count += 4;  a <<= 4;  }
+    if ( ! ( a & 0xC0000000 ) ) { count += 2;  a <<= 2;  }
+    if ( ! ( a & 0x80000000 ) ) { count += 1; }
+    return count;
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/s_countLeadingZeros64.c 
b/target-riscv/fpu-custom-riscv/s_countLeadingZeros64.c
new file mode 100755
index 0000000..79f4280
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/s_countLeadingZeros64.c
@@ -0,0 +1,32 @@
+
+#include <stdint.h>
+#include "primitives.h"
+#include "platform.h"
+
+int softfloat_countLeadingZeros64( uint64_t a )
+{
+    int count;
+
+    /* Binary-narrowing search for the most-significant set bit.  Behaves
+       exactly like the table-driven form, including returning 64 for a
+       zero input.  */
+    if ( ! a ) return 64;
+    count = 0;
+    if ( ! ( a & UINT64_C( 0xFFFFFFFF00000000 ) ) ) { count += 32; a <<= 32; }
+    if ( ! ( a & UINT64_C( 0xFFFF000000000000 ) ) ) { count += 16; a <<= 16; }
+    if ( ! ( a & UINT64_C( 0xFF00000000000000 ) ) ) { count += 8;  a <<= 8;  }
+    if ( ! ( a & UINT64_C( 0xF000000000000000 ) ) ) { count += 4;  a <<= 4;  }
+    if ( ! ( a & UINT64_C( 0xC000000000000000 ) ) ) { count += 2;  a <<= 2;  }
+    if ( ! ( a & UINT64_C( 0x8000000000000000 ) ) ) { count += 1; }
+    return count;
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/s_countLeadingZeros8.c 
b/target-riscv/fpu-custom-riscv/s_countLeadingZeros8.c
new file mode 100755
index 0000000..4eca7e9
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/s_countLeadingZeros8.c
@@ -0,0 +1,24 @@
+
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+
+/* softfloat_countLeadingZeros8[b] gives the number of leading zero bits in
+   the 8-bit value `b'; entry 0 is 8.  Used as the final lookup step by the
+   wider count-leading-zeros routines.  */
+const uint8_t softfloat_countLeadingZeros8[ 256 ] = {
+    8, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4,
+    3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
diff --git a/target-riscv/fpu-custom-riscv/s_eq128.c 
b/target-riscv/fpu-custom-riscv/s_eq128.c
new file mode 100755
index 0000000..7261dc4
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/s_eq128.c
@@ -0,0 +1,13 @@
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+
+bool softfloat_eq128( uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0 )
+{
+
+    /* Two 128-bit values are equal exactly when both 64-bit halves match.  */
+    return ( a0 == b0 ) && ( a64 == b64 );
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/s_estimateDiv128To64.c 
b/target-riscv/fpu-custom-riscv/s_estimateDiv128To64.c
new file mode 100755
index 0000000..f8610a2
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/s_estimateDiv128To64.c
@@ -0,0 +1,28 @@
+
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+
+/*----------------------------------------------------------------------------
+| Approximates the 64-bit quotient of (`a64':`a0') / `b'; `b' must be at
+| least 2^63.  The estimate is built 32 quotient bits at a time and lies
+| between the true truncated quotient q and q + 2 (see the header comment
+| in the primitives header for the full contract).
+*----------------------------------------------------------------------------*/
+uint64_t softfloat_estimateDiv128To64( uint64_t a64, uint64_t a0, uint64_t b )
+{
+    uint32_t b32;
+    uint64_t z;
+    struct uint128 term, rem;
+    uint64_t rem32;
+
+    /* Quotient would overflow 64 bits: saturate.  */
+    if ( b <= a64 ) return UINT64_C( 0xFFFFFFFFFFFFFFFF );
+    b32 = b>>32;
+    /* First estimate of the high 32 quotient bits, using only b's top half.  */
+    z = ( (uint64_t) b32<<32 <= a64 ) ? UINT64_C( 0xFFFFFFFF00000000 )
+            : ( a64 / b32 )<<32;
+    term = softfloat_mul64To128( b, z );
+    rem = softfloat_sub128( a64, a0, term.v64, term.v0 );
+    /* A set top bit means the remainder went negative: the estimate was
+       too high, so walk it back one step (2^32) at a time.  */
+    while ( UINT64_C( 0x8000000000000000 ) <= rem.v64 ) {
+        z -= UINT64_C( 0x100000000 );
+        rem = softfloat_add128( rem.v64, rem.v0, b32, (uint64_t) ( b<<32 ) );
+    }
+    /* Estimate the low 32 quotient bits from the corrected remainder.  */
+    rem32 = ( rem.v64<<32 ) | ( rem.v0>>32 );
+    z |= ( (uint64_t) b32<<32 <= rem32 ) ? 0xFFFFFFFF : rem32 / b32;
+    return z;
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/s_estimateSqrt32.c 
b/target-riscv/fpu-custom-riscv/s_estimateSqrt32.c
new file mode 100755
index 0000000..e22a9dc
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/s_estimateSqrt32.c
@@ -0,0 +1,37 @@
+
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+
+/*----------------------------------------------------------------------------
+| Approximates the square root of the significand `a' (which must be at
+| least 2^31), scaled according to the parity of `expA'; the result lies
+| strictly within +/-2 of the exact scaled value.
+*----------------------------------------------------------------------------*/
+uint32_t softfloat_estimateSqrt32( unsigned int expA, uint32_t a )
+{
+    static const uint16_t sqrtOddAdjustments[] = {
+        0x0004, 0x0022, 0x005D, 0x00B1, 0x011D, 0x019F, 0x0236, 0x02E0,
+        0x039C, 0x0468, 0x0545, 0x0631, 0x072B, 0x0832, 0x0946, 0x0A67
+    };
+    static const uint16_t sqrtEvenAdjustments[] = {
+        0x0A2D, 0x08AF, 0x075A, 0x0629, 0x051A, 0x0429, 0x0356, 0x029E,
+        0x0200, 0x0179, 0x0109, 0x00AF, 0x0068, 0x0034, 0x0012, 0x0002
+    };
+    int index;
+    uint32_t z;
+    union { uint32_t ui; int32_t i; } u32;
+
+    /* Table-corrected linear first guess, selected by the top fraction
+       bits, followed by one Newton-style refinement.  */
+    index = ( a>>27 ) & 15;
+    if ( expA & 1 ) {
+        /* Odd exponent: approximate 2^31*sqrt(a/2^31).  */
+        z = 0x4000 + ( a>>17 ) - sqrtOddAdjustments[ index ];
+        z = ( ( a / z )<<14 ) + ( z<<15 );
+        a >>= 1;
+    } else {
+        /* Even exponent: approximate 2^31*sqrt(a/2^30).  */
+        z = 0x8000 + ( a>>17 ) - sqrtEvenAdjustments[ index ];
+        z = a / z + z;
+        z = ( 0x20000 <= z ) ? 0xFFFF8000 : z<<15;
+        if ( z <= a ) {
+            /* NOTE(review): `u32.i>>1' relies on an arithmetic
+               (sign-extending) right shift of a negative int, which is
+               implementation-defined in ISO C -- confirm for target
+               compilers.  */
+            u32.ui = a;
+            return u32.i>>1;
+        }
+    }
+    /* Final refinement: average the guess with a/z.  */
+    return (uint32_t) ( ( (uint64_t) a<<31 ) / z ) + ( z>>1 );
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/s_f32UIToCommonNaN.c 
b/target-riscv/fpu-custom-riscv/s_f32UIToCommonNaN.c
new file mode 100755
index 0000000..9ee0db9
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/s_f32UIToCommonNaN.c
@@ -0,0 +1,25 @@
+
+#include <stdint.h>
+#include "platform.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Returns the result of converting the single-precision floating-point NaN
+| `a' to the canonical NaN format.  If `a' is a signaling NaN, the invalid
+| exception is raised.
+*----------------------------------------------------------------------------*/
+struct commonNaN softfloat_f32UIToCommonNaN( uint_fast32_t uiA )
+{
+    struct commonNaN z;
+
+    /* A signaling NaN input raises the invalid-operation flag; the NaN is
+       then treated like any other NaN.  */
+    if ( softfloat_isSigNaNF32UI( uiA ) ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+    }
+    z.sign = uiA>>31;
+    /* Fixed canonical payload: the input's fraction bits are discarded
+       (this port canonicalizes NaNs), only the sign is preserved.  */
+    z.v64 = (uint_fast64_t) 0x7FFFF <<41;
+    z.v0 = 0;
+    return z;
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/s_f64UIToCommonNaN.c 
b/target-riscv/fpu-custom-riscv/s_f64UIToCommonNaN.c
new file mode 100755
index 0000000..84d8ca0
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/s_f64UIToCommonNaN.c
@@ -0,0 +1,25 @@
+
+#include <stdint.h>
+#include "platform.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Returns the result of converting the double-precision floating-point NaN
+| `a' to the canonical NaN format.  If `a' is a signaling NaN, the invalid
+| exception is raised.
+*----------------------------------------------------------------------------*/
+struct commonNaN softfloat_f64UIToCommonNaN( uint_fast64_t uiA )
+{
+    struct commonNaN z;
+
+    /* A signaling NaN input raises the invalid-operation flag; the NaN is
+       then treated like any other NaN.  */
+    if ( softfloat_isSigNaNF64UI( uiA ) ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+    }
+    z.sign = uiA>>63;
+    /* Fixed canonical payload: the input's fraction bits are discarded
+       (this port canonicalizes NaNs), only the sign is preserved.  */
+    z.v64 = (uint_fast64_t) 0xFFFFFFFFFFFFF <<12;
+    z.v0 = 0;
+    return z;
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/s_isSigNaNF32UI.c 
b/target-riscv/fpu-custom-riscv/s_isSigNaNF32UI.c
new file mode 100755
index 0000000..0a9c33f
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/s_isSigNaNF32UI.c
@@ -0,0 +1,13 @@
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "specialize.h"
+
+bool softfloat_isSigNaNF32UI( uint_fast32_t ui )
+{
+
+    /* Signaling NaN: exponent field all ones, quiet bit (bit 22) clear,
+       and at least one of the remaining fraction bits set.  */
+    return
+        ( ( ui & 0x7FC00000 ) == 0x7F800000 ) && ( ui & 0x003FFFFF );
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/s_isSigNaNF64UI.c 
b/target-riscv/fpu-custom-riscv/s_isSigNaNF64UI.c
new file mode 100755
index 0000000..d255213
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/s_isSigNaNF64UI.c
@@ -0,0 +1,15 @@
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "specialize.h"
+
+bool softfloat_isSigNaNF64UI( uint_fast64_t ui )
+{
+
+    /* Signaling NaN: exponent field all ones, quiet bit (bit 51) clear,
+       and at least one of the remaining fraction bits set.  */
+    return
+        ( ( ui & UINT64_C( 0x7FF8000000000000 ) )
+              == UINT64_C( 0x7FF0000000000000 ) )
+            && ( ui & UINT64_C( 0x0007FFFFFFFFFFFF ) );
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/s_le128.c 
b/target-riscv/fpu-custom-riscv/s_le128.c
new file mode 100755
index 0000000..83b1d7f
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/s_le128.c
@@ -0,0 +1,13 @@
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+
+bool softfloat_le128( uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0 )
+{
+
+    /* Compare high words first; only a tie falls through to the low words.  */
+    if ( a64 != b64 ) return ( a64 < b64 );
+    return ( a0 <= b0 );
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/s_lt128.c 
b/target-riscv/fpu-custom-riscv/s_lt128.c
new file mode 100755
index 0000000..33a3df4
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/s_lt128.c
@@ -0,0 +1,13 @@
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+
+bool softfloat_lt128( uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0 )
+{
+
+    /* Compare high words first; only a tie falls through to the low words.  */
+    if ( a64 != b64 ) return ( a64 < b64 );
+    return ( a0 < b0 );
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/s_mul128By64To192.c 
b/target-riscv/fpu-custom-riscv/s_mul128By64To192.c
new file mode 100755
index 0000000..dfa8825
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/s_mul128By64To192.c
@@ -0,0 +1,20 @@
+
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+
+/*----------------------------------------------------------------------------
+| Multiplies the 128-bit value (`a64':`a0') by `b', returning the full
+| 192-bit product as a `struct uint192'.
+*----------------------------------------------------------------------------*/
+struct uint192
+ softfloat_mul128By64To192( uint64_t a64, uint64_t a0, uint64_t b )
+{
+    struct uint128 p0, p64;
+    struct uint192 z;
+
+    /* Two 64x64 partial products; the carry between them is detected by
+       the middle-word sum wrapping below one of its addends.  */
+    p0 = softfloat_mul64To128( a0, b );
+    z.v0 = p0.v0;
+    p64 = softfloat_mul64To128( a64, b );
+    z.v64 = p64.v0 + p0.v64;
+    z.v128 = p64.v64 + ( z.v64 < p64.v0 );
+    return z;
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/s_mul128To256.c 
b/target-riscv/fpu-custom-riscv/s_mul128To256.c
new file mode 100755
index 0000000..a96cd94
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/s_mul128To256.c
@@ -0,0 +1,28 @@
+
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+
+struct uint256
+ softfloat_mul128To256( uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0 )
+{
+    struct uint128 p0, p64, p128;
+    struct uint256 z;
+
+    p0 = softfloat_mul64To128( a0, b0 );
+    z.v0 = p0.v0;
+    p64 = softfloat_mul64To128( a64, b0 );
+    z.v64 = p64.v0 + p0.v64;
+    z.v128 = p64.v64 + ( z.v64 < p64.v0 );
+    p128 = softfloat_mul64To128( a64, b64 );
+    z.v128 += p128.v0;
+    z.v192 = p128.v64 + ( z.v128 < p128.v0 );
+    p64 = softfloat_mul64To128( a0, b64 );
+    z.v64 += p64.v0;
+    p64.v64 += ( z.v64 < p64.v0 );
+    z.v128 += p64.v64;
+    z.v192 += ( z.v128 < p64.v64 );
+    return z;
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/s_mul64To128.c 
b/target-riscv/fpu-custom-riscv/s_mul64To128.c
new file mode 100755
index 0000000..c17780b
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/s_mul64To128.c
@@ -0,0 +1,28 @@
+
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+
+/*----------------------------------------------------------------------------
+| Multiplies `a' by `b', returning the full 128-bit product as a
+| `struct uint128'.
+*----------------------------------------------------------------------------*/
+struct uint128 softfloat_mul64To128( uint64_t a, uint64_t b )
+{
+    uint32_t a32, a0, b32, b0;
+    struct uint128 z;
+    uint64_t mid1, mid2, mid;
+
+    /* Schoolbook multiply on 32-bit halves: four partial products, the
+       two "middle" ones summed and split across both result words.  */
+    a32 = a>>32;
+    a0 = a;
+    b32 = b>>32;
+    b0 = b;
+    z.v0 = (uint64_t) a0 * b0;
+    mid1 = (uint64_t) a32 * b0;
+    mid2 = (uint64_t) a0 * b32;
+    z.v64 = (uint64_t) a32 * b32;
+    mid = mid1 + mid2;
+    /* `mid < mid1' detects the carry out of the middle-product sum.  */
+    z.v64 += ( (uint64_t) ( mid < mid1 ) )<<32 | mid>>32;
+    mid <<= 32;
+    z.v0 += mid;
+    z.v64 += ( z.v0 < mid );
+    return z;
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/s_mulAddF32.c 
b/target-riscv/fpu-custom-riscv/s_mulAddF32.c
new file mode 100755
index 0000000..5479728
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/s_mulAddF32.c
@@ -0,0 +1,171 @@
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Common core of the single-precision fused multiply-add operations:
+| computes A*B + C from the raw bit patterns `uiA', `uiB', and `uiC', with a
+| single rounding at the end.  `op' selects the variant:
+| `softfloat_mulAdd_subC' negates C, and `softfloat_mulAdd_subProd' negates
+| the product A*B.
+*----------------------------------------------------------------------------*/
+float32_t
+ softfloat_mulAddF32(
+     int op, uint_fast32_t uiA, uint_fast32_t uiB, uint_fast32_t uiC )
+{
+    bool signA;
+    int_fast16_t expA;
+    uint_fast32_t sigA;
+    bool signB;
+    int_fast16_t expB;
+    uint_fast32_t sigB;
+    bool signC;
+    int_fast16_t expC;
+    uint_fast32_t sigC;
+    bool signProd;
+    uint_fast32_t magBits, uiZ;
+    struct exp16_sig32 normExpSig;
+    int_fast16_t expProd;
+    uint_fast64_t sigProd;
+    bool signZ;
+    int_fast16_t expZ;
+    uint_fast32_t sigZ;
+    int_fast16_t expDiff;
+    uint_fast64_t sigZ64, sigC64;
+    int shiftCount;
+    union ui32_f32 uZ;
+
+    /* Unpack sign, biased exponent, and fraction fields, applying the
+       requested negations of C and of the product. */
+    signA = signF32UI( uiA );
+    expA = expF32UI( uiA );
+    sigA = fracF32UI( uiA );
+    signB = signF32UI( uiB );
+    expB = expF32UI( uiB );
+    sigB = fracF32UI( uiB );
+    signC = signF32UI( uiC ) ^ ( op == softfloat_mulAdd_subC );
+    expC = expF32UI( uiC );
+    sigC = fracF32UI( uiC );
+    signProd = signA ^ signB ^ ( op == softfloat_mulAdd_subProd );
+    /* Dispose of NaN and infinity operands first. */
+    if ( expA == 0xFF ) {
+        if ( sigA || ( ( expB == 0xFF ) && sigB ) ) goto propagateNaN_ABC;
+        magBits = expB | sigB;
+        goto infProdArg;
+    }
+    if ( expB == 0xFF ) {
+        if ( sigB ) goto propagateNaN_ABC;
+        magBits = expA | sigA;
+        goto infProdArg;
+    }
+    if ( expC == 0xFF ) {
+        if ( sigC ) {
+            uiZ = 0;
+            goto propagateNaN_ZC;
+        }
+        uiZ = uiC;
+        goto uiZ;
+    }
+    /* Normalize subnormal multiplicands; a zero A or B exits via zeroProd. */
+    if ( ! expA ) {
+        if ( ! sigA ) goto zeroProd;
+        normExpSig = softfloat_normSubnormalF32Sig( sigA );
+        expA = normExpSig.exp;
+        sigA = normExpSig.sig;
+    }
+    if ( ! expB ) {
+        if ( ! sigB ) goto zeroProd;
+        normExpSig = softfloat_normSubnormalF32Sig( sigB );
+        expB = normExpSig.exp;
+        sigB = normExpSig.sig;
+    }
+    /* Make the leading 1 explicit and left-align each significand, then
+       take the exact 64-bit product and normalize it to bit 62. */
+    expProd = expA + expB - 0x7E;
+    sigA = ( sigA | 0x00800000 )<<7;
+    sigB = ( sigB | 0x00800000 )<<7;
+    sigProd = (uint_fast64_t) sigA * sigB;
+    if ( sigProd < UINT64_C( 0x2000000000000000 ) ) {
+        --expProd;
+        sigProd <<= 1;
+    }
+    signZ = signProd;
+    if ( ! expC ) {
+        if ( ! sigC ) {
+            /* C is zero: the result is just the rounded product. */
+            expZ = expProd - 1;
+            sigZ = softfloat_shortShift64RightJam( sigProd, 31 );
+            goto roundPack;
+        }
+        normExpSig = softfloat_normSubnormalF32Sig( sigC );
+        expC = normExpSig.exp;
+        sigC = normExpSig.sig;
+    }
+    /* Align C against the product and combine. */
+    sigC = ( sigC | 0x00800000 )<<6;
+    expDiff = expProd - expC;
+    if ( signProd == signC ) {
+        /* Same signs: magnitude addition; shifted-out bits are jammed so
+           rounding still sees them. */
+        if ( expDiff <= 0 ) {
+            expZ = expC;
+            sigZ = sigC + softfloat_shift64RightJam( sigProd, 32 - expDiff );
+        } else {
+            expZ = expProd;
+            sigZ64 =
+                sigProd
+                    + softfloat_shift64RightJam(
+                          (uint_fast64_t) sigC<<32, expDiff );
+            sigZ = softfloat_shortShift64RightJam( sigZ64, 32 );
+        }
+        if ( sigZ < 0x40000000 ) {
+            --expZ;
+            sigZ <<= 1;
+        }
+    } else {
+/*** OPTIMIZE BETTER? ***/
+        /* Opposite signs: magnitude subtraction; the result may lose many
+           leading bits or cancel entirely. */
+        sigC64 = (uint_fast64_t) sigC<<32;
+        if ( expDiff < 0 ) {
+            signZ = signC;
+            expZ = expC;
+            sigZ64 = sigC64 - softfloat_shift64RightJam( sigProd, - expDiff );
+        } else if ( ! expDiff ) {
+            expZ = expProd;
+            sigZ64 = sigProd - sigC64;
+            if ( ! sigZ64 ) goto completeCancellation;
+            if ( sigZ64 & UINT64_C( 0x8000000000000000 ) ) {
+                /* The subtraction went negative: flip sign and negate. */
+                signZ ^= 1;
+                sigZ64 = - sigZ64;
+            }
+        } else {
+            expZ = expProd;
+            sigZ64 = sigProd - softfloat_shift64RightJam( sigC64, expDiff );
+        }
+        /* Renormalize after cancellation. */
+        shiftCount = softfloat_countLeadingZeros64( sigZ64 ) - 1;
+        expZ -= shiftCount;
+        shiftCount -= 32;
+        if ( shiftCount < 0 ) {
+            sigZ = softfloat_shortShift64RightJam( sigZ64, - shiftCount );
+        } else {
+            sigZ = (uint_fast32_t) sigZ64<<shiftCount;
+        }
+    }
+ roundPack:
+    return softfloat_roundPackToF32( signZ, expZ, sigZ );
+ propagateNaN_ABC:
+    uiZ = softfloat_propagateNaNF32UI( uiA, uiB );
+    goto propagateNaN_ZC;
+ infProdArg:
+    /* Infinite product: inf +/- finite C is inf; inf - inf is invalid. */
+    if ( magBits ) {
+        uiZ = packToF32UI( signProd, 0xFF, 0 );
+        if ( expC != 0xFF ) goto uiZ;
+        if ( sigC ) goto propagateNaN_ZC;
+        if ( signProd == signC ) goto uiZ;
+    }
+// invalid:
+    softfloat_raiseFlags( softfloat_flag_invalid );
+    uiZ = defaultNaNF32UI;
+ propagateNaN_ZC:
+    uiZ = softfloat_propagateNaNF32UI( uiZ, uiC );
+    goto uiZ;
+ zeroProd:
+    /* A*B == 0: the result is C, except that an exact zero sum gets the
+       IEEE-mandated signed zero for the rounding mode. */
+    uiZ = uiC;
+    if ( ! ( expC | sigC ) && ( signProd != signC ) ) {
+ completeCancellation:
+        /* Exact cancellation yields +0, or -0 when rounding toward -inf. */
+        uiZ =
+            packToF32UI( softfloat_roundingMode == softfloat_round_min, 0, 0 );
+    }
+ uiZ:
+    uZ.ui = uiZ;
+    return uZ.f;
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/s_mulAddF64.c b/target-riscv/fpu-custom-riscv/s_mulAddF64.c
new file mode 100755
index 0000000..6e06422
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/s_mulAddF64.c
@@ -0,0 +1,188 @@
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Common core of the double-precision fused multiply-add operations:
+| computes A*B + C from the raw bit patterns `uiA', `uiB', and `uiC', with a
+| single rounding at the end.  `op' selects the variant:
+| `softfloat_mulAdd_subC' negates C, and `softfloat_mulAdd_subProd' negates
+| the product A*B.  Uses 128-bit intermediate arithmetic.
+*----------------------------------------------------------------------------*/
+float64_t
+ softfloat_mulAddF64(
+     int op, uint_fast64_t uiA, uint_fast64_t uiB, uint_fast64_t uiC )
+{
+    bool signA;
+    int_fast16_t expA;
+    uint_fast64_t sigA;
+    bool signB;
+    int_fast16_t expB;
+    uint_fast64_t sigB;
+    bool signC;
+    int_fast16_t expC;
+    uint_fast64_t sigC;
+    bool signProd;
+    uint_fast64_t magBits, uiZ;
+    struct exp16_sig64 normExpSig;
+    int_fast16_t expProd;
+    struct uint128 sigProd;
+    bool signZ;
+    int_fast16_t expZ;
+    uint_fast64_t sigZ;
+    int_fast16_t expDiff;
+    struct uint128 sigC128, sigZ128;
+    int shiftCount;
+    union ui64_f64 uZ;
+
+    /* Unpack sign, biased exponent, and fraction fields, applying the
+       requested negations of C and of the product. */
+    signA = signF64UI( uiA );
+    expA = expF64UI( uiA );
+    sigA = fracF64UI( uiA );
+    signB = signF64UI( uiB );
+    expB = expF64UI( uiB );
+    sigB = fracF64UI( uiB );
+    signC = signF64UI( uiC ) ^ ( op == softfloat_mulAdd_subC );
+    expC = expF64UI( uiC );
+    sigC = fracF64UI( uiC );
+    signProd = signA ^ signB ^ ( op == softfloat_mulAdd_subProd );
+    /* Dispose of NaN and infinity operands first. */
+    if ( expA == 0x7FF ) {
+        if ( sigA || ( ( expB == 0x7FF ) && sigB ) ) goto propagateNaN_ABC;
+        magBits = expB | sigB;
+        goto infProdArg;
+    }
+    if ( expB == 0x7FF ) {
+        if ( sigB ) goto propagateNaN_ABC;
+        magBits = expA | sigA;
+        goto infProdArg;
+    }
+    if ( expC == 0x7FF ) {
+        if ( sigC ) {
+            uiZ = 0;
+            goto propagateNaN_ZC;
+        }
+        uiZ = uiC;
+        goto uiZ;
+    }
+    /* Normalize subnormal multiplicands; a zero A or B exits via zeroProd. */
+    if ( ! expA ) {
+        if ( ! sigA ) goto zeroProd;
+        normExpSig = softfloat_normSubnormalF64Sig( sigA );
+        expA = normExpSig.exp;
+        sigA = normExpSig.sig;
+    }
+    if ( ! expB ) {
+        if ( ! sigB ) goto zeroProd;
+        normExpSig = softfloat_normSubnormalF64Sig( sigB );
+        expB = normExpSig.exp;
+        sigB = normExpSig.sig;
+    }
+    /* Make the leading 1 explicit and left-align each significand, then
+       take the exact 128-bit product and normalize its top word. */
+    expProd = expA + expB - 0x3FE;
+    sigA = ( sigA | UINT64_C( 0x0010000000000000 ) )<<10;
+    sigB = ( sigB | UINT64_C( 0x0010000000000000 ) )<<10;
+    sigProd = softfloat_mul64To128( sigA, sigB );
+    if ( sigProd.v64 < UINT64_C( 0x2000000000000000 ) ) {
+        --expProd;
+        sigProd = softfloat_shortShift128Left( sigProd.v64, sigProd.v0, 1 );
+    }
+    signZ = signProd;
+    if ( ! expC ) {
+        if ( ! sigC ) {
+            /* C is zero: collapse the 128-bit product to 64 bits (jamming
+               the low word into the sticky bit) and round. */
+            expZ = expProd - 1;
+            sigZ = sigProd.v64<<1 | ( sigProd.v0 != 0 );
+            goto roundPack;
+        }
+        normExpSig = softfloat_normSubnormalF64Sig( sigC );
+        expC = normExpSig.exp;
+        sigC = normExpSig.sig;
+    }
+    /* Align C against the product and combine. */
+    sigC = ( sigC | UINT64_C( 0x0010000000000000 ) )<<9;
+    expDiff = expProd - expC;
+    if ( signProd == signC ) {
+        /* Same signs: magnitude addition; `( ... != 0 )' terms jam the
+           discarded low bits into the sticky bit for rounding. */
+        if ( expDiff <= 0 ) {
+            expZ = expC;
+            if ( expDiff ) {
+                sigProd.v64 =
+                    softfloat_shift64RightJam( sigProd.v64, - expDiff );
+            }
+            sigZ = ( sigC + sigProd.v64 ) | ( sigProd.v0 != 0 );
+        } else {
+            expZ = expProd;
+            sigC128 = softfloat_shift128RightJam( sigC, 0, expDiff );
+            sigZ128 =
+                softfloat_add128(
+                    sigProd.v64, sigProd.v0, sigC128.v64, sigC128.v0 );
+            sigZ = sigZ128.v64 | ( sigZ128.v0 != 0 );
+        }
+        if ( sigZ < UINT64_C( 0x4000000000000000 ) ) {
+            --expZ;
+            sigZ <<= 1;
+        }
+    } else {
+/*** OPTIMIZE BETTER? ***/
+        /* Opposite signs: 128-bit magnitude subtraction; the result may
+           lose many leading bits or cancel entirely. */
+        if ( expDiff < 0 ) {
+            signZ = signC;
+            expZ = expC;
+            sigProd =
+                softfloat_shift128RightJam(
+                    sigProd.v64, sigProd.v0, - expDiff );
+            sigZ128 = softfloat_sub128( sigC, 0, sigProd.v64, sigProd.v0 );
+        } else if ( ! expDiff ) {
+            expZ = expProd;
+            sigZ128 = softfloat_sub128( sigProd.v64, sigProd.v0, sigC, 0 );
+            if ( ! ( sigZ128.v64 | sigZ128.v0 ) ) goto completeCancellation;
+            if ( sigZ128.v64 & UINT64_C( 0x8000000000000000 ) ) {
+                /* The subtraction went negative: flip sign and negate. */
+                signZ ^= 1;
+                sigZ128 = softfloat_sub128( 0, 0, sigZ128.v64, sigZ128.v0 );
+            }
+        } else {
+            expZ = expProd;
+            sigC128 = softfloat_shift128RightJam( sigC, 0, expDiff );
+            sigZ128 =
+                softfloat_sub128(
+                    sigProd.v64, sigProd.v0, sigC128.v64, sigC128.v0 );
+        }
+        /* Renormalize: skip a whole zero word first, then shift by the
+           remaining leading-zero count. */
+        if ( ! sigZ128.v64 ) {
+            expZ -= 64;
+            sigZ128.v64 = sigZ128.v0;
+            sigZ128.v0 = 0;
+        }
+        shiftCount = softfloat_countLeadingZeros64( sigZ128.v64 ) - 1;
+        expZ -= shiftCount;
+        if ( shiftCount < 0 ) {
+            sigZ = softfloat_shortShift64RightJam( sigZ128.v64, - shiftCount );
+        } else {
+            sigZ128 =
+                softfloat_shortShift128Left(
+                    sigZ128.v64, sigZ128.v0, shiftCount );
+            sigZ = sigZ128.v64;
+        }
+        sigZ |= ( sigZ128.v0 != 0 );
+    }
+ roundPack:
+    return softfloat_roundPackToF64( signZ, expZ, sigZ );
+ propagateNaN_ABC:
+    uiZ = softfloat_propagateNaNF64UI( uiA, uiB );
+    goto propagateNaN_ZC;
+ infProdArg:
+    /* Infinite product: inf +/- finite C is inf; inf - inf is invalid. */
+    if ( magBits ) {
+        uiZ = packToF64UI( signProd, 0x7FF, 0 );
+        if ( expC != 0x7FF ) goto uiZ;
+        if ( sigC ) goto propagateNaN_ZC;
+        if ( signProd == signC ) goto uiZ;
+    }
+// invalid:
+    softfloat_raiseFlags( softfloat_flag_invalid );
+    uiZ = defaultNaNF64UI;
+ propagateNaN_ZC:
+    uiZ = softfloat_propagateNaNF64UI( uiZ, uiC );
+    goto uiZ;
+ zeroProd:
+    /* A*B == 0: the result is C, except that an exact zero sum gets the
+       IEEE-mandated signed zero for the rounding mode. */
+    uiZ = uiC;
+    if ( ! ( expC | sigC ) && ( signProd != signC ) ) {
+ completeCancellation:
+        /* Exact cancellation yields +0, or -0 when rounding toward -inf. */
+        uiZ =
+            packToF64UI( softfloat_roundingMode == softfloat_round_min, 0, 0 );
+    }
+ uiZ:
+    uZ.ui = uiZ;
+    return uZ.f;
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/s_normRoundPackToF32.c b/target-riscv/fpu-custom-riscv/s_normRoundPackToF32.c
new file mode 100755
index 0000000..2e6f4b0
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/s_normRoundPackToF32.c
@@ -0,0 +1,24 @@
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+#include "internals.h"
+
+/*----------------------------------------------------------------------------
+| Normalizes `sig' and then rounds and packs it as a single-precision float.
+| Fast path: with at least 7 leading zeros the value carries no round bits
+| and, if the exponent is in range, can be packed exactly without rounding.
+*----------------------------------------------------------------------------*/
+float32_t
+ softfloat_normRoundPackToF32( bool sign, int_fast16_t exp, uint_fast32_t sig )
+{
+    int shiftCount;
+    union ui32_f32 uZ;
+
+    shiftCount = softfloat_countLeadingZeros32( sig ) - 1;
+    exp -= shiftCount;
+    /* The unsigned cast makes one compare reject both exp < 0 and
+       exp >= 0xFD. */
+    if ( ( 7 <= shiftCount ) && ( (uint16_t) exp < 0xFD ) ) {
+        /* `sig ? exp : 0' forces a zero significand to encode as zero. */
+        uZ.ui = packToF32UI( sign, sig ? exp : 0, sig<<( shiftCount - 7 ) );
+        return uZ.f;
+    } else {
+        return softfloat_roundPackToF32( sign, exp, sig<<shiftCount );
+    }
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/s_normRoundPackToF64.c b/target-riscv/fpu-custom-riscv/s_normRoundPackToF64.c
new file mode 100755
index 0000000..64dced4
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/s_normRoundPackToF64.c
@@ -0,0 +1,24 @@
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+#include "internals.h"
+
+/*----------------------------------------------------------------------------
+| Normalizes `sig' and then rounds and packs it as a double-precision float.
+| Fast path: with at least 10 leading zeros the value carries no round bits
+| and, if the exponent is in range, can be packed exactly without rounding.
+*----------------------------------------------------------------------------*/
+float64_t
+ softfloat_normRoundPackToF64( bool sign, int_fast16_t exp, uint_fast64_t sig )
+{
+    int shiftCount;
+    union ui64_f64 uZ;
+
+    shiftCount = softfloat_countLeadingZeros64( sig ) - 1;
+    exp -= shiftCount;
+    /* The unsigned cast makes one compare reject both exp < 0 and
+       exp >= 0x7FD. */
+    if ( ( 10 <= shiftCount ) && ( (uint16_t) exp < 0x7FD ) ) {
+        /* `sig ? exp : 0' forces a zero significand to encode as zero. */
+        uZ.ui = packToF64UI( sign, sig ? exp : 0, sig<<( shiftCount - 10 ) );
+        return uZ.f;
+    } else {
+        return softfloat_roundPackToF64( sign, exp, sig<<shiftCount );
+    }
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/s_normSubnormalF32Sig.c b/target-riscv/fpu-custom-riscv/s_normSubnormalF32Sig.c
new file mode 100755
index 0000000..b98eb86
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/s_normSubnormalF32Sig.c
@@ -0,0 +1,18 @@
+
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+#include "internals.h"
+
+/*----------------------------------------------------------------------------
+| Normalizes the significand `sig' of a subnormal single-precision value:
+| shifts it left until its leading 1 sits at bit 23 (the implicit-1
+| position) and returns the shifted significand with the matching
+| (possibly non-positive) exponent adjustment.
+*----------------------------------------------------------------------------*/
+struct exp16_sig32 softfloat_normSubnormalF32Sig( uint_fast32_t sig )
+{
+    int shiftCount;
+    struct exp16_sig32 z;
+
+    shiftCount = softfloat_countLeadingZeros32( sig ) - 8;
+    z.exp = 1 - shiftCount;
+    z.sig = sig<<shiftCount;
+    return z;
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/s_normSubnormalF64Sig.c b/target-riscv/fpu-custom-riscv/s_normSubnormalF64Sig.c
new file mode 100755
index 0000000..45a7c9e
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/s_normSubnormalF64Sig.c
@@ -0,0 +1,18 @@
+
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+#include "internals.h"
+
+/*----------------------------------------------------------------------------
+| Normalizes the significand `sig' of a subnormal double-precision value:
+| shifts it left until its leading 1 sits at bit 52 (the implicit-1
+| position) and returns the shifted significand with the matching
+| (possibly non-positive) exponent adjustment.
+*----------------------------------------------------------------------------*/
+struct exp16_sig64 softfloat_normSubnormalF64Sig( uint_fast64_t sig )
+{
+    int shiftCount;
+    struct exp16_sig64 z;
+
+    shiftCount = softfloat_countLeadingZeros64( sig ) - 11;
+    z.exp = 1 - shiftCount;
+    z.sig = sig<<shiftCount;
+    return z;
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/s_propagateNaNF32UI.c b/target-riscv/fpu-custom-riscv/s_propagateNaNF32UI.c
new file mode 100755
index 0000000..d8738d1
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/s_propagateNaNF32UI.c
@@ -0,0 +1,25 @@
+
+/*** UPDATE COMMENTS. ***/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Takes two single-precision floating-point values `a' and `b', one of which
+| is a NaN, and returns the NaN result.  If either `a' or `b' is a signaling
+| NaN, the invalid exception is raised.  The canonical default NaN is always
+| returned; the input NaN payloads are not propagated.
+*----------------------------------------------------------------------------*/
+
+uint_fast32_t
+ softfloat_propagateNaNF32UI( uint_fast32_t uiA, uint_fast32_t uiB )
+{
+    if ( softfloat_isSigNaNF32UI( uiA ) | softfloat_isSigNaNF32UI( uiB ) ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+    }
+    return defaultNaNF32UI;
+}
+
diff --git a/target-riscv/fpu-custom-riscv/s_propagateNaNF64UI.c b/target-riscv/fpu-custom-riscv/s_propagateNaNF64UI.c
new file mode 100755
index 0000000..871989d
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/s_propagateNaNF64UI.c
@@ -0,0 +1,25 @@
+
+/*** UPDATE COMMENTS. ***/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Takes two double-precision floating-point values `a' and `b', one of which
+| is a NaN, and returns the NaN result.  If either `a' or `b' is a signaling
+| NaN, the invalid exception is raised.  The canonical default NaN is always
+| returned; the input NaN payloads are not propagated.
+*----------------------------------------------------------------------------*/
+
+uint_fast64_t
+ softfloat_propagateNaNF64UI( uint_fast64_t uiA, uint_fast64_t uiB )
+{
+    if ( softfloat_isSigNaNF64UI( uiA ) | softfloat_isSigNaNF64UI( uiB ) ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+    }
+    return defaultNaNF64UI;
+}
+
diff --git a/target-riscv/fpu-custom-riscv/s_roundPackToF32.c b/target-riscv/fpu-custom-riscv/s_roundPackToF32.c
new file mode 100755
index 0000000..11764f1
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/s_roundPackToF32.c
@@ -0,0 +1,65 @@
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+#include "internals.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Rounds the value (sign, exp, sig) -- where `sig' carries 7 bits of round
+| information below the result's LSB -- and packs it as a single-precision
+| float, raising overflow, underflow, and inexact flags as required.
+*----------------------------------------------------------------------------*/
+float32_t
+ softfloat_roundPackToF32( bool sign, int_fast16_t exp, uint_fast32_t sig )
+{
+    int roundingMode;
+    bool roundNearestEven;
+    int roundIncrement, roundBits;
+    bool isTiny;
+    uint_fast32_t uiZ;
+    union ui32_f32 uZ;
+
+    roundingMode = softfloat_roundingMode;
+    roundNearestEven = ( roundingMode == softfloat_round_nearest_even );
+    /* Nearest modes add half an LSB (0x40); directed modes add all round
+       bits (0x7F) when rounding away from zero, or nothing when toward. */
+    roundIncrement = 0x40;
+    if (
+           ! roundNearestEven
+        && ( roundingMode != softfloat_round_nearest_maxMag )
+    ) {
+        roundIncrement =
+               ( roundingMode == softfloat_round_minMag )
+            || ( roundingMode
+                     == ( sign ? softfloat_round_max : softfloat_round_min ) )
+                ? 0
+                : 0x7F;
+    }
+    roundBits = sig & 0x7F;
+    /* Unsigned compare catches both exp < 0 (subnormal/underflow range)
+       and exp > 0xFD (overflow range) in one test. */
+    if ( 0xFD <= (uint16_t) exp ) {
+        if ( exp < 0 ) {
+            /* Tininess: either detected before rounding, or the value
+               cannot round up into the smallest normal. */
+            isTiny =
+                   ( softfloat_detectTininess
+                         == softfloat_tininess_beforeRounding )
+                || ( exp < -1 )
+                || ( sig + roundIncrement < 0x80000000 );
+            sig = softfloat_shift32RightJam( sig, - exp );
+            exp = 0;
+            roundBits = sig & 0x7F;
+            if ( isTiny && roundBits ) {
+                softfloat_raiseFlags( softfloat_flag_underflow );
+            }
+        } else if (
+            ( 0xFD < exp ) || ( 0x80000000 <= sig + roundIncrement )
+        ) {
+            softfloat_raiseFlags(
+                softfloat_flag_overflow | softfloat_flag_inexact );
+            /* Truncating modes (roundIncrement == 0) return the largest
+               finite value instead of infinity. */
+            uiZ = packToF32UI( sign, 0xFF, 0 ) - ! roundIncrement;
+            goto uiZ;
+        }
+    }
+    if ( roundBits ) softfloat_exceptionFlags |= softfloat_flag_inexact;
+    sig = ( sig + roundIncrement )>>7;
+    /* Ties-to-even: clear the LSB when the discarded bits were exactly
+       one half. */
+    sig &= ~ ( ! ( roundBits ^ 0x40 ) & roundNearestEven );
+    uiZ = packToF32UI( sign, sig ? exp : 0, sig );
+ uiZ:
+    uZ.ui = uiZ;
+    return uZ.f;
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/s_roundPackToF64.c b/target-riscv/fpu-custom-riscv/s_roundPackToF64.c
new file mode 100755
index 0000000..fb0ef1d
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/s_roundPackToF64.c
@@ -0,0 +1,66 @@
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+#include "internals.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Rounds the value (sign, exp, sig) -- where `sig' carries 10 bits of round
+| information below the result's LSB -- and packs it as a double-precision
+| float, raising overflow, underflow, and inexact flags as required.
+*----------------------------------------------------------------------------*/
+float64_t
+ softfloat_roundPackToF64( bool sign, int_fast16_t exp, uint_fast64_t sig )
+{
+    int roundingMode;
+    bool roundNearestEven;
+    int roundIncrement, roundBits;
+    bool isTiny;
+    uint_fast64_t uiZ;
+    union ui64_f64 uZ;
+
+    roundingMode = softfloat_roundingMode;
+    roundNearestEven = ( roundingMode == softfloat_round_nearest_even );
+    /* Nearest modes add half an LSB (0x200); directed modes add all round
+       bits (0x3FF) when rounding away from zero, or nothing when toward. */
+    roundIncrement = 0x200;
+    if (
+           ! roundNearestEven
+        && ( roundingMode != softfloat_round_nearest_maxMag )
+    ) {
+        roundIncrement =
+               ( roundingMode == softfloat_round_minMag )
+            || ( roundingMode
+                     == ( sign ? softfloat_round_max : softfloat_round_min ) )
+                ? 0
+                : 0x3FF;
+    }
+    roundBits = sig & 0x3FF;
+    /* Unsigned compare catches both exp < 0 (subnormal/underflow range)
+       and exp > 0x7FD (overflow range) in one test. */
+    if ( 0x7FD <= (uint16_t) exp ) {
+        if ( exp < 0 ) {
+            /* Tininess: either detected before rounding, or the value
+               cannot round up into the smallest normal. */
+            isTiny =
+                   ( softfloat_detectTininess
+                         == softfloat_tininess_beforeRounding )
+                || ( exp < -1 )
+                || ( sig + roundIncrement < UINT64_C( 0x8000000000000000 ) );
+            sig = softfloat_shift64RightJam( sig, - exp );
+            exp = 0;
+            roundBits = sig & 0x3FF;
+            if ( isTiny && roundBits ) {
+                softfloat_raiseFlags( softfloat_flag_underflow );
+            }
+        } else if (
+            ( 0x7FD < exp )
+                || ( UINT64_C( 0x8000000000000000 ) <= sig + roundIncrement )
+        ) {
+            softfloat_raiseFlags(
+                softfloat_flag_overflow | softfloat_flag_inexact );
+            /* Truncating modes (roundIncrement == 0) return the largest
+               finite value instead of infinity. */
+            uiZ = packToF64UI( sign, 0x7FF, 0 ) - ! roundIncrement;
+            goto uiZ;
+        }
+    }
+    if ( roundBits ) softfloat_exceptionFlags |= softfloat_flag_inexact;
+    sig = ( sig + roundIncrement )>>10;
+    /* Ties-to-even: clear the LSB when the discarded bits were exactly
+       one half. */
+    sig &= ~ ( ! ( roundBits ^ 0x200 ) & roundNearestEven );
+    uiZ = packToF64UI( sign, sig ? exp : 0, sig );
+ uiZ:
+    uZ.ui = uiZ;
+    return uZ.f;
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/s_roundPackToI32.c b/target-riscv/fpu-custom-riscv/s_roundPackToI32.c
new file mode 100755
index 0000000..1c91497
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/s_roundPackToI32.c
@@ -0,0 +1,48 @@
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Rounds `sig' -- a magnitude with 7 bits of round information below the
+| integer LSB -- to a signed 32-bit integer with sign `sign'.  On overflow,
+| raises the invalid exception and returns the saturated INT32 bound.  If
+| `exact' is set, a nonzero discarded fraction raises inexact.
+*----------------------------------------------------------------------------*/
+int_fast32_t
+ softfloat_roundPackToI32(
+     bool sign, uint_fast64_t sig, int_fast8_t roundingMode, bool exact )
+{
+    bool roundNearestEven;
+    int roundIncrement, roundBits;
+    uint_fast32_t sig32;
+    union { uint32_t ui; int32_t i; } uZ;
+    int_fast32_t z;
+
+    roundNearestEven = ( roundingMode == softfloat_round_nearest_even );
+    /* Nearest modes add half an LSB (0x40); directed modes add all round
+       bits (0x7F) when rounding away from zero, or nothing when toward. */
+    roundIncrement = 0x40;
+    if (
+           ! roundNearestEven
+        && ( roundingMode != softfloat_round_nearest_maxMag )
+    ) {
+        roundIncrement =
+               ( roundingMode == softfloat_round_minMag )
+            || ( roundingMode
+                     == ( sign ? softfloat_round_max : softfloat_round_min ) )
+                ? 0
+                : 0x7F;
+    }
+    roundBits = sig & 0x7F;
+    sig += roundIncrement;
+    /* Any bit at or above position 39 means the magnitude exceeds 2^32. */
+    if ( sig & UINT64_C( 0xFFFFFF8000000000 ) ) goto invalid;
+    sig32 = sig>>7;
+    /* Ties-to-even: clear the LSB when the discarded bits were exactly
+       one half. */
+    sig32 &= ~ ( ! ( roundBits ^ 0x40 ) & roundNearestEven );
+    uZ.ui = sign ? - sig32 : sig32;
+    z = uZ.i;
+    /* Sign of the reinterpreted result must match the requested sign,
+       otherwise the magnitude overflowed the signed range. */
+    if ( z && ( ( z < 0 ) ^ sign ) ) goto invalid;
+    if ( exact && roundBits ) {
+        softfloat_exceptionFlags |= softfloat_flag_inexact;
+    }
+    return z;
+ invalid:
+    softfloat_raiseFlags( softfloat_flag_invalid );
+    return sign ? -0x7FFFFFFF - 1 : 0x7FFFFFFF;
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/s_roundPackToI64.c b/target-riscv/fpu-custom-riscv/s_roundPackToI64.c
new file mode 100755
index 0000000..b2f5d63
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/s_roundPackToI64.c
@@ -0,0 +1,52 @@
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Rounds the fixed-point magnitude `sig64'.`sig0' (integer part in `sig64',
+| fraction scaled by 2^-64 in `sig0') to a signed 64-bit integer with sign
+| `sign'.  On overflow, raises the invalid exception and returns the
+| saturated INT64 bound.  If `exact' is set, a nonzero fraction raises
+| inexact.
+*----------------------------------------------------------------------------*/
+int_fast64_t
+ softfloat_roundPackToI64(
+     bool sign,
+     uint_fast64_t sig64,
+     uint_fast64_t sig0,
+     int_fast8_t roundingMode,
+     bool exact
+ )
+{
+    bool roundNearestEven, increment;
+    union { uint64_t ui; int64_t i; } uZ;
+    int_fast64_t z;
+
+    roundNearestEven = ( roundingMode == softfloat_round_nearest_even );
+    /* Nearest modes increment when the fraction is >= 1/2. */
+    increment = ( UINT64_C( 0x8000000000000000 ) <= sig0 );
+    if (
+           ! roundNearestEven
+        && ( roundingMode != softfloat_round_nearest_maxMag )
+    ) {
+        /* Directed modes increment only when rounding away from zero and
+           some fraction bits are set. */
+        increment =
+               ( roundingMode != softfloat_round_minMag )
+            && ( roundingMode
+                     == ( sign ? softfloat_round_min : softfloat_round_max ) )
+            && sig0;
+    }
+    if ( increment ) {
+        ++sig64;
+        if ( ! sig64 ) goto invalid;
+        /* Ties-to-even: clear the LSB when the fraction was exactly 1/2. */
+        sig64 &=
+            ~ ( ! ( sig0 & UINT64_C( 0x7FFFFFFFFFFFFFFF ) )
+                    & roundNearestEven );
+    }
+    uZ.ui = sign ? - sig64 : sig64;
+    z = uZ.i;
+    /* Sign of the reinterpreted result must match the requested sign,
+       otherwise the magnitude overflowed the signed range. */
+    if ( z && ( ( z < 0 ) ^ sign ) ) goto invalid;
+    if ( exact && sig0 ) softfloat_exceptionFlags |= softfloat_flag_inexact;
+    return z;
+ invalid:
+    softfloat_raiseFlags( softfloat_flag_invalid );
+    return
+        sign ? - INT64_C( 0x7FFFFFFFFFFFFFFF ) - 1
+            : INT64_C( 0x7FFFFFFFFFFFFFFF );
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/s_roundPackToUI32.c b/target-riscv/fpu-custom-riscv/s_roundPackToUI32.c
new file mode 100755
index 0000000..ab44ec7
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/s_roundPackToUI32.c
@@ -0,0 +1,44 @@
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Rounds `sig' -- a magnitude with 7 bits of round information below the
+| integer LSB -- to an unsigned 32-bit integer.  A negative nonzero result
+| or a magnitude over 2^32 raises the invalid exception and returns
+| 0xFFFFFFFF.  If `exact' is set, a nonzero discarded fraction raises
+| inexact.
+*----------------------------------------------------------------------------*/
+uint_fast32_t
+ softfloat_roundPackToUI32(
+     bool sign, uint_fast64_t sig, int_fast8_t roundingMode, bool exact )
+{
+    bool roundNearestEven;
+    int roundIncrement, roundBits;
+    uint_fast32_t z;
+
+    roundNearestEven = ( roundingMode == softfloat_round_nearest_even );
+    /* Nearest modes add half an LSB (0x40); directed modes add all round
+       bits (0x7F) when rounding away from zero, or nothing when toward. */
+    roundIncrement = 0x40;
+    if (
+           ! roundNearestEven
+        && ( roundingMode != softfloat_round_nearest_maxMag )
+    ) {
+        roundIncrement =
+               ( roundingMode == softfloat_round_minMag )
+            || ( roundingMode
+                     == ( sign ? softfloat_round_max : softfloat_round_min ) )
+                ? 0
+                : 0x7F;
+    }
+    roundBits = sig & 0x7F;
+    sig += roundIncrement;
+    /* Any bit at or above position 39 means the magnitude exceeds 2^32. */
+    if ( sig & UINT64_C( 0xFFFFFF8000000000 ) ) goto invalid;
+    z = sig>>7;
+    /* Ties-to-even: clear the LSB when the discarded bits were exactly
+       one half. */
+    z &= ~ ( ! ( roundBits ^ 0x40 ) & roundNearestEven );
+    /* A negative value is only representable when it rounds to zero. */
+    if ( sign && z ) goto invalid;
+    if ( exact && roundBits ) {
+        softfloat_exceptionFlags |= softfloat_flag_inexact;
+    }
+    return z;
+ invalid:
+    softfloat_raiseFlags( softfloat_flag_invalid );
+    return 0xFFFFFFFF;
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/s_roundPackToUI64.c b/target-riscv/fpu-custom-riscv/s_roundPackToUI64.c
new file mode 100755
index 0000000..d42266f
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/s_roundPackToUI64.c
@@ -0,0 +1,46 @@
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Rounds the fixed-point magnitude `sig64'.`sig0' (integer part in `sig64',
+| fraction scaled by 2^-64 in `sig0') to an unsigned 64-bit integer.  A
+| negative nonzero result or an overflowing magnitude raises the invalid
+| exception and returns UINT64_MAX.  If `exact' is set, a nonzero fraction
+| raises inexact.
+*----------------------------------------------------------------------------*/
+uint_fast64_t
+ softfloat_roundPackToUI64(
+     bool sign,
+     uint_fast64_t sig64,
+     uint_fast64_t sig0,
+     int_fast8_t roundingMode,
+     bool exact
+ )
+{
+    bool roundNearestEven, increment;
+
+    roundNearestEven = ( roundingMode == softfloat_round_nearest_even );
+    /* Nearest modes increment when the fraction is >= 1/2. */
+    increment = ( UINT64_C( 0x8000000000000000 ) <= sig0 );
+    if (
+           ! roundNearestEven
+        && ( roundingMode != softfloat_round_nearest_maxMag )
+    ) {
+        /* Directed modes increment only when rounding away from zero and
+           some fraction bits are set. */
+        increment =
+               ( roundingMode != softfloat_round_minMag )
+            && ( roundingMode
+                     == ( sign ? softfloat_round_min : softfloat_round_max ) )
+            && sig0;
+    }
+    if ( increment ) {
+        ++sig64;
+        /* Wrap to zero means the rounded magnitude exceeds 2^64. */
+        if ( ! sig64 ) goto invalid;
+        /* Ties-to-even: clear the LSB when the fraction was exactly 1/2. */
+        sig64 &=
+            ~ ( ! ( sig0 & UINT64_C( 0x7FFFFFFFFFFFFFFF ) )
+                    & roundNearestEven );
+    }
+    /* A negative value is only representable when it rounds to zero. */
+    if ( sign && sig64 ) goto invalid;
+    if ( exact && sig0 ) softfloat_exceptionFlags |= softfloat_flag_inexact;
+    return sig64;
+ invalid:
+    softfloat_raiseFlags( softfloat_flag_invalid );
+    return UINT64_C( 0xFFFFFFFFFFFFFFFF );
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/s_shift128ExtraRightJam.c b/target-riscv/fpu-custom-riscv/s_shift128ExtraRightJam.c
new file mode 100755
index 0000000..6c57974
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/s_shift128ExtraRightJam.c
@@ -0,0 +1,38 @@
+
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+
+/*----------------------------------------------------------------------------
+| Shifts the 128-bit value `a64':`a0' right by `count' bits.  Bits shifted
+| out of the low word move into the returned `extra' word; any nonzero bits
+| lost beyond that (including a nonzero incoming `extra') are jammed into
+| the least-significant bit of `extra' so rounding can still observe them.
+| Handles any `count', including 64 and beyond.
+*----------------------------------------------------------------------------*/
+struct uint128_extra
+ softfloat_shift128ExtraRightJam(
+     uint64_t a64, uint64_t a0, uint64_t extra, unsigned int count )
+{
+    unsigned int negCount;
+    struct uint128_extra z;
+
+    negCount = - count;
+    if ( count < 64 ) {
+        z.v64 = a64>>count;
+        z.v0 = a64<<( negCount & 63 ) | a0>>count;
+        z.extra = a0<<( negCount & 63 );
+    } else {
+        z.v64 = 0;
+        if ( count == 64 ) {
+            /* Exactly one word: words simply move down one slot. */
+            z.v0 = a64;
+            z.extra = a0;
+        } else {
+            /* More than one word: all of a0 is lost; fold it into extra. */
+            extra |= a0;
+            if ( count < 128 ) {
+                z.v0 = a64>>( count & 63 );
+                z.extra = a64<<( negCount & 63 );
+            } else {
+                z.v0 = 0;
+                z.extra = ( count == 128 ) ? a64 : ( a64 != 0 );
+            }
+        }
+    }
+    z.extra |= ( extra != 0 );
+    return z;
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/s_shift128RightJam.c b/target-riscv/fpu-custom-riscv/s_shift128RightJam.c
new file mode 100755
index 0000000..5a4e188
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/s_shift128RightJam.c
@@ -0,0 +1,31 @@
+
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+
+/*----------------------------------------------------------------------------
+| Shifts the 128-bit value `a64':`a0' right by `count' bits; any nonzero
+| bits shifted out are jammed into the least-significant bit of the result
+| so rounding can still observe them.
+| NOTE(review): for count == 0 the jam term would incorrectly OR `a64' into
+| the low word -- callers appear to pass count >= 1; confirm precondition.
+*----------------------------------------------------------------------------*/
+struct uint128
+ softfloat_shift128RightJam( uint64_t a64, uint64_t a0, unsigned int count )
+{
+    unsigned int negCount;
+    struct uint128 z;
+
+    if ( count < 64 ) {
+        negCount = - count;
+        z.v64 = a64>>( count & 63 );
+        z.v0 =
+            a64<<( negCount & 63 ) | a0>>count
+                | ( (uint64_t) ( a0<<( negCount & 63 ) ) != 0 );
+    } else {
+        /* Whole high word shifted out: result fits in the low word, with
+           every lost nonzero bit jammed into its LSB. */
+        z.v64 = 0;
+        z.v0 =
+            ( count < 128 )
+                ? a64>>( count & 63 )
+                      | ( ( ( a64 & ( ( (uint64_t) 1<<( count & 63 ) ) - 1 ) )
+                                | a0 )
+                              != 0 )
+                : ( ( a64 | a0 ) != 0 );
+    }
+    return z;
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/s_shift32RightJam.c b/target-riscv/fpu-custom-riscv/s_shift32RightJam.c
new file mode 100755
index 0000000..b697a34
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/s_shift32RightJam.c
@@ -0,0 +1,15 @@
+
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+
+/*----------------------------------------------------------------------------
+| Shifts `a' right by `count' bits; any nonzero bits shifted out are jammed
+| into the least-significant bit of the result so rounding can still observe
+| them.  Counts of 32 or more collapse the result to ( a != 0 ).
+| NOTE(review): for count == 0 the jam term would incorrectly OR in
+| ( a != 0 ) -- callers appear to pass count >= 1; confirm precondition.
+*----------------------------------------------------------------------------*/
+uint32_t softfloat_shift32RightJam( uint32_t a, unsigned int count )
+{
+
+    return
+        ( count < 32 )
+            ? a>>count | ( (uint32_t) ( a<<( ( - count ) & 31 ) ) != 0 )
+            : ( a != 0 );
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/s_shift64ExtraRightJam.c b/target-riscv/fpu-custom-riscv/s_shift64ExtraRightJam.c
new file mode 100755
index 0000000..167ea54
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/s_shift64ExtraRightJam.c
@@ -0,0 +1,23 @@
+
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+
+/*----------------------------------------------------------------------------
+| Shifts `a' right by `count' bits.  Bits shifted out move into the returned
+| `extra' word; any nonzero bits lost beyond that (including a nonzero
+| incoming `extra') are jammed into the least-significant bit of `extra' so
+| rounding can still observe them.  Handles any `count' >= 64.
+*----------------------------------------------------------------------------*/
+struct uint64_extra
+ softfloat_shift64ExtraRightJam(
+     uint64_t a, uint64_t extra, unsigned int count )
+{
+    struct uint64_extra z;
+
+    if ( count < 64 ) {
+        z.v = a>>count;
+        z.extra = a<<( ( - count ) & 63 );
+    } else {
+        z.v = 0;
+        /* count == 64 moves `a' wholesale into extra; larger counts lose
+           it entirely, leaving only the jam bit. */
+        z.extra = ( count == 64 ) ? a : ( a != 0 );
+    }
+    z.extra |= ( extra != 0 );
+    return z;
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/s_shift64RightJam.c b/target-riscv/fpu-custom-riscv/s_shift64RightJam.c
new file mode 100755
index 0000000..ebebb61
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/s_shift64RightJam.c
@@ -0,0 +1,15 @@
+
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+
+/*----------------------------------------------------------------------------
+| Shifts `a' right by `count' bits; any nonzero bits shifted out are jammed
+| into the least-significant bit of the result so rounding can still observe
+| them.  Counts of 64 or more collapse the result to ( a != 0 ).
+| NOTE(review): for count == 0 the jam term would incorrectly OR in
+| ( a != 0 ) -- callers appear to pass count >= 1; confirm precondition.
+*----------------------------------------------------------------------------*/
+uint64_t softfloat_shift64RightJam( uint64_t a, unsigned int count )
+{
+
+    return
+        ( count < 64 )
+            ? a>>count | ( (uint64_t) ( a<<( ( - count ) & 63 ) ) != 0 )
+            : ( a != 0 );
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/s_shortShift128ExtraRightJam.c b/target-riscv/fpu-custom-riscv/s_shortShift128ExtraRightJam.c
new file mode 100755
index 0000000..c772740
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/s_shortShift128ExtraRightJam.c
@@ -0,0 +1,20 @@
+
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+
+/*----------------------------------------------------------------------------
+| Shifts the 128-bit value `a64':`a0' right by `count' bits, moving the
+| shifted-out bits into the returned `extra' word; a nonzero incoming
+| `extra' is jammed into the least-significant bit of the new `extra'.
+| NOTE(review): the unguarded shifts are only correct for 0 < count < 64 --
+| confirm callers respect this precondition.
+*----------------------------------------------------------------------------*/
+struct uint128_extra
+ softfloat_shortShift128ExtraRightJam(
+     uint64_t a64, uint64_t a0, uint64_t extra, unsigned int count )
+{
+    unsigned int negCount;
+    struct uint128_extra z;
+
+    negCount = - count;
+    z.v64 = a64>>count;
+    z.v0 = a64<<( negCount & 63 ) | a0>>count;
+    z.extra = a0<<( negCount & 63 ) | ( extra != 0 );
+    return z;
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/s_shortShift128Left.c b/target-riscv/fpu-custom-riscv/s_shortShift128Left.c
new file mode 100755
index 0000000..9c29988
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/s_shortShift128Left.c
@@ -0,0 +1,16 @@
+/* Berkeley SoftFloat primitive: shift the 128-bit value {a64,a0} left by a small `count`.  NOTE(review): `(-count) & 63` implies count must be in 1..63 (count == 0 would OR all of a0 into v64) — confirm callers honor this. */
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+
+struct uint128
+ softfloat_shortShift128Left( uint64_t a64, uint64_t a0, unsigned int count )
+{
+    struct uint128 z;
+
+    z.v64 = a64<<count | a0>>( ( - count ) & 63 );   /* carry a0's high bits into the upper word */
+    z.v0 = a0<<count;
+    return z;
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/s_shortShift128Right.c 
b/target-riscv/fpu-custom-riscv/s_shortShift128Right.c
new file mode 100755
index 0000000..f7f4ce8
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/s_shortShift128Right.c
@@ -0,0 +1,16 @@
+/* Berkeley SoftFloat primitive: shift the 128-bit value {a64,a0} right by a small `count`; shifted-out low bits are discarded (no jamming).  NOTE(review): `(-count) & 63` implies count must be in 1..63 (count == 0 would OR all of a64 into v0) — confirm callers honor this. */
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+
+struct uint128
+ softfloat_shortShift128Right( uint64_t a64, uint64_t a0, unsigned int count )
+{
+    struct uint128 z;
+
+    z.v64 = a64>>count;
+    z.v0 = a64<<( ( - count ) & 63 ) | a0>>count;   /* bits crossing the 64-bit boundary */
+    return z;
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/s_shortShift192Left.c 
b/target-riscv/fpu-custom-riscv/s_shortShift192Left.c
new file mode 100755
index 0000000..cf1e55d
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/s_shortShift192Left.c
@@ -0,0 +1,20 @@
+/* Berkeley SoftFloat primitive: shift the 192-bit value {a128,a64,a0} left by a small `count`.  NOTE(review): `negCount & 63` implies count must be in 1..63 (count == 0 would OR each lower word into the word above) — confirm callers honor this. */
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+
+struct uint192
+ softfloat_shortShift192Left(
+     uint64_t a128, uint64_t a64, uint64_t a0, unsigned int count )
+{
+    unsigned int negCount;
+    struct uint192 z;
+
+    negCount = - count;
+    z.v128 = a128<<count | a64>>( negCount & 63 );   /* carry a64's high bits upward */
+    z.v64 = a64<<count | a0>>( negCount & 63 );   /* carry a0's high bits upward */
+    z.v0 = a0<<count;
+    return z;
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/s_shortShift32Right1Jam.c 
b/target-riscv/fpu-custom-riscv/s_shortShift32Right1Jam.c
new file mode 100755
index 0000000..db4c304
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/s_shortShift32Right1Jam.c
@@ -0,0 +1,12 @@
+/* Berkeley SoftFloat primitive: shift `a` right by exactly one bit, jamming the discarded bit into the new LSB so sticky information is preserved for rounding. */
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+
+uint32_t softfloat_shortShift32Right1Jam( uint32_t a )
+{
+
+    return a>>1 | ( a & 1 );   /* bit 0 becomes the sticky bit of the result */
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/s_shortShift64ExtraRightJam.c 
b/target-riscv/fpu-custom-riscv/s_shortShift64ExtraRightJam.c
new file mode 100755
index 0000000..b861c67
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/s_shortShift64ExtraRightJam.c
@@ -0,0 +1,17 @@
+/* Berkeley SoftFloat primitive: shift `a` right by a small `count`, placing the shifted-out bits in the extra word and jamming a nonzero incoming `extra` into its LSB.  NOTE(review): `(-count) & 63` implies count must be in 1..63 (count == 0 would place all of `a` in extra) — confirm callers honor this. */
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+
+struct uint64_extra
+ softfloat_shortShift64ExtraRightJam(
+     uint64_t a, uint64_t extra, unsigned int count )
+{
+    struct uint64_extra z;
+
+    z.v = a>>count;
+    z.extra = a<<( ( - count ) & 63 ) | ( extra != 0 );   /* shifted-out bits, plus jammed sticky */
+    return z;
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/s_shortShift64RightJam.c 
b/target-riscv/fpu-custom-riscv/s_shortShift64RightJam.c
new file mode 100755
index 0000000..0da6c93
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/s_shortShift64RightJam.c
@@ -0,0 +1,12 @@
+/* Berkeley SoftFloat primitive: shift `a` right by a small `count` with jamming — if any of the `count` low bits of `a` is nonzero, the result's LSB is forced to 1.  Requires count < 64 (the `1<<count` mask construction would be undefined at 64). */
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+
+uint64_t softfloat_shortShift64RightJam( uint64_t a, unsigned int count )
+{
+
+    return a>>count | ( ( a & ( ( (uint64_t) 1<<count ) - 1 ) ) != 0 );   /* mask isolates the discarded low bits */
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/s_sub128.c 
b/target-riscv/fpu-custom-riscv/s_sub128.c
new file mode 100755
index 0000000..0c4f181
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/s_sub128.c
@@ -0,0 +1,17 @@
+/* Berkeley SoftFloat primitive: 128-bit subtraction {a64,a0} - {b64,b0}, propagating the borrow from the low word into the high word. */
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+
+struct uint128
+ softfloat_sub128( uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0 )
+{
+    struct uint128 z;
+
+    z.v0 = a0 - b0;
+    z.v64 = a64 - b64;
+    z.v64 -= ( a0 < b0 );   /* borrow from the low 64 bits */
+    return z;
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/s_sub192.c 
b/target-riscv/fpu-custom-riscv/s_sub192.c
new file mode 100755
index 0000000..96f21c9
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/s_sub192.c
@@ -0,0 +1,30 @@
+/* Berkeley SoftFloat primitive: 192-bit subtraction {a128,a64,a0} - {b128,b64,b0}, rippling borrows from the low word through the middle word to the high word. */
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+
+struct uint192
+ softfloat_sub192(
+     uint64_t a128,
+     uint64_t a64,
+     uint64_t a0,
+     uint64_t b128,
+     uint64_t b64,
+     uint64_t b0
+ )
+{
+    struct uint192 z;
+    unsigned int borrow64, borrow128;
+
+    z.v0 = a0 - b0;
+    borrow64 = ( a0 < b0 );   /* borrow out of the low word */
+    z.v64 = a64 - b64;
+    borrow128 = ( a64 < b64 );   /* borrow out of the middle word */
+    z.v128 = a128 - b128;
+    borrow128 += ( z.v64 < borrow64 );   /* subtracting the low borrow may underflow v64, adding a second borrow */
+    z.v64 -= borrow64;
+    z.v128 -= borrow128;
+    return z;
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/s_subMagsF32.c 
b/target-riscv/fpu-custom-riscv/s_subMagsF32.c
new file mode 100755
index 0000000..0c83b02
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/s_subMagsF32.c
@@ -0,0 +1,81 @@
+/* Berkeley SoftFloat: subtract the magnitudes of two float32 bit patterns uiA - uiB; the effective sign of the larger operand (`signZ`) has already been resolved by the caller (f32_add/f32_sub). */
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+float32_t
+ softfloat_subMagsF32( uint_fast32_t uiA, uint_fast32_t uiB, bool signZ )
+{
+    int_fast16_t expA;
+    uint_fast32_t sigA;
+    int_fast16_t expB;
+    uint_fast32_t sigB;
+    int_fast16_t expDiff;
+    uint_fast32_t uiZ;
+    int_fast16_t expZ;
+    uint_fast32_t sigZ;
+    union ui32_f32 uZ;
+
+    expA = expF32UI( uiA );
+    sigA = fracF32UI( uiA );
+    expB = expF32UI( uiB );
+    sigB = fracF32UI( uiB );
+    expDiff = expA - expB;
+    sigA <<= 7;   /* keep 7 extra low-order bits of precision for rounding */
+    sigB <<= 7;
+    if ( 0 < expDiff ) goto expABigger;
+    if ( expDiff < 0 ) goto expBBigger;
+    if ( expA == 0xFF ) {   /* equal exponents, both inf or NaN */
+        if ( sigA | sigB ) goto propagateNaN;
+        softfloat_raiseFlags( softfloat_flag_invalid );   /* inf - inf is invalid */
+        uiZ = defaultNaNF32UI;
+        goto uiZ;
+    }
+    if ( ! expA ) {   /* both subnormal or zero: use effective exponent 1, no implicit bit */
+        expA = 1;
+        expB = 1;
+    }
+    if ( sigB < sigA ) goto aBigger;
+    if ( sigA < sigB ) goto bBigger;
+    uiZ = packToF32UI( softfloat_roundingMode == softfloat_round_min, 0, 0 );   /* exact zero; negative only when rounding toward -inf */
+    goto uiZ;
+ expBBigger:
+    if ( expB == 0xFF ) {
+        if ( sigB ) goto propagateNaN;
+        uiZ = packToF32UI( signZ ^ 1, 0xFF, 0 );   /* finite - inf: infinite result with the opposite sign */
+        goto uiZ;
+    }
+    sigA += expA ? 0x40000000 : sigA;   /* prepend the implicit bit, or double a subnormal significand */
+    sigA = softfloat_shift32RightJam( sigA, - expDiff );   /* align A to B's exponent, jamming lost bits */
+    sigB |= 0x40000000;
+ bBigger:
+    signZ ^= 1;   /* |B| dominates, so the result takes the opposite sign */
+    expZ = expB;
+    sigZ = sigB - sigA;
+    goto normRoundPack;
+ expABigger:
+    if ( expA == 0xFF ) {
+        if ( sigA ) goto propagateNaN;
+        uiZ = uiA;   /* inf - finite: the infinity is returned unchanged */
+        goto uiZ;
+    }
+    sigB += expB ? 0x40000000 : sigB;   /* prepend the implicit bit, or double a subnormal significand */
+    sigB = softfloat_shift32RightJam( sigB, expDiff );   /* align B to A's exponent, jamming lost bits */
+    sigA |= 0x40000000;
+ aBigger:
+    expZ = expA;
+    sigZ = sigA - sigB;
+ normRoundPack:
+    return softfloat_normRoundPackToF32( signZ, expZ - 1, sigZ );   /* expZ - 1 compensates the extended significand scale */
+ propagateNaN:
+    uiZ = softfloat_propagateNaNF32UI( uiA, uiB );
+ uiZ:
+    uZ.ui = uiZ;
+    return uZ.f;
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/s_subMagsF64.c 
b/target-riscv/fpu-custom-riscv/s_subMagsF64.c
new file mode 100755
index 0000000..45b81ba
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/s_subMagsF64.c
@@ -0,0 +1,81 @@
+/* Berkeley SoftFloat: subtract the magnitudes of two float64 bit patterns uiA - uiB; the effective sign of the larger operand (`signZ`) has already been resolved by the caller (f64_add/f64_sub). */
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+float64_t
+ softfloat_subMagsF64( uint_fast64_t uiA, uint_fast64_t uiB, bool signZ )
+{
+    int_fast16_t expA;
+    uint_fast64_t sigA;
+    int_fast16_t expB;
+    uint_fast64_t sigB;
+    int_fast16_t expDiff;
+    uint_fast64_t uiZ;
+    int_fast16_t expZ;
+    uint_fast64_t sigZ;
+    union ui64_f64 uZ;
+
+    expA = expF64UI( uiA );
+    sigA = fracF64UI( uiA );
+    expB = expF64UI( uiB );
+    sigB = fracF64UI( uiB );
+    expDiff = expA - expB;
+    sigA <<= 10;   /* keep 10 extra low-order bits of precision for rounding */
+    sigB <<= 10;
+    if ( 0 < expDiff ) goto expABigger;
+    if ( expDiff < 0 ) goto expBBigger;
+    if ( expA == 0x7FF ) {   /* equal exponents, both inf or NaN */
+        if ( sigA | sigB ) goto propagateNaN;
+        softfloat_raiseFlags( softfloat_flag_invalid );   /* inf - inf is invalid */
+        uiZ = defaultNaNF64UI;
+        goto uiZ;
+    }
+    if ( ! expA ) {   /* both subnormal or zero: use effective exponent 1, no implicit bit */
+        expA = 1;
+        expB = 1;
+    }
+    if ( sigB < sigA ) goto aBigger;
+    if ( sigA < sigB ) goto bBigger;
+    uiZ = packToF64UI( softfloat_roundingMode == softfloat_round_min, 0, 0 );   /* exact zero; negative only when rounding toward -inf */
+    goto uiZ;
+ expBBigger:
+    if ( expB == 0x7FF ) {
+        if ( sigB ) goto propagateNaN;
+        uiZ = packToF64UI( signZ ^ 1, 0x7FF, 0 );   /* finite - inf: infinite result with the opposite sign */
+        goto uiZ;
+    }
+    sigA += expA ? UINT64_C( 0x4000000000000000 ) : sigA;   /* prepend the implicit bit, or double a subnormal significand */
+    sigA = softfloat_shift64RightJam( sigA, - expDiff );   /* align A to B's exponent, jamming lost bits */
+    sigB |= UINT64_C( 0x4000000000000000 );
+ bBigger:
+    signZ ^= 1;   /* |B| dominates, so the result takes the opposite sign */
+    expZ = expB;
+    sigZ = sigB - sigA;
+    goto normRoundPack;
+ expABigger:
+    if ( expA == 0x7FF ) {
+        if ( sigA ) goto propagateNaN;
+        uiZ = uiA;   /* inf - finite: the infinity is returned unchanged */
+        goto uiZ;
+    }
+    sigB += expB ? UINT64_C( 0x4000000000000000 ) : sigB;   /* prepend the implicit bit, or double a subnormal significand */
+    sigB = softfloat_shift64RightJam( sigB, expDiff );   /* align B to A's exponent, jamming lost bits */
+    sigA |= UINT64_C( 0x4000000000000000 );
+ aBigger:
+    expZ = expA;
+    sigZ = sigA - sigB;
+ normRoundPack:
+    return softfloat_normRoundPackToF64( signZ, expZ - 1, sigZ );   /* expZ - 1 compensates the extended significand scale */
+ propagateNaN:
+    uiZ = softfloat_propagateNaNF64UI( uiA, uiB );
+ uiZ:
+    uZ.ui = uiZ;
+    return uZ.f;
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/softfloat.ac 
b/target-riscv/fpu-custom-riscv/softfloat.ac
new file mode 100644
index 0000000..e69de29
diff --git a/target-riscv/fpu-custom-riscv/softfloat.h 
b/target-riscv/fpu-custom-riscv/softfloat.h
new file mode 100755
index 0000000..bacaf1e
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/softfloat.h
@@ -0,0 +1,235 @@
+
+#ifndef softfloat_h
+#define softfloat_h
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Public interface of the SoftFloat library: global rounding/tininess modes, exception flags, and conversion, arithmetic, and comparison routines for the 32-, 64-, 80-, and 128-bit floating-point formats declared in softfloat_types.h. */
+
+/*============================================================================
+
+This C header file is part of the SoftFloat IEEE Floating-point Arithmetic
+Package, Release 2b.
+
+Written by John R. Hauser.  This work was made possible in part by the
+International Computer Science Institute, located at Suite 600, 1947 Center
+Street, Berkeley, California 94704.  Funding was partially provided by the
+National Science Foundation under grant MIP-9311980.  The original version
+of this code was written as part of a project to build a fixed-point vector
+processor in collaboration with the University of California at Berkeley,
+overseen by Profs. Nelson Morgan and John Wawrzynek.  More information
+is available through the Web page `http://www.cs.berkeley.edu/~jhauser/
+arithmetic/SoftFloat.html'.
+
+THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE.  Although reasonable effort has
+been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT TIMES
+RESULT IN INCORRECT BEHAVIOR.  USE OF THIS SOFTWARE IS RESTRICTED TO PERSONS
+AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ALL LOSSES,
+COSTS, OR OTHER PROBLEMS THEY INCUR DUE TO THE SOFTWARE, AND WHO FURTHERMORE
+EFFECTIVELY INDEMNIFY JOHN HAUSER AND THE INTERNATIONAL COMPUTER SCIENCE
+INSTITUTE (possibly via similar legal warning) AGAINST ALL LOSSES, COSTS, OR
+OTHER PROBLEMS INCURRED BY THEIR CUSTOMERS AND CLIENTS DUE TO THE SOFTWARE.
+
+Derivative works are acceptable, even for commercial purposes, so long as
+(1) the source code for the derivative work includes prominent notice that
+the work is derivative, and (2) the source code includes prominent notice with
+these four paragraphs for those parts of this code that are retained.
+
+=============================================================================*/
+
+#include "softfloat_types.h"
+
+/*----------------------------------------------------------------------------
+| Software floating-point underflow tininess-detection mode.
+*----------------------------------------------------------------------------*/
+extern int_fast8_t softfloat_detectTininess;
+enum {
+    softfloat_tininess_beforeRounding = 0,
+    softfloat_tininess_afterRounding  = 1
+};
+
+/*----------------------------------------------------------------------------
+| Software floating-point rounding mode.
+*----------------------------------------------------------------------------*/
+extern int_fast8_t softfloat_roundingMode;
+enum {
+    softfloat_round_nearest_even   = 0,
+    softfloat_round_minMag         = 1,
+    softfloat_round_min            = 2,
+    softfloat_round_max            = 3,
+    softfloat_round_nearest_maxMag = 4
+};
+
+/*----------------------------------------------------------------------------
+| Software floating-point exception flags.
+*----------------------------------------------------------------------------*/
+extern int_fast8_t softfloat_exceptionFlags;
+enum {
+    softfloat_flag_inexact   =  1,
+    softfloat_flag_underflow =  2,
+    softfloat_flag_overflow  =  4,
+    softfloat_flag_infinity  =  8,
+    softfloat_flag_invalid   = 16
+};
+
+/*----------------------------------------------------------------------------
+| Routine to raise any or all of the software floating-point exception flags.
+*----------------------------------------------------------------------------*/
+void softfloat_raiseFlags( int_fast8_t );
+
+/*----------------------------------------------------------------------------
+| Integer-to-floating-point conversion routines.
+*----------------------------------------------------------------------------*/
+float32_t ui32_to_f32( uint_fast32_t );
+float64_t ui32_to_f64( uint_fast32_t );
+floatx80_t ui32_to_fx80( uint_fast32_t );
+float128_t ui32_to_f128( uint_fast32_t );
+float32_t ui64_to_f32( uint_fast64_t );
+float64_t ui64_to_f64( uint_fast64_t );
+floatx80_t ui64_to_fx80( uint_fast64_t );
+float128_t ui64_to_f128( uint_fast64_t );
+float32_t i32_to_f32( int_fast32_t );
+float64_t i32_to_f64( int_fast32_t );
+floatx80_t i32_to_fx80( int_fast32_t );
+float128_t i32_to_f128( int_fast32_t );
+float32_t i64_to_f32( int_fast64_t );
+float64_t i64_to_f64( int_fast64_t );
+floatx80_t i64_to_fx80( int_fast64_t );
+float128_t i64_to_f128( int_fast64_t );
+
+/*----------------------------------------------------------------------------
+| 32-bit (single-precision) floating-point operations.
+*----------------------------------------------------------------------------*/
+uint_fast32_t f32_to_ui32( float32_t, int_fast8_t, bool );
+uint_fast64_t f32_to_ui64( float32_t, int_fast8_t, bool );
+int_fast32_t f32_to_i32( float32_t, int_fast8_t, bool );
+int_fast64_t f32_to_i64( float32_t, int_fast8_t, bool );
+uint_fast32_t f32_to_ui32_r_minMag( float32_t, bool );
+uint_fast64_t f32_to_ui64_r_minMag( float32_t, bool );
+int_fast32_t f32_to_i32_r_minMag( float32_t, bool );
+int_fast64_t f32_to_i64_r_minMag( float32_t, bool );
+float64_t f32_to_f64( float32_t );
+floatx80_t f32_to_fx80( float32_t );
+float128_t f32_to_f128( float32_t );
+float32_t f32_roundToInt( float32_t, int_fast8_t, bool );
+float32_t f32_add( float32_t, float32_t );
+float32_t f32_sub( float32_t, float32_t );
+float32_t f32_mul( float32_t, float32_t );
+float32_t f32_mulAdd( float32_t, float32_t, float32_t );
+float32_t f32_div( float32_t, float32_t );
+float32_t f32_rem( float32_t, float32_t );
+float32_t f32_sqrt( float32_t );
+bool f32_eq( float32_t, float32_t );
+bool f32_le( float32_t, float32_t );
+bool f32_lt( float32_t, float32_t );
+bool f32_eq_signaling( float32_t, float32_t );
+bool f32_le_quiet( float32_t, float32_t );
+bool f32_lt_quiet( float32_t, float32_t );
+bool f32_isSignalingNaN( float32_t );
+uint_fast16_t f32_classify( float32_t );
+
+/*----------------------------------------------------------------------------
+| 64-bit (double-precision) floating-point operations.
+*----------------------------------------------------------------------------*/
+uint_fast32_t f64_to_ui32( float64_t, int_fast8_t, bool );
+uint_fast64_t f64_to_ui64( float64_t, int_fast8_t, bool );
+int_fast32_t f64_to_i32( float64_t, int_fast8_t, bool );
+int_fast64_t f64_to_i64( float64_t, int_fast8_t, bool );
+uint_fast32_t f64_to_ui32_r_minMag( float64_t, bool );
+uint_fast64_t f64_to_ui64_r_minMag( float64_t, bool );
+int_fast32_t f64_to_i32_r_minMag( float64_t, bool );
+int_fast64_t f64_to_i64_r_minMag( float64_t, bool );
+float32_t f64_to_f32( float64_t );
+floatx80_t f64_to_fx80( float64_t );
+float128_t f64_to_f128( float64_t );
+float64_t f64_roundToInt( float64_t, int_fast8_t, bool );
+float64_t f64_add( float64_t, float64_t );
+float64_t f64_sub( float64_t, float64_t );
+float64_t f64_mul( float64_t, float64_t );
+float64_t f64_mulAdd( float64_t, float64_t, float64_t );
+float64_t f64_div( float64_t, float64_t );
+float64_t f64_rem( float64_t, float64_t );
+float64_t f64_sqrt( float64_t );
+bool f64_eq( float64_t, float64_t );
+bool f64_le( float64_t, float64_t );
+bool f64_lt( float64_t, float64_t );
+bool f64_eq_signaling( float64_t, float64_t );
+bool f64_le_quiet( float64_t, float64_t );
+bool f64_lt_quiet( float64_t, float64_t );
+bool f64_isSignalingNaN( float64_t );
+uint_fast16_t f64_classify( float64_t );
+
+/*----------------------------------------------------------------------------
+| Extended double-precision rounding precision.  Valid values are 32, 64, and
+| 80.
+*----------------------------------------------------------------------------*/
+extern int_fast8_t floatx80_roundingPrecision;
+
+/*----------------------------------------------------------------------------
+| Extended double-precision floating-point operations.
+*----------------------------------------------------------------------------*/
+uint_fast32_t fx80_to_ui32( floatx80_t, int_fast8_t, bool );
+uint_fast64_t fx80_to_ui64( floatx80_t, int_fast8_t, bool );
+int_fast32_t fx80_to_i32( floatx80_t, int_fast8_t, bool );
+int_fast64_t fx80_to_i64( floatx80_t, int_fast8_t, bool );
+uint_fast32_t fx80_to_ui32_r_minMag( floatx80_t, bool );
+uint_fast64_t fx80_to_ui64_r_minMag( floatx80_t, bool );
+int_fast32_t fx80_to_i32_r_minMag( floatx80_t, bool );
+int_fast64_t fx80_to_i64_r_minMag( floatx80_t, bool );
+float32_t fx80_to_f32( floatx80_t );
+float64_t fx80_to_f64( floatx80_t );
+float128_t fx80_to_f128( floatx80_t );
+floatx80_t fx80_roundToInt( floatx80_t, int_fast8_t, bool );
+floatx80_t fx80_add( floatx80_t, floatx80_t );
+floatx80_t fx80_sub( floatx80_t, floatx80_t );
+floatx80_t fx80_mul( floatx80_t, floatx80_t );
+floatx80_t fx80_mulAdd( floatx80_t, floatx80_t, floatx80_t );
+floatx80_t fx80_div( floatx80_t, floatx80_t );
+floatx80_t fx80_rem( floatx80_t, floatx80_t );
+floatx80_t fx80_sqrt( floatx80_t );
+bool fx80_eq( floatx80_t, floatx80_t );
+bool fx80_le( floatx80_t, floatx80_t );
+bool fx80_lt( floatx80_t, floatx80_t );
+bool fx80_eq_signaling( floatx80_t, floatx80_t );
+bool fx80_le_quiet( floatx80_t, floatx80_t );
+bool fx80_lt_quiet( floatx80_t, floatx80_t );
+bool fx80_isSignalingNaN( floatx80_t );
+
+/*----------------------------------------------------------------------------
+| 128-bit (quadruple-precision) floating-point operations.
+*----------------------------------------------------------------------------*/
+uint_fast32_t f128_to_ui32( float128_t, int_fast8_t, bool );
+uint_fast64_t f128_to_ui64( float128_t, int_fast8_t, bool );
+int_fast32_t f128_to_i32( float128_t, int_fast8_t, bool );
+int_fast64_t f128_to_i64( float128_t, int_fast8_t, bool );
+uint_fast32_t f128_to_ui32_r_minMag( float128_t, bool );
+uint_fast64_t f128_to_ui64_r_minMag( float128_t, bool );
+int_fast32_t f128_to_i32_r_minMag( float128_t, bool );
+int_fast64_t f128_to_i64_r_minMag( float128_t, bool );
+float32_t f128_to_f32( float128_t );
+float64_t f128_to_f64( float128_t );
+floatx80_t f128_to_fx80( float128_t );
+float128_t f128_roundToInt( float128_t, int_fast8_t, bool );
+float128_t f128_add( float128_t, float128_t );
+float128_t f128_sub( float128_t, float128_t );
+float128_t f128_mul( float128_t, float128_t );
+float128_t f128_mulAdd( float128_t, float128_t, float128_t );
+float128_t f128_div( float128_t, float128_t );
+float128_t f128_rem( float128_t, float128_t );
+float128_t f128_sqrt( float128_t );
+bool f128_eq( float128_t, float128_t );
+bool f128_le( float128_t, float128_t );
+bool f128_lt( float128_t, float128_t );
+bool f128_eq_signaling( float128_t, float128_t );
+bool f128_le_quiet( float128_t, float128_t );
+bool f128_lt_quiet( float128_t, float128_t );
+bool f128_isSignalingNaN( float128_t );
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+
diff --git a/target-riscv/fpu-custom-riscv/softfloat.mk.in 
b/target-riscv/fpu-custom-riscv/softfloat.mk.in
new file mode 100644
index 0000000..7f70053
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/softfloat.mk.in
@@ -0,0 +1,126 @@
+softfloat_subproject_deps = \
+
+softfloat_hdrs = \
+       internals.h \
+       primitives.h \
+       softfloat.h \
+       softfloat_types.h \
+       platform.h \
+       specialize.h \
+
+softfloat_c_srcs = \
+       f32_add.c                      \
+       f32_div.c                      \
+       f32_eq.c                       \
+       f32_eq_signaling.c             \
+       f32_isSignalingNaN.c           \
+       f32_le.c                       \
+       f32_le_quiet.c                 \
+       f32_lt.c                       \
+       f32_lt_quiet.c                 \
+       f32_mulAdd.c                   \
+       f32_mul.c                      \
+       f32_rem.c                      \
+       f32_roundToInt.c               \
+       f32_sqrt.c                     \
+       f32_sub.c                      \
+       f32_to_f64.c                   \
+       f32_to_i32.c                   \
+       f32_to_i32_r_minMag.c          \
+       f32_to_i64.c                   \
+       f32_to_i64_r_minMag.c          \
+       f32_to_ui32.c                  \
+       f32_to_ui32_r_minMag.c         \
+       f32_to_ui64.c                  \
+       f32_to_ui64_r_minMag.c         \
+       f32_classify.c                 \
+       f64_add.c                      \
+       f64_div.c                      \
+       f64_eq.c                       \
+       f64_eq_signaling.c             \
+       f64_isSignalingNaN.c           \
+       f64_le.c                       \
+       f64_le_quiet.c                 \
+       f64_lt.c                       \
+       f64_lt_quiet.c                 \
+       f64_mulAdd.c                   \
+       f64_mul.c                      \
+       f64_rem.c                      \
+       f64_roundToInt.c               \
+       f64_sqrt.c                     \
+       f64_sub.c                      \
+       f64_to_f32.c                   \
+       f64_to_i32.c                   \
+       f64_to_i32_r_minMag.c          \
+       f64_to_i64.c                   \
+       f64_to_i64_r_minMag.c          \
+       f64_to_ui32.c                  \
+       f64_to_ui32_r_minMag.c         \
+       f64_to_ui64.c                  \
+       f64_to_ui64_r_minMag.c         \
+       f64_classify.c                 \
+       i32_to_f32.c                   \
+       i32_to_f64.c                   \
+       i64_to_f32.c                   \
+       i64_to_f64.c                   \
+       s_add128.c                     \
+       s_add192.c                     \
+       s_addMagsF32.c                 \
+       s_addMagsF64.c                 \
+       s_countLeadingZeros32.c        \
+       s_countLeadingZeros64.c        \
+       s_countLeadingZeros8.c         \
+       s_eq128.c                      \
+       s_estimateDiv128To64.c         \
+       s_estimateSqrt32.c             \
+       s_le128.c                      \
+       s_lt128.c                      \
+       s_mul128By64To192.c            \
+       s_mul128To256.c                \
+       s_mul64To128.c                 \
+       s_mulAddF32.c                  \
+       s_mulAddF64.c                  \
+       s_normRoundPackToF32.c         \
+       s_normRoundPackToF64.c         \
+       s_normSubnormalF32Sig.c        \
+       s_normSubnormalF64Sig.c        \
+       softfloat_state.c              \
+       s_roundPackToF32.c             \
+       s_roundPackToF64.c             \
+       s_roundPackToI32.c             \
+       s_roundPackToI64.c             \
+       s_roundPackToUI32.c            \
+       s_roundPackToUI64.c            \
+       s_shift128ExtraRightJam.c      \
+       s_shift128RightJam.c           \
+       s_shift32RightJam.c            \
+       s_shift64ExtraRightJam.c       \
+       s_shift64RightJam.c            \
+       s_shortShift128ExtraRightJam.c \
+       s_shortShift128Left.c          \
+       s_shortShift128Right.c         \
+       s_shortShift192Left.c          \
+       s_shortShift32Right1Jam.c      \
+       s_shortShift64ExtraRightJam.c  \
+       s_shortShift64RightJam.c       \
+       s_sub128.c                     \
+       s_sub192.c                     \
+       s_subMagsF32.c                 \
+       s_subMagsF64.c                 \
+       ui32_to_f32.c                  \
+       ui32_to_f64.c                  \
+       ui64_to_f32.c                  \
+       ui64_to_f64.c                  \
+       softfloat_raiseFlags.c         \
+       s_commonNaNToF32UI.c           \
+       s_commonNaNToF64UI.c           \
+       s_f32UIToCommonNaN.c           \
+       s_f64UIToCommonNaN.c           \
+       s_isSigNaNF32UI.c              \
+       s_isSigNaNF64UI.c              \
+       s_propagateNaNF32UI.c          \
+       s_propagateNaNF64UI.c          \
+
+softfloat_test_srcs =
+
+softfloat_install_prog_srcs =
diff --git a/target-riscv/fpu-custom-riscv/softfloat_raiseFlags.c 
b/target-riscv/fpu-custom-riscv/softfloat_raiseFlags.c
new file mode 100755
index 0000000..c0c0dc8
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/softfloat_raiseFlags.c
@@ -0,0 +1,51 @@
+
+/*============================================================================
+
+Exception-raising hook for the RISC-V port of SoftFloat: the current policy is
+simply to accumulate flags; floating-point traps may be installed here later.
+
+This C source fragment is part of the SoftFloat IEC/IEEE Floating-point
+Arithmetic Package, Release 2b.
+
+Written by John R. Hauser.  This work was made possible in part by the
+International Computer Science Institute, located at Suite 600, 1947 Center
+Street, Berkeley, California 94704.  Funding was partially provided by the
+National Science Foundation under grant MIP-9311980.  The original version
+of this code was written as part of a project to build a fixed-point vector
+processor in collaboration with the University of California at Berkeley,
+overseen by Profs. Nelson Morgan and John Wawrzynek.  More information
+is available through the Web page `http://www.cs.berkeley.edu/~jhauser/
+arithmetic/SoftFloat.html'.
+
+THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE.  Although reasonable effort has
+been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT TIMES
+RESULT IN INCORRECT BEHAVIOR.  USE OF THIS SOFTWARE IS RESTRICTED TO PERSONS
+AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ALL LOSSES,
+COSTS, OR OTHER PROBLEMS THEY INCUR DUE TO THE SOFTWARE, AND WHO FURTHERMORE
+EFFECTIVELY INDEMNIFY JOHN HAUSER AND THE INTERNATIONAL COMPUTER SCIENCE
+INSTITUTE (possibly via similar legal warning) AGAINST ALL LOSSES, COSTS, OR
+OTHER PROBLEMS INCURRED BY THEIR CUSTOMERS AND CLIENTS DUE TO THE SOFTWARE.
+
+Derivative works are acceptable, even for commercial purposes, so long as
+(1) the source code for the derivative work includes prominent notice that
+the work is derivative, and (2) the source code includes prominent notice with
+these four paragraphs for those parts of this code that are retained.
+
+=============================================================================*/
+
+#include "platform.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Raises the exceptions specified by `flags'.  Floating-point traps can be
+| defined here if desired.  It is currently not possible for such a trap
+| to substitute a result value.  If traps are not implemented, this routine
+| should be simply `float_exception_flags |= flags;'.
+*----------------------------------------------------------------------------*/
+
+void softfloat_raiseFlags( int_fast8_t flags )
+{
+
+    softfloat_exceptionFlags |= flags;   /* accumulate; no traps are implemented */
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/softfloat_state.c 
b/target-riscv/fpu-custom-riscv/softfloat_state.c
new file mode 100755
index 0000000..8859089
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/softfloat_state.c
@@ -0,0 +1,19 @@
+
+/* Definitions of SoftFloat's global state variables (declared extern in softfloat.h). */
+
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Floating-point rounding mode, extended double-precision rounding precision,
+| and exception flags.
+*----------------------------------------------------------------------------*/
+int_fast8_t softfloat_roundingMode = softfloat_round_nearest_even;
+int_fast8_t softfloat_detectTininess = init_detectTininess;   /* default supplied by specialize.h */
+int_fast8_t softfloat_exceptionFlags = 0;
+
+int_fast8_t floatx80_roundingPrecision = 80;   /* full extended precision by default */
+
diff --git a/target-riscv/fpu-custom-riscv/softfloat_types.h 
b/target-riscv/fpu-custom-riscv/softfloat_types.h
new file mode 100755
index 0000000..9fada89
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/softfloat_types.h
@@ -0,0 +1,16 @@
+
+#ifndef softfloat_types_h
+#define softfloat_types_h
+
+/* Storage types for SoftFloat's floating-point formats.  Values are held as raw integer bit patterns, not native C floating-point types. */
+
+#include <stdbool.h>
+#include <stdint.h>
+
+typedef uint32_t float32_t;   /* IEEE single-precision bit pattern */
+typedef uint64_t float64_t;   /* IEEE double-precision bit pattern */
+typedef struct { uint64_t v; uint16_t x; } floatx80_t;   /* presumably v = 64-bit significand, x = packed sign/exponent -- confirm against fx80 routines */
+typedef struct { uint64_t v[ 2 ]; } float128_t;   /* 128-bit value as two 64-bit words */
+
+#endif
+
diff --git a/target-riscv/fpu-custom-riscv/specialize.h 
b/target-riscv/fpu-custom-riscv/specialize.h
new file mode 100755
index 0000000..4b0138a
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/specialize.h
@@ -0,0 +1,113 @@
+
+/*============================================================================
+
+NaN-handling specialization header for this SoftFloat build.
+
+This C source fragment is part of the SoftFloat IEC/IEEE Floating-point
+Arithmetic Package, Release 2b.
+
+Written by John R. Hauser.  This work was made possible in part by the
+International Computer Science Institute, located at Suite 600, 1947 Center
+Street, Berkeley, California 94704.  Funding was partially provided by the
+National Science Foundation under grant MIP-9311980.  The original version
+of this code was written as part of a project to build a fixed-point vector
+processor in collaboration with the University of California at Berkeley,
+overseen by Profs. Nelson Morgan and John Wawrzynek.  More information
+is available through the Web page `http://www.cs.berkeley.edu/~jhauser/
+arithmetic/SoftFloat.html'.
+
+THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE.  Although reasonable effort has
+been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT TIMES
+RESULT IN INCORRECT BEHAVIOR.  USE OF THIS SOFTWARE IS RESTRICTED TO PERSONS
+AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ALL LOSSES,
+COSTS, OR OTHER PROBLEMS THEY INCUR DUE TO THE SOFTWARE, AND WHO FURTHERMORE
+EFFECTIVELY INDEMNIFY JOHN HAUSER AND THE INTERNATIONAL COMPUTER SCIENCE
+INSTITUTE (possibly via similar legal warning) AGAINST ALL LOSSES, COSTS, OR
+OTHER PROBLEMS INCURRED BY THEIR CUSTOMERS AND CLIENTS DUE TO THE SOFTWARE.
+
+Derivative works are acceptable, even for commercial purposes, so long as
+(1) the source code for the derivative work includes prominent notice that
+the work is derivative, and (2) the source code includes prominent notice with
+these four paragraphs for those parts of this code that are retained.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+
+/*----------------------------------------------------------------------------
+| Default tininess-detection mode.  The trailing semicolon is removed: this
+| macro is used as an initializer expression (see softfloat_state.c), so it
+| must not carry its own statement terminator.
+*----------------------------------------------------------------------------*/
+#define init_detectTininess softfloat_tininess_beforeRounding
+
+/*----------------------------------------------------------------------------
+| Structure used to transfer NaN representations from one format to another.
+*----------------------------------------------------------------------------*/
+struct commonNaN {
+    bool sign;
+    uint64_t v64, v0;
+};
+
+/*----------------------------------------------------------------------------
+| The pattern for a default generated single-precision NaN.
+*----------------------------------------------------------------------------*/
+#define defaultNaNF32UI 0x7FC00000
+
+/*----------------------------------------------------------------------------
+| Returns 1 if the single-precision floating-point value `a' is a signaling
+| NaN; otherwise, returns 0.  (Exponent all ones, quiet bit clear, nonzero
+| fraction.)
+*----------------------------------------------------------------------------*/
+#if defined INLINE_LEVEL && ( 1 <= INLINE_LEVEL )
+INLINE bool softfloat_isSigNaNF32UI( uint_fast32_t ui )
+    { return ( ( ui>>22 & 0x1FF ) == 0x1FE ) && ( ui & 0x003FFFFF ); }
+#else
+bool softfloat_isSigNaNF32UI( uint_fast32_t );
+#endif
+
+/*----------------------------------------------------------------------------
+| Conversions between the common NaN form and the single-precision format.
+| The inline commonNaN-to-F32 conversion always produces the default NaN.
+*----------------------------------------------------------------------------*/
+struct commonNaN softfloat_f32UIToCommonNaN( uint_fast32_t );
+#if defined INLINE_LEVEL && ( 1 <= INLINE_LEVEL )
+INLINE uint_fast32_t softfloat_commonNaNToF32UI( struct commonNaN a )
+    { return defaultNaNF32UI; }
+#else
+uint_fast32_t softfloat_commonNaNToF32UI( struct commonNaN );
+#endif
+
+/*----------------------------------------------------------------------------
+| Takes two single-precision floating-point values `a' and `b', one of which
+| is a NaN, and returns the appropriate NaN result.  If either `a' or `b' is a
+| signaling NaN, the invalid exception is raised.
+*----------------------------------------------------------------------------*/
+uint_fast32_t softfloat_propagateNaNF32UI( uint_fast32_t, uint_fast32_t );
+
+/*----------------------------------------------------------------------------
+| The pattern for a default generated double-precision NaN.
+*----------------------------------------------------------------------------*/
+#define defaultNaNF64UI UINT64_C(0x7FF8000000000000)
+
+/*----------------------------------------------------------------------------
+| Returns 1 if the double-precision floating-point value `a' is a signaling
+| NaN; otherwise, returns 0.
+*----------------------------------------------------------------------------*/
+#if defined INLINE_LEVEL && ( 1 <= INLINE_LEVEL )
+INLINE bool softfloat_isSigNaNF64UI( uint_fast64_t ui )
+{
+    return
+        ( ( ui>>51 & 0xFFF ) == 0xFFE )
+            && ( ui & UINT64_C( 0x0007FFFFFFFFFFFF ) );
+}
+#else
+bool softfloat_isSigNaNF64UI( uint_fast64_t );
+#endif
+
+/*----------------------------------------------------------------------------
+| Conversions between the common NaN form and the double-precision format.
+*----------------------------------------------------------------------------*/
+/*** MIGHT BE INLINE'D. ***/
+struct commonNaN softfloat_f64UIToCommonNaN( uint_fast64_t );
+uint_fast64_t softfloat_commonNaNToF64UI( struct commonNaN );
+
+/*----------------------------------------------------------------------------
+| Takes two double-precision floating-point values `a' and `b', one of which
+| is a NaN, and returns the appropriate NaN result.  If either `a' or `b' is a
+| signaling NaN, the invalid exception is raised.
+*----------------------------------------------------------------------------*/
+uint_fast64_t softfloat_propagateNaNF64UI( uint_fast64_t, uint_fast64_t );
+
diff --git a/target-riscv/fpu-custom-riscv/ui32_to_f32.c 
b/target-riscv/fpu-custom-riscv/ui32_to_f32.c
new file mode 100755
index 0000000..ba0fc1a
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/ui32_to_f32.c
@@ -0,0 +1,25 @@
+
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+#include "internals.h"
+#include "softfloat.h"
+
+/* Convert an unsigned 32-bit integer to single precision, rounding per the
+ * current rounding mode when `a' needs more than 24 significand bits. */
+float32_t ui32_to_f32( uint_fast32_t a )
+{
+    union ui32_f32 uZ;
+
+    if ( ! a ) {
+        uZ.ui = 0;
+        return uZ.f;
+    }
+    if ( a & 0x80000000 ) {
+        /* Bit 31 set: shift right one place (jamming the lost bit) and
+         * compensate with biased exponent 0x9D instead of 0x9C. */
+        return
+            softfloat_roundPackToF32(
+                0, 0x9D, softfloat_shortShift32Right1Jam( a ) );
+    } else {
+        return softfloat_normRoundPackToF32( 0, 0x9C, a );
+    }
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/ui32_to_f64.c 
b/target-riscv/fpu-custom-riscv/ui32_to_f64.c
new file mode 100755
index 0000000..d0bd177
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/ui32_to_f64.c
@@ -0,0 +1,26 @@
+
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+#include "internals.h"
+#include "softfloat.h"
+
+/* Convert an unsigned 32-bit integer to double precision.  The conversion
+ * is always exact: any 32-bit value fits the binary64 significand, so the
+ * result is packed directly without a rounding step. */
+float64_t ui32_to_f64( uint_fast32_t a )
+{
+    uint_fast64_t uiZ;
+    int shiftCount;
+    union ui64_f64 uZ;
+
+    if ( ! a ) {
+        uiZ = 0;
+    } else {
+        /* Normalize: position the leading 1 of `a' and derive the biased
+         * exponent from the shift amount. */
+        shiftCount = softfloat_countLeadingZeros32( a ) + 21;
+        uiZ =
+            packToF64UI(
+                0, 0x432 - shiftCount, (uint_fast64_t) a<<shiftCount );
+    }
+    uZ.ui = uiZ;
+    return uZ.f;
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/ui64_to_f32.c 
b/target-riscv/fpu-custom-riscv/ui64_to_f32.c
new file mode 100755
index 0000000..82afbdc
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/ui64_to_f32.c
@@ -0,0 +1,31 @@
+
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+#include "internals.h"
+#include "softfloat.h"
+
+/* Convert an unsigned 64-bit integer to single precision, rounding per the
+ * current rounding mode when `a' needs more than 24 significand bits. */
+float32_t ui64_to_f32( uint_fast64_t a )
+{
+    int shiftCount;
+    union ui32_f32 u;
+    uint_fast32_t sig;
+
+    shiftCount = softfloat_countLeadingZeros64( a ) - 40;
+    if ( 0 <= shiftCount ) {
+        /* Value fits in 24 bits: pack exactly, no rounding needed. */
+        u.ui =
+            a ? packToF32UI(
+                    0, 0x95 - shiftCount, (uint_fast32_t) a<<shiftCount )
+                : 0;
+        return u.f;
+    } else {
+        /* Too many significant bits: align to a 31-bit field (jamming any
+         * bits shifted out) and round. */
+        shiftCount += 7;
+        sig =
+            ( shiftCount < 0 )
+                ? softfloat_shortShift64RightJam( a, - shiftCount )
+                : (uint_fast32_t) a<<shiftCount;
+        return softfloat_roundPackToF32( 0, 0x9C - shiftCount, sig );
+    }
+
+}
+
diff --git a/target-riscv/fpu-custom-riscv/ui64_to_f64.c 
b/target-riscv/fpu-custom-riscv/ui64_to_f64.c
new file mode 100755
index 0000000..52c158b
--- /dev/null
+++ b/target-riscv/fpu-custom-riscv/ui64_to_f64.c
@@ -0,0 +1,25 @@
+
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+#include "internals.h"
+#include "softfloat.h"
+
+/* Convert an unsigned 64-bit integer to double precision, rounding per the
+ * current rounding mode when `a' needs more than 53 significand bits. */
+float64_t ui64_to_f64( uint_fast64_t a )
+{
+    union ui64_f64 uZ;
+
+    if ( ! a ) {
+        uZ.ui = 0;
+        return uZ.f;
+    }
+    if ( a & UINT64_C( 0x8000000000000000 ) ) {
+        /* Bit 63 set: shift right one place (jamming the lost bit) and
+         * compensate with biased exponent 0x43D instead of 0x43C. */
+        return
+            softfloat_roundPackToF64(
+                0, 0x43D, softfloat_shortShift64RightJam( a, 1 ) );
+    } else {
+        return softfloat_normRoundPackToF64( 0, 0x43C, a );
+    }
+
+}
+
diff --git a/target-riscv/gdbstub.c b/target-riscv/gdbstub.c
new file mode 100644
index 0000000..054c042
--- /dev/null
+++ b/target-riscv/gdbstub.c
@@ -0,0 +1,177 @@
+/*
+ * RISC-V GDB Server Stub
+ *
+ * Author: Sagar Karandikar, address@hidden
+ *
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "config.h"
+#include "qemu-common.h"
+#include "exec/gdbstub.h"
+#include "cpu.h"
+
+// Maps GDB register numbers 65..131 (after subtracting 65) to internal CSR
+// numbers.  The order must match the CSR ordering the GDB client expects;
+// the first three entries (fflags/frm/fcsr) are the 32-bit-wide CSRs, which
+// riscv_cpu_gdb_read_register/riscv_cpu_gdb_write_register treat specially.
+int indexed_csrs[] = {
+    NEW_CSR_FFLAGS,
+    NEW_CSR_FRM,
+    NEW_CSR_FCSR,
+    NEW_CSR_CYCLE,
+    NEW_CSR_TIME,
+    NEW_CSR_INSTRET,
+    NEW_CSR_STATS,
+    NEW_CSR_UARCH0,
+    NEW_CSR_UARCH1,
+    NEW_CSR_UARCH2,
+    NEW_CSR_UARCH3,
+    NEW_CSR_UARCH4,
+    NEW_CSR_UARCH5,
+    NEW_CSR_UARCH6,
+    NEW_CSR_UARCH7,
+    NEW_CSR_UARCH8,
+    NEW_CSR_UARCH9,
+    NEW_CSR_UARCH10,
+    NEW_CSR_UARCH11,
+    NEW_CSR_UARCH12,
+    NEW_CSR_UARCH13,
+    NEW_CSR_UARCH14,
+    NEW_CSR_UARCH15,
+    NEW_CSR_SSTATUS,
+    NEW_CSR_STVEC,
+    NEW_CSR_SIE,
+    NEW_CSR_SSCRATCH,
+    NEW_CSR_SEPC,
+    NEW_CSR_SIP,
+    NEW_CSR_SPTBR,
+    NEW_CSR_SASID,
+    NEW_CSR_CYCLEW,
+    NEW_CSR_TIMEW,
+    NEW_CSR_INSTRETW,
+    NEW_CSR_STIME,
+    NEW_CSR_SCAUSE,
+    NEW_CSR_SBADADDR,
+    NEW_CSR_STIMEW,
+    NEW_CSR_MSTATUS,
+    NEW_CSR_MTVEC,
+    NEW_CSR_MTDELEG,
+    NEW_CSR_MIE,
+    NEW_CSR_MTIMECMP,
+    NEW_CSR_MSCRATCH,
+    NEW_CSR_MEPC,
+    NEW_CSR_MCAUSE,
+    NEW_CSR_MBADADDR,
+    NEW_CSR_MIP,
+    NEW_CSR_MTIME,
+    NEW_CSR_MCPUID,
+    NEW_CSR_MIMPID,
+    NEW_CSR_MHARTID,
+    NEW_CSR_MTOHOST,
+    NEW_CSR_MFROMHOST,
+    NEW_CSR_MRESET,
+    NEW_CSR_MIPI,
+    NEW_CSR_MIOBASE,
+    NEW_CSR_CYCLEH,
+    NEW_CSR_TIMEH,
+    NEW_CSR_INSTRETH,
+    NEW_CSR_CYCLEHW,
+    NEW_CSR_TIMEHW,
+    NEW_CSR_INSTRETHW,
+    NEW_CSR_STIMEH,
+    NEW_CSR_STIMEHW,
+    NEW_CSR_MTIMECMPH,
+    NEW_CSR_MTIMEH
+};
+
+/* Read GDB register `n' into mem_buf.  Register layout used by this stub:
+ *   0..31   general-purpose registers
+ *   32      PC
+ *   33..64  floating-point registers
+ *   65..131 CSRs, translated through the indexed_csrs[] table
+ * Returns the number of bytes stored into mem_buf, or 0 for an unknown n. */
+int riscv_cpu_gdb_read_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+    RISCVCPU *cpu = RISCV_CPU(cs);
+    CPURISCVState *env = &cpu->env;
+    int target_csrno;
+
+    if (n < 32) {
+        return gdb_get_regl(mem_buf, env->active_tc.gpr[n]);
+    } else if (n == 32) {
+        return gdb_get_regl(mem_buf, env->active_tc.PC);
+    } else if (n < 65) {
+        return gdb_get_regl(mem_buf, env->active_tc.fpr[n-33]);
+    } else if (n < 132) {
+        n -= 65;
+        target_csrno = indexed_csrs[n];
+        switch (target_csrno)
+        {
+        // 32-bit wide
+        case NEW_CSR_FFLAGS:
+        case NEW_CSR_FRM:
+        case NEW_CSR_FCSR:
+            return gdb_get_reg32(mem_buf, csr_read_helper(env, target_csrno));
+
+        // unused on RV64 or not implemented: reported to GDB as zero
+        case NEW_CSR_MTIMEH:
+        case NEW_CSR_STIMEH:
+        case NEW_CSR_STIMEHW:
+        case NEW_CSR_TIMEH:
+        case NEW_CSR_TIMEHW:
+        case NEW_CSR_CYCLEH:
+        case NEW_CSR_INSTRETH:
+        case NEW_CSR_CYCLEHW:
+        case NEW_CSR_INSTRETHW:
+        case NEW_CSR_STATS:
+        case NEW_CSR_MRESET:
+        case NEW_CSR_MTIMECMPH:
+            return gdb_get_regl(mem_buf, 0L);
+
+        // special MFROMHOST, MTOHOST: read the raw env->csr[] slot rather
+        // than going through csr_read_helper
+        // NOTE(review): presumably to avoid the helper's HTIF side effects
+        // for these two registers -- confirm against csr_read_helper.
+        case NEW_CSR_MFROMHOST:
+        case NEW_CSR_MTOHOST:
+            return gdb_get_regl(mem_buf, env->csr[target_csrno]);
+
+        // all others take the normal CSR read path
+        default:
+            return gdb_get_regl(mem_buf, csr_read_helper(env, target_csrno));
+        }
+    }
+    return 0;
+}
+
+/* Write GDB register `n' from mem_buf; register numbering mirrors
+ * riscv_cpu_gdb_read_register.  Returns the number of bytes consumed,
+ * or 0 for an unknown register. */
+int riscv_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+    RISCVCPU *cpu = RISCV_CPU(cs);
+    CPURISCVState *env = &cpu->env;
+    target_ulong tmp;
+    int target_csrno;
+
+    tmp = ldtul_p(mem_buf);
+
+    if (n < 32) {
+        env->active_tc.gpr[n] = tmp;
+        return sizeof(target_ulong);
+    } else if (n == 32) {
+        env->active_tc.PC = tmp;
+        return sizeof(target_ulong);
+    } else if (n < 65) {
+        env->active_tc.fpr[n-33] = tmp;
+        return sizeof(target_ulong);
+    } else if (n < 132) {
+        n -= 65;
+        target_csrno = indexed_csrs[n];
+        // NOTE(review): stores directly into env->csr[], bypassing any
+        // side effects csr_write_helper would apply -- confirm intended.
+        env->csr[target_csrno] = tmp;
+        // the first three indexed CSRs (fflags/frm/fcsr) are 32 bits wide
+        if (n < 3) {
+            return sizeof(uint32_t);
+        } else {
+            return sizeof(target_ulong);
+        }
+    }
+    return 0;
+}
diff --git a/target-riscv/helper.c b/target-riscv/helper.c
new file mode 100644
index 0000000..ab8902e
--- /dev/null
+++ b/target-riscv/helper.c
@@ -0,0 +1,356 @@
+/*
+ *  RISC-V emulation helpers for qemu.
+ *
+ *  Author: Sagar Karandikar, address@hidden
+ *
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include <stdarg.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <inttypes.h>
+#include <signal.h>
+#include "cpu.h"
+
+#define QEMU_IN_FETCH 0x2
+#define QEMU_IN_WRITE 0x1
+#define QEMU_IN_READ  0x0
+
+#if !defined(CONFIG_USER_ONLY)
+
+/* Check for and take a pending hardware interrupt.  Returns true if an
+ * interrupt was delivered (riscv_cpu_do_interrupt was invoked). */
+bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request) {
+    if (interrupt_request & CPU_INTERRUPT_HARD) {
+        RISCVCPU *cpu = RISCV_CPU(cs);
+        CPURISCVState *env = &cpu->env;
+        int interruptno = cpu_riscv_hw_interrupts_pending(env);
+        /* -1 means no interrupt pending (was the obscure `interruptno + 1`
+         * truthiness test; `!= -1` is exactly equivalent). */
+        if (interruptno != -1) {
+            /* 0x70000000 tags exception_index as an interrupt; see
+             * riscv_cpu_do_interrupt, which strips it again for mcause. */
+            cs->exception_index = 0x70000000U | interruptno;
+            riscv_cpu_do_interrupt(cs);
+            return true;
+        }
+    }
+    return false;
+}
+
+/* get_physical_address - get the physical address for this virtual address
+ *
+ * Do a page table walk to obtain the physical address corresponding to a
+ * virtual address.
+ *
+ * rw is QEMU_IN_FETCH, QEMU_IN_WRITE or QEMU_IN_READ.  mmu_idx is the
+ * translation regime QEMU believes is active and must agree with the
+ * privilege mode derived from MSTATUS (checked below).
+ *
+ * Returns TRANSLATE_SUCCESS (0) if the translation was successful.
+ */
+static int get_physical_address (CPURISCVState *env, hwaddr *physical,
+                                int *prot, target_ulong address,
+                                int rw, int mmu_idx)
+{
+    /* NOTE: the env->active_tc.PC value visible here will not be
+     * correct, but the value visible to the exception handler
+     * (riscv_cpu_do_interrupt) is correct */
+    *prot = 0;
+    CPUState *cs = CPU(riscv_env_get_cpu(env));
+
+    /* Effective privilege: MPRV redirects loads/stores (never fetches) to
+     * the PRV1 privilege; VM_MBARE forces machine-mode (bare) translation. */
+    target_ulong mode = get_field(env->csr[NEW_CSR_MSTATUS], MSTATUS_PRV);
+    if (rw != QEMU_IN_FETCH &&
+        get_field(env->csr[NEW_CSR_MSTATUS], MSTATUS_MPRV)) {
+        mode = get_field(env->csr[NEW_CSR_MSTATUS], MSTATUS_PRV1);
+    }
+    if (get_field(env->csr[NEW_CSR_MSTATUS], MSTATUS_VM) == VM_MBARE) {
+        mode = PRV_M;
+    }
+
+    // check to make sure that mmu_idx and mode that we get matches
+    if (unlikely(mode != mmu_idx)) {
+        fprintf(stderr, "MODE, mmu_idx mismatch\n");
+        exit(1);
+    }
+
+    if (mode == PRV_M) {
+        /* Bare mode: identity mapping, full permissions.  The original
+         * `(2L << 63) - 1` shifts past the width of long (undefined
+         * behavior, C11 6.5.7); an all-ones mask preserves the evident
+         * intent of passing the address through unchanged. */
+        target_ulong msb_mask = ~(target_ulong)0;
+        *physical = address & msb_mask;
+        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+        return TRANSLATE_SUCCESS;
+    }
+
+    target_ulong addr = address;
+    int supervisor = mode > PRV_U;
+
+    /* Page-table geometry for the configured translation scheme. */
+    int levels, ptidxbits, ptesize;
+    switch (get_field(env->csr[NEW_CSR_MSTATUS], MSTATUS_VM))
+    {
+      case VM_SV32:
+          printf("currently unsupported SV32\n");
+          exit(1);
+          levels = 2;
+          ptidxbits = 10;
+          ptesize = 4;
+          break;
+      case VM_SV39:
+          levels = 3;
+          ptidxbits = 9;
+          ptesize = 8;
+          break;
+      case VM_SV48:
+          levels = 4;
+          ptidxbits = 9;
+          ptesize = 8;
+          break;
+      default:
+          printf("unsupported MSTATUS_VM value\n");
+          exit(1);
+    }
+
+    /* Virtual addresses must be sign-extended from va_bits: the upper bits
+     * must be all-zero or all-one, otherwise the address is invalid. */
+    int va_bits = PGSHIFT + levels * ptidxbits;
+    target_ulong mask = (1L << (64 - (va_bits-1))) - 1;
+    target_ulong masked_msbs = (addr >> (va_bits-1)) & mask;
+    if (masked_msbs != 0 && masked_msbs != mask) {
+        return TRANSLATE_FAIL;
+    }
+
+    target_ulong base = env->csr[NEW_CSR_SPTBR];
+    int ptshift = (levels - 1) * ptidxbits;
+    int i;
+    for (i = 0; i < levels; i++, ptshift -= ptidxbits) {
+        target_ulong idx = (addr >> (PGSHIFT + ptshift)) &
+                           ((1 << ptidxbits) - 1);
+
+        // check that physical address of PTE is legal
+        target_ulong pte_addr = base + idx * ptesize;
+        if (pte_addr >= env->memsize) {
+            break;
+        }
+
+        target_ulong pte = ldq_phys(cs->as, pte_addr);
+        target_ulong ppn = pte >> PTE_PPN_SHIFT;
+
+        if (PTE_TABLE(pte)) { // next level of page table
+            base = ppn << PGSHIFT;
+        } else if (!PTE_CHECK_PERM(pte, supervisor, rw == QEMU_IN_WRITE,
+                    rw == QEMU_IN_FETCH)) {
+            break;
+        } else {
+            // set referenced and possibly dirty bits.
+            // we only put it in the TLB if it has the right stuff
+            stq_phys(cs->as, pte_addr, ldq_phys(cs->as, pte_addr) | PTE_R |
+                    ((rw == QEMU_IN_WRITE) * PTE_D));
+
+            // for superpage mappings, make a fake leaf PTE for the TLB's
+            // benefit: fold the untranslated VPN bits into the PPN.
+            target_ulong vpn = addr >> PGSHIFT;
+            *physical = (ppn | (vpn & ((1L << ptshift) - 1))) << PGSHIFT;
+
+            // we do not give all prots indicated by the PTE
+            // this is because future accesses need to do things like set the
+            // dirty bit on the PTE
+            if (supervisor) {
+                if (PTE_SX(pte) && rw == QEMU_IN_FETCH) {
+                    *prot |= PAGE_EXEC;
+                } else if (PTE_SW(pte) && rw == QEMU_IN_WRITE) {
+                    *prot |= PAGE_WRITE;
+                } else if (PTE_SR(pte) && rw == QEMU_IN_READ) {
+                    *prot |= PAGE_READ;
+                } else {
+                    printf("err in translation prots");
+                    exit(1);
+                }
+            } else {
+                if (PTE_UX(pte) && rw == QEMU_IN_FETCH) {
+                    *prot |= PAGE_EXEC;
+                } else if (PTE_UW(pte) && rw == QEMU_IN_WRITE) {
+                    *prot |= PAGE_WRITE;
+                } else if (PTE_UR(pte) && rw == QEMU_IN_READ) {
+                    *prot |= PAGE_READ;
+                } else {
+                    printf("err in translation prots");
+                    exit(1);
+                }
+            }
+            return TRANSLATE_SUCCESS;
+        }
+    }
+    return TRANSLATE_FAIL;
+}
+#endif
+
+/* Record a pending MMU access fault: select the cause matching the access
+ * type, stash the faulting virtual address in MBADADDR, and set the CPU's
+ * pending exception index. */
+static void raise_mmu_exception(CPURISCVState *env, target_ulong address,
+                                int rw)
+{
+    CPUState *cs = CPU(riscv_env_get_cpu(env));
+    int exception = 0;
+
+    switch (rw) {
+    case QEMU_IN_FETCH: /* inst access */
+        exception = NEW_RISCV_EXCP_INST_ACCESS_FAULT;
+        break;
+    case QEMU_IN_WRITE: /* store access */
+        exception = NEW_RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
+        break;
+    case QEMU_IN_READ: /* load access */
+        exception = NEW_RISCV_EXCP_LOAD_ACCESS_FAULT;
+        break;
+    default:
+        fprintf(stderr, "FAIL: invalid rw\n");
+        exit(1);
+    }
+    env->csr[NEW_CSR_MBADADDR] = address;
+    cs->exception_index = exception;
+}
+
+#if !defined(CONFIG_USER_ONLY)
+/* Translate a virtual address for debugger access (rw = 0 = QEMU_IN_READ).
+ * Returns the physical address, or -1 if no valid mapping exists.
+ * NOTE(review): the underlying walk still sets PTE referenced/dirty bits
+ * even for this debug path -- confirm that is acceptable. */
+hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
+{
+    RISCVCPU *cpu = RISCV_CPU(cs);
+    hwaddr phys_addr;
+    int prot;
+    int mem_idx = cpu_mmu_index(&cpu->env, false);
+
+    if (get_physical_address(&cpu->env, &phys_addr, &prot, addr, 0, mem_idx)) {
+        return -1;
+    }
+    return phys_addr;
+}
+#endif
+
+/* This is called when there is no QEMU "TLB" match
+ *
+ * Assuming system mode, only called in target-riscv/op_helper:tlb_fill
+ *
+ * Returns the get_physical_address() result: TRANSLATE_SUCCESS after
+ * installing the translation in QEMU's TLB, or TRANSLATE_FAIL after
+ * arming the pending MMU exception.
+ */
+int riscv_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
+                               int mmu_idx)
+{
+    RISCVCPU *cpu = RISCV_CPU(cs);
+    CPURISCVState *env = &cpu->env;
+    /* initialized at declaration (replaces the separate "stop gcc
+     * complaining" assignment) */
+    hwaddr physical = 0;
+    int prot;
+    int ret = 0;
+
+    qemu_log("%s pc " TARGET_FMT_lx " ad %" VADDR_PRIx " rw %d mmu_idx %d\n",
+              __func__, env->active_tc.PC, address, rw, mmu_idx);
+
+    ret = get_physical_address(env, &physical, &prot, address, rw, mmu_idx);
+    qemu_log("%s address=%" VADDR_PRIx " ret %d physical " TARGET_FMT_plx
+             " prot %d\n",
+             __func__, address, ret, physical, prot);
+    if (ret == TRANSLATE_SUCCESS) {
+        tlb_set_page(cs, address & TARGET_PAGE_MASK,
+                physical & TARGET_PAGE_MASK,
+                prot, mmu_idx, TARGET_PAGE_SIZE);
+    } else if (ret == TRANSLATE_FAIL) {
+        raise_mmu_exception(env, address, rw);
+    }
+    return ret;
+}
+
+/* Human-readable names for synchronous exceptions, indexed by cause value.
+ * NOTE(review): callers index this directly with cs->exception_index; keep
+ * in sync with the NEW_RISCV_EXCP_* numbering (must stay < 12). */
+static const char * const riscv_excp_names[12] = {
+    "Instruction Address Misaligned",
+    "Instruction Access Fault",
+    "Illegal Instruction",
+    "Breakpoint",
+    "Load Address Misaligned",
+    "Load Access Fault",
+    "Store/AMO Address Misaligned",
+    "Store/AMO Access Fault",
+    "User ECALL",
+    "Supervisor ECALL",
+    "Hypervisor ECALL",
+    "Machine ECALL",
+};
+
+/* Interrupt names, indexed by the interrupt number (exception_index with
+ * the 0x70000000 interrupt tag stripped). */
+static const char * const riscv_interrupt_names[3] = {
+    "Soft interrupt",
+    "Timer interrupt",
+    "Host interrupt"
+};
+
+/* Compute the MSTATUS value after trapping to machine mode: the current
+ * (PRV, IE) pair is pushed one level down the two-deep privilege stack
+ * (PRV->PRV1->PRV2, IE->IE1->IE2), privilege becomes PRV_M, and MPRV and
+ * IE are cleared.  Pure function of start_mstatus; no CPU state touched. */
+target_ulong push_priv_stack(target_ulong start_mstatus) {
+    target_ulong s = start_mstatus;
+    s = set_field(s, MSTATUS_PRV2, get_field(start_mstatus, MSTATUS_PRV1));
+    s = set_field(s, MSTATUS_IE2, get_field(start_mstatus, MSTATUS_IE1));
+    s = set_field(s, MSTATUS_PRV1, get_field(start_mstatus, MSTATUS_PRV));
+    s = set_field(s, MSTATUS_IE1, get_field(start_mstatus, MSTATUS_IE));
+    s = set_field(s, MSTATUS_PRV, PRV_M);
+    s = set_field(s, MSTATUS_MPRV, 0);
+    s = set_field(s, MSTATUS_IE, 0);
+    return s;
+}
+
+/* Compute the MSTATUS value after a trap return: pop the privilege stack
+ * (PRV1->PRV, IE1->IE, PRV2->PRV1, IE2->IE1) and refill the bottom entry
+ * with (PRV_U, IE=1).  Pure function of start_mstatus. */
+target_ulong pop_priv_stack(target_ulong start_mstatus) {
+    target_ulong s = start_mstatus;
+    s = set_field(s, MSTATUS_PRV, get_field(start_mstatus, MSTATUS_PRV1));
+    s = set_field(s, MSTATUS_IE, get_field(start_mstatus, MSTATUS_IE1));
+    s = set_field(s, MSTATUS_PRV1, get_field(start_mstatus, MSTATUS_PRV2));
+    s = set_field(s, MSTATUS_IE1, get_field(start_mstatus, MSTATUS_IE2));
+    s = set_field(s, MSTATUS_PRV2, PRV_U);
+    s = set_field(s, MSTATUS_IE2, 1);
+    return s;
+}
+
+/* Deliver the pending exception/interrupt in cs->exception_index: record
+ * the trap PC in MEPC, vector the PC, fill MCAUSE, and push the MSTATUS
+ * privilege stack.  All traps are taken to machine mode. */
+void riscv_cpu_do_interrupt(CPUState *cs)
+{
+    RISCVCPU *cpu = RISCV_CPU(cs);
+    CPURISCVState *env = &cpu->env;
+
+    #ifdef RISCV_DEBUG_INTERRUPT
+    if (cs->exception_index & 0x70000000) {
+        /* NOTE(review): assumes the masked interrupt number is < 3 (the
+         * size of riscv_interrupt_names) -- confirm for all sources. */
+        fprintf(stderr, "core   0: exception trap_%s, epc 0x%016lx\n",
+                riscv_interrupt_names[cs->exception_index & 0x0fffffff],
+                env->active_tc.PC);
+    } else {
+        fprintf(stderr, "core   0: exception trap_%s, epc 0x%016lx\n",
+                riscv_excp_names[cs->exception_index], env->active_tc.PC);
+    }
+    #endif
+
+    // Store original PC to epc reg
+    // This is correct because the env->active_tc.PC value visible here is
+    // actually the correct value, unlike other places where env->active_tc.PC
+    // may be used.
+    env->csr[NEW_CSR_MEPC] = env->active_tc.PC;
+
+    // set PC to handler: trap vector entries are 0x40 apart, one per
+    // privilege level
+    env->active_tc.PC = DEFAULT_MTVEC +
+        0x40 * get_field(env->csr[NEW_CSR_MSTATUS], MSTATUS_PRV);
+
+
+    // Store Cause in CSR_CAUSE. this comes from cs->exception_index
+    if (cs->exception_index & (0x70000000)) {
+        // hacky for now: the MSB (bit 63) of mcause indicates an interrupt,
+        // but cs->exception_index is only 32 bits wide
+        env->csr[NEW_CSR_MCAUSE] = cs->exception_index & 0x0FFFFFFF;
+        // NOTE(review): `1L << 63` shifts into the sign bit (UB);
+        // `1UL << 63` would be the clean spelling.
+        env->csr[NEW_CSR_MCAUSE] |= (1L << 63);
+
+    } else {
+        // fixup User ECALL -> correct priv ECALL
+        if (cs->exception_index == NEW_RISCV_EXCP_U_ECALL) {
+            switch(get_field(env->csr[NEW_CSR_MSTATUS], MSTATUS_PRV)) {
+                case PRV_U:
+                    env->csr[NEW_CSR_MCAUSE] = NEW_RISCV_EXCP_U_ECALL;
+                    break;
+                case PRV_S:
+                    env->csr[NEW_CSR_MCAUSE] = NEW_RISCV_EXCP_S_ECALL;
+                    break;
+                case PRV_H:
+                    env->csr[NEW_CSR_MCAUSE] = NEW_RISCV_EXCP_H_ECALL;
+                    break;
+                case PRV_M:
+                    env->csr[NEW_CSR_MCAUSE] = NEW_RISCV_EXCP_M_ECALL;
+                    break;
+            }
+        } else {
+            env->csr[NEW_CSR_MCAUSE] = cs->exception_index;
+        }
+    }
+
+    // handle stack: enter machine mode with interrupts disabled
+    target_ulong next_mstatus = push_priv_stack(env->csr[NEW_CSR_MSTATUS]);
+    csr_write_helper(env, next_mstatus, NEW_CSR_MSTATUS);
+
+    // NOTE: CSR_BADVADDR should be set from the handler that raises the
+    // exception
+
+    cs->exception_index = EXCP_NONE; // mark handled to qemu
+}
diff --git a/target-riscv/helper.h b/target-riscv/helper.h
new file mode 100644
index 0000000..257bf71
--- /dev/null
+++ b/target-riscv/helper.h
@@ -0,0 +1,82 @@
+// TCG helper declarations for the RISC-V target; expanded by QEMU's
+// DEF_HELPER_* machinery ("tl" = target_ulong, "env" = CPU state pointer).
+
+// Exceptions
+DEF_HELPER_2(raise_exception, noreturn, env, i32)
+DEF_HELPER_1(raise_exception_debug, noreturn, env)
+DEF_HELPER_3(raise_exception_err, noreturn, env, i32, tl)
+DEF_HELPER_3(raise_exception_mbadaddr, noreturn, env, i32, tl)
+
+// MULHSU helper
+DEF_HELPER_3(mulhsu, tl, env, tl, tl)
+
+// Floating Point - fused multiply-add family
+DEF_HELPER_5(fmadd_s, tl, env, tl, tl, tl, tl)
+DEF_HELPER_5(fmadd_d, tl, env, tl, tl, tl, tl)
+DEF_HELPER_5(fmsub_s, tl, env, tl, tl, tl, tl)
+DEF_HELPER_5(fmsub_d, tl, env, tl, tl, tl, tl)
+DEF_HELPER_5(fnmsub_s, tl, env, tl, tl, tl, tl)
+DEF_HELPER_5(fnmsub_d, tl, env, tl, tl, tl, tl)
+DEF_HELPER_5(fnmadd_s, tl, env, tl, tl, tl, tl)
+DEF_HELPER_5(fnmadd_d, tl, env, tl, tl, tl, tl)
+
+// Floating Point - Single Precision
+DEF_HELPER_4(fadd_s, tl, env, tl, tl, tl)
+DEF_HELPER_4(fsub_s, tl, env, tl, tl, tl)
+DEF_HELPER_4(fmul_s, tl, env, tl, tl, tl)
+DEF_HELPER_4(fdiv_s, tl, env, tl, tl, tl)
+DEF_HELPER_3(fsgnj_s, tl, env, tl, tl)
+DEF_HELPER_3(fsgnjn_s, tl, env, tl, tl)
+DEF_HELPER_3(fsgnjx_s, tl, env, tl, tl)
+DEF_HELPER_3(fmin_s, tl, env, tl, tl)
+DEF_HELPER_3(fmax_s, tl, env, tl, tl)
+DEF_HELPER_3(fsqrt_s, tl, env, tl, tl)
+DEF_HELPER_3(fle_s, tl, env, tl, tl)
+DEF_HELPER_3(flt_s, tl, env, tl, tl)
+DEF_HELPER_3(feq_s, tl, env, tl, tl)
+DEF_HELPER_3(fcvt_w_s, tl, env, tl, tl)
+DEF_HELPER_3(fcvt_wu_s, tl, env, tl, tl)
+DEF_HELPER_3(fcvt_l_s, tl, env, tl, tl)
+DEF_HELPER_3(fcvt_lu_s, tl, env, tl, tl)
+DEF_HELPER_3(fcvt_s_w, tl, env, tl, tl)
+DEF_HELPER_3(fcvt_s_wu, tl, env, tl, tl)
+DEF_HELPER_3(fcvt_s_l, tl, env, tl, tl)
+DEF_HELPER_3(fcvt_s_lu, tl, env, tl, tl)
+DEF_HELPER_2(fclass_s, tl, env, tl)
+
+// Floating Point - Double Precision
+DEF_HELPER_4(fadd_d, tl, env, tl, tl, tl)
+DEF_HELPER_4(fsub_d, tl, env, tl, tl, tl)
+DEF_HELPER_4(fmul_d, tl, env, tl, tl, tl)
+DEF_HELPER_4(fdiv_d, tl, env, tl, tl, tl)
+DEF_HELPER_3(fsgnj_d, tl, env, tl, tl)
+DEF_HELPER_3(fsgnjn_d, tl, env, tl, tl)
+DEF_HELPER_3(fsgnjx_d, tl, env, tl, tl)
+DEF_HELPER_3(fmin_d, tl, env, tl, tl)
+DEF_HELPER_3(fmax_d, tl, env, tl, tl)
+DEF_HELPER_3(fcvt_s_d, tl, env, tl, tl)
+DEF_HELPER_3(fcvt_d_s, tl, env, tl, tl)
+DEF_HELPER_3(fsqrt_d, tl, env, tl, tl)
+DEF_HELPER_3(fle_d, tl, env, tl, tl)
+DEF_HELPER_3(flt_d, tl, env, tl, tl)
+DEF_HELPER_3(feq_d, tl, env, tl, tl)
+DEF_HELPER_3(fcvt_w_d, tl, env, tl, tl)
+DEF_HELPER_3(fcvt_wu_d, tl, env, tl, tl)
+DEF_HELPER_3(fcvt_l_d, tl, env, tl, tl)
+DEF_HELPER_3(fcvt_lu_d, tl, env, tl, tl)
+DEF_HELPER_3(fcvt_d_w, tl, env, tl, tl)
+DEF_HELPER_3(fcvt_d_wu, tl, env, tl, tl)
+DEF_HELPER_3(fcvt_d_l, tl, env, tl, tl)
+DEF_HELPER_3(fcvt_d_lu, tl, env, tl, tl)
+DEF_HELPER_2(fclass_d, tl, env, tl)
+
+/* Special functions (system emulation only) */
+#ifndef CONFIG_USER_ONLY
+DEF_HELPER_4(csrrw, tl, env, tl, tl, tl)
+DEF_HELPER_4(csrrs, tl, env, tl, tl, tl)
+DEF_HELPER_4(csrrsi, tl, env, tl, tl, tl)
+DEF_HELPER_4(csrrc, tl, env, tl, tl, tl)
+DEF_HELPER_2(sret, tl, env, tl)
+DEF_HELPER_3(debug_print, void, env, tl, tl)
+DEF_HELPER_2(mrts, tl, env, tl)
+DEF_HELPER_1(tlb_flush, void, env)
+DEF_HELPER_1(fence_i, void, env)
+#endif /* !CONFIG_USER_ONLY */
+//DEF_HELPER_1(wait, void, env)
diff --git a/target-riscv/instmap.h b/target-riscv/instmap.h
new file mode 100644
index 0000000..b61d66c
--- /dev/null
+++ b/target-riscv/instmap.h
@@ -0,0 +1,311 @@
+/*
+ * RISC-V emulation for qemu: Instruction decode helpers
+ *
+ * Author: Sagar Karandikar, address@hidden
+ *
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* Extract the 7-bit major opcode field (bits [6:0]) of a 32-bit insn.
+ * Macro argument parenthesized so callers may pass arbitrary expressions. */
+#define MASK_OP_MAJOR(op)  ((op) & 0x7F)
+enum {
+    /* rv32i, rv64i, rv32m */
+    OPC_RISC_LUI    = (0x37),
+    OPC_RISC_AUIPC  = (0x17),
+    OPC_RISC_JAL    = (0x6F),
+    OPC_RISC_JALR   = (0x67),
+    OPC_RISC_BRANCH = (0x63),
+    OPC_RISC_LOAD   = (0x03),
+    OPC_RISC_STORE  = (0x23),
+    OPC_RISC_ARITH_IMM  = (0x13),
+    OPC_RISC_ARITH      = (0x33),
+    OPC_RISC_FENCE      = (0x0F),
+    OPC_RISC_SYSTEM     = (0x73),
+
+    /* rv64i, rv64m */
+    OPC_RISC_ARITH_IMM_W = (0x1B),
+    OPC_RISC_ARITH_W = (0x3B),
+
+    /* rv32a, rv64a */
+    OPC_RISC_ATOMIC = (0x2F),
+
+    /* floating point */
+    OPC_RISC_FP_LOAD = (0x7),
+    OPC_RISC_FP_STORE = (0x27),
+
+    OPC_RISC_FMADD = (0x43),
+    OPC_RISC_FMSUB = (0x47),
+    OPC_RISC_FNMSUB = (0x4B),
+    OPC_RISC_FNMADD = (0x4F),
+
+    OPC_RISC_FP_ARITH = (0x53),
+};
+
+/* Major opcode plus funct3 (bits [14:12]) and funct7 (bits [31:25]).
+ * (Rejoined: the archived mail hard-wrapped this #define, breaking the
+ * patch.)  Argument parenthesized for macro hygiene. */
+#define MASK_OP_ARITH(op)   (MASK_OP_MAJOR(op) | ((op) & ((0x7 << 12) | (0x7F << 25))))
+enum {
+    OPC_RISC_ADD   = OPC_RISC_ARITH | (0x0 << 12) | (0x00 << 25),
+    OPC_RISC_SUB   = OPC_RISC_ARITH | (0x0 << 12) | (0x20 << 25),
+    OPC_RISC_SLL   = OPC_RISC_ARITH | (0x1 << 12) | (0x00 << 25),
+    OPC_RISC_SLT   = OPC_RISC_ARITH | (0x2 << 12) | (0x00 << 25),
+    OPC_RISC_SLTU  = OPC_RISC_ARITH | (0x3 << 12) | (0x00 << 25),
+    OPC_RISC_XOR   = OPC_RISC_ARITH | (0x4 << 12) | (0x00 << 25),
+    OPC_RISC_SRL   = OPC_RISC_ARITH | (0x5 << 12) | (0x00 << 25),
+    OPC_RISC_SRA   = OPC_RISC_ARITH | (0x5 << 12) | (0x20 << 25),
+    OPC_RISC_OR    = OPC_RISC_ARITH | (0x6 << 12) | (0x00 << 25),
+    OPC_RISC_AND   = OPC_RISC_ARITH | (0x7 << 12) | (0x00 << 25),
+
+    /* RV64M */
+    OPC_RISC_MUL    = OPC_RISC_ARITH | (0x0 << 12) | (0x01 << 25),
+    OPC_RISC_MULH   = OPC_RISC_ARITH | (0x1 << 12) | (0x01 << 25),
+    OPC_RISC_MULHSU = OPC_RISC_ARITH | (0x2 << 12) | (0x01 << 25),
+    OPC_RISC_MULHU  = OPC_RISC_ARITH | (0x3 << 12) | (0x01 << 25),
+
+    OPC_RISC_DIV    = OPC_RISC_ARITH | (0x4 << 12) | (0x01 << 25),
+    OPC_RISC_DIVU   = OPC_RISC_ARITH | (0x5 << 12) | (0x01 << 25),
+    OPC_RISC_REM    = OPC_RISC_ARITH | (0x6 << 12) | (0x01 << 25),
+    OPC_RISC_REMU   = OPC_RISC_ARITH | (0x7 << 12) | (0x01 << 25),
+};
+
+
+/* Major opcode plus funct3; the I-type immediate selects the operation */
+#define MASK_OP_ARITH_IMM(op)   (MASK_OP_MAJOR(op) | (op & (0x7 << 12)))
+enum {
+    OPC_RISC_ADDI   = OPC_RISC_ARITH_IMM | (0x0 << 12),
+    OPC_RISC_SLTI   = OPC_RISC_ARITH_IMM | (0x2 << 12),
+    OPC_RISC_SLTIU  = OPC_RISC_ARITH_IMM | (0x3 << 12),
+    OPC_RISC_XORI   = OPC_RISC_ARITH_IMM | (0x4 << 12),
+    OPC_RISC_ORI    = OPC_RISC_ARITH_IMM | (0x6 << 12),
+    OPC_RISC_ANDI   = OPC_RISC_ARITH_IMM | (0x7 << 12),
+    OPC_RISC_SLLI   = OPC_RISC_ARITH_IMM | (0x1 << 12), // shamt encoded in the immediate
+    OPC_RISC_SHIFT_RIGHT_I = OPC_RISC_ARITH_IMM | (0x5 << 12) // SRAI, SRLI
+};
+
+/* Major opcode plus funct3 selects the branch comparison */
+#define MASK_OP_BRANCH(op)     (MASK_OP_MAJOR(op) | (op & (0x7 << 12)))
+enum {
+    OPC_RISC_BEQ  = OPC_RISC_BRANCH  | (0x0  << 12),
+    OPC_RISC_BNE  = OPC_RISC_BRANCH  | (0x1  << 12),
+    OPC_RISC_BLT  = OPC_RISC_BRANCH  | (0x4  << 12),
+    OPC_RISC_BGE  = OPC_RISC_BRANCH  | (0x5  << 12),
+    OPC_RISC_BLTU = OPC_RISC_BRANCH  | (0x6  << 12),
+    OPC_RISC_BGEU = OPC_RISC_BRANCH  | (0x7  << 12)
+};
+
+/* RV64I 32-bit (word) immediate arithmetic: major opcode plus funct3 */
+#define MASK_OP_ARITH_IMM_W(op)   (MASK_OP_MAJOR(op) | (op & (0x7 << 12)))
+enum {
+    OPC_RISC_ADDIW   = OPC_RISC_ARITH_IMM_W | (0x0 << 12),
+    OPC_RISC_SLLIW   = OPC_RISC_ARITH_IMM_W | (0x1 << 12), // shamt encoded in the immediate
+    OPC_RISC_SHIFT_RIGHT_IW = OPC_RISC_ARITH_IMM_W | (0x5 << 12) // SRAIW, SRLIW
+};
+
+/* RV64I/RV64M word ops: major opcode plus funct3 and funct7.
+ * (Rejoined: mail hard-wrap had split this #define.)  Argument
+ * parenthesized for macro hygiene. */
+#define MASK_OP_ARITH_W(op)   (MASK_OP_MAJOR(op) | ((op) & ((0x7 << 12) | (0x7F << 25))))
+enum {
+    OPC_RISC_ADDW   = OPC_RISC_ARITH_W | (0x0 << 12) | (0x00 << 25),
+    OPC_RISC_SUBW   = OPC_RISC_ARITH_W | (0x0 << 12) | (0x20 << 25),
+    OPC_RISC_SLLW   = OPC_RISC_ARITH_W | (0x1 << 12) | (0x00 << 25),
+    OPC_RISC_SRLW   = OPC_RISC_ARITH_W | (0x5 << 12) | (0x00 << 25),
+    OPC_RISC_SRAW   = OPC_RISC_ARITH_W | (0x5 << 12) | (0x20 << 25),
+
+    /* RV64M */
+    OPC_RISC_MULW   = OPC_RISC_ARITH_W | (0x0 << 12) | (0x01 << 25),
+    OPC_RISC_DIVW   = OPC_RISC_ARITH_W | (0x4 << 12) | (0x01 << 25),
+    OPC_RISC_DIVUW  = OPC_RISC_ARITH_W | (0x5 << 12) | (0x01 << 25),
+    OPC_RISC_REMW   = OPC_RISC_ARITH_W | (0x6 << 12) | (0x01 << 25),
+    OPC_RISC_REMUW  = OPC_RISC_ARITH_W | (0x7 << 12) | (0x01 << 25),
+};
+
+/* Major opcode plus funct3 (width/signedness of the load) */
+#define MASK_OP_LOAD(op)   (MASK_OP_MAJOR(op) | (op & (0x7 << 12)))
+enum {
+    OPC_RISC_LB   = OPC_RISC_LOAD | (0x0 << 12),
+    OPC_RISC_LH   = OPC_RISC_LOAD | (0x1 << 12),
+    OPC_RISC_LW   = OPC_RISC_LOAD | (0x2 << 12),
+    OPC_RISC_LD   = OPC_RISC_LOAD | (0x3 << 12),
+    OPC_RISC_LBU  = OPC_RISC_LOAD | (0x4 << 12),
+    OPC_RISC_LHU  = OPC_RISC_LOAD | (0x5 << 12),
+    OPC_RISC_LWU  = OPC_RISC_LOAD | (0x6 << 12),
+};
+
+/* Major opcode plus funct3 (width of the store) */
+#define MASK_OP_STORE(op)   (MASK_OP_MAJOR(op) | (op & (0x7 << 12)))
+enum {
+    OPC_RISC_SB   = OPC_RISC_STORE | (0x0 << 12),
+    OPC_RISC_SH   = OPC_RISC_STORE | (0x1 << 12),
+    OPC_RISC_SW   = OPC_RISC_STORE | (0x2 << 12),
+    OPC_RISC_SD   = OPC_RISC_STORE | (0x3 << 12),
+};
+
+/* JALR has a single funct3 encoding (0), so the masked value equals
+ * OPC_RISC_JALR itself */
+#define MASK_OP_JALR(op)   (MASK_OP_MAJOR(op) | (op & (0x7 << 12)))
+// no enum since OPC_RISC_JALR is the actual value
+
+/* Atomic ops: major opcode, funct3 (width) and funct5 (bits [31:27]).
+ * The NO_AQ_RL variant drops the aq/rl ordering bits (26:25) so one
+ * case label matches all four orderings.  (Both #defines rejoined after
+ * mail hard-wrap corruption; arguments parenthesized.) */
+#define MASK_OP_ATOMIC(op)   (MASK_OP_MAJOR(op) | ((op) & ((0x7 << 12) | (0x7F << 25))))
+#define MASK_OP_ATOMIC_NO_AQ_RL(op)   (MASK_OP_MAJOR(op) | ((op) & ((0x7 << 12) | (0x1F << 27))))
+enum {
+    OPC_RISC_LR_W        = OPC_RISC_ATOMIC | (0x2 << 12) | (0x02 << 27),
+    OPC_RISC_SC_W        = OPC_RISC_ATOMIC | (0x2 << 12) | (0x03 << 27),
+    OPC_RISC_AMOSWAP_W   = OPC_RISC_ATOMIC | (0x2 << 12) | (0x01 << 27),
+    OPC_RISC_AMOADD_W    = OPC_RISC_ATOMIC | (0x2 << 12) | (0x00 << 27),
+    OPC_RISC_AMOXOR_W    = OPC_RISC_ATOMIC | (0x2 << 12) | (0x04 << 27),
+    OPC_RISC_AMOAND_W    = OPC_RISC_ATOMIC | (0x2 << 12) | (0x0C << 27),
+    OPC_RISC_AMOOR_W     = OPC_RISC_ATOMIC | (0x2 << 12) | (0x08 << 27),
+    OPC_RISC_AMOMIN_W    = OPC_RISC_ATOMIC | (0x2 << 12) | (0x10 << 27),
+    OPC_RISC_AMOMAX_W    = OPC_RISC_ATOMIC | (0x2 << 12) | (0x14 << 27),
+    OPC_RISC_AMOMINU_W   = OPC_RISC_ATOMIC | (0x2 << 12) | (0x18 << 27),
+    OPC_RISC_AMOMAXU_W   = OPC_RISC_ATOMIC | (0x2 << 12) | (0x1C << 27),
+
+    OPC_RISC_LR_D        = OPC_RISC_ATOMIC | (0x3 << 12) | (0x02 << 27),
+    OPC_RISC_SC_D        = OPC_RISC_ATOMIC | (0x3 << 12) | (0x03 << 27),
+    OPC_RISC_AMOSWAP_D   = OPC_RISC_ATOMIC | (0x3 << 12) | (0x01 << 27),
+    OPC_RISC_AMOADD_D    = OPC_RISC_ATOMIC | (0x3 << 12) | (0x00 << 27),
+    OPC_RISC_AMOXOR_D    = OPC_RISC_ATOMIC | (0x3 << 12) | (0x04 << 27),
+    OPC_RISC_AMOAND_D    = OPC_RISC_ATOMIC | (0x3 << 12) | (0x0C << 27),
+    OPC_RISC_AMOOR_D     = OPC_RISC_ATOMIC | (0x3 << 12) | (0x08 << 27),
+    OPC_RISC_AMOMIN_D    = OPC_RISC_ATOMIC | (0x3 << 12) | (0x10 << 27),
+    OPC_RISC_AMOMAX_D    = OPC_RISC_ATOMIC | (0x3 << 12) | (0x14 << 27),
+    OPC_RISC_AMOMINU_D   = OPC_RISC_ATOMIC | (0x3 << 12) | (0x18 << 27),
+    OPC_RISC_AMOMAXU_D   = OPC_RISC_ATOMIC | (0x3 << 12) | (0x1C << 27),
+};
+
+/* Major opcode plus funct3; CSR instructions differ in funct3 */
+#define MASK_OP_SYSTEM(op)   (MASK_OP_MAJOR(op) | (op & (0x7 << 12)))
+enum {
+    /* NOTE(review): all funct3==0 SYSTEM instructions below share one
+     * enumerator value; they must be disambiguated by the funct12 field
+     * in the decoder -- confirm translate.c does so. */
+    OPC_RISC_ECALL       = OPC_RISC_SYSTEM | (0x0 << 12),
+    OPC_RISC_EBREAK      = OPC_RISC_SYSTEM | (0x0 << 12),
+    OPC_RISC_ERET        = OPC_RISC_SYSTEM | (0x0 << 12),
+    OPC_RISC_MRTS        = OPC_RISC_SYSTEM | (0x0 << 12),
+    OPC_RISC_MRTH        = OPC_RISC_SYSTEM | (0x0 << 12),
+    OPC_RISC_HRTS        = OPC_RISC_SYSTEM | (0x0 << 12),
+    OPC_RISC_WFI         = OPC_RISC_SYSTEM | (0x0 << 12),
+    OPC_RISC_SFENCEVM    = OPC_RISC_SYSTEM | (0x0 << 12),
+
+    OPC_RISC_CSRRW       = OPC_RISC_SYSTEM | (0x1 << 12),
+    OPC_RISC_CSRRS       = OPC_RISC_SYSTEM | (0x2 << 12),
+    OPC_RISC_CSRRC       = OPC_RISC_SYSTEM | (0x3 << 12),
+    OPC_RISC_CSRRWI      = OPC_RISC_SYSTEM | (0x5 << 12),
+    OPC_RISC_CSRRSI      = OPC_RISC_SYSTEM | (0x6 << 12),
+    OPC_RISC_CSRRCI      = OPC_RISC_SYSTEM | (0x7 << 12),
+};
+
+/* FP load/store: major opcode plus funct3 (2 = word, 3 = double) */
+#define MASK_OP_FP_LOAD(op)   (MASK_OP_MAJOR(op) | (op & (0x7 << 12)))
+enum {
+    OPC_RISC_FLW   = OPC_RISC_FP_LOAD | (0x2 << 12),
+    OPC_RISC_FLD   = OPC_RISC_FP_LOAD | (0x3 << 12),
+};
+
+#define MASK_OP_FP_STORE(op)   (MASK_OP_MAJOR(op) | (op & (0x7 << 12)))
+enum {
+    OPC_RISC_FSW   = OPC_RISC_FP_STORE | (0x2 << 12),
+    OPC_RISC_FSD   = OPC_RISC_FP_STORE | (0x3 << 12),
+};
+
+/* Fused multiply-add family: major opcode plus the 2-bit fmt field at
+ * bits [26:25] (0 = single, 1 = double) */
+#define MASK_OP_FP_FMADD(op)   (MASK_OP_MAJOR(op) | (op & (0x3 << 25)))
+enum {
+    OPC_RISC_FMADD_S = OPC_RISC_FMADD | (0x0 << 25),
+    OPC_RISC_FMADD_D = OPC_RISC_FMADD | (0x1 << 25),
+};
+
+#define MASK_OP_FP_FMSUB(op)   (MASK_OP_MAJOR(op) | (op & (0x3 << 25)))
+enum {
+    OPC_RISC_FMSUB_S = OPC_RISC_FMSUB | (0x0 << 25),
+    OPC_RISC_FMSUB_D = OPC_RISC_FMSUB | (0x1 << 25),
+};
+
+#define MASK_OP_FP_FNMADD(op)   (MASK_OP_MAJOR(op) | (op & (0x3 << 25)))
+enum {
+    OPC_RISC_FNMADD_S = OPC_RISC_FNMADD | (0x0 << 25),
+    OPC_RISC_FNMADD_D = OPC_RISC_FNMADD | (0x1 << 25),
+};
+
+#define MASK_OP_FP_FNMSUB(op)   (MASK_OP_MAJOR(op) | (op & (0x3 << 25)))
+enum {
+    OPC_RISC_FNMSUB_S = OPC_RISC_FNMSUB | (0x0 << 25),
+    OPC_RISC_FNMSUB_D = OPC_RISC_FNMSUB | (0x1 << 25),
+};
+
+/* FP arithmetic: major opcode plus funct7.  Several mnemonics below
+ * intentionally share a funct7 value; they are further distinguished by
+ * funct3 (sign-injection, min/max, compares, FMV/FCLASS) or by rs2
+ * (the conversion group) elsewhere in the decoder. */
+#define MASK_OP_FP_ARITH(op)   (MASK_OP_MAJOR(op) | (op & (0x7F << 25)))
+enum {
+    // float
+    OPC_RISC_FADD_S    = OPC_RISC_FP_ARITH | (0x0 << 25),
+    OPC_RISC_FSUB_S    = OPC_RISC_FP_ARITH | (0x4 << 25),
+    OPC_RISC_FMUL_S    = OPC_RISC_FP_ARITH | (0x8 << 25),
+    OPC_RISC_FDIV_S    = OPC_RISC_FP_ARITH | (0xC << 25),
+
+    // same funct7; funct3 picks J/JN/JX
+    OPC_RISC_FSGNJ_S   = OPC_RISC_FP_ARITH | (0x10 << 25),
+    OPC_RISC_FSGNJN_S  = OPC_RISC_FP_ARITH | (0x10 << 25),
+    OPC_RISC_FSGNJX_S  = OPC_RISC_FP_ARITH | (0x10 << 25),
+
+    // same funct7; funct3 picks min/max
+    OPC_RISC_FMIN_S    = OPC_RISC_FP_ARITH | (0x14 << 25),
+    OPC_RISC_FMAX_S    = OPC_RISC_FP_ARITH | (0x14 << 25),
+
+    OPC_RISC_FSQRT_S   = OPC_RISC_FP_ARITH | (0x2C << 25),
+
+    // same funct7; funct3 picks eq/lt/le
+    OPC_RISC_FEQ_S     = OPC_RISC_FP_ARITH | (0x50 << 25),
+    OPC_RISC_FLT_S     = OPC_RISC_FP_ARITH | (0x50 << 25),
+    OPC_RISC_FLE_S     = OPC_RISC_FP_ARITH | (0x50 << 25),
+
+    // same funct7; rs2 picks the destination integer type
+    OPC_RISC_FCVT_W_S  = OPC_RISC_FP_ARITH | (0x60 << 25),
+    OPC_RISC_FCVT_WU_S = OPC_RISC_FP_ARITH | (0x60 << 25),
+    OPC_RISC_FCVT_L_S  = OPC_RISC_FP_ARITH | (0x60 << 25),
+    OPC_RISC_FCVT_LU_S = OPC_RISC_FP_ARITH | (0x60 << 25),
+
+    OPC_RISC_FCVT_S_W  = OPC_RISC_FP_ARITH | (0x68 << 25),
+    OPC_RISC_FCVT_S_WU = OPC_RISC_FP_ARITH | (0x68 << 25),
+    OPC_RISC_FCVT_S_L  = OPC_RISC_FP_ARITH | (0x68 << 25),
+    OPC_RISC_FCVT_S_LU = OPC_RISC_FP_ARITH | (0x68 << 25),
+
+    OPC_RISC_FMV_X_S   = OPC_RISC_FP_ARITH | (0x70 << 25),
+    OPC_RISC_FCLASS_S  = OPC_RISC_FP_ARITH | (0x70 << 25),
+
+    OPC_RISC_FMV_S_X   = OPC_RISC_FP_ARITH | (0x78 << 25),
+
+    // double
+    OPC_RISC_FADD_D    = OPC_RISC_FP_ARITH | (0x1 << 25),
+    OPC_RISC_FSUB_D    = OPC_RISC_FP_ARITH | (0x5 << 25),
+    OPC_RISC_FMUL_D    = OPC_RISC_FP_ARITH | (0x9 << 25),
+    OPC_RISC_FDIV_D    = OPC_RISC_FP_ARITH | (0xD << 25),
+
+    OPC_RISC_FSGNJ_D   = OPC_RISC_FP_ARITH | (0x11 << 25),
+    OPC_RISC_FSGNJN_D  = OPC_RISC_FP_ARITH | (0x11 << 25),
+    OPC_RISC_FSGNJX_D  = OPC_RISC_FP_ARITH | (0x11 << 25),
+
+    OPC_RISC_FMIN_D    = OPC_RISC_FP_ARITH | (0x15 << 25),
+    OPC_RISC_FMAX_D    = OPC_RISC_FP_ARITH | (0x15 << 25),
+
+    OPC_RISC_FCVT_S_D = OPC_RISC_FP_ARITH | (0x20 << 25),
+
+    OPC_RISC_FCVT_D_S = OPC_RISC_FP_ARITH | (0x21 << 25),
+
+    OPC_RISC_FSQRT_D   = OPC_RISC_FP_ARITH | (0x2D << 25),
+
+    OPC_RISC_FEQ_D     = OPC_RISC_FP_ARITH | (0x51 << 25),
+    OPC_RISC_FLT_D     = OPC_RISC_FP_ARITH | (0x51 << 25),
+    OPC_RISC_FLE_D     = OPC_RISC_FP_ARITH | (0x51 << 25),
+
+    OPC_RISC_FCVT_W_D  = OPC_RISC_FP_ARITH | (0x61 << 25),
+    OPC_RISC_FCVT_WU_D = OPC_RISC_FP_ARITH | (0x61 << 25),
+    OPC_RISC_FCVT_L_D  = OPC_RISC_FP_ARITH | (0x61 << 25),
+    OPC_RISC_FCVT_LU_D = OPC_RISC_FP_ARITH | (0x61 << 25),
+
+    OPC_RISC_FCVT_D_W  = OPC_RISC_FP_ARITH | (0x69 << 25),
+    OPC_RISC_FCVT_D_WU = OPC_RISC_FP_ARITH | (0x69 << 25),
+    OPC_RISC_FCVT_D_L  = OPC_RISC_FP_ARITH | (0x69 << 25),
+    OPC_RISC_FCVT_D_LU = OPC_RISC_FP_ARITH | (0x69 << 25),
+
+    OPC_RISC_FMV_X_D   = OPC_RISC_FP_ARITH | (0x71 << 25),
+    OPC_RISC_FCLASS_D  = OPC_RISC_FP_ARITH | (0x71 << 25),
+
+    OPC_RISC_FMV_D_X   = OPC_RISC_FP_ARITH | (0x79 << 25),
+};
+
+/* Immediate/field extraction macros.  (Rejoined after mail hard-wrap
+ * corruption; arguments parenthesized for macro hygiene.)
+ * NOTE(review): the (int32_t)x >> 31 sign-extension trick relies on
+ * arithmetic right shift of signed values, which QEMU assumes globally. */
+
+/* Sign-extended 13-bit B-type immediate; the implicit low zero bit is
+ * tacked on here, and bit 12 comes from the instruction's sign bit. */
+#define GET_B_IMM(inst)     ((int16_t)(((((inst) >> 25) & 0x3F) << 5) | ((((int32_t)(inst)) >> 31) << 12) | ((((inst) >> 8) & 0xF) << 1) | ((((inst) >> 7) & 0x1) << 11)))
+/* Sign-extended 12-bit S-type (store) immediate */
+#define GET_STORE_IMM(inst) ((int16_t)(((((int32_t)(inst)) >> 25) << 5) | (((inst) >> 7) & 0x1F)))
+/* Sign-extended 21-bit J-type immediate (implicit low zero bit) */
+#define GET_JAL_IMM(inst)   ((int32_t)(((inst) & 0xFF000) | ((((inst) >> 20) & 0x1) << 11) | ((((inst) >> 21) & 0x3FF) << 1) | ((((int32_t)(inst)) >> 31) << 20)))
+/* Rounding-mode field, bits [14:12] */
+#define GET_RM(inst)        (((inst) >> 12) & 0x7)
+/* Third source register, bits [31:27] (fused multiply-add forms) */
+#define GET_RS3(inst)       (((inst) >> 27) & 0x1F)
+
diff --git a/target-riscv/machine.c b/target-riscv/machine.c
new file mode 100644
index 0000000..b639b9d
--- /dev/null
+++ b/target-riscv/machine.c
@@ -0,0 +1,91 @@
+/*
+ * RISC-V CPU Machine State Helpers
+ *
+ * Author: Sagar Karandikar, address@hidden
+ *
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "hw/hw.h"
+#include "hw/boards.h"
+
+#include "cpu.h"
+
+/* Serialize one thread context: 32 GPRs, 32 FPRs, PC and the LR/SC
+ * load reservation, each written as a big-endian target_ulong.  Field
+ * order is the wire format -- keep in sync with load_tc(). */
+static void save_tc(QEMUFile *f, TCState *tc)
+{
+    int i;
+    /* Save active TC */
+    for (i = 0; i < 32; i++) {
+        qemu_put_betls(f, &tc->gpr[i]);
+    }
+    for (i = 0; i < 32; i++) {
+        qemu_put_betls(f, &tc->fpr[i]);
+    }
+    qemu_put_betls(f, &tc->PC);
+    qemu_put_betls(f, &tc->load_reservation);
+}
+
+/* Top-level vmstate save: active thread context, the current_tc index,
+ * then the full 4096-entry CSR address space.  Must mirror cpu_load(). */
+void cpu_save(QEMUFile *f, void *opaque)
+{
+    CPURISCVState *env = opaque;
+    int i;
+
+    /* Save active TC */
+    save_tc(f, &env->active_tc);
+
+    /* Save CPU metastate */
+    qemu_put_be32s(f, &env->current_tc);
+
+    /* All 4096 CSRs (the whole 12-bit CSR address space) */
+    for (i = 0; i < 4096; i++) {
+        qemu_put_betls(f, &env->csr[i]);
+    }
+}
+
+/* Deserialize one thread context; field order must match save_tc(). */
+static void load_tc(QEMUFile *f, TCState *tc)
+{
+    int i;
+    /* Load active TC */
+    for(i = 0; i < 32; i++) {
+        qemu_get_betls(f, &tc->gpr[i]);
+    }
+    for(i = 0; i < 32; i++) {
+        qemu_get_betls(f, &tc->fpr[i]);
+    }
+    qemu_get_betls(f, &tc->PC);
+    qemu_get_betls(f, &tc->load_reservation);
+}
+
+/* Top-level vmstate load, the inverse of cpu_save().
+ * Returns 0 on success, -EINVAL on an unsupported snapshot version.
+ * (Braces added to the version check per QEMU CODING_STYLE.) */
+int cpu_load(QEMUFile *f, void *opaque, int version_id)
+{
+    CPURISCVState *env = opaque;
+    RISCVCPU *cpu = riscv_env_get_cpu(env);
+    int i;
+
+    /* Only machine-state version 3 snapshots are understood */
+    if (version_id != 3) {
+        return -EINVAL;
+    }
+
+    /* Load active TC */
+    load_tc(f, &env->active_tc);
+
+    /* Load CPU metastate */
+    qemu_get_be32s(f, &env->current_tc);
+
+    /* All 4096 CSRs, same order as cpu_save() */
+    for (i = 0; i < 4096; i++) {
+        qemu_get_betls(f, &env->csr[i]);
+    }
+
+    /* Cached translations may reference pre-load state; drop them all */
+    tlb_flush(CPU(cpu), 1);
+    return 0;
+}
diff --git a/target-riscv/op_helper.c b/target-riscv/op_helper.c
new file mode 100644
index 0000000..614f8e1
--- /dev/null
+++ b/target-riscv/op_helper.c
@@ -0,0 +1,1037 @@
+/*
+ * RISC-V Emulation Helpers for QEMU.
+ *
+ * Author: Sagar Karandikar, address@hidden
+ *
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <stdlib.h>
+#include "cpu.h"
+#include "qemu/host-utils.h"
+#include "exec/helper-proto.h"
+
+// custom floating point includes. use this instead of qemu's included
+// fpu/softmmu since we know it already works exactly as desired for riscv
+#include "fpu-custom-riscv/softfloat.h"
+#include "fpu-custom-riscv/platform.h"
+#include "fpu-custom-riscv/internals.h"
+
+/* True iff @vm is a virtual-memory mode this target implements.
+ * (Opening braces moved to their own line per QEMU CODING_STYLE.) */
+static int validate_vm(target_ulong vm)
+{
+    return vm == VM_SV39 || vm == VM_SV48 || vm == VM_MBARE;
+}
+
+/* True iff @priv is a recognised privilege level (U/S/M) */
+static int validate_priv(target_ulong priv)
+{
+    return priv == PRV_U || priv == PRV_S || priv == PRV_M;
+}
+
+/* Exceptions processing helpers */
+
+/* Record @exception on the CPU and longjmp back to the main loop,
+ * restoring guest state from host retaddr @pc when it is non-zero.
+ * (Wrapped signature rejoined; brace style fixed per QEMU CODING_STYLE.) */
+static inline void QEMU_NORETURN do_raise_exception_err(CPURISCVState *env,
+                                                        uint32_t exception,
+                                                        uintptr_t pc)
+{
+    CPUState *cs = CPU(riscv_env_get_cpu(env));
+    /* NOTE(review): unconditional qemu_log is noisy on hot paths;
+     * consider qemu_log_mask(CPU_LOG_INT, ...) */
+    qemu_log("%s: %d\n", __func__, exception);
+    cs->exception_index = exception;
+    cpu_loop_exit_restore(cs, pc);
+}
+
+void helper_raise_exception(CPURISCVState *env, uint32_t exception)
+{
+    do_raise_exception_err(env, exception, 0);
+}
+
+/* Raise the (host-side) debug exception used for breakpoints */
+void helper_raise_exception_debug(CPURISCVState *env)
+{
+    do_raise_exception_err(env, EXCP_DEBUG, 0);
+}
+
+void helper_raise_exception_err(CPURISCVState *env, uint32_t exception,
+                                target_ulong pc)
+{
+    do_raise_exception_err(env, exception, pc);
+}
+
+/* Raise @exception after latching the faulting address into mbadaddr */
+void helper_raise_exception_mbadaddr(CPURISCVState *env, uint32_t exception,
+                                     target_ulong bad_pc)
+{
+    env->csr[NEW_CSR_MBADADDR] = bad_pc;
+    do_raise_exception_err(env, exception, 0);
+}
+
+/* floating point */
+
+/* Fused multiply-add helpers (RV32/64 F and D).  Operands arrive and
+ * leave as raw IEEE-754 bit patterns; the negated variants flip the sign
+ * bit of the relevant operand(s) before the fused softfloat operation.
+ * set_fp_exceptions latches the softfloat flags into the FP CSR state.
+ * NOTE(review): @rm is never read directly here; RISCV_RM presumably
+ * resolves the rounding mode from it or from FRM -- confirm the macro.
+ * (All mail-wrapped signatures/expressions in this group rejoined.) */
+uint64_t helper_fmadd_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
+                        uint64_t frs3, uint64_t rm)
+{
+    softfloat_roundingMode = RISCV_RM;
+    frs1 = f32_mulAdd(frs1, frs2, frs3);
+    set_fp_exceptions;
+    return frs1;
+}
+
+uint64_t helper_fmadd_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
+                        uint64_t frs3, uint64_t rm)
+{
+    softfloat_roundingMode = RISCV_RM;
+    frs1 = f64_mulAdd(frs1, frs2, frs3);
+    set_fp_exceptions;
+    return frs1;
+}
+
+/* fmsub: rs1 * rs2 - rs3 (addend sign bit flipped) */
+uint64_t helper_fmsub_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
+                        uint64_t frs3, uint64_t rm)
+{
+    softfloat_roundingMode = RISCV_RM;
+    frs1 = f32_mulAdd(frs1, frs2, frs3 ^ (uint32_t)INT32_MIN);
+    set_fp_exceptions;
+    return frs1;
+}
+
+uint64_t helper_fmsub_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
+                        uint64_t frs3, uint64_t rm)
+{
+    softfloat_roundingMode = RISCV_RM;
+    frs1 = f64_mulAdd(frs1, frs2, frs3 ^ (uint64_t)INT64_MIN);
+    set_fp_exceptions;
+    return frs1;
+}
+
+/* fnmsub: -(rs1 * rs2) + rs3 (multiplicand sign bit flipped) */
+uint64_t helper_fnmsub_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
+                         uint64_t frs3, uint64_t rm)
+{
+    softfloat_roundingMode = RISCV_RM;
+    frs1 = f32_mulAdd(frs1 ^ (uint32_t)INT32_MIN, frs2, frs3);
+    set_fp_exceptions;
+    return frs1;
+}
+
+uint64_t helper_fnmsub_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
+                         uint64_t frs3, uint64_t rm)
+{
+    softfloat_roundingMode = RISCV_RM;
+    frs1 = f64_mulAdd(frs1 ^ (uint64_t)INT64_MIN, frs2, frs3);
+    set_fp_exceptions;
+    return frs1;
+}
+
+/* fnmadd: -(rs1 * rs2) - rs3 (both sign bits flipped) */
+uint64_t helper_fnmadd_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
+                         uint64_t frs3, uint64_t rm)
+{
+    softfloat_roundingMode = RISCV_RM;
+    frs1 = f32_mulAdd(frs1 ^ (uint32_t)INT32_MIN, frs2,
+                      frs3 ^ (uint32_t)INT32_MIN);
+    set_fp_exceptions;
+    return frs1;
+}
+
+uint64_t helper_fnmadd_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
+                         uint64_t frs3, uint64_t rm)
+{
+    softfloat_roundingMode = RISCV_RM;
+    frs1 = f64_mulAdd(frs1 ^ (uint64_t)INT64_MIN, frs2,
+                      frs3 ^ (uint64_t)INT64_MIN);
+    set_fp_exceptions;
+    return frs1;
+}
+
+/* Single-precision arithmetic, compare, conversion and classify helpers.
+ * add/sub/mul are expressed through the fused multiply-add primitive:
+ * 0x3f800000 is 1.0f, so a+b == fma(a, 1.0, b); for mul the addend is a
+ * signed zero matching the product's sign so the result is the plain
+ * product.  (Mail-wrapped signatures in this group rejoined.) */
+uint64_t helper_fadd_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
+                       uint64_t rm)
+{
+    softfloat_roundingMode = RISCV_RM;
+    frs1 = f32_mulAdd(frs1, 0x3f800000, frs2);
+    set_fp_exceptions;
+    return frs1;
+}
+
+uint64_t helper_fsub_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
+                       uint64_t rm)
+{
+    softfloat_roundingMode = RISCV_RM;
+    frs1 = f32_mulAdd(frs1, 0x3f800000, frs2 ^ (uint32_t)INT32_MIN);
+    set_fp_exceptions;
+    return frs1;
+}
+
+uint64_t helper_fmul_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
+                       uint64_t rm)
+{
+    softfloat_roundingMode = RISCV_RM;
+    frs1 = f32_mulAdd(frs1, frs2, (frs1 ^ frs2) & (uint32_t)INT32_MIN);
+    set_fp_exceptions;
+    return frs1;
+}
+
+uint64_t helper_fdiv_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
+                       uint64_t rm)
+{
+    softfloat_roundingMode = RISCV_RM;
+    frs1 = f32_div(frs1, frs2);
+    set_fp_exceptions;
+    return frs1;
+}
+
+/* Sign-injection: result = magnitude of rs1 with rs2's (plain/negated/
+ * XORed) sign bit; pure bit manipulation, raises no FP flags */
+uint64_t helper_fsgnj_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
+{
+    frs1 = (frs1 &~ (uint32_t)INT32_MIN) | (frs2 & (uint32_t)INT32_MIN);
+    return frs1;
+}
+
+uint64_t helper_fsgnjn_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
+{
+    frs1 = (frs1 &~ (uint32_t)INT32_MIN) | ((~frs2) & (uint32_t)INT32_MIN);
+    return frs1;
+}
+
+uint64_t helper_fsgnjx_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
+{
+    frs1 = frs1 ^ (frs2 & (uint32_t)INT32_MIN);
+    return frs1;
+}
+
+/* min/max return frs1 when frs2 is NaN */
+uint64_t helper_fmin_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
+{
+    frs1 = isNaNF32UI(frs2) || f32_lt_quiet(frs1, frs2) ? frs1 : frs2;
+    set_fp_exceptions;
+    return frs1;
+}
+
+uint64_t helper_fmax_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
+{
+    frs1 = isNaNF32UI(frs2) || f32_le_quiet(frs2, frs1) ? frs1 : frs2;
+    set_fp_exceptions;
+    return frs1;
+}
+
+uint64_t helper_fsqrt_s(CPURISCVState *env, uint64_t frs1, uint64_t rm)
+{
+    softfloat_roundingMode = RISCV_RM;
+    frs1 = f32_sqrt(frs1);
+    set_fp_exceptions;
+    return frs1;
+}
+
+/* Signaling compares: return 0/1 */
+uint64_t helper_fle_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
+{
+    frs1 = f32_le(frs1, frs2);
+    set_fp_exceptions;
+    return frs1;
+}
+
+uint64_t helper_flt_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
+{
+    frs1 = f32_lt(frs1, frs2);
+    set_fp_exceptions;
+    return frs1;
+}
+
+uint64_t helper_feq_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
+{
+    frs1 = f32_eq(frs1, frs2);
+    set_fp_exceptions;
+    return frs1;
+}
+
+/* float -> integer conversions; 32-bit results are sign-extended into
+ * the 64-bit register per RV64 convention */
+uint64_t helper_fcvt_w_s(CPURISCVState *env, uint64_t frs1, uint64_t rm)
+{
+    softfloat_roundingMode = RISCV_RM;
+    frs1 = (int64_t)((int32_t)f32_to_i32(frs1, RISCV_RM, true));
+    set_fp_exceptions;
+    return frs1;
+}
+
+uint64_t helper_fcvt_wu_s(CPURISCVState *env, uint64_t frs1, uint64_t rm)
+{
+    softfloat_roundingMode = RISCV_RM;
+    frs1 = (int64_t)((int32_t)f32_to_ui32(frs1, RISCV_RM, true));
+    set_fp_exceptions;
+    return frs1;
+}
+
+uint64_t helper_fcvt_l_s(CPURISCVState *env, uint64_t frs1, uint64_t rm)
+{
+    softfloat_roundingMode = RISCV_RM;
+    frs1 = f32_to_i64(frs1, RISCV_RM, true);
+    set_fp_exceptions;
+    return frs1;
+}
+
+uint64_t helper_fcvt_lu_s(CPURISCVState *env, uint64_t frs1, uint64_t rm)
+{
+    softfloat_roundingMode = RISCV_RM;
+    frs1 = f32_to_ui64(frs1, RISCV_RM, true);
+    set_fp_exceptions;
+    return frs1;
+}
+
+/* integer -> float conversions */
+uint64_t helper_fcvt_s_w(CPURISCVState *env, uint64_t rs1, uint64_t rm)
+{
+    softfloat_roundingMode = RISCV_RM;
+    rs1 = i32_to_f32((int32_t)rs1);
+    set_fp_exceptions;
+    return rs1;
+}
+
+uint64_t helper_fcvt_s_wu(CPURISCVState *env, uint64_t rs1, uint64_t rm)
+{
+    softfloat_roundingMode = RISCV_RM;
+    rs1 = ui32_to_f32((uint32_t)rs1);
+    set_fp_exceptions;
+    return rs1;
+}
+
+uint64_t helper_fcvt_s_l(CPURISCVState *env, uint64_t rs1, uint64_t rm)
+{
+    softfloat_roundingMode = RISCV_RM;
+    rs1 = i64_to_f32(rs1);
+    set_fp_exceptions;
+    return rs1;
+}
+
+uint64_t helper_fcvt_s_lu(CPURISCVState *env, uint64_t rs1, uint64_t rm)
+{
+    softfloat_roundingMode = RISCV_RM;
+    rs1 = ui64_to_f32(rs1);
+    set_fp_exceptions;
+    return rs1;
+}
+
+/* FCLASS.S: 10-bit classification mask, raises no FP flags */
+uint64_t helper_fclass_s(CPURISCVState *env, uint64_t frs1)
+{
+    frs1 = f32_classify(frs1);
+    return frs1;
+}
+
+/* Double-precision helpers, mirroring the single-precision group above:
+ * 0x3ff0000000000000ULL is 1.0, so add/sub/mul go through f64_mulAdd.
+ * (Mail-wrapped signatures in this group rejoined.) */
+uint64_t helper_fadd_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
+                       uint64_t rm)
+{
+    softfloat_roundingMode = RISCV_RM;
+    frs1 = f64_mulAdd(frs1, 0x3ff0000000000000ULL, frs2);
+    set_fp_exceptions;
+    return frs1;
+}
+
+uint64_t helper_fsub_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
+                       uint64_t rm)
+{
+    softfloat_roundingMode = RISCV_RM;
+    frs1 = f64_mulAdd(frs1, 0x3ff0000000000000ULL, frs2 ^ (uint64_t)INT64_MIN);
+    set_fp_exceptions;
+    return frs1;
+}
+
+uint64_t helper_fmul_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
+                       uint64_t rm)
+{
+    softfloat_roundingMode = RISCV_RM;
+    frs1 = f64_mulAdd(frs1, frs2, (frs1 ^ frs2) & (uint64_t)INT64_MIN);
+    set_fp_exceptions;
+    return frs1;
+}
+
+uint64_t helper_fdiv_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
+                       uint64_t rm)
+{
+    softfloat_roundingMode = RISCV_RM;
+    frs1 = f64_div(frs1, frs2);
+    set_fp_exceptions;
+    return frs1;
+}
+
+/* Sign-injection on the 64-bit sign bit; raises no FP flags */
+uint64_t helper_fsgnj_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
+{
+    frs1 = (frs1 &~ INT64_MIN) | (frs2 & INT64_MIN);
+    return frs1;
+}
+
+uint64_t helper_fsgnjn_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
+{
+    frs1 = (frs1 &~ INT64_MIN) | ((~frs2) & INT64_MIN);
+    return frs1;
+}
+
+uint64_t helper_fsgnjx_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
+{
+    frs1 = frs1 ^ (frs2 & INT64_MIN);
+    return frs1;
+}
+
+/* min/max return frs1 when frs2 is NaN */
+uint64_t helper_fmin_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
+{
+    frs1 = isNaNF64UI(frs2) || f64_lt_quiet(frs1, frs2) ? frs1 : frs2;
+    set_fp_exceptions;
+    return frs1;
+}
+
+uint64_t helper_fmax_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
+{
+    frs1 = isNaNF64UI(frs2) || f64_le_quiet(frs2, frs1) ? frs1 : frs2;
+    set_fp_exceptions;
+    return frs1;
+}
+
+/* double <-> single conversions */
+uint64_t helper_fcvt_s_d(CPURISCVState *env, uint64_t rs1, uint64_t rm)
+{
+    softfloat_roundingMode = RISCV_RM;
+    rs1 = f64_to_f32(rs1);
+    set_fp_exceptions;
+    return rs1;
+}
+
+uint64_t helper_fcvt_d_s(CPURISCVState *env, uint64_t rs1, uint64_t rm)
+{
+    softfloat_roundingMode = RISCV_RM;
+    rs1 = f32_to_f64(rs1);
+    set_fp_exceptions;
+    return rs1;
+}
+
+uint64_t helper_fsqrt_d(CPURISCVState *env, uint64_t frs1, uint64_t rm)
+{
+    softfloat_roundingMode = RISCV_RM;
+    frs1 = f64_sqrt(frs1);
+    set_fp_exceptions;
+    return frs1;
+}
+
+/* Signaling compares: return 0/1 */
+uint64_t helper_fle_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
+{
+    frs1 = f64_le(frs1, frs2);
+    set_fp_exceptions;
+    return frs1;
+}
+
+uint64_t helper_flt_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
+{
+    frs1 = f64_lt(frs1, frs2);
+    set_fp_exceptions;
+    return frs1;
+}
+
+uint64_t helper_feq_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
+{
+    frs1 = f64_eq(frs1, frs2);
+    set_fp_exceptions;
+    return frs1;
+}
+
+/* double -> integer conversions; 32-bit results sign-extended */
+uint64_t helper_fcvt_w_d(CPURISCVState *env, uint64_t frs1, uint64_t rm)
+{
+    softfloat_roundingMode = RISCV_RM;
+    frs1 = (int64_t)((int32_t)f64_to_i32(frs1, RISCV_RM, true));
+    set_fp_exceptions;
+    return frs1;
+}
+
+uint64_t helper_fcvt_wu_d(CPURISCVState *env, uint64_t frs1, uint64_t rm)
+{
+    softfloat_roundingMode = RISCV_RM;
+    frs1 = (int64_t)((int32_t)f64_to_ui32(frs1, RISCV_RM, true));
+    set_fp_exceptions;
+    return frs1;
+}
+
+uint64_t helper_fcvt_l_d(CPURISCVState *env, uint64_t frs1, uint64_t rm)
+{
+    softfloat_roundingMode = RISCV_RM;
+    frs1 = f64_to_i64(frs1, RISCV_RM, true);
+    set_fp_exceptions;
+    return frs1;
+}
+
+uint64_t helper_fcvt_lu_d(CPURISCVState *env, uint64_t frs1, uint64_t rm)
+{
+    softfloat_roundingMode = RISCV_RM;
+    frs1 = f64_to_ui64(frs1, RISCV_RM, true);
+    set_fp_exceptions;
+    return frs1;
+}
+
+/* integer -> double conversions */
+uint64_t helper_fcvt_d_w(CPURISCVState *env, uint64_t frs1, uint64_t rm)
+{
+    softfloat_roundingMode = RISCV_RM;
+    frs1 = i32_to_f64((int32_t)frs1);
+    set_fp_exceptions;
+    return frs1;
+}
+
+uint64_t helper_fcvt_d_wu(CPURISCVState *env, uint64_t frs1, uint64_t rm)
+{
+    softfloat_roundingMode = RISCV_RM;
+    frs1 = ui32_to_f64((uint32_t)frs1);
+    set_fp_exceptions;
+    return frs1;
+}
+
+uint64_t helper_fcvt_d_l(CPURISCVState *env, uint64_t frs1, uint64_t rm)
+{
+    softfloat_roundingMode = RISCV_RM;
+    frs1 = i64_to_f64(frs1);
+    set_fp_exceptions;
+    return frs1;
+}
+
+uint64_t helper_fcvt_d_lu(CPURISCVState *env, uint64_t frs1, uint64_t rm)
+{
+    softfloat_roundingMode = RISCV_RM;
+    frs1 = ui64_to_f64(frs1);
+    set_fp_exceptions;
+    return frs1;
+}
+
+/* FCLASS.D: classification mask, raises no FP flags */
+uint64_t helper_fclass_d(CPURISCVState *env, uint64_t frs1)
+{
+    frs1 = f64_classify(frs1);
+    return frs1;
+}
+
+/* MULHSU: upper 64 bits of signed(arg1) * unsigned(arg2), computed in a
+ * 128-bit intermediate.  NOTE(review): relies on the compiler-provided
+ * __int128_t (GCC/Clang) and a 64-bit target_ulong. */
+target_ulong helper_mulhsu(CPURISCVState *env, target_ulong arg1,
+                          target_ulong arg2)
+{
+    int64_t sval = arg1;
+    uint64_t uval = arg2;
+    return (int64_t)(((__int128_t)sval * uval) >> 64);
+}
+
+/*
+ * Handle writes to CSRs and any resulting special behavior
+ *
+ * Note: mtohost and mfromhost are not handled here
+ */
+inline void csr_write_helper(CPURISCVState *env, target_ulong val_to_write,
+        target_ulong csrno)
+{
+    #ifdef RISCV_DEBUG_PRINT
+    fprintf(stderr, "Write CSR reg: 0x" TARGET_FMT_lx "\n", csrno);
+    fprintf(stderr, "Write CSR val: 0x" TARGET_FMT_lx "\n", val_to_write);
+    #endif
+
+    switch (csrno)
+    {
+    case NEW_CSR_FFLAGS:
+        // any FP CSR write marks FP state dirty (FS) and the summary bit SD
+        env->csr[NEW_CSR_MSTATUS] |= MSTATUS_FS | MSTATUS64_SD;
+        env->csr[NEW_CSR_FFLAGS] = val_to_write & (FSR_AEXC >> FSR_AEXC_SHIFT);
+        break;
+    case NEW_CSR_FRM:
+        env->csr[NEW_CSR_MSTATUS] |= MSTATUS_FS | MSTATUS64_SD;
+        env->csr[NEW_CSR_FRM] = val_to_write & (FSR_RD >> FSR_RD_SHIFT);
+        break;
+    case NEW_CSR_FCSR:
+        // fcsr is not stored directly; split into fflags/frm shadow CSRs
+        env->csr[NEW_CSR_MSTATUS] |= MSTATUS_FS | MSTATUS64_SD;
+        env->csr[NEW_CSR_FFLAGS] = (val_to_write & FSR_AEXC) >> FSR_AEXC_SHIFT;
+        env->csr[NEW_CSR_FRM] = (val_to_write & FSR_RD) >> FSR_RD_SHIFT;
+        break;
+    case NEW_CSR_MTIME:
+    case NEW_CSR_STIMEW:
+        // this implementation ignores writes to MTIME
+        break;
+    case NEW_CSR_MTIMEH:
+    case NEW_CSR_STIMEHW:
+        // this implementation ignores writes to MTIME
+        break;
+    case NEW_CSR_TIMEW:
+        cpu_riscv_store_timew(env, val_to_write);
+        break;
+    case NEW_CSR_TIMEHW:
+        // *H CSRs only exist on RV32
+        fprintf(stderr, "CSR_TIMEHW unsupported on RV64I\n");
+        exit(1);
+        break;
+    case NEW_CSR_CYCLEW:
+    case NEW_CSR_INSTRETW:
+        cpu_riscv_store_instretw(env, val_to_write);
+        break;
+    case NEW_CSR_CYCLEHW:
+    case NEW_CSR_INSTRETHW:
+        fprintf(stderr, "CSR_CYCLEHW/INSTRETHW unsupported on RV64I\n");
+        exit(1);
+        break;
+    case NEW_CSR_MSTATUS: {
+        target_ulong mstatus = env->csr[NEW_CSR_MSTATUS];
+        // changing VM/PRV/PRV1/MPRV changes the active address translation,
+        // so QEMU's softmmu TLB must be flushed
+        if ((val_to_write ^ mstatus) &
+                (MSTATUS_VM | MSTATUS_PRV | MSTATUS_PRV1 | MSTATUS_MPRV)) {
+            #ifdef RISCV_DEBUG_PRINT
+            fprintf(stderr, "flushing TLB\n");
+            #endif
+            helper_tlb_flush(env);
+        }
+
+        // no extension support
+        target_ulong mask = MSTATUS_IE | MSTATUS_IE1 | MSTATUS_IE2
+            | MSTATUS_MPRV | MSTATUS_FS;
+
+        // only accept legal VM / privilege-level encodings; illegal values
+        // leave the corresponding field unchanged
+        if (validate_vm(get_field(val_to_write, MSTATUS_VM))) {
+            mask |= MSTATUS_VM;
+        }
+        if (validate_priv(get_field(val_to_write, MSTATUS_PRV))) {
+            mask |= MSTATUS_PRV;
+        }
+        if (validate_priv(get_field(val_to_write, MSTATUS_PRV1))) {
+            mask |= MSTATUS_PRV1;
+        }
+        if (validate_priv(get_field(val_to_write, MSTATUS_PRV2))) {
+            mask |= MSTATUS_PRV2;
+        }
+
+        mstatus = (mstatus & ~mask) | (val_to_write & mask);
+
+        // SD is a read-only summary bit: set iff FS or XS is dirty
+        int dirty = (mstatus & MSTATUS_FS) == MSTATUS_FS;
+        dirty |= (mstatus & MSTATUS_XS) == MSTATUS_XS;
+        mstatus = set_field(mstatus, MSTATUS64_SD, dirty);
+        env->csr[NEW_CSR_MSTATUS] = mstatus;
+        break;
+    }
+    case NEW_CSR_MIP: {
+        // only software/supervisor-timer pending bits are writable here
+        target_ulong mask = MIP_SSIP | MIP_MSIP | MIP_STIP;
+        env->csr[NEW_CSR_MIP] = (env->csr[NEW_CSR_MIP] & ~mask) |
+            (val_to_write & mask);
+        // mirror the pending bits into the board's softint device;
+        // NOTE(review): addresses must match hw/riscv/softint.c - confirm
+        CPUState *cs = CPU(riscv_env_get_cpu(env));
+        if (env->csr[NEW_CSR_MIP] & MIP_SSIP) {
+            stw_phys(cs->as, 0xFFFFFFFFF0000020, 0x1);
+        } else {
+            stw_phys(cs->as, 0xFFFFFFFFF0000020, 0x0);
+        }
+        if (env->csr[NEW_CSR_MIP] & MIP_STIP) {
+            stw_phys(cs->as, 0xFFFFFFFFF0000040, 0x1);
+        } else {
+            stw_phys(cs->as, 0xFFFFFFFFF0000040, 0x0);
+        }
+        if (env->csr[NEW_CSR_MIP] & MIP_MSIP) {
+            stw_phys(cs->as, 0xFFFFFFFFF0000060, 0x1);
+        } else {
+            stw_phys(cs->as, 0xFFFFFFFFF0000060, 0x0);
+        }
+        break;
+    }
+    case NEW_CSR_MIPI: {
+        // inter-processor interrupt: only bit 0 (machine software pending)
+        CPUState *cs = CPU(riscv_env_get_cpu(env));
+        env->csr[NEW_CSR_MIP] = set_field(env->csr[NEW_CSR_MIP], MIP_MSIP,
+                val_to_write & 1);
+        if (env->csr[NEW_CSR_MIP] & MIP_MSIP) {
+            stw_phys(cs->as, 0xFFFFFFFFF0000060, 0x1);
+        } else {
+            stw_phys(cs->as, 0xFFFFFFFFF0000060, 0x0);
+        }
+        break;
+    }
+    case NEW_CSR_MIE: {
+        target_ulong mask = MIP_SSIP | MIP_MSIP | MIP_STIP | MIP_MTIP;
+        env->csr[NEW_CSR_MIE] = (env->csr[NEW_CSR_MIE] & ~mask) |
+            (val_to_write & mask);
+        break;
+    }
+    case NEW_CSR_SSTATUS: {
+        // sstatus is a restricted view of mstatus: merge the S-visible
+        // fields into mstatus and write that back through this helper
+        target_ulong ms = env->csr[NEW_CSR_MSTATUS];
+        ms = set_field(ms, MSTATUS_IE, get_field(val_to_write, SSTATUS_IE));
+        ms = set_field(ms, MSTATUS_IE1, get_field(val_to_write, SSTATUS_PIE));
+        ms = set_field(ms, MSTATUS_PRV1, get_field(val_to_write, SSTATUS_PS));
+        ms = set_field(ms, MSTATUS_FS, get_field(val_to_write, SSTATUS_FS));
+        ms = set_field(ms, MSTATUS_XS, get_field(val_to_write, SSTATUS_XS));
+        ms = set_field(ms, MSTATUS_MPRV, get_field(val_to_write,
+                SSTATUS_MPRV));
+        csr_write_helper(env, ms, NEW_CSR_MSTATUS);
+        break;
+    }
+    case NEW_CSR_SIP: {
+        target_ulong mask = MIP_SSIP;
+        env->csr[NEW_CSR_MIP] = (env->csr[NEW_CSR_MIP] & ~mask) |
+            (val_to_write & mask);
+        CPUState *cs = CPU(riscv_env_get_cpu(env));
+        if (env->csr[NEW_CSR_MIP] & MIP_SSIP) {
+            stw_phys(cs->as, 0xFFFFFFFFF0000020, 0x1);
+        } else {
+            stw_phys(cs->as, 0xFFFFFFFFF0000020, 0x0);
+        }
+        break;
+    }
+    case NEW_CSR_SIE: {
+        target_ulong mask = MIP_SSIP | MIP_STIP;
+        env->csr[NEW_CSR_MIE] = (env->csr[NEW_CSR_MIE] & ~mask) |
+            (val_to_write & mask);
+        break;
+    }
+    case NEW_CSR_SEPC:
+        env->csr[NEW_CSR_SEPC] = val_to_write;
+        break;
+    case NEW_CSR_STVEC:
+        // trap vector must be 4-byte aligned; drop the low two bits
+        env->csr[NEW_CSR_STVEC] = val_to_write >> 2 << 2;
+        break;
+    case NEW_CSR_SPTBR:
+        // page-table base is page-aligned
+        env->csr[NEW_CSR_SPTBR] = val_to_write & -(1L << PGSHIFT);
+        break;
+    case NEW_CSR_SSCRATCH:
+        env->csr[NEW_CSR_SSCRATCH] = val_to_write;
+        break;
+    case NEW_CSR_MEPC:
+        env->csr[NEW_CSR_MEPC] = val_to_write;
+        break;
+    case NEW_CSR_MSCRATCH:
+        env->csr[NEW_CSR_MSCRATCH] = val_to_write;
+        break;
+    case NEW_CSR_MCAUSE:
+        env->csr[NEW_CSR_MCAUSE] = val_to_write;
+        break;
+    case NEW_CSR_MBADADDR:
+        env->csr[NEW_CSR_MBADADDR] = val_to_write;
+        break;
+    case NEW_CSR_MTIMECMP:
+        // NOTE: clearing bit in MIP handled in cpu_riscv_store_compare
+        cpu_riscv_store_compare(env, val_to_write);
+        break;
+    case NEW_CSR_MTOHOST:
+        fprintf(stderr, "Write to mtohost should not be handled here\n");
+        exit(1);
+        break;
+    case NEW_CSR_MFROMHOST:
+        fprintf(stderr, "Write to mfromhost should not be handled here\n");
+        exit(1);
+        break;
+    }
+    // writes to any other CSR number are silently ignored (no default case)
+}
+
+/*
+ * Handle reads to CSRs and any resulting special behavior
+ *
+ * Note: mtohost and mfromhost are not handled here
+ *
+ * Exits the emulator for CSRs that only exist on RV32 and for reads of
+ * unknown CSR numbers (privilege/read-only checks happen in validate_csr).
+ */
+inline target_ulong csr_read_helper(CPURISCVState *env, target_ulong csrno)
+{
+    // narrow to int so the switch compares against the enum constants
+    int csrno2 = (int)csrno;
+    #ifdef RISCV_DEBUG_PRINT
+    fprintf(stderr, "READ CSR 0x%x\n", csrno2);
+    #endif
+
+    switch (csrno2)
+    {
+    case NEW_CSR_FFLAGS:
+        return env->csr[NEW_CSR_FFLAGS];
+    case NEW_CSR_FRM:
+        return env->csr[NEW_CSR_FRM];
+    case NEW_CSR_FCSR:
+        // fcsr is reassembled from the separate fflags/frm shadow CSRs
+        return (env->csr[NEW_CSR_FFLAGS] << FSR_AEXC_SHIFT) |
+            (env->csr[NEW_CSR_FRM] << FSR_RD_SHIFT);
+    case NEW_CSR_MTIME:
+        return cpu_riscv_read_mtime(env);
+    case NEW_CSR_STIME:
+    case NEW_CSR_STIMEW:
+        return cpu_riscv_read_stime(env);
+    case NEW_CSR_MTIMEH:
+    case NEW_CSR_STIMEH:
+    case NEW_CSR_STIMEHW:
+        // *H counter CSRs only exist on RV32
+        fprintf(stderr, "CSR_MTIMEH unsupported on RV64I\n");
+        exit(1);
+    case NEW_CSR_TIME:
+    case NEW_CSR_TIMEW:
+        return cpu_riscv_read_time(env);
+    case NEW_CSR_CYCLE:
+    case NEW_CSR_CYCLEW:
+    case NEW_CSR_INSTRET:
+    case NEW_CSR_INSTRETW:
+        // cycle and instret share one counter in this implementation
+        return cpu_riscv_read_instretw(env);
+    case NEW_CSR_TIMEH:
+    case NEW_CSR_TIMEHW:
+        fprintf(stderr, "CSR_TIMEH unsupported on RV64I\n");
+        exit(1);
+    case NEW_CSR_CYCLEH:
+    case NEW_CSR_INSTRETH:
+    case NEW_CSR_CYCLEHW:
+    case NEW_CSR_INSTRETHW:
+        fprintf(stderr, "CSR_INSTRETH unsupported on RV64I\n");
+        exit(1);
+    case NEW_CSR_SSTATUS: {
+        // sstatus is synthesized as a restricted view of mstatus
+        target_ulong ss = 0;
+        ss = set_field(ss, SSTATUS_IE, get_field(env->csr[NEW_CSR_MSTATUS],
+                    MSTATUS_IE));
+        ss = set_field(ss, SSTATUS_PIE, get_field(env->csr[NEW_CSR_MSTATUS],
+                    MSTATUS_IE1));
+        ss = set_field(ss, SSTATUS_PS, get_field(env->csr[NEW_CSR_MSTATUS],
+                    MSTATUS_PRV1));
+        ss = set_field(ss, SSTATUS_FS, get_field(env->csr[NEW_CSR_MSTATUS],
+                    MSTATUS_FS));
+        ss = set_field(ss, SSTATUS_XS, get_field(env->csr[NEW_CSR_MSTATUS],
+                    MSTATUS_XS));
+        ss = set_field(ss, SSTATUS_MPRV, get_field(env->csr[NEW_CSR_MSTATUS],
+                    MSTATUS_MPRV));
+        if (get_field(env->csr[NEW_CSR_MSTATUS], MSTATUS64_SD)) {
+            ss = set_field(ss, SSTATUS64_SD, 1);
+        }
+        return ss;
+    }
+    case NEW_CSR_SIP:
+        // supervisor view of pending/enabled bits: S-software and S-timer only
+        return env->csr[NEW_CSR_MIP] & (MIP_SSIP | MIP_STIP);
+    case NEW_CSR_SIE:
+        return env->csr[NEW_CSR_MIE] & (MIP_SSIP | MIP_STIP);
+    case NEW_CSR_SEPC:
+        return env->csr[NEW_CSR_SEPC];
+    case NEW_CSR_SBADADDR:
+        return env->csr[NEW_CSR_SBADADDR];
+    case NEW_CSR_STVEC:
+        return env->csr[NEW_CSR_STVEC];
+    case NEW_CSR_SCAUSE:
+        return env->csr[NEW_CSR_SCAUSE];
+    case NEW_CSR_SPTBR:
+        return env->csr[NEW_CSR_SPTBR];
+    case NEW_CSR_SASID:
+        // address-space IDs not implemented
+        return 0;
+    case NEW_CSR_SSCRATCH:
+        return env->csr[NEW_CSR_SSCRATCH];
+    case NEW_CSR_MSTATUS:
+        return env->csr[NEW_CSR_MSTATUS];
+    case NEW_CSR_MIP:
+        return env->csr[NEW_CSR_MIP];
+    case NEW_CSR_MIPI:
+        return 0;
+    case NEW_CSR_MIE:
+        return env->csr[NEW_CSR_MIE];
+    case NEW_CSR_MEPC:
+        return env->csr[NEW_CSR_MEPC];
+    case NEW_CSR_MSCRATCH:
+        return env->csr[NEW_CSR_MSCRATCH];
+    case NEW_CSR_MCAUSE:
+        return env->csr[NEW_CSR_MCAUSE];
+    case NEW_CSR_MBADADDR:
+        return env->csr[NEW_CSR_MBADADDR];
+    case NEW_CSR_MTIMECMP:
+        return env->csr[NEW_CSR_MTIMECMP];
+    case NEW_CSR_MCPUID:
+        return env->csr[NEW_CSR_MCPUID];
+    case NEW_CSR_MIMPID:
+        return 0x1; // "Rocket"
+    case NEW_CSR_MHARTID:
+        // single-hart system
+        return 0;
+    case NEW_CSR_MTVEC:
+        return DEFAULT_MTVEC;
+    case NEW_CSR_MTDELEG:
+        // no trap delegation
+        return 0;
+    case NEW_CSR_MTOHOST:
+        fprintf(stderr, "Read from mtohost should not be handled here\n");
+        exit(1);
+    case NEW_CSR_MFROMHOST:
+        fprintf(stderr, "Read from mfromhost should not be handled here\n");
+        exit(1);
+    case NEW_CSR_MIOBASE:
+        // I/O region starts where RAM ends
+        return env->memsize;
+    case NEW_CSR_UARCH0:
+    case NEW_CSR_UARCH1:
+    case NEW_CSR_UARCH2:
+    case NEW_CSR_UARCH3:
+    case NEW_CSR_UARCH4:
+    case NEW_CSR_UARCH5:
+    case NEW_CSR_UARCH6:
+    case NEW_CSR_UARCH7:
+    case NEW_CSR_UARCH8:
+    case NEW_CSR_UARCH9:
+    case NEW_CSR_UARCH10:
+    case NEW_CSR_UARCH11:
+    case NEW_CSR_UARCH12:
+    case NEW_CSR_UARCH13:
+    case NEW_CSR_UARCH14:
+    case NEW_CSR_UARCH15:
+        // microarchitectural CSRs read as zero
+        return 0;
+    }
+    fprintf(stderr, "Attempt to read invalid csr!\n");
+    exit(1);
+}
+
+/*
+ * Check that a CSR access is allowed; raise an illegal-instruction
+ * exception (resuming at new_pc) if not.
+ *
+ * Per the CSR numbering convention: bits [9:8] of the CSR number encode
+ * the minimum privilege level, and bits [11:10] == 3 mark it read-only.
+ */
+void validate_csr(CPURISCVState *env, uint64_t which, uint64_t write,
+        uint64_t new_pc) {
+    unsigned my_priv = get_field(env->csr[NEW_CSR_MSTATUS], MSTATUS_PRV);
+    unsigned csr_priv = get_field((which), 0x300);
+    unsigned csr_read_only = get_field((which), 0xC00) == 3;
+    if (((write) && csr_read_only) || (my_priv < csr_priv)) {
+        do_raise_exception_err(env, NEW_RISCV_EXCP_ILLEGAL_INST, new_pc);
+    }
+    return;
+}
+
+/* CSRRW: atomically swap src into the CSR, returning the previous value. */
+target_ulong helper_csrrw(CPURISCVState *env, target_ulong src,
+        target_ulong csr, target_ulong new_pc)
+{
+    validate_csr(env, csr, 1, new_pc);
+    uint64_t old_val = csr_read_helper(env, csr);
+    csr_write_helper(env, src, csr);
+    return old_val;
+}
+
+/* CSRRS: set the bits of src in the CSR, returning the previous value.
+ * With src == 0 (x0) the access is treated as a pure read for
+ * validation, matching spike. */
+target_ulong helper_csrrs(CPURISCVState *env, target_ulong src,
+        target_ulong csr, target_ulong new_pc)
+{
+    validate_csr(env, csr, src != 0, new_pc);
+    uint64_t old_val = csr_read_helper(env, csr);
+    csr_write_helper(env, old_val | src, csr);
+    return old_val;
+}
+
+/* CSRRSI: like CSRRS but always validated as a write, to match spike's
+ * behavior for the immediate form. */
+target_ulong helper_csrrsi(CPURISCVState *env, target_ulong src,
+        target_ulong csr, target_ulong new_pc)
+{
+    validate_csr(env, csr, 1, new_pc);
+    uint64_t old_val = csr_read_helper(env, csr);
+    csr_write_helper(env, old_val | src, csr);
+    return old_val;
+}
+
+/* CSRRC: clear the bits of src in the CSR, returning the previous value. */
+target_ulong helper_csrrc(CPURISCVState *env, target_ulong src,
+        target_ulong csr, target_ulong new_pc) {
+    validate_csr(env, csr, 1, new_pc);
+    uint64_t old_val = csr_read_helper(env, csr);
+    csr_write_helper(env, old_val & ~src, csr);
+    return old_val;
+}
+
+/*
+ * This is a debug print helper for printing trace.
+ * Currently calls spike-dasm, so very slow.
+ * Probably not useful unless you're debugging riscv-qemu
+ *
+ * The spike-dasm path is commented out; the active code only prints the
+ * pc/instruction pair with a placeholder disassembly string.
+ */
+void helper_debug_print(CPURISCVState *env, target_ulong cpu_pc_deb,
+        target_ulong instruction)
+{
+/*    int buflen = 100;
+    char runbuf[buflen];
+    char path[buflen];
+
+    snprintf(runbuf, buflen, "echo 'DASM(%08lx)\n' | spike-dasm", instruction);
+
+    FILE *fp;
+    fp = popen(runbuf, "r");
+    if (fp == NULL) {
+        printf("popen fail\n");
+        exit(1);
+    }
+    if (fgets(path, sizeof(path)-1, fp) != NULL) {
+        fprintf(stderr, ": core   0: 0x" TARGET_FMT_lx " (0x%08lx) %s",
+                cpu_pc_deb, instruction, path);
+    } else {*/
+        fprintf(stderr, ": core   0: 0x" TARGET_FMT_lx " (0x%08lx) %s",
+                cpu_pc_deb, instruction, "DASM BAD RESULT\n");
+/*    }
+    pclose(fp);*/
+}
+
+/*
+ * SRET: return from trap.
+ * Returns the PC to resume at (sepc/mepc depending on current privilege),
+ * or cpu_pc_deb unchanged if the return target is misaligned (in which
+ * case a misaligned-fetch exception has been raised).
+ */
+target_ulong helper_sret(CPURISCVState *env, target_ulong cpu_pc_deb)
+{
+    target_ulong mstatus = env->csr[NEW_CSR_MSTATUS];
+    if(!(get_field(mstatus, MSTATUS_PRV) >= PRV_S)) {
+        // TODO: real illegal instruction trap
+        printf("ILLEGAL INST");
+        exit(1);
+    }
+
+    target_ulong retpc = 0;
+    switch(get_field(mstatus, MSTATUS_PRV)) {
+        case PRV_S:
+            // set PC val to sepc
+            retpc = env->csr[NEW_CSR_SEPC];
+            break;
+        case PRV_M:
+            // set PC val to mepc
+            retpc = env->csr[NEW_CSR_MEPC];
+            break;
+        default:
+            // TODO: illegal inst
+            printf("ILLEGAL INST");
+            exit(1);
+            break;
+    }
+    if (retpc & 0x3) {
+        // check for misaligned fetch
+        helper_raise_exception_mbadaddr(env, NEW_RISCV_EXCP_INST_ADDR_MIS,
+                cpu_pc_deb);
+        return cpu_pc_deb;
+    }
+
+    // pop the privilege stack (PRV1/IE1 -> PRV/IE, etc.); the write goes
+    // through csr_write_helper so TLB flushes on privilege change happen
+    target_ulong next_mstatus = pop_priv_stack(env->csr[NEW_CSR_MSTATUS]);
+    csr_write_helper(env, next_mstatus, NEW_CSR_MSTATUS);
+    return retpc;
+}
+
+/*
+ * MRTS: redirect a trap taken in machine mode to supervisor mode.
+ * Copies the M-mode trap state (badaddr/cause/epc) into the S-mode CSRs,
+ * drops to PRV_S, and returns stvec as the new PC (or curr_pc if stvec
+ * is misaligned, after raising a misaligned-fetch exception).
+ */
+target_ulong helper_mrts(CPURISCVState *env, target_ulong curr_pc)
+{
+    target_ulong mstatus = env->csr[NEW_CSR_MSTATUS];
+    if(!(get_field(mstatus, MSTATUS_PRV) >= PRV_M)) {
+        // TODO: real illegal instruction trap
+        printf("ILLEGAL INST");
+        exit(1);
+    }
+
+    csr_write_helper(env, set_field(mstatus, MSTATUS_PRV, PRV_S),
+            NEW_CSR_MSTATUS);
+    env->csr[NEW_CSR_SBADADDR] = env->csr[NEW_CSR_MBADADDR];
+    env->csr[NEW_CSR_SCAUSE] = env->csr[NEW_CSR_MCAUSE];
+    env->csr[NEW_CSR_SEPC] = env->csr[NEW_CSR_MEPC];
+
+    if (env->csr[NEW_CSR_STVEC] & 0x3) {
+        helper_raise_exception_mbadaddr(env, NEW_RISCV_EXCP_INST_ADDR_MIS,
+                curr_pc);
+        return curr_pc;
+    }
+    return env->csr[NEW_CSR_STVEC];
+}
+
+
+#ifndef CONFIG_USER_ONLY
+
+/* TLB and translation cache management functions */
+
+/* Invalidate QEMU's softmmu TLB for this CPU. */
+inline void cpu_riscv_tlb_flush (CPURISCVState *env, int flush_global)
+{
+    tlb_flush(CPU(riscv_env_get_cpu(env)), flush_global);
+}
+
+/* FENCE.I: instruction-fetch fence. Discards all cached translations. */
+void helper_fence_i(CPURISCVState *env) {
+    RISCVCPU *cpu = riscv_env_get_cpu(env);
+    CPUState *cs = CPU(cpu);
+    // Flush QEMU's TLB
+    tlb_flush(cs, 1);
+    // ARM port seems to not know if this is okay inside a TB...
+    // But we need to do it
+    tb_flush(cs);
+}
+
+/* TCG-callable wrapper: flush the softmmu TLB (global flush). */
+void helper_tlb_flush(CPURISCVState *env)
+{
+    cpu_riscv_tlb_flush(env, 1);
+}
+
+#endif /* !CONFIG_USER_ONLY */
+
+#if !defined(CONFIG_USER_ONLY)
+
+/*
+ * Called by the softmmu on a misaligned data access.
+ * Raises the load/store misaligned-address exception with mbadaddr set;
+ * misaligned instruction fetches are handled elsewhere and abort here.
+ */
+void riscv_cpu_do_unaligned_access(CPUState *cs, target_ulong addr,
+                                int rw, int is_user, uintptr_t retaddr)
+{
+    RISCVCPU *cpu = RISCV_CPU(cs);
+    CPURISCVState *env = &cpu->env;
+    // use TARGET_FMT_lx so the format matches target_ulong on all hosts
+    // (plain %016lx is wrong when target_ulong is not unsigned long)
+    printf("addr: " TARGET_FMT_lx "\n", addr);
+    if (rw & 0x2) {
+        fprintf(stderr, "unaligned inst fetch not handled here\n");
+        exit(1);
+    } else if (rw == 0x1) {
+        printf("Store\n");
+        cs->exception_index = NEW_RISCV_EXCP_STORE_AMO_ADDR_MIS;
+        env->csr[NEW_CSR_MBADADDR] = addr;
+    } else {
+        printf("Load\n");
+        cs->exception_index = NEW_RISCV_EXCP_LOAD_ADDR_MIS;
+        env->csr[NEW_CSR_MBADADDR] = addr;
+    }
+    do_raise_exception_err(env, cs->exception_index, retaddr);
+}
+
+/* called by qemu's softmmu to fill the qemu tlb */
+void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx,
+              uintptr_t retaddr)
+{
+    // walk the page table; on failure the handler has already set
+    // cs->exception_index, so just raise it at the faulting host PC
+    if (riscv_cpu_handle_mmu_fault(cs, addr, is_write, mmu_idx)
+            == TRANSLATE_FAIL) {
+        CPURISCVState *env = &RISCV_CPU(cs)->env;
+        do_raise_exception_err(env, cs->exception_index, retaddr);
+    }
+}
+
+/*
+ * Called on an access to an address with no memory or device mapped.
+ * Not implemented for RISC-V: report the address and abort.
+ */
+void riscv_cpu_unassigned_access(CPUState *cs, hwaddr addr, bool is_write,
+        bool is_exec, int unused, unsigned size)
+{
+    printf("unassigned address not implemented for riscv\n");
+    // hwaddr is always 64-bit; HWADDR_PRIx is the portable format
+    // (%016lX is wrong on hosts where long is 32-bit)
+    printf("unassigned Address: %016" HWADDR_PRIx "\n", addr);
+    exit(1);
+}
+
+#endif /* !CONFIG_USER_ONLY */
diff --git a/target-riscv/riscv-defs.h b/target-riscv/riscv-defs.h
new file mode 100644
index 0000000..8a20664
--- /dev/null
+++ b/target-riscv/riscv-defs.h
@@ -0,0 +1,14 @@
+#if !defined (__QEMU_RISCV_DEFS_H__)
+#define __QEMU_RISCV_DEFS_H__
+
+/* Core target-geometry definitions consumed by QEMU's common code. */
+
+#define TARGET_PAGE_BITS 12 // 4 KiB Pages
+//#define RISCV_TLB_MAX 0 not used. was for MIPS tlb
+// if you're looking to change the QEMU softmmu size, look for TLB_
+// #define CPU_TLB_BITS 2 in /include/exec/cpu-defs.h
+
+#define TARGET_LONG_BITS 64 // this defs TCGv as TCGv_i64 in tcg/tcg-op.h
+// according to spec? 38 PPN + 12 Offset
+#define TARGET_PHYS_ADDR_SPACE_BITS 50
+#define TARGET_VIRT_ADDR_SPACE_BITS 39
+
+#endif /* !defined (__QEMU_RISCV_DEFS_H__) */
diff --git a/target-riscv/translate.c b/target-riscv/translate.c
new file mode 100644
index 0000000..a18b126
--- /dev/null
+++ b/target-riscv/translate.c
@@ -0,0 +1,2155 @@
+/*
+ * RISC-V emulation for qemu: main translation routines.
+ *
+ * Author: Sagar Karandikar, address@hidden
+ *
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "cpu.h"
+#include "disas/disas.h"
+#include "tcg-op.h"
+#include "exec/cpu_ldst.h"
+
+#include "exec/helper-proto.h"
+#include "exec/helper-gen.h"
+
+#include "instmap.h"
+
+// uncomment to disable TB chaining for branches / JAL (debug aid)
+//#define DISABLE_CHAINING_BRANCH
+//#define DISABLE_CHAINING_JAL
+
+// set to 1 to enable disassembly logging via qemu_log_mask
+#define RISCV_DEBUG_DISAS 0
+
+/* global register indices */
+static TCGv_ptr cpu_env;
+static TCGv cpu_gpr[32], cpu_PC, cpu_fpr[32];
+// reservation address for LR/SC
+static TCGv load_reservation;
+
+#include "exec/gen-icount.h"
+
+/* Per-translation-block decoder state. */
+typedef struct DisasContext {
+    struct TranslationBlock *tb;
+    target_ulong pc;        // address of the instruction being translated
+    uint32_t opcode;        // raw 32-bit instruction word
+    int singlestep_enabled;
+    int mem_idx;            // mmu index for memory accesses
+    int bstate;             // one of BS_* below
+} DisasContext;
+
+static inline void kill_unknown(DisasContext *ctx, int excp);
+
+enum {
+    BS_NONE     = 0, // When seen outside of translation while loop, indicates
+                     // need to exit tb due to end of page.
+    BS_STOP     = 1, // Need to exit tb for syscall, sret, etc.
+    BS_BRANCH   = 2, // Need to exit tb for branch, jal, etc.
+};
+
+/* ABI names for integer registers, indexed by register number. */
+static const char * const regnames[] = {
+  "zero", "ra  ", "sp  ", "gp  ", "tp  ", "t0  ",  "t1  ",  "t2  ",
+  "s0  ", "s1  ", "a0  ", "a1  ", "a2  ", "a3  ",  "a4  ",  "a5  ",
+  "a6  ", "a7  ", "s2  ", "s3  ", "s4  ", "s5  ",  "s6  ",  "s7  ",
+  "s8  ", "s9  ", "s10 ", "s11 ", "t3  ", "t4  ",  "t5  ",  "t6  "
+};
+
+/* ABI names for floating-point registers, indexed by register number. */
+static const char * const fpr_regnames[] = {
+  "ft0", "ft1", "ft2",  "ft3",  "ft4", "ft5", "ft6",  "ft7",
+  "fs0", "fs1", "fa0",  "fa1",  "fa2", "fa3", "fa4",  "fa5",
+  "fa6", "fa7", "fs2",  "fs3",  "fs4", "fs5", "fs6",  "fs7",
+  "fs8", "fs9", "fs10", "fs11", "ft8", "ft9", "ft10", "ft11"
+};
+
+/* Log a per-instruction disassembly line when RISCV_DEBUG_DISAS is set.
+ * (Line continuations rejoined: the mail transport had wrapped the
+ * trailing backslashes onto their own lines, breaking the macros.) */
+#define RISCV_DEBUG(fmt, ...)                                                 \
+    do {                                                                      \
+        if (RISCV_DEBUG_DISAS) {                                              \
+            qemu_log_mask(CPU_LOG_TB_IN_ASM,                                  \
+                          TARGET_FMT_lx ": %08x " fmt "\n",                   \
+                          ctx->pc, ctx->opcode , ## __VA_ARGS__);             \
+        }                                                                     \
+    } while (0)
+
+/* Free-form disassembly logging, gated on the same flag. */
+#define LOG_DISAS(...)                                                        \
+    do {                                                                      \
+        if (RISCV_DEBUG_DISAS) {                                              \
+            qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__);                 \
+        }                                                                     \
+    } while (0)
+
+/* Emit TCG ops that sync the PC and raise exception `excp`. */
+static inline void generate_exception (DisasContext *ctx, int excp)
+{
+    TCGv_i32 excp_num = tcg_const_i32(excp);
+    tcg_gen_movi_tl(cpu_PC, ctx->pc);
+    gen_helper_raise_exception(cpu_env, excp_num);
+    tcg_temp_free_i32(excp_num);
+}
+
+/* Emit TCG ops that sync the PC and raise `excp`, recording the faulting
+ * PC in mbadaddr via the helper. */
+static inline void generate_exception_mbadaddr(DisasContext *ctx, int excp)
+{
+    tcg_gen_movi_tl(cpu_PC, ctx->pc);
+    TCGv_i32 helper_tmp = tcg_const_i32(excp);
+    gen_helper_raise_exception_mbadaddr(cpu_env, helper_tmp, cpu_PC);
+    tcg_temp_free_i32(helper_tmp);
+}
+
+/* Emit TCG ops that sync the PC and raise `excp` through the _err helper
+ * (passes the current PC so the helper can resume correctly). */
+static inline void generate_exception_err (DisasContext *ctx, int excp)
+{
+    tcg_gen_movi_tl(cpu_PC, ctx->pc);
+    TCGv_i32 helper_tmp = tcg_const_i32(excp);
+    gen_helper_raise_exception_err(cpu_env, helper_tmp, cpu_PC);
+    tcg_temp_free_i32(helper_tmp);
+}
+
+
+// unknown instruction / fp disabled: raise excp and stop translating this TB
+static inline void kill_unknown(DisasContext *ctx, int excp) {
+    generate_exception(ctx, excp);
+    ctx->bstate = BS_STOP;
+}
+
+/* Emit a jump to `dest`, chaining to the next TB (slot `n`) when the
+ * target is on the same guest page and we are not single-stepping. */
+static inline void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
+{
+    TranslationBlock *tb;
+    tb = ctx->tb;
+    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) &&
+        likely(!ctx->singlestep_enabled)) {
+        // we only allow direct chaining when the jump is to the same page
+        // otherwise, we could produce incorrect chains when address spaces
+        // change. see
+        // http://lists.gnu.org/archive/html/qemu-devel/2007-06/msg00213.html
+        tcg_gen_goto_tb(n);
+        tcg_gen_movi_tl(cpu_PC, dest);
+        tcg_gen_exit_tb((uintptr_t)tb + n);
+    } else {
+        tcg_gen_movi_tl(cpu_PC, dest);
+        if (ctx->singlestep_enabled) {
+            gen_helper_raise_exception_debug(cpu_env);
+        }
+        tcg_gen_exit_tb(0);
+    }
+}
+
+/* Copy GPR `reg_num` into TCG temp `t`.  Register 0 is hardwired to zero
+ * and has no backing TCG global, so it is materialized as the constant 0. */
+static inline void gen_get_gpr (TCGv t, int reg_num)
+{
+    if (reg_num != 0) {
+        tcg_gen_mov_tl(t, cpu_gpr[reg_num]);
+    } else {
+        tcg_gen_movi_tl(t, 0);
+    }
+}
+
+/* Copy TCG temp `t` into GPR `reg_num_dst`.  Writes to register 0 are
+ * silently dropped: x0 has no backing TCG global.  Callers usually avoid
+ * emitting writes to $zero at all; this guard is a safety net. */
+static inline void gen_set_gpr (int reg_num_dst, TCGv t)
+{
+    if (reg_num_dst == 0) {
+        return;
+    }
+    tcg_gen_mov_tl(cpu_gpr[reg_num_dst], t);
+}
+
+/*
+ * Translate register-register ALU ops (OP / OP-32 M-extension included):
+ * rd = rs1 <op> rs2.  DIV/DIVU/REM/REMU emit branches implementing the
+ * RISC-V special cases (divide-by-zero and signed-overflow) per the spec.
+ * (Four statements that had been wrapped by the mail transport are
+ * rejoined here.)
+ */
+inline static void gen_arith(DisasContext *ctx, uint32_t opc,
+                      int rd, int rs1, int rs2)
+{
+    TCGv source1, source2;
+
+    source1 = tcg_temp_new();
+    source2 = tcg_temp_new();
+
+    gen_get_gpr(source1, rs1);
+    gen_get_gpr(source2, rs2);
+
+    switch (opc) {
+
+    case OPC_RISC_ADD:
+        tcg_gen_add_tl(source1, source1, source2);
+        break;
+    case OPC_RISC_SUB:
+        tcg_gen_sub_tl(source1, source1, source2);
+        break;
+    case OPC_RISC_SLL:
+        // only the low 6 bits of the shift amount are used on RV64
+        tcg_gen_andi_tl(source2, source2, 0x3F);
+        tcg_gen_shl_tl(source1, source1, source2);
+        break;
+    case OPC_RISC_SLT:
+        tcg_gen_setcond_tl(TCG_COND_LT, source1, source1, source2);
+        break;
+    case OPC_RISC_SLTU:
+        tcg_gen_setcond_tl(TCG_COND_LTU, source1, source1, source2);
+        break;
+    case OPC_RISC_XOR:
+        tcg_gen_xor_tl(source1, source1, source2);
+        break;
+    case OPC_RISC_SRL:
+        tcg_gen_andi_tl(source2, source2, 0x3F);
+        tcg_gen_shr_tl(source1, source1, source2);
+        break;
+    case OPC_RISC_SRA:
+        tcg_gen_andi_tl(source2, source2, 0x3F);
+        tcg_gen_sar_tl(source1, source1, source2);
+        break;
+    case OPC_RISC_OR:
+        tcg_gen_or_tl(source1, source1, source2);
+        break;
+    case OPC_RISC_AND:
+        tcg_gen_and_tl(source1, source1, source2);
+        break;
+    case OPC_RISC_MUL:
+        // low half of the signed product lands in source1
+        tcg_gen_muls2_tl(source1, source2, source1, source2);
+        break;
+    case OPC_RISC_MULH:
+        // high half of the signed product lands in source1
+        tcg_gen_muls2_tl(source2, source1, source1, source2);
+        break;
+    case OPC_RISC_MULHSU:
+        // signed x unsigned high half has no direct TCG op; use a helper
+        gen_helper_mulhsu(source1, cpu_env, source1, source2);
+        break;
+    case OPC_RISC_MULHU:
+        tcg_gen_mulu2_tl(source2, source1, source1, source2);
+        break;
+    case OPC_RISC_DIV:
+        {
+            // RISC-V DIV: x/0 == -1; INT64_MIN / -1 == INT64_MIN (no trap)
+            TCGv spec_source1, spec_source2;
+            TCGv cond1, cond2;
+            TCGLabel* handle_zero = gen_new_label();
+            TCGLabel* handle_overflow = gen_new_label();
+            TCGLabel* done = gen_new_label();
+            // locals survive the emitted branches below
+            spec_source1 = tcg_temp_local_new();
+            spec_source2 = tcg_temp_local_new();
+            cond1 = tcg_temp_local_new();
+            cond2 = tcg_temp_local_new();
+
+            gen_get_gpr(spec_source1, rs1);
+            gen_get_gpr(spec_source2, rs2);
+            tcg_gen_brcondi_tl(TCG_COND_EQ, spec_source2, 0x0, handle_zero);
+
+            // now, use temp reg to check if both overflow conditions satisfied
+            tcg_gen_setcondi_tl(TCG_COND_EQ, cond2, spec_source2,
+                    0xFFFFFFFFFFFFFFFF); // divisor == -1
+            tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, spec_source1,
+                    0x8000000000000000); // dividend == INT64_MIN
+            tcg_gen_and_tl(cond1, cond1, cond2);
+
+            tcg_gen_brcondi_tl(TCG_COND_EQ, cond1, 1, handle_overflow);
+            // normal case
+            tcg_gen_div_tl(spec_source1, spec_source1, spec_source2);
+            tcg_gen_br(done);
+            // special zero case
+            gen_set_label(handle_zero);
+            tcg_gen_movi_tl(spec_source1, -1);
+            tcg_gen_br(done);
+            // special overflow case
+            gen_set_label(handle_overflow);
+            tcg_gen_movi_tl(spec_source1, 0x8000000000000000);
+            // done
+            gen_set_label(done);
+            tcg_gen_mov_tl(source1, spec_source1);
+            tcg_temp_free(spec_source1);
+            tcg_temp_free(spec_source2);
+            tcg_temp_free(cond1);
+            tcg_temp_free(cond2);
+        }
+        break;
+    case OPC_RISC_DIVU:
+        {
+            // RISC-V DIVU: x/0 == all-ones (2^64 - 1)
+            TCGv spec_source1, spec_source2;
+            TCGLabel* handle_zero = gen_new_label();
+            TCGLabel* done = gen_new_label();
+            spec_source1 = tcg_temp_local_new();
+            spec_source2 = tcg_temp_local_new();
+
+            gen_get_gpr(spec_source1, rs1);
+            gen_get_gpr(spec_source2, rs2);
+            tcg_gen_brcondi_tl(TCG_COND_EQ, spec_source2, 0x0, handle_zero);
+
+            // normal case
+            tcg_gen_divu_tl(spec_source1, spec_source1, spec_source2);
+            tcg_gen_br(done);
+            // special zero case
+            gen_set_label(handle_zero);
+            tcg_gen_movi_tl(spec_source1, -1);
+            tcg_gen_br(done);
+            // done
+            gen_set_label(done);
+            tcg_gen_mov_tl(source1, spec_source1);
+            tcg_temp_free(spec_source1);
+            tcg_temp_free(spec_source2);
+        }
+        break;
+    case OPC_RISC_REM:
+        {
+            // RISC-V REM: x%0 == x; INT64_MIN % -1 == 0
+            TCGv spec_source1, spec_source2;
+            TCGv cond1, cond2;
+            TCGLabel* handle_zero = gen_new_label();
+            TCGLabel* handle_overflow = gen_new_label();
+            TCGLabel* done = gen_new_label();
+            spec_source1 = tcg_temp_local_new();
+            spec_source2 = tcg_temp_local_new();
+            cond1 = tcg_temp_local_new();
+            cond2 = tcg_temp_local_new();
+
+            gen_get_gpr(spec_source1, rs1);
+            gen_get_gpr(spec_source2, rs2);
+            tcg_gen_brcondi_tl(TCG_COND_EQ, spec_source2, 0x0, handle_zero);
+
+            // now, use temp reg to check if both overflow conditions satisfied
+            tcg_gen_setcondi_tl(TCG_COND_EQ, cond2, spec_source2,
+                    0xFFFFFFFFFFFFFFFF); // divisor == -1
+            tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, spec_source1,
+                    0x8000000000000000); // dividend == INT64_MIN
+            tcg_gen_and_tl(cond1, cond1, cond2);
+
+            tcg_gen_brcondi_tl(TCG_COND_EQ, cond1, 1, handle_overflow);
+            // normal case
+            tcg_gen_rem_tl(spec_source1, spec_source1, spec_source2);
+            tcg_gen_br(done);
+            // special zero case: result is the dividend, unchanged
+            gen_set_label(handle_zero);
+            tcg_gen_mov_tl(spec_source1, spec_source1); // nop, for clarity
+            tcg_gen_br(done);
+            // special overflow case
+            gen_set_label(handle_overflow);
+            tcg_gen_movi_tl(spec_source1, 0);
+            // done
+            gen_set_label(done);
+            tcg_gen_mov_tl(source1, spec_source1);
+            tcg_temp_free(spec_source1);
+            tcg_temp_free(spec_source2);
+            tcg_temp_free(cond1);
+            tcg_temp_free(cond2);
+        }
+        break;
+    case OPC_RISC_REMU:
+        {
+            // RISC-V REMU: x%0 == x
+            TCGv spec_source1, spec_source2;
+            TCGLabel* handle_zero = gen_new_label();
+            TCGLabel* done = gen_new_label();
+            spec_source1 = tcg_temp_local_new();
+            spec_source2 = tcg_temp_local_new();
+
+            gen_get_gpr(spec_source1, rs1);
+            gen_get_gpr(spec_source2, rs2);
+            tcg_gen_brcondi_tl(TCG_COND_EQ, spec_source2, 0x0, handle_zero);
+
+            // normal case
+            tcg_gen_remu_tl(spec_source1, spec_source1, spec_source2);
+            tcg_gen_br(done);
+            // special zero case: result is the dividend, unchanged
+            gen_set_label(handle_zero);
+            tcg_gen_mov_tl(spec_source1, spec_source1); // nop, for clarity
+            tcg_gen_br(done);
+            // done
+            gen_set_label(done);
+            tcg_gen_mov_tl(source1, spec_source1);
+            tcg_temp_free(spec_source1);
+            tcg_temp_free(spec_source2);
+        }
+        break;
+    default:
+        kill_unknown(ctx, NEW_RISCV_EXCP_ILLEGAL_INST);
+        break;
+
+    }
+
+    // set and free
+    gen_set_gpr(rd, source1);
+    tcg_temp_free(source1);
+    tcg_temp_free(source2);
+}
+
+/* Translate register-immediate ALU ops: rd = rs1 <op> sext(imm).
+ * lower 12 bits of imm are valid */
+inline static void gen_arith_imm(DisasContext *ctx, uint32_t opc,
+                      int rd, int rs1, int16_t imm)
+{
+    TCGv source1;
+    source1 = tcg_temp_new();
+    gen_get_gpr(source1, rs1);
+    target_long uimm = (target_long)imm; /* sign ext 16->64 bits */
+
+    switch (opc) {
+    case OPC_RISC_ADDI:
+        tcg_gen_addi_tl(source1, source1, uimm);
+        break;
+    case OPC_RISC_SLTI:
+        tcg_gen_setcondi_tl(TCG_COND_LT, source1, source1, uimm);
+        break;
+    case OPC_RISC_SLTIU:
+        tcg_gen_setcondi_tl(TCG_COND_LTU, source1, source1, uimm);
+        break;
+    case OPC_RISC_XORI:
+        tcg_gen_xori_tl(source1, source1, uimm);
+        break;
+    case OPC_RISC_ORI:
+        tcg_gen_ori_tl(source1, source1, uimm);
+        break;
+    case OPC_RISC_ANDI:
+        tcg_gen_andi_tl(source1, source1, uimm);
+        break;
+    case OPC_RISC_SLLI: // TODO: add immediate upper bits check?
+        tcg_gen_shli_tl(source1, source1, uimm);
+        break;
+    case OPC_RISC_SHIFT_RIGHT_I: // SRLI, SRAI, TODO: upper bits check
+        // differentiate on IMM: bit 10 of the immediate selects arithmetic
+        // vs logical right shift; mask it off to recover the shift amount
+        if (uimm & 0x400) {
+            // SRAI
+            tcg_gen_sari_tl(source1, source1, uimm ^ 0x400);
+        } else {
+            tcg_gen_shri_tl(source1, source1, uimm);
+        }
+        break;
+    default:
+        kill_unknown(ctx, NEW_RISCV_EXCP_ILLEGAL_INST);
+        break;
+    }
+
+    gen_set_gpr(rd, source1);
+    tcg_temp_free(source1);
+}
+
+/* lower 12 bits of imm are valid */
+inline static void gen_arith_imm_w(DisasContext *ctx, uint32_t opc,
+                      int rd, int rs1, int16_t imm)
+{
+    TCGv source1;
+    source1 = tcg_temp_new();
+    gen_get_gpr(source1, rs1);
+    target_long uimm = (target_long)imm; /* sign ext 16->64 bits */
+
+    switch (opc) {
+    case OPC_RISC_ADDIW:
+        tcg_gen_addi_tl(source1, source1, uimm);
+        tcg_gen_ext32s_tl(source1, source1);
+        break;
+    case OPC_RISC_SLLIW: // TODO: add immediate upper bits check?
+        tcg_gen_shli_tl(source1, source1, uimm);
+        tcg_gen_ext32s_tl(source1, source1);
+        break;
+    case OPC_RISC_SHIFT_RIGHT_IW: // SRLIW, SRAIW, TODO: upper bits check
+        // differentiate on IMM
+        if (uimm & 0x400) {
+            // SRAI
+            // first, trick to get it to act like working on 32 bits:
+            tcg_gen_shli_tl(source1, source1, 32);
+            // now shift back to the right by shamt + 32 to get proper upper
+            // bits filling
+            tcg_gen_sari_tl(source1, source1, (uimm ^ 0x400) + 32);
+            tcg_gen_ext32s_tl(source1, source1);
+        } else {
+            // first, trick to get it to act like working on 32 bits (get rid
+            // of upper 32):
+            tcg_gen_shli_tl(source1, source1, 32);
+            // now shift back to the right by shamt + 32 to get proper upper
+            // bits filling
+            tcg_gen_shri_tl(source1, source1, uimm + 32);
+            tcg_gen_ext32s_tl(source1, source1);
+        }
+        break;
+    default:
+        kill_unknown(ctx, NEW_RISCV_EXCP_ILLEGAL_INST);
+        break;
+    }
+    gen_set_gpr(rd, source1);
+    tcg_temp_free(source1);
+}
+
/* Generate a 32-bit ("W"-suffixed) register-register ALU op for RV64:
 * the operation is performed on the low 32 bits and the result is
 * sign-extended to 64 bits before being written to rd.
 *
 * Division and remainder follow the RISC-V M-extension rules: divide by
 * zero and signed overflow yield defined results rather than trapping.
 * Locals (tcg_temp_local_new) are used in those cases because the values
 * must survive the conditional branches. */
inline static void gen_arith_w(DisasContext *ctx, uint32_t opc,
                      int rd, int rs1, int rs2)
{
    TCGv source1, source2;
    source1 = tcg_temp_new();
    source2 = tcg_temp_new();
    gen_get_gpr(source1, rs1);
    gen_get_gpr(source2, rs2);

    switch (opc) {
    case OPC_RISC_ADDW:
        tcg_gen_add_tl(source1, source1, source2);
        tcg_gen_ext32s_tl(source1, source1);
        break;
    case OPC_RISC_SUBW:
        tcg_gen_sub_tl(source1, source1, source2);
        tcg_gen_ext32s_tl(source1, source1);
        break;
    case OPC_RISC_SLLW:
        tcg_gen_andi_tl(source2, source2, 0x1F); /* W-form shamt is 5 bits */
        tcg_gen_shl_tl(source1, source1, source2);
        tcg_gen_ext32s_tl(source1, source1);
        break;
    case OPC_RISC_SRLW:
        tcg_gen_andi_tl(source1, source1, 0x00000000FFFFFFFFLL); // clear upper 32
        tcg_gen_andi_tl(source2, source2, 0x1F);
        tcg_gen_shr_tl(source1, source1, source2); // do actual right shift
        tcg_gen_ext32s_tl(source1, source1); // sign ext
        break;
    case OPC_RISC_SRAW:
        // first, trick to get it to act like working on 32 bits (get rid of
        // upper 32)
        tcg_gen_shli_tl(source1, source1, 32); // clear upper 32
        tcg_gen_sari_tl(source1, source1, 32); // smear the sign bit into upper 32
        tcg_gen_andi_tl(source2, source2, 0x1F);
        tcg_gen_sar_tl(source1, source1, source2); // do the actual right shift
        tcg_gen_ext32s_tl(source1, source1); // sign ext
        break;
    case OPC_RISC_MULW:
        /* muls2 also writes the high half of the product into source2;
         * only the low half is used (source2 is dead afterwards). */
        tcg_gen_muls2_tl(source1, source2, source1, source2);
        tcg_gen_ext32s_tl(source1, source1);
        break;
    case OPC_RISC_DIVW:
        {
            /* DIVW special cases: x / 0 -> -1, INT32_MIN / -1 -> INT32_MIN. */
            TCGv spec_source1, spec_source2;
            TCGv cond1, cond2;
            TCGLabel* handle_zero = gen_new_label();
            TCGLabel* handle_overflow = gen_new_label();
            TCGLabel* done = gen_new_label();
            spec_source1 = tcg_temp_local_new();
            spec_source2 = tcg_temp_local_new();
            cond1 = tcg_temp_local_new();
            cond2 = tcg_temp_local_new();

            gen_get_gpr(spec_source1, rs1);
            gen_get_gpr(spec_source2, rs2);
            tcg_gen_ext32s_tl(spec_source1, spec_source1);
            tcg_gen_ext32s_tl(spec_source2, spec_source2);

            tcg_gen_brcondi_tl(TCG_COND_EQ, spec_source2, 0x0, handle_zero);

            // now, use temp reg to check if both overflow conditions satisfied
            /* NOTE(review): after ext32s, INT32_MIN appears here as
             * 0xFFFFFFFF80000000, so the comparison against
             * 0x8000000000000000 never matches and the overflow branch is
             * dead.  The 64-bit division below still produces the
             * architecturally-correct result for INT32_MIN / -1 after the
             * final ext32s, so behavior is unaffected — confirm and clean
             * up in a follow-up. */
            tcg_gen_setcondi_tl(TCG_COND_EQ, cond2, spec_source2,
                                0xFFFFFFFFFFFFFFFF); // divisor = -1
            tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, spec_source1,
                                0x8000000000000000);
            tcg_gen_and_tl(cond1, cond1, cond2);

            tcg_gen_brcondi_tl(TCG_COND_EQ, cond1, 1, handle_overflow);
            // normal case
            tcg_gen_div_tl(spec_source1, spec_source1, spec_source2);
            tcg_gen_br(done);
            // special zero case
            gen_set_label(handle_zero);
            tcg_gen_movi_tl(spec_source1, -1);
            tcg_gen_br(done);
            // special overflow case
            gen_set_label(handle_overflow);
            tcg_gen_movi_tl(spec_source1, 0x8000000000000000);
            // done
            gen_set_label(done);
            tcg_gen_mov_tl(source1, spec_source1);
            tcg_temp_free(spec_source1);
            tcg_temp_free(spec_source2);
            tcg_temp_free(cond1);
            tcg_temp_free(cond2);
            tcg_gen_ext32s_tl(source1, source1);
        }
        break;
    case OPC_RISC_DIVUW:
        {
            /* DIVUW special case: x / 0 -> all-ones (sign-extended -1). */
            TCGv spec_source1, spec_source2;
            TCGLabel* handle_zero = gen_new_label();
            TCGLabel* done = gen_new_label();
            spec_source1 = tcg_temp_local_new();
            spec_source2 = tcg_temp_local_new();

            gen_get_gpr(spec_source1, rs1);
            gen_get_gpr(spec_source2, rs2);
            tcg_gen_ext32u_tl(spec_source1, spec_source1);
            tcg_gen_ext32u_tl(spec_source2, spec_source2);

            tcg_gen_brcondi_tl(TCG_COND_EQ, spec_source2, 0x0, handle_zero);

            // normal case
            tcg_gen_divu_tl(spec_source1, spec_source1, spec_source2);
            tcg_gen_br(done);
            // special zero case
            gen_set_label(handle_zero);
            tcg_gen_movi_tl(spec_source1, -1);
            tcg_gen_br(done);
            // done
            gen_set_label(done);
            tcg_gen_mov_tl(source1, spec_source1);
            tcg_temp_free(spec_source1);
            tcg_temp_free(spec_source2);
            tcg_gen_ext32s_tl(source1, source1);
        }
        break;
    case OPC_RISC_REMW:
        {
            /* REMW special cases: x % 0 -> x, INT32_MIN % -1 -> 0. */
            TCGv spec_source1, spec_source2;
            TCGv cond1, cond2;
            TCGLabel* handle_zero = gen_new_label();
            TCGLabel* handle_overflow = gen_new_label();
            TCGLabel* done = gen_new_label();
            spec_source1 = tcg_temp_local_new();
            spec_source2 = tcg_temp_local_new();
            cond1 = tcg_temp_local_new();
            cond2 = tcg_temp_local_new();

            gen_get_gpr(spec_source1, rs1);
            gen_get_gpr(spec_source2, rs2);
            tcg_gen_ext32s_tl(spec_source1, spec_source1);
            tcg_gen_ext32s_tl(spec_source2, spec_source2);

            tcg_gen_brcondi_tl(TCG_COND_EQ, spec_source2, 0x0, handle_zero);

            // now, use temp reg to check if both overflow conditions satisfied
            /* NOTE(review): same dead overflow comparison as DIVW above —
             * the 64-bit remainder of INT32_MIN % -1 is 0 anyway. */
            tcg_gen_setcondi_tl(TCG_COND_EQ, cond2, spec_source2,
                                0xFFFFFFFFFFFFFFFF); // divisor = -1
            tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, spec_source1,
                                0x8000000000000000);
            tcg_gen_and_tl(cond1, cond1, cond2);

            tcg_gen_brcondi_tl(TCG_COND_EQ, cond1, 1, handle_overflow);
            // normal case
            tcg_gen_rem_tl(spec_source1, spec_source1, spec_source2);
            tcg_gen_br(done);
            // special zero case: dividend is returned unchanged
            gen_set_label(handle_zero);
            tcg_gen_mov_tl(spec_source1, spec_source1); // even though it's a nop, just for clarity
            tcg_gen_br(done);
            // special overflow case
            gen_set_label(handle_overflow);
            tcg_gen_movi_tl(spec_source1, 0);
            // done
            gen_set_label(done);
            tcg_gen_mov_tl(source1, spec_source1);
            tcg_temp_free(spec_source1);
            tcg_temp_free(spec_source2);
            tcg_temp_free(cond1);
            tcg_temp_free(cond2);
            tcg_gen_ext32s_tl(source1, source1);
        }
        break;
    case OPC_RISC_REMUW:
        {
            /* REMUW special case: x % 0 -> x. */
            TCGv spec_source1, spec_source2;
            TCGLabel* handle_zero = gen_new_label();
            TCGLabel* done = gen_new_label();
            spec_source1 = tcg_temp_local_new();
            spec_source2 = tcg_temp_local_new();

            gen_get_gpr(spec_source1, rs1);
            gen_get_gpr(spec_source2, rs2);
            tcg_gen_ext32u_tl(spec_source1, spec_source1);
            tcg_gen_ext32u_tl(spec_source2, spec_source2);

            tcg_gen_brcondi_tl(TCG_COND_EQ, spec_source2, 0x0, handle_zero);
            // normal case
            tcg_gen_remu_tl(spec_source1, spec_source1, spec_source2);
            tcg_gen_br(done);
            // special zero case: dividend is returned unchanged
            gen_set_label(handle_zero);
            tcg_gen_mov_tl(spec_source1, spec_source1); // even though it's a nop, just for clarity
            tcg_gen_br(done);
            // done
            gen_set_label(done);
            tcg_gen_mov_tl(source1, spec_source1);
            tcg_temp_free(spec_source1);
            tcg_temp_free(spec_source2);
            tcg_gen_ext32s_tl(source1, source1);
        }
        break;
    default:
        kill_unknown(ctx, NEW_RISCV_EXCP_ILLEGAL_INST);
        break;
    }
    gen_set_gpr(rd, source1);
    tcg_temp_free(source1);
    tcg_temp_free(source2);
}
+
+inline static void gen_branch(DisasContext *ctx, uint32_t opc,
+                       int rs1, int rs2, int16_t bimm) {
+
+    // TODO: misaligned insn (see jalr)
+    TCGLabel* l = gen_new_label();
+    TCGv source1, source2;
+    source1 = tcg_temp_new();
+    source2 = tcg_temp_new();
+    gen_get_gpr(source1, rs1);
+    gen_get_gpr(source2, rs2);
+    target_ulong ubimm = (target_long)bimm; /* sign ext 16->64 bits */
+
+    switch (opc) {
+    case OPC_RISC_BEQ:
+        tcg_gen_brcond_tl(TCG_COND_EQ, source1, source2, l);
+        break;
+    case OPC_RISC_BNE:
+        tcg_gen_brcond_tl(TCG_COND_NE, source1, source2, l);
+        break;
+    case OPC_RISC_BLT:
+        tcg_gen_brcond_tl(TCG_COND_LT, source1, source2, l);
+        break;
+    case OPC_RISC_BGE:
+        tcg_gen_brcond_tl(TCG_COND_GE, source1, source2, l);
+        break;
+    case OPC_RISC_BLTU:
+        tcg_gen_brcond_tl(TCG_COND_LTU, source1, source2, l);
+        break;
+    case OPC_RISC_BGEU:
+        tcg_gen_brcond_tl(TCG_COND_GEU, source1, source2, l);
+        break;
+    default:
+        kill_unknown(ctx, NEW_RISCV_EXCP_ILLEGAL_INST);
+        break;
+    }
+
+#ifdef DISABLE_CHAINING_BRANCH
+    tcg_gen_movi_tl(cpu_PC, ctx->pc + 4);
+    tcg_gen_exit_tb(0);
+#else
+    gen_goto_tb(ctx, 1, ctx->pc + 4); // must use this for safety
+#endif
+    gen_set_label(l); // branch taken
+#ifdef DISABLE_CHAINING_BRANCH
+    tcg_gen_movi_tl(cpu_PC, ctx->pc + ubimm);
+    tcg_gen_exit_tb(0);
+#else
+    gen_goto_tb(ctx, 0, ctx->pc + ubimm); // must use this for safety
+#endif
+    tcg_temp_free(source1);
+    tcg_temp_free(source2);
+    ctx->bstate = BS_BRANCH;
+}
+
+inline static void gen_load(DisasContext *ctx, uint32_t opc,
+                      int rd, int rs1, int16_t imm)
+{
+
+    target_long uimm = (target_long)imm; /* sign ext 16->64 bits */
+
+    TCGv t0 = tcg_temp_new();
+    TCGv t1 = tcg_temp_new();
+
+    gen_get_gpr(t0, rs1);
+    tcg_gen_addi_tl(t0, t0, uimm); //
+
+    switch (opc) {
+
+    case OPC_RISC_LB:
+        tcg_gen_qemu_ld8s(t1, t0, ctx->mem_idx);
+        break;
+    case OPC_RISC_LH:
+        tcg_gen_qemu_ld16s(t1, t0, ctx->mem_idx);
+        break;
+    case OPC_RISC_LW:
+        tcg_gen_qemu_ld32s(t1, t0, ctx->mem_idx);
+        break;
+    case OPC_RISC_LD:
+        tcg_gen_qemu_ld64(t1, t0, ctx->mem_idx);
+        break;
+    case OPC_RISC_LBU:
+        tcg_gen_qemu_ld8u(t1, t0, ctx->mem_idx);
+        break;
+    case OPC_RISC_LHU:
+        tcg_gen_qemu_ld16u(t1, t0, ctx->mem_idx);
+        break;
+    case OPC_RISC_LWU:
+        tcg_gen_qemu_ld32u(t1, t0, ctx->mem_idx);
+        break;
+    default:
+        kill_unknown(ctx, NEW_RISCV_EXCP_ILLEGAL_INST);
+        break;
+
+    }
+
+    gen_set_gpr(rd, t1);
+    tcg_temp_free(t0);
+    tcg_temp_free(t1);
+}
+
+
+inline static void gen_store(DisasContext *ctx, uint32_t opc,
+                      int rs1, int rs2, int16_t imm)
+{
+    target_long uimm = (target_long)imm; /* sign ext 16->64 bits */
+
+    TCGv t0 = tcg_temp_new();
+    TCGv dat = tcg_temp_new();
+    gen_get_gpr(t0, rs1);
+    tcg_gen_addi_tl(t0, t0, uimm);
+    gen_get_gpr(dat, rs2);
+
+    switch (opc) {
+
+    case OPC_RISC_SB:
+        tcg_gen_qemu_st8(dat, t0, ctx->mem_idx);
+        break;
+    case OPC_RISC_SH:
+        tcg_gen_qemu_st16(dat, t0, ctx->mem_idx);
+        break;
+    case OPC_RISC_SW:
+        tcg_gen_qemu_st32(dat, t0, ctx->mem_idx);
+        break;
+    case OPC_RISC_SD:
+        tcg_gen_qemu_st64(dat, t0, ctx->mem_idx);
+        break;
+
+    default:
+        kill_unknown(ctx, NEW_RISCV_EXCP_ILLEGAL_INST);
+        break;
+    }
+
+    tcg_temp_free(t0);
+    tcg_temp_free(dat);
+}
+
/* Generate JALR: jump to (rs1 + sign-extended imm) with bit 0 cleared, and
 * write the return address (pc + 4) to rd.  The target is checked against
 * 4-byte alignment (bit 1) and raises a misaligned-instruction exception
 * if violated — NOTE(review): this assumes no compressed-instruction
 * support; confirm when RVC is added.  JALR always ends the translation
 * block without chaining, since the target is data-dependent. */
inline static void gen_jalr(DisasContext *ctx, uint32_t opc,
                      int rd, int rs1, int16_t imm)
{
    TCGLabel* misaligned = gen_new_label();
    TCGLabel* done = gen_new_label();
    target_long uimm = (target_long)imm; /* sign ext 16->64 bits */
    TCGv t0, t1, t2, t3;
    /* locals: values must survive the conditional branch below */
    t0 = tcg_temp_local_new();
    t1 = tcg_temp_local_new();
    t2 = tcg_temp_local_new(); // old_pc
    t3 = tcg_temp_local_new();

    switch (opc) {

    case OPC_RISC_JALR: // CANNOT HAVE CHAINING WITH JALR
        gen_get_gpr(t0, rs1);
        tcg_gen_addi_tl(t0, t0, uimm);
        /* clear bit 0 of the target, as the ISA requires */
        tcg_gen_andi_tl(t0, t0, 0xFFFFFFFFFFFFFFFEll);

        /* t3 = target & 2: nonzero means not 4-byte aligned */
        tcg_gen_andi_tl(t3, t0, 0x2);
        tcg_gen_movi_tl(t2, ctx->pc);

        tcg_gen_brcondi_tl(TCG_COND_NE, t3, 0x0, misaligned);
        tcg_gen_mov_tl(cpu_PC, t0);
        tcg_gen_addi_tl(t1, t2, 4); // link value = old pc + 4
        gen_set_gpr(rd, t1);
        tcg_gen_br(done);

        gen_set_label(misaligned);
        generate_exception_mbadaddr(ctx, NEW_RISCV_EXCP_INST_ADDR_MIS);

        gen_set_label(done);
        tcg_gen_exit_tb(0); // exception or not, NO CHAINING FOR JALR
        ctx->bstate = BS_BRANCH;
        break;
    default:
        kill_unknown(ctx, NEW_RISCV_EXCP_ILLEGAL_INST);
        break;

    }
    tcg_temp_free(t0);
    tcg_temp_free(t1);
    tcg_temp_free(t2);
    tcg_temp_free(t3);

}
+
/* Generate an A-extension atomic op (LR/SC and the AMOs, .W and .D forms).
 * All are currently emitted as plain load/op/store sequences, NOT真 atomic
 * host operations — NOTE(review): this is only safe for a single-hart TCG
 * setup; revisit for SMP.  LR records the address in the global
 * load_reservation; SC succeeds (writes 0 to rd) only if the store address
 * matches the reservation, else writes 1 without storing.  The AMO result
 * written to rd is always the ORIGINAL memory value. */
inline static void gen_atomic(DisasContext *ctx, uint32_t opc,
                      int rd, int rs1, int rs2)
{
    // TODO: handle aq, rl bits? - for now just get rid of them:
    opc = MASK_OP_ATOMIC_NO_AQ_RL(opc);

    TCGv source1, source2, dat;

    /* locals: values must survive the conditional branches below */
    source1 = tcg_temp_local_new();
    source2 = tcg_temp_local_new();
    dat = tcg_temp_new();

    gen_get_gpr(source1, rs1);
    gen_get_gpr(source2, rs2);

    switch (opc) {
        // all currently implemented as non-atomics
    case OPC_RISC_LR_W:
        // put addr in load_reservation
        tcg_gen_mov_tl(load_reservation, source1);
        tcg_gen_qemu_ld32s(source1, source1, ctx->mem_idx);
        break;
    case OPC_RISC_SC_W: {
        TCGLabel* fail = gen_new_label();
        TCGLabel* done = gen_new_label();
        /* store only if the reservation still covers this address */
        tcg_gen_brcond_tl(TCG_COND_NE, load_reservation, source1, fail);
        tcg_gen_qemu_st32(source2, source1, ctx->mem_idx);
        tcg_gen_movi_tl(source1, 0); //success
        tcg_gen_br(done);
        gen_set_label(fail);
        tcg_gen_movi_tl(source1, 1); //fail
        gen_set_label(done);
        }
        break;
    case OPC_RISC_AMOSWAP_W:
        tcg_gen_qemu_ld32s(dat, source1, ctx->mem_idx);
        tcg_gen_qemu_st32(source2, source1, ctx->mem_idx);
        tcg_gen_mov_tl(source1, dat);
        break;
    case OPC_RISC_AMOADD_W:
        tcg_gen_qemu_ld32s(dat, source1, ctx->mem_idx);
        tcg_gen_add_tl(source2, dat, source2);
        tcg_gen_qemu_st32(source2, source1, ctx->mem_idx);
        tcg_gen_mov_tl(source1, dat);
        break;
    case OPC_RISC_AMOXOR_W:
        tcg_gen_qemu_ld32s(dat, source1, ctx->mem_idx);
        tcg_gen_xor_tl(source2, dat, source2);
        tcg_gen_qemu_st32(source2, source1, ctx->mem_idx);
        tcg_gen_mov_tl(source1, dat);
        break;
    case OPC_RISC_AMOAND_W:
        tcg_gen_qemu_ld32s(dat, source1, ctx->mem_idx);
        tcg_gen_and_tl(source2, dat, source2);
        tcg_gen_qemu_st32(source2, source1, ctx->mem_idx);
        tcg_gen_mov_tl(source1, dat);
        break;
    case OPC_RISC_AMOOR_W:
        tcg_gen_qemu_ld32s(dat, source1, ctx->mem_idx);
        tcg_gen_or_tl(source2, dat, source2);
        tcg_gen_qemu_st32(source2, source1, ctx->mem_idx);
        tcg_gen_mov_tl(source1, dat);
        break;
    case OPC_RISC_AMOMIN_W:
        {
            /* min/max AMOs: load old value, compare against rs2, store the
             * smaller/larger back; rd gets the old value.  Locals are
             * needed across the brcond. */
            TCGv source1_l, source2_l, dat_l;
            source1_l = tcg_temp_local_new();
            source2_l = tcg_temp_local_new();
            dat_l = tcg_temp_local_new();
            TCGLabel* j = gen_new_label();
            TCGLabel* done = gen_new_label();
            tcg_gen_mov_tl(source1_l, source1);
            tcg_gen_ext32s_tl(source2_l, source2);
            tcg_gen_qemu_ld32s(dat_l, source1_l, ctx->mem_idx);
            tcg_gen_brcond_tl(TCG_COND_LT, dat_l, source2_l, j);
            tcg_gen_qemu_st32(source2_l, source1_l, ctx->mem_idx);
            tcg_gen_br(done);
            // here we store the thing on the left
            gen_set_label(j);
            tcg_gen_qemu_st32(dat_l, source1_l, ctx->mem_idx);
            //done
            gen_set_label(done);
            tcg_gen_mov_tl(source1, dat_l);
            tcg_temp_free(source1_l);
            tcg_temp_free(source2_l);
            tcg_temp_free(dat_l);
        }
        break;
    case OPC_RISC_AMOMAX_W:
        {
            TCGv source1_l, source2_l, dat_l;
            source1_l = tcg_temp_local_new();
            source2_l = tcg_temp_local_new();
            dat_l = tcg_temp_local_new();
            TCGLabel* j = gen_new_label();
            TCGLabel* done = gen_new_label();
            tcg_gen_mov_tl(source1_l, source1);
            tcg_gen_ext32s_tl(source2_l, source2);
            tcg_gen_qemu_ld32s(dat_l, source1_l, ctx->mem_idx);
            tcg_gen_brcond_tl(TCG_COND_GT, dat_l, source2_l, j);
            tcg_gen_qemu_st32(source2_l, source1_l, ctx->mem_idx);
            tcg_gen_br(done);
            // here we store the thing on the left
            gen_set_label(j);
            tcg_gen_qemu_st32(dat_l, source1_l, ctx->mem_idx);
            //done
            gen_set_label(done);
            tcg_gen_mov_tl(source1, dat_l);
            tcg_temp_free(source1_l);
            tcg_temp_free(source2_l);
            tcg_temp_free(dat_l);
        }
        break;
    case OPC_RISC_AMOMINU_W:
        {
            /* unsigned variant: zero-extend rs2 and the loaded value for
             * the comparison, but rd still gets the sign-extended old
             * value (ext32s below). */
            TCGv source1_l, source2_l, dat_l;
            source1_l = tcg_temp_local_new();
            source2_l = tcg_temp_local_new();
            dat_l = tcg_temp_local_new();
            TCGLabel* j = gen_new_label();
            TCGLabel* done = gen_new_label();
            tcg_gen_mov_tl(source1_l, source1);
            tcg_gen_ext32u_tl(source2_l, source2);
            tcg_gen_qemu_ld32u(dat_l, source1_l, ctx->mem_idx);
            tcg_gen_brcond_tl(TCG_COND_LTU, dat_l, source2_l, j);
            tcg_gen_qemu_st32(source2_l, source1_l, ctx->mem_idx);
            tcg_gen_br(done);
            // here we store the thing on the left
            gen_set_label(j);
            tcg_gen_qemu_st32(dat_l, source1_l, ctx->mem_idx);
            //done
            gen_set_label(done);
            tcg_gen_ext32s_tl(source1, dat_l);
            tcg_temp_free(source1_l);
            tcg_temp_free(source2_l);
            tcg_temp_free(dat_l);
        }
        break;
    case OPC_RISC_AMOMAXU_W:
        {
            TCGv source1_l, source2_l, dat_l;
            source1_l = tcg_temp_local_new();
            source2_l = tcg_temp_local_new();
            dat_l = tcg_temp_local_new();
            TCGLabel* j = gen_new_label();
            TCGLabel* done = gen_new_label();
            tcg_gen_mov_tl(source1_l, source1);
            tcg_gen_ext32u_tl(source2_l, source2);
            tcg_gen_qemu_ld32u(dat_l, source1_l, ctx->mem_idx);
            tcg_gen_brcond_tl(TCG_COND_GTU, dat_l, source2_l, j);
            tcg_gen_qemu_st32(source2_l, source1_l, ctx->mem_idx);
            tcg_gen_br(done);
            // here we store the thing on the left
            gen_set_label(j);
            tcg_gen_qemu_st32(dat_l, source1_l, ctx->mem_idx);
            //done
            gen_set_label(done);
            tcg_gen_ext32s_tl(source1, dat_l);
            tcg_temp_free(source1_l);
            tcg_temp_free(source2_l);
            tcg_temp_free(dat_l);
        }
        break;
    case OPC_RISC_LR_D:
        // put addr in load_reservation
        tcg_gen_mov_tl(load_reservation, source1);
        tcg_gen_qemu_ld64(source1, source1, ctx->mem_idx);
        break;
    case OPC_RISC_SC_D: {
        TCGLabel* fail = gen_new_label();
        TCGLabel* done = gen_new_label();
        tcg_gen_brcond_tl(TCG_COND_NE, load_reservation, source1, fail);
        tcg_gen_qemu_st64(source2, source1, ctx->mem_idx);
        tcg_gen_movi_tl(source1, 0); //success
        tcg_gen_br(done);
        gen_set_label(fail);
        tcg_gen_movi_tl(source1, 1); //fail
        gen_set_label(done);
        break;
        }
    case OPC_RISC_AMOSWAP_D:
        tcg_gen_qemu_ld64(dat, source1, ctx->mem_idx);
        tcg_gen_qemu_st64(source2, source1, ctx->mem_idx);
        tcg_gen_mov_tl(source1, dat);
        break;
    case OPC_RISC_AMOADD_D:
        tcg_gen_qemu_ld64(dat, source1, ctx->mem_idx);
        tcg_gen_add_tl(source2, dat, source2);
        tcg_gen_qemu_st64(source2, source1, ctx->mem_idx);
        tcg_gen_mov_tl(source1, dat);
        break;
    case OPC_RISC_AMOXOR_D:
        tcg_gen_qemu_ld64(dat, source1, ctx->mem_idx);
        tcg_gen_xor_tl(source2, dat, source2);
        tcg_gen_qemu_st64(source2, source1, ctx->mem_idx);
        tcg_gen_mov_tl(source1, dat);
        break;
    case OPC_RISC_AMOAND_D:
        tcg_gen_qemu_ld64(dat, source1, ctx->mem_idx);
        tcg_gen_and_tl(source2, dat, source2);
        tcg_gen_qemu_st64(source2, source1, ctx->mem_idx);
        tcg_gen_mov_tl(source1, dat);
        break;
    case OPC_RISC_AMOOR_D:
        tcg_gen_qemu_ld64(dat, source1, ctx->mem_idx);
        tcg_gen_or_tl(source2, dat, source2);
        tcg_gen_qemu_st64(source2, source1, ctx->mem_idx);
        tcg_gen_mov_tl(source1, dat);
        break;
    case OPC_RISC_AMOMIN_D:
        {
            /* 64-bit min/max AMOs: same shape as the .W forms but with no
             * extension needed on the operands. */
            TCGv source1_l, source2_l, dat_l;
            source1_l = tcg_temp_local_new();
            source2_l = tcg_temp_local_new();
            dat_l = tcg_temp_local_new();
            TCGLabel* j = gen_new_label();
            TCGLabel* done = gen_new_label();
            tcg_gen_mov_tl(source1_l, source1);
            tcg_gen_mov_tl(source2_l, source2);
            tcg_gen_qemu_ld64(dat_l, source1_l, ctx->mem_idx);
            tcg_gen_brcond_tl(TCG_COND_LT, dat_l, source2_l, j);
            tcg_gen_qemu_st64(source2_l, source1_l, ctx->mem_idx);
            tcg_gen_br(done);
            // here we store the thing on the left
            gen_set_label(j);
            tcg_gen_qemu_st64(dat_l, source1_l, ctx->mem_idx);
            //done
            gen_set_label(done);
            tcg_gen_mov_tl(source1, dat_l);
            tcg_temp_free(source1_l);
            tcg_temp_free(source2_l);
            tcg_temp_free(dat_l);
        }
        break;
    case OPC_RISC_AMOMAX_D:
        {
            TCGv source1_l, source2_l, dat_l;
            source1_l = tcg_temp_local_new();
            source2_l = tcg_temp_local_new();
            dat_l = tcg_temp_local_new();
            TCGLabel* j = gen_new_label();
            TCGLabel* done = gen_new_label();
            tcg_gen_mov_tl(source1_l, source1);
            tcg_gen_mov_tl(source2_l, source2);
            tcg_gen_qemu_ld64(dat_l, source1_l, ctx->mem_idx);
            tcg_gen_brcond_tl(TCG_COND_GT, dat_l, source2_l, j);
            tcg_gen_qemu_st64(source2_l, source1_l, ctx->mem_idx);
            tcg_gen_br(done);
            // here we store the thing on the left
            gen_set_label(j);
            tcg_gen_qemu_st64(dat_l, source1_l, ctx->mem_idx);
            //done
            gen_set_label(done);
            tcg_gen_mov_tl(source1, dat_l);
            tcg_temp_free(source1_l);
            tcg_temp_free(source2_l);
            tcg_temp_free(dat_l);
        }
        break;
    case OPC_RISC_AMOMINU_D:
        {
            TCGv source1_l, source2_l, dat_l;
            source1_l = tcg_temp_local_new();
            source2_l = tcg_temp_local_new();
            dat_l = tcg_temp_local_new();
            TCGLabel* j = gen_new_label();
            TCGLabel* done = gen_new_label();
            tcg_gen_mov_tl(source1_l, source1);
            tcg_gen_mov_tl(source2_l, source2);
            tcg_gen_qemu_ld64(dat_l, source1_l, ctx->mem_idx);
            tcg_gen_brcond_tl(TCG_COND_LTU, dat_l, source2_l, j);
            tcg_gen_qemu_st64(source2_l, source1_l, ctx->mem_idx);
            tcg_gen_br(done);
            // here we store the thing on the left
            gen_set_label(j);
            tcg_gen_qemu_st64(dat_l, source1_l, ctx->mem_idx);
            //done
            gen_set_label(done);
            tcg_gen_mov_tl(source1, dat_l);
            tcg_temp_free(source1_l);
            tcg_temp_free(source2_l);
            tcg_temp_free(dat_l);
        }
        break;
    case OPC_RISC_AMOMAXU_D:
        {
            TCGv source1_l, source2_l, dat_l;
            source1_l = tcg_temp_local_new();
            source2_l = tcg_temp_local_new();
            dat_l = tcg_temp_local_new();
            TCGLabel* j = gen_new_label();
            TCGLabel* done = gen_new_label();
            tcg_gen_mov_tl(source1_l, source1);
            tcg_gen_mov_tl(source2_l, source2);
            tcg_gen_qemu_ld64(dat_l, source1_l, ctx->mem_idx);
            tcg_gen_brcond_tl(TCG_COND_GTU, dat_l, source2_l, j);
            tcg_gen_qemu_st64(source2_l, source1_l, ctx->mem_idx);
            tcg_gen_br(done);
            // here we store the thing on the left
            gen_set_label(j);
            tcg_gen_qemu_st64(dat_l, source1_l, ctx->mem_idx);
            //done
            gen_set_label(done);
            tcg_gen_mov_tl(source1, dat_l);
            tcg_temp_free(source1_l);
            tcg_temp_free(source2_l);
            tcg_temp_free(dat_l);
        }
        break;
    default:
        kill_unknown(ctx, NEW_RISCV_EXCP_ILLEGAL_INST);
        break;

    }

    // set and free
    gen_set_gpr(rd, source1);
    tcg_temp_free(source1);
    tcg_temp_free(source2);
    tcg_temp_free(dat);
}
+
+
+inline static void gen_csr_htif(DisasContext *ctx, uint32_t opc, uint64_t 
addr, int rd, int rs1) {
+    TCGv source1, csr_store, htif_addr;
+    source1 = tcg_temp_new();
+    csr_store = tcg_temp_new();
+    htif_addr = tcg_temp_new();
+    gen_get_gpr(source1, rs1); // load rs1 val
+    tcg_gen_movi_tl(htif_addr, addr);
+    tcg_gen_qemu_ld64(csr_store, htif_addr, ctx->mem_idx); // get htif "reg" 
val
+
+    switch (opc) {
+
+    case OPC_RISC_CSRRW:
+        break;
+    case OPC_RISC_CSRRS:
+        tcg_gen_or_tl(source1, csr_store, source1);
+        break;
+    case OPC_RISC_CSRRC:
+        tcg_gen_not_tl(source1, source1);
+        tcg_gen_and_tl(source1, csr_store, source1);
+        break;
+    case OPC_RISC_CSRRWI:
+        tcg_gen_movi_tl(source1, rs1);
+        break;
+    case OPC_RISC_CSRRSI:
+        tcg_gen_ori_tl(source1, csr_store, rs1);
+        break;
+    case OPC_RISC_CSRRCI:
+        tcg_gen_andi_tl(source1, csr_store, ~((uint64_t)rs1));
+        break;
+    default:
+        kill_unknown(ctx, NEW_RISCV_EXCP_ILLEGAL_INST);
+        break;
+
+    }
+    tcg_gen_qemu_st64(source1, htif_addr, ctx->mem_idx);
+    gen_set_gpr(rd, csr_store);
+    tcg_temp_free(source1);
+    tcg_temp_free(csr_store);
+    tcg_temp_free(htif_addr);
+}
+
+inline static void gen_system(DisasContext *ctx, uint32_t opc,
+                      int rd, int rs1, int csr)
+{
+    // get index into csr array
+    int backup_csr = csr;
+
+    if (csr == NEW_CSR_MTOHOST) {
+        gen_csr_htif(ctx, opc, 0xFFFFFFFFF0000000L, rd, rs1);
+        return;
+    } else if (csr == NEW_CSR_MFROMHOST) {
+        gen_csr_htif(ctx, opc, 0xFFFFFFFFF0000008L, rd, rs1);
+        return;
+    }
+
+    TCGv source1, csr_store, dest;
+    source1 = tcg_temp_new();
+    csr_store = tcg_temp_new();
+    dest = tcg_temp_new();
+    gen_get_gpr(source1, rs1);
+    tcg_gen_movi_tl(csr_store, csr); // copy into temp reg to feed to helper
+
+    switch (opc) {
+
+    case OPC_RISC_ECALL:
+        switch (backup_csr) {
+            case 0x0: // ECALL
+                // always generates U-level ECALL, fixed in do_interrupt 
handler
+                generate_exception(ctx, NEW_RISCV_EXCP_U_ECALL);
+                tcg_gen_exit_tb(0); // no chaining
+                ctx->bstate = BS_BRANCH;
+                break;
+            case 0x1: // EBREAK
+                generate_exception(ctx, NEW_RISCV_EXCP_BREAKPOINT);
+                tcg_gen_exit_tb(0); // no chaining
+                ctx->bstate = BS_BRANCH;
+                break;
+            case 0x100: // ERET
+                // temporarily added second cpu_PC for debug
+                tcg_gen_movi_tl(cpu_PC, ctx->pc);
+                gen_helper_sret(cpu_PC, cpu_env, cpu_PC);
+                tcg_gen_exit_tb(0); // no chaining
+                ctx->bstate = BS_BRANCH;
+                break;
+            case 0x305: // MRTS
+                tcg_gen_movi_tl(cpu_PC, ctx->pc); // mrts helper may cause 
misaligned exception
+                gen_helper_mrts(cpu_PC, cpu_env, cpu_PC);
+                tcg_gen_exit_tb(0); // no chaining
+                ctx->bstate = BS_BRANCH;
+                break;
+            case 0x306: // MRTH
+                printf("SYSTEM INST NOT YET IMPLEMENTED 0x%x\n", backup_csr);
+                exit(1);
+                break;
+            case 0x205: // HRTS
+                printf("SYSTEM INST NOT YET IMPLEMENTED 0x%x\n", backup_csr);
+                exit(1);
+                break;
+            case 0x102: // WFI
+                // nop for now, as in spike
+                break;
+            case 0x101: // SFENCE.VM
+                gen_helper_tlb_flush(cpu_env);
+                break;
+            default:
+                kill_unknown(ctx, NEW_RISCV_EXCP_ILLEGAL_INST);
+                break;
+        }
+        break;
+    case OPC_RISC_CSRRW:
+        tcg_gen_movi_tl(cpu_PC, ctx->pc);
+        gen_helper_csrrw(dest, cpu_env, source1, csr_store, cpu_PC);
+        gen_set_gpr(rd, dest);
+        // end tb since we may be changing priv modes
+        tcg_gen_movi_tl(cpu_PC, ctx->pc + 4);
+        tcg_gen_exit_tb(0); // no chaining
+        ctx->bstate = BS_BRANCH;
+        break;
+    case OPC_RISC_CSRRS:
+        tcg_gen_movi_tl(cpu_PC, ctx->pc);
+        gen_helper_csrrs(dest, cpu_env, source1, csr_store, cpu_PC);
+        gen_set_gpr(rd, dest);
+        // end tb since we may be changing priv modes
+        tcg_gen_movi_tl(cpu_PC, ctx->pc + 4);
+        tcg_gen_exit_tb(0); // no chaining
+        ctx->bstate = BS_BRANCH;
+        break;
+    case OPC_RISC_CSRRC:
+        tcg_gen_movi_tl(cpu_PC, ctx->pc);
+        gen_helper_csrrc(dest, cpu_env, source1, csr_store, cpu_PC);
+        gen_set_gpr(rd, dest);
+        // end tb since we may be changing priv modes
+        tcg_gen_movi_tl(cpu_PC, ctx->pc + 4);
+        tcg_gen_exit_tb(0); // no chaining
+        ctx->bstate = BS_BRANCH;
+        break;
+    case OPC_RISC_CSRRWI:
+        {
+            tcg_gen_movi_tl(cpu_PC, ctx->pc);
+            TCGv imm_rs1 = tcg_temp_new();
+            tcg_gen_movi_tl(imm_rs1, rs1);
+            gen_helper_csrrw(dest, cpu_env, imm_rs1, csr_store, cpu_PC);
+            gen_set_gpr(rd, dest);
+            tcg_temp_free(imm_rs1);
+            // end tb since we may be changing priv modes
+            tcg_gen_movi_tl(cpu_PC, ctx->pc + 4);
+            tcg_gen_exit_tb(0); // no chaining
+            ctx->bstate = BS_BRANCH;
+        }
+        break;
+    case OPC_RISC_CSRRSI:
+        {
+            tcg_gen_movi_tl(cpu_PC, ctx->pc);
+            TCGv imm_rs1 = tcg_temp_new();
+            tcg_gen_movi_tl(imm_rs1, rs1);
+            gen_helper_csrrsi(dest, cpu_env, imm_rs1, csr_store, cpu_PC);
+            gen_set_gpr(rd, dest);
+            tcg_temp_free(imm_rs1);
+            // end tb since we may be changing priv modes
+            tcg_gen_movi_tl(cpu_PC, ctx->pc + 4);
+            tcg_gen_exit_tb(0); // no chaining
+            ctx->bstate = BS_BRANCH;
+        }
+        break;
+    case OPC_RISC_CSRRCI:
+        {
+            tcg_gen_movi_tl(cpu_PC, ctx->pc);
+            TCGv imm_rs1 = tcg_temp_new();
+            tcg_gen_movi_tl(imm_rs1, rs1);
+            gen_helper_csrrc(dest, cpu_env, imm_rs1, csr_store, cpu_PC);
+            gen_set_gpr(rd, dest);
+            tcg_temp_free(imm_rs1);
+            // end tb since we may be changing priv modes
+            tcg_gen_movi_tl(cpu_PC, ctx->pc + 4);
+            tcg_gen_exit_tb(0); // no chaining
+            ctx->bstate = BS_BRANCH;
+        }
+        break;
+    default:
+        kill_unknown(ctx, NEW_RISCV_EXCP_ILLEGAL_INST);
+        break;
+
+    }
+    tcg_temp_free(source1);
+    tcg_temp_free(dest);
+    tcg_temp_free(csr_store);
+}
+
+
+inline static void gen_fp_load(DisasContext *ctx, uint32_t opc,
+                      int rd, int rs1, int16_t imm)
+{
+    target_long uimm = (target_long)imm; /* sign ext 16->64 bits */
+
+    TCGv t0 = tcg_temp_new();
+    gen_get_gpr(t0, rs1);
+    tcg_gen_addi_tl(t0, t0, uimm);
+
+    switch (opc) {
+
+    case OPC_RISC_FLW:
+        tcg_gen_qemu_ld32u(t0, t0, ctx->mem_idx);
+        break;
+    case OPC_RISC_FLD:
+        tcg_gen_qemu_ld64(t0, t0, ctx->mem_idx);
+        break;
+    default:
+        kill_unknown(ctx, NEW_RISCV_EXCP_ILLEGAL_INST);
+        break;
+
+    }
+    tcg_gen_mov_tl(cpu_fpr[rd], t0); // probably can just get rid of this and
+                                     // store directly to cpu_fpr[rd]
+    tcg_temp_free(t0);
+}
+
+inline static void gen_fp_store(DisasContext *ctx, uint32_t opc,
+                      int rs1, int rs2, int16_t imm)
+{
+    target_long uimm = (target_long)imm; /* sign ext 16->64 bits */
+
+    TCGv t0 = tcg_temp_new();
+    TCGv dat = tcg_temp_new();
+    gen_get_gpr(t0, rs1);
+    tcg_gen_addi_tl(t0, t0, uimm);
+    tcg_gen_mov_tl(dat, cpu_fpr[rs2]);
+
+    switch (opc) {
+
+    case OPC_RISC_FSW:
+        tcg_gen_qemu_st32(dat, t0, ctx->mem_idx);
+        break;
+    case OPC_RISC_FSD:
+        tcg_gen_qemu_st64(dat, t0, ctx->mem_idx);
+        break;
+
+    default:
+        kill_unknown(ctx, NEW_RISCV_EXCP_ILLEGAL_INST);
+        break;
+    }
+
+    tcg_temp_free(t0);
+    tcg_temp_free(dat);
+}
+
+inline static void gen_fp_fmadd(DisasContext *ctx, uint32_t opc,
+                    int rd, int rs1, int rs2, int rs3, int rm)
+{
+    TCGv rm_reg = tcg_temp_new();
+    tcg_gen_movi_tl(rm_reg, rm);
+
+    switch (opc) {
+
+    case OPC_RISC_FMADD_S:
+        gen_helper_fmadd_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2], 
cpu_fpr[rs3], rm_reg);
+        break;
+    case OPC_RISC_FMADD_D:
+        gen_helper_fmadd_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2], 
cpu_fpr[rs3], rm_reg);
+        break;
+    default:
+        kill_unknown(ctx, NEW_RISCV_EXCP_ILLEGAL_INST);
+        break;
+    }
+    tcg_temp_free(rm_reg);
+
+}
+
+inline static void gen_fp_fmsub(DisasContext *ctx, uint32_t opc,
+                    int rd, int rs1, int rs2, int rs3, int rm)
+{
+    TCGv rm_reg = tcg_temp_new();
+    tcg_gen_movi_tl(rm_reg, rm);
+
+    switch (opc) {
+
+    case OPC_RISC_FMSUB_S:
+        gen_helper_fmsub_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2], 
cpu_fpr[rs3], rm_reg);
+        break;
+    case OPC_RISC_FMSUB_D:
+        gen_helper_fmsub_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2], 
cpu_fpr[rs3], rm_reg);
+        break;
+    default:
+        kill_unknown(ctx, NEW_RISCV_EXCP_ILLEGAL_INST);
+        break;
+    }
+    tcg_temp_free(rm_reg);
+}
+
+inline static void gen_fp_fnmsub(DisasContext *ctx, uint32_t opc,
+                    int rd, int rs1, int rs2, int rs3, int rm)
+{
+    TCGv rm_reg = tcg_temp_new();
+    tcg_gen_movi_tl(rm_reg, rm);
+
+    switch (opc) {
+
+    case OPC_RISC_FNMSUB_S:
+        gen_helper_fnmsub_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2], 
cpu_fpr[rs3], rm_reg);
+        break;
+    case OPC_RISC_FNMSUB_D:
+        gen_helper_fnmsub_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2], 
cpu_fpr[rs3], rm_reg);
+        break;
+    default:
+        kill_unknown(ctx, NEW_RISCV_EXCP_ILLEGAL_INST);
+        break;
+    }
+    tcg_temp_free(rm_reg);
+}
+
+inline static void gen_fp_fnmadd(DisasContext *ctx, uint32_t opc,
+                    int rd, int rs1, int rs2, int rs3, int rm)
+{
+    TCGv rm_reg = tcg_temp_new();
+    tcg_gen_movi_tl(rm_reg, rm);
+
+    switch (opc) {
+
+    case OPC_RISC_FNMADD_S:
+        gen_helper_fnmadd_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2], 
cpu_fpr[rs3], rm_reg);
+        break;
+    case OPC_RISC_FNMADD_D:
+        gen_helper_fnmadd_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2], 
cpu_fpr[rs3], rm_reg);
+        break;
+    default:
+        kill_unknown(ctx, NEW_RISCV_EXCP_ILLEGAL_INST);
+        break;
+    }
+    tcg_temp_free(rm_reg);
+}
+
// Generate code for the OP-FP major opcode: single- and double-precision
// arithmetic, sign-injection, min/max, compares, conversions, and FP<->GPR
// moves. Several encodings reuse the rm (funct3) field as a sub-opcode
// selector, and the conversion instructions reuse rs2 the same way; for the
// true arithmetic ops, rm is the rounding mode forwarded to the helper.
inline static void gen_fp_arith(DisasContext *ctx, uint32_t opc,
                    int rd, int rs1, int rs2, int rm)
{
    TCGv rm_reg = tcg_temp_new();        // rounding-mode operand for helpers
    TCGv write_int_rd = tcg_temp_new();  // staging for results bound for a GPR
    tcg_gen_movi_tl(rm_reg, rm);

    switch (opc) {
    case OPC_RISC_FADD_S:
        gen_helper_fadd_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2], rm_reg);
        break;
    case OPC_RISC_FSUB_S:
        gen_helper_fsub_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2], rm_reg);
        break;
    case OPC_RISC_FMUL_S:
        gen_helper_fmul_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2], rm_reg);
        break;
    case OPC_RISC_FDIV_S:
        gen_helper_fdiv_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2], rm_reg);
        break;
    case OPC_RISC_FSGNJ_S:
        // also handles: OPC_RISC_FSGNJN_S, OPC_RISC_FSGNJX_S
        // rm acts as sub-opcode: 0 = SGNJ, 1 = SGNJN, 2 = SGNJX
        // TODO: probably don't need to use helpers here
        if (rm == 0x0) {
            gen_helper_fsgnj_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        } else if (rm == 0x1) {
            gen_helper_fsgnjn_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        } else if (rm == 0x2) {
            gen_helper_fsgnjx_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        } else {
            kill_unknown(ctx, NEW_RISCV_EXCP_ILLEGAL_INST);
        }
        break;
    case OPC_RISC_FMIN_S:
        // also handles: OPC_RISC_FMAX_S (rm: 0 = MIN, 1 = MAX)
        if (rm == 0x0) {
            gen_helper_fmin_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        } else if (rm == 0x1) {
            gen_helper_fmax_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        } else {
            kill_unknown(ctx, NEW_RISCV_EXCP_ILLEGAL_INST);
        }
        break;

    case OPC_RISC_FSQRT_S:
        gen_helper_fsqrt_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], rm_reg);
        break;

    case OPC_RISC_FEQ_S:
        // also handles: OPC_RISC_FLT_S, OPC_RISC_FLE_S
        // rm is the funct3 sub-opcode: 0 = FLE, 1 = FLT, 2 = FEQ;
        // comparison result (0/1) is written to integer register rd
        if (rm == 0x0) {
            gen_helper_fle_s(write_int_rd, cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        } else if (rm == 0x1) {
            gen_helper_flt_s(write_int_rd, cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        } else if (rm == 0x2) {
            gen_helper_feq_s(write_int_rd, cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        } else {
            kill_unknown(ctx, NEW_RISCV_EXCP_ILLEGAL_INST);
        }
        gen_set_gpr(rd, write_int_rd);
        break;

    case OPC_RISC_FCVT_W_S:
        // also OPC_RISC_FCVT_WU_S, OPC_RISC_FCVT_L_S, OPC_RISC_FCVT_LU_S
        // rs2 selects the target integer type
        if (rs2 == 0x0) { // FCVT_W_S
            gen_helper_fcvt_w_s(write_int_rd, cpu_env, cpu_fpr[rs1], rm_reg);
        } else if (rs2 == 0x1) { // FCVT_WU_S
            gen_helper_fcvt_wu_s(write_int_rd, cpu_env, cpu_fpr[rs1], rm_reg);
        } else if (rs2 == 0x2) { // FCVT_L_S
            gen_helper_fcvt_l_s(write_int_rd, cpu_env, cpu_fpr[rs1], rm_reg);
        } else if (rs2 == 0x3) { // FCVT_LU_S
            gen_helper_fcvt_lu_s(write_int_rd, cpu_env, cpu_fpr[rs1], rm_reg);
        } else {
            kill_unknown(ctx, NEW_RISCV_EXCP_ILLEGAL_INST);
        }
        gen_set_gpr(rd, write_int_rd);
        break;

    case OPC_RISC_FCVT_S_W:
        // also OPC_RISC_FCVT_S_WU, OPC_RISC_FCVT_S_L, OPC_RISC_FCVT_S_LU
        // rs2 selects the source integer type
        gen_get_gpr(write_int_rd, rs1);
        if (rs2 == 0) { // FCVT_S_W
            gen_helper_fcvt_s_w(cpu_fpr[rd], cpu_env, write_int_rd, rm_reg);
        } else if (rs2 == 0x1) { // FCVT_S_WU
            gen_helper_fcvt_s_wu(cpu_fpr[rd], cpu_env, write_int_rd, rm_reg);
        } else if (rs2 == 0x2) { // FCVT_S_L
            gen_helper_fcvt_s_l(cpu_fpr[rd], cpu_env, write_int_rd, rm_reg);
        } else if (rs2 == 0x3) { // FCVT_S_LU
            gen_helper_fcvt_s_lu(cpu_fpr[rd], cpu_env, write_int_rd, rm_reg);
        } else {
            kill_unknown(ctx, NEW_RISCV_EXCP_ILLEGAL_INST);
        }
        break;

    case OPC_RISC_FMV_X_S:
        // also OPC_RISC_FCLASS_S (rm: 0 = FMV, 1 = FCLASS)
        if (rm == 0x0) { // FMV
            // sign-extend the low 32 bits of the FP register into rd
            tcg_gen_ext32s_tl(write_int_rd, cpu_fpr[rs1]);
        } else if (rm == 0x1) {
            gen_helper_fclass_s(write_int_rd, cpu_env, cpu_fpr[rs1]);
        } else {
            kill_unknown(ctx, NEW_RISCV_EXCP_ILLEGAL_INST);
        }
        gen_set_gpr(rd, write_int_rd);
        break;

    case OPC_RISC_FMV_S_X:
        // raw bit move GPR -> FPR; NOTE(review): copies all 64 bits, no
        // NaN-boxing of the upper half -- confirm against the FP helpers'
        // expectations for single-precision values
        gen_get_gpr(write_int_rd, rs1);
        tcg_gen_mov_tl(cpu_fpr[rd], write_int_rd);
        break;

    // double
    case OPC_RISC_FADD_D:
        gen_helper_fadd_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2], rm_reg);
        break;
    case OPC_RISC_FSUB_D:
        gen_helper_fsub_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2], rm_reg);
        break;
    case OPC_RISC_FMUL_D:
        gen_helper_fmul_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2], rm_reg);
        break;
    case OPC_RISC_FDIV_D:
        gen_helper_fdiv_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2], rm_reg);
        break;
    case OPC_RISC_FSGNJ_D:
        // also OPC_RISC_FSGNJN_D, OPC_RISC_FSGNJX_D (rm as sub-opcode)
        if (rm == 0x0) {
            gen_helper_fsgnj_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        } else if (rm == 0x1) {
            gen_helper_fsgnjn_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        } else if (rm == 0x2) {
            gen_helper_fsgnjx_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        } else {
            kill_unknown(ctx, NEW_RISCV_EXCP_ILLEGAL_INST);
        }
        break;
    case OPC_RISC_FMIN_D:
        // also OPC_RISC_FMAX_D (rm: 0 = MIN, 1 = MAX)
        if (rm == 0x0) {
            gen_helper_fmin_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        } else if (rm == 0x1) {
            gen_helper_fmax_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        } else {
            kill_unknown(ctx, NEW_RISCV_EXCP_ILLEGAL_INST);
        }
        break;
    case OPC_RISC_FCVT_S_D:
        // narrow double -> single; rs2 must encode the source type (1 = D)
        if (rs2 == 0x1) {
            gen_helper_fcvt_s_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], rm_reg);
        } else {
            kill_unknown(ctx, NEW_RISCV_EXCP_ILLEGAL_INST);
        }
        break;
    case OPC_RISC_FCVT_D_S:
        // widen single -> double; rs2 must encode the source type (0 = S)
        if (rs2 == 0x0) {
            gen_helper_fcvt_d_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], rm_reg);
        } else {
            kill_unknown(ctx, NEW_RISCV_EXCP_ILLEGAL_INST);
        }
        break;
    case OPC_RISC_FSQRT_D:
        gen_helper_fsqrt_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], rm_reg);
        break;
    case OPC_RISC_FEQ_D:
        // also OPC_RISC_FLT_D, OPC_RISC_FLE_D (rm: 0 = FLE, 1 = FLT, 2 = FEQ)
        if (rm == 0x0) {
            gen_helper_fle_d(write_int_rd, cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        } else if (rm == 0x1) {
            gen_helper_flt_d(write_int_rd, cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        } else if (rm == 0x2) {
            gen_helper_feq_d(write_int_rd, cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        } else {
            kill_unknown(ctx, NEW_RISCV_EXCP_ILLEGAL_INST);
        }
        gen_set_gpr(rd, write_int_rd);
        break;
    case OPC_RISC_FCVT_W_D:
        // also OPC_RISC_FCVT_WU_D, OPC_RISC_FCVT_L_D, OPC_RISC_FCVT_LU_D
        // rs2 selects the target integer type
        if (rs2 == 0x0) {
            gen_helper_fcvt_w_d(write_int_rd, cpu_env, cpu_fpr[rs1], rm_reg);
        } else if (rs2 == 0x1) {
            gen_helper_fcvt_wu_d(write_int_rd, cpu_env, cpu_fpr[rs1], rm_reg);
        } else if (rs2 == 0x2) {
            gen_helper_fcvt_l_d(write_int_rd, cpu_env, cpu_fpr[rs1], rm_reg);
        } else if (rs2 == 0x3) {
            gen_helper_fcvt_lu_d(write_int_rd, cpu_env, cpu_fpr[rs1], rm_reg);
        } else {
            kill_unknown(ctx, NEW_RISCV_EXCP_ILLEGAL_INST);
        }
        gen_set_gpr(rd, write_int_rd);
        break;
    case OPC_RISC_FCVT_D_W:
        // also OPC_RISC_FCVT_D_WU, OPC_RISC_FCVT_D_L, OPC_RISC_FCVT_D_LU
        // rs2 selects the source integer type
        gen_get_gpr(write_int_rd, rs1);
        if (rs2 == 0x0) {
            gen_helper_fcvt_d_w(cpu_fpr[rd], cpu_env, write_int_rd, rm_reg);
        } else if (rs2 == 0x1) {
            gen_helper_fcvt_d_wu(cpu_fpr[rd], cpu_env, write_int_rd, rm_reg);
        } else if (rs2 == 0x2) {
            gen_helper_fcvt_d_l(cpu_fpr[rd], cpu_env, write_int_rd, rm_reg);
        } else if (rs2 == 0x3) {
            gen_helper_fcvt_d_lu(cpu_fpr[rd], cpu_env, write_int_rd, rm_reg);
        } else {
            kill_unknown(ctx, NEW_RISCV_EXCP_ILLEGAL_INST);
        }
        break;
    case OPC_RISC_FMV_X_D:
        // also OPC_RISC_FCLASS_D (rm: 0 = FMV, 1 = FCLASS)
        if (rm == 0x0) { // FMV
            // raw 64-bit move FPR -> GPR
            tcg_gen_mov_tl(write_int_rd, cpu_fpr[rs1]);
        } else if (rm == 0x1) {
            gen_helper_fclass_d(write_int_rd, cpu_env, cpu_fpr[rs1]);
        } else {
            kill_unknown(ctx, NEW_RISCV_EXCP_ILLEGAL_INST);
        }
        gen_set_gpr(rd, write_int_rd);
        break;
    case OPC_RISC_FMV_D_X:
        // raw 64-bit move GPR -> FPR
        gen_get_gpr(write_int_rd, rs1);
        tcg_gen_mov_tl(cpu_fpr[rd], write_int_rd);
        break;
    default:
        kill_unknown(ctx, NEW_RISCV_EXCP_ILLEGAL_INST);
        break;
    }
    tcg_temp_free(rm_reg);
    tcg_temp_free(write_int_rd);
}
+
#ifdef RISCV_DEBUG_PRINT
// Sticky tracing flag: once decode_opc's debug path fires, it sets this so
// every subsequently translated instruction is traced as well.
int monitor_region = 0;
#endif
+
+static void decode_opc (CPURISCVState *env, DisasContext *ctx)
+{
+
+    int rs1;
+    int rs2;
+    int rd;
+    uint32_t op;
+    int16_t imm;
+    target_long ubimm;
+
+    // do not do misaligned address check here, address should never be
+    // misaligned
+    //
+    // instead, all control flow instructions check for themselves
+    //
+    // this is because epc must be the address of the control flow instruction
+    // that "caused" to the misaligned instruction access
+    //
+    // we leave this check here for now, since not all control flow
+    // instructions have been updated yet
+
+    /* make sure instructions are on a word boundary */
+    if (unlikely(ctx->pc & 0x3)) {
+        printf("addr misaligned\n");
+        printf("misaligned instruction, not completely implemented for 
riscv\n");
+        exit(1);
+        return;
+    }
+
+    op = MASK_OP_MAJOR(ctx->opcode);
+    rs1 = (ctx->opcode >> 15) & 0x1f;
+    rs2 = (ctx->opcode >> 20) & 0x1f;
+    rd = (ctx->opcode >> 7) & 0x1f;
+    imm = (int16_t)(((int32_t)ctx->opcode) >> 20); /* sign extends */
+
+#ifdef RISCV_DEBUG_PRINT
+    // this will print a log similar to spike, should be left off unless
+    // you're debugging QEMU
+    int start = 1; //0 && ctx->pc == 0x8ccac;
+    TCGv print_helper_tmp = tcg_temp_local_new();
+    TCGv printpc = tcg_temp_local_new();
+    tcg_gen_movi_tl(print_helper_tmp, ctx->opcode);
+    tcg_gen_movi_tl(printpc, ctx->pc);
+
+    if (monitor_region || start) {
+        gen_helper_debug_print(cpu_env, printpc, print_helper_tmp);
+        monitor_region = 1;
+
+        // can print some reg val too
+        gen_helper_debug_print(cpu_env, cpu_fpr[28], cpu_fpr[28]);
+
+    }
+    tcg_temp_free(print_helper_tmp);
+    tcg_temp_free(printpc);
+#endif
+
+    switch (op) {
+
+    case OPC_RISC_LUI:
+        if (rd == 0) {
+            break; // NOP
+        }
+        tcg_gen_movi_tl(cpu_gpr[rd], (ctx->opcode & 0xFFFFF000));
+        tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
+        break;
+    case OPC_RISC_AUIPC:
+        if (rd == 0) {
+            break; // NOP
+        }
+        tcg_gen_movi_tl(cpu_gpr[rd], (ctx->opcode & 0xFFFFF000));
+        tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
+        tcg_gen_add_tl(cpu_gpr[rd], cpu_gpr[rd], tcg_const_tl(ctx->pc));
+        break;
+    case OPC_RISC_JAL: {
+            TCGv nextpc = tcg_temp_local_new();
+            TCGv testpc = tcg_temp_local_new();
+            TCGLabel* misaligned = gen_new_label();
+            TCGLabel* done = gen_new_label();
+            ubimm = (target_long) (GET_JAL_IMM(ctx->opcode));
+            tcg_gen_movi_tl(nextpc, ctx->pc + ubimm);
+            // check misaligned:
+            tcg_gen_andi_tl(testpc, nextpc, 0x3);
+            tcg_gen_brcondi_tl(TCG_COND_NE, testpc, 0x0, misaligned);
+            if (rd != 0) {
+                tcg_gen_movi_tl(cpu_gpr[rd], 4);
+                tcg_gen_addi_tl(cpu_gpr[rd], cpu_gpr[rd], ctx->pc);
+            }
+
+#ifdef DISABLE_CHAINING_JAL
+            tcg_gen_movi_tl(cpu_PC, ctx->pc + ubimm);
+            tcg_gen_exit_tb(0);
+#else
+            gen_goto_tb(ctx, 0, ctx->pc + ubimm); // must use this for safety
+#endif
+            tcg_gen_br(done);
+            gen_set_label(misaligned);
+            // throw exception for misaligned case
+            generate_exception_mbadaddr(ctx, NEW_RISCV_EXCP_INST_ADDR_MIS);
+            gen_set_label(done);
+            ctx->bstate = BS_BRANCH;
+            tcg_temp_free(nextpc);
+            tcg_temp_free(testpc);
+        }
+        break;
+    case OPC_RISC_JALR:
+        gen_jalr(ctx, MASK_OP_JALR(ctx->opcode), rd, rs1, imm);
+        break;
+    case OPC_RISC_BRANCH:
+        gen_branch(ctx, MASK_OP_BRANCH(ctx->opcode), rs1, rs2, 
GET_B_IMM(ctx->opcode));
+        break;
+    case OPC_RISC_LOAD:
+        gen_load(ctx, MASK_OP_LOAD(ctx->opcode), rd, rs1, imm);
+        break;
+    case OPC_RISC_STORE:
+        gen_store(ctx, MASK_OP_STORE(ctx->opcode), rs1, rs2, 
GET_STORE_IMM(ctx->opcode));
+        break;
+    case OPC_RISC_ARITH_IMM:
+        if (rd == 0) {
+            break; // NOP
+        }
+        gen_arith_imm(ctx, MASK_OP_ARITH_IMM(ctx->opcode), rd, rs1, imm);
+        break;
+    case OPC_RISC_ARITH:
+        if (rd == 0) {
+            break; // NOP
+        }
+        gen_arith(ctx, MASK_OP_ARITH(ctx->opcode), rd, rs1, rs2);
+        break;
+    case OPC_RISC_ARITH_IMM_W:
+        if (rd == 0) {
+            break; // NOP
+        }
+        gen_arith_imm_w(ctx, MASK_OP_ARITH_IMM_W(ctx->opcode), rd, rs1, imm);
+        break;
+    case OPC_RISC_ARITH_W:
+        if (rd == 0) {
+            break; // NOP
+        }
+        gen_arith_w(ctx, MASK_OP_ARITH_W(ctx->opcode), rd, rs1, rs2);
+        break;
+    case OPC_RISC_FENCE:
+        // standard fence is nop
+        // fence_i flushes TB (like an icache):
+        if (ctx->opcode & 0x1000) { // FENCE_I
+            gen_helper_fence_i(cpu_env);
+            tcg_gen_movi_tl(cpu_PC, ctx->pc + 4);
+            tcg_gen_exit_tb(0); // no chaining
+            ctx->bstate = BS_BRANCH;
+        }
+        break;
+    case OPC_RISC_SYSTEM:
+        gen_system(ctx, MASK_OP_SYSTEM(ctx->opcode), rd, rs1, (ctx->opcode & 
0xFFF00000) >> 20);
+        break;
+    case OPC_RISC_ATOMIC:
+        gen_atomic(ctx, MASK_OP_ATOMIC(ctx->opcode), rd, rs1, rs2);
+        break;
+    case OPC_RISC_FP_LOAD:
+        gen_fp_load(ctx, MASK_OP_FP_LOAD(ctx->opcode), rd, rs1, imm);
+        break;
+    case OPC_RISC_FP_STORE:
+        gen_fp_store(ctx, MASK_OP_FP_STORE(ctx->opcode), rs1, rs2, 
GET_STORE_IMM(ctx->opcode));
+        break;
+    case OPC_RISC_FMADD:
+        gen_fp_fmadd(ctx, MASK_OP_FP_FMADD(ctx->opcode), rd, rs1, rs2, 
GET_RS3(ctx->opcode), GET_RM(ctx->opcode));
+        break;
+    case OPC_RISC_FMSUB:
+        gen_fp_fmsub(ctx, MASK_OP_FP_FMSUB(ctx->opcode), rd, rs1, rs2, 
GET_RS3(ctx->opcode), GET_RM(ctx->opcode));
+        break;
+    case OPC_RISC_FNMSUB:
+        gen_fp_fnmsub(ctx, MASK_OP_FP_FNMSUB(ctx->opcode), rd, rs1, rs2, 
GET_RS3(ctx->opcode), GET_RM(ctx->opcode));
+        break;
+    case OPC_RISC_FNMADD:
+        gen_fp_fnmadd(ctx, MASK_OP_FP_FNMADD(ctx->opcode), rd, rs1, rs2, 
GET_RS3(ctx->opcode), GET_RM(ctx->opcode));
+        break;
+    case OPC_RISC_FP_ARITH:
+        gen_fp_arith(ctx, MASK_OP_FP_ARITH(ctx->opcode), rd, rs1, rs2, 
GET_RM(ctx->opcode));
+        break;
+    default:
+        kill_unknown(ctx, NEW_RISCV_EXCP_ILLEGAL_INST);
+        break;
+    }
+}
+
/*
 * Translate one translation block starting at tb->pc: loop over guest
 * instructions, calling decode_opc() for each, until an instruction ends the
 * block (ctx.bstate), the guest page boundary is crossed, the op buffer
 * fills, or the insn budget is exhausted.
 *
 * NOTE(review): the search_pc parameter is currently unused; PC recovery is
 * handled by restore_state_to_opc() from the tcg_gen_insn_start() data.
 */
static inline void
gen_intermediate_code_internal(CPURISCVState *env, TranslationBlock *tb,
                               bool search_pc)
{
    RISCVCPU *cpu = riscv_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    DisasContext ctx;
    target_ulong pc_start;
    target_ulong next_page_start; // first byte after the page holding pc_start
    int num_insns;
    int max_insns;
    pc_start = tb->pc;
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    ctx.pc = pc_start;

    // once we have GDB, the rest of the translate.c implementation should be
    // ready for singlestep
    ctx.singlestep_enabled = cs->singlestep_enabled;

    ctx.tb = tb;
    ctx.bstate = BS_NONE;

    // restore_cpu_state?

    ctx.mem_idx = cpu_mmu_index(env, false);
    num_insns = 0;
    // cap the number of instructions per TB at the cflags budget (or the
    // TCG maximum if no budget was requested)
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }
    gen_tb_start(tb);

    while (ctx.bstate == BS_NONE) {
        tcg_gen_insn_start(ctx.pc); // record pc for restore_state_to_opc()
        num_insns++;

        if (unlikely(cpu_breakpoint_test(cs, ctx.pc, BP_ANY))) {
            tcg_gen_movi_tl(cpu_PC, ctx.pc);
            ctx.bstate = BS_BRANCH;
            gen_helper_raise_exception_debug(cpu_env);
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order to for it to be
               properly cleared -- thus we increment the PC here so that
               the logic setting tb->size below does the right thing.  */
            ctx.pc += 4;
            goto done_generating;
        }

        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        ctx.opcode = cpu_ldl_code(env, ctx.pc);
        decode_opc(env, &ctx);
        ctx.pc += 4; // all instructions are 32-bit here (no compressed insns)

        if (cs->singlestep_enabled) {
            break;
        }
        if (ctx.pc >= next_page_start) {
            break; // never let a TB span a guest page boundary
        }
        if (tcg_op_buf_full()) {
            break;
        }
        if (num_insns >= max_insns) {
            break;
        }
        if(singlestep) {
            break; // -singlestep command line option: one insn per TB
        }

    }
    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }
    if (cs->singlestep_enabled && ctx.bstate != BS_BRANCH) {
        if (ctx.bstate == BS_NONE) {
            tcg_gen_movi_tl(cpu_PC, ctx.pc); // NOT PC+4, that was already done
        }
        gen_helper_raise_exception_debug(cpu_env);
    } else {
        switch (ctx.bstate) {
            case BS_STOP:
                gen_goto_tb(&ctx, 0, ctx.pc);
                break;
            case BS_NONE:
                // DO NOT CHAIN. This is for END-OF-PAGE. See gen_goto_tb.
                tcg_gen_movi_tl(cpu_PC, ctx.pc); // NOT PC+4, that was already done
                tcg_gen_exit_tb(0);
                break;
            case BS_BRANCH:
                // anything using BS_BRANCH will have generated its own exit seq
            default:
                break;
        }
    }
done_generating:
    gen_tb_end(tb, num_insns);
    tb->size = ctx.pc - pc_start;
    tb->icount = num_insns;

/*
#ifdef DEBUG_DISAS // TODO: riscv disassembly
    LOG_DISAS("\n");
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, ctx.pc - pc_start, 0);
        qemu_log("\n");
    }
#endif*/
}
+
/* Public translation entry point: translate the TB at tb->pc.
   Always passes search_pc = false; PC recovery goes through
   restore_state_to_opc() instead. */
void gen_intermediate_code (CPURISCVState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, false);
}
+
+void riscv_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
+                         int flags)
+{
+    RISCVCPU *cpu = RISCV_CPU(cs);
+    CPURISCVState *env = &cpu->env;
+    int i;
+
+    cpu_fprintf(f, "pc=0x" TARGET_FMT_lx "\n", env->active_tc.PC);
+    for (i = 0; i < 32; i++) {
+        cpu_fprintf(f, " %s " TARGET_FMT_lx, regnames[i], 
env->active_tc.gpr[i]);
+        if ((i & 3) == 3) {
+            cpu_fprintf(f, "\n");
+        }
+    }
+
+    cpu_fprintf(f, " %s " TARGET_FMT_lx "\n", "MSTATUS ", 
env->csr[NEW_CSR_MSTATUS]);
+    cpu_fprintf(f, " %s " TARGET_FMT_lx "\n", "MTIMECMP", 
env->csr[NEW_CSR_MTIMECMP]);
+    cpu_fprintf(f, " %s " TARGET_FMT_lx "\n", "MTIME   ", 
cpu_riscv_read_mtime(env));
+    cpu_fprintf(f, " %s " TARGET_FMT_lx "\n", "TIME    ", 
cpu_riscv_read_time(env));
+
+    cpu_fprintf(f, " %s " TARGET_FMT_lx "\n", "MIP     ", 
env->csr[NEW_CSR_MIP]);
+
+    cpu_fprintf(f, " %s " TARGET_FMT_lx "\n", "MIE     ", 
env->csr[NEW_CSR_MIE]);
+
+    for (i = 0; i < 32; i++) {
+        if ((i & 3) == 0) {
+            cpu_fprintf(f, "FPR%02d:", i);
+        }
+        cpu_fprintf(f, " %s " TARGET_FMT_lx, fpr_regnames[i], 
env->active_tc.fpr[i]);
+        if ((i & 3) == 3) {
+            cpu_fprintf(f, "\n");
+        }
+    }
+}
+
+void riscv_tcg_init(void)
+{
+    int i;
+    static int inited;
+
+    /* Initialize various static tables. */
+    if (inited) {
+        return;
+    }
+
+    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
+
+    // WARNING: cpu_gpr[0] is not allocated ON PURPOSE. Do not use it.
+    // Use the gen_set_gpr and gen_get_gpr helper functions when accessing
+    // registers, unless you specifically block reads/writes to reg 0
+    TCGV_UNUSED(cpu_gpr[0]);
+    for (i = 1; i < 32; i++) {
+        cpu_gpr[i] = tcg_global_mem_new(TCG_AREG0,
+                                        offsetof(CPURISCVState, 
active_tc.gpr[i]),
+                                        regnames[i]);
+    }
+
+    for (i = 0; i < 32; i++) {
+        cpu_fpr[i] = tcg_global_mem_new(TCG_AREG0,
+                                        offsetof(CPURISCVState, 
active_tc.fpr[i]),
+                                        fpr_regnames[i]);
+    }
+
+    cpu_PC = tcg_global_mem_new(TCG_AREG0,
+                                offsetof(CPURISCVState, active_tc.PC), "PC");
+
+    load_reservation = tcg_global_mem_new(TCG_AREG0,
+                     offsetof(CPURISCVState, active_tc.load_reservation),
+                     "load_reservation");
+    inited = 1;
+}
+
+#include "translate_init.c"
+
+RISCVCPU *cpu_riscv_init(const char *cpu_model)
+{
+    RISCVCPU *cpu;
+    CPURISCVState *env;
+    const riscv_def_t *def;
+
+    def = cpu_riscv_find_by_name(cpu_model);
+    if (!def)
+        return NULL;
+    cpu = RISCV_CPU(object_new(TYPE_RISCV_CPU));
+    env = &cpu->env;
+    env->cpu_model = def;
+
+    memset(env->csr, 0, 4096*sizeof(uint64_t));
+
+    // init mstatus
+    target_ulong start_mstatus = 0;
+    start_mstatus = set_field(start_mstatus, MSTATUS_PRV, PRV_M);
+    start_mstatus = set_field(start_mstatus, MSTATUS_PRV1, PRV_U);
+    start_mstatus = set_field(start_mstatus, MSTATUS_PRV2, PRV_U);
+    env->csr[NEW_CSR_MSTATUS] = start_mstatus;
+
+    // set mcpuid from def
+    env->csr[NEW_CSR_MCPUID] = def->init_mcpuid_reg;
+    object_property_set_bool(OBJECT(cpu), true, "realized", NULL);
+
+    return cpu;
+}
+
+void cpu_state_reset(CPURISCVState *env)
+{
+    RISCVCPU *cpu = riscv_env_get_cpu(env);
+    CPUState *cs = CPU(cpu);
+
+    env->active_tc.PC = RISCV_START_PC; // STARTING PC VALUE def'd in cpu.h
+    cs->exception_index = EXCP_NONE;
+}
+
/* Recover the guest PC after a TB exit: data[0] is the pc recorded by
   tcg_gen_insn_start() for the faulting instruction. */
void restore_state_to_opc(CPURISCVState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->active_tc.PC = data[0];
}
diff --git a/target-riscv/translate_init.c b/target-riscv/translate_init.c
new file mode 100644
index 0000000..3d898be
--- /dev/null
+++ b/target-riscv/translate_init.c
@@ -0,0 +1,63 @@
+/*
+ * RISC-V emulation for qemu: CPU Initialization Routines.
+ *
+ * Author: Sagar Karandikar, address@hidden
+ *
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
/*
 * mcpuid bit layout: the top two bits encode the base ISA width
 * (2 = RV64I), the low 26 bits form a bitmask of supported extensions,
 * one bit per letter ('A' = bit 0 ... 'Z' = bit 25).
 *
 * Use UINT64_C: the original `2L << 62` left-shifts into the sign bit
 * of a signed long (undefined behavior, C11 6.5.7) and is outright
 * wrong on ILP32 hosts where long is only 32 bits wide.
 */
#define MCPUID_RV64I   (UINT64_C(2) << 62)
#define MCPUID_SUPER   (UINT64_C(1) << ('S' - 'A'))
#define MCPUID_USER    (UINT64_C(1) << ('U' - 'A'))
#define MCPUID_I       (UINT64_C(1) << ('I' - 'A'))
#define MCPUID_M       (UINT64_C(1) << ('M' - 'A'))
#define MCPUID_A       (UINT64_C(1) << ('A' - 'A'))
#define MCPUID_F       (UINT64_C(1) << ('F' - 'A'))
#define MCPUID_D       (UINT64_C(1) << ('D' - 'A'))
+
+struct riscv_def_t {
+    const char *name;
+    target_ulong init_mcpuid_reg;
+};
+
+/* RISC-V CPU definitions */
+static const riscv_def_t riscv_defs[] =
+{
+    {
+        .name = "riscv-generic",
+        // for now, hardcode RV64G:
+        .init_mcpuid_reg = MCPUID_RV64I | MCPUID_SUPER | /*MCPUID_USER |*/ 
MCPUID_I
+            | MCPUID_M | MCPUID_A | MCPUID_F | MCPUID_D,
+    },
+};
+
+static const riscv_def_t *cpu_riscv_find_by_name (const char *name)
+{
+    int i;
+    for (i = 0; i < ARRAY_SIZE(riscv_defs); i++) {
+        if (strcasecmp(name, riscv_defs[i].name) == 0) {
+            return &riscv_defs[i];
+        }
+    }
+    return NULL;
+}
+
+void riscv_cpu_list (FILE *f, fprintf_function cpu_fprintf)
+{
+    int i;
+    for (i = 0; i < ARRAY_SIZE(riscv_defs); i++) {
+        (*cpu_fprintf)(f, "RISCV '%s'\n", riscv_defs[i].name);
+    }
+}
-- 
2.7.1




reply via email to

[Prev in Thread] Current Thread [Next in Thread]