diff -urpN qemu-orig/configure qemu-new/configure --- qemu-orig/configure 2008-03-01 22:23:17.000000000 +0000 +++ qemu-new/configure 2008-03-22 22:54:25.000000000 +0000 @@ -47,6 +47,9 @@ case "$cpu" in "Power Macintosh"|ppc|ppc64) cpu="powerpc" ;; + parisc|parisc64) + cpu="hppa" + ;; mips) cpu="mips" ;; @@ -571,7 +574,7 @@ fi else # if cross compiling, cannot launch a program, so make a static guess -if test "$cpu" = "powerpc" -o "$cpu" = "mips" -o "$cpu" = "mips64" -o "$cpu" = "s390" -o "$cpu" = "sparc" -o "$cpu" = "sparc64" -o "$cpu" = "m68k" -o "$cpu" = "armv4b"; then +if test "$cpu" = "powerpc" -o "$cpu" = "mips" -o "$cpu" = "mips64" -o "$cpu" = "s390" -o "$cpu" = "sparc" -o "$cpu" = "sparc64" -o "$cpu" = "m68k" -o "$cpu" = "armv4b" -o "$cpu" = "hppa"; then bigendian="yes" fi @@ -833,6 +836,9 @@ elif test "$cpu" = "armv4l" ; then elif test "$cpu" = "powerpc" ; then echo "ARCH=ppc" >> $config_mak echo "#define HOST_PPC 1" >> $config_h +elif test "$cpu" = "hppa" ; then + echo "ARCH=hppa" >> $config_mak + echo "#define HOST_HPPA 1" >> $config_h elif test "$cpu" = "mips" ; then echo "ARCH=mips" >> $config_mak echo "#define HOST_MIPS 1" >> $config_h diff -urpN qemu-orig/cpu-all.h qemu-new/cpu-all.h --- qemu-orig/cpu-all.h 2008-03-22 22:43:41.000000000 +0000 +++ qemu-new/cpu-all.h 2008-03-22 22:49:35.000000000 +0000 @@ -20,7 +20,7 @@ #ifndef CPU_ALL_H #define CPU_ALL_H -#if defined(__arm__) || defined(__sparc__) || defined(__mips__) +#if defined(__arm__) || defined(__sparc__) || defined(__mips__) || defined(__hppa__) #define WORDS_ALIGNED #endif @@ -952,6 +952,15 @@ static inline int64_t cpu_get_real_ticks return val; } +#elif defined(__hppa__) + +static inline int64_t cpu_get_real_ticks(void) +{ + int val; + asm volatile ("mfctl %%cr16, %0" : "=r"(val)); + return val; +} + #elif defined(__ia64) static inline int64_t cpu_get_real_ticks(void) diff -urpN qemu-orig/cpu-exec.c qemu-new/cpu-exec.c --- qemu-orig/cpu-exec.c 2008-03-22 22:43:41.000000000 +0000 +++ qemu-new/cpu-exec.c 2008-03-22 22:55:10.000000000 +0000 @@ -657,6 +657,17 @@ int cpu_exec(CPUState *env1) "o0", "o1", "o2", "o3", "o4", "o5", "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7"); +#elif defined(__hppa__) + asm volatile ("ble 0(%%sr4,%1)\n" + "copy %%r31,%%r18\n" + "copy %%r28,%0\n" + : "=r" (T0) + : "r" (gen_func) + : "r1", "r2", "r3", "r4", "r5", "r6", "r7", + "r8", "r9", "r10", "r11", "r12", "r13", + "r18", "r19", "r20", "r21", "r22", "r23", + "r24", "r25", "r26", "r27", "r28", "r29", + "r30", "r31"); #elif defined(__arm__) asm volatile ("mov pc, %0\n\t" ".global exec_loop\n\t" @@ -1484,6 +1495,24 @@ int cpu_signal_handler(int host_signum, is_write, &uc->uc_sigmask, puc); } +#elif defined(__hppa__) + +int cpu_signal_handler(int host_signum, void *pinfo, + void *puc) +{ + struct siginfo *info = pinfo; + struct ucontext *uc = puc; + unsigned long pc; + int is_write; + + pc = uc->uc_mcontext.sc_iaoq[0]; + /* FIXME: compute is_write */ + is_write = 0; + return handle_cpu_signal(pc, (unsigned long)info->si_addr, + is_write, + &uc->uc_sigmask, puc); +} + #else #error host CPU specific signal handler needed diff -urpN qemu-orig/disas.c qemu-new/disas.c --- qemu-orig/disas.c 2007-12-25 00:26:36.000000000 +0000 +++ qemu-new/disas.c 2008-03-22 22:49:35.000000000 +0000 @@ -279,6 +279,8 @@ void disas(FILE *out, void *code, unsign print_insn = print_insn_m68k; #elif defined(__s390__) print_insn = print_insn_s390; +#elif defined(__hppa__) + print_insn = print_insn_hppa; #else fprintf(out, "0x%lx: Asm output not supported on this arch\n", 
(long) code); diff -urpN qemu-orig/dis-asm.h qemu-new/dis-asm.h --- qemu-orig/dis-asm.h 2008-02-27 15:25:24.000000000 +0000 +++ qemu-new/dis-asm.h 2008-03-22 22:49:35.000000000 +0000 @@ -157,6 +157,10 @@ enum bfd_architecture #define bfd_mach_ppc_7400 7400 bfd_arch_rs6000, /* IBM RS/6000 */ bfd_arch_hppa, /* HP PA RISC */ +#define bfd_mach_hppa10 10 +#define bfd_mach_hppa11 11 +#define bfd_mach_hppa20 20 +#define bfd_mach_hppa20w 25 bfd_arch_d10v, /* Mitsubishi D10V */ bfd_arch_z8k, /* Zilog Z8000 */ #define bfd_mach_z8001 1 diff -urpN qemu-orig/dyngen.c qemu-new/dyngen.c --- qemu-orig/dyngen.c 2008-02-27 17:53:27.000000000 +0000 +++ qemu-new/dyngen.c 2008-03-22 22:49:35.000000000 +0000 @@ -117,6 +117,13 @@ #define elf_check_arch(x) ((x) == EM_68K) #define ELF_USES_RELOCA +#elif defined(HOST_HPPA) + +#define ELF_CLASS ELFCLASS32 +#define ELF_ARCH EM_PARISC +#define elf_check_arch(x) ((x) == EM_PARISC) +#define ELF_USES_RELOCA + #elif defined(HOST_MIPS) #define ELF_CLASS ELFCLASS32 @@ -1223,7 +1230,7 @@ int get_reloc_expr(char *name, int name_ snprintf(name, name_size, "param%s", p); return 1; } else { -#ifdef HOST_SPARC +#if defined(HOST_SPARC) || defined(HOST_HPPA) if (sym_name[0] == '.') snprintf(name, name_size, "(long)(&__dot_%s)", @@ -1661,6 +1668,43 @@ void gen_code(const char *name, host_ulo error("rts expected at the end of %s", name); copy_size = p - p_start; } +#elif defined(HOST_HPPA) + { + uint8_t *p; + p = p_start; + while (p < p_end) { + uint32_t insn = get32((uint32_t *)p); + if (insn == 0x6bc23fd9 || + insn == 0x08030241 || + insn == 0x081e0243 || + (insn & 0x37de0000) == 0x37de0000 || + (insn & 0xffffc000) == 0x6fc10000) + p += 4; + else + break; + } + start_offset += p - p_start; + p_start = p; + p = p_end - 4; + + while (p > p_start) { + uint32_t insn = get32((uint32_t *)p); + if ((insn & 0xffffc000) == 0x347e0000 || + (insn & 0x0fc010e0) == 0x0fc01080 || + (insn & 0x37de0000) == 0x37de0000 || + insn == 0x48623fd9 || + insn == 0xe840c000 || + insn == 0xe840c002) + p -= 4; + else + break; + } + p += 4; + if (p <= p_start) + error("empty code for %s", name); + + copy_size = p - p_start; + } #elif defined(HOST_MIPS) || defined(HOST_MIPS64) { #define INSN_RETURN 0x03e00008 @@ -1746,7 +1790,7 @@ void gen_code(const char *name, host_ulo !strstart(sym_name, "__op_param", NULL) && !strstart(sym_name, "__op_jmp", NULL) && !strstart(sym_name, "__op_gen_label", NULL)) { -#if defined(HOST_SPARC) +#if defined(HOST_SPARC) || defined(HOST_HPPA) if (sym_name[0] == '.') { fprintf(outfile, "extern char __dot_%s __asm__(\"%s\");\n", @@ -1774,8 +1818,13 @@ void gen_code(const char *name, host_ulo } } +#ifdef __hppa__ + fprintf(outfile, " memcpy(gen_code_ptr, (void *)((char *)__canonicalize_funcptr_for_compare(%s)+%d), %d);\n", + name, (int)(start_offset - offset), copy_size); +#else fprintf(outfile, " memcpy(gen_code_ptr, (void *)((char *)&%s+%d), %d);\n", name, (int)(start_offset - offset), copy_size); +#endif /* emit code offset information */ { @@ -2581,6 +2630,82 @@ void gen_code(const char *name, host_ulo } } } +#elif defined(HOST_HPPA) + { + char relname[256]; + int type, is_label; + int addend; + int reloc_offset; + for (i = 0, rel = relocs; i < nb_relocs; i++, rel++) { + if (rel->r_offset >= start_offset && + rel->r_offset < start_offset + copy_size) { + sym_name = get_rel_sym_name(rel); + sym_name = strtab + symtab[ELF32_R_SYM(rel->r_info)].st_name; + is_label = get_reloc_expr(relname, sizeof(relname), sym_name); + type = ELF32_R_TYPE(rel->r_info); + addend = rel->r_addend; + 
reloc_offset = rel->r_offset - start_offset; + + if (is_label) { + switch (type) { + case R_PARISC_PCREL17F: + fprintf(outfile, +" tcg_out_reloc(s, gen_code_ptr + %d, %d, %s, %d);\n", + reloc_offset, type, relname, addend); + break; + default: + error("unsupported hppa label relocation (%d)", type); + } + } else { + switch (type) { + case R_PARISC_DIR21L: + fprintf(outfile, +" hppa_patch21l((uint32_t *)(gen_code_ptr + %d), %s, %d);\n", + reloc_offset, relname, addend); + break; + case R_PARISC_DIR14R: + fprintf(outfile, +" hppa_patch14r((uint32_t *)(gen_code_ptr + %d), %s, %d);\n", + reloc_offset, relname, addend); + break; + case R_PARISC_PCREL17F: + if (strstart(sym_name, "__op_gen_label", NULL)) { + fprintf(outfile, +" hppa_patch17f((uint32_t *)(gen_code_ptr + %d), %s, %d);\n", + reloc_offset, relname, addend); + } else { + fprintf(outfile, +" HPPA_RECORD_BRANCH(hppa_stubs, (uint32_t *)(gen_code_ptr + %d), %s);\n", + reloc_offset, relname); + } + break; + case R_PARISC_DPREL21L: + if (strstart(sym_name, "__op_param", &p)) + fprintf(outfile, +" hppa_load_imm21l((uint32_t *)(gen_code_ptr + %d), param%s, %d);\n", + reloc_offset, p, addend); + else + fprintf(outfile, +" hppa_patch21l_dprel((uint32_t *)(gen_code_ptr + %d), %s, %d);\n", + reloc_offset, relname, addend); + break; + case R_PARISC_DPREL14R: + if (strstart(sym_name, "__op_param", &p)) + fprintf(outfile, +" hppa_load_imm14r((uint32_t *)(gen_code_ptr + %d), param%s, %d);\n", + reloc_offset, p, addend); + else + fprintf(outfile, +" hppa_patch14r_dprel((uint32_t *)(gen_code_ptr + %d), %s, %d);\n", + reloc_offset, relname, addend); + break; + default: + error("unsupported hppa relocation (%d)", type); + } + } + } + } + } #elif defined(HOST_MIPS) || defined(HOST_MIPS64) { for (i = 0, rel = relocs; i < nb_relocs; i++, rel++) { diff -urpN qemu-orig/dyngen-exec.h qemu-new/dyngen-exec.h --- qemu-orig/dyngen-exec.h 2007-12-25 00:26:36.000000000 +0000 +++ qemu-new/dyngen-exec.h 2008-03-22 22:49:35.000000000 +0000 @@ -124,6 +124,11 @@ extern int printf(const char *, ...); #define AREG1 "r4" #define AREG2 "r5" #define AREG3 "r6" +#elif defined(__hppa__) +#define AREG0 "r17" +#define AREG1 "r14" +#define AREG2 "r15" +#define AREG3 "r16" #elif defined(__mips__) #define AREG0 "fp" #define AREG1 "s0" @@ -279,6 +284,8 @@ extern int __op_jmp0, __op_jmp1, __op_jm #elif defined(__mips__) #define EXIT_TB() asm volatile ("jr $ra") #define GOTO_LABEL_PARAM(n) asm volatile (".set noat; la $1, " ASM_NAME(__op_gen_label) #n "; jr $1; .set at") +#elif defined(__hppa__) +#define GOTO_LABEL_PARAM(n) asm volatile ("b,n " ASM_NAME(__op_gen_label) #n) #else #error unsupported CPU #endif diff -urpN qemu-orig/exec-all.h qemu-new/exec-all.h --- qemu-orig/exec-all.h 2008-03-22 22:45:51.000000000 +0000 +++ qemu-new/exec-all.h 2008-03-22 22:53:50.000000000 +0000 @@ -297,6 +297,30 @@ extern CPUWriteMemoryFunc *io_mem_write[ extern CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4]; extern void *io_mem_opaque[IO_MEM_NB_ENTRIES]; +#if defined(__hppa__) + +typedef int spinlock_t[4]; + +#define SPIN_LOCK_UNLOCKED { 1, 1, 1, 1 } + +static inline void resetlock (spinlock_t *p) +{ + (*p)[0] = (*p)[1] = (*p)[2] = (*p)[3] = 1; +} + +#else + +typedef int spinlock_t; + +#define SPIN_LOCK_UNLOCKED 0 + +static inline void resetlock (spinlock_t *p) +{ + *p = SPIN_LOCK_UNLOCKED; +} + +#endif + #if defined(__powerpc__) static inline int testandset (int *p) { @@ -396,6 +420,27 @@ static inline int testandset (int *p) : "cc","memory"); return ret; } +#elif defined(__hppa__) + +#define 
__PA_LDCW_ALIGNMENT 16 + +static inline void *ldcw_align (void *p) { + unsigned long a = (unsigned long)p; + a = (a + __PA_LDCW_ALIGNMENT - 1) & ~(__PA_LDCW_ALIGNMENT - 1); + return (void *)a; +} + +static inline int testandset (spinlock_t *p) +{ + unsigned int ret; + p = ldcw_align(p); + __asm__ __volatile__("ldcw 0(%1),%0" + : "=r" (ret) + : "r" (p) + : "memory" ); + return !ret; +} + #elif defined(__ia64) #include @@ -428,15 +473,6 @@ static inline int testandset (int *p) #error unimplemented CPU support #endif -typedef int spinlock_t; - -#define SPIN_LOCK_UNLOCKED 0 - -static inline void resetlock(spinlock_t *lock) -{ - *lock = SPIN_LOCK_UNLOCKED; -} - #if defined(CONFIG_USER_ONLY) static inline void spin_lock(spinlock_t *lock) { diff -urpN qemu-orig/hppa.ld qemu-new/hppa.ld --- qemu-orig/hppa.ld 1970-01-01 01:00:00.000000000 +0100 +++ qemu-new/hppa.ld 2008-03-05 21:06:26.000000000 +0000 @@ -0,0 +1,214 @@ +/* Default linker script, for normal executables */ +OUTPUT_FORMAT("elf32-hppa-linux", "elf32-hppa-linux", + "elf32-hppa-linux") +OUTPUT_ARCH(hppa:hppa1.1) +ENTRY(_start) +SEARCH_DIR("/usr/hppa-linux-gnu/lib"); SEARCH_DIR("/usr/local/lib"); SEARCH_DIR("/lib"); SEARCH_DIR("/usr/lib"); +SECTIONS +{ + /* Read-only sections, merged into text segment: */ + PROVIDE (__executable_start = 0x60000000); . = 0x60000000 + SIZEOF_HEADERS; + .interp : { *(.interp) } + .hash : { *(.hash) } + .dynsym : { *(.dynsym) } + .dynstr : { *(.dynstr) } + .gnu.version : { *(.gnu.version) } + .gnu.version_d : { *(.gnu.version_d) } + .gnu.version_r : { *(.gnu.version_r) } + .rel.init : { *(.rel.init) } + .rela.init : { *(.rela.init) } + .rel.text : { *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*) } + .rela.text : { *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*) } + .rel.fini : { *(.rel.fini) } + .rela.fini : { *(.rela.fini) } + .rel.rodata : { *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*) } + .rela.rodata : { *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*) } + .rel.data.rel.ro : { *(.rel.data.rel.ro* .rel.gnu.linkonce.d.rel.ro.*) } + .rela.data.rel.ro : { *(.rela.data.rel.ro* .rela.gnu.linkonce.d.rel.ro.*) } + .rel.data : { *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*) } + .rela.data : { *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*) } + .rel.tdata : { *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*) } + .rela.tdata : { *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*) } + .rel.tbss : { *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*) } + .rela.tbss : { *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*) } + .rel.ctors : { *(.rel.ctors) } + .rela.ctors : { *(.rela.ctors) } + .rel.dtors : { *(.rel.dtors) } + .rela.dtors : { *(.rela.dtors) } + .rel.got : { *(.rel.got) } + .rela.got : { *(.rela.got) } + .rel.sdata : { *(.rel.sdata .rel.sdata.* .rel.gnu.linkonce.s.*) } + .rela.sdata : { *(.rela.sdata .rela.sdata.* .rela.gnu.linkonce.s.*) } + .rel.sbss : { *(.rel.sbss .rel.sbss.* .rel.gnu.linkonce.sb.*) } + .rela.sbss : { *(.rela.sbss .rela.sbss.* .rela.gnu.linkonce.sb.*) } + .rel.sdata2 : { *(.rel.sdata2 .rel.sdata2.* .rel.gnu.linkonce.s2.*) } + .rela.sdata2 : { *(.rela.sdata2 .rela.sdata2.* .rela.gnu.linkonce.s2.*) } + .rel.sbss2 : { *(.rel.sbss2 .rel.sbss2.* .rel.gnu.linkonce.sb2.*) } + .rela.sbss2 : { *(.rela.sbss2 .rela.sbss2.* .rela.gnu.linkonce.sb2.*) } + .rel.bss : { *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*) } + .rela.bss : { *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) } + .rel.plt : { *(.rel.plt) } + .rela.plt : { *(.rela.plt) } + .init : + { + KEEP (*(.init)) + } =0x08000240 + 
.text : + { + *(.text .stub .text.* .gnu.linkonce.t.*) + KEEP (*(.text.*personality*)) + /* .gnu.warning sections are handled specially by elf32.em. */ + *(.gnu.warning) + } =0x08000240 + .fini : + { + KEEP (*(.fini)) + } =0x08000240 + PROVIDE (__etext = .); + PROVIDE (_etext = .); + PROVIDE (etext = .); + .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) } + .rodata1 : { *(.rodata1) } + .sdata2 : + { + *(.sdata2 .sdata2.* .gnu.linkonce.s2.*) + } + .sbss2 : { *(.sbss2 .sbss2.* .gnu.linkonce.sb2.*) } + .PARISC.unwind : { *(.PARISC.unwind) } + .eh_frame_hdr : { *(.eh_frame_hdr) } + .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) } + .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table .gcc_except_table.*) } + /* Adjust the address for the data segment. We want to adjust up to + the same address within the page on the next page up. */ + . = ALIGN(0x10000) + (. & (0x10000 - 1)); + /* Exception handling */ + .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) } + .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) } + /* Thread Local Storage sections */ + .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) } + .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) } + .preinit_array : + { + PROVIDE_HIDDEN (__preinit_array_start = .); + KEEP (*(.preinit_array)) + PROVIDE_HIDDEN (__preinit_array_end = .); + } + .init_array : + { + PROVIDE_HIDDEN (__init_array_start = .); + KEEP (*(SORT(.init_array.*))) + KEEP (*(.init_array)) + PROVIDE_HIDDEN (__init_array_end = .); + } + .fini_array : + { + PROVIDE_HIDDEN (__fini_array_start = .); + KEEP (*(.fini_array)) + KEEP (*(SORT(.fini_array.*))) + PROVIDE_HIDDEN (__fini_array_end = .); + } + .ctors : + { + /* gcc uses crtbegin.o to find the start of + the constructors, so we make sure it is + first. Because this is a wildcard, it + doesn't matter if the user does not + actually link against crtbegin.o; the + linker won't look for a file to match a + wildcard. The wildcard also means that it + doesn't matter which directory crtbegin.o + is in. */ + KEEP (*crtbegin*.o(.ctors)) + /* We don't want to include the .ctor section from + the crtend.o file until after the sorted ctors. + The .ctor section from the crtend file contains the + end of ctors marker and it must be last */ + KEEP (*(EXCLUDE_FILE (*crtend*.o ) .ctors)) + KEEP (*(SORT(.ctors.*))) + KEEP (*(.ctors)) + } + .dtors : + { + KEEP (*crtbegin*.o(.dtors)) + KEEP (*(EXCLUDE_FILE (*crtend*.o ) .dtors)) + KEEP (*(SORT(.dtors.*))) + KEEP (*(.dtors)) + } + .jcr : { KEEP (*(.jcr)) } + .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro* .gnu.linkonce.d.rel.ro.*) } + .dynamic : { *(.dynamic) } + .data : + { + PROVIDE ($global$ = .); + *(.data .data.* .gnu.linkonce.d.*) + KEEP (*(.gnu.linkonce.d.*personality*)) + SORT(CONSTRUCTORS) + } + .data1 : { *(.data1) } + .plt : { *(.plt) } + .got : { *(.got.plt) *(.got) } + /* We want the small data sections together, so single-instruction offsets + can access them all, and initialized data all before uninitialized, so + we can shorten the on-disk segment size. */ + .sdata : + { + *(.sdata .sdata.* .gnu.linkonce.s.*) + } + _edata = .; PROVIDE (edata = .); + __bss_start = .; + .sbss : + { + *(.dynsbss) + *(.sbss .sbss.* .gnu.linkonce.sb.*) + *(.scommon) + } + .bss : + { + *(.dynbss) + *(.bss .bss.* .gnu.linkonce.b.*) + *(COMMON) + /* Align here to ensure that the .bss section occupies space up to + _end. 
Align after .bss to ensure correct alignment even if the + .bss section disappears because there are no input sections. + FIXME: Why do we need it? When there is no .bss section, we don't + pad the .data section. */ + . = ALIGN(. != 0 ? 32 / 8 : 1); + } + . = ALIGN(32 / 8); + . = ALIGN(32 / 8); + _end = .; PROVIDE (end = .); + /* Stabs debugging sections. */ + .stab 0 : { *(.stab) } + .stabstr 0 : { *(.stabstr) } + .stab.excl 0 : { *(.stab.excl) } + .stab.exclstr 0 : { *(.stab.exclstr) } + .stab.index 0 : { *(.stab.index) } + .stab.indexstr 0 : { *(.stab.indexstr) } + .comment 0 : { *(.comment) } + /* DWARF debug sections. + Symbols in the DWARF debugging sections are relative to the beginning + of the section so we begin them at 0. */ + /* DWARF 1 */ + .debug 0 : { *(.debug) } + .line 0 : { *(.line) } + /* GNU DWARF 1 extensions */ + .debug_srcinfo 0 : { *(.debug_srcinfo) } + .debug_sfnames 0 : { *(.debug_sfnames) } + /* DWARF 1.1 and DWARF 2 */ + .debug_aranges 0 : { *(.debug_aranges) } + .debug_pubnames 0 : { *(.debug_pubnames) } + /* DWARF 2 */ + .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) } + .debug_abbrev 0 : { *(.debug_abbrev) } + .debug_line 0 : { *(.debug_line) } + .debug_frame 0 : { *(.debug_frame) } + .debug_str 0 : { *(.debug_str) } + .debug_loc 0 : { *(.debug_loc) } + .debug_macinfo 0 : { *(.debug_macinfo) } + /* SGI/MIPS DWARF 2 extensions */ + .debug_weaknames 0 : { *(.debug_weaknames) } + .debug_funcnames 0 : { *(.debug_funcnames) } + .debug_typenames 0 : { *(.debug_typenames) } + .debug_varnames 0 : { *(.debug_varnames) } + /DISCARD/ : { *(.note.GNU-stack) } +} diff -urpN qemu-orig/Makefile.target qemu-new/Makefile.target --- qemu-orig/Makefile.target 2008-03-22 22:43:41.000000000 +0000 +++ qemu-new/Makefile.target 2008-03-22 22:49:35.000000000 +0000 @@ -128,6 +128,11 @@ ifeq ($(ARCH),alpha) CFLAGS+=-msmall-data endif +ifeq ($(ARCH),hppa) +OP_CFLAGS=-O1 -fno-delayed-branch +BASE_LDFLAGS+=-Wl,-T,$(SRC_PATH)/$(ARCH).ld +endif + ifeq ($(ARCH),ia64) CFLAGS+=-mno-sdata OP_CFLAGS+=-mno-sdata @@ -267,6 +272,9 @@ endif ifeq ($(findstring sh4, $(TARGET_ARCH) $(ARCH)),sh4) LIBOBJS+=sh4-dis.o endif +ifeq ($(findstring hppa, $(TARGET_BASE_ARCH) $(ARCH)),hppa) +LIBOBJS+=hppa-dis.o +endif ifeq ($(findstring s390, $(TARGET_ARCH) $(ARCH)),s390) LIBOBJS+=s390-dis.o endif diff -urpN qemu-orig/tcg/hppa/relocs.h qemu-new/tcg/hppa/relocs.h --- qemu-orig/tcg/hppa/relocs.h 1970-01-01 01:00:00.000000000 +0100 +++ qemu-new/tcg/hppa/relocs.h 2008-03-22 23:05:34.000000000 +0000 @@ -0,0 +1,116 @@ +/* + * Tiny Code Generator for QEMU + * + * Copyright (c) 2008 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +/* Field selection types defined by hppa */ +#define rnd(x) (((x)+0x1000)&~0x1fff) +/* lsel: select left 21 bits */ +#define lsel(v,a) (((v)+(a))>>11) +/* rsel: select right 11 bits */ +#define rsel(v,a) (((v)+(a))&0x7ff) +/* lrsel with rounding of addend to nearest 8k */ +#define lrsel(v,a) (((v)+rnd(a))>>11) +/* rrsel with rounding of addend to nearest 8k */ +#define rrsel(v,a) ((((v)+rnd(a))&0x7ff)+((a)-rnd(a))) + +#define mask(x,sz) ((x) & ~((1<<(sz))-1)) + +static inline int reassemble_14(int as14) +{ + return (((as14 & 0x1fff) << 1) | + ((as14 & 0x2000) >> 13)); +} + +static inline int reassemble_17(int as17) +{ + return (((as17 & 0x10000) >> 16) | + ((as17 & 0x0f800) << 5) | + ((as17 & 0x00400) >> 8) | + ((as17 & 0x003ff) << 3)); +} + +static inline int reassemble_21(int as21) +{ + return (((as21 & 0x100000) >> 20) | + ((as21 & 0x0ffe00) >> 8) | + ((as21 & 0x000180) << 7) | + ((as21 & 0x00007c) << 14) | + ((as21 & 0x000003) << 12)); +} + +static inline void hppa_patch21l(uint32_t *insn, int val, int addend) +{ + val = lrsel(val, addend); + *insn = mask(*insn, 21) | reassemble_21(val); +} + +static inline void hppa_patch14r(uint32_t *insn, int val, int addend) +{ + val = rrsel(val, addend); + *insn = mask(*insn, 14) | reassemble_14(val); +} + +static inline void hppa_patch17r(uint32_t *insn, int val, int addend) +{ + val = rrsel(val, addend); + *insn = (*insn & ~0x1f1ffd) | reassemble_17(val); +} + + +static inline void hppa_patch21l_dprel(uint32_t *insn, int val, int addend) +{ + register unsigned int dp asm("r27"); + hppa_patch21l(insn, val - dp, addend); +} + +static inline void hppa_patch14r_dprel(uint32_t *insn, int val, int addend) +{ + register unsigned int dp asm("r27"); + hppa_patch14r(insn, val - dp, addend); +} + +static inline void hppa_patch17f(uint32_t *insn, int val, int addend) +{ + int dot = (int)insn & ~0x3; + int v = ((val + addend) - dot - 8) / 4; + if (v > (1 << 16) || v < -(1 << 16)) { + printf("cannot fit branch to offset %d [%08x->%08x]\n", v, dot, val); + abort(); + } + *insn = (*insn & ~0x1f1ffd) | reassemble_17(v); +} + +static inline void hppa_load_imm21l(uint32_t *insn, int val, int addend) +{ + /* Transform addil L'sym(%dp) to ldil L'val, %r1 */ + *insn = 0x20200000 | reassemble_21(lrsel(val, 0)); +} + +static inline void hppa_load_imm14r(uint32_t *insn, int val, int addend) +{ + /* Transform ldw R'sym(%r1), %rN to ldo R'sym(%r1), %rN */ + hppa_patch14r(insn, val, addend); + /* HACK */ + if (addend == 0) + *insn = (*insn & ~0xfc000000) | (0x0d << 26); +} diff -urpN qemu-orig/tcg/hppa/tcg-target.c qemu-new/tcg/hppa/tcg-target.c --- qemu-orig/tcg/hppa/tcg-target.c 1970-01-01 01:00:00.000000000 +0100 +++ qemu-new/tcg/hppa/tcg-target.c 2008-03-22 23:14:27.000000000 +0000 @@ -0,0 +1,678 @@ +/* + * Tiny Code Generator for QEMU + * + * Copyright (c) 2008 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do 
so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { + "%r0", + "%r1", + "%rp", + "%r3", + "%r4", + "%r5", + "%r6", + "%r7", + "%r8", + "%r9", + "%r10", + "%r11", + "%r12", + "%r13", + "%r14", + "%r15", + "%r16", + "%r17", + "%r18", + "%r19", + "%r20", + "%r21", + "%r22", + "%r23", + "%r24", + "%r25", + "%r26", + "%dp", + "%ret0", + "%ret1", + "%sp", + "%r31", +}; + +static const int tcg_target_reg_alloc_order[] = { + TCG_REG_R1, + TCG_REG_R3, + TCG_REG_R4, + TCG_REG_R5, + TCG_REG_R6, + TCG_REG_R7, + TCG_REG_R8, + TCG_REG_R9, + TCG_REG_R10, + TCG_REG_R11, + TCG_REG_R12, + TCG_REG_R13, + TCG_REG_R18, +}; + +static const int tcg_target_call_iarg_regs[4] = { + TCG_REG_R26, + TCG_REG_R25, + TCG_REG_R24, + TCG_REG_R23, +}; + +static const int tcg_target_call_oarg_regs[2] = { + TCG_REG_RET0, + TCG_REG_RET1, +}; + +static void patch_reloc(uint8_t *code_ptr, int type, + tcg_target_long value, tcg_target_long addend) +{ + switch (type) { + case R_PARISC_PCREL17F: + hppa_patch17f((uint32_t *)code_ptr, value, addend); + break; + default: + tcg_abort(); + } +} + +/* maximum number of register used for input function arguments */ +static inline int tcg_target_get_call_iarg_regs_count(int flags) +{ + return 4; +} + +/* parse target specific constraints */ +int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str) +{ + const char *ct_str; + + ct_str = *pct_str; + switch (ct_str[0]) { + case 'r': + case 'L': /* qemu_ld/st constraint */ + ct->ct |= TCG_CT_REG; + tcg_regset_set32(ct->u.regs, 0, 0xffffffff); + break; + default: + return -1; + } + ct_str++; + *pct_str = ct_str; + return 0; +} + +/* test if a constant matches the constraint */ +static inline int tcg_target_const_match(tcg_target_long val, + const TCGArgConstraint *arg_ct) +{ + int ct; + + ct = arg_ct->ct; + + /* TODO */ + + return 0; +} + +#define INSN_OP(x) ((x) << 26) +#define INSN_EXT3BR(x) ((x) << 13) +#define INSN_EXT3SH(x) ((x) << 10) +#define INSN_EXT4(x) ((x) << 6) +#define INSN_EXT5(x) (x) +#define INSN_EXT6(x) ((x) << 6) +#define INSN_EXT7(x) ((x) << 6) +#define INSN_EXT8A(x) ((x) << 6) +#define INSN_EXT8B(x) ((x) << 5) +#define INSN_T(x) (x) +#define INSN_R1(x) ((x) << 16) +#define INSN_R2(x) ((x) << 21) +#define INSN_DEP_LEN(x) (32 - (x)) +#define INSN_SHDEP_CP(x) ((31 - (x)) << 5) +#define INSN_SHDEP_P(x) ((x) << 5) + +/* Logical ADD */ +#define ARITH_ADD (INSN_OP(0x02) | INSN_EXT6(0x28)) +#define ARITH_AND (INSN_OP(0x02) | INSN_EXT6(0x08)) +#define ARITH_OR (INSN_OP(0x02) | INSN_EXT6(0x09)) +#define ARITH_XOR (INSN_OP(0x02) | INSN_EXT6(0x0a)) +#define ARITH_SUB (INSN_OP(0x02) | INSN_EXT6(0x10)) + +#define SHD (INSN_OP(0x34) | INSN_EXT3SH(2)) +#define VSHD (INSN_OP(0x34) | INSN_EXT3SH(0)) +#define DEP (INSN_OP(0x35) | INSN_EXT3SH(3)) +#define ZDEP (INSN_OP(0x35) | INSN_EXT3SH(2)) +#define ZVDEP (INSN_OP(0x35) | INSN_EXT3SH(0)) 
+#define EXTRS (INSN_OP(0x34) | INSN_EXT3SH(7)) +#define VEXTRS (INSN_OP(0x34) | INSN_EXT3SH(5)) + +#define SUBI (INSN_OP(0x25)) +#define MTCTL (INSN_OP(0x00) | INSN_EXT8B(0xc2)) + +#define BLE (INSN_OP(0x39) | (1 << 13)) +#define BV (INSN_OP(0x3a) | INSN_EXT3BR(6)) +#define BV_N (INSN_OP(0x3a) | INSN_EXT3BR(6) | 2) +#define LDIL (INSN_OP(0x08)) +#define LDO (INSN_OP(0x0d)) +#define LDB (INSN_OP(0x10)) +#define LDH (INSN_OP(0x11)) +#define LDW (INSN_OP(0x12)) +#define STB (INSN_OP(0x18)) +#define STH (INSN_OP(0x19)) +#define STW (INSN_OP(0x1a)) + +static int lowsignext(uint32_t val, int start, int length) +{ + return (((val << 1) & ~(~0 << length)) | + ((val >> (length - 1)) & 1)) << start; +} + +static inline void tcg_out_mov(TCGContext *s, int ret, int arg) +{ + /* PA1.1 defines COPY as OR r,0,t + * PA2.0 defines it as LDO 0(r),t + * Which is better? -- *shrug* */ + tcg_out32(s, LDO | INSN_R1(ret) | INSN_R2(arg) | reassemble_14(0)); +} + +static inline void tcg_out_movi(TCGContext *s, TCGType type, + int ret, tcg_target_long arg) +{ + if (arg == (arg & 0x1fff)) { + tcg_out32(s, LDO | INSN_R1(ret) | INSN_R2(TCG_REG_R0) | + reassemble_14(arg)); + } else { + tcg_out32(s, LDIL | INSN_R2(ret) | + reassemble_21(((uint32_t)arg & 0xfffff000) >> 11)); + if (arg & 0xfff) + tcg_out32(s, LDO | INSN_R1(ret) | INSN_R2(ret) | + reassemble_14(arg & 0xfff)); + } +} + +static inline void tcg_out_ld_raw(TCGContext *s, int ret, + tcg_target_long arg) +{ + tcg_out32(s, LDIL | INSN_R2(ret) | + reassemble_21(((uint32_t)arg & 0xfffff000) >> 11)); + tcg_out32(s, LDW | INSN_R1(ret) | INSN_R2(ret) | + reassemble_14(arg & 0xfff)); +} + +static inline void tcg_out_ld_ptr(TCGContext *s, int ret, + tcg_target_long arg) +{ + tcg_out_ld_raw(s, ret, arg); +} + +static inline void tcg_out_ldst(TCGContext *s, int ret, int addr, int offset, + int op) +{ + if (offset == (offset & 0xfff)) + tcg_out32(s, op | INSN_R1(ret) | INSN_R2(addr) | + reassemble_14(offset)); + else + fprintf(stderr, "unimplemented %s with offset %d\n", __func__, offset); +} + +static inline void tcg_out_ld(TCGContext *s, TCGType type, int ret, + int arg1, tcg_target_long arg2) +{ + fprintf(stderr, "unimplemented %s\n", __func__); +} + +static inline void tcg_out_st(TCGContext *s, TCGType type, int ret, + int arg1, tcg_target_long arg2) +{ + fprintf(stderr, "unimplemented %s\n", __func__); +} + +static inline void tcg_out_arith(TCGContext *s, int t, int r1, int r2, int op) +{ + tcg_out32(s, op | INSN_T(t) | INSN_R1(r1) | INSN_R2(r2)); +} + +static inline void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val) +{ + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R26, val); + tcg_out_arith(s, reg, reg, TCG_REG_R26, ARITH_ADD); +} + +static inline void tcg_out_nop(TCGContext *s) +{ + tcg_out32(s, ARITH_OR | INSN_T(TCG_REG_R0) | INSN_R1(TCG_REG_R0) | + INSN_R2(TCG_REG_R0)); +} + +static inline void tcg_out_ext8s(TCGContext *s, int ret, int arg) { + tcg_out32(s, EXTRS | INSN_R1(ret) | INSN_R2(arg) | + INSN_SHDEP_P(31) | INSN_DEP_LEN(8)); +} + +static inline void tcg_out_ext16s(TCGContext *s, int ret, int arg) { + tcg_out32(s, EXTRS | INSN_R1(ret) | INSN_R2(arg) | + INSN_SHDEP_P(31) | INSN_DEP_LEN(16)); +} + +static inline void tcg_out_bswap16(TCGContext *s, int ret, int arg) { + if(ret != arg) + tcg_out_mov(s, ret, arg); + tcg_out32(s, DEP | INSN_R2(ret) | INSN_R1(ret) | + INSN_SHDEP_CP(15) | INSN_DEP_LEN(8)); + tcg_out32(s, SHD | INSN_T(ret) | INSN_R1(TCG_REG_R0) | + INSN_R2(ret) | INSN_SHDEP_CP(8)); +} + +static inline void tcg_out_bswap32(TCGContext *s, int 
ret, int arg, int temp) { + tcg_out32(s, SHD | INSN_T(temp) | INSN_R1(arg) | + INSN_R2(arg) | INSN_SHDEP_CP(16)); + tcg_out32(s, DEP | INSN_R2(temp) | INSN_R1(temp) | + INSN_SHDEP_CP(15) | INSN_DEP_LEN(8)); + tcg_out32(s, SHD | INSN_T(ret) | INSN_R1(arg) | + INSN_R2(temp) | INSN_SHDEP_CP(8)); +} + +#if defined(CONFIG_SOFTMMU) +extern void __ldb_mmu(void); +extern void __ldw_mmu(void); +extern void __ldl_mmu(void); +extern void __ldq_mmu(void); + +extern void __stb_mmu(void); +extern void __stw_mmu(void); +extern void __stl_mmu(void); +extern void __stq_mmu(void); + +static void *qemu_ld_helpers[4] = { + __ldb_mmu, + __ldw_mmu, + __ldl_mmu, + __ldq_mmu, +}; + +static void *qemu_st_helpers[4] = { + __stb_mmu, + __stw_mmu, + __stl_mmu, + __stq_mmu, +}; +#endif + +static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, + int opc) +{ +#if defined(CONFIG_SOFTMMU) + fprintf(stderr, "unimplemented qld\n"); + tcg_abort(); +#endif + + int addr_reg, data_reg, data_reg2, bswap; + + data_reg = *args++; + if (opc == 3) + data_reg2 = *args++; + addr_reg = *args++; + +#ifdef TARGET_WORDS_BIGENDIAN + bswap = 0; +#else + bswap = 1; +#endif + switch (opc) { + case 0: + tcg_out_ldst(s, data_reg, addr_reg, 0, LDB); + break; + case 0 | 4: + tcg_out_ldst(s, data_reg, addr_reg, 0, LDB); + tcg_out_ext8s(s, data_reg, data_reg); + break; + case 1: + tcg_out_ldst(s, data_reg, addr_reg, 0, LDH); + if (bswap) + tcg_out_bswap16(s, data_reg, data_reg); + break; + case 1 | 4: + tcg_out_ldst(s, data_reg, addr_reg, 0, LDH); + if (bswap) + tcg_out_bswap16(s, data_reg, data_reg); + tcg_out_ext16s(s, data_reg, data_reg); + break; + case 2: + tcg_out_ldst(s, data_reg, addr_reg, 0, LDW); + if (bswap) + tcg_out_bswap32(s, data_reg, data_reg, TCG_REG_R26); + break; + case 3: + if (!bswap) { + tcg_out_ldst(s, data_reg, addr_reg, 0, LDW); + tcg_out_ldst(s, data_reg2, addr_reg, 4, LDW); + } else { + tcg_out_ldst(s, data_reg, addr_reg, 4, LDW); + tcg_out_bswap32(s, data_reg, data_reg, TCG_REG_R26); + tcg_out_ldst(s, data_reg2, addr_reg, 0, LDW); + tcg_out_bswap32(s, data_reg2, data_reg2, TCG_REG_R26); + } + break; + default: + tcg_abort(); + } +} + +static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, + int opc) +{ +#if defined(CONFIG_SOFTMMU) + fprintf(stderr, "unimplemented qst\n"); + tcg_abort(); +#endif + + int addr_reg, data_reg, data_reg2, bswap; + + data_reg = *args++; + addr_reg = *args++; + +#ifdef TARGET_WORDS_BIGENDIAN + bswap = 0; +#else + bswap = 1; +#endif + switch (opc) { + case 0: + tcg_out_ldst(s, data_reg, addr_reg, 0, STB); + break; + case 1: + if (bswap) { + tcg_out_bswap16(s, TCG_REG_R26, data_reg); + data_reg = TCG_REG_R26; + } + tcg_out_ldst(s, data_reg, addr_reg, 0, STH); + break; + case 2: + if (bswap) { + tcg_out_bswap32(s, TCG_REG_R26, data_reg, TCG_REG_R26); + data_reg = TCG_REG_R26; + } + tcg_out_ldst(s, data_reg, addr_reg, 0, STW); + break; + case 3: + if (!bswap) { + tcg_out_ldst(s, data_reg, addr_reg, 0, STW); + tcg_out_ldst(s, data_reg2, addr_reg, 4, STW); + } else { + tcg_out_bswap32(s, TCG_REG_R26, data_reg, TCG_REG_R26); + tcg_out_ldst(s, TCG_REG_R26, addr_reg, 4, STW); + tcg_out_bswap32(s, TCG_REG_R26, data_reg2, TCG_REG_R26); + tcg_out_ldst(s, TCG_REG_R26, addr_reg, 0, STW); + } + break; + default: + tcg_abort(); + } +} + +static inline void tcg_out_op(TCGContext *s, int opc, const TCGArg *args, + const int *const_args) +{ + int c; + + switch (opc) { + case INDEX_op_exit_tb: + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RET0, args[0]); + tcg_out32(s, BV_N | INSN_R2(TCG_REG_R18)); + 
break; + case INDEX_op_goto_tb: + if (s->tb_jmp_offset) { + /* direct jump method */ + fprintf(stderr, "goto_tb direct\n"); + tcg_abort(); + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R26, args[0]); + tcg_out32(s, BV_N | INSN_R2(TCG_REG_R26)); + s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf; + } else { + /* indirect jump method */ + tcg_out_ld_ptr(s, TCG_REG_R26, + (tcg_target_long)(s->tb_next + args[0])); + tcg_out32(s, BV_N | INSN_R2(TCG_REG_R26)); + } + s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf; + break; + case INDEX_op_call: + tcg_out32(s, BLE | INSN_R2(args[0])); + tcg_out_mov(s, TCG_REG_RP, TCG_REG_R31); + break; + case INDEX_op_jmp: + fprintf(stderr, "unimplemented jmp\n"); + tcg_abort(); + break; + case INDEX_op_br: + fprintf(stderr, "unimplemented br\n"); + tcg_abort(); + break; + case INDEX_op_movi_i32: + tcg_out_movi(s, TCG_TYPE_I32, args[0], (uint32_t)args[1]); + break; + + case INDEX_op_ld8u_i32: + tcg_out_ldst(s, args[0], args[1], args[2], LDB); + break; + case INDEX_op_ld8s_i32: + tcg_out_ldst(s, args[0], args[1], args[2], LDB); + tcg_out_ext8s(s, args[0], args[0]); + break; + case INDEX_op_ld16u_i32: + tcg_out_ldst(s, args[0], args[1], args[2], LDH); + break; + case INDEX_op_ld16s_i32: + tcg_out_ldst(s, args[0], args[1], args[2], LDH); + tcg_out_ext16s(s, args[0], args[0]); + break; + case INDEX_op_ld_i32: + tcg_out_ldst(s, args[0], args[1], args[2], LDW); + break; + + case INDEX_op_st8_i32: + tcg_out_ldst(s, args[0], args[1], args[2], STB); + break; + case INDEX_op_st16_i32: + tcg_out_ldst(s, args[0], args[1], args[2], STH); + break; + case INDEX_op_st_i32: + tcg_out_ldst(s, args[0], args[1], args[2], STW); + break; + + case INDEX_op_sub_i32: + c = ARITH_SUB; + goto gen_arith; + case INDEX_op_and_i32: + c = ARITH_AND; + goto gen_arith; + case INDEX_op_or_i32: + c = ARITH_OR; + goto gen_arith; + case INDEX_op_xor_i32: + c = ARITH_XOR; + goto gen_arith; + case INDEX_op_add_i32: + c = ARITH_ADD; + goto gen_arith; + + case INDEX_op_shl_i32: + tcg_out32(s, SUBI | INSN_R1(TCG_REG_R26) | INSN_R2(args[2]) | + lowsignext(0x1f, 0, 11)); + tcg_out32(s, MTCTL | INSN_R2(11) | INSN_R1(TCG_REG_R26)); + tcg_out32(s, ZVDEP | INSN_R2(args[0]) | INSN_R1(args[1]) | + INSN_DEP_LEN(32)); + break; + case INDEX_op_shr_i32: + tcg_out32(s, MTCTL | INSN_R2(11) | INSN_R1(args[2])); + tcg_out32(s, VSHD | INSN_T(args[0]) | INSN_R1(TCG_REG_R0) | + INSN_R2(args[1])); + break; + case INDEX_op_sar_i32: + tcg_out32(s, SUBI | INSN_R1(TCG_REG_R26) | INSN_R2(args[2]) | + lowsignext(0x1f, 0, 11)); + tcg_out32(s, MTCTL | INSN_R2(11) | INSN_R1(TCG_REG_R26)); + tcg_out32(s, VEXTRS | INSN_R2(args[0]) | INSN_R1(args[1]) | + INSN_DEP_LEN(32)); + break; + + case INDEX_op_mul_i32: + fprintf(stderr, "unimplemented mul\n"); + break; + case INDEX_op_mulu2_i32: + fprintf(stderr, "unimplemented mulu2\n"); + break; + case INDEX_op_div2_i32: + fprintf(stderr, "unimplemented div2\n"); + break; + case INDEX_op_divu2_i32: + fprintf(stderr, "unimplemented divu2\n"); + break; + + case INDEX_op_brcond_i32: + fprintf(stderr, "unimplemented brcond\n"); + break; + + case INDEX_op_qemu_ld8u: + tcg_out_qemu_ld(s, args, 0); + break; + case INDEX_op_qemu_ld8s: + tcg_out_qemu_ld(s, args, 0 | 4); + break; + case INDEX_op_qemu_ld16u: + tcg_out_qemu_ld(s, args, 1); + break; + case INDEX_op_qemu_ld16s: + tcg_out_qemu_ld(s, args, 1 | 4); + break; + case INDEX_op_qemu_ld32u: + tcg_out_qemu_ld(s, args, 2); + break; + + case INDEX_op_qemu_st8: + tcg_out_qemu_st(s, args, 0); + break; + case INDEX_op_qemu_st16: + tcg_out_qemu_st(s, 
args, 1); + break; + case INDEX_op_qemu_st32: + tcg_out_qemu_st(s, args, 2); + break; + + default: + fprintf(stderr, "unknown opcode 0x%x\n", opc); + tcg_abort(); + } + return; + +gen_arith: + tcg_out_arith(s, args[0], args[1], args[2], c); +} + +static const TCGTargetOpDef hppa_op_defs[] = { + { INDEX_op_exit_tb, { } }, + { INDEX_op_goto_tb, { } }, + + { INDEX_op_call, { "r" } }, + { INDEX_op_jmp, { "r" } }, + { INDEX_op_br, { } }, + + { INDEX_op_mov_i32, { "r", "r" } }, + { INDEX_op_movi_i32, { "r" } }, + { INDEX_op_ld8u_i32, { "r", "r" } }, + { INDEX_op_ld8s_i32, { "r", "r" } }, + { INDEX_op_ld16u_i32, { "r", "r" } }, + { INDEX_op_ld16s_i32, { "r", "r" } }, + { INDEX_op_ld_i32, { "r", "r" } }, + { INDEX_op_st8_i32, { "r", "r" } }, + { INDEX_op_st16_i32, { "r", "r" } }, + { INDEX_op_st_i32, { "r", "r" } }, + + { INDEX_op_add_i32, { "r", "r", "r" } }, + { INDEX_op_sub_i32, { "r", "r", "r" } }, + { INDEX_op_and_i32, { "r", "r", "r" } }, + { INDEX_op_or_i32, { "r", "r", "r" } }, + { INDEX_op_xor_i32, { "r", "r", "r" } }, + + { INDEX_op_shl_i32, { "r", "r", "r" } }, + { INDEX_op_shr_i32, { "r", "r", "r" } }, + { INDEX_op_sar_i32, { "r", "r", "r" } }, + + { INDEX_op_brcond_i32, { "r", "r" } }, + +#if TARGET_LONG_BITS == 32 + { INDEX_op_qemu_ld8u, { "r", "L" } }, + { INDEX_op_qemu_ld8s, { "r", "L" } }, + { INDEX_op_qemu_ld16u, { "r", "L" } }, + { INDEX_op_qemu_ld16s, { "r", "L" } }, + { INDEX_op_qemu_ld32u, { "r", "L" } }, + { INDEX_op_qemu_ld64, { "r", "r", "L" } }, + + { INDEX_op_qemu_st8, { "L", "L" } }, + { INDEX_op_qemu_st16, { "L", "L" } }, + { INDEX_op_qemu_st32, { "L", "L" } }, + { INDEX_op_qemu_st64, { "L", "L", "L" } }, +#else + { INDEX_op_qemu_ld8u, { "r", "L", "L" } }, + { INDEX_op_qemu_ld8s, { "r", "L", "L" } }, + { INDEX_op_qemu_ld16u, { "r", "L", "L" } }, + { INDEX_op_qemu_ld16s, { "r", "L", "L" } }, + { INDEX_op_qemu_ld32u, { "r", "L", "L" } }, + { INDEX_op_qemu_ld32s, { "r", "L", "L" } }, + { INDEX_op_qemu_ld64, { "r", "r", "L", "L" } }, + + { INDEX_op_qemu_st8, { "L", "L", "L" } }, + { INDEX_op_qemu_st16, { "L", "L", "L" } }, + { INDEX_op_qemu_st32, { "L", "L", "L" } }, + { INDEX_op_qemu_st64, { "L", "L", "L", "L" } }, +#endif + { -1 }, +}; + +void tcg_target_init(TCGContext *s) +{ + tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff); + tcg_regset_set32(tcg_target_call_clobber_regs, 0, + (1 << TCG_REG_R20) | + (1 << TCG_REG_R21) | + (1 << TCG_REG_R22) | + (1 << TCG_REG_R23) | + (1 << TCG_REG_R24) | + (1 << TCG_REG_R25) | + (1 << TCG_REG_R26)); + + tcg_regset_clear(s->reserved_regs); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0); /* hardwired to zero */ + tcg_regset_set_reg(s->reserved_regs, TCG_REG_RP); /* link register */ + tcg_regset_set_reg(s->reserved_regs, TCG_REG_R18); /* return pointer */ + tcg_regset_set_reg(s->reserved_regs, TCG_REG_R26); /* reserved */ + tcg_regset_set_reg(s->reserved_regs, TCG_REG_DP); /* data pointer */ + tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP); /* stack pointer */ + tcg_regset_set_reg(s->reserved_regs, TCG_REG_R31); /* ble link reg */ + + tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1); /* addil target */ + tcg_regset_set_reg(s->reserved_regs, TCG_REG_R19); /* clobbered w/o pic */ + + tcg_add_target_add_op_defs(hppa_op_defs); +} diff -urpN qemu-orig/tcg/hppa/tcg-target.h qemu-new/tcg/hppa/tcg-target.h --- qemu-orig/tcg/hppa/tcg-target.h 1970-01-01 01:00:00.000000000 +0100 +++ qemu-new/tcg/hppa/tcg-target.h 2008-03-22 23:04:06.000000000 +0000 @@ -0,0 +1,146 @@ +/* + * Tiny Code Generator for QEMU + * + * 
Copyright (c) 2008 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#include "relocs.h" + +#define TCG_TARGET_HPPA 1 + +#if defined(_PA_RISC1_1) +#define TCG_TARGET_REG_BITS 32 +#else +#error unsupported +#endif + +#define TCG_TARGET_WORDS_BIGENDIAN + +#define TCG_TARGET_NB_REGS 32 + +enum { + TCG_REG_R0 = 0, + TCG_REG_R1, + TCG_REG_RP, + TCG_REG_R3, + TCG_REG_R4, + TCG_REG_R5, + TCG_REG_R6, + TCG_REG_R7, + TCG_REG_R8, + TCG_REG_R9, + TCG_REG_R10, + TCG_REG_R11, + TCG_REG_R12, + TCG_REG_R13, + TCG_REG_R14, + TCG_REG_R15, + TCG_REG_R16, + TCG_REG_R17, + TCG_REG_R18, + TCG_REG_R19, + TCG_REG_R20, + TCG_REG_R21, + TCG_REG_R22, + TCG_REG_R23, + TCG_REG_R24, + TCG_REG_R25, + TCG_REG_R26, + TCG_REG_DP, + TCG_REG_RET0, + TCG_REG_RET1, + TCG_REG_SP, + TCG_REG_R31, +}; + +/* used for function call generation */ +#define TCG_REG_CALL_STACK TCG_REG_SP +#define TCG_TARGET_STACK_ALIGN 16 +#define TCG_TARGET_STACK_GROWSUP + +/* optional instructions */ +//#define TCG_TARGET_HAS_ext8s_i32 +//#define TCG_TARGET_HAS_ext16s_i32 +//#define TCG_TARGET_HAS_bswap16_i32 +//#define TCG_TARGET_HAS_bswap_i32 + +/* Note: must be synced with dyngen-exec.h */ +#define TCG_AREG0 TCG_REG_R17 +#define TCG_AREG1 TCG_REG_R14 +#define TCG_AREG2 TCG_REG_R15 +#define TCG_AREG3 TCG_REG_R16 + +static inline void flush_icache_range(unsigned long start, unsigned long stop) +{ + start &= ~31; + while (start <= stop) + { + asm volatile ("fdc 0(%0)\n" + "sync\n" + "fic 0(%%sr4, %0)\n" + "sync\n" + : : "r"(start) : "memory"); + start += 32; + } +} + +/* supplied by libgcc */ +extern void *__canonicalize_funcptr_for_compare(void *); + +#ifndef CONFIG_NO_DYNGEN_OP + +struct hppa_branch_stub { + uint32_t *location; + long target; + struct hppa_branch_stub *next; +}; + +#define HPPA_RECORD_BRANCH(LIST, LOC, TARGET) \ +do { \ + struct hppa_branch_stub *stub = alloca(sizeof(struct hppa_branch_stub)); \ + stub->location = LOC; \ + stub->target = TARGET; \ + stub->next = LIST; \ + LIST = stub; \ +} while (0) + +static inline void hppa_process_stubs(struct hppa_branch_stub *stub, + uint8_t **gen_code_pp) +{ + uint32_t *s = (uint32_t *)*gen_code_pp; + uint32_t *p = s + 1; + + if (!stub) return; + + for (; stub != NULL; stub = stub->next) { + unsigned long l = (unsigned long)p; + /* stub: + * ldil L'target, %r1 + * be,n R'target(%sr4,%r1) + */ + *p++ = 0x20200000 | reassemble_21(lrsel(stub->target, 0)); + *p++ = 0xe0202002 | (reassemble_17(rrsel(stub->target, 0) >> 2)); + 
hppa_patch17f(stub->location, l, 0); + } + *s = 0xe8000002 | reassemble_17((p - s) - 2); + *gen_code_pp = (uint8_t *)p; +} + +#endif /* CONFIG_NO_DYNGEN_OP */ diff -urpN qemu-orig/tcg/i386/tcg-target.c qemu-new/tcg/i386/tcg-target.c --- qemu-orig/tcg/i386/tcg-target.c 2008-03-22 22:43:44.000000000 +0000 +++ qemu-new/tcg/i386/tcg-target.c 2008-03-23 00:00:20.000000000 +0000 @@ -47,8 +47,9 @@ const int tcg_target_call_iarg_regs[3] = const int tcg_target_call_oarg_regs[2] = { TCG_REG_EAX, TCG_REG_EDX }; static void patch_reloc(uint8_t *code_ptr, int type, - tcg_target_long value) + tcg_target_long value, tcg_target_long addend) { + value += addend; switch(type) { case R_386_32: *(uint32_t *)code_ptr = value; diff -urpN qemu-orig/tcg/sparc/tcg-target.c qemu-new/tcg/sparc/tcg-target.c --- qemu-orig/tcg/sparc/tcg-target.c 2008-03-22 22:43:44.000000000 +0000 +++ qemu-new/tcg/sparc/tcg-target.c 2008-03-22 23:59:48.000000000 +0000 @@ -88,8 +88,9 @@ static const int tcg_target_call_oarg_re }; static void patch_reloc(uint8_t *code_ptr, int type, - tcg_target_long value) + tcg_target_long value, tcg_target_long addend) { + value += addend; switch (type) { case R_SPARC_32: if (value != (uint32_t)value) diff -urpN qemu-orig/tcg/tcg.c qemu-new/tcg/tcg.c --- qemu-orig/tcg/tcg.c 2008-03-22 22:43:44.000000000 +0000 +++ qemu-new/tcg/tcg.c 2008-03-22 23:09:41.000000000 +0000 @@ -53,7 +53,7 @@ static void patch_reloc(uint8_t *code_ptr, int type, - tcg_target_long value); + tcg_target_long value, tcg_target_long addend); TCGOpDef tcg_op_defs[] = { #define DEF(s, n, copy_size) { #s, 0, 0, n, n, 0, copy_size }, @@ -100,7 +100,7 @@ void tcg_out_reloc(TCGContext *s, uint8_ /* FIXME: This may break relocations on RISC targets that modify instruction fields in place. The caller may not have written the initial value. 
*/ - patch_reloc(code_ptr, type, l->u.value + addend); + patch_reloc(code_ptr, type, l->u.value, addend); } else { /* add a new relocation entry */ r = tcg_malloc(sizeof(TCGRelocation)); @@ -123,7 +123,7 @@ static void tcg_out_label(TCGContext *s, tcg_abort(); r = l->u.first_reloc; while (r != NULL) { - patch_reloc(r->ptr, r->type, value + r->addend); + patch_reloc(r->ptr, r->type, value, r->addend); r = r->next; } l->has_value = 1; @@ -1472,7 +1472,7 @@ static int tcg_reg_alloc_call(TCGContext int nb_iargs, nb_oargs, flags, nb_regs, i, reg, nb_params; TCGArg arg, func_arg; TCGTemp *ts; - tcg_target_long stack_offset, call_stack_size; + tcg_target_long stack_offset, call_stack_size, func_addr; int const_func_arg; TCGRegSet allocated_regs; const TCGArgConstraint *arg_ct; @@ -1494,7 +1494,11 @@ static int tcg_reg_alloc_call(TCGContext call_stack_size = (nb_params - nb_regs) * sizeof(tcg_target_long); call_stack_size = (call_stack_size + TCG_TARGET_STACK_ALIGN - 1) & ~(TCG_TARGET_STACK_ALIGN - 1); +#ifdef TCG_TARGET_STACK_GROWSUP + tcg_out_addi(s, TCG_REG_CALL_STACK, call_stack_size); +#else tcg_out_addi(s, TCG_REG_CALL_STACK, -call_stack_size); +#endif stack_offset = 0; for(i = nb_regs; i < nb_params; i++) { @@ -1517,7 +1521,11 @@ static int tcg_reg_alloc_call(TCGContext } else { tcg_abort(); } +#ifdef TCG_TARGET_STACK_GROWSUP + stack_offset -= sizeof(tcg_target_long); +#else stack_offset += sizeof(tcg_target_long); +#endif } /* assign input registers */ @@ -1546,6 +1554,10 @@ static int tcg_reg_alloc_call(TCGContext func_arg = args[nb_oargs + nb_iargs - 1]; arg_ct = &def->args_ct[0]; ts = &s->temps[func_arg]; + func_addr = ts->val; +#ifdef HOST_HPPA + func_addr = (tcg_target_long)__canonicalize_funcptr_for_compare((void *)func_addr); +#endif const_func_arg = 0; if (ts->val_type == TEMP_VAL_MEM) { reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs); @@ -1559,12 +1571,12 @@ static int tcg_reg_alloc_call(TCGContext } func_arg = reg; } else if (ts->val_type == TEMP_VAL_CONST) { - if (tcg_target_const_match(ts->val, arg_ct)) { + if (tcg_target_const_match(func_addr, arg_ct)) { const_func_arg = 1; - func_arg = ts->val; + func_arg = func_addr; } else { reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs); - tcg_out_movi(s, ts->type, reg, ts->val); + tcg_out_movi(s, ts->type, reg, func_addr); func_arg = reg; } } else { @@ -1604,7 +1616,11 @@ static int tcg_reg_alloc_call(TCGContext tcg_out_op(s, opc, &func_arg, &const_func_arg); +#ifdef TCG_TARGET_STACK_GROWSUP + tcg_out_addi(s, TCG_REG_CALL_STACK, -call_stack_size); +#else tcg_out_addi(s, TCG_REG_CALL_STACK, call_stack_size); +#endif /* assign output registers and emit moves if needed */ for(i = 0; i < nb_oargs; i++) { diff -urpN qemu-orig/tcg/tcg-dyngen.c qemu-new/tcg/tcg-dyngen.c --- qemu-orig/tcg/tcg-dyngen.c 2008-03-22 22:43:44.000000000 +0000 +++ qemu-new/tcg/tcg-dyngen.c 2008-03-22 22:49:35.000000000 +0000 @@ -469,6 +469,10 @@ const TCGArg *dyngen_op(TCGContext *s, i { uint8_t *gen_code_ptr; +#ifdef HOST_HPPA + struct hppa_branch_stub *hppa_stubs = NULL; +#endif + gen_code_ptr = s->code_ptr; switch(opc) { @@ -478,6 +482,11 @@ const TCGArg *dyngen_op(TCGContext *s, i default: tcg_abort(); } + +#ifdef HOST_HPPA + hppa_process_stubs(hppa_stubs, &gen_code_ptr); +#endif + s->code_ptr = gen_code_ptr; return opparam_ptr; } diff -urpN qemu-orig/tcg/x86_64/tcg-target.c qemu-new/tcg/x86_64/tcg-target.c --- qemu-orig/tcg/x86_64/tcg-target.c 2008-03-22 22:43:44.000000000 +0000 +++ qemu-new/tcg/x86_64/tcg-target.c 2008-03-23 00:00:49.000000000 +0000 
@@ -74,8 +74,9 @@ const int tcg_target_call_oarg_regs[2] =
 };
 
 static void patch_reloc(uint8_t *code_ptr, int type,
-                        tcg_target_long value)
+                        tcg_target_long value, tcg_target_long addend)
 {
+    value += addend;
     switch(type) {
     case R_X86_64_32:
         if (value != (uint32_t)value)
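The exec-all.h hunk above turns spinlock_t into an int[4] on hppa because ldcw, the only atomic read-and-clear primitive on PA-RISC, needs a 16-byte-aligned word and treats zero as "locked". Below is a minimal standalone sketch of that protocol; since ldcw only exists on PA-RISC, the GCC __atomic_exchange_n builtin stands in for its load-and-zero behaviour, and main() with its assertions is purely illustrative.

#include <assert.h>
#include <stdio.h>

/* Same layout as the hppa spinlock_t in exec-all.h: four words, all 1
 * (unlocked).  sizeof(int[4]) == 16, so whatever the array's own
 * alignment, one of the four words is guaranteed to sit on a 16-byte
 * boundary -- that is the word ldcw actually operates on. */
typedef int spinlock_t[4];
#define SPIN_LOCK_UNLOCKED { 1, 1, 1, 1 }

static void *ldcw_align(void *p)
{
    unsigned long a = (unsigned long)p;
    a = (a + 15) & ~15UL;
    return (void *)a;
}

/* Portable stand-in for the ldcw-based testandset() in the patch:
 * ldcw atomically loads the word and stores 0, so the old value is
 * nonzero exactly when the lock was free.  Returning !old gives the
 * usual testandset convention (0 = acquired, nonzero = already held). */
static int testandset(spinlock_t *p)
{
    int *w = ldcw_align(p);
    int old = __atomic_exchange_n(w, 0, __ATOMIC_ACQUIRE);
    return !old;
}

/* Release: the patch's resetlock() writes 1 to all four words, since it
 * does not know which of them is the aligned one. */
static void resetlock(spinlock_t *p)
{
    (*p)[0] = (*p)[1] = (*p)[2] = (*p)[3] = 1;
}

int main(void)
{
    spinlock_t lock = SPIN_LOCK_UNLOCKED;

    assert(testandset(&lock) == 0);   /* first caller gets the lock */
    assert(testandset(&lock) != 0);   /* second caller sees it held */
    resetlock(&lock);
    assert(testandset(&lock) == 0);   /* free again after reset */
    puts("ok");
    return 0;
}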
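The relocs.h helpers above split a 32-bit value into a 21-bit "left" part (for ldil/addil) and an 11/14-bit "right" part (for an ldo/ldw displacement), then scatter each part into the PA-RISC instruction bit layout with reassemble_21()/reassemble_14(); hppa_load_imm21l(), tcg_out_movi() and the branch stubs all depend on this. The standalone sketch below replays that split for one made-up address and checks the round-trip invariant. The field-selection macros and reassemble functions are copied from relocs.h; the sample address, the assertion and the printf are illustrative only.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Copied from tcg/hppa/relocs.h above. */
#define rnd(x)      (((x) + 0x1000) & ~0x1fff)
#define lrsel(v, a) (((v) + rnd(a)) >> 11)
#define rrsel(v, a) ((((v) + rnd(a)) & 0x7ff) + ((a) - rnd(a)))

/* Scatter a 21-bit "left" immediate into the ldil/addil bit layout. */
static int reassemble_21(int as21)
{
    return (((as21 & 0x100000) >> 20) |
            ((as21 & 0x0ffe00) >> 8) |
            ((as21 & 0x000180) << 7) |
            ((as21 & 0x00007c) << 14) |
            ((as21 & 0x000003) << 12));
}

/* Scatter a 14-bit "right" immediate into the ldo/ldw bit layout. */
static int reassemble_14(int as14)
{
    return (((as14 & 0x1fff) << 1) |
            ((as14 & 0x2000) >> 13));
}

int main(void)
{
    uint32_t addr = 0x6001a5f4;   /* made-up sample address */
    int addend = 0;

    /* L'addr goes into the upper 21 bits via ldil, R'addr into the
     * 14-bit displacement of a following ldo/ldw; together they must
     * rebuild addr + addend exactly. */
    assert((uint32_t)(lrsel(addr, addend) << 11) + rrsel(addr, addend)
           == addr + addend);

    /* ldil L'addr,%r1  -- 0x20200000 is ldil with target %r1, the same
     * constant used by hppa_load_imm21l() and the branch stubs. */
    uint32_t ldil = 0x20200000 | reassemble_21(lrsel(addr, addend));
    /* ldo R'addr(%r1),%r1  -- opcode 0x0d, base %r1, target %r1. */
    uint32_t ldo  = (0x0d << 26) | (1 << 21) | (1 << 16)
                    | reassemble_14(rrsel(addr, addend));

    printf("ldil=0x%08x ldo=0x%08x\n", ldil, ldo);
    return 0;
}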
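tcg_out_bswap32() in tcg/hppa/tcg-target.c composes a 32-bit byte swap from two double shifts (shd) and one deposit (dep). The C function below mirrors those three steps so the shift and deposit positions can be sanity-checked on any host; the function name and the test constants are made up for illustration.

#include <assert.h>
#include <stdint.h>

/* C mirror of the three-instruction sequence emitted by tcg_out_bswap32():
 *   shd arg,arg,16,temp   ; rotate right by 16:            ABCD -> CDAB
 *   dep temp,15,8,temp    ; copy temp's low byte into its
 *                         ; second-highest byte:           CDAB -> CBAB
 *   shd arg,temp,8,ret    ; take (arg:temp) >> 8:          ret  =  DCBA
 * Bit numbering follows PA-RISC conventions (bit 0 = MSB). */
static uint32_t bswap32_like_hppa(uint32_t arg)
{
    uint32_t temp, ret;

    temp = (arg << 16) | (arg >> 16);                    /* shd arg,arg,16 */
    temp = (temp & 0xff00ffff) | ((temp & 0xff) << 16);  /* dep temp,15,8  */
    ret  = (arg << 24) | (temp >> 8);                    /* shd arg,temp,8 */
    return ret;
}

int main(void)
{
    assert(bswap32_like_hppa(0x11223344u) == 0x44332211u);
    assert(bswap32_like_hppa(0xdeadbeefu) == 0xefbeaddeu);
    return 0;
}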