[Qemu-devel] [PATCH 3/5] Use __asm__ instead of asm or __asm


From: blauwirbel
Subject: [Qemu-devel] [PATCH 3/5] Use __asm__ instead of asm or __asm
Date: Sun, 8 Jul 2012 11:51:28 +0000

From: Blue Swirl <address@hidden>

Replace asm and __asm with __asm__.

Signed-off-by: Blue Swirl <address@hidden>
---
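(Note for reviewers, not part of the change itself: GCC only recognizes the plain `asm` keyword when GNU extensions are enabled; under the strict ISO modes (-ansi, -std=c99 and friends) the `__asm__` spelling is the one that always parses, for both inline-assembly statements and register-asm variable declarations. A minimal, self-contained sketch of the difference — the file name and function below are made up purely for illustration:

/* portability_note.c -- illustration only, not part of the patch.
 *
 *   gcc -std=gnu99 -c portability_note.c   # both spellings accepted
 *   gcc -std=c99   -c portability_note.c   # only __asm__ parses; the
 *                                          # commented-out `asm` line
 *                                          # would be rejected
 */
static inline unsigned long pass_through(unsigned long x)
{
    /* register unsigned long r __asm__("r0") = x;  -- the register-asm
     * variable form is likewise spelled __asm__ so it survives strict
     * ISO modes (register name is architecture-specific, hence commented) */

    /* asm volatile ("" : "+r"(x));   -- plain `asm` is a GNU extension
     *                                   keyword and disappears under -std=c99 */
    __asm__ volatile ("" : "+r"(x));  /* accepted in every GCC mode */
    return x;
}

int main(void)
{
    return pass_through(42) == 42 ? 0 : 1;
}
)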
 bswap.h               |   40 ++++++++++++++++++++--------------------
 cache-utils.h         |   10 +++++-----
 exec-all.h            |    6 +++---
 qemu-barrier.h        |   14 +++++++-------
 qemu-timer.h          |   18 +++++++++---------
 target-i386/cpu.c     |   26 +++++++++++++-------------
 target-ppc/kvm.c      |    4 ++--
 target-ppc/kvm_ppc.h  |    2 +-
 tcg/arm/tcg-target.h  |    8 ++++----
 tcg/hppa/tcg-target.h |   10 +++++-----
 tcg/ia64/tcg-target.h |    4 ++--
 tcg/s390/tcg-target.c |   14 +++++++-------
 tcg/tcg.c             |    2 +-
 13 files changed, 79 insertions(+), 79 deletions(-)

diff --git a/bswap.h b/bswap.h
index cc7f84d..7b33d9e 100644
--- a/bswap.h
+++ b/bswap.h
@@ -526,10 +526,10 @@ static inline int lduw_be_p(const void *ptr)
 {
 #if defined(__i386__)
     int val;
-    asm volatile ("movzwl %1, %0\n"
-                  "xchgb %b0, %h0\n"
-                  : "=q" (val)
-                  : "m" (*(uint16_t *)ptr));
+    __asm__ volatile ("movzwl %1, %0\n"
+                      "xchgb %b0, %h0\n"
+                      : "=q" (val)
+                      : "m" (*(uint16_t *)ptr));
     return val;
 #else
     const uint8_t *b = ptr;
@@ -541,10 +541,10 @@ static inline int ldsw_be_p(const void *ptr)
 {
 #if defined(__i386__)
     int val;
-    asm volatile ("movzwl %1, %0\n"
-                  "xchgb %b0, %h0\n"
-                  : "=q" (val)
-                  : "m" (*(uint16_t *)ptr));
+    __asm__ volatile ("movzwl %1, %0\n"
+                      "xchgb %b0, %h0\n"
+                      : "=q" (val)
+                      : "m" (*(uint16_t *)ptr));
     return (int16_t)val;
 #else
     const uint8_t *b = ptr;
@@ -556,10 +556,10 @@ static inline int ldl_be_p(const void *ptr)
 {
 #if defined(__i386__) || defined(__x86_64__)
     int val;
-    asm volatile ("movl %1, %0\n"
-                  "bswap %0\n"
-                  : "=r" (val)
-                  : "m" (*(uint32_t *)ptr));
+    __asm__ volatile ("movl %1, %0\n"
+                      "bswap %0\n"
+                      : "=r" (val)
+                      : "m" (*(uint32_t *)ptr));
     return val;
 #else
     const uint8_t *b = ptr;
@@ -578,10 +578,10 @@ static inline uint64_t ldq_be_p(const void *ptr)
 static inline void stw_be_p(void *ptr, int v)
 {
 #if defined(__i386__)
-    asm volatile ("xchgb %b0, %h0\n"
-                  "movw %w0, %1\n"
-                  : "=q" (v)
-                  : "m" (*(uint16_t *)ptr), "0" (v));
+    __asm__ volatile ("xchgb %b0, %h0\n"
+                      "movw %w0, %1\n"
+                      : "=q" (v)
+                      : "m" (*(uint16_t *)ptr), "0" (v));
 #else
     uint8_t *d = (uint8_t *) ptr;
     d[0] = v >> 8;
@@ -592,10 +592,10 @@ static inline void stw_be_p(void *ptr, int v)
 static inline void stl_be_p(void *ptr, int v)
 {
 #if defined(__i386__) || defined(__x86_64__)
-    asm volatile ("bswap %0\n"
-                  "movl %0, %1\n"
-                  : "=r" (v)
-                  : "m" (*(uint32_t *)ptr), "0" (v));
+    __asm__ volatile ("bswap %0\n"
+                      "movl %0, %1\n"
+                      : "=r" (v)
+                      : "m" (*(uint32_t *)ptr), "0" (v));
 #else
     uint8_t *d = (uint8_t *) ptr;
     d[0] = v >> 24;
diff --git a/cache-utils.h b/cache-utils.h
index 2c57f78..a32cbd2 100644
--- a/cache-utils.h
+++ b/cache-utils.h
@@ -24,17 +24,17 @@ static inline void flush_icache_range(uintptr_t start, uintptr_t stop)
     start1 = start & ~(dsize - 1);
     stop1 = (stop + dsize - 1) & ~(dsize - 1);
     for (p = start1; p < stop1; p += dsize) {
-        asm volatile ("dcbst 0,%0" : : "r"(p) : "memory");
+        __asm__ volatile ("dcbst 0,%0" : : "r"(p) : "memory");
     }
-    asm volatile ("sync" : : : "memory");
+    __asm__ volatile ("sync" : : : "memory");
 
     start &= start & ~(isize - 1);
     stop1 = (stop + isize - 1) & ~(isize - 1);
     for (p = start1; p < stop1; p += isize) {
-        asm volatile ("icbi 0,%0" : : "r"(p) : "memory");
+        __asm__ volatile ("icbi 0,%0" : : "r"(p) : "memory");
     }
-    asm volatile ("sync" : : : "memory");
-    asm volatile ("isync" : : : "memory");
+    __asm__ volatile ("sync" : : : "memory");
+    __asm__ volatile ("isync" : : : "memory");
 }
 
 #else
diff --git a/exec-all.h b/exec-all.h
index 9bda7f7..994d8b9 100644
--- a/exec-all.h
+++ b/exec-all.h
@@ -224,9 +224,9 @@ static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
 static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
 {
 #if !QEMU_GNUC_PREREQ(4, 1)
-    register unsigned long _beg __asm ("a1");
-    register unsigned long _end __asm ("a2");
-    register unsigned long _flg __asm ("a3");
+    register unsigned long _beg __asm__ ("a1");
+    register unsigned long _end __asm__ ("a2");
+    register unsigned long _flg __asm__ ("a3");
 #endif
 
     /* we could use a ldr pc, [pc, #-4] kind of branch and avoid the flush */
diff --git a/qemu-barrier.h b/qemu-barrier.h
index 7e11197..989a91a 100644
--- a/qemu-barrier.h
+++ b/qemu-barrier.h
@@ -2,7 +2,7 @@
 #define __QEMU_BARRIER_H 1
 
 /* Compiler barrier */
-#define barrier()   asm volatile("" ::: "memory")
+#define barrier()   __asm__ volatile("" ::: "memory")
 
 #if defined(__i386__)
 
@@ -22,14 +22,14 @@
 #if defined(__GNUC__) && __GNUC__ >= 4 && __GNUC_MINOR__ >= 4
 #define smp_mb() __sync_synchronize()
 #else
-#define smp_mb() asm volatile("lock; addl $0,0(%%esp) " ::: "memory")
+#define smp_mb() __asm__ volatile("lock; addl $0,0(%%esp) " ::: "memory")
 #endif
 
 #elif defined(__x86_64__)
 
 #define smp_wmb()   barrier()
 #define smp_rmb()   barrier()
-#define smp_mb() asm volatile("mfence" ::: "memory")
+#define smp_mb() __asm__ volatile("mfence" ::: "memory")
 
 #elif defined(_ARCH_PPC)
 
@@ -38,15 +38,15 @@
  * need to order cacheable and non-cacheable stores with respect to
  * each other
  */
-#define smp_wmb()   asm volatile("eieio" ::: "memory")
+#define smp_wmb()   __asm__ volatile("eieio" ::: "memory")
 
 #if defined(__powerpc64__)
-#define smp_rmb()   asm volatile("lwsync" ::: "memory")
+#define smp_rmb()   __asm__ volatile("lwsync" ::: "memory")
 #else
-#define smp_rmb()   asm volatile("sync" ::: "memory")
+#define smp_rmb()   __asm__ volatile("sync" ::: "memory")
 #endif
 
-#define smp_mb()   asm volatile("sync" ::: "memory")
+#define smp_mb()   __asm__ volatile("sync" ::: "memory")
 
 #else
 
diff --git a/qemu-timer.h b/qemu-timer.h
index f8af595..0aca342 100644
--- a/qemu-timer.h
+++ b/qemu-timer.h
@@ -174,7 +174,7 @@ static inline int64_t cpu_get_real_ticks(void)
 static inline int64_t cpu_get_real_ticks(void)
 {
     int64_t val;
-    asm volatile ("rdtsc" : "=A" (val));
+    __asm__ volatile ("rdtsc" : "=A" (val));
     return val;
 }
 
@@ -184,7 +184,7 @@ static inline int64_t cpu_get_real_ticks(void)
 {
     uint32_t low,high;
     int64_t val;
-    asm volatile("rdtsc" : "=a" (low), "=d" (high));
+    __asm__ volatile("rdtsc" : "=a" (low), "=d" (high));
     val = high;
     val <<= 32;
     val |= low;
@@ -196,7 +196,7 @@ static inline int64_t cpu_get_real_ticks(void)
 static inline int64_t cpu_get_real_ticks(void)
 {
     int val;
-    asm volatile ("mfctl %%cr16, %0" : "=r"(val));
+    __asm__ volatile ("mfctl %%cr16, %0" : "=r"(val));
     return val;
 }
 
@@ -205,7 +205,7 @@ static inline int64_t cpu_get_real_ticks(void)
 static inline int64_t cpu_get_real_ticks(void)
 {
     int64_t val;
-    asm volatile ("mov %0 = ar.itc" : "=r"(val) :: "memory");
+    __asm__ volatile ("mov %0 = ar.itc" : "=r"(val) :: "memory");
     return val;
 }
 
@@ -214,7 +214,7 @@ static inline int64_t cpu_get_real_ticks(void)
 static inline int64_t cpu_get_real_ticks(void)
 {
     int64_t val;
-    asm volatile("stck 0(%1)" : "=m" (val) : "a" (&val) : "cc");
+    __asm__ volatile("stck 0(%1)" : "=m" (val) : "a" (&val) : "cc");
     return val;
 }
 
@@ -224,7 +224,7 @@ static inline int64_t cpu_get_real_ticks (void)
 {
 #if defined(_LP64)
     uint64_t        rval;
-    asm volatile("rd %%tick,%0" : "=r"(rval));
+    __asm__ volatile("rd %%tick,%0" : "=r"(rval));
     return rval;
 #else
     union {
@@ -234,8 +234,8 @@ static inline int64_t cpu_get_real_ticks (void)
             uint32_t low;
         }       i32;
     } rval;
-    asm volatile("rd %%tick,%1; srlx %1,32,%0"
-                 : "=r"(rval.i32.high), "=r"(rval.i32.low));
+    __asm__ volatile("rd %%tick,%1; srlx %1,32,%0"
+                     : "=r"(rval.i32.high), "=r"(rval.i32.low));
     return rval.i64;
 #endif
 }
@@ -277,7 +277,7 @@ static inline int64_t cpu_get_real_ticks(void)
     uint64_t cc;
     uint32_t cur, ofs;
 
-    asm volatile("rpcc %0" : "=r"(cc));
+    __asm__ volatile("rpcc %0" : "=r"(cc));
     cur = cc;
     ofs = cc >> 32;
     return cur - ofs;
diff --git a/target-i386/cpu.c b/target-i386/cpu.c
index 5521709..2d4acaa 100644
--- a/target-i386/cpu.c
+++ b/target-i386/cpu.c
@@ -114,20 +114,20 @@ void host_cpuid(uint32_t function, uint32_t count,
     uint32_t vec[4];
 
 #ifdef __x86_64__
-    asm volatile("cpuid"
-                 : "=a"(vec[0]), "=b"(vec[1]),
-                   "=c"(vec[2]), "=d"(vec[3])
-                 : "0"(function), "c"(count) : "cc");
+    __asm__ volatile("cpuid"
+                     : "=a"(vec[0]), "=b"(vec[1]),
+                       "=c"(vec[2]), "=d"(vec[3])
+                     : "0"(function), "c"(count) : "cc");
 #else
-    asm volatile("pusha \n\t"
-                 "cpuid \n\t"
-                 "mov %%eax, 0(%2) \n\t"
-                 "mov %%ebx, 4(%2) \n\t"
-                 "mov %%ecx, 8(%2) \n\t"
-                 "mov %%edx, 12(%2) \n\t"
-                 "popa"
-                 : : "a"(function), "c"(count), "S"(vec)
-                 : "memory", "cc");
+    __asm__ volatile("pusha \n\t"
+                     "cpuid \n\t"
+                     "mov %%eax, 0(%2) \n\t"
+                     "mov %%ebx, 4(%2) \n\t"
+                     "mov %%ecx, 8(%2) \n\t"
+                     "mov %%edx, 12(%2) \n\t"
+                     "popa"
+                     : : "a"(function), "c"(count), "S"(vec)
+                     : "memory", "cc");
 #endif
 
     if (eax)
diff --git a/target-ppc/kvm.c b/target-ppc/kvm.c
index 829e180..249a18d 100644
--- a/target-ppc/kvm.c
+++ b/target-ppc/kvm.c
@@ -1105,8 +1105,8 @@ static inline uint32_t mfpvr(void)
 {
     uint32_t pvr;
 
-    asm ("mfpvr %0"
-         : "=r"(pvr));
+    __asm__ ("mfpvr %0"
+             : "=r"(pvr));
     return pvr;
 }
 
diff --git a/target-ppc/kvm_ppc.h b/target-ppc/kvm_ppc.h
index e2f8703..e83cece 100644
--- a/target-ppc/kvm_ppc.h
+++ b/target-ppc/kvm_ppc.h
@@ -113,7 +113,7 @@ static inline int kvmppc_fixup_cpu(CPUPPCState *env)
 #define kvmppc_eieio() \
     do {                                          \
         if (kvm_enabled()) {                          \
-            asm volatile("eieio" : : : "memory"); \
+            __asm__ volatile("eieio" : : : "memory"); \
         } \
     } while (0)
 #endif
diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h
index f90b834..15dd35e 100644
--- a/tcg/arm/tcg-target.h
+++ b/tcg/arm/tcg-target.h
@@ -87,9 +87,9 @@ static inline void flush_icache_range(tcg_target_ulong start,
 #if QEMU_GNUC_PREREQ(4, 1)
     __builtin___clear_cache((char *) start, (char *) stop);
 #else
-    register unsigned long _beg __asm ("a1") = start;
-    register unsigned long _end __asm ("a2") = stop;
-    register unsigned long _flg __asm ("a3") = 0;
-    __asm __volatile__ ("swi 0x9f0002" : : "r" (_beg), "r" (_end), "r" (_flg));
+    register unsigned long _beg __asm__ ("a1") = start;
+    register unsigned long _end __asm__ ("a2") = stop;
+    register unsigned long _flg __asm__ ("a3") = 0;
+    __asm__ __volatile__ ("swi 0x9f0002" : : "r" (_beg), "r" (_end), "r" (_flg));
 #endif
 }
diff --git a/tcg/hppa/tcg-target.h b/tcg/hppa/tcg-target.h
index d4bf6fe..402bebf 100644
--- a/tcg/hppa/tcg-target.h
+++ b/tcg/hppa/tcg-target.h
@@ -113,11 +113,11 @@ static inline void flush_icache_range(tcg_target_ulong start,
 {
     start &= ~31;
     while (start <= stop) {
-        asm volatile ("fdc 0(%0)\n\t"
-                      "sync\n\t"
-                      "fic 0(%%sr4, %0)\n\t"
-                      "sync"
-                      : : "r"(start) : "memory");
+        __asm__ volatile ("fdc 0(%0)\n\t"
+                          "sync\n\t"
+                          "fic 0(%%sr4, %0)\n\t"
+                          "sync"
+                          : : "r"(start) : "memory");
         start += 32;
     }
 }
diff --git a/tcg/ia64/tcg-target.h b/tcg/ia64/tcg-target.h
index 0631b9f..7b642e1 100644
--- a/tcg/ia64/tcg-target.h
+++ b/tcg/ia64/tcg-target.h
@@ -153,7 +153,7 @@ static inline void flush_icache_range(tcg_target_ulong start,
     stop = (stop + (32UL - 1UL)) & ~(32UL - 1UL);
 
     for (; start < stop; start += 32UL) {
-        asm volatile ("fc.i %0" :: "r" (start));
+        __asm__ volatile ("fc.i %0" :: "r" (start));
     }
-    asm volatile (";;sync.i;;srlz.i;;");
+    __asm__ volatile (";;sync.i;;srlz.i;;");
 }
diff --git a/tcg/s390/tcg-target.c b/tcg/s390/tcg-target.c
index 04662c1..ae368df 100644
--- a/tcg/s390/tcg-target.c
+++ b/tcg/s390/tcg-target.c
@@ -2233,8 +2233,8 @@ static void query_facilities(void)
        kernel-only, storing its results at absolute address 200.  */
     /* stfle 0(%r1) */
     r1 = &facilities;
-    asm volatile(".word 0xb2b0,0x1000"
-                 : "=r"(r0) : "0"(0), "r"(r1) : "memory", "cc");
+    __asm__ volatile(".word 0xb2b0,0x1000"
+                     : "=r"(r0) : "0"(0), "r"(r1) : "memory", "cc");
 
     if (got_sigill) {
         /* STORE FACILITY EXTENDED is not available.  Probe for one of each
@@ -2246,7 +2246,7 @@ static void query_facilities(void)
         /* Test for z/Architecture.  Required even in 31-bit mode.  */
         got_sigill = 0;
         /* agr %r0,%r0 */
-        asm volatile(".word 0xb908,0x0000" : "=r"(r0) : : "cc");
+        __asm__ volatile(".word 0xb908,0x0000" : "=r"(r0) : : "cc");
         if (!got_sigill) {
             facilities |= FACILITY_ZARCH_ACTIVE;
         }
@@ -2255,8 +2255,8 @@ static void query_facilities(void)
         got_sigill = 0;
         /* ly %r0,0(%r1) */
         r1 = &facilities;
-        asm volatile(".word 0xe300,0x1000,0x0058"
-                     : "=r"(r0) : "r"(r1) : "cc");
+        __asm__ volatile(".word 0xe300,0x1000,0x0058"
+                         : "=r"(r0) : "r"(r1) : "cc");
         if (!got_sigill) {
             facilities |= FACILITY_LONG_DISP;
         }
@@ -2264,7 +2264,7 @@ static void query_facilities(void)
         /* Test for extended immediates.  */
         got_sigill = 0;
         /* afi %r0,0 */
-        asm volatile(".word 0xc209,0x0000,0x0000" : : : "cc");
+        __asm__ volatile(".word 0xc209,0x0000,0x0000" : : : "cc");
         if (!got_sigill) {
             facilities |= FACILITY_EXT_IMM;
         }
@@ -2272,7 +2272,7 @@ static void query_facilities(void)
         /* Test for general-instructions-extension.  */
         got_sigill = 0;
         /* msfi %r0,1 */
-        asm volatile(".word 0xc201,0x0000,0x0001");
+        __asm__ volatile(".word 0xc201,0x0000,0x0001");
         if (!got_sigill) {
             facilities |= FACILITY_GEN_INST_EXT;
         }
diff --git a/tcg/tcg.c b/tcg/tcg.c
index 8386b70..3dcc4d7 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -2294,7 +2294,7 @@ struct jit_descriptor {
 void __jit_debug_register_code(void) __attribute__((noinline));
 void __jit_debug_register_code(void)
 {
-    asm("");
+    __asm__("");
 }
 
 /* Must statically initialize the version, because GDB may check
-- 
1.7.2.5



