Use __asm__ instead of asm.
One less macro to worry about in bcm.c.
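(For background: plain asm is a GNU extension that GCC disables under the
strict ISO modes such as -std=c99 or -std=c11, while the __asm__ spelling
is always recognized. A minimal illustration, with a made-up function name:

    /* Builds under gcc -std=c11; spelling this `asm` would not. */
    static inline void compiler_barrier(void) {
      __asm__ volatile("" ::: "memory");
    }

Dropping the #define asm __asm__ shim means there is one macro fewer that
can leak into the other files bcm.c includes.)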
Change-Id: I321084c0d4ed1bec38c541b04f5b3468350c6eaa
Reviewed-on: https://boringssl-review.googlesource.com/19565
Reviewed-by: David Benjamin <davidben@google.com>
Commit-Queue: David Benjamin <davidben@google.com>
CQ-Verified: CQ bot account: commit-bot@chromium.org <commit-bot@chromium.org>
diff --git a/crypto/fipsmodule/bn/asm/x86_64-gcc.c b/crypto/fipsmodule/bn/asm/x86_64-gcc.c
index a65b86f..bfd770f 100644
--- a/crypto/fipsmodule/bn/asm/x86_64-gcc.c
+++ b/crypto/fipsmodule/bn/asm/x86_64-gcc.c
@@ -61,39 +61,37 @@
#undef mul
#undef mul_add
-#define asm __asm__
-
// "m"(a), "+m"(r) is the way to favor DirectPath ยต-code;
// "g"(0) let the compiler to decide where does it
// want to keep the value of zero;
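// (For reference, the constraint letters used below: "m" is a memory
// operand, "+m" a read-write memory operand, "r" a general-purpose
// register, and "g" any general operand, i.e. register, memory, or
// immediate.)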
-#define mul_add(r, a, word, carry) \
- do { \
- register BN_ULONG high, low; \
- asm("mulq %3" : "=a"(low), "=d"(high) : "a"(word), "m"(a) : "cc"); \
- asm("addq %2,%0; adcq %3,%1" \
- : "+r"(carry), "+d"(high) \
- : "a"(low), "g"(0) \
- : "cc"); \
- asm("addq %2,%0; adcq %3,%1" \
- : "+m"(r), "+d"(high) \
- : "r"(carry), "g"(0) \
- : "cc"); \
- (carry) = high; \
+#define mul_add(r, a, word, carry) \
+ do { \
+ register BN_ULONG high, low; \
+ __asm__("mulq %3" : "=a"(low), "=d"(high) : "a"(word), "m"(a) : "cc"); \
+ __asm__("addq %2,%0; adcq %3,%1" \
+ : "+r"(carry), "+d"(high) \
+ : "a"(low), "g"(0) \
+ : "cc"); \
+ __asm__("addq %2,%0; adcq %3,%1" \
+ : "+m"(r), "+d"(high) \
+ : "r"(carry), "g"(0) \
+ : "cc"); \
+ (carry) = high; \
} while (0)
-#define mul(r, a, word, carry) \
- do { \
- register BN_ULONG high, low; \
- asm("mulq %3" : "=a"(low), "=d"(high) : "a"(word), "g"(a) : "cc"); \
- asm("addq %2,%0; adcq %3,%1" \
- : "+r"(carry), "+d"(high) \
- : "a"(low), "g"(0) \
- : "cc"); \
- (r) = (carry); \
- (carry) = high; \
+#define mul(r, a, word, carry) \
+ do { \
+ register BN_ULONG high, low; \
+ __asm__("mulq %3" : "=a"(low), "=d"(high) : "a"(word), "g"(a) : "cc"); \
+ __asm__("addq %2,%0; adcq %3,%1" \
+ : "+r"(carry), "+d"(high) \
+ : "a"(low), "g"(0) \
+ : "cc"); \
+ (r) = (carry); \
+ (carry) = high; \
} while (0)
#undef sqr
-#define sqr(r0, r1, a) asm("mulq %2" : "=a"(r0), "=d"(r1) : "a"(a) : "cc");
+#define sqr(r0, r1, a) __asm__("mulq %2" : "=a"(r0), "=d"(r1) : "a"(a) : "cc");
BN_ULONG bn_mul_add_words(BN_ULONG *rp, const BN_ULONG *ap, int num,
BN_ULONG w) {
@@ -194,7 +192,7 @@
return 0;
}
- asm volatile (
+ __asm__ volatile (
" subq %0,%0 \n" // clear carry
" jmp 1f \n"
".p2align 4 \n"
@@ -221,7 +219,7 @@
return 0;
}
- asm volatile (
+ __asm__ volatile (
" subq %0,%0 \n" // clear borrow
" jmp 1f \n"
".p2align 4 \n"
@@ -246,38 +244,38 @@
// Keep in mind that carrying into the high part of the multiplication
// result cannot overflow, because the high part cannot be all-ones.
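// (Concretely, with 64-bit words: a*b <= (2^64 - 1)^2 = 2^128 - 2^65 + 1,
// so the high word is at most 2^64 - 2, and adding a one-bit carry cannot
// wrap.)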
-#define mul_add_c(a, b, c0, c1, c2) \
- do { \
- BN_ULONG t1, t2; \
- asm("mulq %3" : "=a"(t1), "=d"(t2) : "a"(a), "m"(b) : "cc"); \
- asm("addq %3,%0; adcq %4,%1; adcq %5,%2" \
- : "+r"(c0), "+r"(c1), "+r"(c2) \
- : "r"(t1), "r"(t2), "g"(0) \
- : "cc"); \
+#define mul_add_c(a, b, c0, c1, c2) \
+ do { \
+ BN_ULONG t1, t2; \
+ __asm__("mulq %3" : "=a"(t1), "=d"(t2) : "a"(a), "m"(b) : "cc"); \
+ __asm__("addq %3,%0; adcq %4,%1; adcq %5,%2" \
+ : "+r"(c0), "+r"(c1), "+r"(c2) \
+ : "r"(t1), "r"(t2), "g"(0) \
+ : "cc"); \
} while (0)
-#define sqr_add_c(a, i, c0, c1, c2) \
- do { \
- BN_ULONG t1, t2; \
- asm("mulq %2" : "=a"(t1), "=d"(t2) : "a"((a)[i]) : "cc"); \
- asm("addq %3,%0; adcq %4,%1; adcq %5,%2" \
- : "+r"(c0), "+r"(c1), "+r"(c2) \
- : "r"(t1), "r"(t2), "g"(0) \
- : "cc"); \
+#define sqr_add_c(a, i, c0, c1, c2) \
+ do { \
+ BN_ULONG t1, t2; \
+ __asm__("mulq %2" : "=a"(t1), "=d"(t2) : "a"((a)[i]) : "cc"); \
+ __asm__("addq %3,%0; adcq %4,%1; adcq %5,%2" \
+ : "+r"(c0), "+r"(c1), "+r"(c2) \
+ : "r"(t1), "r"(t2), "g"(0) \
+ : "cc"); \
} while (0)
-#define mul_add_c2(a, b, c0, c1, c2) \
- do { \
- BN_ULONG t1, t2; \
- asm("mulq %3" : "=a"(t1), "=d"(t2) : "a"(a), "m"(b) : "cc"); \
- asm("addq %3,%0; adcq %4,%1; adcq %5,%2" \
- : "+r"(c0), "+r"(c1), "+r"(c2) \
- : "r"(t1), "r"(t2), "g"(0) \
- : "cc"); \
- asm("addq %3,%0; adcq %4,%1; adcq %5,%2" \
- : "+r"(c0), "+r"(c1), "+r"(c2) \
- : "r"(t1), "r"(t2), "g"(0) \
- : "cc"); \
+#define mul_add_c2(a, b, c0, c1, c2) \
+ do { \
+ BN_ULONG t1, t2; \
+ __asm__("mulq %3" : "=a"(t1), "=d"(t2) : "a"(a), "m"(b) : "cc"); \
+ __asm__("addq %3,%0; adcq %4,%1; adcq %5,%2" \
+ : "+r"(c0), "+r"(c1), "+r"(c2) \
+ : "r"(t1), "r"(t2), "g"(0) \
+ : "cc"); \
+ __asm__("addq %3,%0; adcq %4,%1; adcq %5,%2" \
+ : "+r"(c0), "+r"(c1), "+r"(c2) \
+ : "r"(t1), "r"(t2), "g"(0) \
+ : "cc"); \
} while (0)
#define sqr_add_c2(a, i, j, c0, c1, c2) mul_add_c2((a)[i], (a)[j], c0, c1, c2)