Modernize OPENSSL_COMPILE_ASSERT, part 2.
The change seems to have stuck, so bring us closer to C11/C++11 static asserts.
(If we later find we need to support worse toolchains, we can always use
__LINE__ or __COUNTER__ to avoid duplicate typedef names and just punt on
embedding the message into the type name.)
Change-Id: I0e5bb1106405066f07740728e19ebe13cae3e0ee
Reviewed-on: https://boringssl-review.googlesource.com/c/33145
Commit-Queue: Adam Langley <agl@google.com>
Reviewed-by: Adam Langley <agl@google.com>
CQ-Verified: CQ bot account: commit-bot@chromium.org <commit-bot@chromium.org>
diff --git a/crypto/asn1/a_enum.c b/crypto/asn1/a_enum.c
index 4a77971..11e60ac 100644
--- a/crypto/asn1/a_enum.c
+++ b/crypto/asn1/a_enum.c
@@ -120,8 +120,8 @@
else if (i != V_ASN1_ENUMERATED)
return -1;
- OPENSSL_COMPILE_ASSERT(sizeof(uint64_t) >= sizeof(long),
- long_larger_than_uint64_t);
+ OPENSSL_STATIC_ASSERT(sizeof(uint64_t) >= sizeof(long),
+ "long larger than uint64_t");
if (a->length > (int)sizeof(uint64_t)) {
/* hmm... a bit ugly */
diff --git a/crypto/asn1/a_int.c b/crypto/asn1/a_int.c
index dd74550..6dc18ba 100644
--- a/crypto/asn1/a_int.c
+++ b/crypto/asn1/a_int.c
@@ -400,8 +400,8 @@
else if (i != V_ASN1_INTEGER)
return -1;
- OPENSSL_COMPILE_ASSERT(sizeof(uint64_t) >= sizeof(long),
- long_larger_than_uint64_t);
+ OPENSSL_STATIC_ASSERT(sizeof(uint64_t) >= sizeof(long),
+ "long larger than uint64_t");
if (a->length > (int)sizeof(uint64_t)) {
/* hmm... a bit ugly, return all ones */
diff --git a/crypto/base64/base64.c b/crypto/base64/base64.c
index b701b0d..349452d 100644
--- a/crypto/base64/base64.c
+++ b/crypto/base64/base64.c
@@ -98,8 +98,8 @@
return ret;
}
-OPENSSL_COMPILE_ASSERT(sizeof(((EVP_ENCODE_CTX *)(NULL))->data) % 3 == 0,
- data_length_must_be_multiple_of_base64_chunk_size);
+OPENSSL_STATIC_ASSERT(sizeof(((EVP_ENCODE_CTX *)(NULL))->data) % 3 == 0,
+ "data length must be a multiple of base64 chunk size");
int EVP_EncodedLength(size_t *out_len, size_t len) {
if (len + 2 < len) {
diff --git a/crypto/cipher_extra/e_aesccm.c b/crypto/cipher_extra/e_aesccm.c
index 37a9add..3e18659 100644
--- a/crypto/cipher_extra/e_aesccm.c
+++ b/crypto/cipher_extra/e_aesccm.c
@@ -33,13 +33,13 @@
CCM128_CONTEXT ccm;
};
-OPENSSL_COMPILE_ASSERT(sizeof(((EVP_AEAD_CTX *)NULL)->state) >=
- sizeof(struct aead_aes_ccm_ctx),
- AEAD_state_too_small);
+OPENSSL_STATIC_ASSERT(sizeof(((EVP_AEAD_CTX *)NULL)->state) >=
+ sizeof(struct aead_aes_ccm_ctx),
+ "AEAD state is too small");
#if defined(__GNUC__) || defined(__clang__)
-OPENSSL_COMPILE_ASSERT(alignof(union evp_aead_ctx_st_state) >=
- alignof(struct aead_aes_ccm_ctx),
- AEAD_state_insufficient_alignment);
+OPENSSL_STATIC_ASSERT(alignof(union evp_aead_ctx_st_state) >=
+ alignof(struct aead_aes_ccm_ctx),
+ "AEAD state has insufficient alignment");
#endif
static int aead_aes_ccm_init(EVP_AEAD_CTX *ctx, const uint8_t *key,
diff --git a/crypto/cipher_extra/e_aesctrhmac.c b/crypto/cipher_extra/e_aesctrhmac.c
index 54a50ec..8c45c81 100644
--- a/crypto/cipher_extra/e_aesctrhmac.c
+++ b/crypto/cipher_extra/e_aesctrhmac.c
@@ -35,13 +35,13 @@
SHA256_CTX outer_init_state;
};
-OPENSSL_COMPILE_ASSERT(sizeof(((EVP_AEAD_CTX *)NULL)->state) >=
- sizeof(struct aead_aes_ctr_hmac_sha256_ctx),
- AEAD_state_too_small);
+OPENSSL_STATIC_ASSERT(sizeof(((EVP_AEAD_CTX *)NULL)->state) >=
+ sizeof(struct aead_aes_ctr_hmac_sha256_ctx),
+ "AEAD state is too small");
#if defined(__GNUC__) || defined(__clang__)
-OPENSSL_COMPILE_ASSERT(alignof(union evp_aead_ctx_st_state) >=
- alignof(struct aead_aes_ctr_hmac_sha256_ctx),
- AEAD_state_insufficient_alignment);
+OPENSSL_STATIC_ASSERT(alignof(union evp_aead_ctx_st_state) >=
+ alignof(struct aead_aes_ctr_hmac_sha256_ctx),
+ "AEAD state has insufficient alignment");
#endif
static void hmac_init(SHA256_CTX *out_inner, SHA256_CTX *out_outer,
diff --git a/crypto/cipher_extra/e_aesgcmsiv.c b/crypto/cipher_extra/e_aesgcmsiv.c
index bf6c530..1deb918 100644
--- a/crypto/cipher_extra/e_aesgcmsiv.c
+++ b/crypto/cipher_extra/e_aesgcmsiv.c
@@ -38,12 +38,12 @@
// The assembly code assumes 8-byte alignment of the EVP_AEAD_CTX's state, and
// aligns to 16 bytes itself.
-OPENSSL_COMPILE_ASSERT(sizeof(((EVP_AEAD_CTX *)NULL)->state) + 8 >=
- sizeof(struct aead_aes_gcm_siv_asm_ctx),
- AEAD_state_too_small_opt);
+OPENSSL_STATIC_ASSERT(sizeof(((EVP_AEAD_CTX *)NULL)->state) + 8 >=
+ sizeof(struct aead_aes_gcm_siv_asm_ctx),
+ "AEAD state is too small");
#if defined(__GNUC__) || defined(__clang__)
-OPENSSL_COMPILE_ASSERT(alignof(union evp_aead_ctx_st_state) >= 8,
- AEAD_state_insufficient_alignment_opt);
+OPENSSL_STATIC_ASSERT(alignof(union evp_aead_ctx_st_state) >= 8,
+ "AEAD state has insufficient alignment");
#endif
// asm_ctx_from_ctx returns a 16-byte aligned context pointer from |ctx|.
@@ -560,13 +560,13 @@
unsigned is_256:1;
};
-OPENSSL_COMPILE_ASSERT(sizeof(((EVP_AEAD_CTX *)NULL)->state) >=
- sizeof(struct aead_aes_gcm_siv_ctx),
- AEAD_state_too_small);
+OPENSSL_STATIC_ASSERT(sizeof(((EVP_AEAD_CTX *)NULL)->state) >=
+ sizeof(struct aead_aes_gcm_siv_ctx),
+ "AEAD state is too small");
#if defined(__GNUC__) || defined(__clang__)
-OPENSSL_COMPILE_ASSERT(alignof(union evp_aead_ctx_st_state) >=
- alignof(struct aead_aes_gcm_siv_ctx),
- AEAD_state_insufficient_alignment);
+OPENSSL_STATIC_ASSERT(alignof(union evp_aead_ctx_st_state) >=
+ alignof(struct aead_aes_gcm_siv_ctx),
+ "AEAD state has insufficient alignment");
#endif
static int aead_aes_gcm_siv_init(EVP_AEAD_CTX *ctx, const uint8_t *key,
diff --git a/crypto/cipher_extra/e_chacha20poly1305.c b/crypto/cipher_extra/e_chacha20poly1305.c
index 5aee4ae..1c175e9 100644
--- a/crypto/cipher_extra/e_chacha20poly1305.c
+++ b/crypto/cipher_extra/e_chacha20poly1305.c
@@ -35,13 +35,13 @@
uint8_t key[32];
};
-OPENSSL_COMPILE_ASSERT(sizeof(((EVP_AEAD_CTX *)NULL)->state) >=
- sizeof(struct aead_chacha20_poly1305_ctx),
- AEAD_state_too_small);
+OPENSSL_STATIC_ASSERT(sizeof(((EVP_AEAD_CTX *)NULL)->state) >=
+ sizeof(struct aead_chacha20_poly1305_ctx),
+ "AEAD state is too small");
#if defined(__GNUC__) || defined(__clang__)
-OPENSSL_COMPILE_ASSERT(alignof(union evp_aead_ctx_st_state) >=
- alignof(struct aead_chacha20_poly1305_ctx),
- AEAD_state_insufficient_alignment);
+OPENSSL_STATIC_ASSERT(alignof(union evp_aead_ctx_st_state) >=
+ alignof(struct aead_chacha20_poly1305_ctx),
+ "AEAD state has insufficient alignment");
#endif
// For convenience (the x86_64 calling convention allows only six parameters in
@@ -78,9 +78,9 @@
return sse41_capable;
}
-OPENSSL_COMPILE_ASSERT(sizeof(union open_data) == 48, wrong_open_data_size);
-OPENSSL_COMPILE_ASSERT(sizeof(union seal_data) == 48 + 8 + 8,
- wrong_seal_data_size);
+OPENSSL_STATIC_ASSERT(sizeof(union open_data) == 48, "wrong open_data size");
+OPENSSL_STATIC_ASSERT(sizeof(union seal_data) == 48 + 8 + 8,
+ "wrong seal_data size");
// chacha20_poly1305_open is defined in chacha20_poly1305_x86_64.pl. It decrypts
// |plaintext_len| bytes from |ciphertext| and writes them to |out_plaintext|.
diff --git a/crypto/cipher_extra/e_tls.c b/crypto/cipher_extra/e_tls.c
index 1f1fc3a..ff41989 100644
--- a/crypto/cipher_extra/e_tls.c
+++ b/crypto/cipher_extra/e_tls.c
@@ -42,15 +42,16 @@
char implicit_iv;
} AEAD_TLS_CTX;
-OPENSSL_COMPILE_ASSERT(EVP_MAX_MD_SIZE < 256, mac_key_len_fits_in_uint8_t);
+OPENSSL_STATIC_ASSERT(EVP_MAX_MD_SIZE < 256,
+ "mac_key_len does not fit in uint8_t");
-OPENSSL_COMPILE_ASSERT(sizeof(((EVP_AEAD_CTX *)NULL)->state) >=
- sizeof(AEAD_TLS_CTX),
- AEAD_state_too_small);
+OPENSSL_STATIC_ASSERT(sizeof(((EVP_AEAD_CTX *)NULL)->state) >=
+ sizeof(AEAD_TLS_CTX),
+ "AEAD state is too small");
#if defined(__GNUC__) || defined(__clang__)
-OPENSSL_COMPILE_ASSERT(alignof(union evp_aead_ctx_st_state) >=
- alignof(AEAD_TLS_CTX),
- AEAD_state_insufficient_alignment);
+OPENSSL_STATIC_ASSERT(alignof(union evp_aead_ctx_st_state) >=
+ alignof(AEAD_TLS_CTX),
+ "AEAD state has insufficient alignment");
#endif
static void aead_tls_cleanup(EVP_AEAD_CTX *ctx) {
diff --git a/crypto/err/err_data_generate.go b/crypto/err/err_data_generate.go
index 893ebff..da965df 100644
--- a/crypto/err/err_data_generate.go
+++ b/crypto/err/err_data_generate.go
@@ -275,9 +275,9 @@
`)
for i, name := range libraryNames {
- fmt.Fprintf(out, "OPENSSL_COMPILE_ASSERT(ERR_LIB_%s == %d, library_values_changed_%d);\n", name, i+1, i+1)
+ fmt.Fprintf(out, "OPENSSL_STATIC_ASSERT(ERR_LIB_%s == %d, \"library value changed\");\n", name, i+1)
}
- fmt.Fprintf(out, "OPENSSL_COMPILE_ASSERT(ERR_NUM_LIBS == %d, library_values_changed_num);\n", len(libraryNames)+1)
+ fmt.Fprintf(out, "OPENSSL_STATIC_ASSERT(ERR_NUM_LIBS == %d, \"number of libraries changed\");\n", len(libraryNames)+1)
out.WriteString("\n")
e.reasons.WriteTo(out, "Reason")
diff --git a/crypto/evp/scrypt.c b/crypto/evp/scrypt.c
index 53a4554..2feb650 100644
--- a/crypto/evp/scrypt.c
+++ b/crypto/evp/scrypt.c
@@ -30,7 +30,7 @@
// A block_t is a Salsa20 block.
typedef struct { uint32_t words[16]; } block_t;
-OPENSSL_COMPILE_ASSERT(sizeof(block_t) == 64, block_t_has_padding);
+OPENSSL_STATIC_ASSERT(sizeof(block_t) == 64, "block_t has padding");
#define R(a, b) (((a) << (b)) | ((a) >> (32 - (b))))
@@ -173,7 +173,7 @@
// Allocate and divide up the scratch space. |max_mem| fits in a size_t, which
// is no bigger than uint64_t, so none of these operations may overflow.
- OPENSSL_COMPILE_ASSERT(UINT64_MAX >= ((size_t)-1), size_t_exceeds_u64);
+ OPENSSL_STATIC_ASSERT(UINT64_MAX >= ((size_t)-1), "size_t exceeds uint64_t");
size_t B_blocks = p * 2 * r;
size_t B_bytes = B_blocks * sizeof(block_t);
size_t T_blocks = 2 * r;
diff --git a/crypto/fipsmodule/bn/bn.c b/crypto/fipsmodule/bn/bn.c
index c020d96..51b828a 100644
--- a/crypto/fipsmodule/bn/bn.c
+++ b/crypto/fipsmodule/bn/bn.c
@@ -406,8 +406,8 @@
void bn_select_words(BN_ULONG *r, BN_ULONG mask, const BN_ULONG *a,
const BN_ULONG *b, size_t num) {
for (size_t i = 0; i < num; i++) {
- OPENSSL_COMPILE_ASSERT(sizeof(BN_ULONG) <= sizeof(crypto_word_t),
- crypto_word_t_too_small);
+ OPENSSL_STATIC_ASSERT(sizeof(BN_ULONG) <= sizeof(crypto_word_t),
+ "crypto_word_t is too small");
r[i] = constant_time_select_w(mask, a[i], b[i]);
}
}
diff --git a/crypto/fipsmodule/bn/cmp.c b/crypto/fipsmodule/bn/cmp.c
index 692adb5..fe478b6 100644
--- a/crypto/fipsmodule/bn/cmp.c
+++ b/crypto/fipsmodule/bn/cmp.c
@@ -65,8 +65,8 @@
static int bn_cmp_words_consttime(const BN_ULONG *a, size_t a_len,
const BN_ULONG *b, size_t b_len) {
- OPENSSL_COMPILE_ASSERT(sizeof(BN_ULONG) <= sizeof(crypto_word_t),
- crypto_word_t_too_small);
+ OPENSSL_STATIC_ASSERT(sizeof(BN_ULONG) <= sizeof(crypto_word_t),
+ "crypto_word_t is too small");
int ret = 0;
// Process the common words in little-endian order.
size_t min = a_len < b_len ? a_len : b_len;
diff --git a/crypto/fipsmodule/bn/montgomery.c b/crypto/fipsmodule/bn/montgomery.c
index 851c0a0..006d2b2 100644
--- a/crypto/fipsmodule/bn/montgomery.c
+++ b/crypto/fipsmodule/bn/montgomery.c
@@ -167,11 +167,6 @@
return to;
}
-OPENSSL_COMPILE_ASSERT(BN_MONT_CTX_N0_LIMBS == 1 || BN_MONT_CTX_N0_LIMBS == 2,
- BN_MONT_CTX_N0_LIMBS_VALUE_INVALID);
-OPENSSL_COMPILE_ASSERT(sizeof(BN_ULONG) * BN_MONT_CTX_N0_LIMBS ==
- sizeof(uint64_t), BN_MONT_CTX_set_64_bit_mismatch);
-
static int bn_mont_ctx_set_N_and_n0(BN_MONT_CTX *mont, const BIGNUM *mod) {
if (BN_is_zero(mod)) {
OPENSSL_PUT_ERROR(BN, BN_R_DIV_BY_ZERO);
@@ -202,6 +197,11 @@
// others, we could use a shorter R value and use faster |BN_ULONG|-based
// math instead of |uint64_t|-based math, which would be double-precision.
// However, currently only the assembler files know which is which.
+ OPENSSL_STATIC_ASSERT(BN_MONT_CTX_N0_LIMBS == 1 || BN_MONT_CTX_N0_LIMBS == 2,
+ "BN_MONT_CTX_N0_LIMBS value is invalid");
+ OPENSSL_STATIC_ASSERT(
+ sizeof(BN_ULONG) * BN_MONT_CTX_N0_LIMBS == sizeof(uint64_t),
+ "uint64_t is insufficient precision for n0");
uint64_t n0 = bn_mont_n0(&mont->N);
mont->n0[0] = (BN_ULONG)n0;
#if BN_MONT_CTX_N0_LIMBS == 2
diff --git a/crypto/fipsmodule/bn/montgomery_inv.c b/crypto/fipsmodule/bn/montgomery_inv.c
index 94d99e8..c80873f 100644
--- a/crypto/fipsmodule/bn/montgomery_inv.c
+++ b/crypto/fipsmodule/bn/montgomery_inv.c
@@ -22,11 +22,11 @@
static uint64_t bn_neg_inv_mod_r_u64(uint64_t n);
-OPENSSL_COMPILE_ASSERT(BN_MONT_CTX_N0_LIMBS == 1 || BN_MONT_CTX_N0_LIMBS == 2,
- BN_MONT_CTX_N0_LIMBS_VALUE_INVALID_2);
-OPENSSL_COMPILE_ASSERT(sizeof(uint64_t) ==
- BN_MONT_CTX_N0_LIMBS * sizeof(BN_ULONG),
- BN_MONT_CTX_N0_LIMBS_DOES_NOT_MATCH_UINT64_T);
+OPENSSL_STATIC_ASSERT(BN_MONT_CTX_N0_LIMBS == 1 || BN_MONT_CTX_N0_LIMBS == 2,
+ "BN_MONT_CTX_N0_LIMBS value is invalid");
+OPENSSL_STATIC_ASSERT(sizeof(BN_ULONG) * BN_MONT_CTX_N0_LIMBS ==
+ sizeof(uint64_t),
+ "uint64_t is insufficient precision for n0");
// LG_LITTLE_R is log_2(r).
#define LG_LITTLE_R (BN_MONT_CTX_N0_LIMBS * BN_BITS2)
diff --git a/crypto/fipsmodule/bn/mul.c b/crypto/fipsmodule/bn/mul.c
index a1582a2..640d8cd 100644
--- a/crypto/fipsmodule/bn/mul.c
+++ b/crypto/fipsmodule/bn/mul.c
@@ -409,8 +409,8 @@
BN_ULONG c_neg = c - bn_sub_words(&t[n2 * 2], t, &t[n2], n2);
BN_ULONG c_pos = c + bn_add_words(&t[n2], t, &t[n2], n2);
bn_select_words(&t[n2], neg, &t[n2 * 2], &t[n2], n2);
- OPENSSL_COMPILE_ASSERT(sizeof(BN_ULONG) <= sizeof(crypto_word_t),
- crypto_word_t_too_small);
+ OPENSSL_STATIC_ASSERT(sizeof(BN_ULONG) <= sizeof(crypto_word_t),
+ "crypto_word_t is too small");
c = constant_time_select_w(neg, c_neg, c_pos);
// We now have our three components. Add them together.
@@ -523,8 +523,8 @@
BN_ULONG c_neg = c - bn_sub_words(&t[n2 * 2], t, &t[n2], n2);
BN_ULONG c_pos = c + bn_add_words(&t[n2], t, &t[n2], n2);
bn_select_words(&t[n2], neg, &t[n2 * 2], &t[n2], n2);
- OPENSSL_COMPILE_ASSERT(sizeof(BN_ULONG) <= sizeof(crypto_word_t),
- crypto_word_t_too_small);
+ OPENSSL_STATIC_ASSERT(sizeof(BN_ULONG) <= sizeof(crypto_word_t),
+ "crypto_word_t is too small");
c = constant_time_select_w(neg, c_neg, c_pos);
// We now have our three components. Add them together.
diff --git a/crypto/fipsmodule/bn/random.c b/crypto/fipsmodule/bn/random.c
index e41a0ef..f6812f1 100644
--- a/crypto/fipsmodule/bn/random.c
+++ b/crypto/fipsmodule/bn/random.c
@@ -195,8 +195,8 @@
}
// |a| < |b| iff a[1..len-1] are all zero and a[0] < b.
- OPENSSL_COMPILE_ASSERT(sizeof(BN_ULONG) <= sizeof(crypto_word_t),
- crypto_word_t_too_small);
+ OPENSSL_STATIC_ASSERT(sizeof(BN_ULONG) <= sizeof(crypto_word_t),
+ "crypto_word_t is too small");
crypto_word_t mask = 0;
for (size_t i = 1; i < len; i++) {
mask |= a[i];
diff --git a/crypto/fipsmodule/bn/rsaz_exp.c b/crypto/fipsmodule/bn/rsaz_exp.c
index 3f355b6..64dfff8 100644
--- a/crypto/fipsmodule/bn/rsaz_exp.c
+++ b/crypto/fipsmodule/bn/rsaz_exp.c
@@ -45,11 +45,13 @@
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
void RSAZ_1024_mod_exp_avx2(BN_ULONG result_norm[16],
- const BN_ULONG base_norm[16], const BN_ULONG exponent[16],
- const BN_ULONG m_norm[16], const BN_ULONG RR[16], BN_ULONG k0,
- BN_ULONG storage_words[MOD_EXP_CTIME_STORAGE_LEN]) {
- OPENSSL_COMPILE_ASSERT(MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH % 64 == 0,
- MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH_is_large_enough);
+ const BN_ULONG base_norm[16],
+ const BN_ULONG exponent[16],
+ const BN_ULONG m_norm[16], const BN_ULONG RR[16],
+ BN_ULONG k0,
+ BN_ULONG storage_words[MOD_EXP_CTIME_STORAGE_LEN]) {
+ OPENSSL_STATIC_ASSERT(MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH % 64 == 0,
+ "MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH is too small");
unsigned char *storage = (unsigned char *)storage_words;
assert((uintptr_t)storage % 64 == 0);
diff --git a/crypto/fipsmodule/bn/shift.c b/crypto/fipsmodule/bn/shift.c
index ccf7141..523da67 100644
--- a/crypto/fipsmodule/bn/shift.c
+++ b/crypto/fipsmodule/bn/shift.c
@@ -296,15 +296,15 @@
}
static int bn_count_low_zero_bits_word(BN_ULONG l) {
- OPENSSL_COMPILE_ASSERT(sizeof(BN_ULONG) <= sizeof(crypto_word_t),
- crypto_word_t_too_small);
- OPENSSL_COMPILE_ASSERT(sizeof(int) <= sizeof(crypto_word_t),
- crypto_word_t_too_small_2);
- OPENSSL_COMPILE_ASSERT(BN_BITS2 == sizeof(BN_ULONG) * 8,
- bn_ulong_has_padding_bits);
+ OPENSSL_STATIC_ASSERT(sizeof(BN_ULONG) <= sizeof(crypto_word_t),
+ "crypto_word_t is too small");
+ OPENSSL_STATIC_ASSERT(sizeof(int) <= sizeof(crypto_word_t),
+ "crypto_word_t is too small");
+ OPENSSL_STATIC_ASSERT(BN_BITS2 == sizeof(BN_ULONG) * 8,
+ "BN_ULONG has padding bits");
// C has very bizarre rules for types smaller than an int.
- OPENSSL_COMPILE_ASSERT(sizeof(BN_ULONG) >= sizeof(int),
- bn_ulong_is_promoted_to_int);
+ OPENSSL_STATIC_ASSERT(sizeof(BN_ULONG) >= sizeof(int),
+ "BN_ULONG gets promoted to int");
crypto_word_t mask;
int bits = 0;
@@ -342,10 +342,10 @@
}
int BN_count_low_zero_bits(const BIGNUM *bn) {
- OPENSSL_COMPILE_ASSERT(sizeof(BN_ULONG) <= sizeof(crypto_word_t),
- crypto_word_t_too_small);
- OPENSSL_COMPILE_ASSERT(sizeof(int) <= sizeof(crypto_word_t),
- crypto_word_t_too_small_2);
+ OPENSSL_STATIC_ASSERT(sizeof(BN_ULONG) <= sizeof(crypto_word_t),
+ "crypto_word_t is too small");
+ OPENSSL_STATIC_ASSERT(sizeof(int) <= sizeof(crypto_word_t),
+ "crypto_word_t is too small");
int ret = 0;
crypto_word_t saw_nonzero = 0;
diff --git a/crypto/fipsmodule/cipher/e_aes.c b/crypto/fipsmodule/cipher/e_aes.c
index a24515a..068465b 100644
--- a/crypto/fipsmodule/cipher/e_aes.c
+++ b/crypto/fipsmodule/cipher/e_aes.c
@@ -906,13 +906,13 @@
return 1;
}
-OPENSSL_COMPILE_ASSERT(sizeof(((EVP_AEAD_CTX *)NULL)->state) >=
- sizeof(struct aead_aes_gcm_ctx),
- AEAD_state_too_small);
+OPENSSL_STATIC_ASSERT(sizeof(((EVP_AEAD_CTX *)NULL)->state) >=
+ sizeof(struct aead_aes_gcm_ctx),
+ "AEAD state is too small");
#if defined(__GNUC__) || defined(__clang__)
- OPENSSL_COMPILE_ASSERT(
- alignof(union evp_aead_ctx_st_state) >= alignof(struct aead_aes_gcm_ctx),
- AEAD_state_insufficient_alignment);
+OPENSSL_STATIC_ASSERT(alignof(union evp_aead_ctx_st_state) >=
+ alignof(struct aead_aes_gcm_ctx),
+ "AEAD state has insufficient alignment");
#endif
static int aead_aes_gcm_init(EVP_AEAD_CTX *ctx, const uint8_t *key,
@@ -1079,13 +1079,13 @@
uint64_t min_next_nonce;
};
-OPENSSL_COMPILE_ASSERT(sizeof(((EVP_AEAD_CTX *)NULL)->state) >=
- sizeof(struct aead_aes_gcm_tls12_ctx),
- AEAD_state_too_small_tls12);
+OPENSSL_STATIC_ASSERT(sizeof(((EVP_AEAD_CTX *)NULL)->state) >=
+ sizeof(struct aead_aes_gcm_tls12_ctx),
+ "AEAD state is too small");
#if defined(__GNUC__) || defined(__clang__)
-OPENSSL_COMPILE_ASSERT(alignof(union evp_aead_ctx_st_state) >=
- alignof(struct aead_aes_gcm_tls12_ctx),
- AEAD_state_insufficient_alignment_tls12);
+OPENSSL_STATIC_ASSERT(alignof(union evp_aead_ctx_st_state) >=
+ alignof(struct aead_aes_gcm_tls12_ctx),
+ "AEAD state has insufficient alignment");
#endif
static int aead_aes_gcm_tls12_init(EVP_AEAD_CTX *ctx, const uint8_t *key,
@@ -1173,13 +1173,13 @@
uint8_t first;
};
-OPENSSL_COMPILE_ASSERT(sizeof(((EVP_AEAD_CTX *)NULL)->state) >=
- sizeof(struct aead_aes_gcm_tls13_ctx),
- AEAD_state_too_small_tls13);
+OPENSSL_STATIC_ASSERT(sizeof(((EVP_AEAD_CTX *)NULL)->state) >=
+ sizeof(struct aead_aes_gcm_tls13_ctx),
+ "AEAD state is too small");
#if defined(__GNUC__) || defined(__clang__)
-OPENSSL_COMPILE_ASSERT(alignof(union evp_aead_ctx_st_state) >=
- alignof(struct aead_aes_gcm_tls13_ctx),
- AEAD_state_insufficient_alignment_tls13);
+OPENSSL_STATIC_ASSERT(alignof(union evp_aead_ctx_st_state) >=
+ alignof(struct aead_aes_gcm_tls13_ctx),
+ "AEAD state has insufficient alignment");
#endif
static int aead_aes_gcm_tls13_init(EVP_AEAD_CTX *ctx, const uint8_t *key,
diff --git a/crypto/fipsmodule/ec/internal.h b/crypto/fipsmodule/ec/internal.h
index d78d719..05175a5 100644
--- a/crypto/fipsmodule/ec/internal.h
+++ b/crypto/fipsmodule/ec/internal.h
@@ -88,8 +88,8 @@
#define EC_MAX_BYTES 66
#define EC_MAX_WORDS ((EC_MAX_BYTES + BN_BYTES - 1) / BN_BYTES)
-OPENSSL_COMPILE_ASSERT(EC_MAX_WORDS <= BN_SMALL_MAX_WORDS,
- bn_small_functions_applicable);
+OPENSSL_STATIC_ASSERT(EC_MAX_WORDS <= BN_SMALL_MAX_WORDS,
+ "bn_*_small functions not usable");
// An EC_SCALAR is an integer fully reduced modulo the order. Only the first
// |order->width| words are used. An |EC_SCALAR| is specific to an |EC_GROUP|
diff --git a/crypto/fipsmodule/ecdsa/ecdsa.c b/crypto/fipsmodule/ecdsa/ecdsa.c
index e1395b7..010ee02 100644
--- a/crypto/fipsmodule/ecdsa/ecdsa.c
+++ b/crypto/fipsmodule/ecdsa/ecdsa.c
@@ -211,8 +211,8 @@
} else {
// Pass a SHA512 hash of the private key and digest as additional data
// into the RBG. This is a hardening measure against entropy failure.
- OPENSSL_COMPILE_ASSERT(SHA512_DIGEST_LENGTH >= 32,
- additional_data_is_too_large_for_sha512);
+ OPENSSL_STATIC_ASSERT(SHA512_DIGEST_LENGTH >= 32,
+ "additional_data is too large for SHA-512");
SHA512_CTX sha;
uint8_t additional_data[SHA512_DIGEST_LENGTH];
SHA512_Init(&sha);
diff --git a/crypto/fipsmodule/modes/cfb.c b/crypto/fipsmodule/modes/cfb.c
index d3a38d6..0a81f3b 100644
--- a/crypto/fipsmodule/modes/cfb.c
+++ b/crypto/fipsmodule/modes/cfb.c
@@ -54,7 +54,8 @@
#include "internal.h"
-OPENSSL_COMPILE_ASSERT((16 % sizeof(size_t)) == 0, bad_size_t_size_cfb);
+OPENSSL_STATIC_ASSERT(16 % sizeof(size_t) == 0,
+ "block cannot be divided into size_t");
void CRYPTO_cfb128_encrypt(const uint8_t *in, uint8_t *out, size_t len,
const AES_KEY *key, uint8_t ivec[16], unsigned *num,
diff --git a/crypto/fipsmodule/modes/ctr.c b/crypto/fipsmodule/modes/ctr.c
index 5093408..b806b9a 100644
--- a/crypto/fipsmodule/modes/ctr.c
+++ b/crypto/fipsmodule/modes/ctr.c
@@ -69,7 +69,8 @@
} while (n);
}
-OPENSSL_COMPILE_ASSERT((16 % sizeof(size_t)) == 0, bad_size_t_size_ctr);
+OPENSSL_STATIC_ASSERT(16 % sizeof(size_t) == 0,
+ "block cannot be divided into size_t");
// The input encrypted as though 128bit counter mode is being used. The extra
// state information to record how much of the 128bit block we have used is
diff --git a/crypto/fipsmodule/modes/ofb.c b/crypto/fipsmodule/modes/ofb.c
index b1b4d87..4c70ce6 100644
--- a/crypto/fipsmodule/modes/ofb.c
+++ b/crypto/fipsmodule/modes/ofb.c
@@ -54,7 +54,8 @@
#include "internal.h"
-OPENSSL_COMPILE_ASSERT((16 % sizeof(size_t)) == 0, bad_size_t_size_ofb);
+OPENSSL_STATIC_ASSERT(16 % sizeof(size_t) == 0,
+ "block cannot be divided into size_t");
void CRYPTO_ofb128_encrypt(const uint8_t *in, uint8_t *out, size_t len,
const AES_KEY *key, uint8_t ivec[16], unsigned *num,
diff --git a/crypto/fipsmodule/rand/ctrdrbg.c b/crypto/fipsmodule/rand/ctrdrbg.c
index f2fe8b3..b2fda1d 100644
--- a/crypto/fipsmodule/rand/ctrdrbg.c
+++ b/crypto/fipsmodule/rand/ctrdrbg.c
@@ -64,8 +64,8 @@
return 1;
}
-OPENSSL_COMPILE_ASSERT(CTR_DRBG_ENTROPY_LEN % AES_BLOCK_SIZE == 0,
- not_a_multiple_of_block_size);
+OPENSSL_STATIC_ASSERT(CTR_DRBG_ENTROPY_LEN % AES_BLOCK_SIZE == 0,
+ "not a multiple of AES block size");
// ctr_inc adds |n| to the last four bytes of |drbg->counter|, treated as a
// big-endian number.
diff --git a/crypto/refcount_lock.c b/crypto/refcount_lock.c
index 8b855d6..fb1c11f 100644
--- a/crypto/refcount_lock.c
+++ b/crypto/refcount_lock.c
@@ -21,8 +21,8 @@
#if !defined(OPENSSL_C11_ATOMIC)
-OPENSSL_COMPILE_ASSERT((CRYPTO_refcount_t)-1 == CRYPTO_REFCOUNT_MAX,
- CRYPTO_REFCOUNT_MAX_is_incorrect);
+OPENSSL_STATIC_ASSERT((CRYPTO_refcount_t)-1 == CRYPTO_REFCOUNT_MAX,
+ "CRYPTO_REFCOUNT_MAX is incorrect");
static struct CRYPTO_STATIC_MUTEX g_refcount_lock = CRYPTO_STATIC_MUTEX_INIT;
diff --git a/crypto/thread_pthread.c b/crypto/thread_pthread.c
index f8bf595..832e90e 100644
--- a/crypto/thread_pthread.c
+++ b/crypto/thread_pthread.c
@@ -24,8 +24,8 @@
#include <openssl/type_check.h>
-OPENSSL_COMPILE_ASSERT(sizeof(CRYPTO_MUTEX) >= sizeof(pthread_rwlock_t),
- CRYPTO_MUTEX_too_small);
+OPENSSL_STATIC_ASSERT(sizeof(CRYPTO_MUTEX) >= sizeof(pthread_rwlock_t),
+ "CRYPTO_MUTEX is too small");
void CRYPTO_MUTEX_init(CRYPTO_MUTEX *lock) {
if (pthread_rwlock_init((pthread_rwlock_t *) lock, NULL) != 0) {
diff --git a/crypto/thread_win.c b/crypto/thread_win.c
index 248870a..8b2b2da 100644
--- a/crypto/thread_win.c
+++ b/crypto/thread_win.c
@@ -27,8 +27,8 @@
#include <openssl/type_check.h>
-OPENSSL_COMPILE_ASSERT(sizeof(CRYPTO_MUTEX) >= sizeof(SRWLOCK),
- CRYPTO_MUTEX_too_small);
+OPENSSL_STATIC_ASSERT(sizeof(CRYPTO_MUTEX) >= sizeof(SRWLOCK),
+ "CRYPTO_MUTEX is too small");
static BOOL CALLBACK call_once_init(INIT_ONCE *once, void *arg, void **out) {
void (**init)(void) = (void (**)(void))arg;
diff --git a/include/openssl/ssl3.h b/include/openssl/ssl3.h
index 67d06f4..e3910f0 100644
--- a/include/openssl/ssl3.h
+++ b/include/openssl/ssl3.h
@@ -251,9 +251,9 @@
#define SSL3_RT_SEND_MAX_ENCRYPTED_OVERHEAD \
(EVP_AEAD_MAX_OVERHEAD + EVP_AEAD_MAX_NONCE_LENGTH)
-OPENSSL_COMPILE_ASSERT(
- SSL3_RT_MAX_ENCRYPTED_OVERHEAD >= SSL3_RT_SEND_MAX_ENCRYPTED_OVERHEAD,
- max_overheads_are_consistent);
+OPENSSL_STATIC_ASSERT(SSL3_RT_MAX_ENCRYPTED_OVERHEAD >=
+ SSL3_RT_SEND_MAX_ENCRYPTED_OVERHEAD,
+ "max overheads are inconsistent");
// SSL3_RT_MAX_COMPRESSED_LENGTH is an alias for
// |SSL3_RT_MAX_PLAIN_LENGTH|. Compression is gone, so don't include the
diff --git a/include/openssl/stack.h b/include/openssl/stack.h
index c1bf520..924228a 100644
--- a/include/openssl/stack.h
+++ b/include/openssl/stack.h
@@ -416,9 +416,9 @@
// DEFINE_SPECIAL_STACK_OF defines |STACK_OF(type)| to be a stack whose elements
// are |type|, where |type| must be a typedef for a pointer.
-#define DEFINE_SPECIAL_STACK_OF(type) \
- OPENSSL_COMPILE_ASSERT(sizeof(type) == sizeof(void *), \
- special_stack_of_non_pointer_##type); \
+#define DEFINE_SPECIAL_STACK_OF(type) \
+ OPENSSL_STATIC_ASSERT(sizeof(type) == sizeof(void *), \
+ #type " is not a pointer"); \
BORINGSSL_DEFINE_STACK_OF_IMPL(type, type, const type)
diff --git a/include/openssl/type_check.h b/include/openssl/type_check.h
index 90d81f7..c267938 100644
--- a/include/openssl/type_check.h
+++ b/include/openssl/type_check.h
@@ -64,19 +64,15 @@
#endif
-// TODO(davidben): |OPENSSL_COMPILE_ASSERT| used to be implemented with a
-// typedef, so the |msg| parameter is a token. It now requires C11 or C++11
-// static asserts. If this change survives to 2018-11-05, switch the parameter
-// to a string. (Maybe rename to |OPENSSL_STATIC_ASSERT| while we're at it.)
#if defined(__cplusplus) || (defined(_MSC_VER) && !defined(__clang__))
// In C++ and non-clang MSVC, |static_assert| is a keyword.
-#define OPENSSL_COMPILE_ASSERT(cond, msg) static_assert(cond, #msg)
+#define OPENSSL_STATIC_ASSERT(cond, msg) static_assert(cond, msg)
#else
// C11 defines the |_Static_assert| keyword and the |static_assert| macro in
// assert.h. While the former is available at all versions in Clang and GCC, the
// later depends on libc and, in glibc, depends on being built in C11 mode. We
// do not require this, for now, so use |_Static_assert| directly.
-#define OPENSSL_COMPILE_ASSERT(cond, msg) _Static_assert(cond, #msg)
+#define OPENSSL_STATIC_ASSERT(cond, msg) _Static_assert(cond, msg)
#endif
// CHECKED_CAST casts |p| from type |from| to type |to|.
diff --git a/third_party/fiat/curve25519.c b/third_party/fiat/curve25519.c
index 58a5ed0..15623c6 100644
--- a/third_party/fiat/curve25519.c
+++ b/third_party/fiat/curve25519.c
@@ -1396,8 +1396,8 @@
}
static void fe_copy_lt(fe_loose *h, const fe *f) {
- OPENSSL_COMPILE_ASSERT(sizeof(fe_loose) == sizeof(fe),
- fe_and_fe_loose_mismatch);
+ OPENSSL_STATIC_ASSERT(sizeof(fe_loose) == sizeof(fe),
+ "fe and fe_loose mismatch");
OPENSSL_memmove(h, f, sizeof(fe));
}
#if !defined(OPENSSL_SMALL)
diff --git a/third_party/fiat/p256.c b/third_party/fiat/p256.c
index 893c9d4..414b7e0 100644
--- a/third_party/fiat/p256.c
+++ b/third_party/fiat/p256.c
@@ -903,9 +903,9 @@
static void fe_to_generic(EC_FELEM *out, const fe in) {
// This works because 256 is a multiple of 64, so there are no excess bytes to
// zero when rounding up to |BN_ULONG|s.
- OPENSSL_COMPILE_ASSERT(
+ OPENSSL_STATIC_ASSERT(
256 / 8 == sizeof(BN_ULONG) * ((256 + BN_BITS2 - 1) / BN_BITS2),
- bytes_left_over);
+ "fe_tobytes leaves bytes uninitialized");
fe_tobytes(out->bytes, in);
}