Replace some more C unions. I don't think these are all UB by C's rules, but it's easier not to think about the pointers. Still more to go, but these were some easy ones. Bug: 301 Change-Id: Icdcb7fb40f85983cbf566786c5f7dbfd7bb06571 Reviewed-on: https://boringssl-review.googlesource.com/c/boringssl/+/52905 Reviewed-by: Bob Beck <bbe@google.com> Commit-Queue: Bob Beck <bbe@google.com>
diff --git a/crypto/bytestring/cbb.c b/crypto/bytestring/cbb.c index 12587cd..6ce20ad 100644 --- a/crypto/bytestring/cbb.c +++ b/crypto/bytestring/cbb.c
@@ -542,14 +542,11 @@ return CBB_add_asn1_uint64(cbb, value); } - union { - int64_t i; - uint8_t bytes[sizeof(int64_t)]; - } u; - u.i = value; + uint8_t bytes[sizeof(int64_t)]; + memcpy(bytes, &value, sizeof(value)); int start = 7; // Skip leading sign-extension bytes unless they are necessary. - while (start > 0 && (u.bytes[start] == 0xff && (u.bytes[start - 1] & 0x80))) { + while (start > 0 && (bytes[start] == 0xff && (bytes[start - 1] & 0x80))) { start--; } @@ -558,7 +555,7 @@ return 0; } for (int i = start; i >= 0; i--) { - if (!CBB_add_u8(&child, u.bytes[i])) { + if (!CBB_add_u8(&child, bytes[i])) { return 0; } }
diff --git a/crypto/bytestring/cbs.c b/crypto/bytestring/cbs.c index 010897b..741ecfb 100644 --- a/crypto/bytestring/cbs.c +++ b/crypto/bytestring/cbs.c
@@ -496,15 +496,12 @@ if (len > sizeof(int64_t)) { return 0; } - union { - int64_t i; - uint8_t bytes[sizeof(int64_t)]; - } u; - memset(u.bytes, is_negative ? 0xff : 0, sizeof(u.bytes)); // Sign-extend. + uint8_t sign_extend[sizeof(int64_t)]; + memset(sign_extend, is_negative ? 0xff : 0, sizeof(sign_extend)); for (size_t i = 0; i < len; i++) { - u.bytes[i] = data[len - i - 1]; + sign_extend[i] = data[len - i - 1]; } - *out = u.i; + memcpy(out, sign_extend, sizeof(sign_extend)); return 1; }
diff --git a/crypto/cipher_extra/e_aesgcmsiv.c b/crypto/cipher_extra/e_aesgcmsiv.c index 387eaff..555922d 100644 --- a/crypto/cipher_extra/e_aesgcmsiv.c +++ b/crypto/cipher_extra/e_aesgcmsiv.c
@@ -262,17 +262,10 @@ aesgcmsiv_polyval_horner(out_tag, auth_key, scratch, 1); } - union { - uint8_t c[16]; - struct { - uint64_t ad; - uint64_t in; - } bitlens; - } length_block; - - length_block.bitlens.ad = ad_len * 8; - length_block.bitlens.in = in_len * 8; - aesgcmsiv_polyval_horner(out_tag, auth_key, length_block.c, 1); + uint8_t length_block[16]; + CRYPTO_store_u64_le(length_block, ad_len * 8); + CRYPTO_store_u64_le(length_block + 8, in_len * 8); + aesgcmsiv_polyval_horner(out_tag, auth_key, length_block, 1); for (size_t i = 0; i < 12; i++) { out_tag[i] ^= nonce[i]; @@ -289,18 +282,15 @@ int is_128_bit, uint8_t *out, const uint8_t *in, size_t in_len, const uint8_t tag[16], const struct aead_aes_gcm_siv_asm_ctx *enc_key_expanded) { - alignas(16) union { - uint8_t c[16]; - uint32_t u32[4]; - } counter; + alignas(16) uint8_t counter[16]; OPENSSL_memcpy(&counter, tag, sizeof(counter)); - counter.c[15] |= 0x80; - counter.u32[0] += in_len / 16; + counter[15] |= 0x80; + CRYPTO_store_u32_le(counter, CRYPTO_load_u32_le(counter) + in_len / 16); if (is_128_bit) { - aes128gcmsiv_ecb_enc_block(&counter.c[0], &counter.c[0], enc_key_expanded); + aes128gcmsiv_ecb_enc_block(counter, counter, enc_key_expanded); } else { - aes256gcmsiv_ecb_enc_block(&counter.c[0], &counter.c[0], enc_key_expanded); + aes256gcmsiv_ecb_enc_block(counter, counter, enc_key_expanded); } const size_t last_bytes_offset = in_len & ~15; @@ -308,7 +298,7 @@ uint8_t *last_bytes_out = &out[last_bytes_offset]; const uint8_t *last_bytes_in = &in[last_bytes_offset]; for (size_t i = 0; i < last_bytes_len; i++) { - last_bytes_out[i] = last_bytes_in[i] ^ counter.c[i]; + last_bytes_out[i] = last_bytes_in[i] ^ counter[i]; } } @@ -489,18 +479,11 @@ scratch, 1); } - union { - uint8_t c[16]; - struct { - uint64_t ad; - uint64_t in; - } bitlens; - } length_block; - - length_block.bitlens.ad = ad_len * 8; - length_block.bitlens.in = plaintext_len * 8; + uint8_t length_block[16]; + CRYPTO_store_u64_le(length_block, ad_len * 8); + CRYPTO_store_u64_le(length_block + 8, plaintext_len * 8); aesgcmsiv_polyval_horner(calculated_tag, (const uint8_t *)record_auth_key, - length_block.c, 1); + length_block, 1); for (size_t i = 0; i < 12; i++) { calculated_tag[i] ^= nonce[i]; @@ -619,18 +602,15 @@ static void gcm_siv_crypt(uint8_t *out, const uint8_t *in, size_t in_len, const uint8_t initial_counter[AES_BLOCK_SIZE], block128_f enc_block, const AES_KEY *key) { - union { - uint32_t w[4]; - uint8_t c[16]; - } counter; + uint8_t counter[16]; - OPENSSL_memcpy(counter.c, initial_counter, AES_BLOCK_SIZE); - counter.c[15] |= 0x80; + OPENSSL_memcpy(counter, initial_counter, AES_BLOCK_SIZE); + counter[15] |= 0x80; for (size_t done = 0; done < in_len;) { uint8_t keystream[AES_BLOCK_SIZE]; - enc_block(counter.c, keystream, key); - counter.w[0]++; + enc_block(counter, keystream, key); + CRYPTO_store_u32_le(counter, CRYPTO_load_u32_le(counter) + 1); size_t todo = AES_BLOCK_SIZE; if (in_len - done < todo) { @@ -670,17 +650,10 @@ CRYPTO_POLYVAL_update_blocks(&polyval_ctx, scratch, sizeof(scratch)); } - union { - uint8_t c[16]; - struct { - uint64_t ad; - uint64_t in; - } bitlens; - } length_block; - - length_block.bitlens.ad = ad_len * 8; - length_block.bitlens.in = in_len * 8; - CRYPTO_POLYVAL_update_blocks(&polyval_ctx, length_block.c, + uint8_t length_block[16]; + CRYPTO_store_u64_le(length_block, ad_len * 8); + CRYPTO_store_u64_le(length_block + 8, in_len * 8); + CRYPTO_POLYVAL_update_blocks(&polyval_ctx, length_block, sizeof(length_block)); CRYPTO_POLYVAL_finish(&polyval_ctx, out_tag);
diff --git a/crypto/curve25519/curve25519.c b/crypto/curve25519/curve25519.c index e316acd..4bf37b9 100644 --- a/crypto/curve25519/curve25519.c +++ b/crypto/curve25519/curve25519.c
@@ -1936,11 +1936,8 @@ OPENSSL_memcpy(pkcopy, public_key, 32); uint8_t rcopy[32]; OPENSSL_memcpy(rcopy, signature, 32); - union { - uint64_t u64[4]; - uint8_t u8[32]; - } scopy; - OPENSSL_memcpy(&scopy.u8[0], signature + 32, 32); + uint8_t scopy[32]; + OPENSSL_memcpy(scopy, signature + 32, 32); // https://tools.ietf.org/html/rfc8032#section-5.1.7 requires that s be in // the range [0, order) in order to prevent signature malleability. @@ -1953,9 +1950,10 @@ UINT64_C(0x1000000000000000), }; for (size_t i = 3;; i--) { - if (scopy.u64[i] > kOrder[i]) { + uint64_t word = CRYPTO_load_u64_le(scopy + i * 8); + if (word > kOrder[i]) { return 0; - } else if (scopy.u64[i] < kOrder[i]) { + } else if (word < kOrder[i]) { break; } else if (i == 0) { return 0; @@ -1973,7 +1971,7 @@ x25519_sc_reduce(h); ge_p2 R; - ge_double_scalarmult_vartime(&R, h, &A, scopy.u8); + ge_double_scalarmult_vartime(&R, h, &A, scopy); uint8_t rcheck[32]; x25519_ge_tobytes(rcheck, &R);
diff --git a/crypto/fipsmodule/cipher/e_aes.c b/crypto/fipsmodule/cipher/e_aes.c index 7677355..61f4ef3 100644 --- a/crypto/fipsmodule/cipher/e_aes.c +++ b/crypto/fipsmodule/cipher/e_aes.c
@@ -99,16 +99,13 @@ out += 16 * bsaes_blocks; blocks -= bsaes_blocks; - union { - uint32_t u32[4]; - uint8_t u8[16]; - } new_ivec; - memcpy(new_ivec.u8, ivec, 16); - uint32_t ctr = CRYPTO_bswap4(new_ivec.u32[3]) + bsaes_blocks; - new_ivec.u32[3] = CRYPTO_bswap4(ctr); + uint8_t new_ivec[16]; + memcpy(new_ivec, ivec, 12); + uint32_t ctr = CRYPTO_load_u32_be(ivec + 12) + bsaes_blocks; + CRYPTO_store_u32_be(new_ivec + 12, ctr); // Finish any remaining blocks with |vpaes_ctr32_encrypt_blocks|. - vpaes_ctr32_encrypt_blocks(in, out, blocks, key, new_ivec.u8); + vpaes_ctr32_encrypt_blocks(in, out, blocks, key, new_ivec); } #endif // BSAES
diff --git a/crypto/fipsmodule/modes/gcm.c b/crypto/fipsmodule/modes/gcm.c index 5b909aa..1a77ec0 100644 --- a/crypto/fipsmodule/modes/gcm.c +++ b/crypto/fipsmodule/modes/gcm.c
@@ -137,76 +137,69 @@ const uint8_t gcm_key[16]) { *out_is_avx = 0; - union { - uint64_t u[2]; - uint8_t c[16]; - } H; - - OPENSSL_memcpy(H.c, gcm_key, 16); - - // H is stored in host byte order - H.u[0] = CRYPTO_bswap8(H.u[0]); - H.u[1] = CRYPTO_bswap8(H.u[1]); - - OPENSSL_memcpy(out_key, H.c, 16); + // H is stored in host byte order. + uint64_t H[2] = {CRYPTO_load_u64_be(gcm_key), + CRYPTO_load_u64_be(gcm_key + 8)}; + out_key->hi = H[0]; + out_key->lo = H[1]; #if defined(GHASH_ASM_X86_64) if (crypto_gcm_clmul_enabled()) { if (CRYPTO_is_AVX_capable() && CRYPTO_is_MOVBE_capable()) { - gcm_init_avx(out_table, H.u); + gcm_init_avx(out_table, H); *out_mult = gcm_gmult_avx; *out_hash = gcm_ghash_avx; *out_is_avx = 1; return; } - gcm_init_clmul(out_table, H.u); + gcm_init_clmul(out_table, H); *out_mult = gcm_gmult_clmul; *out_hash = gcm_ghash_clmul; return; } if (CRYPTO_is_SSSE3_capable()) { - gcm_init_ssse3(out_table, H.u); + gcm_init_ssse3(out_table, H); *out_mult = gcm_gmult_ssse3; *out_hash = gcm_ghash_ssse3; return; } #elif defined(GHASH_ASM_X86) if (crypto_gcm_clmul_enabled()) { - gcm_init_clmul(out_table, H.u); + gcm_init_clmul(out_table, H); *out_mult = gcm_gmult_clmul; *out_hash = gcm_ghash_clmul; return; } if (CRYPTO_is_SSSE3_capable()) { - gcm_init_ssse3(out_table, H.u); + gcm_init_ssse3(out_table, H); *out_mult = gcm_gmult_ssse3; *out_hash = gcm_ghash_ssse3; return; } #elif defined(GHASH_ASM_ARM) if (gcm_pmull_capable()) { - gcm_init_v8(out_table, H.u); + gcm_init_v8(out_table, H); *out_mult = gcm_gmult_v8; *out_hash = gcm_ghash_v8; return; } if (gcm_neon_capable()) { - gcm_init_neon(out_table, H.u); + gcm_init_neon(out_table, H); *out_mult = gcm_gmult_neon; *out_hash = gcm_ghash_neon; return; } #elif defined(GHASH_ASM_PPC64LE) if (CRYPTO_is_PPC64LE_vcrypto_capable()) { - gcm_init_p8(out_table, H.u); + gcm_init_p8(out_table, H); *out_mult = gcm_gmult_p8; *out_hash = gcm_ghash_p8; return; } #endif - gcm_init_nohw(out_table, H.u); + gcm_init_nohw(out_table, H); *out_mult = gcm_gmult_nohw; *out_hash = gcm_ghash_nohw; }
diff --git a/crypto/internal.h b/crypto/internal.h index c9b5e8e..2e94399 100644 --- a/crypto/internal.h +++ b/crypto/internal.h
@@ -881,6 +881,16 @@ OPENSSL_memcpy(out, &v, sizeof(v)); } +static inline uint64_t CRYPTO_load_u64_le(const void *in) { + uint64_t v; + OPENSSL_memcpy(&v, in, sizeof(v)); + return v; +} + +static inline void CRYPTO_store_u64_le(void *out, uint64_t v) { + OPENSSL_memcpy(out, &v, sizeof(v)); +} + static inline uint64_t CRYPTO_load_u64_be(const void *ptr) { uint64_t ret; OPENSSL_memcpy(&ret, ptr, sizeof(ret));
diff --git a/crypto/siphash/siphash.c b/crypto/siphash/siphash.c index bb9a0c1..0921eac 100644 --- a/crypto/siphash/siphash.c +++ b/crypto/siphash/siphash.c
@@ -48,8 +48,7 @@ v[3] = key[1] ^ UINT64_C(0x7465646279746573); while (input_len >= sizeof(uint64_t)) { - uint64_t m; - memcpy(&m, input, sizeof(m)); + uint64_t m = CRYPTO_load_u64_le(input); v[3] ^= m; siphash_round(v); siphash_round(v); @@ -59,18 +58,16 @@ input_len -= sizeof(uint64_t); } - union { - uint8_t bytes[8]; - uint64_t word; - } last_block; - last_block.word = 0; - OPENSSL_memcpy(last_block.bytes, input, input_len); - last_block.bytes[7] = orig_input_len & 0xff; + uint8_t last_block[8]; + OPENSSL_memset(last_block, 0, sizeof(last_block)); + OPENSSL_memcpy(last_block, input, input_len); + last_block[7] = orig_input_len & 0xff; - v[3] ^= last_block.word; + uint64_t last_block_word = CRYPTO_load_u64_le(last_block); + v[3] ^= last_block_word; siphash_round(v); siphash_round(v); - v[0] ^= last_block.word; + v[0] ^= last_block_word; v[2] ^= 0xff; siphash_round(v);
diff --git a/crypto/siphash/siphash_test.cc b/crypto/siphash/siphash_test.cc index 6d8f9e7..6407e0d 100644 --- a/crypto/siphash/siphash_test.cc +++ b/crypto/siphash/siphash_test.cc
@@ -23,14 +23,12 @@ TEST(SipHash, Basic) { // This is the example from appendix A of the SipHash paper. - union { - uint8_t bytes[16]; - uint64_t words[2]; - } key; - + uint8_t key_bytes[16]; for (unsigned i = 0; i < 16; i++) { - key.bytes[i] = i; + key_bytes[i] = i; } + uint64_t key[2]; + memcpy(key, key_bytes, sizeof(key)); uint8_t input[15]; for (unsigned i = 0; i < sizeof(input); i++) { @@ -38,7 +36,7 @@ } EXPECT_EQ(UINT64_C(0xa129ca6149be45e5), - SIPHASH_24(key.words, input, sizeof(input))); + SIPHASH_24(key, input, sizeof(input))); } TEST(SipHash, Vectors) {