Run the comment converter on libcrypto.
crypto/{asn1,x509,x509v3,pem} were skipped as they are still OpenSSL
style.
Change-Id: I3cd9a60e1cb483a981aca325041f3fbce294247c
Reviewed-on: https://boringssl-review.googlesource.com/19504
Reviewed-by: Adam Langley <agl@google.com>
Commit-Queue: David Benjamin <davidben@google.com>
CQ-Verified: CQ bot account: commit-bot@chromium.org <commit-bot@chromium.org>
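
For reference, the conversion is mechanical: each block comment becomes a run of line comments, one per source line, with the " * " continuation prefix rewritten to "// ". A representative before/after, taken from the aes.c hunk below:

    /* map byte array block to cipher state
     * and add initial round key: */

becomes

    // map byte array block to cipher state
    // and add initial round key: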
diff --git a/crypto/fipsmodule/aes/aes.c b/crypto/fipsmodule/aes/aes.c
index c68a5d5..a988b39 100644
--- a/crypto/fipsmodule/aes/aes.c
+++ b/crypto/fipsmodule/aes/aes.c
@@ -59,16 +59,16 @@
#if defined(OPENSSL_NO_ASM) || \
(!defined(OPENSSL_X86) && !defined(OPENSSL_X86_64) && !defined(OPENSSL_ARM))
-/* Te0[x] = S [x].[02, 01, 01, 03];
- * Te1[x] = S [x].[03, 02, 01, 01];
- * Te2[x] = S [x].[01, 03, 02, 01];
- * Te3[x] = S [x].[01, 01, 03, 02];
- *
- * Td0[x] = Si[x].[0e, 09, 0d, 0b];
- * Td1[x] = Si[x].[0b, 0e, 09, 0d];
- * Td2[x] = Si[x].[0d, 0b, 0e, 09];
- * Td3[x] = Si[x].[09, 0d, 0b, 0e];
- * Td4[x] = Si[x].[01]; */
+// Te0[x] = S [x].[02, 01, 01, 03];
+// Te1[x] = S [x].[03, 02, 01, 01];
+// Te2[x] = S [x].[01, 03, 02, 01];
+// Te3[x] = S [x].[01, 01, 03, 02];
+//
+// Td0[x] = Si[x].[0e, 09, 0d, 0b];
+// Td1[x] = Si[x].[0b, 0e, 09, 0d];
+// Td2[x] = Si[x].[0d, 0b, 0e, 09];
+// Td3[x] = Si[x].[09, 0d, 0b, 0e];
+// Td4[x] = Si[x].[01];
static const uint32_t Te0[256] = {
0xc66363a5U, 0xf87c7c84U, 0xee777799U, 0xf67b7b8dU, 0xfff2f20dU,
@@ -531,7 +531,7 @@
static const uint32_t rcon[] = {
0x01000000, 0x02000000, 0x04000000, 0x08000000, 0x10000000,
0x20000000, 0x40000000, 0x80000000, 0x1B000000, 0x36000000,
- /* for 128-bit blocks, Rijndael never uses more than 10 rcon values */
+ // for 128-bit blocks, Rijndael never uses more than 10 rcon values
};
int AES_set_encrypt_key(const uint8_t *key, unsigned bits, AES_KEY *aeskey) {
@@ -634,7 +634,7 @@
int i, j, status;
uint32_t temp;
- /* first, start with an encryption schedule */
+ // first, start with an encryption schedule
status = AES_set_encrypt_key(key, bits, aeskey);
if (status < 0) {
return status;
@@ -642,7 +642,7 @@
rk = aeskey->rd_key;
- /* invert the order of the round keys: */
+ // invert the order of the round keys:
for (i = 0, j = 4 * aeskey->rounds; i < j; i += 4, j -= 4) {
temp = rk[i];
rk[i] = rk[j];
@@ -657,8 +657,8 @@
rk[i + 3] = rk[j + 3];
rk[j + 3] = temp;
}
- /* apply the inverse MixColumn transform to all round keys but the first and
- * the last: */
+ // apply the inverse MixColumn transform to all round keys but the first and
+ // the last:
for (i = 1; i < (int)aeskey->rounds; i++) {
rk += 4;
rk[0] =
@@ -682,19 +682,19 @@
uint32_t s0, s1, s2, s3, t0, t1, t2, t3;
#ifndef FULL_UNROLL
int r;
-#endif /* ?FULL_UNROLL */
+#endif // ?FULL_UNROLL
assert(in && out && key);
rk = key->rd_key;
- /* map byte array block to cipher state
- * and add initial round key: */
+ // map byte array block to cipher state
+ // and add initial round key:
s0 = GETU32(in) ^ rk[0];
s1 = GETU32(in + 4) ^ rk[1];
s2 = GETU32(in + 8) ^ rk[2];
s3 = GETU32(in + 12) ^ rk[3];
#ifdef FULL_UNROLL
- /* round 1: */
+ // round 1:
t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^
Te3[s3 & 0xff] ^ rk[4];
t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^
@@ -703,7 +703,7 @@
Te3[s1 & 0xff] ^ rk[6];
t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^
Te3[s2 & 0xff] ^ rk[7];
- /* round 2: */
+ // round 2:
s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^
Te3[t3 & 0xff] ^ rk[8];
s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^
@@ -712,7 +712,7 @@
Te3[t1 & 0xff] ^ rk[10];
s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^
Te3[t2 & 0xff] ^ rk[11];
- /* round 3: */
+ // round 3:
t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^
Te3[s3 & 0xff] ^ rk[12];
t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^
@@ -721,7 +721,7 @@
Te3[s1 & 0xff] ^ rk[14];
t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^
Te3[s2 & 0xff] ^ rk[15];
- /* round 4: */
+ // round 4:
s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^
Te3[t3 & 0xff] ^ rk[16];
s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^
@@ -730,7 +730,7 @@
Te3[t1 & 0xff] ^ rk[18];
s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^
Te3[t2 & 0xff] ^ rk[19];
- /* round 5: */
+ // round 5:
t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^
Te3[s3 & 0xff] ^ rk[20];
t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^
@@ -739,7 +739,7 @@
Te3[s1 & 0xff] ^ rk[22];
t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^
Te3[s2 & 0xff] ^ rk[23];
- /* round 6: */
+ // round 6:
s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^
Te3[t3 & 0xff] ^ rk[24];
s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^
@@ -748,7 +748,7 @@
Te3[t1 & 0xff] ^ rk[26];
s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^
Te3[t2 & 0xff] ^ rk[27];
- /* round 7: */
+ // round 7:
t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^
Te3[s3 & 0xff] ^ rk[28];
t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^
@@ -757,7 +757,7 @@
Te3[s1 & 0xff] ^ rk[30];
t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^
Te3[s2 & 0xff] ^ rk[31];
- /* round 8: */
+ // round 8:
s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^
Te3[t3 & 0xff] ^ rk[32];
s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^
@@ -766,7 +766,7 @@
Te3[t1 & 0xff] ^ rk[34];
s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^
Te3[t2 & 0xff] ^ rk[35];
- /* round 9: */
+ // round 9:
t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^
Te3[s3 & 0xff] ^ rk[36];
t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^
@@ -776,7 +776,7 @@
t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^
Te3[s2 & 0xff] ^ rk[39];
if (key->rounds > 10) {
- /* round 10: */
+ // round 10:
s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^
Te3[t3 & 0xff] ^ rk[40];
s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^
@@ -785,7 +785,7 @@
Te3[t1 & 0xff] ^ rk[42];
s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^
Te3[t2 & 0xff] ^ rk[43];
- /* round 11: */
+ // round 11:
t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^
Te3[s3 & 0xff] ^ rk[44];
t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^
@@ -795,7 +795,7 @@
t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^
Te3[s2 & 0xff] ^ rk[47];
if (key->rounds > 12) {
- /* round 12: */
+ // round 12:
s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^
Te3[t3 & 0xff] ^ rk[48];
s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^
@@ -804,7 +804,7 @@
Te3[t1 & 0xff] ^ rk[50];
s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^
Te3[t2 & 0xff] ^ rk[51];
- /* round 13: */
+ // round 13:
t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^
Te3[s3 & 0xff] ^ rk[52];
t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^
@@ -816,10 +816,8 @@
}
}
rk += key->rounds << 2;
-#else /* !FULL_UNROLL */
- /*
- * Nr - 1 full rounds:
- */
+#else // !FULL_UNROLL
+ // Nr - 1 full rounds:
r = key->rounds >> 1;
for (;;) {
t0 = Te0[(s0 >> 24)] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^
@@ -845,8 +843,8 @@
s3 = Te0[(t3 >> 24)] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^
Te3[(t2) & 0xff] ^ rk[3];
}
-#endif /* ?FULL_UNROLL */
- /* apply last round and map cipher state to byte array block: */
+#endif // ?FULL_UNROLL
+ // apply last round and map cipher state to byte array block:
s0 = (Te2[(t0 >> 24)] & 0xff000000) ^ (Te3[(t1 >> 16) & 0xff] & 0x00ff0000) ^
(Te0[(t2 >> 8) & 0xff] & 0x0000ff00) ^ (Te1[(t3) & 0xff] & 0x000000ff) ^
rk[0];
@@ -870,19 +868,19 @@
uint32_t s0, s1, s2, s3, t0, t1, t2, t3;
#ifndef FULL_UNROLL
int r;
-#endif /* ?FULL_UNROLL */
+#endif // ?FULL_UNROLL
assert(in && out && key);
rk = key->rd_key;
- /* map byte array block to cipher state
- * and add initial round key: */
+ // map byte array block to cipher state
+ // and add initial round key:
s0 = GETU32(in) ^ rk[0];
s1 = GETU32(in + 4) ^ rk[1];
s2 = GETU32(in + 8) ^ rk[2];
s3 = GETU32(in + 12) ^ rk[3];
#ifdef FULL_UNROLL
- /* round 1: */
+ // round 1:
t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^
Td3[s1 & 0xff] ^ rk[4];
t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^
@@ -891,7 +889,7 @@
Td3[s3 & 0xff] ^ rk[6];
t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^
Td3[s0 & 0xff] ^ rk[7];
- /* round 2: */
+ // round 2:
s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^
Td3[t1 & 0xff] ^ rk[8];
s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^
@@ -900,7 +898,7 @@
Td3[t3 & 0xff] ^ rk[10];
s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^
Td3[t0 & 0xff] ^ rk[11];
- /* round 3: */
+ // round 3:
t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^
Td3[s1 & 0xff] ^ rk[12];
t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^
@@ -909,7 +907,7 @@
Td3[s3 & 0xff] ^ rk[14];
t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^
Td3[s0 & 0xff] ^ rk[15];
- /* round 4: */
+ // round 4:
s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^
Td3[t1 & 0xff] ^ rk[16];
s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^
@@ -918,7 +916,7 @@
Td3[t3 & 0xff] ^ rk[18];
s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^
Td3[t0 & 0xff] ^ rk[19];
- /* round 5: */
+ // round 5:
t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^
Td3[s1 & 0xff] ^ rk[20];
t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^
@@ -927,7 +925,7 @@
Td3[s3 & 0xff] ^ rk[22];
t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^
Td3[s0 & 0xff] ^ rk[23];
- /* round 6: */
+ // round 6:
s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^
Td3[t1 & 0xff] ^ rk[24];
s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^
@@ -936,7 +934,7 @@
Td3[t3 & 0xff] ^ rk[26];
s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^
Td3[t0 & 0xff] ^ rk[27];
- /* round 7: */
+ // round 7:
t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^
Td3[s1 & 0xff] ^ rk[28];
t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^
@@ -945,7 +943,7 @@
Td3[s3 & 0xff] ^ rk[30];
t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^
Td3[s0 & 0xff] ^ rk[31];
- /* round 8: */
+ // round 8:
s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^
Td3[t1 & 0xff] ^ rk[32];
s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^
@@ -954,7 +952,7 @@
Td3[t3 & 0xff] ^ rk[34];
s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^
Td3[t0 & 0xff] ^ rk[35];
- /* round 9: */
+ // round 9:
t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^
Td3[s1 & 0xff] ^ rk[36];
t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^
@@ -964,7 +962,7 @@
t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^
Td3[s0 & 0xff] ^ rk[39];
if (key->rounds > 10) {
- /* round 10: */
+ // round 10:
s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^
Td3[t1 & 0xff] ^ rk[40];
s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^
@@ -973,7 +971,7 @@
Td3[t3 & 0xff] ^ rk[42];
s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^
Td3[t0 & 0xff] ^ rk[43];
- /* round 11: */
+ // round 11:
t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^
Td3[s1 & 0xff] ^ rk[44];
t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^
@@ -983,7 +981,7 @@
t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^
Td3[s0 & 0xff] ^ rk[47];
if (key->rounds > 12) {
- /* round 12: */
+ // round 12:
s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^
Td3[t1 & 0xff] ^ rk[48];
s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^
@@ -992,7 +990,7 @@
Td3[t3 & 0xff] ^ rk[50];
s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^
Td3[t0 & 0xff] ^ rk[51];
- /* round 13: */
+ // round 13:
t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^
Td3[s1 & 0xff] ^ rk[52];
t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^
@@ -1004,10 +1002,8 @@
}
}
rk += key->rounds << 2;
-#else /* !FULL_UNROLL */
- /*
- * Nr - 1 full rounds:
- */
+#else // !FULL_UNROLL
+ // Nr - 1 full rounds:
r = key->rounds >> 1;
for (;;) {
t0 = Td0[(s0 >> 24)] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^
@@ -1033,9 +1029,9 @@
s3 = Td0[(t3 >> 24)] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^
Td3[(t0) & 0xff] ^ rk[3];
}
-#endif /* ?FULL_UNROLL */
- /* apply last round and
- * map cipher state to byte array block: */
+#endif // ?FULL_UNROLL
+ // apply last round and
+ // map cipher state to byte array block:
s0 = ((uint32_t)Td4[(t0 >> 24)] << 24) ^
((uint32_t)Td4[(t3 >> 16) & 0xff] << 16) ^
((uint32_t)Td4[(t2 >> 8) & 0xff] << 8) ^
@@ -1060,10 +1056,10 @@
#else
-/* In this case several functions are provided by asm code. However, one cannot
- * control asm symbol visibility with command line flags and such so they are
- * always hidden and wrapped by these C functions, which can be so
- * controlled. */
+// In this case several functions are provided by asm code. However, one cannot
+// control asm symbol visibility with command-line flags and such, so they are
+// always hidden and wrapped by these C functions, whose visibility can be
+// controlled.
void asm_AES_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key);
void AES_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) {
@@ -1101,4 +1097,4 @@
}
}
-#endif /* OPENSSL_NO_ASM || (!OPENSSL_X86 && !OPENSSL_X86_64 && !OPENSSL_ARM) */
+#endif // OPENSSL_NO_ASM || (!OPENSSL_X86 && !OPENSSL_X86_64 && !OPENSSL_ARM)
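
As a quick orientation for the functions whose comments changed above, here is a minimal usage sketch (not part of this CL; the all-zero key and helper name are illustrative). AES_set_encrypt_key returns 0 on success and a negative value on failure, which the KAT code in bcm.c below also relies on, and AES_encrypt processes exactly one 16-byte block:

    #include <assert.h>
    #include <stdint.h>

    #include <openssl/aes.h>

    // Encrypt a single 16-byte block under a hypothetical all-zero AES-128 key.
    static void encrypt_one_block(const uint8_t in[16], uint8_t out[16]) {
      static const uint8_t kKey[16] = {0};  // illustrative key, not from the CL
      AES_KEY aes_key;
      int ret = AES_set_encrypt_key(kKey, 128, &aes_key);  // 0 on success
      assert(ret == 0);
      (void)ret;
      AES_encrypt(in, out, &aes_key);  // one 16-byte block in, one out
    }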
diff --git a/crypto/fipsmodule/aes/internal.h b/crypto/fipsmodule/aes/internal.h
index 01cff84..45db9ee 100644
--- a/crypto/fipsmodule/aes/internal.h
+++ b/crypto/fipsmodule/aes/internal.h
@@ -30,7 +30,7 @@
static int hwaes_capable(void) {
return CRYPTO_is_ARMv8_AES_capable();
}
-#endif /* !NO_ASM && (AES || AARCH64) */
+#endif // !NO_ASM && (AES || AARCH64)
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_PPC64LE)
#define HWAES
@@ -38,7 +38,7 @@
static int hwaes_capable(void) {
return CRYPTO_is_PPC64LE_vcrypto_capable();
}
-#endif /* !NO_ASM && PPC64LE */
+#endif // !NO_ASM && PPC64LE
#if defined(HWAES)
@@ -56,8 +56,8 @@
#else
-/* If HWAES isn't defined then we provide dummy functions for each of the hwaes
- * functions. */
+// If HWAES isn't defined then we provide dummy functions for each of the hwaes
+// functions.
static int hwaes_capable(void) { return 0; }
static int aes_hw_set_encrypt_key(const uint8_t *user_key, int bits,
@@ -91,10 +91,10 @@
abort();
}
-#endif /* !HWAES */
+#endif // !HWAES
#if defined(__cplusplus)
-} /* extern C */
+} // extern C
#endif
-#endif /* OPENSSL_HEADER_AES_INTERNAL_H */
+#endif // OPENSSL_HEADER_AES_INTERNAL_H
diff --git a/crypto/fipsmodule/aes/key_wrap.c b/crypto/fipsmodule/aes/key_wrap.c
index 73de17f..feee0c7 100644
--- a/crypto/fipsmodule/aes/key_wrap.c
+++ b/crypto/fipsmodule/aes/key_wrap.c
@@ -56,7 +56,7 @@
#include "../../internal.h"
-/* kDefaultIV is the default IV value given in RFC 3394, 2.2.3.1. */
+// kDefaultIV is the default IV value given in RFC 3394, 2.2.3.1.
static const uint8_t kDefaultIV[] = {
0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6,
};
@@ -65,7 +65,7 @@
int AES_wrap_key(const AES_KEY *key, const uint8_t *iv, uint8_t *out,
const uint8_t *in, size_t in_len) {
- /* See RFC 3394, section 2.2.1. */
+ // See RFC 3394, section 2.2.1.
if (in_len > INT_MAX - 8 || in_len < 8 || in_len % 8 != 0) {
return -1;
@@ -101,7 +101,7 @@
int AES_unwrap_key(const AES_KEY *key, const uint8_t *iv, uint8_t *out,
const uint8_t *in, size_t in_len) {
- /* See RFC 3394, section 2.2.2. */
+ // See RFC 3394, section 2.2.2.
if (in_len > INT_MAX || in_len < 16 || in_len % 8 != 0) {
return -1;
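
For orientation, a sketch of how the wrap functions above are typically driven. One assumption flagged: passing iv == NULL is taken to select kDefaultIV, consistent with the default-IV comment in this file. Per RFC 3394, wrapping n bytes yields n + 8 bytes:

    #include <stdint.h>

    #include <openssl/aes.h>

    // Wrap a 16-byte key under an already-initialized key-encryption key.
    // Returns the number of wrapped bytes written (24 here) or -1 on error.
    static int wrap_example(const AES_KEY *kek, const uint8_t key_in[16],
                            uint8_t wrapped[24]) {
      // iv == NULL is assumed to select kDefaultIV from RFC 3394, 2.2.3.1.
      return AES_wrap_key(kek, NULL, wrapped, key_in, 16);
    }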
diff --git a/crypto/fipsmodule/aes/mode_wrappers.c b/crypto/fipsmodule/aes/mode_wrappers.c
index 4929920..34514db 100644
--- a/crypto/fipsmodule/aes/mode_wrappers.c
+++ b/crypto/fipsmodule/aes/mode_wrappers.c
@@ -92,7 +92,7 @@
asm_AES_cbc_encrypt(in, out, len, key, ivec, enc);
}
-#endif /* OPENSSL_NO_ASM || (!OPENSSL_X86_64 && !OPENSSL_X86) */
+#endif // OPENSSL_NO_ASM || (!OPENSSL_X86_64 && !OPENSSL_X86)
void AES_ofb128_encrypt(const uint8_t *in, uint8_t *out, size_t length,
const AES_KEY *key, uint8_t *ivec, int *num) {
diff --git a/crypto/fipsmodule/bcm.c b/crypto/fipsmodule/bcm.c
index c6ea796..b506b43 100644
--- a/crypto/fipsmodule/bcm.c
+++ b/crypto/fipsmodule/bcm.c
@@ -13,7 +13,7 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */
#if !defined(_GNU_SOURCE)
-#define _GNU_SOURCE /* needed for syscall() on Linux. */
+#define _GNU_SOURCE // needed for syscall() on Linux.
#endif
#include <openssl/aead.h>
@@ -145,7 +145,7 @@
0xa7, 0x10, 0x93, 0x43, 0x53, 0x4e, 0xe3, 0x16, 0x73, 0x55, 0xce, 0xf2,
0x94, 0xc0, 0xbe, 0xb3,
};
- static const uint8_t kE[] = {0x01, 0x00, 0x01}; /* 65537 */
+ static const uint8_t kE[] = {0x01, 0x00, 0x01}; // 65537
static const uint8_t kD[] = {
0x2f, 0x2c, 0x1e, 0xd2, 0x3d, 0x2c, 0xb1, 0x9b, 0x21, 0x02, 0xce, 0xb8,
0x95, 0x5f, 0x4f, 0xd9, 0x21, 0x38, 0x11, 0x36, 0xb0, 0x9a, 0x36, 0xab,
@@ -288,8 +288,8 @@
}
#if !defined(OPENSSL_ASAN)
-/* These symbols are filled in by delocate.go. They point to the start and end
- * of the module, and the location of the integrity hash, respectively. */
+// These symbols are filled in by delocate.go. They point to the start and end
+// of the module, and the location of the integrity hash, respectively.
extern const uint8_t BORINGSSL_bcm_text_start[];
extern const uint8_t BORINGSSL_bcm_text_end[];
extern const uint8_t BORINGSSL_bcm_text_hash[];
@@ -300,8 +300,8 @@
CRYPTO_library_init();
#if !defined(OPENSSL_ASAN)
- /* Integrity tests cannot run under ASAN because it involves reading the full
- * .text section, which triggers the global-buffer overflow detection. */
+ // Integrity tests cannot run under ASAN because it involves reading the full
+ // .text section, which triggers the global-buffer overflow detection.
const uint8_t *const start = BORINGSSL_bcm_text_start;
const uint8_t *const end = BORINGSSL_bcm_text_end;
@@ -478,7 +478,7 @@
uint8_t aes_iv[16];
uint8_t output[256];
- /* AES-CBC Encryption KAT */
+ // AES-CBC Encryption KAT
memcpy(aes_iv, kAESIV, sizeof(kAESIV));
if (AES_set_encrypt_key(kAESKey, 8 * sizeof(kAESKey), &aes_key) != 0) {
goto err;
@@ -490,7 +490,7 @@
goto err;
}
- /* AES-CBC Decryption KAT */
+ // AES-CBC Decryption KAT
memcpy(aes_iv, kAESIV, sizeof(kAESIV));
if (AES_set_decrypt_key(kAESKey, 8 * sizeof(kAESKey), &aes_key) != 0) {
goto err;
@@ -511,7 +511,7 @@
goto err;
}
- /* AES-GCM Encryption KAT */
+ // AES-GCM Encryption KAT
if (!EVP_AEAD_CTX_seal(&aead_ctx, output, &out_len, sizeof(output), nonce,
EVP_AEAD_nonce_length(EVP_aead_aes_128_gcm()),
kPlaintext, sizeof(kPlaintext), NULL, 0) ||
@@ -520,7 +520,7 @@
goto err;
}
- /* AES-GCM Decryption KAT */
+ // AES-GCM Decryption KAT
if (!EVP_AEAD_CTX_open(&aead_ctx, output, &out_len, sizeof(output), nonce,
EVP_AEAD_nonce_length(EVP_aead_aes_128_gcm()),
kAESGCMCiphertext, sizeof(kAESGCMCiphertext), NULL,
@@ -538,7 +538,7 @@
DES_set_key(&kDESKey2, &des2);
DES_set_key(&kDESKey3, &des3);
- /* 3DES Encryption KAT */
+ // 3DES Encryption KAT
memcpy(&des_iv, &kDESIV, sizeof(des_iv));
DES_ede3_cbc_encrypt(kPlaintext, output, sizeof(kPlaintext), &des1, &des2,
&des3, &des_iv, DES_ENCRYPT);
@@ -547,7 +547,7 @@
goto err;
}
- /* 3DES Decryption KAT */
+ // 3DES Decryption KAT
memcpy(&des_iv, &kDESIV, sizeof(des_iv));
DES_ede3_cbc_encrypt(kDESCiphertext, output, sizeof(kDESCiphertext), &des1,
&des2, &des3, &des_iv, DES_DECRYPT);
@@ -556,21 +556,21 @@
goto err;
}
- /* SHA-1 KAT */
+ // SHA-1 KAT
SHA1(kPlaintext, sizeof(kPlaintext), output);
if (!check_test(kPlaintextSHA1, output, sizeof(kPlaintextSHA1),
"SHA-1 KAT")) {
goto err;
}
- /* SHA-256 KAT */
+ // SHA-256 KAT
SHA256(kPlaintext, sizeof(kPlaintext), output);
if (!check_test(kPlaintextSHA256, output, sizeof(kPlaintextSHA256),
"SHA-256 KAT")) {
goto err;
}
- /* SHA-512 KAT */
+ // SHA-512 KAT
SHA512(kPlaintext, sizeof(kPlaintext), output);
if (!check_test(kPlaintextSHA512, output, sizeof(kPlaintextSHA512),
"SHA-512 KAT")) {
@@ -583,11 +583,11 @@
goto err;
}
- /* RSA Sign KAT */
+ // RSA Sign KAT
unsigned sig_len;
- /* Disable blinding for the power-on tests because it's not needed and
- * triggers an entropy draw. */
+ // Disable blinding for the power-on tests because it's not needed and
+ // triggers an entropy draw.
rsa_key->flags |= RSA_FLAG_NO_BLINDING;
if (!RSA_sign(NID_sha256, kPlaintextSHA256, sizeof(kPlaintextSHA256), output,
@@ -597,7 +597,7 @@
goto err;
}
- /* RSA Verify KAT */
+ // RSA Verify KAT
if (!RSA_verify(NID_sha256, kPlaintextSHA256, sizeof(kPlaintextSHA256),
kRSASignature, sizeof(kRSASignature), rsa_key)) {
printf("RSA Verify KAT failed.\n");
@@ -612,9 +612,9 @@
goto err;
}
- /* ECDSA Sign/Verify PWCT */
+ // ECDSA Sign/Verify PWCT
- /* The 'k' value for ECDSA is fixed to avoid an entropy draw. */
+ // The 'k' value for ECDSA is fixed to avoid an entropy draw.
ec_key->fixed_k = BN_new();
if (ec_key->fixed_k == NULL ||
!BN_set_word(ec_key->fixed_k, 42)) {
@@ -641,7 +641,7 @@
ECDSA_SIG_free(sig);
EC_KEY_free(ec_key);
- /* DBRG KAT */
+  // DRBG KAT
CTR_DRBG_STATE drbg;
if (!CTR_DRBG_init(&drbg, kDRBGEntropy, kDRBGPersonalization,
sizeof(kDRBGPersonalization)) ||
@@ -676,4 +676,4 @@
exit(1);
}
}
-#endif /* BORINGSSL_FIPS */
+#endif // BORINGSSL_FIPS
diff --git a/crypto/fipsmodule/bn/add.c b/crypto/fipsmodule/bn/add.c
index 5848543..bbe275e 100644
--- a/crypto/fipsmodule/bn/add.c
+++ b/crypto/fipsmodule/bn/add.c
@@ -68,20 +68,19 @@
const BIGNUM *tmp;
int a_neg = a->neg, ret;
- /* a + b a+b
- * a + -b a-b
- * -a + b b-a
- * -a + -b -(a+b)
- */
+ // a + b a+b
+ // a + -b a-b
+ // -a + b b-a
+ // -a + -b -(a+b)
if (a_neg ^ b->neg) {
- /* only one is negative */
+ // only one is negative
if (a_neg) {
tmp = a;
a = b;
b = tmp;
}
- /* we are now a - b */
+ // we are now a - b
if (BN_ucmp(a, b) < 0) {
if (!BN_usub(r, b, a)) {
return 0;
@@ -142,7 +141,7 @@
}
}
if (carry) {
- /* carry != 0 => dif == 0 */
+ // carry != 0 => dif == 0
*rp = 1;
r->top++;
}
@@ -150,7 +149,7 @@
if (dif && rp != ap) {
while (dif--) {
- /* copy remaining words if ap != rp */
+ // copy remaining words if ap != rp
*(rp++) = *(ap++);
}
}
@@ -165,17 +164,17 @@
w &= BN_MASK2;
- /* degenerate case: w is zero */
+ // degenerate case: w is zero
if (!w) {
return 1;
}
- /* degenerate case: a is zero */
+ // degenerate case: a is zero
if (BN_is_zero(a)) {
return BN_set_word(a, w);
}
- /* handle 'a' when negative */
+ // handle 'a' when negative
if (a->neg) {
a->neg = 0;
i = BN_sub_word(a, w);
@@ -206,11 +205,10 @@
int add = 0, neg = 0;
const BIGNUM *tmp;
- /* a - b a-b
- * a - -b a+b
- * -a - b -(a+b)
- * -a - -b b-a
- */
+ // a - b a-b
+ // a - -b a+b
+ // -a - b -(a+b)
+ // -a - -b b-a
if (a->neg) {
if (b->neg) {
tmp = a;
@@ -236,7 +234,7 @@
return 1;
}
- /* We are actually doing a - b :-) */
+ // We are actually doing a - b :-)
max = (a->top > b->top) ? a->top : b->top;
if (!bn_wexpand(r, max)) {
@@ -267,7 +265,7 @@
min = b->top;
dif = max - min;
- if (dif < 0) /* hmm... should not be happening */
+ if (dif < 0) // hmm... should not be happening
{
OPENSSL_PUT_ERROR(BN, BN_R_ARG2_LT_ARG3);
return 0;
@@ -295,10 +293,10 @@
*(rp++) = t1 & BN_MASK2;
}
- if (carry) /* subtracted */
+ if (carry) // subtracted
{
if (!dif) {
- /* error: a < b */
+ // error: a < b
return 0;
}
@@ -329,12 +327,12 @@
w &= BN_MASK2;
- /* degenerate case: w is zero */
+ // degenerate case: w is zero
if (!w) {
return 1;
}
- /* degenerate case: a is zero */
+ // degenerate case: a is zero
if (BN_is_zero(a)) {
i = BN_set_word(a, w);
if (i != 0) {
@@ -343,7 +341,7 @@
return i;
}
- /* handle 'a' when negative */
+ // handle 'a' when negative
if (a->neg) {
a->neg = 0;
i = BN_add_word(a, w);
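
The sign tables in the hunks above can be spot-checked with small values. A sketch, assuming only the public BN API (names here are illustrative):

    #include <assert.h>

    #include <openssl/bn.h>

    static void sign_table_demo(void) {
      BIGNUM *a = BN_new(), *b = BN_new(), *r = BN_new();
      assert(a != NULL && b != NULL && r != NULL);
      BN_set_word(a, 5);
      BN_set_negative(a, 1);  // a = -5
      BN_set_word(b, 3);      // b = 3
      BN_add(r, a, b);        // row "-a + b": computed as b - a, so r = -2
      assert(BN_is_negative(r) && BN_get_word(r) == 2);  // BN_get_word is |r|
      BN_free(a);
      BN_free(b);
      BN_free(r);
    }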
diff --git a/crypto/fipsmodule/bn/asm/x86_64-gcc.c b/crypto/fipsmodule/bn/asm/x86_64-gcc.c
index 72e7689..a65b86f 100644
--- a/crypto/fipsmodule/bn/asm/x86_64-gcc.c
+++ b/crypto/fipsmodule/bn/asm/x86_64-gcc.c
@@ -52,7 +52,7 @@
#include <openssl/bn.h>
-/* TODO(davidben): Get this file working on Windows x64. */
+// TODO(davidben): Get this file working on Windows x64.
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__GNUC__)
#include "../internal.h"
@@ -63,11 +63,9 @@
#define asm __asm__
-/*
- * "m"(a), "+m"(r) is the way to favor DirectPath µ-code;
- * "g"(0) let the compiler to decide where does it
- * want to keep the value of zero;
- */
+// "m"(a), "+m"(r) is the way to favor DirectPath µ-code;
+// "g"(0) let the compiler to decide where does it
+// want to keep the value of zero;
#define mul_add(r, a, word, carry) \
do { \
register BN_ULONG high, low; \
@@ -197,7 +195,7 @@
}
asm volatile (
- " subq %0,%0 \n" /* clear carry */
+ " subq %0,%0 \n" // clear carry
" jmp 1f \n"
".p2align 4 \n"
"1:"
@@ -224,7 +222,7 @@
}
asm volatile (
- " subq %0,%0 \n" /* clear borrow */
+ " subq %0,%0 \n" // clear borrow
" jmp 1f \n"
".p2align 4 \n"
"1:"
@@ -241,14 +239,13 @@
return ret & 1;
}
-/* mul_add_c(a,b,c0,c1,c2) -- c+=a*b for three word number c=(c2,c1,c0) */
-/* mul_add_c2(a,b,c0,c1,c2) -- c+=2*a*b for three word number c=(c2,c1,c0) */
-/* sqr_add_c(a,i,c0,c1,c2) -- c+=a[i]^2 for three word number c=(c2,c1,c0) */
-/* sqr_add_c2(a,i,c0,c1,c2) -- c+=2*a[i]*a[j] for three word number c=(c2,c1,c0)
- */
+// mul_add_c(a,b,c0,c1,c2) -- c+=a*b for three word number c=(c2,c1,c0)
+// mul_add_c2(a,b,c0,c1,c2) -- c+=2*a*b for three word number c=(c2,c1,c0)
+// sqr_add_c(a,i,c0,c1,c2) -- c+=a[i]^2 for three word number c=(c2,c1,c0)
+// sqr_add_c2(a,i,c0,c1,c2) -- c+=2*a[i]*a[j] for three word number c=(c2,c1,c0)
-/* Keep in mind that carrying into high part of multiplication result can not
- * overflow, because it cannot be all-ones. */
+// Keep in mind that carrying into high part of multiplication result can not
+// overflow, because it cannot be all-ones.
#define mul_add_c(a, b, c0, c1, c2) \
do { \
BN_ULONG t1, t2; \
@@ -539,4 +536,4 @@
#undef mul_add_c2
#undef sqr_add_c2
-#endif /* !NO_ASM && X86_64 && __GNUC__ */
+#endif // !NO_ASM && X86_64 && __GNUC__
diff --git a/crypto/fipsmodule/bn/bn.c b/crypto/fipsmodule/bn/bn.c
index af093e0..9ba1913 100644
--- a/crypto/fipsmodule/bn/bn.c
+++ b/crypto/fipsmodule/bn/bn.c
@@ -175,8 +175,8 @@
out->flags = BN_FLG_STATIC_DATA;
}
-/* BN_num_bits_word returns the minimum number of bits needed to represent the
- * value in |l|. */
+// BN_num_bits_word returns the minimum number of bits needed to represent the
+// value in |l|.
unsigned BN_num_bits_word(BN_ULONG l) {
static const unsigned char bits[256] = {
0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5,
@@ -290,7 +290,7 @@
return 0;
}
OPENSSL_memmove(bn->d, words, num * sizeof(BN_ULONG));
- /* |bn_wexpand| verified that |num| isn't too large. */
+ // |bn_wexpand| verified that |num| isn't too large.
bn->top = (int)num;
bn_correct_top(bn);
bn->neg = 0;
diff --git a/crypto/fipsmodule/bn/bn_test.cc b/crypto/fipsmodule/bn/bn_test.cc
index 3cb5f75..fe03e5f 100644
--- a/crypto/fipsmodule/bn/bn_test.cc
+++ b/crypto/fipsmodule/bn/bn_test.cc
@@ -67,9 +67,9 @@
* Sheueling Chang Shantz and Douglas Stebila of Sun Microsystems
* Laboratories. */
-/* Per C99, various stdint.h and inttypes.h macros (the latter used by bn.h) are
- * unavailable in C++ unless some macros are defined. C++11 overruled this
- * decision, but older Android NDKs still require it. */
+// Per C99, various stdint.h and inttypes.h macros (the latter used by bn.h) are
+// unavailable in C++ unless some macros are defined. C++11 overruled this
+// decision, but older Android NDKs still require it.
#if !defined(__STDC_CONSTANT_MACROS)
#define __STDC_CONSTANT_MACROS
#endif
diff --git a/crypto/fipsmodule/bn/bytes.c b/crypto/fipsmodule/bn/bytes.c
index 0988870..328d56e 100644
--- a/crypto/fipsmodule/bn/bytes.c
+++ b/crypto/fipsmodule/bn/bytes.c
@@ -90,8 +90,8 @@
return NULL;
}
- /* |bn_wexpand| must check bounds on |num_words| to write it into
- * |ret->dmax|. */
+ // |bn_wexpand| must check bounds on |num_words| to write it into
+ // |ret->dmax|.
assert(num_words <= INT_MAX);
ret->top = (int)num_words;
ret->neg = 0;
@@ -105,8 +105,8 @@
}
}
- /* need to call this due to clear byte at top if avoiding having the top bit
- * set (-ve number) */
+  // need to call this due to the clear byte at the top, used to avoid having
+  // the top bit set (negative number)
bn_correct_top(ret);
return ret;
}
@@ -128,7 +128,7 @@
return ret;
}
- /* Reserve enough space in |ret|. */
+ // Reserve enough space in |ret|.
size_t num_words = ((len - 1) / BN_BYTES) + 1;
if (!bn_wexpand(ret, num_words)) {
BN_free(bn);
@@ -136,11 +136,11 @@
}
ret->top = num_words;
- /* Make sure the top bytes will be zeroed. */
+ // Make sure the top bytes will be zeroed.
ret->d[num_words - 1] = 0;
- /* We only support little-endian platforms, so we can simply memcpy the
- * internal representation. */
+ // We only support little-endian platforms, so we can simply memcpy the
+ // internal representation.
OPENSSL_memcpy(ret->d, in, len);
bn_correct_top(ret);
@@ -160,24 +160,24 @@
}
int BN_bn2le_padded(uint8_t *out, size_t len, const BIGNUM *in) {
- /* If we don't have enough space, fail out. */
+ // If we don't have enough space, fail out.
size_t num_bytes = BN_num_bytes(in);
if (len < num_bytes) {
return 0;
}
- /* We only support little-endian platforms, so we can simply memcpy into the
- * internal representation. */
+ // We only support little-endian platforms, so we can simply memcpy into the
+ // internal representation.
OPENSSL_memcpy(out, in->d, num_bytes);
- /* Pad out the rest of the buffer with zeroes. */
+ // Pad out the rest of the buffer with zeroes.
OPENSSL_memset(out + num_bytes, 0, len - num_bytes);
return 1;
}
-/* constant_time_select_ulong returns |x| if |v| is 1 and |y| if |v| is 0. Its
- * behavior is undefined if |v| takes any other value. */
+// constant_time_select_ulong returns |x| if |v| is 1 and |y| if |v| is 0. Its
+// behavior is undefined if |v| takes any other value.
static BN_ULONG constant_time_select_ulong(int v, BN_ULONG x, BN_ULONG y) {
BN_ULONG mask = v;
mask--;
@@ -185,35 +185,35 @@
return (~mask & x) | (mask & y);
}
-/* constant_time_le_size_t returns 1 if |x| <= |y| and 0 otherwise. |x| and |y|
- * must not have their MSBs set. */
+// constant_time_le_size_t returns 1 if |x| <= |y| and 0 otherwise. |x| and |y|
+// must not have their MSBs set.
static int constant_time_le_size_t(size_t x, size_t y) {
return ((x - y - 1) >> (sizeof(size_t) * 8 - 1)) & 1;
}
-/* read_word_padded returns the |i|'th word of |in|, if it is not out of
- * bounds. Otherwise, it returns 0. It does so without branches on the size of
- * |in|, however it necessarily does not have the same memory access pattern. If
- * the access would be out of bounds, it reads the last word of |in|. |in| must
- * not be zero. */
+// read_word_padded returns the |i|'th word of |in|, if it is not out of
+// bounds. Otherwise, it returns 0. It does so without branches on the size of
+// |in|, however it necessarily does not have the same memory access pattern. If
+// the access would be out of bounds, it reads the last word of |in|. |in| must
+// not be zero.
static BN_ULONG read_word_padded(const BIGNUM *in, size_t i) {
- /* Read |in->d[i]| if valid. Otherwise, read the last word. */
+ // Read |in->d[i]| if valid. Otherwise, read the last word.
BN_ULONG l = in->d[constant_time_select_ulong(
constant_time_le_size_t(in->dmax, i), in->dmax - 1, i)];
- /* Clamp to zero if above |d->top|. */
+ // Clamp to zero if above |d->top|.
return constant_time_select_ulong(constant_time_le_size_t(in->top, i), 0, l);
}
int BN_bn2bin_padded(uint8_t *out, size_t len, const BIGNUM *in) {
- /* Special case for |in| = 0. Just branch as the probability is negligible. */
+ // Special case for |in| = 0. Just branch as the probability is negligible.
if (BN_is_zero(in)) {
OPENSSL_memset(out, 0, len);
return 1;
}
- /* Check if the integer is too big. This case can exit early in non-constant
- * time. */
+ // Check if the integer is too big. This case can exit early in non-constant
+ // time.
if ((size_t)in->top > (len + (BN_BYTES - 1)) / BN_BYTES) {
return 0;
}
@@ -224,13 +224,13 @@
}
}
- /* Write the bytes out one by one. Serialization is done without branching on
- * the bits of |in| or on |in->top|, but if the routine would otherwise read
- * out of bounds, the memory access pattern can't be fixed. However, for an
- * RSA key of size a multiple of the word size, the probability of BN_BYTES
- * leading zero octets is low.
- *
- * See Falko Stenzke, "Manger's Attack revisited", ICICS 2010. */
+ // Write the bytes out one by one. Serialization is done without branching on
+ // the bits of |in| or on |in->top|, but if the routine would otherwise read
+ // out of bounds, the memory access pattern can't be fixed. However, for an
+ // RSA key of size a multiple of the word size, the probability of BN_BYTES
+ // leading zero octets is low.
+ //
+ // See Falko Stenzke, "Manger's Attack revisited", ICICS 2010.
size_t i = len;
while (i--) {
BN_ULONG l = read_word_padded(in, i / BN_BYTES);
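
The mask arithmetic in constant_time_select_ulong above is worth unpacking: for v in {0, 1}, mask = v - 1 is all-zeros when v == 1 and all-ones when v == 0, so (~mask & x) | (mask & y) picks x or y without branching on v. A standalone sketch of the same idiom:

    #include <assert.h>
    #include <stdint.h>

    // Standalone copy of the selection idiom from bytes.c, for illustration.
    static uint64_t select_u64(uint64_t v, uint64_t x, uint64_t y) {
      uint64_t mask = v - 1;  // v == 1 -> all zeros; v == 0 -> all ones
      return (~mask & x) | (mask & y);
    }

    int main(void) {
      assert(select_u64(1, 7, 9) == 7);
      assert(select_u64(0, 7, 9) == 9);
      return 0;
    }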
diff --git a/crypto/fipsmodule/bn/cmp.c b/crypto/fipsmodule/bn/cmp.c
index 71c0465..7864707 100644
--- a/crypto/fipsmodule/bn/cmp.c
+++ b/crypto/fipsmodule/bn/cmp.c
@@ -159,14 +159,14 @@
if (dl < 0) {
for (i = dl; i < 0; i++) {
if (b[n - i] != 0) {
- return -1; /* a < b */
+ return -1; // a < b
}
}
}
if (dl > 0) {
for (i = dl; i > 0; i--) {
if (a[n + i] != 0) {
- return 1; /* a > b */
+ return 1; // a > b
}
}
}
diff --git a/crypto/fipsmodule/bn/ctx.c b/crypto/fipsmodule/bn/ctx.c
index 3819775..af50de9 100644
--- a/crypto/fipsmodule/bn/ctx.c
+++ b/crypto/fipsmodule/bn/ctx.c
@@ -62,24 +62,24 @@
#include "../../internal.h"
-/* How many bignums are in each "pool item"; */
+// How many bignums are in each "pool item";
#define BN_CTX_POOL_SIZE 16
-/* The stack frame info is resizing, set a first-time expansion size; */
+// The stack frame info is resizing, set a first-time expansion size;
#define BN_CTX_START_FRAMES 32
-/* A bundle of bignums that can be linked with other bundles */
+// A bundle of bignums that can be linked with other bundles
typedef struct bignum_pool_item {
- /* The bignum values */
+ // The bignum values
BIGNUM vals[BN_CTX_POOL_SIZE];
- /* Linked-list admin */
+ // Linked-list admin
struct bignum_pool_item *prev, *next;
} BN_POOL_ITEM;
typedef struct bignum_pool {
- /* Linked-list admin */
+ // Linked-list admin
BN_POOL_ITEM *head, *current, *tail;
- /* Stack depth and allocation size */
+ // Stack depth and allocation size
unsigned used, size;
} BN_POOL;
@@ -88,15 +88,14 @@
static BIGNUM *BN_POOL_get(BN_POOL *);
static void BN_POOL_release(BN_POOL *, unsigned int);
-/************/
-/* BN_STACK */
-/************/
-/* A wrapper to manage the "stack frames" */
+// BN_STACK
+
+// A wrapper to manage the "stack frames"
typedef struct bignum_ctx_stack {
- /* Array of indexes into the bignum stack */
+ // Array of indexes into the bignum stack
unsigned int *indexes;
- /* Number of stack frames, and the size of the allocated array */
+ // Number of stack frames, and the size of the allocated array
unsigned int depth, size;
} BN_STACK;
@@ -105,21 +104,20 @@
static int BN_STACK_push(BN_STACK *, unsigned int);
static unsigned int BN_STACK_pop(BN_STACK *);
-/**********/
-/* BN_CTX */
-/**********/
-/* The opaque BN_CTX type */
+// BN_CTX
+
+// The opaque BN_CTX type
struct bignum_ctx {
- /* The bignum bundles */
+ // The bignum bundles
BN_POOL pool;
- /* The "stack frames", if you will */
+ // The "stack frames", if you will
BN_STACK stack;
- /* The number of bignums currently assigned */
+ // The number of bignums currently assigned
unsigned int used;
- /* Depth of stack overflow */
+ // Depth of stack overflow
int err_stack;
- /* Block "gets" until an "end" (compatibility behaviour) */
+ // Block "gets" until an "end" (compatibility behaviour)
int too_many;
};
@@ -130,7 +128,7 @@
return NULL;
}
- /* Initialise the structure */
+ // Initialise the structure
BN_POOL_init(&ret->pool);
BN_STACK_init(&ret->stack);
ret->used = 0;
@@ -150,11 +148,11 @@
}
void BN_CTX_start(BN_CTX *ctx) {
- /* If we're already overflowing ... */
+ // If we're already overflowing ...
if (ctx->err_stack || ctx->too_many) {
ctx->err_stack++;
} else if (!BN_STACK_push(&ctx->stack, ctx->used)) {
- /* (Try to) get a new frame pointer */
+ // (Try to) get a new frame pointer
OPENSSL_PUT_ERROR(BN, BN_R_TOO_MANY_TEMPORARY_VARIABLES);
ctx->err_stack++;
}
@@ -168,14 +166,14 @@
ret = BN_POOL_get(&ctx->pool);
if (ret == NULL) {
- /* Setting too_many prevents repeated "get" attempts from
- * cluttering the error stack. */
+ // Setting too_many prevents repeated "get" attempts from
+ // cluttering the error stack.
ctx->too_many = 1;
OPENSSL_PUT_ERROR(BN, BN_R_TOO_MANY_TEMPORARY_VARIABLES);
return NULL;
}
- /* OK, make sure the returned bignum is "zero" */
+ // OK, make sure the returned bignum is "zero"
BN_zero(ret);
ctx->used++;
return ret;
@@ -186,20 +184,19 @@
ctx->err_stack--;
} else {
unsigned int fp = BN_STACK_pop(&ctx->stack);
- /* Does this stack frame have anything to release? */
+ // Does this stack frame have anything to release?
if (fp < ctx->used) {
BN_POOL_release(&ctx->pool, ctx->used - fp);
}
ctx->used = fp;
- /* Unjam "too_many" in case "get" had failed */
+ // Unjam "too_many" in case "get" had failed
ctx->too_many = 0;
}
}
-/************/
-/* BN_STACK */
-/************/
+
+// BN_STACK
static void BN_STACK_init(BN_STACK *st) {
st->indexes = NULL;
@@ -212,7 +209,7 @@
static int BN_STACK_push(BN_STACK *st, unsigned int idx) {
if (st->depth == st->size) {
- /* Need to expand */
+ // Need to expand
unsigned int newsize =
(st->size ? (st->size * 3 / 2) : BN_CTX_START_FRAMES);
unsigned int *newitems = OPENSSL_malloc(newsize * sizeof(unsigned int));
@@ -235,6 +232,7 @@
return st->indexes[--(st->depth)];
}
+
static void BN_POOL_init(BN_POOL *p) {
p->head = p->current = p->tail = NULL;
p->used = p->size = 0;
@@ -259,14 +257,14 @@
return NULL;
}
- /* Initialise the structure */
+ // Initialise the structure
for (size_t i = 0; i < BN_CTX_POOL_SIZE; i++) {
BN_init(&item->vals[i]);
}
item->prev = p->tail;
item->next = NULL;
- /* Link it in */
+ // Link it in
if (!p->head) {
p->head = p->current = p->tail = item;
} else {
@@ -277,7 +275,7 @@
p->size += BN_CTX_POOL_SIZE;
p->used++;
- /* Return the first bignum from the new pool */
+ // Return the first bignum from the new pool
return item->vals;
}
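
The pool and stack machinery above exists to support one calling pattern: bracket a group of temporaries with BN_CTX_start and BN_CTX_end so the pool can recycle them. A sketch (error handling abbreviated; the function name is illustrative):

    #include <openssl/bn.h>

    static int ctx_demo(BN_CTX *ctx) {
      int ok = 0;
      BN_CTX_start(ctx);
      BIGNUM *tmp = BN_CTX_get(ctx);  // NULL once the pool is exhausted
      if (tmp == NULL) {
        goto end;
      }
      // ... use tmp as scratch space ...
      ok = 1;
    end:
      BN_CTX_end(ctx);  // releases every BIGNUM obtained since BN_CTX_start
      return ok;
    }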
diff --git a/crypto/fipsmodule/bn/div.c b/crypto/fipsmodule/bn/div.c
index dae5656..1bcff50 100644
--- a/crypto/fipsmodule/bn/div.c
+++ b/crypto/fipsmodule/bn/div.c
@@ -65,8 +65,8 @@
#if !defined(BN_ULLONG)
-/* bn_div_words divides a double-width |h|,|l| by |d| and returns the result,
- * which must fit in a |BN_ULONG|. */
+// bn_div_words divides a double-width |h|,|l| by |d| and returns the result,
+// which must fit in a |BN_ULONG|.
static BN_ULONG bn_div_words(BN_ULONG h, BN_ULONG l, BN_ULONG d) {
BN_ULONG dh, dl, q, ret = 0, th, tl, t;
int i, count = 2;
@@ -135,26 +135,26 @@
ret |= q;
return ret;
}
-#endif /* !defined(BN_ULLONG) */
+#endif // !defined(BN_ULLONG)
static inline void bn_div_rem_words(BN_ULONG *quotient_out, BN_ULONG *rem_out,
BN_ULONG n0, BN_ULONG n1, BN_ULONG d0) {
- /* GCC and Clang generate function calls to |__udivdi3| and |__umoddi3| when
- * the |BN_ULLONG|-based C code is used.
- *
- * GCC bugs:
- * * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=14224
- * * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=43721
- * * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=54183
- * * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58897
- * * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65668
- *
- * Clang bugs:
- * * https://llvm.org/bugs/show_bug.cgi?id=6397
- * * https://llvm.org/bugs/show_bug.cgi?id=12418
- *
- * These issues aren't specific to x86 and x86_64, so it might be worthwhile
- * to add more assembly language implementations. */
+ // GCC and Clang generate function calls to |__udivdi3| and |__umoddi3| when
+ // the |BN_ULLONG|-based C code is used.
+ //
+ // GCC bugs:
+ // * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=14224
+ // * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=43721
+ // * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=54183
+ // * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58897
+ // * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65668
+ //
+ // Clang bugs:
+ // * https://llvm.org/bugs/show_bug.cgi?id=6397
+ // * https://llvm.org/bugs/show_bug.cgi?id=12418
+ //
+ // These issues aren't specific to x86 and x86_64, so it might be worthwhile
+ // to add more assembly language implementations.
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__GNUC__)
__asm__ volatile (
"divl %4"
@@ -178,17 +178,17 @@
#endif
}
-/* BN_div computes dv := num / divisor, rounding towards
- * zero, and sets up rm such that dv*divisor + rm = num holds.
- * Thus:
- * dv->neg == num->neg ^ divisor->neg (unless the result is zero)
- * rm->neg == num->neg (unless the remainder is zero)
- * If 'dv' or 'rm' is NULL, the respective value is not returned.
- *
- * This was specifically designed to contain fewer branches that may leak
- * sensitive information; see "New Branch Prediction Vulnerabilities in OpenSSL
- * and Necessary Software Countermeasures" by Onur Acıçmez, Shay Gueron, and
- * Jean-Pierre Seifert. */
+// BN_div computes dv := num / divisor, rounding towards
+// zero, and sets up rm such that dv*divisor + rm = num holds.
+// Thus:
+// dv->neg == num->neg ^ divisor->neg (unless the result is zero)
+// rm->neg == num->neg (unless the remainder is zero)
+// If 'dv' or 'rm' is NULL, the respective value is not returned.
+//
+// This was specifically designed to contain fewer branches that may leak
+// sensitive information; see "New Branch Prediction Vulnerabilities in OpenSSL
+// and Necessary Software Countermeasures" by Onur Acıçmez, Shay Gueron, and
+// Jean-Pierre Seifert.
int BN_div(BIGNUM *dv, BIGNUM *rm, const BIGNUM *num, const BIGNUM *divisor,
BN_CTX *ctx) {
int norm_shift, i, loop;
@@ -197,8 +197,8 @@
BN_ULONG d0, d1;
int num_n, div_n;
- /* Invalid zero-padding would have particularly bad consequences
- * so don't just rely on bn_check_top() here */
+ // Invalid zero-padding would have particularly bad consequences
+ // so don't just rely on bn_check_top() here
if ((num->top > 0 && num->d[num->top - 1] == 0) ||
(divisor->top > 0 && divisor->d[divisor->top - 1] == 0)) {
OPENSSL_PUT_ERROR(BN, BN_R_NOT_INITIALIZED);
@@ -223,7 +223,7 @@
goto err;
}
- /* First we normalise the numbers */
+ // First we normalise the numbers
norm_shift = BN_BITS2 - ((BN_num_bits(divisor)) % BN_BITS2);
if (!(BN_lshift(sdiv, divisor, norm_shift))) {
goto err;
@@ -235,9 +235,9 @@
}
snum->neg = 0;
- /* Since we don't want to have special-case logic for the case where snum is
- * larger than sdiv, we pad snum with enough zeroes without changing its
- * value. */
+ // Since we don't want to have special-case logic for the case where snum is
+ // larger than sdiv, we pad snum with enough zeroes without changing its
+ // value.
if (snum->top <= sdiv->top + 1) {
if (!bn_wexpand(snum, sdiv->top + 2)) {
goto err;
@@ -257,24 +257,24 @@
div_n = sdiv->top;
num_n = snum->top;
loop = num_n - div_n;
- /* Lets setup a 'window' into snum
- * This is the part that corresponds to the current
- * 'area' being divided */
+  // Let's set up a 'window' into snum.
+  // This is the part that corresponds to the current
+  // 'area' being divided
wnum.neg = 0;
wnum.d = &(snum->d[loop]);
wnum.top = div_n;
- /* only needed when BN_ucmp messes up the values between top and max */
- wnum.dmax = snum->dmax - loop; /* so we don't step out of bounds */
+ // only needed when BN_ucmp messes up the values between top and max
+ wnum.dmax = snum->dmax - loop; // so we don't step out of bounds
- /* Get the top 2 words of sdiv */
- /* div_n=sdiv->top; */
+ // Get the top 2 words of sdiv
+ // div_n=sdiv->top;
d0 = sdiv->d[div_n - 1];
d1 = (div_n == 1) ? 0 : sdiv->d[div_n - 2];
- /* pointer to the 'top' of snum */
+ // pointer to the 'top' of snum
wnump = &(snum->d[num_n - 1]);
- /* Setup to 'res' */
+ // Setup to 'res'
res->neg = (num->neg ^ divisor->neg);
if (!bn_wexpand(res, (loop + 1))) {
goto err;
@@ -282,13 +282,13 @@
res->top = loop - 1;
resp = &(res->d[loop - 1]);
- /* space for temp */
+ // space for temp
if (!bn_wexpand(tmp, (div_n + 1))) {
goto err;
}
- /* if res->top == 0 then clear the neg value otherwise decrease
- * the resp pointer */
+ // if res->top == 0 then clear the neg value otherwise decrease
+ // the resp pointer
if (res->top == 0) {
res->neg = 0;
} else {
@@ -297,8 +297,8 @@
for (i = 0; i < loop - 1; i++, wnump--, resp--) {
BN_ULONG q, l0;
- /* the first part of the loop uses the top two words of snum and sdiv to
- * calculate a BN_ULONG q such that | wnum - sdiv * q | < sdiv */
+ // the first part of the loop uses the top two words of snum and sdiv to
+ // calculate a BN_ULONG q such that | wnum - sdiv * q | < sdiv
BN_ULONG n0, n1, rem = 0;
n0 = wnump[0];
@@ -306,7 +306,7 @@
if (n0 == d0) {
q = BN_MASK2;
} else {
- /* n0 < d0 */
+ // n0 < d0
bn_div_rem_words(&q, &rem, n0, n1, d0);
#ifdef BN_ULLONG
@@ -318,11 +318,11 @@
q--;
rem += d0;
if (rem < d0) {
- break; /* don't let rem overflow */
+ break; // don't let rem overflow
}
t2 -= d1;
}
-#else /* !BN_ULLONG */
+#else // !BN_ULLONG
BN_ULONG t2l, t2h;
BN_UMULT_LOHI(t2l, t2h, d1, q);
for (;;) {
@@ -332,43 +332,41 @@
q--;
rem += d0;
if (rem < d0) {
- break; /* don't let rem overflow */
+ break; // don't let rem overflow
}
if (t2l < d1) {
t2h--;
}
t2l -= d1;
}
-#endif /* !BN_ULLONG */
+#endif // !BN_ULLONG
}
l0 = bn_mul_words(tmp->d, sdiv->d, div_n, q);
tmp->d[div_n] = l0;
wnum.d--;
- /* ingore top values of the bignums just sub the two
- * BN_ULONG arrays with bn_sub_words */
+    // ignore top values of the bignums; just sub the two
+    // BN_ULONG arrays with bn_sub_words
if (bn_sub_words(wnum.d, wnum.d, tmp->d, div_n + 1)) {
- /* Note: As we have considered only the leading
- * two BN_ULONGs in the calculation of q, sdiv * q
- * might be greater than wnum (but then (q-1) * sdiv
- * is less or equal than wnum)
- */
+ // Note: As we have considered only the leading
+ // two BN_ULONGs in the calculation of q, sdiv * q
+ // might be greater than wnum (but then (q-1) * sdiv
+ // is less or equal than wnum)
q--;
if (bn_add_words(wnum.d, wnum.d, sdiv->d, div_n)) {
- /* we can't have an overflow here (assuming
- * that q != 0, but if q == 0 then tmp is
- * zero anyway) */
+ // we can't have an overflow here (assuming
+ // that q != 0, but if q == 0 then tmp is
+ // zero anyway)
(*wnump)++;
}
}
- /* store part of the result */
+ // store part of the result
*resp = q;
}
bn_correct_top(snum);
if (rm != NULL) {
- /* Keep a copy of the neg flag in num because if rm==num
- * BN_rshift() will overwrite it.
- */
+ // Keep a copy of the neg flag in num because if rm==num
+ // BN_rshift() will overwrite it.
int neg = num->neg;
if (!BN_rshift(rm, snum, norm_shift)) {
goto err;
@@ -394,7 +392,7 @@
return 1;
}
- /* now -|d| < r < 0, so we have to set r := r + |d|. */
+ // now -|d| < r < 0, so we have to set r := r + |d|.
return (d->neg ? BN_sub : BN_add)(r, r, d);
}
@@ -425,8 +423,8 @@
return BN_nnmod(r, r, m, ctx);
}
-/* BN_mod_sub variant that may be used if both a and b are non-negative
- * and less than m */
+// BN_mod_sub variant that may be used if both a and b are non-negative
+// and less than m
int BN_mod_sub_quick(BIGNUM *r, const BIGNUM *a, const BIGNUM *b,
const BIGNUM *m) {
if (!BN_sub(r, a, b)) {
@@ -475,7 +473,7 @@
return 0;
}
- /* r->neg == 0, thus we don't need BN_nnmod */
+ // r->neg == 0, thus we don't need BN_nnmod
return BN_mod(r, r, m, ctx);
}
@@ -512,9 +510,9 @@
while (n > 0) {
int max_shift;
- /* 0 < r < m */
+ // 0 < r < m
max_shift = BN_num_bits(m) - BN_num_bits(r);
- /* max_shift >= 0 */
+ // max_shift >= 0
if (max_shift < 0) {
OPENSSL_PUT_ERROR(BN, BN_R_INPUT_NOT_REDUCED);
@@ -537,7 +535,7 @@
--n;
}
- /* BN_num_bits(r) <= BN_num_bits(m) */
+ // BN_num_bits(r) <= BN_num_bits(m)
if (BN_cmp(r, m) >= 0) {
if (!BN_sub(r, r, m)) {
return 0;
@@ -574,7 +572,7 @@
w &= BN_MASK2;
if (!w) {
- /* actually this an error (division by zero) */
+ // actually this an error (division by zero)
return (BN_ULONG) - 1;
}
@@ -582,7 +580,7 @@
return 0;
}
- /* normalize input for |bn_div_rem_words|. */
+ // normalize input for |bn_div_rem_words|.
j = BN_BITS2 - BN_num_bits_word(w);
w <<= j;
if (!BN_lshift(a, a, j)) {
@@ -623,8 +621,8 @@
}
#ifndef BN_ULLONG
- /* If |w| is too long and we don't have |BN_ULLONG| then we need to fall back
- * to using |BN_div_word|. */
+ // If |w| is too long and we don't have |BN_ULLONG| then we need to fall back
+ // to using |BN_div_word|.
if (w > ((BN_ULONG)1 << BN_BITS4)) {
BIGNUM *tmp = BN_dup(a);
if (tmp == NULL) {
@@ -656,27 +654,27 @@
size_t num_words = 1 + ((e - 1) / BN_BITS2);
- /* If |a| definitely has less than |e| bits, just BN_copy. */
+ // If |a| definitely has less than |e| bits, just BN_copy.
if ((size_t) a->top < num_words) {
return BN_copy(r, a) != NULL;
}
- /* Otherwise, first make sure we have enough space in |r|.
- * Note that this will fail if num_words > INT_MAX. */
+ // Otherwise, first make sure we have enough space in |r|.
+ // Note that this will fail if num_words > INT_MAX.
if (!bn_wexpand(r, num_words)) {
return 0;
}
- /* Copy the content of |a| into |r|. */
+ // Copy the content of |a| into |r|.
OPENSSL_memcpy(r->d, a->d, num_words * sizeof(BN_ULONG));
- /* If |e| isn't word-aligned, we have to mask off some of our bits. */
+ // If |e| isn't word-aligned, we have to mask off some of our bits.
size_t top_word_exponent = e % (sizeof(BN_ULONG) * 8);
if (top_word_exponent != 0) {
r->d[num_words - 1] &= (((BN_ULONG) 1) << top_word_exponent) - 1;
}
- /* Fill in the remaining fields of |r|. */
+ // Fill in the remaining fields of |r|.
r->neg = a->neg;
r->top = (int) num_words;
bn_correct_top(r);
@@ -688,41 +686,41 @@
return 0;
}
- /* If the returned value was non-negative, we're done. */
+ // If the returned value was non-negative, we're done.
if (BN_is_zero(r) || !r->neg) {
return 1;
}
size_t num_words = 1 + (e - 1) / BN_BITS2;
- /* Expand |r| to the size of our modulus. */
+ // Expand |r| to the size of our modulus.
if (!bn_wexpand(r, num_words)) {
return 0;
}
- /* Clear the upper words of |r|. */
+ // Clear the upper words of |r|.
OPENSSL_memset(&r->d[r->top], 0, (num_words - r->top) * BN_BYTES);
- /* Set parameters of |r|. */
+ // Set parameters of |r|.
r->neg = 0;
r->top = (int) num_words;
- /* Now, invert every word. The idea here is that we want to compute 2^e-|x|,
- * which is actually equivalent to the twos-complement representation of |x|
- * in |e| bits, which is -x = ~x + 1. */
+ // Now, invert every word. The idea here is that we want to compute 2^e-|x|,
+ // which is actually equivalent to the twos-complement representation of |x|
+ // in |e| bits, which is -x = ~x + 1.
for (int i = 0; i < r->top; i++) {
r->d[i] = ~r->d[i];
}
- /* If our exponent doesn't span the top word, we have to mask the rest. */
+ // If our exponent doesn't span the top word, we have to mask the rest.
size_t top_word_exponent = e % BN_BITS2;
if (top_word_exponent != 0) {
r->d[r->top - 1] &= (((BN_ULONG) 1) << top_word_exponent) - 1;
}
- /* Keep the correct_top invariant for BN_add. */
+ // Keep the correct_top invariant for BN_add.
bn_correct_top(r);
- /* Finally, add one, for the reason described above. */
+ // Finally, add one, for the reason described above.
return BN_add(r, r, BN_value_one());
}
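
A quick check of the BN_div contract documented above, with small numbers: num = -7, divisor = 2 gives dv = -3 (rounding toward zero) and rm = -1, so dv*divisor + rm = -6 + (-1) = -7 = num, and rm shares num's sign as promised. BN_nnmod would instead return the non-negative remainder rm + |divisor| = 1.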
diff --git a/crypto/fipsmodule/bn/exponentiation.c b/crypto/fipsmodule/bn/exponentiation.c
index 187b845..ae78ff9 100644
--- a/crypto/fipsmodule/bn/exponentiation.c
+++ b/crypto/fipsmodule/bn/exponentiation.c
@@ -188,12 +188,12 @@
return ret;
}
-/* maximum precomputation table size for *variable* sliding windows */
+// maximum precomputation table size for *variable* sliding windows
#define TABLE_SIZE 32
typedef struct bn_recp_ctx_st {
- BIGNUM N; /* the divisor */
- BIGNUM Nr; /* the reciprocal */
+ BIGNUM N; // the divisor
+ BIGNUM Nr; // the reciprocal
int num_bits;
int shift;
int flags;
@@ -227,10 +227,10 @@
return 1;
}
-/* len is the expected size of the result We actually calculate with an extra
- * word of precision, so we can do faster division if the remainder is not
- * required.
- * r := 2^len / m */
+// len is the expected size of the result. We actually calculate with an extra
+// word of precision, so we can do faster division if the remainder is not
+// required.
+// r := 2^len / m
static int BN_reciprocal(BIGNUM *r, const BIGNUM *m, int len, BN_CTX *ctx) {
int ret = -1;
BIGNUM *t;
@@ -289,34 +289,34 @@
return 1;
}
- /* We want the remainder
- * Given input of ABCDEF / ab
- * we need multiply ABCDEF by 3 digests of the reciprocal of ab */
+  // We want the remainder.
+  // Given input of ABCDEF / ab
+  // we need to multiply ABCDEF by 3 digits of the reciprocal of ab
- /* i := max(BN_num_bits(m), 2*BN_num_bits(N)) */
+ // i := max(BN_num_bits(m), 2*BN_num_bits(N))
i = BN_num_bits(m);
j = recp->num_bits << 1;
if (j > i) {
i = j;
}
- /* Nr := round(2^i / N) */
+ // Nr := round(2^i / N)
if (i != recp->shift) {
recp->shift =
BN_reciprocal(&(recp->Nr), &(recp->N), i,
- ctx); /* BN_reciprocal returns i, or -1 for an error */
+ ctx); // BN_reciprocal returns i, or -1 for an error
}
if (recp->shift == -1) {
goto err;
}
- /* d := |round(round(m / 2^BN_num_bits(N)) * recp->Nr / 2^(i -
- * BN_num_bits(N)))|
- * = |round(round(m / 2^BN_num_bits(N)) * round(2^i / N) / 2^(i -
- * BN_num_bits(N)))|
- * <= |(m / 2^BN_num_bits(N)) * (2^i / N) * (2^BN_num_bits(N) / 2^i)|
- * = |m/N| */
+ // d := |round(round(m / 2^BN_num_bits(N)) * recp->Nr / 2^(i -
+ // BN_num_bits(N)))|
+ // = |round(round(m / 2^BN_num_bits(N)) * round(2^i / N) / 2^(i -
+ // BN_num_bits(N)))|
+ // <= |(m / 2^BN_num_bits(N)) * (2^i / N) * (2^BN_num_bits(N) / 2^i)|
+ // = |m/N|
if (!BN_rshift(a, m, recp->num_bits)) {
goto err;
}
@@ -383,7 +383,7 @@
}
ca = a;
} else {
- ca = x; /* Just do the mod */
+ ca = x; // Just do the mod
}
ret = BN_div_recp(NULL, r, ca, recp, ctx);
@@ -393,29 +393,29 @@
return ret;
}
-/* BN_window_bits_for_exponent_size -- macro for sliding window mod_exp
- * functions
- *
- * For window size 'w' (w >= 2) and a random 'b' bits exponent, the number of
- * multiplications is a constant plus on average
- *
- * 2^(w-1) + (b-w)/(w+1);
- *
- * here 2^(w-1) is for precomputing the table (we actually need entries only
- * for windows that have the lowest bit set), and (b-w)/(w+1) is an
- * approximation for the expected number of w-bit windows, not counting the
- * first one.
- *
- * Thus we should use
- *
- * w >= 6 if b > 671
- * w = 5 if 671 > b > 239
- * w = 4 if 239 > b > 79
- * w = 3 if 79 > b > 23
- * w <= 2 if 23 > b
- *
- * (with draws in between). Very small exponents are often selected
- * with low Hamming weight, so we use w = 1 for b <= 23. */
+// BN_window_bits_for_exponent_size -- macro for sliding window mod_exp
+// functions
+//
+// For window size 'w' (w >= 2) and a random 'b' bits exponent, the number of
+// multiplications is a constant plus on average
+//
+// 2^(w-1) + (b-w)/(w+1);
+//
+// here 2^(w-1) is for precomputing the table (we actually need entries only
+// for windows that have the lowest bit set), and (b-w)/(w+1) is an
+// approximation for the expected number of w-bit windows, not counting the
+// first one.
+//
+// Thus we should use
+//
+// w >= 6 if b > 671
+// w = 5 if 671 > b > 239
+// w = 4 if 239 > b > 79
+// w = 3 if 79 > b > 23
+// w <= 2 if 23 > b
+//
+// (with draws in between). Very small exponents are often selected
+// with low Hamming weight, so we use w = 1 for b <= 23.
#define BN_window_bits_for_exponent_size(b) \
((b) > 671 ? 6 : \
(b) > 239 ? 5 : \
@@ -427,14 +427,14 @@
int i, j, bits, ret = 0, wstart, window;
int start = 1;
BIGNUM *aa;
- /* Table of variables obtained from 'ctx' */
+ // Table of variables obtained from 'ctx'
BIGNUM *val[TABLE_SIZE];
BN_RECP_CTX recp;
bits = BN_num_bits(p);
if (bits == 0) {
- /* x**0 mod 1 is still zero. */
+ // x**0 mod 1 is still zero.
if (BN_is_one(m)) {
BN_zero(r);
return 1;
@@ -451,7 +451,7 @@
BN_RECP_CTX_init(&recp);
if (m->neg) {
- /* ignore sign of 'm' */
+ // ignore sign of 'm'
if (!BN_copy(aa, m)) {
goto err;
}
@@ -466,7 +466,7 @@
}
if (!BN_nnmod(val[0], a, m, ctx)) {
- goto err; /* 1 */
+ goto err; // 1
}
if (BN_is_zero(val[0])) {
BN_zero(r);
@@ -477,7 +477,7 @@
window = BN_window_bits_for_exponent_size(bits);
if (window > 1) {
if (!BN_mod_mul_reciprocal(aa, val[0], val[0], &recp, ctx)) {
- goto err; /* 2 */
+ goto err; // 2
}
j = 1 << (window - 1);
for (i = 1; i < j; i++) {
@@ -488,18 +488,18 @@
}
}
- start = 1; /* This is used to avoid multiplication etc
- * when there is only the value '1' in the
- * buffer. */
- wstart = bits - 1; /* The top bit of the window */
+ start = 1; // This is used to avoid multiplication etc
+ // when there is only the value '1' in the
+ // buffer.
+ wstart = bits - 1; // The top bit of the window
if (!BN_one(r)) {
goto err;
}
for (;;) {
- int wvalue; /* The 'value' of the window */
- int wend; /* The bottom bit of the window */
+ int wvalue; // The 'value' of the window
+ int wend; // The bottom bit of the window
if (BN_is_bit_set(p, wstart) == 0) {
if (!start) {
@@ -514,10 +514,10 @@
continue;
}
- /* We now have wstart on a 'set' bit, we now need to work out
- * how bit a window to do. To do this we need to scan
- * forward until the last set bit before the end of the
- * window */
+    // We now have wstart on a 'set' bit; we now need to work out
+    // how big a window to do. To do this we need to scan
+    // forward until the last set bit before the end of the
+    // window.
wvalue = 1;
wend = 0;
for (i = 1; i < window; i++) {
@@ -531,9 +531,9 @@
}
}
- /* wend is the size of the current window */
+ // wend is the size of the current window
j = wend + 1;
- /* add the 'bytes above' */
+ // add the 'bytes above'
if (!start) {
for (i = 0; i < j; i++) {
if (!BN_mod_mul_reciprocal(r, r, r, &recp, ctx)) {
@@ -542,12 +542,12 @@
}
}
- /* wvalue will be an odd number < 2^window */
+ // wvalue will be an odd number < 2^window
if (!BN_mod_mul_reciprocal(r, r, val[wvalue >> 1], &recp, ctx)) {
goto err;
}
- /* move the 'window' down further */
+ // move the 'window' down further
wstart -= wend + 1;
start = 0;
if (wstart < 0) {
@@ -577,7 +577,7 @@
int start = 1;
BIGNUM *d, *r;
const BIGNUM *aa;
- /* Table of variables obtained from 'ctx' */
+ // Table of variables obtained from 'ctx'
BIGNUM *val[TABLE_SIZE];
BN_MONT_CTX *new_mont = NULL;
@@ -587,7 +587,7 @@
}
bits = BN_num_bits(p);
if (bits == 0) {
- /* x**0 mod 1 is still zero. */
+ // x**0 mod 1 is still zero.
if (BN_is_one(m)) {
BN_zero(rr);
return 1;
@@ -603,7 +603,7 @@
goto err;
}
- /* Allocate a montgomery context if it was not supplied by the caller. */
+ // Allocate a montgomery context if it was not supplied by the caller.
if (mont == NULL) {
new_mont = BN_MONT_CTX_new();
if (new_mont == NULL || !BN_MONT_CTX_set(new_mont, m, ctx)) {
@@ -627,13 +627,13 @@
goto err;
}
if (!BN_to_montgomery(val[0], aa, mont, ctx)) {
- goto err; /* 1 */
+ goto err; // 1
}
window = BN_window_bits_for_exponent_size(bits);
if (window > 1) {
if (!BN_mod_mul_montgomery(d, val[0], val[0], mont, ctx)) {
- goto err; /* 2 */
+ goto err; // 2
}
j = 1 << (window - 1);
for (i = 1; i < j; i++) {
@@ -644,32 +644,32 @@
}
}
- start = 1; /* This is used to avoid multiplication etc
- * when there is only the value '1' in the
- * buffer. */
- wstart = bits - 1; /* The top bit of the window */
+ start = 1; // This is used to avoid multiplication etc
+ // when there is only the value '1' in the
+ // buffer.
+ wstart = bits - 1; // The top bit of the window
- j = m->top; /* borrow j */
+ j = m->top; // borrow j
if (m->d[j - 1] & (((BN_ULONG)1) << (BN_BITS2 - 1))) {
if (!bn_wexpand(r, j)) {
goto err;
}
- /* 2^(top*BN_BITS2) - m */
+ // 2^(top*BN_BITS2) - m
r->d[0] = (0 - m->d[0]) & BN_MASK2;
for (i = 1; i < j; i++) {
r->d[i] = (~m->d[i]) & BN_MASK2;
}
r->top = j;
- /* Upper words will be zero if the corresponding words of 'm'
- * were 0xfff[...], so decrement r->top accordingly. */
+ // Upper words will be zero if the corresponding words of 'm'
+ // were 0xfff[...], so decrement r->top accordingly.
bn_correct_top(r);
} else if (!BN_to_montgomery(r, BN_value_one(), mont, ctx)) {
goto err;
}
for (;;) {
- int wvalue; /* The 'value' of the window */
- int wend; /* The bottom bit of the window */
+ int wvalue; // The 'value' of the window
+ int wend; // The bottom bit of the window
if (BN_is_bit_set(p, wstart) == 0) {
if (!start && !BN_mod_mul_montgomery(r, r, r, mont, ctx)) {
@@ -682,9 +682,9 @@
continue;
}
- /* We now have wstart on a 'set' bit, we now need to work out how bit a
- * window to do. To do this we need to scan forward until the last set bit
- * before the end of the window */
+    // We now have wstart on a 'set' bit; we now need to work out how big a
+    // window to do. To do this we need to scan forward until the last set bit
+    // before the end of the window.
wvalue = 1;
wend = 0;
for (i = 1; i < window; i++) {
@@ -698,9 +698,9 @@
}
}
- /* wend is the size of the current window */
+ // wend is the size of the current window
j = wend + 1;
- /* add the 'bytes above' */
+ // add the 'bytes above'
if (!start) {
for (i = 0; i < j; i++) {
if (!BN_mod_mul_montgomery(r, r, r, mont, ctx)) {
@@ -709,12 +709,12 @@
}
}
- /* wvalue will be an odd number < 2^window */
+ // wvalue will be an odd number < 2^window
if (!BN_mod_mul_montgomery(r, r, val[wvalue >> 1], mont, ctx)) {
goto err;
}
- /* move the 'window' down further */
+ // move the 'window' down further
wstart -= wend + 1;
start = 0;
if (wstart < 0) {
@@ -733,10 +733,10 @@
return ret;
}
-/* BN_mod_exp_mont_consttime() stores the precomputed powers in a specific
- * layout so that accessing any of these table values shows the same access
- * pattern as far as cache lines are concerned. The following functions are
- * used to transfer a BIGNUM from/to that table. */
+// BN_mod_exp_mont_consttime() stores the precomputed powers in a specific
+// layout so that accessing any of these table values shows the same access
+// pattern as far as cache lines are concerned. The following functions are
+// used to transfer a BIGNUM from/to that table.
static int copy_to_prebuf(const BIGNUM *b, int top, unsigned char *buf, int idx,
int window) {
int i, j;
@@ -744,7 +744,7 @@
BN_ULONG *table = (BN_ULONG *) buf;
if (top > b->top) {
- top = b->top; /* this works because 'buf' is explicitly zeroed */
+ top = b->top; // this works because 'buf' is explicitly zeroed
}
for (i = 0, j = idx; i < top; i++, j += width) {
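
// Illustrative sketch of the layout (hypothetical names, not part of this
// change): entry |idx| is striped across the table so that limb i of every
// entry shares cache lines, making the lines touched independent of |idx|.
#include <stddef.h>
#include <stdint.h>

static void scatter(uint64_t *table, const uint64_t *v, size_t top,
                    size_t idx, unsigned window) {
  size_t width = (size_t)1 << window;
  for (size_t i = 0; i < top; i++) {
    table[i * width + idx] = v[i];  // limb i of all entries is contiguous
  }
}

static void gather(uint64_t *out, const uint64_t *table, size_t top,
                   size_t idx, unsigned window) {
  size_t width = (size_t)1 << window;
  for (size_t i = 0; i < top; i++) {
    uint64_t acc = 0;
    for (size_t j = 0; j < width; j++) {
      uint64_t mask = (uint64_t)0 - (uint64_t)(j == idx);  // all-ones iff hit
      acc |= table[i * width + j] & mask;  // read every entry, keep one
    }
    out[i] = acc;
  }
}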
@@ -778,8 +778,8 @@
int xstride = 1 << (window - 2);
BN_ULONG y0, y1, y2, y3;
- i = idx >> (window - 2); /* equivalent of idx / xstride */
- idx &= xstride - 1; /* equivalent of idx % xstride */
+ i = idx >> (window - 2); // equivalent of idx / xstride
+ idx &= xstride - 1; // equivalent of idx % xstride
y0 = (BN_ULONG)0 - (constant_time_eq_int(i, 0) & 1);
y1 = (BN_ULONG)0 - (constant_time_eq_int(i, 1) & 1);
@@ -804,23 +804,23 @@
return 1;
}
-/* BN_mod_exp_mont_conttime is based on the assumption that the L1 data cache
- * line width of the target processor is at least the following value. */
+// BN_mod_exp_mont_consttime is based on the assumption that the L1 data cache
+// line width of the target processor is at least the following value.
#define MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH (64)
#define MOD_EXP_CTIME_MIN_CACHE_LINE_MASK \
(MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH - 1)
-/* Window sizes optimized for fixed window size modular exponentiation
- * algorithm (BN_mod_exp_mont_consttime).
- *
- * To achieve the security goals of BN_mode_exp_mont_consttime, the maximum
- * size of the window must not exceed
- * log_2(MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH).
- *
- * Window size thresholds are defined for cache line sizes of 32 and 64, cache
- * line sizes where log_2(32)=5 and log_2(64)=6 respectively. A window size of
- * 7 should only be used on processors that have a 128 byte or greater cache
- * line size. */
+// Window sizes optimized for fixed window size modular exponentiation
+// algorithm (BN_mod_exp_mont_consttime).
+//
+// To achieve the security goals of BN_mod_exp_mont_consttime, the maximum
+// size of the window must not exceed
+// log_2(MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH).
+//
+// Window size thresholds are defined for cache line sizes of 32 and 64, cache
+// line sizes where log_2(32)=5 and log_2(64)=6 respectively. A window size of
+// 7 should only be used on processors that have a 128 byte or greater cache
+// line size.
#if MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH == 64
#define BN_window_bits_for_ctime_exponent_size(b) \
@@ -835,19 +835,18 @@
#endif
-/* Given a pointer value, compute the next address that is a cache line
- * multiple. */
+// Given a pointer value, compute the next address that is a cache line
+// multiple.
#define MOD_EXP_CTIME_ALIGN(x_) \
((unsigned char *)(x_) + \
(MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH - \
(((size_t)(x_)) & (MOD_EXP_CTIME_MIN_CACHE_LINE_MASK))))
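
// Note the macro above always advances into the next cache line, even when
// x_ is already aligned; callers therefore over-allocate by a full line. A
// word-sized illustration (hypothetical name, not part of this change):
#include <stdint.h>

static unsigned char *align_to_next_line(unsigned char *p) {
  // Matches MOD_EXP_CTIME_ALIGN for a 64-byte line: p % 64 == 0 yields p + 64.
  return p + (64 - ((uintptr_t)p & 63));
}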
-/* This variant of BN_mod_exp_mont() uses fixed windows and the special
- * precomputation memory layout to limit data-dependency to a minimum
- * to protect secret exponents (cf. the hyper-threading timing attacks
- * pointed out by Colin Percival,
- * http://www.daemonology.net/hyperthreading-considered-harmful/)
- */
+// This variant of BN_mod_exp_mont() uses fixed windows and the special
+// precomputation memory layout to limit data-dependency to a minimum
+// to protect secret exponents (cf. the hyper-threading timing attacks
+// pointed out by Colin Percival,
+// http://www.daemonology.net/hyperthreading-considered-harmful/)
int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
const BIGNUM *m, BN_CTX *ctx,
const BN_MONT_CTX *mont) {
@@ -871,7 +870,7 @@
bits = BN_num_bits(p);
if (bits == 0) {
- /* x**0 mod 1 is still zero. */
+ // x**0 mod 1 is still zero.
if (BN_is_one(m)) {
BN_zero(rr);
return 1;
@@ -879,7 +878,7 @@
return BN_one(rr);
}
- /* Allocate a montgomery context if it was not supplied by the caller. */
+ // Allocate a montgomery context if it was not supplied by the caller.
if (mont == NULL) {
new_mont = BN_MONT_CTX_new();
if (new_mont == NULL || !BN_MONT_CTX_set(new_mont, m, ctx)) {
@@ -898,9 +897,9 @@
}
#ifdef RSAZ_ENABLED
- /* If the size of the operands allow it, perform the optimized
- * RSAZ exponentiation. For further information see
- * crypto/bn/rsaz_exp.c and accompanying assembly modules. */
+  // If the size of the operands allows it, perform the optimized
+ // RSAZ exponentiation. For further information see
+ // crypto/bn/rsaz_exp.c and accompanying assembly modules.
if ((16 == a->top) && (16 == p->top) && (BN_num_bits(m) == 1024) &&
rsaz_avx2_eligible()) {
if (!bn_wexpand(rr, 16)) {
@@ -915,19 +914,18 @@
}
#endif
- /* Get the window size to use with size of p. */
+ // Get the window size to use with size of p.
window = BN_window_bits_for_ctime_exponent_size(bits);
#if defined(OPENSSL_BN_ASM_MONT5)
if (window >= 5) {
- window = 5; /* ~5% improvement for RSA2048 sign, and even for RSA4096 */
- /* reserve space for mont->N.d[] copy */
+ window = 5; // ~5% improvement for RSA2048 sign, and even for RSA4096
+ // reserve space for mont->N.d[] copy
powerbufLen += top * sizeof(mont->N.d[0]);
}
#endif
- /* Allocate a buffer large enough to hold all of the pre-computed
- * powers of am, am itself and tmp.
- */
+ // Allocate a buffer large enough to hold all of the pre-computed
+ // powers of am, am itself and tmp.
numPowers = 1 << window;
powerbufLen +=
sizeof(m->d[0]) *
@@ -953,7 +951,7 @@
}
#endif
- /* lay down tmp and am right after powers table */
+ // lay down tmp and am right after powers table
tmp.d = (BN_ULONG *)(powerbuf + sizeof(m->d[0]) * top * numPowers);
am.d = tmp.d + top;
tmp.top = am.top = 0;
@@ -961,10 +959,10 @@
tmp.neg = am.neg = 0;
tmp.flags = am.flags = BN_FLG_STATIC_DATA;
-/* prepare a^0 in Montgomery domain */
-/* by Shay Gueron's suggestion */
+// prepare a^0 in Montgomery domain
+// by Shay Gueron's suggestion
if (m->d[top - 1] & (((BN_ULONG)1) << (BN_BITS2 - 1))) {
- /* 2^(top*BN_BITS2) - m */
+ // 2^(top*BN_BITS2) - m
tmp.d[0] = (0 - m->d[0]) & BN_MASK2;
for (i = 1; i < top; i++) {
tmp.d[i] = (~m->d[i]) & BN_MASK2;
@@ -974,7 +972,7 @@
goto err;
}
- /* prepare a^1 in Montgomery domain */
+ // prepare a^1 in Montgomery domain
assert(!a->neg);
assert(BN_ucmp(a, m) < 0);
if (!BN_to_montgomery(&am, a, mont, ctx)) {
@@ -982,18 +980,18 @@
}
#if defined(OPENSSL_BN_ASM_MONT5)
- /* This optimization uses ideas from http://eprint.iacr.org/2011/239,
- * specifically optimization of cache-timing attack countermeasures
- * and pre-computation optimization. */
+ // This optimization uses ideas from http://eprint.iacr.org/2011/239,
+ // specifically optimization of cache-timing attack countermeasures
+ // and pre-computation optimization.
- /* Dedicated window==4 case improves 512-bit RSA sign by ~15%, but as
- * 512-bit RSA is hardly relevant, we omit it to spare size... */
+ // Dedicated window==4 case improves 512-bit RSA sign by ~15%, but as
+ // 512-bit RSA is hardly relevant, we omit it to spare size...
if (window == 5 && top > 1) {
const BN_ULONG *n0 = mont->n0;
BN_ULONG *np;
- /* BN_to_montgomery can contaminate words above .top
- * [in BN_DEBUG[_DEBUG] build]... */
+ // BN_to_montgomery can contaminate words above .top
+ // [in BN_DEBUG[_DEBUG] build]...
for (i = am.top; i < top; i++) {
am.d[i] = 0;
}
@@ -1001,7 +999,7 @@
tmp.d[i] = 0;
}
- /* copy mont->N.d[] to improve cache locality */
+ // copy mont->N.d[] to improve cache locality
for (np = am.d + top, i = 0; i < top; i++) {
np[i] = mont->N.d[i];
}
@@ -1011,7 +1009,7 @@
bn_mul_mont(tmp.d, am.d, am.d, np, n0, top);
bn_scatter5(tmp.d, top, powerbuf, 2);
- /* same as above, but uses squaring for 1/2 of operations */
+ // same as above, but uses squaring for 1/2 of operations
for (i = 4; i < 32; i *= 2) {
bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top);
bn_scatter5(tmp.d, top, powerbuf, i);
@@ -1042,13 +1040,12 @@
}
bn_gather5(tmp.d, top, powerbuf, wvalue);
- /* At this point |bits| is 4 mod 5 and at least -1. (|bits| is the first bit
- * that has not been read yet.) */
+ // At this point |bits| is 4 mod 5 and at least -1. (|bits| is the first bit
+ // that has not been read yet.)
assert(bits >= -1 && (bits == -1 || bits % 5 == 4));
- /* Scan the exponent one window at a time starting from the most
- * significant bits.
- */
+ // Scan the exponent one window at a time starting from the most
+ // significant bits.
if (top & 7) {
while (bits >= 0) {
for (wvalue = 0, i = 0; i < 5; i++, bits--) {
@@ -1066,16 +1063,16 @@
const uint8_t *p_bytes = (const uint8_t *)p->d;
int max_bits = p->top * BN_BITS2;
assert(bits < max_bits);
- /* |p = 0| has been handled as a special case, so |max_bits| is at least
- * one word. */
+ // |p = 0| has been handled as a special case, so |max_bits| is at least
+ // one word.
assert(max_bits >= 64);
- /* If the first bit to be read lands in the last byte, unroll the first
- * iteration to avoid reading past the bounds of |p->d|. (After the first
- * iteration, we are guaranteed to be past the last byte.) Note |bits|
- * here is the top bit, inclusive. */
+ // If the first bit to be read lands in the last byte, unroll the first
+ // iteration to avoid reading past the bounds of |p->d|. (After the first
+ // iteration, we are guaranteed to be past the last byte.) Note |bits|
+ // here is the top bit, inclusive.
if (bits - 4 >= max_bits - 8) {
- /* Read five bits from |bits-4| through |bits|, inclusive. */
+ // Read five bits from |bits-4| through |bits|, inclusive.
wvalue = p_bytes[p->top * BN_BYTES - 1];
wvalue >>= (bits - 4) & 7;
wvalue &= 0x1f;
@@ -1083,7 +1080,7 @@
bn_power5(tmp.d, tmp.d, powerbuf, np, n0, top, wvalue);
}
while (bits >= 0) {
- /* Read five bits from |bits-4| through |bits|, inclusive. */
+ // Read five bits from |bits-4| through |bits|, inclusive.
int first_bit = bits - 4;
uint16_t val;
OPENSSL_memcpy(&val, p_bytes + (first_bit >> 3), sizeof(val));
@@ -1101,7 +1098,7 @@
if (!BN_copy(rr, &tmp)) {
ret = 0;
}
- goto err; /* non-zero ret means it's not error */
+ goto err; // non-zero ret means it's not error
}
} else
#endif
@@ -1111,18 +1108,17 @@
goto err;
}
- /* If the window size is greater than 1, then calculate
- * val[i=2..2^winsize-1]. Powers are computed as a*a^(i-1)
- * (even powers could instead be computed as (a^(i/2))^2
- * to use the slight performance advantage of sqr over mul).
- */
+ // If the window size is greater than 1, then calculate
+ // val[i=2..2^winsize-1]. Powers are computed as a*a^(i-1)
+ // (even powers could instead be computed as (a^(i/2))^2
+ // to use the slight performance advantage of sqr over mul).
if (window > 1) {
if (!BN_mod_mul_montgomery(&tmp, &am, &am, mont, ctx) ||
!copy_to_prebuf(&tmp, top, powerbuf, 2, window)) {
goto err;
}
for (i = 3; i < numPowers; i++) {
- /* Calculate a^i = a^(i-1) * a */
+ // Calculate a^i = a^(i-1) * a
if (!BN_mod_mul_montgomery(&tmp, &am, &tmp, mont, ctx) ||
!copy_to_prebuf(&tmp, top, powerbuf, i, window)) {
goto err;
@@ -1138,13 +1134,12 @@
goto err;
}
- /* Scan the exponent one window at a time starting from the most
- * significant bits.
- */
+ // Scan the exponent one window at a time starting from the most
+ // significant bits.
while (bits >= 0) {
- wvalue = 0; /* The 'value' of the window */
+ wvalue = 0; // The 'value' of the window
- /* Scan the window, squaring the result as we go */
+ // Scan the window, squaring the result as we go
for (i = 0; i < window; i++, bits--) {
if (!BN_mod_mul_montgomery(&tmp, &tmp, &tmp, mont, ctx)) {
goto err;
@@ -1152,19 +1147,19 @@
wvalue = (wvalue << 1) + BN_is_bit_set(p, bits);
}
- /* Fetch the appropriate pre-computed value from the pre-buf */
+ // Fetch the appropriate pre-computed value from the pre-buf
if (!copy_from_prebuf(&am, top, powerbuf, wvalue, window)) {
goto err;
}
- /* Multiply the result into the intermediate result */
+ // Multiply the result into the intermediate result
if (!BN_mod_mul_montgomery(&tmp, &tmp, &am, mont, ctx)) {
goto err;
}
}
}
- /* Convert the final result from montgomery to standard format */
+ // Convert the final result from montgomery to standard format
if (!BN_from_montgomery(rr, &tmp, mont, ctx)) {
goto err;
}
@@ -1212,7 +1207,7 @@
int ret = 0;
BN_MONT_CTX *new_mont = NULL;
- /* Allocate a montgomery context if it was not supplied by the caller. */
+ // Allocate a montgomery context if it was not supplied by the caller.
if (mont == NULL) {
new_mont = BN_MONT_CTX_new();
if (new_mont == NULL || !BN_MONT_CTX_set(new_mont, m, ctx)) {
@@ -1221,9 +1216,9 @@
mont = new_mont;
}
- /* BN_mod_mul_montgomery removes one Montgomery factor, so passing one
- * Montgomery-encoded and one non-Montgomery-encoded value gives a
- * non-Montgomery-encoded result. */
+ // BN_mod_mul_montgomery removes one Montgomery factor, so passing one
+ // Montgomery-encoded and one non-Montgomery-encoded value gives a
+ // non-Montgomery-encoded result.
if (!BN_mod_exp_mont(rr, a1, p1, m, ctx, mont) ||
!BN_mod_exp_mont(&tmp, a2, p2, m, ctx, mont) ||
!BN_to_montgomery(rr, rr, mont, ctx) ||
diff --git a/crypto/fipsmodule/bn/gcd.c b/crypto/fipsmodule/bn/gcd.c
index 7c20b8e..850d446 100644
--- a/crypto/fipsmodule/bn/gcd.c
+++ b/crypto/fipsmodule/bn/gcd.c
@@ -118,9 +118,9 @@
BIGNUM *t;
int shifts = 0;
- /* 0 <= b <= a */
+ // 0 <= b <= a
while (!BN_is_zero(b)) {
- /* 0 < b <= a */
+ // 0 < b <= a
if (BN_is_odd(a)) {
if (BN_is_odd(b)) {
@@ -136,7 +136,7 @@
b = t;
}
} else {
- /* a odd - b even */
+ // a odd - b even
if (!BN_rshift1(b, b)) {
goto err;
}
@@ -147,7 +147,7 @@
}
}
} else {
- /* a is even */
+ // a is even
if (BN_is_odd(b)) {
if (!BN_rshift1(a, a)) {
goto err;
@@ -158,7 +158,7 @@
b = t;
}
} else {
- /* a even - b even */
+ // a even - b even
if (!BN_rshift1(a, a)) {
goto err;
}
@@ -168,7 +168,7 @@
shifts++;
}
}
- /* 0 <= b <= a */
+ // 0 <= b <= a
}
if (shifts) {
@@ -224,7 +224,7 @@
return ret;
}
-/* solves ax == 1 (mod n) */
+// solves ax == 1 (mod n)
static int bn_mod_inverse_general(BIGNUM *out, int *out_no_inverse,
const BIGNUM *a, const BIGNUM *n,
BN_CTX *ctx);
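
// The even/odd case analysis above is binary GCD (Stein's algorithm). A
// word-sized sketch of the same idea (illustrative only, hypothetical name):
#include <stdint.h>

static uint64_t binary_gcd(uint64_t a, uint64_t b) {
  if (a == 0) return b;
  if (b == 0) return a;
  int shifts = 0;
  while (((a | b) & 1) == 0) {  // a even - b even: extract a shared factor of 2
    a >>= 1;
    b >>= 1;
    shifts++;
  }
  while ((a & 1) == 0) a >>= 1;  // a stays odd from here on
  while (b != 0) {
    while ((b & 1) == 0) b >>= 1;  // halving an even b leaves the gcd unchanged
    if (a > b) {  // keep 0 < a <= b with both odd
      uint64_t t = a;
      a = b;
      b = t;
    }
    b -= a;  // odd - odd is even (or zero), so the next halving makes progress
  }
  return a << shifts;  // restore the shared factors of two
}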
@@ -264,30 +264,29 @@
}
A->neg = 0;
sign = -1;
- /* From B = a mod |n|, A = |n| it follows that
- *
- * 0 <= B < A,
- * -sign*X*a == B (mod |n|),
- * sign*Y*a == A (mod |n|).
- */
+ // From B = a mod |n|, A = |n| it follows that
+ //
+ // 0 <= B < A,
+ // -sign*X*a == B (mod |n|),
+ // sign*Y*a == A (mod |n|).
- /* Binary inversion algorithm; requires odd modulus. This is faster than the
- * general algorithm if the modulus is sufficiently small (about 400 .. 500
- * bits on 32-bit systems, but much more on 64-bit systems) */
+ // Binary inversion algorithm; requires odd modulus. This is faster than the
+ // general algorithm if the modulus is sufficiently small (about 400 .. 500
+ // bits on 32-bit systems, but much more on 64-bit systems)
int shift;
while (!BN_is_zero(B)) {
- /* 0 < B < |n|,
- * 0 < A <= |n|,
- * (1) -sign*X*a == B (mod |n|),
- * (2) sign*Y*a == A (mod |n|) */
+ // 0 < B < |n|,
+ // 0 < A <= |n|,
+ // (1) -sign*X*a == B (mod |n|),
+ // (2) sign*Y*a == A (mod |n|)
- /* Now divide B by the maximum possible power of two in the integers,
- * and divide X by the same value mod |n|.
- * When we're done, (1) still holds. */
+ // Now divide B by the maximum possible power of two in the integers,
+ // and divide X by the same value mod |n|.
+ // When we're done, (1) still holds.
shift = 0;
while (!BN_is_bit_set(B, shift)) {
- /* note that 0 < B */
+ // note that 0 < B
shift++;
if (BN_is_odd(X)) {
@@ -295,7 +294,7 @@
goto err;
}
}
- /* now X is even, so we can easily divide it by two */
+ // now X is even, so we can easily divide it by two
if (!BN_rshift1(X, X)) {
goto err;
}
@@ -306,10 +305,10 @@
}
}
- /* Same for A and Y. Afterwards, (2) still holds. */
+ // Same for A and Y. Afterwards, (2) still holds.
shift = 0;
while (!BN_is_bit_set(A, shift)) {
- /* note that 0 < A */
+ // note that 0 < A
shift++;
if (BN_is_odd(Y)) {
@@ -317,7 +316,7 @@
goto err;
}
}
- /* now Y is even */
+ // now Y is even
if (!BN_rshift1(Y, Y)) {
goto err;
}
@@ -328,32 +327,32 @@
}
}
- /* We still have (1) and (2).
- * Both A and B are odd.
- * The following computations ensure that
- *
- * 0 <= B < |n|,
- * 0 < A < |n|,
- * (1) -sign*X*a == B (mod |n|),
- * (2) sign*Y*a == A (mod |n|),
- *
- * and that either A or B is even in the next iteration. */
+ // We still have (1) and (2).
+ // Both A and B are odd.
+ // The following computations ensure that
+ //
+ // 0 <= B < |n|,
+ // 0 < A < |n|,
+ // (1) -sign*X*a == B (mod |n|),
+ // (2) sign*Y*a == A (mod |n|),
+ //
+ // and that either A or B is even in the next iteration.
if (BN_ucmp(B, A) >= 0) {
- /* -sign*(X + Y)*a == B - A (mod |n|) */
+ // -sign*(X + Y)*a == B - A (mod |n|)
if (!BN_uadd(X, X, Y)) {
goto err;
}
- /* NB: we could use BN_mod_add_quick(X, X, Y, n), but that
- * actually makes the algorithm slower */
+ // NB: we could use BN_mod_add_quick(X, X, Y, n), but that
+ // actually makes the algorithm slower
if (!BN_usub(B, B, A)) {
goto err;
}
} else {
- /* sign*(X + Y)*a == A - B (mod |n|) */
+ // sign*(X + Y)*a == A - B (mod |n|)
if (!BN_uadd(Y, Y, X)) {
goto err;
}
- /* as above, BN_mod_add_quick(Y, Y, X, n) would slow things down */
+ // as above, BN_mod_add_quick(Y, Y, X, n) would slow things down
if (!BN_usub(A, A, B)) {
goto err;
}
@@ -366,20 +365,20 @@
goto err;
}
- /* The while loop (Euclid's algorithm) ends when
- * A == gcd(a,n);
- * we have
- * sign*Y*a == A (mod |n|),
- * where Y is non-negative. */
+ // The while loop (Euclid's algorithm) ends when
+ // A == gcd(a,n);
+ // we have
+ // sign*Y*a == A (mod |n|),
+ // where Y is non-negative.
if (sign < 0) {
if (!BN_sub(Y, n, Y)) {
goto err;
}
}
- /* Now Y*a == A (mod |n|). */
+ // Now Y*a == A (mod |n|).
- /* Y*a == 1 (mod |n|) */
+ // Y*a == 1 (mod |n|)
if (!Y->neg && BN_ucmp(Y, n) < 0) {
if (!BN_copy(R, Y)) {
goto err;
@@ -470,11 +469,11 @@
return ret;
}
-/* bn_mod_inverse_general is the general inversion algorithm that works for
- * both even and odd |n|. It was specifically designed to contain fewer
- * branches that may leak sensitive information; see "New Branch Prediction
- * Vulnerabilities in OpenSSL and Necessary Software Countermeasures" by
- * Onur Acıçmez, Shay Gueron, and Jean-Pierre Seifert. */
+// bn_mod_inverse_general is the general inversion algorithm that works for
+// both even and odd |n|. It was specifically designed to contain fewer
+// branches that may leak sensitive information; see "New Branch Prediction
+// Vulnerabilities in OpenSSL and Necessary Software Countermeasures" by
+// Onur Acıçmez, Shay Gueron, and Jean-Pierre Seifert.
static int bn_mod_inverse_general(BIGNUM *out, int *out_no_inverse,
const BIGNUM *a, const BIGNUM *n,
BN_CTX *ctx) {
@@ -505,58 +504,53 @@
A->neg = 0;
sign = -1;
- /* From B = a mod |n|, A = |n| it follows that
- *
- * 0 <= B < A,
- * -sign*X*a == B (mod |n|),
- * sign*Y*a == A (mod |n|).
- */
+ // From B = a mod |n|, A = |n| it follows that
+ //
+ // 0 <= B < A,
+ // -sign*X*a == B (mod |n|),
+ // sign*Y*a == A (mod |n|).
while (!BN_is_zero(B)) {
BIGNUM *tmp;
- /*
- * 0 < B < A,
- * (*) -sign*X*a == B (mod |n|),
- * sign*Y*a == A (mod |n|)
- */
+ // 0 < B < A,
+ // (*) -sign*X*a == B (mod |n|),
+ // sign*Y*a == A (mod |n|)
- /* (D, M) := (A/B, A%B) ... */
+ // (D, M) := (A/B, A%B) ...
if (!BN_div(D, M, A, B, ctx)) {
goto err;
}
- /* Now
- * A = D*B + M;
- * thus we have
- * (**) sign*Y*a == D*B + M (mod |n|).
- */
+ // Now
+ // A = D*B + M;
+ // thus we have
+ // (**) sign*Y*a == D*B + M (mod |n|).
- tmp = A; /* keep the BIGNUM object, the value does not matter */
+ tmp = A; // keep the BIGNUM object, the value does not matter
- /* (A, B) := (B, A mod B) ... */
+ // (A, B) := (B, A mod B) ...
A = B;
B = M;
- /* ... so we have 0 <= B < A again */
+ // ... so we have 0 <= B < A again
- /* Since the former M is now B and the former B is now A,
- * (**) translates into
- * sign*Y*a == D*A + B (mod |n|),
- * i.e.
- * sign*Y*a - D*A == B (mod |n|).
- * Similarly, (*) translates into
- * -sign*X*a == A (mod |n|).
- *
- * Thus,
- * sign*Y*a + D*sign*X*a == B (mod |n|),
- * i.e.
- * sign*(Y + D*X)*a == B (mod |n|).
- *
- * So if we set (X, Y, sign) := (Y + D*X, X, -sign), we arrive back at
- * -sign*X*a == B (mod |n|),
- * sign*Y*a == A (mod |n|).
- * Note that X and Y stay non-negative all the time.
- */
+ // Since the former M is now B and the former B is now A,
+ // (**) translates into
+ // sign*Y*a == D*A + B (mod |n|),
+ // i.e.
+ // sign*Y*a - D*A == B (mod |n|).
+ // Similarly, (*) translates into
+ // -sign*X*a == A (mod |n|).
+ //
+ // Thus,
+ // sign*Y*a + D*sign*X*a == B (mod |n|),
+ // i.e.
+ // sign*(Y + D*X)*a == B (mod |n|).
+ //
+ // So if we set (X, Y, sign) := (Y + D*X, X, -sign), we arrive back at
+ // -sign*X*a == B (mod |n|),
+ // sign*Y*a == A (mod |n|).
+ // Note that X and Y stay non-negative all the time.
if (!BN_mul(tmp, D, X, ctx)) {
goto err;
@@ -565,7 +559,7 @@
goto err;
}
- M = Y; /* keep the BIGNUM object, the value does not matter */
+ M = Y; // keep the BIGNUM object, the value does not matter
Y = X;
X = tmp;
sign = -sign;
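
// The update rule derived above in miniature (illustrative only, hypothetical
// name): X and Y stay non-negative and |sign| tracks which congruence each
// satisfies. Returns y with a*y == 1 (mod n), or 0 if gcd(a, n) != 1.
// Assumes 0 < a < n and n small enough that Y + D*X cannot overflow.
#include <stdint.h>

static uint64_t mod_inverse(uint64_t a, uint64_t n) {
  uint64_t A = n, B = a, X = 1, Y = 0;
  int sign = -1;
  while (B != 0) {
    uint64_t D = A / B, M = A % B;  // (D, M) := (A/B, A%B)
    A = B;                          // (A, B) := (B, A mod B)
    B = M;
    uint64_t t = Y + D * X;         // (X, Y, sign) := (Y + D*X, X, -sign)
    Y = X;
    X = t;
    sign = -sign;
  }
  if (A != 1) {
    return 0;                       // a is not invertible mod n
  }
  return sign < 0 ? n - Y : Y;      // now Y*a == 1 (mod n)
}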
@@ -577,22 +571,20 @@
goto err;
}
- /*
- * The while loop (Euclid's algorithm) ends when
- * A == gcd(a,n);
- * we have
- * sign*Y*a == A (mod |n|),
- * where Y is non-negative.
- */
+ // The while loop (Euclid's algorithm) ends when
+ // A == gcd(a,n);
+ // we have
+ // sign*Y*a == A (mod |n|),
+ // where Y is non-negative.
if (sign < 0) {
if (!BN_sub(Y, n, Y)) {
goto err;
}
}
- /* Now Y*a == A (mod |n|). */
+ // Now Y*a == A (mod |n|).
- /* Y*a == 1 (mod |n|) */
+ // Y*a == 1 (mod |n|)
if (!Y->neg && BN_ucmp(Y, n) < 0) {
if (!BN_copy(R, Y)) {
goto err;
diff --git a/crypto/fipsmodule/bn/generic.c b/crypto/fipsmodule/bn/generic.c
index 3d98689..b70080f 100644
--- a/crypto/fipsmodule/bn/generic.c
+++ b/crypto/fipsmodule/bn/generic.c
@@ -61,8 +61,8 @@
#include "internal.h"
-/* This file has two other implementations: x86 assembly language in
- * asm/bn-586.pl and x86_64 inline assembly in asm/x86_64-gcc.c. */
+// This file has two other implementations: x86 assembly language in
+// asm/bn-586.pl and x86_64 inline assembly in asm/x86_64-gcc.c.
#if defined(OPENSSL_NO_ASM) || \
!(defined(OPENSSL_X86) || (defined(OPENSSL_X86_64) && defined(__GNUC__)))
@@ -122,7 +122,7 @@
BN_UMULT_LOHI(r0, r1, tmp, tmp); \
} while (0)
-#endif /* !BN_ULLONG */
+#endif // !BN_ULLONG
BN_ULONG bn_mul_add_words(BN_ULONG *rp, const BN_ULONG *ap, int num,
BN_ULONG w) {
@@ -242,7 +242,7 @@
return (BN_ULONG)ll;
}
-#else /* !BN_ULLONG */
+#else // !BN_ULLONG
BN_ULONG bn_add_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b,
int n) {
@@ -299,7 +299,7 @@
return (BN_ULONG)c;
}
-#endif /* !BN_ULLONG */
+#endif // !BN_ULLONG
BN_ULONG bn_sub_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b,
int n) {
@@ -356,15 +356,15 @@
return c;
}
-/* mul_add_c(a,b,c0,c1,c2) -- c+=a*b for three word number c=(c2,c1,c0) */
-/* mul_add_c2(a,b,c0,c1,c2) -- c+=2*a*b for three word number c=(c2,c1,c0) */
-/* sqr_add_c(a,i,c0,c1,c2) -- c+=a[i]^2 for three word number c=(c2,c1,c0) */
-/* sqr_add_c2(a,i,c0,c1,c2) -- c+=2*a[i]*a[j] for three word number c=(c2,c1,c0) */
+// mul_add_c(a,b,c0,c1,c2) -- c+=a*b for three word number c=(c2,c1,c0)
+// mul_add_c2(a,b,c0,c1,c2) -- c+=2*a*b for three word number c=(c2,c1,c0)
+// sqr_add_c(a,i,c0,c1,c2) -- c+=a[i]^2 for three word number c=(c2,c1,c0)
+// sqr_add_c2(a,i,j,c0,c1,c2) -- c+=2*a[i]*a[j] for three word number c=(c2,c1,c0)
#ifdef BN_ULLONG
-/* Keep in mind that additions to multiplication result can not overflow,
- * because its high half cannot be all-ones. */
+// Keep in mind that additions to the multiplication result cannot overflow,
+// because its high half cannot be all-ones.
#define mul_add_c(a, b, c0, c1, c2) \
do { \
BN_ULONG hi; \
@@ -415,8 +415,8 @@
#else
-/* Keep in mind that additions to hi can not overflow, because the high word of
- * a multiplication result cannot be all-ones. */
+// Keep in mind that additions to hi cannot overflow, because the high word of
+// a multiplication result cannot be all-ones.
#define mul_add_c(a, b, c0, c1, c2) \
do { \
BN_ULONG ta = (a), tb = (b); \
@@ -456,7 +456,7 @@
#define sqr_add_c2(a, i, j, c0, c1, c2) mul_add_c2((a)[i], (a)[j], c0, c1, c2)
-#endif /* !BN_ULLONG */
+#endif // !BN_ULLONG
void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b) {
BN_ULONG c1, c2, c3;
diff --git a/crypto/fipsmodule/bn/internal.h b/crypto/fipsmodule/bn/internal.h
index 092e759..ecd7d6c 100644
--- a/crypto/fipsmodule/bn/internal.h
+++ b/crypto/fipsmodule/bn/internal.h
@@ -141,7 +141,7 @@
#if defined(OPENSSL_64_BIT)
#if !defined(_MSC_VER)
-/* MSVC doesn't support two-word integers on 64-bit. */
+// MSVC doesn't support two-word integers on 64-bit.
#define BN_ULLONG uint128_t
#endif
@@ -168,11 +168,11 @@
#define BN_MASK2l (0xffffUL)
#define BN_MASK2h1 (0xffff8000UL)
#define BN_MASK2h (0xffff0000UL)
-/* On some 32-bit platforms, Montgomery multiplication is done using 64-bit
- * arithmetic with SIMD instructions. On such platforms, |BN_MONT_CTX::n0|
- * needs to be two words long. Only certain 32-bit platforms actually make use
- * of n0[1] and shorter R value would suffice for the others. However,
- * currently only the assembly files know which is which. */
+// On some 32-bit platforms, Montgomery multiplication is done using 64-bit
+// arithmetic with SIMD instructions. On such platforms, |BN_MONT_CTX::n0|
+// needs to be two words long. Only certain 32-bit platforms actually make use
+// of n0[1] and a shorter R value would suffice for the others. However,
+// currently only the assembly files know which is which.
#define BN_MONT_CTX_N0_LIMBS 2
#define BN_TBIT (0x80000000UL)
#define BN_DEC_CONV (1000000000UL)
@@ -195,21 +195,21 @@
#define Hw(t) (((BN_ULONG)((t)>>BN_BITS2))&BN_MASK2)
#endif
-/* bn_correct_top decrements |bn->top| until |bn->d[top-1]| is non-zero or
- * until |top| is zero. If |bn| is zero, |bn->neg| is set to zero. */
+// bn_correct_top decrements |bn->top| until |bn->d[top-1]| is non-zero or
+// until |top| is zero. If |bn| is zero, |bn->neg| is set to zero.
void bn_correct_top(BIGNUM *bn);
-/* bn_wexpand ensures that |bn| has at least |words| works of space without
- * altering its value. It returns one on success or zero on allocation
- * failure. */
+// bn_wexpand ensures that |bn| has at least |words| words of space without
+// altering its value. It returns one on success or zero on allocation
+// failure.
int bn_wexpand(BIGNUM *bn, size_t words);
-/* bn_expand acts the same as |bn_wexpand|, but takes a number of bits rather
- * than a number of words. */
+// bn_expand acts the same as |bn_wexpand|, but takes a number of bits rather
+// than a number of words.
int bn_expand(BIGNUM *bn, size_t bits);
-/* bn_set_words sets |bn| to the value encoded in the |num| words in |words|,
- * least significant word first. */
+// bn_set_words sets |bn| to the value encoded in the |num| words in |words|,
+// least significant word first.
int bn_set_words(BIGNUM *bn, const BN_ULONG *words, size_t num);
BN_ULONG bn_mul_add_words(BN_ULONG *rp, const BN_ULONG *ap, int num, BN_ULONG w);
@@ -223,14 +223,14 @@
void bn_sqr_comba8(BN_ULONG *r, const BN_ULONG *a);
void bn_sqr_comba4(BN_ULONG *r, const BN_ULONG *a);
-/* bn_cmp_words returns a value less than, equal to or greater than zero if
- * the, length |n|, array |a| is less than, equal to or greater than |b|. */
+// bn_cmp_words returns a value less than, equal to or greater than zero if
+// the length-|n| array |a| is less than, equal to or greater than |b|.
int bn_cmp_words(const BN_ULONG *a, const BN_ULONG *b, int n);
-/* bn_cmp_words returns a value less than, equal to or greater than zero if the
- * array |a| is less than, equal to or greater than |b|. The arrays can be of
- * different lengths: |cl| gives the minimum of the two lengths and |dl| gives
- * the length of |a| minus the length of |b|. */
+// bn_cmp_part_words returns a value less than, equal to or greater than zero
+// array |a| is less than, equal to or greater than |b|. The arrays can be of
+// different lengths: |cl| gives the minimum of the two lengths and |dl| gives
+// the length of |a| minus the length of |b|.
int bn_cmp_part_words(const BN_ULONG *a, const BN_ULONG *b, int cl, int dl);
int bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,
@@ -247,25 +247,25 @@
#error "Either BN_ULLONG or BN_UMULT_LOHI must be defined on every platform."
#endif
-/* bn_mod_inverse_prime sets |out| to the modular inverse of |a| modulo |p|,
- * computed with Fermat's Little Theorem. It returns one on success and zero on
- * error. If |mont_p| is NULL, one will be computed temporarily. */
+// bn_mod_inverse_prime sets |out| to the modular inverse of |a| modulo |p|,
+// computed with Fermat's Little Theorem. It returns one on success and zero on
+// error. If |mont_p| is NULL, one will be computed temporarily.
int bn_mod_inverse_prime(BIGNUM *out, const BIGNUM *a, const BIGNUM *p,
BN_CTX *ctx, const BN_MONT_CTX *mont_p);
-/* bn_mod_inverse_secret_prime behaves like |bn_mod_inverse_prime| but uses
- * |BN_mod_exp_mont_consttime| instead of |BN_mod_exp_mont| in hopes of
- * protecting the exponent. */
+// bn_mod_inverse_secret_prime behaves like |bn_mod_inverse_prime| but uses
+// |BN_mod_exp_mont_consttime| instead of |BN_mod_exp_mont| in hopes of
+// protecting the exponent.
int bn_mod_inverse_secret_prime(BIGNUM *out, const BIGNUM *a, const BIGNUM *p,
BN_CTX *ctx, const BN_MONT_CTX *mont_p);
-/* bn_jacobi returns the Jacobi symbol of |a| and |b| (which is -1, 0 or 1), or
- * -2 on error. */
+// bn_jacobi returns the Jacobi symbol of |a| and |b| (which is -1, 0 or 1), or
+// -2 on error.
int bn_jacobi(const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx);
#if defined(__cplusplus)
-} /* extern C */
+} // extern C
#endif
-#endif /* OPENSSL_HEADER_BN_INTERNAL_H */
+#endif // OPENSSL_HEADER_BN_INTERNAL_H
diff --git a/crypto/fipsmodule/bn/jacobi.c b/crypto/fipsmodule/bn/jacobi.c
index 93e8fd9..9c909bb 100644
--- a/crypto/fipsmodule/bn/jacobi.c
+++ b/crypto/fipsmodule/bn/jacobi.c
@@ -57,24 +57,24 @@
#include "internal.h"
-/* least significant word */
+// least significant word
#define BN_lsw(n) (((n)->top == 0) ? (BN_ULONG) 0 : (n)->d[0])
int bn_jacobi(const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx) {
- /* In 'tab', only odd-indexed entries are relevant:
- * For any odd BIGNUM n,
- * tab[BN_lsw(n) & 7]
- * is $(-1)^{(n^2-1)/8}$ (using TeX notation).
- * Note that the sign of n does not matter. */
+ // In 'tab', only odd-indexed entries are relevant:
+ // For any odd BIGNUM n,
+ // tab[BN_lsw(n) & 7]
+ // is $(-1)^{(n^2-1)/8}$ (using TeX notation).
+ // Note that the sign of n does not matter.
static const int tab[8] = {0, 1, 0, -1, 0, -1, 0, 1};
- /* The Jacobi symbol is only defined for odd modulus. */
+ // The Jacobi symbol is only defined for odd modulus.
if (!BN_is_odd(b)) {
OPENSSL_PUT_ERROR(BN, BN_R_CALLED_WITH_EVEN_MODULUS);
return -2;
}
- /* Require b be positive. */
+ // Require b be positive.
if (BN_is_negative(b)) {
OPENSSL_PUT_ERROR(BN, BN_R_NEGATIVE_NUMBER);
return -2;
@@ -93,22 +93,22 @@
goto end;
}
- /* Adapted from logic to compute the Kronecker symbol, originally implemented
- * according to Henri Cohen, "A Course in Computational Algebraic Number
- * Theory" (algorithm 1.4.10). */
+ // Adapted from logic to compute the Kronecker symbol, originally implemented
+ // according to Henri Cohen, "A Course in Computational Algebraic Number
+ // Theory" (algorithm 1.4.10).
ret = 1;
while (1) {
- /* Cohen's step 3: */
+ // Cohen's step 3:
- /* B is positive and odd */
+ // B is positive and odd
if (BN_is_zero(A)) {
ret = BN_is_one(B) ? ret : 0;
goto end;
}
- /* now A is non-zero */
+ // now A is non-zero
int i = 0;
while (!BN_is_bit_set(A, i)) {
i++;
@@ -118,18 +118,18 @@
goto end;
}
if (i & 1) {
- /* i is odd */
- /* multiply 'ret' by $(-1)^{(B^2-1)/8}$ */
+ // i is odd
+ // multiply 'ret' by $(-1)^{(B^2-1)/8}$
ret = ret * tab[BN_lsw(B) & 7];
}
- /* Cohen's step 4: */
- /* multiply 'ret' by $(-1)^{(A-1)(B-1)/4}$ */
+ // Cohen's step 4:
+ // multiply 'ret' by $(-1)^{(A-1)(B-1)/4}$
if ((A->neg ? ~BN_lsw(A) : BN_lsw(A)) & BN_lsw(B) & 2) {
ret = -ret;
}
- /* (A, B) := (B mod |A|, |A|) */
+ // (A, B) := (B mod |A|, |A|)
if (!BN_nnmod(B, B, A, ctx)) {
ret = -2;
goto end;
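
// Word-sized sketch of the same Cohen-style loop (illustrative only,
// hypothetical name): returns the Jacobi symbol (a/b) for odd positive b.
#include <stdint.h>

static int jacobi_u64(uint64_t a, uint64_t b) {
  static const int tab[8] = {0, 1, 0, -1, 0, -1, 0, 1};  // (-1)^((n^2-1)/8)
  int ret = 1;
  a %= b;
  while (a != 0) {
    int i = 0;
    while ((a & 1) == 0) {  // strip factors of two from a
      a >>= 1;
      i++;
    }
    if (i & 1) {
      ret *= tab[b & 7];    // Cohen's step 3: multiply by (-1)^((b^2-1)/8)
    }
    if (a & b & 2) {        // step 4: reciprocity, (-1)^((a-1)(b-1)/4)
      ret = -ret;
    }
    uint64_t t = b % a;     // (a, b) := (b mod a, a)
    b = a;
    a = t;
  }
  return b == 1 ? ret : 0;  // a shared factor makes the symbol 0
}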
diff --git a/crypto/fipsmodule/bn/montgomery.c b/crypto/fipsmodule/bn/montgomery.c
index d70509f..8024e27 100644
--- a/crypto/fipsmodule/bn/montgomery.c
+++ b/crypto/fipsmodule/bn/montgomery.c
@@ -187,18 +187,18 @@
return 0;
}
- /* Save the modulus. */
+ // Save the modulus.
if (!BN_copy(&mont->N, mod)) {
OPENSSL_PUT_ERROR(BN, ERR_R_INTERNAL_ERROR);
return 0;
}
- /* Find n0 such that n0 * N == -1 (mod r).
- *
- * Only certain BN_BITS2<=32 platforms actually make use of n0[1]. For the
- * others, we could use a shorter R value and use faster |BN_ULONG|-based
- * math instead of |uint64_t|-based math, which would be double-precision.
- * However, currently only the assembler files know which is which. */
+ // Find n0 such that n0 * N == -1 (mod r).
+ //
+ // Only certain BN_BITS2<=32 platforms actually make use of n0[1]. For the
+ // others, we could use a shorter R value and use faster |BN_ULONG|-based
+ // math instead of |uint64_t|-based math, which would be double-precision.
+ // However, currently only the assembler files know which is which.
uint64_t n0 = bn_mont_n0(mod);
mont->n0[0] = (BN_ULONG)n0;
#if BN_MONT_CTX_N0_LIMBS == 2
@@ -207,14 +207,14 @@
mont->n0[1] = 0;
#endif
- /* Save RR = R**2 (mod N). R is the smallest power of 2**BN_BITS such that R
- * > mod. Even though the assembly on some 32-bit platforms works with 64-bit
- * values, using |BN_BITS2| here, rather than |BN_MONT_CTX_N0_LIMBS *
- * BN_BITS2|, is correct because R**2 will still be a multiple of the latter
- * as |BN_MONT_CTX_N0_LIMBS| is either one or two.
- *
- * XXX: This is not constant time with respect to |mont->N|, but it should
- * be. */
+  // Save RR = R**2 (mod N). R is the smallest power of 2**BN_BITS2 such that R
+ // > mod. Even though the assembly on some 32-bit platforms works with 64-bit
+ // values, using |BN_BITS2| here, rather than |BN_MONT_CTX_N0_LIMBS *
+ // BN_BITS2|, is correct because R**2 will still be a multiple of the latter
+ // as |BN_MONT_CTX_N0_LIMBS| is either one or two.
+ //
+ // XXX: This is not constant time with respect to |mont->N|, but it should
+ // be.
unsigned lgBigR = (BN_num_bits(mod) + (BN_BITS2 - 1)) / BN_BITS2 * BN_BITS2;
if (!bn_mod_exp_base_2_vartime(&mont->RR, lgBigR * 2, &mont->N)) {
return 0;
@@ -272,7 +272,7 @@
return 1;
}
- max = (2 * nl); /* carry is stored separately */
+ max = (2 * nl); // carry is stored separately
if (!bn_wexpand(r, max)) {
return 0;
}
@@ -281,7 +281,7 @@
np = n->d;
rp = r->d;
- /* clear the top words of T */
+ // clear the top words of T
if (max > r->top) {
OPENSSL_memset(&rp[r->top], 0, (max - r->top) * sizeof(BN_ULONG));
}
@@ -311,8 +311,8 @@
uintptr_t m;
v = bn_sub_words(rp, ap, np, nl) - carry;
- /* if subtraction result is real, then trick unconditional memcpy below to
- * perform in-place "refresh" instead of actual copy. */
+  // If the subtraction result is valid (no borrow), trick the unconditional
+  // memcpy below into an in-place "refresh" instead of an actual copy.
m = (0u - (uintptr_t)v);
nrp = (BN_ULONG *)(((uintptr_t)rp & ~m) | ((uintptr_t)ap & m));
@@ -371,7 +371,7 @@
#else
int num = mont->N.top;
- /* |bn_mul_mont| requires at least 128 bits of limbs, at least for x86. */
+ // |bn_mul_mont| requires at least 128 bits of limbs, at least for x86.
if (num < (128 / BN_BITS2) ||
a->top != num ||
b->top != num) {
@@ -382,7 +382,7 @@
return 0;
}
if (!bn_mul_mont(r->d, a->d, b->d, mont->N.d, mont->n0, num)) {
- /* The check above ensures this won't happen. */
+ // The check above ensures this won't happen.
assert(0);
OPENSSL_PUT_ERROR(BN, ERR_R_INTERNAL_ERROR);
return 0;
@@ -417,7 +417,7 @@
}
}
- /* reduce from aRR to aR */
+ // reduce from aRR to aR
if (!BN_from_montgomery_word(r, tmp, mont)) {
goto err;
}
diff --git a/crypto/fipsmodule/bn/montgomery_inv.c b/crypto/fipsmodule/bn/montgomery_inv.c
index aa2574b..c3c788a 100644
--- a/crypto/fipsmodule/bn/montgomery_inv.c
+++ b/crypto/fipsmodule/bn/montgomery_inv.c
@@ -28,47 +28,47 @@
BN_MONT_CTX_N0_LIMBS * sizeof(BN_ULONG),
BN_MONT_CTX_N0_LIMBS_DOES_NOT_MATCH_UINT64_T);
-/* LG_LITTLE_R is log_2(r). */
+// LG_LITTLE_R is log_2(r).
#define LG_LITTLE_R (BN_MONT_CTX_N0_LIMBS * BN_BITS2)
uint64_t bn_mont_n0(const BIGNUM *n) {
- /* These conditions are checked by the caller, |BN_MONT_CTX_set|. */
+ // These conditions are checked by the caller, |BN_MONT_CTX_set|.
assert(!BN_is_zero(n));
assert(!BN_is_negative(n));
assert(BN_is_odd(n));
- /* r == 2**(BN_MONT_CTX_N0_LIMBS * BN_BITS2) and LG_LITTLE_R == lg(r). This
- * ensures that we can do integer division by |r| by simply ignoring
- * |BN_MONT_CTX_N0_LIMBS| limbs. Similarly, we can calculate values modulo
- * |r| by just looking at the lowest |BN_MONT_CTX_N0_LIMBS| limbs. This is
- * what makes Montgomery multiplication efficient.
- *
- * As shown in Algorithm 1 of "Fast Prime Field Elliptic Curve Cryptography
- * with 256 Bit Primes" by Shay Gueron and Vlad Krasnov, in the loop of a
- * multi-limb Montgomery multiplication of |a * b (mod n)|, given the
- * unreduced product |t == a * b|, we repeatedly calculate:
- *
- * t1 := t % r |t1| is |t|'s lowest limb (see previous paragraph).
- * t2 := t1*n0*n
- * t3 := t + t2
- * t := t3 / r copy all limbs of |t3| except the lowest to |t|.
- *
- * In the last step, it would only make sense to ignore the lowest limb of
- * |t3| if it were zero. The middle steps ensure that this is the case:
- *
- * t3 == 0 (mod r)
- * t + t2 == 0 (mod r)
- * t + t1*n0*n == 0 (mod r)
- * t1*n0*n == -t (mod r)
- * t*n0*n == -t (mod r)
- * n0*n == -1 (mod r)
- * n0 == -1/n (mod r)
- *
- * Thus, in each iteration of the loop, we multiply by the constant factor
- * |n0|, the negative inverse of n (mod r). */
+ // r == 2**(BN_MONT_CTX_N0_LIMBS * BN_BITS2) and LG_LITTLE_R == lg(r). This
+ // ensures that we can do integer division by |r| by simply ignoring
+ // |BN_MONT_CTX_N0_LIMBS| limbs. Similarly, we can calculate values modulo
+ // |r| by just looking at the lowest |BN_MONT_CTX_N0_LIMBS| limbs. This is
+ // what makes Montgomery multiplication efficient.
+ //
+ // As shown in Algorithm 1 of "Fast Prime Field Elliptic Curve Cryptography
+ // with 256 Bit Primes" by Shay Gueron and Vlad Krasnov, in the loop of a
+ // multi-limb Montgomery multiplication of |a * b (mod n)|, given the
+ // unreduced product |t == a * b|, we repeatedly calculate:
+ //
+ // t1 := t % r |t1| is |t|'s lowest limb (see previous paragraph).
+ // t2 := t1*n0*n
+ // t3 := t + t2
+ // t := t3 / r copy all limbs of |t3| except the lowest to |t|.
+ //
+ // In the last step, it would only make sense to ignore the lowest limb of
+ // |t3| if it were zero. The middle steps ensure that this is the case:
+ //
+ // t3 == 0 (mod r)
+ // t + t2 == 0 (mod r)
+ // t + t1*n0*n == 0 (mod r)
+ // t1*n0*n == -t (mod r)
+ // t*n0*n == -t (mod r)
+ // n0*n == -1 (mod r)
+ // n0 == -1/n (mod r)
+ //
+ // Thus, in each iteration of the loop, we multiply by the constant factor
+ // |n0|, the negative inverse of n (mod r).
- /* n_mod_r = n % r. As explained above, this is done by taking the lowest
- * |BN_MONT_CTX_N0_LIMBS| limbs of |n|. */
+ // n_mod_r = n % r. As explained above, this is done by taking the lowest
+ // |BN_MONT_CTX_N0_LIMBS| limbs of |n|.
uint64_t n_mod_r = n->d[0];
#if BN_MONT_CTX_N0_LIMBS == 2
if (n->top > 1) {
@@ -79,32 +79,32 @@
return bn_neg_inv_mod_r_u64(n_mod_r);
}
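
// Single-limb sketch of the REDC step described above (illustrative only,
// hypothetical name): with r = 2^64 and n0 = -1/n mod r, one iteration
// computes a*b/r mod n. Assumes a GCC/Clang-style unsigned __int128 and
// odd n < 2^63 with a, b < n.
typedef unsigned __int128 u128;

static uint64_t mont_mul(uint64_t a, uint64_t b, uint64_t n, uint64_t n0) {
  u128 t = (u128)a * b;            // the unreduced product
  uint64_t t1 = (uint64_t)t;       // t1 := t % r (the lowest limb)
  u128 t2 = (u128)(t1 * n0) * n;   // t2 := t1*n0*n, so t + t2 == 0 (mod r)
  uint64_t v = (uint64_t)((t + t2) >> 64);  // t := t3 / r (drop the low limb)
  return v >= n ? v - n : v;       // one conditional subtraction suffices
}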
-/* bn_neg_inv_r_mod_n_u64 calculates the -1/n mod r; i.e. it calculates |v|
- * such that u*r - v*n == 1. |r| is the constant defined in |bn_mont_n0|. |n|
- * must be odd.
- *
- * This is derived from |xbinGCD| in Henry S. Warren, Jr.'s "Montgomery
- * Multiplication" (http://www.hackersdelight.org/MontgomeryMultiplication.pdf).
- * It is very similar to the MODULAR-INVERSE function in Stephen R. Dussé's and
- * Burton S. Kaliski Jr.'s "A Cryptographic Library for the Motorola DSP56000"
- * (http://link.springer.com/chapter/10.1007%2F3-540-46877-3_21).
- *
- * This is inspired by Joppe W. Bos's "Constant Time Modular Inversion"
- * (http://www.joppebos.com/files/CTInversion.pdf) so that the inversion is
- * constant-time with respect to |n|. We assume uint64_t additions,
- * subtractions, shifts, and bitwise operations are all constant time, which
- * may be a large leap of faith on 32-bit targets. We avoid division and
- * multiplication, which tend to be the most problematic in terms of timing
- * leaks.
- *
- * Most GCD implementations return values such that |u*r + v*n == 1|, so the
- * caller would have to negate the resultant |v| for the purpose of Montgomery
- * multiplication. This implementation does the negation implicitly by doing
- * the computations as a difference instead of a sum. */
+// bn_neg_inv_mod_r_u64 calculates -1/n mod r; i.e. it calculates |v|
+// such that u*r - v*n == 1. |r| is the constant defined in |bn_mont_n0|. |n|
+// must be odd.
+//
+// This is derived from |xbinGCD| in Henry S. Warren, Jr.'s "Montgomery
+// Multiplication" (http://www.hackersdelight.org/MontgomeryMultiplication.pdf).
+// It is very similar to the MODULAR-INVERSE function in Stephen R. Dussé's and
+// Burton S. Kaliski Jr.'s "A Cryptographic Library for the Motorola DSP56000"
+// (http://link.springer.com/chapter/10.1007%2F3-540-46877-3_21).
+//
+// This is inspired by Joppe W. Bos's "Constant Time Modular Inversion"
+// (http://www.joppebos.com/files/CTInversion.pdf) so that the inversion is
+// constant-time with respect to |n|. We assume uint64_t additions,
+// subtractions, shifts, and bitwise operations are all constant time, which
+// may be a large leap of faith on 32-bit targets. We avoid division and
+// multiplication, which tend to be the most problematic in terms of timing
+// leaks.
+//
+// Most GCD implementations return values such that |u*r + v*n == 1|, so the
+// caller would have to negate the resultant |v| for the purpose of Montgomery
+// multiplication. This implementation does the negation implicitly by doing
+// the computations as a difference instead of a sum.
static uint64_t bn_neg_inv_mod_r_u64(uint64_t n) {
assert(n % 2 == 1);
- /* alpha == 2**(lg r - 1) == r / 2. */
+ // alpha == 2**(lg r - 1) == r / 2.
static const uint64_t alpha = UINT64_C(1) << (LG_LITTLE_R - 1);
const uint64_t beta = n;
@@ -112,46 +112,46 @@
uint64_t u = 1;
uint64_t v = 0;
- /* The invariant maintained from here on is:
- * 2**(lg r - i) == u*2*alpha - v*beta. */
+ // The invariant maintained from here on is:
+ // 2**(lg r - i) == u*2*alpha - v*beta.
for (size_t i = 0; i < LG_LITTLE_R; ++i) {
#if BN_BITS2 == 64 && defined(BN_ULLONG)
assert((BN_ULLONG)(1) << (LG_LITTLE_R - i) ==
((BN_ULLONG)u * 2 * alpha) - ((BN_ULLONG)v * beta));
#endif
- /* Delete a common factor of 2 in u and v if |u| is even. Otherwise, set
- * |u = (u + beta) / 2| and |v = (v / 2) + alpha|. */
+ // Delete a common factor of 2 in u and v if |u| is even. Otherwise, set
+ // |u = (u + beta) / 2| and |v = (v / 2) + alpha|.
- uint64_t u_is_odd = UINT64_C(0) - (u & 1); /* Either 0xff..ff or 0. */
+ uint64_t u_is_odd = UINT64_C(0) - (u & 1); // Either 0xff..ff or 0.
- /* The addition can overflow, so use Dietz's method for it.
- *
- * Dietz calculates (x+y)/2 by (x⊕y)>>1 + x&y. This is valid for all
- * (unsigned) x and y, even when x+y overflows. Evidence for 32-bit values
- * (embedded in 64 bits to so that overflow can be ignored):
- *
- * (declare-fun x () (_ BitVec 64))
- * (declare-fun y () (_ BitVec 64))
- * (assert (let (
- * (one (_ bv1 64))
- * (thirtyTwo (_ bv32 64)))
- * (and
- * (bvult x (bvshl one thirtyTwo))
- * (bvult y (bvshl one thirtyTwo))
- * (not (=
- * (bvadd (bvlshr (bvxor x y) one) (bvand x y))
- * (bvlshr (bvadd x y) one)))
- * )))
- * (check-sat) */
- uint64_t beta_if_u_is_odd = beta & u_is_odd; /* Either |beta| or 0. */
+ // The addition can overflow, so use Dietz's method for it.
+ //
+ // Dietz calculates (x+y)/2 by (x⊕y)>>1 + x&y. This is valid for all
+ // (unsigned) x and y, even when x+y overflows. Evidence for 32-bit values
+  // (embedded in 64 bits so that overflow can be ignored):
+ //
+ // (declare-fun x () (_ BitVec 64))
+ // (declare-fun y () (_ BitVec 64))
+ // (assert (let (
+ // (one (_ bv1 64))
+ // (thirtyTwo (_ bv32 64)))
+ // (and
+ // (bvult x (bvshl one thirtyTwo))
+ // (bvult y (bvshl one thirtyTwo))
+ // (not (=
+ // (bvadd (bvlshr (bvxor x y) one) (bvand x y))
+ // (bvlshr (bvadd x y) one)))
+ // )))
+ // (check-sat)
+ uint64_t beta_if_u_is_odd = beta & u_is_odd; // Either |beta| or 0.
u = ((u ^ beta_if_u_is_odd) >> 1) + (u & beta_if_u_is_odd);
- uint64_t alpha_if_u_is_odd = alpha & u_is_odd; /* Either |alpha| or 0. */
+ uint64_t alpha_if_u_is_odd = alpha & u_is_odd; // Either |alpha| or 0.
v = (v >> 1) + alpha_if_u_is_odd;
}
- /* The invariant now shows that u*r - v*n == 1 since r == 2 * alpha. */
+ // The invariant now shows that u*r - v*n == 1 since r == 2 * alpha.
#if BN_BITS2 == 64 && defined(BN_ULLONG)
assert(1 == ((BN_ULLONG)u * 2 * alpha) - ((BN_ULLONG)v * beta));
#endif
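
// Dietz's average as a standalone helper (illustrative only, hypothetical
// name): (x ^ y) >> 1 is the differing bits at half weight, x & y is the
// shared bits at full weight, and their sum is floor((x + y) / 2) even when
// x + y wraps. E.g. avg_floor(UINT64_MAX, UINT64_MAX) == UINT64_MAX.
#include <stdint.h>

static uint64_t avg_floor(uint64_t x, uint64_t y) {
  return ((x ^ y) >> 1) + (x & y);
}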
@@ -159,9 +159,9 @@
return v;
}
-/* bn_mod_exp_base_2_vartime calculates r = 2**p (mod n). |p| must be larger
- * than log_2(n); i.e. 2**p must be larger than |n|. |n| must be positive and
- * odd. */
+// bn_mod_exp_base_2_vartime calculates r = 2**p (mod n). |p| must be larger
+// than log_2(n); i.e. 2**p must be larger than |n|. |n| must be positive and
+// odd.
int bn_mod_exp_base_2_vartime(BIGNUM *r, unsigned p, const BIGNUM *n) {
assert(!BN_is_zero(n));
assert(!BN_is_negative(n));
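
// Word-sized sketch of this routine (illustrative only, hypothetical name):
// start from the smallest power of two above n, reduce once, then double with
// a subtractive reduction, mirroring the BIGNUM code below. Assumes odd n
// with 1 < n < 2^63 and p greater than the bit length of n.
#include <stdint.h>

static uint64_t mod_exp_base_2(unsigned p, uint64_t n) {
  unsigned n_bits = 64 - (unsigned)__builtin_clzll(n);  // GCC/Clang builtin
  uint64_t r = (UINT64_C(1) << n_bits) - n;  // 2^n_bits mod n, already < n
  for (unsigned i = n_bits; i < p; i++) {
    r <<= 1;       // like BN_lshift1
    if (r >= n) {
      r -= n;      // keep r < n, as the loop below does with BN_usub
    }
  }
  return r;
}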
@@ -175,13 +175,13 @@
return 1;
}
- /* Set |r| to the smallest power of two larger than |n|. */
+ // Set |r| to the smallest power of two larger than |n|.
assert(p > n_bits);
if (!BN_set_bit(r, n_bits)) {
return 0;
}
- /* Unconditionally reduce |r|. */
+ // Unconditionally reduce |r|.
assert(BN_cmp(r, n) > 0);
if (!BN_usub(r, r, n)) {
return 0;
@@ -189,10 +189,10 @@
assert(BN_cmp(r, n) < 0);
for (unsigned i = n_bits; i < p; ++i) {
- /* This is like |BN_mod_lshift1_quick| except using |BN_usub|.
- *
- * TODO: Replace this with the use of a constant-time variant of
- * |BN_mod_lshift1_quick|. */
+ // This is like |BN_mod_lshift1_quick| except using |BN_usub|.
+ //
+ // TODO: Replace this with the use of a constant-time variant of
+ // |BN_mod_lshift1_quick|.
if (!BN_lshift1(r, r)) {
return 0;
}
diff --git a/crypto/fipsmodule/bn/mul.c b/crypto/fipsmodule/bn/mul.c
index 36a4060..7cc0e3c 100644
--- a/crypto/fipsmodule/bn/mul.c
+++ b/crypto/fipsmodule/bn/mul.c
@@ -113,15 +113,15 @@
}
#if !defined(OPENSSL_X86) || defined(OPENSSL_NO_ASM)
-/* Here follows specialised variants of bn_add_words() and bn_sub_words(). They
- * have the property performing operations on arrays of different sizes. The
- * sizes of those arrays is expressed through cl, which is the common length (
- * basicall, min(len(a),len(b)) ), and dl, which is the delta between the two
- * lengths, calculated as len(a)-len(b). All lengths are the number of
- * BN_ULONGs... For the operations that require a result array as parameter,
- * it must have the length cl+abs(dl). These functions should probably end up
- * in bn_asm.c as soon as there are assembler counterparts for the systems that
- * use assembler files. */
+// Here follow specialised variants of bn_add_words() and bn_sub_words(). They
+// have the property of performing operations on arrays of different sizes. The
+// sizes of those arrays are expressed through cl, which is the common length
+// (basically, min(len(a),len(b))), and dl, which is the delta between the two
+// lengths, calculated as len(a)-len(b). All lengths are the number of
+// BN_ULONGs... For the operations that require a result array as a parameter,
+// it must have the length cl+abs(dl). These functions should probably end up
+// in bn_asm.c as soon as there are assembler counterparts for the systems that
+// use assembler files.
static BN_ULONG bn_sub_part_words(BN_ULONG *r, const BN_ULONG *a,
const BN_ULONG *b, int cl, int dl) {
@@ -274,25 +274,24 @@
return c;
}
#else
-/* On other platforms the function is defined in asm. */
+// On other platforms the function is defined in asm.
BN_ULONG bn_sub_part_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b,
int cl, int dl);
#endif
-/* Karatsuba recursive multiplication algorithm
- * (cf. Knuth, The Art of Computer Programming, Vol. 2) */
+// Karatsuba recursive multiplication algorithm
+// (cf. Knuth, The Art of Computer Programming, Vol. 2)
-/* r is 2*n2 words in size,
- * a and b are both n2 words in size.
- * n2 must be a power of 2.
- * We multiply and return the result.
- * t must be 2*n2 words in size
- * We calculate
- * a[0]*b[0]
- * a[0]*b[0]+a[1]*b[1]+(a[0]-a[1])*(b[1]-b[0])
- * a[1]*b[1]
- */
-/* dnX may not be positive, but n2/2+dnX has to be */
+// r is 2*n2 words in size,
+// a and b are both n2 words in size.
+// n2 must be a power of 2.
+// We multiply and return the result.
+// t must be 2*n2 words in size
+// We calculate
+// a[0]*b[0]
+// a[0]*b[0]+a[1]*b[1]+(a[0]-a[1])*(b[1]-b[0])
+// a[1]*b[1]
+// dnX may not be positive, but n2/2+dnX has to be
static void bn_mul_recursive(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n2,
int dna, int dnb, BN_ULONG *t) {
int n = n2 / 2, c1, c2;
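
// The three-product identity above in miniature (illustrative only,
// hypothetical name): multiply 64-bit values via their 32-bit halves using
// (a0-a1)*(b1-b0) for the middle term, tracking its sign separately just as
// the code below does with |neg|. Assumes a GCC/Clang unsigned __int128.
#include <stdint.h>

typedef unsigned __int128 u128;

static u128 karatsuba64(uint64_t a, uint64_t b) {
  uint64_t a0 = a & 0xffffffff, a1 = a >> 32;
  uint64_t b0 = b & 0xffffffff, b1 = b >> 32;
  u128 lo = (u128)a0 * b0;  // a[0]*b[0]
  u128 hi = (u128)a1 * b1;  // a[1]*b[1]
  int64_t da = (int64_t)a0 - (int64_t)a1;
  int64_t db = (int64_t)b1 - (int64_t)b0;
  int neg = (da < 0) != (db < 0);
  u128 mid = (u128)(uint64_t)(da < 0 ? -da : da) *
             (uint64_t)(db < 0 ? -db : db);  // |a0-a1| * |b1-b0|
  // a0*b1 + a1*b0 == lo + hi +/- mid, so only three multiplications are needed.
  u128 cross = neg ? lo + hi - mid : lo + hi + mid;
  return lo + (cross << 32) + (hi << 64);
}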
@@ -300,15 +299,14 @@
unsigned int neg, zero;
BN_ULONG ln, lo, *p;
- /* Only call bn_mul_comba 8 if n2 == 8 and the
- * two arrays are complete [steve]
- */
+  // Only call bn_mul_comba8 if n2 == 8 and the
+  // two arrays are complete [steve]
if (n2 == 8 && dna == 0 && dnb == 0) {
bn_mul_comba8(r, a, b);
return;
}
- /* Else do normal multiply */
+ // Else do normal multiply
if (n2 < BN_MUL_RECURSIVE_SIZE_NORMAL) {
bn_mul_normal(r, a, n2 + dna, b, n2 + dnb);
if ((dna + dnb) < 0) {
@@ -318,21 +316,21 @@
return;
}
- /* r=(a[0]-a[1])*(b[1]-b[0]) */
+ // r=(a[0]-a[1])*(b[1]-b[0])
c1 = bn_cmp_part_words(a, &(a[n]), tna, n - tna);
c2 = bn_cmp_part_words(&(b[n]), b, tnb, tnb - n);
zero = neg = 0;
switch (c1 * 3 + c2) {
case -4:
- bn_sub_part_words(t, &(a[n]), a, tna, tna - n); /* - */
- bn_sub_part_words(&(t[n]), b, &(b[n]), tnb, n - tnb); /* - */
+ bn_sub_part_words(t, &(a[n]), a, tna, tna - n); // -
+ bn_sub_part_words(&(t[n]), b, &(b[n]), tnb, n - tnb); // -
break;
case -3:
zero = 1;
break;
case -2:
- bn_sub_part_words(t, &(a[n]), a, tna, tna - n); /* - */
- bn_sub_part_words(&(t[n]), &(b[n]), b, tnb, tnb - n); /* + */
+ bn_sub_part_words(t, &(a[n]), a, tna, tna - n); // -
+ bn_sub_part_words(&(t[n]), &(b[n]), b, tnb, tnb - n); // +
neg = 1;
break;
case -1:
@@ -341,8 +339,8 @@
zero = 1;
break;
case 2:
- bn_sub_part_words(t, a, &(a[n]), tna, n - tna); /* + */
- bn_sub_part_words(&(t[n]), b, &(b[n]), tnb, n - tnb); /* - */
+ bn_sub_part_words(t, a, &(a[n]), tna, n - tna); // +
+ bn_sub_part_words(&(t[n]), b, &(b[n]), tnb, n - tnb); // -
neg = 1;
break;
case 3:
@@ -355,7 +353,7 @@
}
if (n == 4 && dna == 0 && dnb == 0) {
- /* XXX: bn_mul_comba4 could take extra args to do this well */
+ // XXX: bn_mul_comba4 could take extra args to do this well
if (!zero) {
bn_mul_comba4(&(t[n2]), t, &(t[n]));
} else {
@@ -365,7 +363,7 @@
bn_mul_comba4(r, a, b);
bn_mul_comba4(&(r[n2]), &(a[n]), &(b[n]));
} else if (n == 8 && dna == 0 && dnb == 0) {
- /* XXX: bn_mul_comba8 could take extra args to do this well */
+ // XXX: bn_mul_comba8 could take extra args to do this well
if (!zero) {
bn_mul_comba8(&(t[n2]), t, &(t[n]));
} else {
@@ -385,24 +383,24 @@
bn_mul_recursive(&(r[n2]), &(a[n]), &(b[n]), n, dna, dnb, p);
}
- /* t[32] holds (a[0]-a[1])*(b[1]-b[0]), c1 is the sign
- * r[10] holds (a[0]*b[0])
- * r[32] holds (b[1]*b[1]) */
+  // t[32] holds (a[0]-a[1])*(b[1]-b[0]); c1 is the sign
+  // r[10] holds (a[0]*b[0])
+  // r[32] holds (a[1]*b[1])
c1 = (int)(bn_add_words(t, r, &(r[n2]), n2));
if (neg) {
- /* if t[32] is negative */
+ // if t[32] is negative
c1 -= (int)(bn_sub_words(&(t[n2]), t, &(t[n2]), n2));
} else {
- /* Might have a carry */
+ // Might have a carry
c1 += (int)(bn_add_words(&(t[n2]), &(t[n2]), t, n2));
}
- /* t[32] holds (a[0]-a[1])*(b[1]-b[0])+(a[0]*b[0])+(a[1]*b[1])
- * r[10] holds (a[0]*b[0])
- * r[32] holds (b[1]*b[1])
- * c1 holds the carry bits */
+ // t[32] holds (a[0]-a[1])*(b[1]-b[0])+(a[0]*b[0])+(a[1]*b[1])
+ // r[10] holds (a[0]*b[0])
+  // r[32] holds (a[1]*b[1])
+ // c1 holds the carry bits
c1 += (int)(bn_add_words(&(r[n]), &(r[n]), &(t[n2]), n2));
if (c1) {
p = &(r[n + n2]);
@@ -410,8 +408,8 @@
ln = (lo + c1) & BN_MASK2;
*p = ln;
- /* The overflow will stop before we over write
- * words we should not overwrite */
+      // The overflow will stop before we overwrite words we
+      // should not overwrite
if (ln < (BN_ULONG)c1) {
do {
p++;
@@ -423,9 +421,9 @@
}
}
-/* n+tn is the word length
- * t needs to be n*4 is size, as does r */
-/* tnX may not be negative but less than n */
+// n+tn is the word length
+// t needs to be n*4 in size, as does r
+// tnX may not be negative but must be less than n
static void bn_mul_part_recursive(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n,
int tna, int tnb, BN_ULONG *t) {
int i, j, n2 = n * 2;
@@ -437,33 +435,33 @@
return;
}
- /* r=(a[0]-a[1])*(b[1]-b[0]) */
+ // r=(a[0]-a[1])*(b[1]-b[0])
c1 = bn_cmp_part_words(a, &(a[n]), tna, n - tna);
c2 = bn_cmp_part_words(&(b[n]), b, tnb, tnb - n);
neg = 0;
switch (c1 * 3 + c2) {
case -4:
- bn_sub_part_words(t, &(a[n]), a, tna, tna - n); /* - */
- bn_sub_part_words(&(t[n]), b, &(b[n]), tnb, n - tnb); /* - */
+ bn_sub_part_words(t, &(a[n]), a, tna, tna - n); // -
+ bn_sub_part_words(&(t[n]), b, &(b[n]), tnb, n - tnb); // -
break;
case -3:
- /* break; */
+ // break;
case -2:
- bn_sub_part_words(t, &(a[n]), a, tna, tna - n); /* - */
- bn_sub_part_words(&(t[n]), &(b[n]), b, tnb, tnb - n); /* + */
+ bn_sub_part_words(t, &(a[n]), a, tna, tna - n); // -
+ bn_sub_part_words(&(t[n]), &(b[n]), b, tnb, tnb - n); // +
neg = 1;
break;
case -1:
case 0:
case 1:
- /* break; */
+ // break;
case 2:
- bn_sub_part_words(t, a, &(a[n]), tna, n - tna); /* + */
- bn_sub_part_words(&(t[n]), b, &(b[n]), tnb, n - tnb); /* - */
+ bn_sub_part_words(t, a, &(a[n]), tna, n - tna); // +
+ bn_sub_part_words(&(t[n]), b, &(b[n]), tnb, n - tnb); // -
neg = 1;
break;
case 3:
- /* break; */
+ // break;
case 4:
bn_sub_part_words(t, a, &(a[n]), tna, n - tna);
bn_sub_part_words(&(t[n]), &(b[n]), b, tnb, tnb - n);
@@ -480,8 +478,8 @@
bn_mul_recursive(&(t[n2]), t, &(t[n]), n, 0, 0, p);
bn_mul_recursive(r, a, b, n, 0, 0, p);
i = n / 2;
- /* If there is only a bottom half to the number,
- * just do it */
+ // If there is only a bottom half to the number,
+ // just do it
if (tna > tnb) {
j = tna - i;
} else {
@@ -492,12 +490,12 @@
bn_mul_recursive(&(r[n2]), &(a[n]), &(b[n]), i, tna - i, tnb - i, p);
OPENSSL_memset(&(r[n2 + i * 2]), 0, sizeof(BN_ULONG) * (n2 - i * 2));
} else if (j > 0) {
- /* eg, n == 16, i == 8 and tn == 11 */
+      // e.g., n == 16, i == 8 and tn == 11
bn_mul_part_recursive(&(r[n2]), &(a[n]), &(b[n]), i, tna - i, tnb - i, p);
OPENSSL_memset(&(r[n2 + tna + tnb]), 0,
sizeof(BN_ULONG) * (n2 - tna - tnb));
} else {
- /* (j < 0) eg, n == 16, i == 8 and tn == 5 */
+      // (j < 0) e.g., n == 16, i == 8 and tn == 5
OPENSSL_memset(&(r[n2]), 0, sizeof(BN_ULONG) * n2);
if (tna < BN_MUL_RECURSIVE_SIZE_NORMAL &&
tnb < BN_MUL_RECURSIVE_SIZE_NORMAL) {
@@ -505,9 +503,9 @@
} else {
for (;;) {
i /= 2;
- /* these simplified conditions work
- * exclusively because difference
- * between tna and tnb is 1 or 0 */
+          // these simplified conditions work
+          // exclusively because the difference
+          // between tna and tnb is 1 or 0
if (i < tna || i < tnb) {
bn_mul_part_recursive(&(r[n2]), &(a[n]), &(b[n]), i, tna - i,
tnb - i, p);
@@ -522,25 +520,24 @@
}
}
- /* t[32] holds (a[0]-a[1])*(b[1]-b[0]), c1 is the sign
- * r[10] holds (a[0]*b[0])
- * r[32] holds (b[1]*b[1])
- */
+  // t[32] holds (a[0]-a[1])*(b[1]-b[0]); c1 is the sign
+  // r[10] holds (a[0]*b[0])
+  // r[32] holds (a[1]*b[1])
c1 = (int)(bn_add_words(t, r, &(r[n2]), n2));
if (neg) {
- /* if t[32] is negative */
+ // if t[32] is negative
c1 -= (int)(bn_sub_words(&(t[n2]), t, &(t[n2]), n2));
} else {
- /* Might have a carry */
+ // Might have a carry
c1 += (int)(bn_add_words(&(t[n2]), &(t[n2]), t, n2));
}
- /* t[32] holds (a[0]-a[1])*(b[1]-b[0])+(a[0]*b[0])+(a[1]*b[1])
- * r[10] holds (a[0]*b[0])
- * r[32] holds (b[1]*b[1])
- * c1 holds the carry bits */
+ // t[32] holds (a[0]-a[1])*(b[1]-b[0])+(a[0]*b[0])+(a[1]*b[1])
+ // r[10] holds (a[0]*b[0])
+  // r[32] holds (a[1]*b[1])
+ // c1 holds the carry bits
c1 += (int)(bn_add_words(&(r[n]), &(r[n]), &(t[n2]), n2));
if (c1) {
p = &(r[n + n2]);
@@ -548,8 +545,8 @@
ln = (lo + c1) & BN_MASK2;
*p = ln;
- /* The overflow will stop before we over write
- * words we should not overwrite */
+      // The overflow will stop before we overwrite words we
+      // should not overwrite
if (ln < (BN_ULONG)c1) {
do {
p++;
@@ -627,7 +624,7 @@
}
bn_mul_part_recursive(rr->d, a->d, b->d, j, al - j, bl - j, t->d);
} else {
- /* al <= j || bl <= j */
+ // al <= j || bl <= j
if (!bn_wexpand(t, k * 2)) {
goto err;
}
@@ -659,7 +656,7 @@
return ret;
}
-/* tmp must have 2*n words */
+// tmp must have 2*n words
static void bn_sqr_normal(BN_ULONG *r, const BN_ULONG *a, int n, BN_ULONG *tmp) {
int i, j, max;
const BN_ULONG *ap;
@@ -687,23 +684,22 @@
bn_add_words(r, r, r, max);
- /* There will not be a carry */
+ // There will not be a carry
bn_sqr_words(tmp, a, n);
bn_add_words(r, r, tmp, max);
}
-/* r is 2*n words in size,
- * a and b are both n words in size. (There's not actually a 'b' here ...)
- * n must be a power of 2.
- * We multiply and return the result.
- * t must be 2*n words in size
- * We calculate
- * a[0]*b[0]
- * a[0]*b[0]+a[1]*b[1]+(a[0]-a[1])*(b[1]-b[0])
- * a[1]*b[1]
- */
+// r is 2*n words in size,
+// a and b are both n words in size. (There's not actually a 'b' here ...)
+// n must be a power of 2.
+// We multiply and return the result.
+// t must be 2*n words in size
+// We calculate
+// a[0]*b[0]
+// a[0]*b[0]+a[1]*b[1]+(a[0]-a[1])*(b[1]-b[0])
+// a[1]*b[1]
static void bn_sqr_recursive(BN_ULONG *r, const BN_ULONG *a, int n2, BN_ULONG *t) {
int n = n2 / 2;
int zero, c1;
@@ -720,7 +716,7 @@
bn_sqr_normal(r, a, n2, t);
return;
}
- /* r=(a[0]-a[1])*(a[1]-a[0]) */
+ // r=(a[0]-a[1])*(a[1]-a[0])
c1 = bn_cmp_words(a, &(a[n]), n);
zero = 0;
if (c1 > 0) {
@@ -731,7 +727,7 @@
zero = 1;
}
- /* The result will always be negative unless it is zero */
+ // The result will always be negative unless it is zero
p = &(t[n2 * 2]);
if (!zero) {
@@ -742,19 +738,19 @@
bn_sqr_recursive(r, a, n, p);
bn_sqr_recursive(&(r[n2]), &(a[n]), n, p);
- /* t[32] holds (a[0]-a[1])*(a[1]-a[0]), it is negative or zero
- * r[10] holds (a[0]*b[0])
- * r[32] holds (b[1]*b[1]) */
+  // t[32] holds (a[0]-a[1])*(a[1]-a[0]); it is negative or zero
+  // r[10] holds (a[0]*a[0])
+  // r[32] holds (a[1]*a[1])
c1 = (int)(bn_add_words(t, r, &(r[n2]), n2));
- /* t[32] is negative */
+ // t[32] is negative
c1 -= (int)(bn_sub_words(&(t[n2]), t, &(t[n2]), n2));
- /* t[32] holds (a[0]-a[1])*(a[1]-a[0])+(a[0]*a[0])+(a[1]*a[1])
- * r[10] holds (a[0]*a[0])
- * r[32] holds (a[1]*a[1])
- * c1 holds the carry bits */
+ // t[32] holds (a[0]-a[1])*(a[1]-a[0])+(a[0]*a[0])+(a[1]*a[1])
+ // r[10] holds (a[0]*a[0])
+ // r[32] holds (a[1]*a[1])
+ // c1 holds the carry bits
c1 += (int)(bn_add_words(&(r[n]), &(r[n]), &(t[n2]), n2));
if (c1) {
p = &(r[n + n2]);
@@ -762,8 +758,8 @@
ln = (lo + c1) & BN_MASK2;
*p = ln;
- /* The overflow will stop before we over write
- * words we should not overwrite */
+      // The overflow will stop before we overwrite words we
+      // should not overwrite
if (ln < (BN_ULONG)c1) {
do {
p++;
@@ -818,7 +814,7 @@
goto err;
}
- max = 2 * al; /* Non-zero (from above) */
+ max = 2 * al; // Non-zero (from above)
if (!bn_wexpand(rr, max)) {
goto err;
}
@@ -852,8 +848,8 @@
}
rr->neg = 0;
- /* If the most-significant half of the top word of 'a' is zero, then
- * the square of 'a' will max-1 words. */
+  // If the most-significant half of the top word of 'a' is zero, then the
+  // square of 'a' will fit in max - 1 words.
if (a->d[al - 1] == (a->d[al - 1] & BN_MASK2l)) {
rr->top = max - 1;
} else {
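For reference, the three-product recombination that bn_mul_recursive() and bn_sqr_recursive() rely on can be checked on a single machine word split into 16-bit halves (editor's sketch, not library code):

#include <stdint.h>
#include <stdio.h>

// Karatsuba with 16-bit halves: a = a1*2^16 + a0, b = b1*2^16 + b0.
// The middle coefficient a0*b1 + a1*b0 is recovered as
// a0*b0 + a1*b1 + (a0 - a1)*(b1 - b0), saving one multiplication.
int main(void) {
  uint32_t a = 0x12345678u, b = 0x9abcdef0u;
  uint32_t a0 = a & 0xffff, a1 = a >> 16;
  uint32_t b0 = b & 0xffff, b1 = b >> 16;

  uint64_t lo = (uint64_t)a0 * b0;  // a[0]*b[0]
  uint64_t hi = (uint64_t)a1 * b1;  // a[1]*b[1]
  int64_t mid = (int64_t)lo + (int64_t)hi +
                ((int64_t)a0 - a1) * ((int64_t)b1 - b0);

  uint64_t product = (hi << 32) + ((uint64_t)mid << 16) + lo;
  printf("karatsuba: %016llx\n", (unsigned long long)product);
  printf("direct:    %016llx\n", (unsigned long long)((uint64_t)a * b));
  return 0;
}

Both lines print the same value; the sign bookkeeping in the functions above exists because (a0 - a1) and (b1 - b0) may be negative when carried out on multi-word quantities.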
diff --git a/crypto/fipsmodule/bn/prime.c b/crypto/fipsmodule/bn/prime.c
index 3e2e6f5..691d0cb 100644
--- a/crypto/fipsmodule/bn/prime.c
+++ b/crypto/fipsmodule/bn/prime.c
@@ -113,13 +113,13 @@
#include "internal.h"
-/* The quick sieve algorithm approach to weeding out primes is Philip
- * Zimmermann's, as implemented in PGP. I have had a read of his comments and
- * implemented my own version. */
+// The quick sieve algorithm approach to weeding out primes is Philip
+// Zimmermann's, as implemented in PGP. I have had a read of his comments and
+// implemented my own version.
#define NUMPRIMES 2048
-/* primes contains all the primes that fit into a uint16_t. */
+// primes contains all the primes that fit into a uint16_t.
static const uint16_t primes[NUMPRIMES] = {
2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31,
37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79,
@@ -310,12 +310,12 @@
17851, 17863,
};
-/* BN_prime_checks_for_size returns the number of Miller-Rabin iterations
- * necessary for a 'bits'-bit prime, in order to maintain an error rate greater
- * than the security level for an RSA prime of that many bits (calculated using
- * the FIPS SP 800-57 security level and 186-4 Section F.1; original paper:
- * Damgaard, Landrock, Pomerance: Average case error estimates for the strong
- * probable prime test. -- Math. Comp. 61 (1993) 177-194) */
+// BN_prime_checks_for_size returns the number of Miller-Rabin iterations
+// necessary for a 'bits'-bit prime, in order to keep the error rate below
+// the security level for an RSA prime of that many bits (calculated using
+// the FIPS SP 800-57 security level and 186-4 Section F.1; original paper:
+// Damgaard, Landrock, Pomerance: Average case error estimates for the strong
+// probable prime test. -- Math. Comp. 61 (1993) 177-194)
static int BN_prime_checks_for_size(int bits) {
if (bits >= 3747) {
return 3;
@@ -371,11 +371,11 @@
int checks = BN_prime_checks_for_size(bits);
if (bits < 2) {
- /* There are no prime numbers this small. */
+ // There are no prime numbers this small.
OPENSSL_PUT_ERROR(BN, BN_R_BITS_TOO_SMALL);
return 0;
} else if (bits == 2 && safe) {
- /* The smallest safe prime (7) is three bits. */
+ // The smallest safe prime (7) is three bits.
OPENSSL_PUT_ERROR(BN, BN_R_BITS_TOO_SMALL);
return 0;
}
@@ -391,7 +391,7 @@
}
loop:
- /* make a random number and set the top and bottom bits */
+ // make a random number and set the top and bottom bits
if (add == NULL) {
if (!probable_prime(ret, bits)) {
goto err;
@@ -409,7 +409,7 @@
}
if (!BN_GENCB_call(cb, BN_GENCB_GENERATED, c1++)) {
- /* aborted */
+ // aborted
goto err;
}
@@ -421,8 +421,8 @@
goto loop;
}
} else {
- /* for "safe prime" generation, check that (p-1)/2 is prime. Since a prime
- * is odd, We just need to divide by 2 */
+ // for "safe prime" generation, check that (p-1)/2 is prime. Since a prime
+ // is odd, We just need to divide by 2
if (!BN_rshift1(t, ret)) {
goto err;
}
@@ -445,11 +445,11 @@
if (!BN_GENCB_call(cb, i, c1 - 1)) {
goto err;
}
- /* We have a safe prime test pass */
+ // We have a safe prime test pass
}
}
- /* we have a prime :-) */
+ // we have a prime :-)
found = 1;
err:
@@ -487,13 +487,13 @@
return 0;
}
- /* first look for small factors */
+ // first look for small factors
if (!BN_is_odd(a)) {
- /* a is even => a is prime if and only if a == 2 */
+ // a is even => a is prime if and only if a == 2
return BN_is_word(a, 2);
}
- /* Enhanced Miller-Rabin does not work for three. */
+ // Enhanced Miller-Rabin does not work for three.
if (BN_is_word(a, 3)) {
return 1;
}
@@ -539,7 +539,7 @@
int BN_enhanced_miller_rabin_primality_test(
enum bn_primality_result_t *out_result, const BIGNUM *w, int iterations,
BN_CTX *ctx, BN_GENCB *cb) {
- /* Enhanced Miller-Rabin is only valid on odd integers greater than 3. */
+ // Enhanced Miller-Rabin is only valid on odd integers greater than 3.
if (!BN_is_odd(w) || BN_cmp_word(w, 3) <= 0) {
OPENSSL_PUT_ERROR(BN, BN_R_INVALID_INPUT);
return 0;
@@ -561,7 +561,7 @@
goto err;
}
- /* Write w1 as m*2^a (Steps 1 and 2). */
+ // Write w1 as m*2^a (Steps 1 and 2).
int a = 0;
while (!BN_is_bit_set(w1, a)) {
a++;
@@ -585,22 +585,22 @@
goto err;
}
- /* Montgomery setup for computations mod A */
+ // Montgomery setup for computations mod A
mont = BN_MONT_CTX_new();
if (mont == NULL ||
!BN_MONT_CTX_set(mont, w, ctx)) {
goto err;
}
- /* The following loop performs in inner iteration of the Enhanced Miller-Rabin
- * Primality test (Step 4). */
+  // The following loop performs an inner iteration of the Enhanced Miller-Rabin
+  // Primality test (Step 4).
for (int i = 1; i <= iterations; i++) {
- /* Step 4.1-4.2 */
+ // Step 4.1-4.2
if (!BN_rand_range_ex(b, 2, w1)) {
goto err;
}
- /* Step 4.3-4.4 */
+ // Step 4.3-4.4
if (!BN_gcd(g, b, w, ctx)) {
goto err;
}
@@ -610,17 +610,17 @@
goto err;
}
- /* Step 4.5 */
+ // Step 4.5
if (!BN_mod_exp_mont(z, b, m, w, ctx, mont)) {
goto err;
}
- /* Step 4.6 */
+ // Step 4.6
if (BN_is_one(z) || BN_cmp(z, w1) == 0) {
goto loop;
}
- /* Step 4.7 */
+ // Step 4.7
for (int j = 1; j < a; j++) {
if (!BN_copy(x, z) || !BN_mod_mul(z, x, x, w, ctx)) {
goto err;
@@ -633,18 +633,18 @@
}
}
- /* Step 4.8-4.9 */
+ // Step 4.8-4.9
if (!BN_copy(x, z) || !BN_mod_mul(z, x, x, w, ctx)) {
goto err;
}
- /* Step 4.10-4.11 */
+ // Step 4.10-4.11
if (!BN_is_one(z) && !BN_copy(x, z)) {
goto err;
}
composite:
- /* Step 4.12-4.14 */
+ // Step 4.12-4.14
if (!BN_copy(x1, x) ||
!BN_sub_word(x1, 1) ||
!BN_gcd(g, x1, w, ctx)) {
@@ -660,7 +660,7 @@
goto err;
loop:
- /* Step 4.15 */
+ // Step 4.15
if (!BN_GENCB_call(cb, 1, i)) {
goto err;
}
@@ -688,7 +688,7 @@
return 0;
}
- /* we now have a random number 'rnd' to test. */
+ // we now have a random number 'rnd' to test.
for (i = 1; i < NUMPRIMES; i++) {
BN_ULONG mod = BN_mod_word(rnd, (BN_ULONG)primes[i]);
if (mod == (BN_ULONG)-1) {
@@ -696,12 +696,12 @@
}
mods[i] = (uint16_t)mod;
}
- /* If bits is so small that it fits into a single word then we
- * additionally don't want to exceed that many bits. */
+ // If bits is so small that it fits into a single word then we
+ // additionally don't want to exceed that many bits.
if (is_single_word) {
BN_ULONG size_limit;
if (bits == BN_BITS2) {
- /* Avoid undefined behavior. */
+ // Avoid undefined behavior.
size_limit = ~((BN_ULONG)0) - BN_get_word(rnd);
} else {
size_limit = (((BN_ULONG)1) << bits) - BN_get_word(rnd) - 1;
@@ -716,15 +716,15 @@
if (is_single_word) {
BN_ULONG rnd_word = BN_get_word(rnd);
- /* In the case that the candidate prime is a single word then
- * we check that:
- * 1) It's greater than primes[i] because we shouldn't reject
- * 3 as being a prime number because it's a multiple of
- * three.
- * 2) That it's not a multiple of a known prime. We don't
- * check that rnd-1 is also coprime to all the known
- * primes because there aren't many small primes where
- * that's true. */
+      // If the candidate prime is a single word then we check that:
+      //   1) It's greater than primes[i], because we shouldn't reject
+      //      3 as being prime just because it's a multiple of
+      //      three.
+      //   2) It's not a multiple of a known prime. We don't
+      //      check that rnd-1 is also coprime to all the known
+      //      primes because there aren't many small primes where
+      //      that's true.
for (i = 1; i < NUMPRIMES && primes[i] < rnd_word; i++) {
if ((mods[i] + delta) % primes[i] == 0) {
delta += 2;
@@ -736,8 +736,8 @@
}
} else {
for (i = 1; i < NUMPRIMES; i++) {
- /* check that rnd is not a prime and also
- * that gcd(rnd-1,primes) == 1 (except for 2) */
+      // check that rnd is not divisible by any of the small primes, and
+      // also that gcd(rnd-1, primes) == 1 (except for 2)
if (((mods[i] + delta) % primes[i]) <= 1) {
delta += 2;
if (delta > maxdelta) {
@@ -772,7 +772,7 @@
goto err;
}
- /* we need ((rnd-rem) % add) == 0 */
+ // we need ((rnd-rem) % add) == 0
if (!BN_mod(t1, rnd, add, ctx)) {
goto err;
@@ -789,11 +789,11 @@
goto err;
}
}
- /* we now have a random number 'rand' to test. */
+  // we now have a random number 'rnd' to test.
loop:
for (i = 1; i < NUMPRIMES; i++) {
- /* check that rnd is a prime */
+    // check that rnd is not divisible by any of the small primes
BN_ULONG mod = BN_mod_word(rnd, (BN_ULONG)primes[i]);
if (mod == (BN_ULONG)-1) {
goto err;
@@ -835,7 +835,7 @@
goto err;
}
- /* we need ((rnd-rem) % add) == 0 */
+ // we need ((rnd-rem) % add) == 0
if (!BN_mod(t1, q, qadd, ctx)) {
goto err;
}
@@ -857,7 +857,7 @@
}
}
- /* we now have a random number 'rand' to test. */
+  // we now have a random number 'rnd' to test.
if (!BN_lshift1(p, q)) {
goto err;
}
@@ -867,9 +867,9 @@
loop:
for (i = 1; i < NUMPRIMES; i++) {
- /* check that p and q are prime */
- /* check that for p and q
- * gcd(p-1,primes) == 1 (except for 2) */
+    // check that p and q are not divisible by any of the small primes, and
+    // that for p and q
+    // gcd(p-1, primes) == 1 (except for 2)
BN_ULONG pmod = BN_mod_word(p, (BN_ULONG)primes[i]);
BN_ULONG qmod = BN_mod_word(q, (BN_ULONG)primes[i]);
if (pmod == (BN_ULONG)-1 || qmod == (BN_ULONG)-1) {
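The mods[]/delta sieve in probable_prime() above pays for one modular reduction per small prime up front and then retests shifted candidates with cheap word arithmetic. A compact sketch of that idea, with an illustrative ten-entry prime table and a fixed starting candidate instead of a random one (the real code also enforces the top/bottom bits and bounds delta):

#include <stdint.h>
#include <stdio.h>

static const uint16_t kPrimes[] = {3, 5, 7, 11, 13, 17, 19, 23, 29, 31};
#define NPRIMES (sizeof(kPrimes) / sizeof(kPrimes[0]))

int main(void) {
  uint64_t base = 1000001;  // odd starting candidate (fixed for the demo)
  uint16_t mods[NPRIMES];
  for (size_t i = 0; i < NPRIMES; i++) {
    mods[i] = (uint16_t)(base % kPrimes[i]);  // one division per prime
  }
  uint64_t delta = 0;
again:
  for (size_t i = 0; i < NPRIMES; i++) {
    if ((mods[i] + delta) % kPrimes[i] == 0) {  // candidate divisible
      delta += 2;                               // stay odd, rescan
      goto again;
    }
  }
  printf("survivor of trial division: %llu\n",
         (unsigned long long)(base + delta));
  return 0;
}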
diff --git a/crypto/fipsmodule/bn/random.c b/crypto/fipsmodule/bn/random.c
index 8aa40cf..64e7605 100644
--- a/crypto/fipsmodule/bn/random.c
+++ b/crypto/fipsmodule/bn/random.c
@@ -158,7 +158,7 @@
goto err;
}
- /* Make a random number and set the top and bottom bits. */
+ // Make a random number and set the top and bottom bits.
RAND_bytes_with_additional_data(buf, bytes, additional_data);
if (top != BN_RAND_TOP_ANY) {
@@ -176,7 +176,7 @@
buf[0] &= ~mask;
- /* Set the bottom bit if requested, */
+  // Set the bottom bit if requested.
if (bottom == BN_RAND_BOTTOM_ODD) {
buf[bytes - 1] |= 1;
}
@@ -212,28 +212,28 @@
return 0;
}
- /* This function is used to implement steps 4 through 7 of FIPS 186-4
- * appendices B.4.2 and B.5.2. When called in those contexts, |max_exclusive|
- * is n and |min_inclusive| is one. */
+ // This function is used to implement steps 4 through 7 of FIPS 186-4
+ // appendices B.4.2 and B.5.2. When called in those contexts, |max_exclusive|
+ // is n and |min_inclusive| is one.
unsigned count = 100;
- unsigned n = BN_num_bits(max_exclusive); /* n > 0 */
+ unsigned n = BN_num_bits(max_exclusive); // n > 0
do {
if (!--count) {
OPENSSL_PUT_ERROR(BN, BN_R_TOO_MANY_ITERATIONS);
return 0;
}
- if (/* steps 4 and 5 */
+ if (// steps 4 and 5
!bn_rand_with_additional_data(r, n, BN_RAND_TOP_ANY, BN_RAND_BOTTOM_ANY,
additional_data) ||
- /* step 7 */
+ // step 7
!BN_add_word(r, min_inclusive)) {
return 0;
}
- /* Step 6. This loops if |r| >= |max_exclusive|. This is identical to
- * checking |r| > |max_exclusive| - 1 or |r| - 1 > |max_exclusive| - 2, the
- * formulation stated in FIPS 186-4. */
+ // Step 6. This loops if |r| >= |max_exclusive|. This is identical to
+ // checking |r| > |max_exclusive| - 1 or |r| - 1 > |max_exclusive| - 2, the
+ // formulation stated in FIPS 186-4.
} while (BN_cmp(r, max_exclusive) >= 0);
return 1;
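The loop above is plain rejection sampling. A machine-word sketch of the same steps (rand() stands in for the CSPRNG and __builtin_clz is an assumed GCC/Clang builtin; neither is how BoringSSL actually sources bits):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

// rand_bits() is a stand-in for the CSPRNG; rand() is not secure.
static uint32_t rand_bits(unsigned n) {
  uint32_t mask = (n >= 32) ? 0xffffffffu : ((1u << n) - 1);
  return ((uint32_t)rand()) & mask;
}

// Draw uniformly from [min_inclusive, max_exclusive): sample
// n = bit-width(max_exclusive) bits, add the offset, reject overshoots.
static int rand_range(uint32_t *out, uint32_t min_inclusive,
                      uint32_t max_exclusive) {
  unsigned n = 32 - (unsigned)__builtin_clz(max_exclusive);  // n > 0
  for (unsigned count = 100; count > 0; count--) {
    uint32_t r = rand_bits(n) + min_inclusive;  // steps 4, 5 and 7
    if (r < max_exclusive) {                    // step 6 accepts
      *out = r;
      return 1;
    }
  }
  return 0;  // too many iterations
}

int main(void) {
  uint32_t k;
  if (rand_range(&k, 1, 1000)) {
    printf("k = %u\n", (unsigned)k);
  }
  return 0;
}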
@@ -256,22 +256,22 @@
int BN_generate_dsa_nonce(BIGNUM *out, const BIGNUM *range, const BIGNUM *priv,
const uint8_t *message, size_t message_len,
BN_CTX *ctx) {
- /* We copy |priv| into a local buffer to avoid furthur exposing its
- * length. */
+  // We copy |priv| into a local buffer to avoid further exposing its
+  // length.
uint8_t private_bytes[96];
size_t todo = sizeof(priv->d[0]) * priv->top;
if (todo > sizeof(private_bytes)) {
- /* No reasonable DSA or ECDSA key should have a private key
- * this large and we don't handle this case in order to avoid
- * leaking the length of the private key. */
+ // No reasonable DSA or ECDSA key should have a private key
+ // this large and we don't handle this case in order to avoid
+ // leaking the length of the private key.
OPENSSL_PUT_ERROR(BN, BN_R_PRIVATE_KEY_TOO_LARGE);
return 0;
}
OPENSSL_memcpy(private_bytes, priv->d, todo);
OPENSSL_memset(private_bytes + todo, 0, sizeof(private_bytes) - todo);
- /* Pass a SHA512 hash of the private key and message as additional data into
- * the RBG. This is a hardening measure against entropy failure. */
+ // Pass a SHA512 hash of the private key and message as additional data into
+ // the RBG. This is a hardening measure against entropy failure.
OPENSSL_COMPILE_ASSERT(SHA512_DIGEST_LENGTH >= 32,
additional_data_is_too_large_for_sha512);
SHA512_CTX sha;
@@ -281,6 +281,6 @@
SHA512_Update(&sha, message, message_len);
SHA512_Final(digest, &sha);
- /* Select a value k from [1, range-1], following FIPS 186-4 appendix B.5.2. */
+ // Select a value k from [1, range-1], following FIPS 186-4 appendix B.5.2.
return bn_rand_range_with_additional_data(out, 1, range, digest);
}
diff --git a/crypto/fipsmodule/bn/shift.c b/crypto/fipsmodule/bn/shift.c
index 1e41342..d3fcf39 100644
--- a/crypto/fipsmodule/bn/shift.c
+++ b/crypto/fipsmodule/bn/shift.c
@@ -157,7 +157,7 @@
}
} else {
if (n == 0) {
- return 1; /* or the copying loop will go berserk */
+ return 1; // or the copying loop will go berserk
}
}
diff --git a/crypto/fipsmodule/bn/sqrt.c b/crypto/fipsmodule/bn/sqrt.c
index 0342bc0..68ccb91 100644
--- a/crypto/fipsmodule/bn/sqrt.c
+++ b/crypto/fipsmodule/bn/sqrt.c
@@ -60,9 +60,9 @@
BIGNUM *BN_mod_sqrt(BIGNUM *in, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx) {
- /* Compute a square root of |a| mod |p| using the Tonelli/Shanks algorithm
- * (cf. Henri Cohen, "A Course in Algebraic Computational Number Theory",
- * algorithm 1.5.1). |p| is assumed to be a prime. */
+ // Compute a square root of |a| mod |p| using the Tonelli/Shanks algorithm
+ // (cf. Henri Cohen, "A Course in Algebraic Computational Number Theory",
+ // algorithm 1.5.1). |p| is assumed to be a prime.
BIGNUM *ret = in;
int err = 1;
@@ -125,26 +125,25 @@
goto end;
}
- /* A = a mod p */
+ // A = a mod p
if (!BN_nnmod(A, a, p, ctx)) {
goto end;
}
- /* now write |p| - 1 as 2^e*q where q is odd */
+ // now write |p| - 1 as 2^e*q where q is odd
e = 1;
while (!BN_is_bit_set(p, e)) {
e++;
}
- /* we'll set q later (if needed) */
+ // we'll set q later (if needed)
if (e == 1) {
- /* The easy case: (|p|-1)/2 is odd, so 2 has an inverse
- * modulo (|p|-1)/2, and square roots can be computed
- * directly by modular exponentiation.
- * We have
- * 2 * (|p|+1)/4 == 1 (mod (|p|-1)/2),
- * so we can use exponent (|p|+1)/4, i.e. (|p|-3)/4 + 1.
- */
+ // The easy case: (|p|-1)/2 is odd, so 2 has an inverse
+ // modulo (|p|-1)/2, and square roots can be computed
+ // directly by modular exponentiation.
+ // We have
+ // 2 * (|p|+1)/4 == 1 (mod (|p|-1)/2),
+ // so we can use exponent (|p|+1)/4, i.e. (|p|-3)/4 + 1.
if (!BN_rshift(q, p, 2)) {
goto end;
}
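For p == 3 (mod 4) the exponent (|p|+1)/4 computed above (as (|p|-3)/4 + 1) immediately yields a square root. A small-integer demonstration, assuming uint64_t arithmetic and a toy mod_pow, not the BIGNUM code path:

#include <stdint.h>
#include <stdio.h>

// Square-and-multiply modular exponentiation; fine for p < 2^32.
static uint64_t mod_pow(uint64_t base, uint64_t exp, uint64_t p) {
  uint64_t acc = 1 % p;
  base %= p;
  while (exp != 0) {
    if (exp & 1) {
      acc = acc * base % p;
    }
    base = base * base % p;
    exp >>= 1;
  }
  return acc;
}

int main(void) {
  uint64_t p = 1000003;          // prime with p % 4 == 3
  uint64_t a = 1234 * 1234 % p;  // a known square mod p
  uint64_t x = mod_pow(a, (p + 1) / 4, p);  // the e == 1 shortcut
  printf("x = %llu, x^2 mod p = %llu (a = %llu)\n",
         (unsigned long long)x, (unsigned long long)(x * x % p),
         (unsigned long long)a);
  return 0;
}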
@@ -158,39 +157,38 @@
}
if (e == 2) {
- /* |p| == 5 (mod 8)
- *
- * In this case 2 is always a non-square since
- * Legendre(2,p) = (-1)^((p^2-1)/8) for any odd prime.
- * So if a really is a square, then 2*a is a non-square.
- * Thus for
- * b := (2*a)^((|p|-5)/8),
- * i := (2*a)*b^2
- * we have
- * i^2 = (2*a)^((1 + (|p|-5)/4)*2)
- * = (2*a)^((p-1)/2)
- * = -1;
- * so if we set
- * x := a*b*(i-1),
- * then
- * x^2 = a^2 * b^2 * (i^2 - 2*i + 1)
- * = a^2 * b^2 * (-2*i)
- * = a*(-i)*(2*a*b^2)
- * = a*(-i)*i
- * = a.
- *
- * (This is due to A.O.L. Atkin,
- * <URL:
- *http://listserv.nodak.edu/scripts/wa.exe?A2=ind9211&L=nmbrthry&O=T&P=562>,
- * November 1992.)
- */
+ // |p| == 5 (mod 8)
+ //
+ // In this case 2 is always a non-square since
+ // Legendre(2,p) = (-1)^((p^2-1)/8) for any odd prime.
+ // So if a really is a square, then 2*a is a non-square.
+ // Thus for
+ // b := (2*a)^((|p|-5)/8),
+ // i := (2*a)*b^2
+ // we have
+ // i^2 = (2*a)^((1 + (|p|-5)/4)*2)
+ // = (2*a)^((p-1)/2)
+ // = -1;
+ // so if we set
+ // x := a*b*(i-1),
+ // then
+ // x^2 = a^2 * b^2 * (i^2 - 2*i + 1)
+ // = a^2 * b^2 * (-2*i)
+ // = a*(-i)*(2*a*b^2)
+ // = a*(-i)*i
+ // = a.
+ //
+ // (This is due to A.O.L. Atkin,
+ // <URL:
+    // http://listserv.nodak.edu/scripts/wa.exe?A2=ind9211&L=nmbrthry&O=T&P=562>,
+ // November 1992.)
- /* t := 2*a */
+ // t := 2*a
if (!BN_mod_lshift1_quick(t, A, p)) {
goto end;
}
- /* b := (2*a)^((|p|-5)/8) */
+ // b := (2*a)^((|p|-5)/8)
if (!BN_rshift(q, p, 3)) {
goto end;
}
@@ -199,18 +197,18 @@
goto end;
}
- /* y := b^2 */
+ // y := b^2
if (!BN_mod_sqr(y, b, p, ctx)) {
goto end;
}
- /* t := (2*a)*b^2 - 1*/
+ // t := (2*a)*b^2 - 1
if (!BN_mod_mul(t, t, y, p, ctx) ||
!BN_sub_word(t, 1)) {
goto end;
}
- /* x = a*b*t */
+ // x = a*b*t
if (!BN_mod_mul(x, A, b, p, ctx) ||
!BN_mod_mul(x, x, t, p, ctx)) {
goto end;
@@ -223,17 +221,16 @@
goto vrfy;
}
- /* e > 2, so we really have to use the Tonelli/Shanks algorithm.
- * First, find some y that is not a square. */
+ // e > 2, so we really have to use the Tonelli/Shanks algorithm.
+ // First, find some y that is not a square.
if (!BN_copy(q, p)) {
- goto end; /* use 'q' as temp */
+ goto end; // use 'q' as temp
}
q->neg = 0;
i = 2;
do {
- /* For efficiency, try small numbers first;
- * if this fails, try random numbers.
- */
+ // For efficiency, try small numbers first;
+ // if this fails, try random numbers.
if (i < 22) {
if (!BN_set_word(y, i)) {
goto end;
@@ -247,7 +244,7 @@
goto end;
}
}
- /* now 0 <= y < |p| */
+ // now 0 <= y < |p|
if (BN_is_zero(y)) {
if (!BN_set_word(y, i)) {
goto end;
@@ -255,34 +252,33 @@
}
}
- r = bn_jacobi(y, q, ctx); /* here 'q' is |p| */
+ r = bn_jacobi(y, q, ctx); // here 'q' is |p|
if (r < -1) {
goto end;
}
if (r == 0) {
- /* m divides p */
+      // gcd(y, p) != 1, so p cannot be prime
OPENSSL_PUT_ERROR(BN, BN_R_P_IS_NOT_PRIME);
goto end;
}
} while (r == 1 && ++i < 82);
if (r != -1) {
- /* Many rounds and still no non-square -- this is more likely
- * a bug than just bad luck.
- * Even if p is not prime, we should have found some y
- * such that r == -1.
- */
+ // Many rounds and still no non-square -- this is more likely
+ // a bug than just bad luck.
+ // Even if p is not prime, we should have found some y
+ // such that r == -1.
OPENSSL_PUT_ERROR(BN, BN_R_TOO_MANY_ITERATIONS);
goto end;
}
- /* Here's our actual 'q': */
+ // Here's our actual 'q':
if (!BN_rshift(q, q, e)) {
goto end;
}
- /* Now that we have some non-square, we can find an element
- * of order 2^e by computing its q'th power. */
+ // Now that we have some non-square, we can find an element
+ // of order 2^e by computing its q'th power.
if (!BN_mod_exp_mont(y, y, q, p, ctx, NULL)) {
goto end;
}
@@ -291,37 +287,36 @@
goto end;
}
- /* Now we know that (if p is indeed prime) there is an integer
- * k, 0 <= k < 2^e, such that
- *
- * a^q * y^k == 1 (mod p).
- *
- * As a^q is a square and y is not, k must be even.
- * q+1 is even, too, so there is an element
- *
- * X := a^((q+1)/2) * y^(k/2),
- *
- * and it satisfies
- *
- * X^2 = a^q * a * y^k
- * = a,
- *
- * so it is the square root that we are looking for.
- */
+ // Now we know that (if p is indeed prime) there is an integer
+ // k, 0 <= k < 2^e, such that
+ //
+ // a^q * y^k == 1 (mod p).
+ //
+ // As a^q is a square and y is not, k must be even.
+ // q+1 is even, too, so there is an element
+ //
+ // X := a^((q+1)/2) * y^(k/2),
+ //
+ // and it satisfies
+ //
+ // X^2 = a^q * a * y^k
+ // = a,
+ //
+ // so it is the square root that we are looking for.
- /* t := (q-1)/2 (note that q is odd) */
+ // t := (q-1)/2 (note that q is odd)
if (!BN_rshift1(t, q)) {
goto end;
}
- /* x := a^((q-1)/2) */
- if (BN_is_zero(t)) /* special case: p = 2^e + 1 */
+ // x := a^((q-1)/2)
+ if (BN_is_zero(t)) // special case: p = 2^e + 1
{
if (!BN_nnmod(t, A, p, ctx)) {
goto end;
}
if (BN_is_zero(t)) {
- /* special case: a == 0 (mod p) */
+ // special case: a == 0 (mod p)
BN_zero(ret);
err = 0;
goto end;
@@ -333,33 +328,32 @@
goto end;
}
if (BN_is_zero(x)) {
- /* special case: a == 0 (mod p) */
+ // special case: a == 0 (mod p)
BN_zero(ret);
err = 0;
goto end;
}
}
- /* b := a*x^2 (= a^q) */
+ // b := a*x^2 (= a^q)
if (!BN_mod_sqr(b, x, p, ctx) ||
!BN_mod_mul(b, b, A, p, ctx)) {
goto end;
}
- /* x := a*x (= a^((q+1)/2)) */
+ // x := a*x (= a^((q+1)/2))
if (!BN_mod_mul(x, x, A, p, ctx)) {
goto end;
}
while (1) {
- /* Now b is a^q * y^k for some even k (0 <= k < 2^E
- * where E refers to the original value of e, which we
- * don't keep in a variable), and x is a^((q+1)/2) * y^(k/2).
- *
- * We have a*b = x^2,
- * y^2^(e-1) = -1,
- * b^2^(e-1) = 1.
- */
+ // Now b is a^q * y^k for some even k (0 <= k < 2^E
+ // where E refers to the original value of e, which we
+ // don't keep in a variable), and x is a^((q+1)/2) * y^(k/2).
+ //
+ // We have a*b = x^2,
+ // y^2^(e-1) = -1,
+ // b^2^(e-1) = 1.
if (BN_is_one(b)) {
if (!BN_copy(ret, x)) {
@@ -370,7 +364,7 @@
}
- /* find smallest i such that b^(2^i) = 1 */
+ // find smallest i such that b^(2^i) = 1
i = 1;
if (!BN_mod_sqr(t, b, p, ctx)) {
goto end;
@@ -387,7 +381,7 @@
}
- /* t := y^2^(e - i - 1) */
+ // t := y^2^(e - i - 1)
if (!BN_copy(t, y)) {
goto end;
}
@@ -406,8 +400,8 @@
vrfy:
if (!err) {
- /* verify the result -- the input might have been not a square
- * (test added in 0.9.8) */
+    // verify the result -- the input might not have been a square
+ // (test added in 0.9.8)
if (!BN_mod_sqr(x, ret, p, ctx)) {
err = 1;
@@ -457,30 +451,30 @@
goto err;
}
- /* We estimate that the square root of an n-bit number is 2^{n/2}. */
+ // We estimate that the square root of an n-bit number is 2^{n/2}.
if (!BN_lshift(estimate, BN_value_one(), BN_num_bits(in)/2)) {
goto err;
}
- /* This is Newton's method for finding a root of the equation |estimate|^2 -
- * |in| = 0. */
+ // This is Newton's method for finding a root of the equation |estimate|^2 -
+ // |in| = 0.
for (;;) {
- /* |estimate| = 1/2 * (|estimate| + |in|/|estimate|) */
+ // |estimate| = 1/2 * (|estimate| + |in|/|estimate|)
if (!BN_div(tmp, NULL, in, estimate, ctx) ||
!BN_add(tmp, tmp, estimate) ||
!BN_rshift1(estimate, tmp) ||
- /* |tmp| = |estimate|^2 */
+ // |tmp| = |estimate|^2
!BN_sqr(tmp, estimate, ctx) ||
- /* |delta| = |in| - |tmp| */
+ // |delta| = |in| - |tmp|
!BN_sub(delta, in, tmp)) {
OPENSSL_PUT_ERROR(BN, ERR_R_BN_LIB);
goto err;
}
delta->neg = 0;
- /* The difference between |in| and |estimate| squared is required to always
- * decrease. This ensures that the loop always terminates, but I don't have
- * a proof that it always finds the square root for a given square. */
+ // The difference between |in| and |estimate| squared is required to always
+ // decrease. This ensures that the loop always terminates, but I don't have
+ // a proof that it always finds the square root for a given square.
if (last_delta_valid && BN_cmp(delta, last_delta) >= 0) {
break;
}
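The same Newton iteration can be watched converging on machine integers (editor's sketch; __builtin_clzll is an assumed GCC/Clang builtin):

#include <stdint.h>
#include <stdio.h>

// Start from a power-of-two estimate near 2^(n/2) and iterate
// estimate = (estimate + in/estimate) / 2 until the error stops shrinking,
// mirroring the BN_sqrt loop above.
int main(void) {
  uint64_t in = 99980001;  // 9999^2
  unsigned bits = 64 - (unsigned)__builtin_clzll(in);
  uint64_t estimate = 1ull << (bits / 2);
  uint64_t last_delta = UINT64_MAX;
  for (;;) {
    estimate = (estimate + in / estimate) / 2;
    uint64_t sq = estimate * estimate;
    uint64_t delta = sq > in ? sq - in : in - sq;  // |in - estimate^2|
    if (delta >= last_delta) {
      break;  // no longer converging
    }
    last_delta = delta;
  }
  printf("sqrt(%llu) ~= %llu\n", (unsigned long long)in,
         (unsigned long long)estimate);
  return 0;
}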
diff --git a/crypto/fipsmodule/cipher/aead.c b/crypto/fipsmodule/cipher/aead.c
index ed30209..8d2ad04 100644
--- a/crypto/fipsmodule/cipher/aead.c
+++ b/crypto/fipsmodule/cipher/aead.c
@@ -101,8 +101,8 @@
ctx->aead = NULL;
}
-/* check_alias returns 1 if |out| is compatible with |in| and 0 otherwise. If
- * |in| and |out| alias, we require that |in| == |out|. */
+// check_alias returns 1 if |out| is compatible with |in| and 0 otherwise. If
+// |in| and |out| alias, we require that |in| == |out|.
static int check_alias(const uint8_t *in, size_t in_len, const uint8_t *out,
size_t out_len) {
if (!buffers_alias(in, in_len, out, out_len)) {
@@ -140,8 +140,8 @@
}
error:
- /* In the event of an error, clear the output buffer so that a caller
- * that doesn't check the return value doesn't send raw data. */
+ // In the event of an error, clear the output buffer so that a caller
+ // that doesn't check the return value doesn't send raw data.
OPENSSL_memset(out, 0, max_out_len);
*out_len = 0;
return 0;
@@ -172,8 +172,8 @@
}
error:
- /* In the event of an error, clear the output buffer so that a caller
- * that doesn't check the return value doesn't send raw data. */
+ // In the event of an error, clear the output buffer so that a caller
+ // that doesn't check the return value doesn't send raw data.
OPENSSL_memset(out, 0, in_len);
OPENSSL_memset(out_tag, 0, max_out_tag_len);
*out_tag_len = 0;
@@ -218,9 +218,9 @@
}
error:
- /* In the event of an error, clear the output buffer so that a caller
- * that doesn't check the return value doesn't try and process bad
- * data. */
+ // In the event of an error, clear the output buffer so that a caller
+  // that doesn't check the return value doesn't try to process bad
+ // data.
OPENSSL_memset(out, 0, max_out_len);
*out_len = 0;
return 0;
@@ -247,9 +247,9 @@
}
error:
- /* In the event of an error, clear the output buffer so that a caller
- * that doesn't check the return value doesn't try and process bad
- * data. */
+ // In the event of an error, clear the output buffer so that a caller
+  // that doesn't check the return value doesn't try to process bad
+ // data.
OPENSSL_memset(out, 0, in_len);
return 0;
}
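A sketch of the check_alias() contract described above: overlap is only tolerated when |in| and |out| start at the same address. buffers_alias() is modelled here with uintptr_t comparisons to avoid undefined pointer comparisons between unrelated objects; the demo_ names are illustrative:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static int demo_buffers_alias(const uint8_t *a, size_t a_len,
                              const uint8_t *b, size_t b_len) {
  uintptr_t a0 = (uintptr_t)a, b0 = (uintptr_t)b;
  return a0 + a_len > b0 && b0 + b_len > a0;  // ranges intersect
}

static int demo_check_alias(const uint8_t *in, size_t in_len,
                            const uint8_t *out, size_t out_len) {
  if (!demo_buffers_alias(in, in_len, out, out_len)) {
    return 1;  // disjoint buffers are always fine
  }
  return in == out;  // overlapping: only exact aliasing is allowed
}

int main(void) {
  uint8_t buf[32];
  printf("in == out:       %d\n", demo_check_alias(buf, 16, buf, 32));
  printf("partial overlap: %d\n", demo_check_alias(buf, 16, buf + 8, 16));
  return 0;
}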
diff --git a/crypto/fipsmodule/cipher/cipher.c b/crypto/fipsmodule/cipher/cipher.c
index d116715..8f0d788 100644
--- a/crypto/fipsmodule/cipher/cipher.c
+++ b/crypto/fipsmodule/cipher/cipher.c
@@ -141,12 +141,12 @@
}
if (cipher) {
- /* Ensure a context left from last time is cleared (the previous check
- * attempted to avoid this if the same ENGINE and EVP_CIPHER could be
- * used). */
+ // Ensure a context left from last time is cleared (the previous check
+ // attempted to avoid this if the same ENGINE and EVP_CIPHER could be
+ // used).
if (ctx->cipher) {
EVP_CIPHER_CTX_cleanup(ctx);
- /* Restore encrypt and flags */
+ // Restore encrypt and flags
ctx->encrypt = enc;
}
@@ -177,7 +177,7 @@
return 0;
}
- /* we assume block size is a power of 2 in *cryptUpdate */
+ // we assume block size is a power of 2 in *cryptUpdate
assert(ctx->cipher->block_size == 1 || ctx->cipher->block_size == 8 ||
ctx->cipher->block_size == 16);
@@ -189,7 +189,7 @@
case EVP_CIPH_CFB_MODE:
ctx->num = 0;
- /* fall-through */
+ // fall-through
case EVP_CIPH_CBC_MODE:
assert(EVP_CIPHER_CTX_iv_length(ctx) <= sizeof(ctx->iv));
@@ -202,7 +202,7 @@
case EVP_CIPH_CTR_MODE:
case EVP_CIPH_OFB_MODE:
ctx->num = 0;
- /* Don't reuse IV for CTR mode */
+ // Don't reuse IV for CTR mode
if (iv) {
OPENSSL_memcpy(ctx->iv, iv, EVP_CIPHER_CTX_iv_length(ctx));
}
@@ -388,8 +388,8 @@
return 0;
}
- /* if we have 'decrypted' a multiple of block size, make sure
- * we have a copy of this last block */
+ // if we have 'decrypted' a multiple of block size, make sure
+ // we have a copy of this last block
if (b > 1 && !ctx->buf_len) {
*out_len -= b;
ctx->final_used = 1;
@@ -437,8 +437,8 @@
}
assert(b <= sizeof(ctx->final));
- /* The following assumes that the ciphertext has been authenticated.
- * Otherwise it provides a padding oracle. */
+ // The following assumes that the ciphertext has been authenticated.
+ // Otherwise it provides a padding oracle.
n = ctx->final[b - 1];
if (n == 0 || n > (int)b) {
OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
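The decrypt-final path above only range-checks the final pad byte because, as the comment warns, the ciphertext is assumed to have been authenticated already. For comparison, a full PKCS#7-style strip looks like this (illustrative sketch, not the library routine):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

// The last byte gives the pad length n; all n trailing bytes must equal n.
// On unauthenticated data, reporting which check failed is a padding oracle.
static int strip_padding(const uint8_t *block, size_t b, size_t *out_len) {
  uint8_t n = block[b - 1];
  if (n == 0 || n > b) {
    return 0;  // bad decrypt
  }
  for (size_t i = 0; i < n; i++) {
    if (block[b - 1 - i] != n) {
      return 0;  // bad decrypt
    }
  }
  *out_len = b - n;
  return 1;
}

int main(void) {
  uint8_t block[16] = "hello world";  // 11 bytes of data
  memset(block + 11, 5, 5);           // pad: 05 05 05 05 05
  size_t len;
  if (strip_padding(block, sizeof(block), &len)) {
    printf("plaintext length: %zu\n", len);
  }
  return 0;
}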
diff --git a/crypto/fipsmodule/cipher/e_aes.c b/crypto/fipsmodule/cipher/e_aes.c
index 2c6fc41..bd9847c 100644
--- a/crypto/fipsmodule/cipher/e_aes.c
+++ b/crypto/fipsmodule/cipher/e_aes.c
@@ -68,7 +68,7 @@
#endif
-OPENSSL_MSVC_PRAGMA(warning(disable: 4702)) /* Unreachable code. */
+OPENSSL_MSVC_PRAGMA(warning(disable: 4702)) // Unreachable code.
typedef struct {
union {
@@ -86,14 +86,14 @@
union {
double align;
AES_KEY ks;
- } ks; /* AES key schedule to use */
- int key_set; /* Set if key initialised */
- int iv_set; /* Set if an iv is set */
+ } ks; // AES key schedule to use
+ int key_set; // Set if key initialised
+ int iv_set; // Set if an iv is set
GCM128_CONTEXT gcm;
- uint8_t *iv; /* Temporary IV store */
- int ivlen; /* IV length */
+ uint8_t *iv; // Temporary IV store
+ int ivlen; // IV length
int taglen;
- int iv_gen; /* It is OK to generate IVs */
+ int iv_gen; // It is OK to generate IVs
ctr128_f ctr;
} EVP_AES_GCM_CTX;
@@ -125,8 +125,8 @@
#if defined(BSAES)
-/* On platforms where BSAES gets defined (just above), then these functions are
- * provided by asm. */
+// On platforms where BSAES gets defined (just above), these functions are
+// provided by asm.
void bsaes_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t length,
const AES_KEY *key, uint8_t ivec[16], int enc);
void bsaes_ctr32_encrypt_blocks(const uint8_t *in, uint8_t *out, size_t len,
@@ -136,8 +136,8 @@
return 0;
}
-/* On other platforms, bsaes_capable() will always return false and so the
- * following will never be called. */
+// On other platforms, bsaes_capable() will always return false and so the
+// following will never be called.
static void bsaes_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t length,
const AES_KEY *key, uint8_t ivec[16], int enc) {
abort();
@@ -151,8 +151,8 @@
#endif
#if defined(VPAES)
-/* On platforms where VPAES gets defined (just above), then these functions are
- * provided by asm. */
+// On platforms where VPAES gets defined (just above), these functions are
+// provided by asm.
int vpaes_set_encrypt_key(const uint8_t *userKey, int bits, AES_KEY *key);
int vpaes_set_decrypt_key(const uint8_t *userKey, int bits, AES_KEY *key);
@@ -166,8 +166,8 @@
return 0;
}
-/* On other platforms, vpaes_capable() will always return false and so the
- * following will never be called. */
+// On other platforms, vpaes_capable() will always return false and so the
+// following will never be called.
static int vpaes_set_encrypt_key(const uint8_t *userKey, int bits,
AES_KEY *key) {
abort();
@@ -203,8 +203,8 @@
#else
-/* On other platforms, aesni_capable() will always return false and so the
- * following will never be called. */
+// On other platforms, aesni_capable() will always return false and so the
+// following will never be called.
static void aesni_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) {
abort();
}
@@ -404,7 +404,7 @@
if (key) {
gctx->ctr =
aes_ctr_set_key(&gctx->ks.ks, &gctx->gcm, NULL, key, ctx->key_len);
- /* If we have an iv can set it directly, otherwise use saved IV. */
+    // If we have an IV, set it directly; otherwise use the saved IV.
if (iv == NULL && gctx->iv_set) {
iv = gctx->iv;
}
@@ -414,7 +414,7 @@
}
gctx->key_set = 1;
} else {
- /* If key set use IV, otherwise copy */
+    // If the key is set, use the IV now; otherwise save it for later.
if (gctx->key_set) {
CRYPTO_gcm128_setiv(&gctx->gcm, &gctx->ks.ks, iv, gctx->ivlen);
} else {
@@ -434,7 +434,7 @@
}
}
-/* increment counter (64-bit int) by 1 */
+// increment counter (64-bit int) by 1
static void ctr64_inc(uint8_t *counter) {
int n = 8;
uint8_t c;
@@ -467,7 +467,7 @@
return 0;
}
- /* Allocate memory for IV if needed */
+ // Allocate memory for IV if needed
if (arg > EVP_MAX_IV_LENGTH && arg > gctx->ivlen) {
if (gctx->iv != c->iv) {
OPENSSL_free(gctx->iv);
@@ -496,14 +496,14 @@
return 1;
case EVP_CTRL_GCM_SET_IV_FIXED:
- /* Special case: -1 length restores whole IV */
+ // Special case: -1 length restores whole IV
if (arg == -1) {
OPENSSL_memcpy(gctx->iv, ptr, gctx->ivlen);
gctx->iv_gen = 1;
return 1;
}
- /* Fixed field must be at least 4 bytes and invocation field
- * at least 8. */
+ // Fixed field must be at least 4 bytes and invocation field
+ // at least 8.
if (arg < 4 || (gctx->ivlen - arg) < 8) {
return 0;
}
@@ -525,9 +525,9 @@
arg = gctx->ivlen;
}
OPENSSL_memcpy(ptr, gctx->iv + gctx->ivlen - arg, arg);
- /* Invocation field will be at least 8 bytes in size and
- * so no need to check wrap around or increment more than
- * last 8 bytes. */
+      // The invocation field will be at least 8 bytes in size, so there
+      // is no need to check for wraparound or to increment more than the
+      // last 8 bytes.
ctr64_inc(gctx->iv + gctx->ivlen - 8);
gctx->iv_set = 1;
return 1;
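The fixed/invocation IV layout negotiated via EVP_CTRL_GCM_SET_IV_FIXED can be pictured with a 12-byte IV: 4 fixed bytes plus an 8-byte big-endian counter bumped per record, as ctr64_inc() does (editor's sketch with made-up fixed bytes):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void) {
  uint8_t iv[12];
  memcpy(iv, "\x00\x01\x02\x03", 4);  // fixed field (>= 4 bytes)
  memset(iv + 4, 0, 8);               // invocation field (>= 8 bytes)
  for (int rec = 0; rec < 3; rec++) {
    // big-endian increment of the last 8 bytes, as in ctr64_inc()
    for (int i = 11; i >= 4; i--) {
      if (++iv[i] != 0) {
        break;  // no carry into the next byte
      }
    }
    printf("record %d IV:", rec);
    for (int i = 0; i < 12; i++) {
      printf(" %02x", iv[i]);
    }
    printf("\n");
  }
  return 0;
}

Because the invocation field is 8 bytes, the counter cannot repeat within 2^64 records under one key, which is what makes reusing the fixed field safe.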
@@ -565,7 +565,7 @@
size_t len) {
EVP_AES_GCM_CTX *gctx = ctx->cipher_data;
- /* If not set up, return error */
+ // If not set up, return error
if (!gctx->key_set) {
return -1;
}
@@ -613,7 +613,7 @@
}
CRYPTO_gcm128_tag(&gctx->gcm, ctx->buf, 16);
gctx->taglen = 16;
- /* Don't reuse the IV */
+ // Don't reuse the IV
gctx->iv_set = 0;
return 0;
}
@@ -813,7 +813,7 @@
#if !defined(OPENSSL_NO_ASM) && \
(defined(OPENSSL_X86_64) || defined(OPENSSL_X86))
-/* AES-NI section. */
+// AES-NI section.
static char aesni_capable(void) {
return (OPENSSL_ia32cap_P[1] & (1 << (57 - 32))) != 0;
@@ -880,8 +880,8 @@
aesni_set_encrypt_key(key, ctx->key_len * 8, &gctx->ks.ks);
CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks, (block128_f)aesni_encrypt, 1);
gctx->ctr = (ctr128_f)aesni_ctr32_encrypt_blocks;
- /* If we have an iv can set it directly, otherwise use
- * saved IV. */
+      // If we have an IV, set it directly; otherwise use the
+      // saved IV.
if (iv == NULL && gctx->iv_set) {
iv = gctx->iv;
}
@@ -891,7 +891,7 @@
}
gctx->key_set = 1;
} else {
- /* If key set use IV, otherwise copy */
+      // If the key is set, use the IV now; otherwise save it for later.
if (gctx->key_set) {
CRYPTO_gcm128_setiv(&gctx->gcm, &gctx->ks.ks, iv, gctx->ivlen);
} else {
@@ -1104,7 +1104,7 @@
} \
}
-#else /* ^^^ OPENSSL_X86_64 || OPENSSL_X86 */
+#else // ^^^ OPENSSL_X86_64 || OPENSSL_X86
static char aesni_capable(void) {
return 0;
@@ -1158,7 +1158,7 @@
if (key_bits != 128 && key_bits != 256) {
OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_KEY_LENGTH);
- return 0; /* EVP_AEAD_CTX_init should catch this. */
+ return 0; // EVP_AEAD_CTX_init should catch this.
}
if (tag_len == EVP_AEAD_DEFAULT_TAG_LENGTH) {
diff --git a/crypto/fipsmodule/cipher/internal.h b/crypto/fipsmodule/cipher/internal.h
index 02335e0..7b5f23f 100644
--- a/crypto/fipsmodule/cipher/internal.h
+++ b/crypto/fipsmodule/cipher/internal.h
@@ -70,10 +70,10 @@
#endif
-/* EVP_CIPH_MODE_MASK contains the bits of |flags| that represent the mode. */
+// EVP_CIPH_MODE_MASK contains the bits of |flags| that represent the mode.
#define EVP_CIPH_MODE_MASK 0x3f
-/* EVP_AEAD represents a specific AEAD algorithm. */
+// EVP_AEAD represents a specific AEAD algorithm.
struct evp_aead_st {
uint8_t key_len;
uint8_t nonce_len;
@@ -81,8 +81,8 @@
uint8_t max_tag_len;
int seal_scatter_supports_extra_in;
- /* init initialises an |EVP_AEAD_CTX|. If this call returns zero then
- * |cleanup| will not be called for that context. */
+ // init initialises an |EVP_AEAD_CTX|. If this call returns zero then
+ // |cleanup| will not be called for that context.
int (*init)(EVP_AEAD_CTX *, const uint8_t *key, size_t key_len,
size_t tag_len);
int (*init_with_direction)(EVP_AEAD_CTX *, const uint8_t *key, size_t key_len,
@@ -112,18 +112,18 @@
size_t extra_in_len);
};
-/* aes_ctr_set_key initialises |*aes_key| using |key_bytes| bytes from |key|,
- * where |key_bytes| must either be 16, 24 or 32. If not NULL, |*out_block| is
- * set to a function that encrypts single blocks. If not NULL, |*gcm_ctx| is
- * initialised to do GHASH with the given key. It returns a function for
- * optimised CTR-mode, or NULL if CTR-mode should be built using
- * |*out_block|. */
+// aes_ctr_set_key initialises |*aes_key| using |key_bytes| bytes from |key|,
+// where |key_bytes| must either be 16, 24 or 32. If not NULL, |*out_block| is
+// set to a function that encrypts single blocks. If not NULL, |*gcm_ctx| is
+// initialised to do GHASH with the given key. It returns a function for
+// optimised CTR-mode, or NULL if CTR-mode should be built using
+// |*out_block|.
ctr128_f aes_ctr_set_key(AES_KEY *aes_key, GCM128_CONTEXT *gcm_ctx,
block128_f *out_block, const uint8_t *key,
size_t key_bytes);
#if defined(__cplusplus)
-} /* extern C */
+} // extern C
#endif
-#endif /* OPENSSL_HEADER_CIPHER_INTERNAL_H */
+#endif // OPENSSL_HEADER_CIPHER_INTERNAL_H
diff --git a/crypto/fipsmodule/delocate.h b/crypto/fipsmodule/delocate.h
index 0153a4e..065a21c 100644
--- a/crypto/fipsmodule/delocate.h
+++ b/crypto/fipsmodule/delocate.h
@@ -24,12 +24,12 @@
#define DEFINE_BSS_GET(type, name) \
static type name __attribute__((used)); \
type *name##_bss_get(void);
-/* For FIPS builds we require that CRYPTO_ONCE_INIT be zero. */
+// For FIPS builds we require that CRYPTO_ONCE_INIT be zero.
#define DEFINE_STATIC_ONCE(name) DEFINE_BSS_GET(CRYPTO_once_t, name)
-/* For FIPS builds we require that CRYPTO_STATIC_MUTEX_INIT be zero. */
+// For FIPS builds we require that CRYPTO_STATIC_MUTEX_INIT be zero.
#define DEFINE_STATIC_MUTEX(name) \
DEFINE_BSS_GET(struct CRYPTO_STATIC_MUTEX, name)
-/* For FIPS builds we require that CRYPTO_EX_DATA_CLASS_INIT be zero. */
+// For FIPS builds we require that CRYPTO_EX_DATA_CLASS_INIT be zero.
#define DEFINE_STATIC_EX_DATA_CLASS(name) \
DEFINE_BSS_GET(CRYPTO_EX_DATA_CLASS, name)
#else
@@ -60,29 +60,29 @@
} \
static void name##_do_init(type *out)
-/* DEFINE_METHOD_FUNCTION defines a function named |name| which returns a
- * method table of type const |type|*. In FIPS mode, to avoid rel.ro data, it
- * is split into a CRYPTO_once_t-guarded initializer in the module and
- * unhashed, non-module accessor functions to space reserved in the BSS. The
- * method table is initialized by a caller-supplied function which takes a
- * parameter named |out| of type |type|*. The caller should follow the macro
- * invocation with the body of this function:
- *
- * DEFINE_METHOD_FUNCTION(EVP_MD, EVP_md4) {
- * out->type = NID_md4;
- * out->md_size = MD4_DIGEST_LENGTH;
- * out->flags = 0;
- * out->init = md4_init;
- * out->update = md4_update;
- * out->final = md4_final;
- * out->block_size = 64;
- * out->ctx_size = sizeof(MD4_CTX);
- * }
- *
- * This mechanism does not use a static initializer because their execution
- * order is undefined. See FIPS.md for more details. */
+// DEFINE_METHOD_FUNCTION defines a function named |name| which returns a
+// method table of type const |type|*. In FIPS mode, to avoid rel.ro data, it
+// is split into a CRYPTO_once_t-guarded initializer in the module and
+// unhashed, non-module accessor functions to space reserved in the BSS. The
+// method table is initialized by a caller-supplied function which takes a
+// parameter named |out| of type |type|*. The caller should follow the macro
+// invocation with the body of this function:
+//
+// DEFINE_METHOD_FUNCTION(EVP_MD, EVP_md4) {
+// out->type = NID_md4;
+// out->md_size = MD4_DIGEST_LENGTH;
+// out->flags = 0;
+// out->init = md4_init;
+// out->update = md4_update;
+// out->final = md4_final;
+// out->block_size = 64;
+// out->ctx_size = sizeof(MD4_CTX);
+// }
+//
+// This mechanism does not use a static initializer because the execution
+// order of static initializers is undefined. See FIPS.md for more details.
#define DEFINE_METHOD_FUNCTION(type, name) DEFINE_DATA(type, name, const)
#define DEFINE_LOCAL_DATA(type, name) DEFINE_DATA(type, name, static const)
-#endif /* OPENSSL_HEADER_FIPSMODULE_DELOCATE_H */
+#endif // OPENSSL_HEADER_FIPSMODULE_DELOCATE_H
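A stripped-down model of the DEFINE_BSS_GET pattern above: the object lives in zero-initialized .bss and is reached only through an accessor function, so no relocated data pointer is required. This is simplified from the real macro, which also tags the object __attribute__((used)) and only declares the accessor:

#include <stdio.h>

#define DEMO_DEFINE_BSS_GET(type, name) \
  static type name;                     \
  type *name##_bss_get(void) { return &name; }

DEMO_DEFINE_BSS_GET(unsigned, counter)

int main(void) {
  *counter_bss_get() += 1;  // all access goes through the getter
  printf("counter = %u\n", *counter_bss_get());
  return 0;
}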
diff --git a/crypto/fipsmodule/des/des.c b/crypto/fipsmodule/des/des.c
index a6c177c..2b0fdcd 100644
--- a/crypto/fipsmodule/des/des.c
+++ b/crypto/fipsmodule/des/des.c
@@ -62,7 +62,7 @@
static const uint32_t des_skb[8][64] = {
- {/* for C bits (numbered as per FIPS 46) 1 2 3 4 5 6 */
+ { // for C bits (numbered as per FIPS 46) 1 2 3 4 5 6
0x00000000L, 0x00000010L, 0x20000000L, 0x20000010L, 0x00010000L,
0x00010010L, 0x20010000L, 0x20010010L, 0x00000800L, 0x00000810L,
0x20000800L, 0x20000810L, 0x00010800L, 0x00010810L, 0x20010800L,
@@ -76,7 +76,7 @@
0x20080020L, 0x20080030L, 0x00090020L, 0x00090030L, 0x20090020L,
0x20090030L, 0x00080820L, 0x00080830L, 0x20080820L, 0x20080830L,
0x00090820L, 0x00090830L, 0x20090820L, 0x20090830L, },
- {/* for C bits (numbered as per FIPS 46) 7 8 10 11 12 13 */
+ { // for C bits (numbered as per FIPS 46) 7 8 10 11 12 13
0x00000000L, 0x02000000L, 0x00002000L, 0x02002000L, 0x00200000L,
0x02200000L, 0x00202000L, 0x02202000L, 0x00000004L, 0x02000004L,
0x00002004L, 0x02002004L, 0x00200004L, 0x02200004L, 0x00202004L,
@@ -90,7 +90,7 @@
0x10002400L, 0x12002400L, 0x10200400L, 0x12200400L, 0x10202400L,
0x12202400L, 0x10000404L, 0x12000404L, 0x10002404L, 0x12002404L,
0x10200404L, 0x12200404L, 0x10202404L, 0x12202404L, },
- {/* for C bits (numbered as per FIPS 46) 14 15 16 17 19 20 */
+ { // for C bits (numbered as per FIPS 46) 14 15 16 17 19 20
0x00000000L, 0x00000001L, 0x00040000L, 0x00040001L, 0x01000000L,
0x01000001L, 0x01040000L, 0x01040001L, 0x00000002L, 0x00000003L,
0x00040002L, 0x00040003L, 0x01000002L, 0x01000003L, 0x01040002L,
@@ -104,7 +104,7 @@
0x08040200L, 0x08040201L, 0x09000200L, 0x09000201L, 0x09040200L,
0x09040201L, 0x08000202L, 0x08000203L, 0x08040202L, 0x08040203L,
0x09000202L, 0x09000203L, 0x09040202L, 0x09040203L, },
- {/* for C bits (numbered as per FIPS 46) 21 23 24 26 27 28 */
+ { // for C bits (numbered as per FIPS 46) 21 23 24 26 27 28
0x00000000L, 0x00100000L, 0x00000100L, 0x00100100L, 0x00000008L,
0x00100008L, 0x00000108L, 0x00100108L, 0x00001000L, 0x00101000L,
0x00001100L, 0x00101100L, 0x00001008L, 0x00101008L, 0x00001108L,
@@ -118,7 +118,7 @@
0x04020100L, 0x04120100L, 0x04020008L, 0x04120008L, 0x04020108L,
0x04120108L, 0x04021000L, 0x04121000L, 0x04021100L, 0x04121100L,
0x04021008L, 0x04121008L, 0x04021108L, 0x04121108L, },
- {/* for D bits (numbered as per FIPS 46) 1 2 3 4 5 6 */
+ { // for D bits (numbered as per FIPS 46) 1 2 3 4 5 6
0x00000000L, 0x10000000L, 0x00010000L, 0x10010000L, 0x00000004L,
0x10000004L, 0x00010004L, 0x10010004L, 0x20000000L, 0x30000000L,
0x20010000L, 0x30010000L, 0x20000004L, 0x30000004L, 0x20010004L,
@@ -132,7 +132,7 @@
0x00111000L, 0x10111000L, 0x00101004L, 0x10101004L, 0x00111004L,
0x10111004L, 0x20101000L, 0x30101000L, 0x20111000L, 0x30111000L,
0x20101004L, 0x30101004L, 0x20111004L, 0x30111004L, },
- {/* for D bits (numbered as per FIPS 46) 8 9 11 12 13 14 */
+ { // for D bits (numbered as per FIPS 46) 8 9 11 12 13 14
0x00000000L, 0x08000000L, 0x00000008L, 0x08000008L, 0x00000400L,
0x08000400L, 0x00000408L, 0x08000408L, 0x00020000L, 0x08020000L,
0x00020008L, 0x08020008L, 0x00020400L, 0x08020400L, 0x00020408L,
@@ -146,7 +146,7 @@
0x02000009L, 0x0A000009L, 0x02000401L, 0x0A000401L, 0x02000409L,
0x0A000409L, 0x02020001L, 0x0A020001L, 0x02020009L, 0x0A020009L,
0x02020401L, 0x0A020401L, 0x02020409L, 0x0A020409L, },
- {/* for D bits (numbered as per FIPS 46) 16 17 18 19 20 21 */
+ { // for D bits (numbered as per FIPS 46) 16 17 18 19 20 21
0x00000000L, 0x00000100L, 0x00080000L, 0x00080100L, 0x01000000L,
0x01000100L, 0x01080000L, 0x01080100L, 0x00000010L, 0x00000110L,
0x00080010L, 0x00080110L, 0x01000010L, 0x01000110L, 0x01080010L,
@@ -160,7 +160,7 @@
0x00280200L, 0x00280300L, 0x01200200L, 0x01200300L, 0x01280200L,
0x01280300L, 0x00200210L, 0x00200310L, 0x00280210L, 0x00280310L,
0x01200210L, 0x01200310L, 0x01280210L, 0x01280310L, },
- {/* for D bits (numbered as per FIPS 46) 22 23 24 25 27 28 */
+ { // for D bits (numbered as per FIPS 46) 22 23 24 25 27 28
0x00000000L, 0x04000000L, 0x00040000L, 0x04040000L, 0x00000002L,
0x04000002L, 0x00040002L, 0x04040002L, 0x00002000L, 0x04002000L,
0x00042000L, 0x04042000L, 0x00002002L, 0x04002002L, 0x00042002L,
@@ -176,7 +176,7 @@
0x00002822L, 0x04002822L, 0x00042822L, 0x04042822L, }};
static const uint32_t DES_SPtrans[8][64] = {
- {/* nibble 0 */
+ { // nibble 0
0x02080800L, 0x00080000L, 0x02000002L, 0x02080802L, 0x02000000L,
0x00080802L, 0x00080002L, 0x02000002L, 0x00080802L, 0x02080800L,
0x02080000L, 0x00000802L, 0x02000802L, 0x02000000L, 0x00000000L,
@@ -190,7 +190,7 @@
0x02080000L, 0x02000802L, 0x02000000L, 0x00000802L, 0x00080002L,
0x00000000L, 0x00080000L, 0x02000000L, 0x02000802L, 0x02080800L,
0x00000002L, 0x02080002L, 0x00000800L, 0x00080802L, },
- {/* nibble 1 */
+ { // nibble 1
0x40108010L, 0x00000000L, 0x00108000L, 0x40100000L, 0x40000010L,
0x00008010L, 0x40008000L, 0x00108000L, 0x00008000L, 0x40100010L,
0x00000010L, 0x40008000L, 0x00100010L, 0x40108000L, 0x40100000L,
@@ -204,7 +204,7 @@
0x00000000L, 0x40000010L, 0x00000010L, 0x40108010L, 0x00108000L,
0x40100000L, 0x40100010L, 0x00100000L, 0x00008010L, 0x40008000L,
0x40008010L, 0x00000010L, 0x40100000L, 0x00108000L, },
- {/* nibble 2 */
+ { // nibble 2
0x04000001L, 0x04040100L, 0x00000100L, 0x04000101L, 0x00040001L,
0x04000000L, 0x04000101L, 0x00040100L, 0x04000100L, 0x00040000L,
0x04040000L, 0x00000001L, 0x04040101L, 0x00000101L, 0x00000001L,
@@ -218,7 +218,7 @@
0x04000000L, 0x04040101L, 0x00040000L, 0x04000100L, 0x04000101L,
0x00040100L, 0x04000100L, 0x00000000L, 0x04040001L, 0x00000101L,
0x04000001L, 0x00040101L, 0x00000100L, 0x04040000L, },
- {/* nibble 3 */
+ { // nibble 3
0x00401008L, 0x10001000L, 0x00000008L, 0x10401008L, 0x00000000L,
0x10400000L, 0x10001008L, 0x00400008L, 0x10401000L, 0x10000008L,
0x10000000L, 0x00001008L, 0x10000008L, 0x00401008L, 0x00400000L,
@@ -232,7 +232,7 @@
0x00401008L, 0x00400000L, 0x10401008L, 0x00000008L, 0x10001000L,
0x00401008L, 0x00400008L, 0x00401000L, 0x10400000L, 0x10001008L,
0x00001008L, 0x10000000L, 0x10000008L, 0x10401000L, },
- {/* nibble 4 */
+ { // nibble 4
0x08000000L, 0x00010000L, 0x00000400L, 0x08010420L, 0x08010020L,
0x08000400L, 0x00010420L, 0x08010000L, 0x00010000L, 0x00000020L,
0x08000020L, 0x00010400L, 0x08000420L, 0x08010020L, 0x08010400L,
@@ -246,7 +246,7 @@
0x00000000L, 0x08010420L, 0x08010020L, 0x08010400L, 0x00000420L,
0x00010000L, 0x00010400L, 0x08010020L, 0x08000400L, 0x00000420L,
0x00000020L, 0x00010420L, 0x08010000L, 0x08000020L, },
- {/* nibble 5 */
+ { // nibble 5
0x80000040L, 0x00200040L, 0x00000000L, 0x80202000L, 0x00200040L,
0x00002000L, 0x80002040L, 0x00200000L, 0x00002040L, 0x80202040L,
0x00202000L, 0x80000000L, 0x80002000L, 0x80000040L, 0x80200000L,
@@ -260,7 +260,7 @@
0x00200000L, 0x80002040L, 0x80000040L, 0x80200000L, 0x00202040L,
0x00000000L, 0x00002000L, 0x80000040L, 0x80002040L, 0x80202000L,
0x80200000L, 0x00002040L, 0x00000040L, 0x80200040L, },
- {/* nibble 6 */
+ { // nibble 6
0x00004000L, 0x00000200L, 0x01000200L, 0x01000004L, 0x01004204L,
0x00004004L, 0x00004200L, 0x00000000L, 0x01000000L, 0x01000204L,
0x00000204L, 0x01004000L, 0x00000004L, 0x01004200L, 0x01004000L,
@@ -274,7 +274,7 @@
0x01000200L, 0x00004200L, 0x00000204L, 0x00004000L, 0x01004204L,
0x01000000L, 0x01004200L, 0x00000004L, 0x00004004L, 0x01004204L,
0x01000004L, 0x01004200L, 0x01004000L, 0x00004004L, },
- {/* nibble 7 */
+ { // nibble 7
0x20800080L, 0x20820000L, 0x00020080L, 0x00000000L, 0x20020000L,
0x00800080L, 0x20800000L, 0x20820080L, 0x00000080L, 0x20000000L,
0x00820000L, 0x00020080L, 0x00820080L, 0x20020080L, 0x20000080L,
@@ -305,9 +305,9 @@
c2l(in, c);
c2l(in, d);
- /* do PC1 in 47 simple operations :-)
- * Thanks to John Fletcher (john_fletcher@lccmail.ocf.llnl.gov)
- * for the inspiration. :-) */
+ // do PC1 in 47 simple operations :-)
+ // Thanks to John Fletcher (john_fletcher@lccmail.ocf.llnl.gov)
+ // for the inspiration. :-)
PERM_OP(d, c, t, 4, 0x0f0f0f0fL);
HPERM_OP(c, t, -2, 0xcccc0000L);
HPERM_OP(d, t, -2, 0xcccc0000L);
@@ -328,8 +328,8 @@
}
c &= 0x0fffffffL;
d &= 0x0fffffffL;
- /* could be a few less shifts but I am to lazy at this
- * point in time to investigate */
+  // could be a few fewer shifts but I am too lazy at this
+  // point in time to investigate
s = des_skb[0][(c) & 0x3f] |
des_skb[1][((c >> 6L) & 0x03) | ((c >> 7L) & 0x3c)] |
des_skb[2][((c >> 13L) & 0x0f) | ((c >> 14L) & 0x30)] |
@@ -340,7 +340,7 @@
des_skb[6][(d >> 15L) & 0x3f] |
des_skb[7][((d >> 21L) & 0x0f) | ((d >> 22L) & 0x30)];
- /* table contained 0213 4657 */
+ // table contained 0213 4657
t2 = ((t << 16L) | (s & 0x0000ffffL)) & 0xffffffffL;
schedule->subkeys[i][0] = ROTATE(t2, 30) & 0xffffffffL;
@@ -385,18 +385,18 @@
l = data[1];
IP(r, l);
- /* Things have been modified so that the initial rotate is done outside
- * the loop. This required the DES_SPtrans values in sp.h to be
- * rotated 1 bit to the right. One perl script later and things have a
- * 5% speed up on a sparc2. Thanks to Richard Outerbridge
- * <71755.204@CompuServe.COM> for pointing this out. */
- /* clear the top bits on machines with 8byte longs */
- /* shift left by 2 */
+ // Things have been modified so that the initial rotate is done outside
+ // the loop. This required the DES_SPtrans values in sp.h to be
+ // rotated 1 bit to the right. One perl script later and things have a
+ // 5% speed up on a sparc2. Thanks to Richard Outerbridge
+ // <71755.204@CompuServe.COM> for pointing this out.
+ // clear the top bits on machines with 8byte longs
+ // shift left by 2
r = ROTATE(r, 29) & 0xffffffffL;
l = ROTATE(l, 29) & 0xffffffffL;
- /* I don't know if it is worth the effort of loop unrolling the
- * inner loop */
+ // I don't know if it is worth the effort of loop unrolling the
+ // inner loop
if (enc) {
D_ENCRYPT(ks, l, r, 0);
D_ENCRYPT(ks, r, l, 1);
@@ -433,7 +433,7 @@
D_ENCRYPT(ks, r, l, 0);
}
- /* rotate and clear the top bits on machines with 8byte longs */
+ // rotate and clear the top bits on machines with 8byte longs
l = ROTATE(l, 3) & 0xffffffffL;
r = ROTATE(r, 3) & 0xffffffffL;
@@ -448,17 +448,17 @@
r = data[0];
l = data[1];
- /* Things have been modified so that the initial rotate is done outside the
- * loop. This required the DES_SPtrans values in sp.h to be rotated 1 bit to
- * the right. One perl script later and things have a 5% speed up on a
- * sparc2. Thanks to Richard Outerbridge <71755.204@CompuServe.COM> for
- * pointing this out. */
- /* clear the top bits on machines with 8byte longs */
+ // Things have been modified so that the initial rotate is done outside the
+ // loop. This required the DES_SPtrans values in sp.h to be rotated 1 bit to
+ // the right. One perl script later and things have a 5% speed up on a
+ // sparc2. Thanks to Richard Outerbridge <71755.204@CompuServe.COM> for
+ // pointing this out.
+ // clear the top bits on machines with 8byte longs
r = ROTATE(r, 29) & 0xffffffffL;
l = ROTATE(l, 29) & 0xffffffffL;
- /* I don't know if it is worth the effort of loop unrolling the
- * inner loop */
+ // I don't know if it is worth the effort of loop unrolling the
+ // inner loop
if (enc) {
D_ENCRYPT(ks, l, r, 0);
D_ENCRYPT(ks, r, l, 1);
@@ -494,7 +494,7 @@
D_ENCRYPT(ks, l, r, 1);
D_ENCRYPT(ks, r, l, 0);
}
- /* rotate and clear the top bits on machines with 8byte longs */
+ // rotate and clear the top bits on machines with 8byte longs
data[0] = ROTATE(l, 3) & 0xffffffffL;
data[1] = ROTATE(r, 3) & 0xffffffffL;
}
@@ -764,7 +764,7 @@
}
-/* Deprecated functions. */
+// Deprecated functions.
void DES_set_key_unchecked(const DES_cblock *key, DES_key_schedule *schedule) {
DES_set_key(key, schedule);
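The PC1 sequence above is built almost entirely from PERM_OP-style delta swaps. As a rough standalone illustration of that trick (the helper name delta_swap is invented here; this is a sketch of the idea, not the library's macro):

#include <stdint.h>
#include <stdio.h>

// Exchange the bit groups of |*a| and |*b| selected by |mask|, with |*a|'s
// group taken |n| bits to the left; this mirrors the shape of PERM_OP.
static void delta_swap(uint32_t *a, uint32_t *b, int n, uint32_t mask) {
  uint32_t t = ((*a >> n) ^ *b) & mask;  // bits that differ, in |*b|'s frame
  *b ^= t;                               // flip them in |*b|
  *a ^= t << n;                          // flip the matching bits in |*a|
}

int main(void) {
  uint32_t c = 0x01234567, d = 0x89abcdef;
  delta_swap(&d, &c, 4, 0x0f0f0f0f);  // same shape as PERM_OP(d, c, t, 4, ...)
  printf("%08x %08x\n", c, d);
  return 0;
}

Each such call moves a whole class of bits at once, which is how the 56-bit PC1 permutation fits in 47 simple operations.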
diff --git a/crypto/fipsmodule/des/internal.h b/crypto/fipsmodule/des/internal.h
index 21eb933..7bfc45b 100644
--- a/crypto/fipsmodule/des/internal.h
+++ b/crypto/fipsmodule/des/internal.h
@@ -80,7 +80,7 @@
*((c)++) = (unsigned char)(((l) >> 24L) & 0xff); \
} while (0)
-/* NOTE - c is not incremented as per c2l */
+// NOTE - c is not incremented as per c2l
#define c2ln(c, l1, l2, n) \
do { \
(c) += (n); \
@@ -105,7 +105,7 @@
} \
} while (0)
-/* NOTE - c is not incremented as per l2c */
+// NOTE - c is not incremented as per l2c
#define l2cn(l1, l2, c, n) \
do { \
(c) += (n); \
@@ -218,7 +218,7 @@
#if defined(__cplusplus)
-} /* extern C */
+} // extern C
#endif
-#endif /* OPENSSL_HEADER_DES_INTERNAL_H */
+#endif // OPENSSL_HEADER_DES_INTERNAL_H
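For readers skimming the macros in this header: c2l/l2c convert between little-endian bytes and a 32-bit word while advancing the cursor, and the c2ln/l2cn variants handle partial lengths, hence the NOTEs about cursor behavior. A minimal standalone sketch of the full-width pair, with invented names:

#include <stdint.h>

// Read four little-endian bytes into a word, advancing the cursor (like c2l).
uint32_t c2l_sketch(const uint8_t **c) {
  const uint8_t *p = *c;
  uint32_t l = (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
               ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
  *c += 4;
  return l;
}

// Write a word as four little-endian bytes, advancing the cursor (like l2c).
void l2c_sketch(uint32_t l, uint8_t **c) {
  uint8_t *p = *c;
  p[0] = (uint8_t)(l & 0xff);
  p[1] = (uint8_t)((l >> 8) & 0xff);
  p[2] = (uint8_t)((l >> 16) & 0xff);
  p[3] = (uint8_t)((l >> 24) & 0xff);
  *c += 4;
}

int main(void) {
  uint8_t buf[4];
  uint8_t *w = buf;
  l2c_sketch(0x04030201, &w);  // writes 01 02 03 04
  const uint8_t *r = buf;
  return c2l_sketch(&r) == 0x04030201 ? 0 : 1;  // round-trips to the same word
}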
diff --git a/crypto/fipsmodule/digest/digest.c b/crypto/fipsmodule/digest/digest.c
index 00e6d4b..f8a0dd2 100644
--- a/crypto/fipsmodule/digest/digest.c
+++ b/crypto/fipsmodule/digest/digest.c
@@ -123,9 +123,9 @@
}
if (out->digest == in->digest) {
- /* |md_data| will be the correct size in this case so it's removed from
- * |out| at this point so that |EVP_MD_CTX_cleanup| doesn't free it and
- * then it's reused. */
+ // |md_data| will be the correct size in this case so it's removed from
+ // |out| at this point so that |EVP_MD_CTX_cleanup| doesn't free it and
+ // then it's reused.
tmp_buf = out->md_data;
out->md_data = NULL;
}
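The |md_data| reuse above matters for callers that fork a running hash to finish several messages sharing a prefix. A sketch of that pattern against the public digest API (error checks elided for brevity):

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <openssl/digest.h>

int main(void) {
  static const char *kSuffixes[] = {"suffix A", "suffix B"};
  uint8_t md[EVP_MAX_MD_SIZE];
  unsigned md_len;

  EVP_MD_CTX ctx, fork_ctx;
  EVP_MD_CTX_init(&ctx);
  EVP_MD_CTX_init(&fork_ctx);

  EVP_DigestInit_ex(&ctx, EVP_sha256(), NULL);
  EVP_DigestUpdate(&ctx, "common prefix", 13);

  for (size_t i = 0; i < 2; i++) {
    // After the first iteration, |fork_ctx| already holds SHA-256 state, so
    // the copy can take the |md_data|-reuse path shown in the hunk above.
    EVP_MD_CTX_copy_ex(&fork_ctx, &ctx);
    EVP_DigestUpdate(&fork_ctx, kSuffixes[i], strlen(kSuffixes[i]));
    EVP_DigestFinal_ex(&fork_ctx, md, &md_len);
    printf("digest %zu: %u bytes\n", i, md_len);
  }

  EVP_MD_CTX_cleanup(&ctx);
  EVP_MD_CTX_cleanup(&fork_ctx);
  return 0;
}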
diff --git a/crypto/fipsmodule/digest/internal.h b/crypto/fipsmodule/digest/internal.h
index e3d812a..2d06ed0 100644
--- a/crypto/fipsmodule/digest/internal.h
+++ b/crypto/fipsmodule/digest/internal.h
@@ -65,48 +65,48 @@
struct env_md_st {
- /* type contains a NID identifing the digest function. (For example,
- * NID_md5.) */
+ // type contains a NID identifying the digest function. (For example,
+ // NID_md5.)
int type;
- /* md_size contains the size, in bytes, of the resulting digest. */
+ // md_size contains the size, in bytes, of the resulting digest.
unsigned md_size;
- /* flags contains the OR of |EVP_MD_FLAG_*| values. */
+ // flags contains the OR of |EVP_MD_FLAG_*| values.
uint32_t flags;
- /* init initialises the state in |ctx->md_data|. */
+ // init initialises the state in |ctx->md_data|.
void (*init)(EVP_MD_CTX *ctx);
- /* update hashes |len| bytes of |data| into the state in |ctx->md_data|. */
+ // update hashes |len| bytes of |data| into the state in |ctx->md_data|.
void (*update)(EVP_MD_CTX *ctx, const void *data, size_t count);
- /* final completes the hash and writes |md_size| bytes of digest to |out|. */
+ // final completes the hash and writes |md_size| bytes of digest to |out|.
void (*final)(EVP_MD_CTX *ctx, uint8_t *out);
- /* block_size contains the hash's native block size. */
+ // block_size contains the hash's native block size.
unsigned block_size;
- /* ctx_size contains the size, in bytes, of the state of the hash function. */
+ // ctx_size contains the size, in bytes, of the state of the hash function.
unsigned ctx_size;
};
-/* evp_md_pctx_ops contains function pointers to allow the |pctx| member of
- * |EVP_MD_CTX| to be manipulated without breaking layering by calling EVP
- * functions. */
+// evp_md_pctx_ops contains function pointers to allow the |pctx| member of
+// |EVP_MD_CTX| to be manipulated without breaking layering by calling EVP
+// functions.
struct evp_md_pctx_ops {
- /* free is called when an |EVP_MD_CTX| is being freed and the |pctx| also
- * needs to be freed. */
+ // free is called when an |EVP_MD_CTX| is being freed and the |pctx| also
+ // needs to be freed.
void (*free) (EVP_PKEY_CTX *pctx);
- /* dup is called when an |EVP_MD_CTX| is copied and so the |pctx| also needs
- * to be copied. */
+ // dup is called when an |EVP_MD_CTX| is copied and so the |pctx| also needs
+ // to be copied.
EVP_PKEY_CTX* (*dup) (EVP_PKEY_CTX *pctx);
};
#if defined(__cplusplus)
-} /* extern C */
+} // extern C
#endif
-#endif /* OPENSSL_HEADER_DIGEST_INTERNAL */
+#endif // OPENSSL_HEADER_DIGEST_INTERNAL
diff --git a/crypto/fipsmodule/digest/md32_common.h b/crypto/fipsmodule/digest/md32_common.h
index 7371629..a0c3665 100644
--- a/crypto/fipsmodule/digest/md32_common.h
+++ b/crypto/fipsmodule/digest/md32_common.h
@@ -57,56 +57,55 @@
#endif
-/* This is a generic 32-bit "collector" for message digest algorithms. It
- * collects input character stream into chunks of 32-bit values and invokes the
- * block function that performs the actual hash calculations. To make use of
- * this mechanism, the following macros must be defined before including
- * md32_common.h.
- *
- * One of |DATA_ORDER_IS_BIG_ENDIAN| or |DATA_ORDER_IS_LITTLE_ENDIAN| must be
- * defined to specify the byte order of the input stream.
- *
- * |HASH_CBLOCK| must be defined as the integer block size, in bytes.
- *
- * |HASH_CTX| must be defined as the name of the context structure, which must
- * have at least the following members:
- *
- * typedef struct <name>_state_st {
- * uint32_t h[<chaining length> / sizeof(uint32_t)];
- * uint32_t Nl, Nh;
- * uint8_t data[HASH_CBLOCK];
- * unsigned num;
- * ...
- * } <NAME>_CTX;
- *
- * <chaining length> is the output length of the hash in bytes, before
- * any truncation (e.g. 64 for SHA-224 and SHA-256, 128 for SHA-384 and
- * SHA-512).
- *
- * |HASH_UPDATE| must be defined as the name of the "Update" function to
- * generate.
- *
- * |HASH_TRANSFORM| must be defined as the the name of the "Transform"
- * function to generate.
- *
- * |HASH_FINAL| must be defined as the name of "Final" function to generate.
- *
- * |HASH_BLOCK_DATA_ORDER| must be defined as the name of the "Block" function.
- * That function must be implemented manually. It must be capable of operating
- * on *unaligned* input data in its original (data) byte order. It must have
- * this signature:
- *
- * void HASH_BLOCK_DATA_ORDER(uint32_t *state, const uint8_t *data,
- * size_t num);
- *
- * It must update the hash state |state| with |num| blocks of data from |data|,
- * where each block is |HASH_CBLOCK| bytes; i.e. |data| points to a array of
- * |HASH_CBLOCK * num| bytes. |state| points to the |h| member of a |HASH_CTX|,
- * and so will have |<chaining length> / sizeof(uint32_t)| elements.
- *
- * |HASH_MAKE_STRING(c, s)| must be defined as a block statement that converts
- * the hash state |c->h| into the output byte order, storing the result in |s|.
- */
+// This is a generic 32-bit "collector" for message digest algorithms. It
+// collects the input character stream into chunks of 32-bit values and invokes
+// the block function that performs the actual hash calculations. To make use of
+// this mechanism, the following macros must be defined before including
+// md32_common.h.
+//
+// One of |DATA_ORDER_IS_BIG_ENDIAN| or |DATA_ORDER_IS_LITTLE_ENDIAN| must be
+// defined to specify the byte order of the input stream.
+//
+// |HASH_CBLOCK| must be defined as the integer block size, in bytes.
+//
+// |HASH_CTX| must be defined as the name of the context structure, which must
+// have at least the following members:
+//
+// typedef struct <name>_state_st {
+// uint32_t h[<chaining length> / sizeof(uint32_t)];
+// uint32_t Nl, Nh;
+// uint8_t data[HASH_CBLOCK];
+// unsigned num;
+// ...
+// } <NAME>_CTX;
+//
+// <chaining length> is the output length of the hash in bytes, before
+// any truncation (e.g. 64 for SHA-224 and SHA-256, 128 for SHA-384 and
+// SHA-512).
+//
+// |HASH_UPDATE| must be defined as the name of the "Update" function to
+// generate.
+//
+// |HASH_TRANSFORM| must be defined as the name of the "Transform"
+// function to generate.
+//
+// |HASH_FINAL| must be defined as the name of the "Final" function to generate.
+//
+// |HASH_BLOCK_DATA_ORDER| must be defined as the name of the "Block" function.
+// That function must be implemented manually. It must be capable of operating
+// on *unaligned* input data in its original (data) byte order. It must have
+// this signature:
+//
+// void HASH_BLOCK_DATA_ORDER(uint32_t *state, const uint8_t *data,
+// size_t num);
+//
+// It must update the hash state |state| with |num| blocks of data from |data|,
+// where each block is |HASH_CBLOCK| bytes; i.e. |data| points to an array of
+// |HASH_CBLOCK * num| bytes. |state| points to the |h| member of a |HASH_CTX|,
+// and so will have |<chaining length> / sizeof(uint32_t)| elements.
+//
+// |HASH_MAKE_STRING(c, s)| must be defined as a block statement that converts
+// the hash state |c->h| into the output byte order, storing the result in |s|.
#if !defined(DATA_ORDER_IS_BIG_ENDIAN) && !defined(DATA_ORDER_IS_LITTLE_ENDIAN)
#error "DATA_ORDER must be defined!"
@@ -173,7 +172,7 @@
*((c)++) = (uint8_t)(((l) >> 24) & 0xff); \
} while (0)
-#endif /* DATA_ORDER */
+#endif // DATA_ORDER
int HASH_UPDATE(HASH_CTX *c, const void *data_, size_t len) {
const uint8_t *data = data_;
@@ -184,7 +183,7 @@
uint32_t l = c->Nl + (((uint32_t)len) << 3);
if (l < c->Nl) {
- /* Handle carries. */
+ // Handle carries.
c->Nh++;
}
c->Nh += (uint32_t)(len >> 29);
@@ -199,7 +198,7 @@
data += n;
len -= n;
c->num = 0;
- /* Keep |c->data| zeroed when unused. */
+ // Keep |c->data| zeroed when unused.
OPENSSL_memset(c->data, 0, HASH_CBLOCK);
} else {
OPENSSL_memcpy(c->data + n, data, len);
@@ -230,14 +229,14 @@
int HASH_FINAL(uint8_t *md, HASH_CTX *c) {
- /* |c->data| always has room for at least one byte. A full block would have
- * been consumed. */
+ // |c->data| always has room for at least one byte. A full block would have
+ // been consumed.
size_t n = c->num;
assert(n < HASH_CBLOCK);
c->data[n] = 0x80;
n++;
- /* Fill the block with zeros if there isn't room for a 64-bit length. */
+ // Fill the block with zeros if there isn't room for a 64-bit length.
if (n > (HASH_CBLOCK - 8)) {
OPENSSL_memset(c->data + n, 0, HASH_CBLOCK - n);
n = 0;
@@ -245,7 +244,7 @@
}
OPENSSL_memset(c->data + n, 0, HASH_CBLOCK - 8 - n);
- /* Append a 64-bit length to the block and process it. */
+ // Append a 64-bit length to the block and process it.
uint8_t *p = c->data + HASH_CBLOCK - 8;
#if defined(DATA_ORDER_IS_BIG_ENDIAN)
HOST_l2c(c->Nh, p);
@@ -265,5 +264,5 @@
#if defined(__cplusplus)
-} /* extern C */
+} // extern C
#endif
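Concretely, a digest source instantiates this header roughly as follows. This is a hedged sketch with invented MY_ and my_ names; the real users are the MD4/MD5/SHA-1/SHA-256 sources, which also pull in the internal headers providing OPENSSL_memcpy and OPENSSL_memset:

#include <stddef.h>
#include <stdint.h>

typedef struct my_state_st {
  uint32_t h[8];     // chaining value; 8 words for this SHA-256-sized example
  uint32_t Nl, Nh;   // 64-bit message bit count, split across two words
  uint8_t data[64];  // buffered partial block
  unsigned num;      // bytes currently buffered in |data|
} MY_CTX;

// The block function is supplied by hand (often in assembly).
void my_block_data_order(uint32_t *state, const uint8_t *data, size_t num);

#define DATA_ORDER_IS_BIG_ENDIAN
#define HASH_CTX MY_CTX
#define HASH_CBLOCK 64
#define HASH_UPDATE MY_Update
#define HASH_TRANSFORM MY_Transform
#define HASH_FINAL MY_Final
#define HASH_BLOCK_DATA_ORDER my_block_data_order
#define HASH_MAKE_STRING(c, s)    \
  do {                            \
    for (int i = 0; i < 8; i++) { \
      uint32_t v = (c)->h[i];     \
      HOST_l2c(v, (s));           \
    }                             \
  } while (0)

#include "md32_common.h"  // emits MY_Update, MY_Transform and MY_Final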
diff --git a/crypto/fipsmodule/ec/ec.c b/crypto/fipsmodule/ec/ec.c
index 55f388d..d82e58f 100644
--- a/crypto/fipsmodule/ec/ec.c
+++ b/crypto/fipsmodule/ec/ec.c
@@ -81,86 +81,86 @@
static const uint8_t kP224Params[6 * 28] = {
- /* p */
+ // p
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x01,
- /* a */
+ // a
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFE,
- /* b */
+ // b
0xB4, 0x05, 0x0A, 0x85, 0x0C, 0x04, 0xB3, 0xAB, 0xF5, 0x41, 0x32, 0x56,
0x50, 0x44, 0xB0, 0xB7, 0xD7, 0xBF, 0xD8, 0xBA, 0x27, 0x0B, 0x39, 0x43,
0x23, 0x55, 0xFF, 0xB4,
- /* x */
+ // x
0xB7, 0x0E, 0x0C, 0xBD, 0x6B, 0xB4, 0xBF, 0x7F, 0x32, 0x13, 0x90, 0xB9,
0x4A, 0x03, 0xC1, 0xD3, 0x56, 0xC2, 0x11, 0x22, 0x34, 0x32, 0x80, 0xD6,
0x11, 0x5C, 0x1D, 0x21,
- /* y */
+ // y
0xbd, 0x37, 0x63, 0x88, 0xb5, 0xf7, 0x23, 0xfb, 0x4c, 0x22, 0xdf, 0xe6,
0xcd, 0x43, 0x75, 0xa0, 0x5a, 0x07, 0x47, 0x64, 0x44, 0xd5, 0x81, 0x99,
0x85, 0x00, 0x7e, 0x34,
- /* order */
+ // order
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0x16, 0xA2, 0xE0, 0xB8, 0xF0, 0x3E, 0x13, 0xDD, 0x29, 0x45,
0x5C, 0x5C, 0x2A, 0x3D,
};
static const uint8_t kP256Params[6 * 32] = {
- /* p */
+ // p
0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- /* a */
+ // a
0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFC,
- /* b */
+ // b
0x5A, 0xC6, 0x35, 0xD8, 0xAA, 0x3A, 0x93, 0xE7, 0xB3, 0xEB, 0xBD, 0x55,
0x76, 0x98, 0x86, 0xBC, 0x65, 0x1D, 0x06, 0xB0, 0xCC, 0x53, 0xB0, 0xF6,
0x3B, 0xCE, 0x3C, 0x3E, 0x27, 0xD2, 0x60, 0x4B,
- /* x */
+ // x
0x6B, 0x17, 0xD1, 0xF2, 0xE1, 0x2C, 0x42, 0x47, 0xF8, 0xBC, 0xE6, 0xE5,
0x63, 0xA4, 0x40, 0xF2, 0x77, 0x03, 0x7D, 0x81, 0x2D, 0xEB, 0x33, 0xA0,
0xF4, 0xA1, 0x39, 0x45, 0xD8, 0x98, 0xC2, 0x96,
- /* y */
+ // y
0x4f, 0xe3, 0x42, 0xe2, 0xfe, 0x1a, 0x7f, 0x9b, 0x8e, 0xe7, 0xeb, 0x4a,
0x7c, 0x0f, 0x9e, 0x16, 0x2b, 0xce, 0x33, 0x57, 0x6b, 0x31, 0x5e, 0xce,
0xcb, 0xb6, 0x40, 0x68, 0x37, 0xbf, 0x51, 0xf5,
- /* order */
+ // order
0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xBC, 0xE6, 0xFA, 0xAD, 0xA7, 0x17, 0x9E, 0x84,
0xF3, 0xB9, 0xCA, 0xC2, 0xFC, 0x63, 0x25, 0x51,
};
static const uint8_t kP384Params[6 * 48] = {
- /* p */
+ // p
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF,
- /* a */
+ // a
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFC,
- /* b */
+ // b
0xB3, 0x31, 0x2F, 0xA7, 0xE2, 0x3E, 0xE7, 0xE4, 0x98, 0x8E, 0x05, 0x6B,
0xE3, 0xF8, 0x2D, 0x19, 0x18, 0x1D, 0x9C, 0x6E, 0xFE, 0x81, 0x41, 0x12,
0x03, 0x14, 0x08, 0x8F, 0x50, 0x13, 0x87, 0x5A, 0xC6, 0x56, 0x39, 0x8D,
0x8A, 0x2E, 0xD1, 0x9D, 0x2A, 0x85, 0xC8, 0xED, 0xD3, 0xEC, 0x2A, 0xEF,
- /* x */
+ // x
0xAA, 0x87, 0xCA, 0x22, 0xBE, 0x8B, 0x05, 0x37, 0x8E, 0xB1, 0xC7, 0x1E,
0xF3, 0x20, 0xAD, 0x74, 0x6E, 0x1D, 0x3B, 0x62, 0x8B, 0xA7, 0x9B, 0x98,
0x59, 0xF7, 0x41, 0xE0, 0x82, 0x54, 0x2A, 0x38, 0x55, 0x02, 0xF2, 0x5D,
0xBF, 0x55, 0x29, 0x6C, 0x3A, 0x54, 0x5E, 0x38, 0x72, 0x76, 0x0A, 0xB7,
- /* y */
+ // y
0x36, 0x17, 0xde, 0x4a, 0x96, 0x26, 0x2c, 0x6f, 0x5d, 0x9e, 0x98, 0xbf,
0x92, 0x92, 0xdc, 0x29, 0xf8, 0xf4, 0x1d, 0xbd, 0x28, 0x9a, 0x14, 0x7c,
0xe9, 0xda, 0x31, 0x13, 0xb5, 0xf0, 0xb8, 0xc0, 0x0a, 0x60, 0xb1, 0xce,
0x1d, 0x7e, 0x81, 0x9d, 0x7a, 0x43, 0x1d, 0x7c, 0x90, 0xea, 0x0e, 0x5f,
- /* order */
+ // order
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xC7, 0x63, 0x4D, 0x81, 0xF4, 0x37, 0x2D, 0xDF, 0x58, 0x1A, 0x0D, 0xB2,
@@ -168,42 +168,42 @@
};
static const uint8_t kP521Params[6 * 66] = {
- /* p */
+ // p
0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- /* a */
+ // a
0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFC,
- /* b */
+ // b
0x00, 0x51, 0x95, 0x3E, 0xB9, 0x61, 0x8E, 0x1C, 0x9A, 0x1F, 0x92, 0x9A,
0x21, 0xA0, 0xB6, 0x85, 0x40, 0xEE, 0xA2, 0xDA, 0x72, 0x5B, 0x99, 0xB3,
0x15, 0xF3, 0xB8, 0xB4, 0x89, 0x91, 0x8E, 0xF1, 0x09, 0xE1, 0x56, 0x19,
0x39, 0x51, 0xEC, 0x7E, 0x93, 0x7B, 0x16, 0x52, 0xC0, 0xBD, 0x3B, 0xB1,
0xBF, 0x07, 0x35, 0x73, 0xDF, 0x88, 0x3D, 0x2C, 0x34, 0xF1, 0xEF, 0x45,
0x1F, 0xD4, 0x6B, 0x50, 0x3F, 0x00,
- /* x */
+ // x
0x00, 0xC6, 0x85, 0x8E, 0x06, 0xB7, 0x04, 0x04, 0xE9, 0xCD, 0x9E, 0x3E,
0xCB, 0x66, 0x23, 0x95, 0xB4, 0x42, 0x9C, 0x64, 0x81, 0x39, 0x05, 0x3F,
0xB5, 0x21, 0xF8, 0x28, 0xAF, 0x60, 0x6B, 0x4D, 0x3D, 0xBA, 0xA1, 0x4B,
0x5E, 0x77, 0xEF, 0xE7, 0x59, 0x28, 0xFE, 0x1D, 0xC1, 0x27, 0xA2, 0xFF,
0xA8, 0xDE, 0x33, 0x48, 0xB3, 0xC1, 0x85, 0x6A, 0x42, 0x9B, 0xF9, 0x7E,
0x7E, 0x31, 0xC2, 0xE5, 0xBD, 0x66,
- /* y */
+ // y
0x01, 0x18, 0x39, 0x29, 0x6a, 0x78, 0x9a, 0x3b, 0xc0, 0x04, 0x5c, 0x8a,
0x5f, 0xb4, 0x2c, 0x7d, 0x1b, 0xd9, 0x98, 0xf5, 0x44, 0x49, 0x57, 0x9b,
0x44, 0x68, 0x17, 0xaf, 0xbd, 0x17, 0x27, 0x3e, 0x66, 0x2c, 0x97, 0xee,
0x72, 0x99, 0x5e, 0xf4, 0x26, 0x40, 0xc5, 0x50, 0xb9, 0x01, 0x3f, 0xad,
0x07, 0x61, 0x35, 0x3c, 0x70, 0x86, 0xa2, 0x72, 0xc2, 0x40, 0x88, 0xbe,
0x94, 0x76, 0x9f, 0xd1, 0x66, 0x50,
- /* order */
+ // order
0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFA, 0x51, 0x86,
@@ -212,15 +212,15 @@
0xB7, 0x1E, 0x91, 0x38, 0x64, 0x09,
};
-/* MSan appears to have a bug that causes code to be miscompiled in opt mode.
- * While that is being looked at, don't run the uint128_t code under MSan. */
+// MSan appears to have a bug that causes code to be miscompiled in opt mode.
+// While that is being looked at, don't run the uint128_t code under MSan.
#if defined(OPENSSL_64_BIT) && !defined(OPENSSL_WINDOWS) && \
!defined(MEMORY_SANITIZER)
#define BORINGSSL_USE_INT128_CODE
#endif
DEFINE_METHOD_FUNCTION(struct built_in_curves, OPENSSL_built_in_curves) {
- /* 1.3.132.0.35 */
+ // 1.3.132.0.35
static const uint8_t kOIDP521[] = {0x2b, 0x81, 0x04, 0x00, 0x23};
out->curves[0].nid = NID_secp521r1;
out->curves[0].oid = kOIDP521;
@@ -230,7 +230,7 @@
out->curves[0].params = kP521Params;
out->curves[0].method = EC_GFp_mont_method();
- /* 1.3.132.0.34 */
+ // 1.3.132.0.34
static const uint8_t kOIDP384[] = {0x2b, 0x81, 0x04, 0x00, 0x22};
out->curves[1].nid = NID_secp384r1;
out->curves[1].oid = kOIDP384;
@@ -240,7 +240,7 @@
out->curves[1].params = kP384Params;
out->curves[1].method = EC_GFp_mont_method();
- /* 1.2.840.10045.3.1.7 */
+ // 1.2.840.10045.3.1.7
static const uint8_t kOIDP256[] = {0x2a, 0x86, 0x48, 0xce,
0x3d, 0x03, 0x01, 0x07};
out->curves[2].nid = NID_X9_62_prime256v1;
@@ -261,7 +261,7 @@
EC_GFp_mont_method();
#endif
- /* 1.3.132.0.33 */
+ // 1.3.132.0.33
static const uint8_t kOIDP224[] = {0x2b, 0x81, 0x04, 0x00, 0x21};
out->curves[3].nid = NID_secp224r1;
out->curves[3].oid = kOIDP224;
@@ -277,9 +277,9 @@
#endif
}
-/* built_in_curve_scalar_field_monts contains Montgomery contexts for
- * performing inversions in the scalar fields of each of the built-in
- * curves. It's protected by |built_in_curve_scalar_field_monts_once|. */
+// built_in_curve_scalar_field_monts contains Montgomery contexts for
+// performing inversions in the scalar fields of each of the built-in
+// curves. It's protected by |built_in_curve_scalar_field_monts_once|.
DEFINE_LOCAL_DATA(BN_MONT_CTX **, built_in_curve_scalar_field_monts) {
const struct built_in_curves *const curves = OPENSSL_built_in_curves();
@@ -386,12 +386,12 @@
int EC_GROUP_set_generator(EC_GROUP *group, const EC_POINT *generator,
const BIGNUM *order, const BIGNUM *cofactor) {
if (group->curve_name != NID_undef || group->generator != NULL) {
- /* |EC_GROUP_set_generator| may only be used with |EC_GROUP|s returned by
- * |EC_GROUP_new_curve_GFp| and may only used once on each group. */
+ // |EC_GROUP_set_generator| may only be used with |EC_GROUP|s returned by
+ // |EC_GROUP_new_curve_GFp| and may only be used once on each group.
return 0;
}
- /* Require a cofactor of one for custom curves, which implies prime order. */
+ // Require a cofactor of one for custom curves, which implies prime order.
if (!BN_is_one(cofactor)) {
OPENSSL_PUT_ERROR(EC, EC_R_INVALID_COFACTOR);
return 0;
@@ -579,7 +579,7 @@
int EC_GROUP_get_cofactor(const EC_GROUP *group, BIGNUM *cofactor,
BN_CTX *ctx) {
- /* All |EC_GROUP|s have cofactor 1. */
+ // All |EC_GROUP|s have cofactor 1.
return BN_set_word(cofactor, 1);
}
@@ -782,9 +782,9 @@
int EC_POINT_mul(const EC_GROUP *group, EC_POINT *r, const BIGNUM *g_scalar,
const EC_POINT *p, const BIGNUM *p_scalar, BN_CTX *ctx) {
- /* Previously, this function set |r| to the point at infinity if there was
- * nothing to multiply. But, nobody should be calling this function with
- * nothing to multiply in the first place. */
+ // Previously, this function set |r| to the point at infinity if there was
+ // nothing to multiply. But, nobody should be calling this function with
+ // nothing to multiply in the first place.
if ((g_scalar == NULL && p_scalar == NULL) ||
((p == NULL) != (p_scalar == NULL))) {
OPENSSL_PUT_ERROR(EC, ERR_R_PASSED_NULL_PARAMETER);
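For callers, the contract above means supplying at least one scalar and pairing |p| with |p_scalar|. A minimal sketch of a generator multiplication through the public API (error handling elided; the scalar is a toy value, not a secret):

#include <openssl/bn.h>
#include <openssl/ec.h>
#include <openssl/nid.h>

int scalar_base_mul_example(void) {
  EC_GROUP *group = EC_GROUP_new_by_curve_name(NID_X9_62_prime256v1);
  EC_POINT *r = EC_POINT_new(group);
  BIGNUM *g_scalar = BN_new();
  BN_set_word(g_scalar, 42);

  // r = g_scalar * G: pass NULL for |p| and |p_scalar| (and for the
  // optional BN_CTX).
  int ok = EC_POINT_mul(group, r, g_scalar, NULL, NULL, NULL);

  BN_free(g_scalar);
  EC_POINT_free(r);
  EC_GROUP_free(group);
  return ok;
}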
diff --git a/crypto/fipsmodule/ec/ec_key.c b/crypto/fipsmodule/ec/ec_key.c
index acabb06..e5e8b1a 100644
--- a/crypto/fipsmodule/ec/ec_key.c
+++ b/crypto/fipsmodule/ec/ec_key.c
@@ -165,9 +165,9 @@
OPENSSL_PUT_ERROR(EC, ERR_R_PASSED_NULL_PARAMETER);
return NULL;
}
- /* Copy the parameters. */
+ // Copy the parameters.
if (src->group) {
- /* TODO(fork): duplicating the group seems wasteful. */
+ // TODO(fork): duplicating the group seems wasteful.
EC_GROUP_free(dest->group);
dest->group = EC_GROUP_dup(src->group);
if (dest->group == NULL) {
@@ -175,7 +175,7 @@
}
}
- /* Copy the public key. */
+ // Copy the public key.
if (src->pub_key && src->group) {
EC_POINT_free(dest->pub_key);
dest->pub_key = EC_POINT_dup(src->pub_key, src->group);
@@ -184,7 +184,7 @@
}
}
- /* copy the private key */
+ // copy the private key
if (src->priv_key) {
if (dest->priv_key == NULL) {
dest->priv_key = BN_new();
@@ -196,14 +196,14 @@
return NULL;
}
}
- /* copy method/extra data */
+ // copy method/extra data
if (src->ecdsa_meth) {
METHOD_unref(dest->ecdsa_meth);
dest->ecdsa_meth = src->ecdsa_meth;
METHOD_ref(dest->ecdsa_meth);
}
- /* copy the rest */
+ // copy the rest
dest->enc_flag = src->enc_flag;
dest->conv_form = src->conv_form;
@@ -235,13 +235,13 @@
int EC_KEY_set_group(EC_KEY *key, const EC_GROUP *group) {
EC_GROUP_free(key->group);
- /* TODO(fork): duplicating the group seems wasteful but see
- * |EC_KEY_set_conv_form|. */
+ // TODO(fork): duplicating the group seems wasteful but see
+ // |EC_KEY_set_conv_form|.
key->group = EC_GROUP_dup(group);
if (key->group == NULL) {
return 0;
}
- /* XXX: |BN_cmp| is not constant time. */
+ // XXX: |BN_cmp| is not constant time.
if (key->priv_key != NULL &&
BN_cmp(key->priv_key, EC_GROUP_get0_order(group)) >= 0) {
return 0;
@@ -254,7 +254,7 @@
}
int EC_KEY_set_private_key(EC_KEY *key, const BIGNUM *priv_key) {
- /* XXX: |BN_cmp| is not constant time. */
+ // XXX: |BN_cmp| is not constant time.
if (key->group != NULL &&
BN_cmp(priv_key, EC_GROUP_get0_order(key->group)) >= 0) {
OPENSSL_PUT_ERROR(EC, EC_R_WRONG_ORDER);
@@ -310,16 +310,15 @@
goto err;
}
- /* testing whether the pub_key is on the elliptic curve */
+ // testing whether the pub_key is on the elliptic curve
if (!EC_POINT_is_on_curve(eckey->group, eckey->pub_key, ctx)) {
OPENSSL_PUT_ERROR(EC, EC_R_POINT_IS_NOT_ON_CURVE);
goto err;
}
- /* in case the priv_key is present :
- * check if generator * priv_key == pub_key
- */
+ // If the priv_key is present, check that generator * priv_key == pub_key.
if (eckey->priv_key) {
- /* XXX: |BN_cmp| is not constant time. */
+ // XXX: |BN_cmp| is not constant time.
if (BN_cmp(eckey->priv_key, EC_GROUP_get0_order(eckey->group)) >= 0) {
OPENSSL_PUT_ERROR(EC, EC_R_WRONG_ORDER);
goto err;
@@ -345,7 +344,7 @@
int EC_KEY_check_fips(const EC_KEY *key) {
if (EC_KEY_is_opaque(key)) {
- /* Opaque keys can't be checked. */
+ // Opaque keys can't be checked.
OPENSSL_PUT_ERROR(EC, EC_R_PUBLIC_KEY_VALIDATION_FAILED);
return 0;
}
@@ -408,8 +407,8 @@
goto err;
}
- /* Check if retrieved coordinates match originals: if not values
- * are out of range. */
+ // Check if retrieved coordinates match originals: if not, the values
+ // are out of range.
if (BN_cmp(x, tx) || BN_cmp(y, ty)) {
OPENSSL_PUT_ERROR(EC, EC_R_COORDINATES_OUT_OF_RANGE);
goto err;
@@ -453,14 +452,14 @@
const BIGNUM *order = EC_GROUP_get0_order(eckey->group);
- /* Check that the size of the group order is FIPS compliant (FIPS 186-4
- * B.4.2). */
+ // Check that the size of the group order is FIPS compliant (FIPS 186-4
+ // B.4.2).
if (BN_num_bits(order) < 160) {
OPENSSL_PUT_ERROR(EC, EC_R_INVALID_GROUP_ORDER);
goto err;
}
- /* Generate the private key by testing candidates (FIPS 186-4 B.4.2). */
+ // Generate the private key by testing candidates (FIPS 186-4 B.4.2).
if (!BN_rand_range_ex(priv_key, 1, order)) {
goto err;
}
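Putting the FIPS notes in this file together, generation plus validation looks roughly like this through the public API (a sketch with error handling elided):

#include <openssl/ec.h>
#include <openssl/ec_key.h>
#include <openssl/nid.h>

int keygen_example(void) {
  EC_KEY *key = EC_KEY_new_by_curve_name(NID_X9_62_prime256v1);
  int ok = key != NULL &&
           EC_KEY_generate_key(key) &&  // candidate testing, FIPS 186-4 B.4.2
           EC_KEY_check_fips(key);      // on-curve, order and pairwise checks
  EC_KEY_free(key);
  return ok;
}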
diff --git a/crypto/fipsmodule/ec/ec_montgomery.c b/crypto/fipsmodule/ec/ec_montgomery.c
index c2afe25..c5f240b 100644
--- a/crypto/fipsmodule/ec/ec_montgomery.c
+++ b/crypto/fipsmodule/ec/ec_montgomery.c
@@ -219,7 +219,7 @@
BN_CTX_start(ctx);
if (BN_cmp(&point->Z, &group->one) == 0) {
- /* |point| is already affine. */
+ // |point| is already affine.
if (x != NULL && !BN_from_montgomery(x, &point->X, group->mont, ctx)) {
goto err;
}
@@ -227,7 +227,7 @@
goto err;
}
} else {
- /* transform (X, Y, Z) into (x, y) := (X/Z^2, Y/Z^3) */
+ // transform (X, Y, Z) into (x, y) := (X/Z^2, Y/Z^3)
BIGNUM *Z_1 = BN_CTX_get(ctx);
BIGNUM *Z_2 = BN_CTX_get(ctx);
@@ -238,18 +238,18 @@
goto err;
}
- /* The straightforward way to calculate the inverse of a Montgomery-encoded
- * value where the result is Montgomery-encoded is:
- *
- * |BN_from_montgomery| + invert + |BN_to_montgomery|.
- *
- * This is equivalent, but more efficient, because |BN_from_montgomery|
- * is more efficient (at least in theory) than |BN_to_montgomery|, since it
- * doesn't have to do the multiplication before the reduction.
- *
- * Use Fermat's Little Theorem instead of |BN_mod_inverse_odd| since this
- * inversion may be done as the final step of private key operations.
- * Unfortunately, this is suboptimal for ECDSA verification. */
+ // The straightforward way to calculate the inverse of a Montgomery-encoded
+ // value where the result is Montgomery-encoded is:
+ //
+ // |BN_from_montgomery| + invert + |BN_to_montgomery|.
+ //
+ // This is equivalent, but more efficient, because |BN_from_montgomery|
+ // is more efficient (at least in theory) than |BN_to_montgomery|, since it
+ // doesn't have to do the multiplication before the reduction.
+ //
+ // Use Fermat's Little Theorem instead of |BN_mod_inverse_odd| since this
+ // inversion may be done as the final step of private key operations.
+ // Unfortunately, this is suboptimal for ECDSA verification.
if (!BN_from_montgomery(Z_1, &point->Z, group->mont, ctx) ||
!BN_from_montgomery(Z_1, Z_1, group->mont, ctx) ||
!bn_mod_inverse_prime(Z_1, Z_1, &group->field, ctx, group->mont)) {
@@ -260,10 +260,10 @@
goto err;
}
- /* Instead of using |BN_from_montgomery| to convert the |x| coordinate
- * and then calling |BN_from_montgomery| again to convert the |y|
- * coordinate below, convert the common factor |Z_2| once now, saving one
- * reduction. */
+ // Instead of using |BN_from_montgomery| to convert the |x| coordinate
+ // and then calling |BN_from_montgomery| again to convert the |y|
+ // coordinate below, convert the common factor |Z_2| once now, saving one
+ // reduction.
if (!BN_from_montgomery(Z_2, Z_2, group->mont, ctx)) {
goto err;
}
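To spell out why calling |BN_from_montgomery| twice before inverting gives a Montgomery-encoded result, write R for the Montgomery factor. The stored coordinate is the encoding aR mod p, and:

\begin{aligned}
\text{stored value:}\quad & aR \bmod p \\
\text{after one FromMont:}\quad & aR \cdot R^{-1} \equiv a \\
\text{after two:}\quad & a \cdot R^{-1} \\
\text{after inverting:}\quad & (a R^{-1})^{-1} \equiv a^{-1} R \pmod{p}
\end{aligned}

Since a^{-1}R is exactly the Montgomery encoding of a^{-1}, no closing |BN_to_montgomery| is needed, which is the saving the comment describes.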
diff --git a/crypto/fipsmodule/ec/internal.h b/crypto/fipsmodule/ec/internal.h
index 424fe53..39c9349 100644
--- a/crypto/fipsmodule/ec/internal.h
+++ b/crypto/fipsmodule/ec/internal.h
@@ -88,25 +88,25 @@
int (*point_get_affine_coordinates)(const EC_GROUP *, const EC_POINT *,
BIGNUM *x, BIGNUM *y, BN_CTX *);
- /* Computes |r = g_scalar*generator + p_scalar*p| if |g_scalar| and |p_scalar|
- * are both non-null. Computes |r = g_scalar*generator| if |p_scalar| is null.
- * Computes |r = p_scalar*p| if g_scalar is null. At least one of |g_scalar|
- * and |p_scalar| must be non-null, and |p| must be non-null if |p_scalar| is
- * non-null. */
+ // Computes |r = g_scalar*generator + p_scalar*p| if |g_scalar| and |p_scalar|
+ // are both non-null. Computes |r = g_scalar*generator| if |p_scalar| is null.
+ // Computes |r = p_scalar*p| if |g_scalar| is null. At least one of |g_scalar|
+ // and |p_scalar| must be non-null, and |p| must be non-null if |p_scalar| is
+ // non-null.
int (*mul)(const EC_GROUP *group, EC_POINT *r, const BIGNUM *g_scalar,
const EC_POINT *p, const BIGNUM *p_scalar, BN_CTX *ctx);
- /* 'field_mul' and 'field_sqr' can be used by 'add' and 'dbl' so that the
- * same implementations of point operations can be used with different
- * optimized implementations of expensive field operations: */
+ // 'field_mul' and 'field_sqr' can be used by 'add' and 'dbl' so that the
+ // same implementations of point operations can be used with different
+ // optimized implementations of expensive field operations:
int (*field_mul)(const EC_GROUP *, BIGNUM *r, const BIGNUM *a,
const BIGNUM *b, BN_CTX *);
int (*field_sqr)(const EC_GROUP *, BIGNUM *r, const BIGNUM *a, BN_CTX *);
int (*field_encode)(const EC_GROUP *, BIGNUM *r, const BIGNUM *a,
- BN_CTX *); /* e.g. to Montgomery */
+ BN_CTX *); // e.g. to Montgomery
int (*field_decode)(const EC_GROUP *, BIGNUM *r, const BIGNUM *a,
- BN_CTX *); /* e.g. from Montgomery */
+ BN_CTX *); // e.g. from Montgomery
} /* EC_METHOD */;
const EC_METHOD *EC_GFp_mont_method(void);
@@ -117,22 +117,22 @@
EC_POINT *generator;
BIGNUM order;
- int curve_name; /* optional NID for named curve */
+ int curve_name; // optional NID for named curve
- const BN_MONT_CTX *order_mont; /* data for ECDSA inverse */
+ const BN_MONT_CTX *order_mont; // data for ECDSA inverse
- /* The following members are handled by the method functions,
- * even if they appear generic */
+ // The following members are handled by the method functions,
+ // even if they appear generic
- BIGNUM field; /* For curves over GF(p), this is the modulus. */
+ BIGNUM field; // For curves over GF(p), this is the modulus.
- BIGNUM a, b; /* Curve coefficients. */
+ BIGNUM a, b; // Curve coefficients.
- int a_is_minus3; /* enable optimized point arithmetics for special case */
+ int a_is_minus3; // enable optimized point arithmetics for special case
- BN_MONT_CTX *mont; /* Montgomery structure. */
+ BN_MONT_CTX *mont; // Montgomery structure.
- BIGNUM one; /* The value one. */
+ BIGNUM one; // The value one.
} /* EC_GROUP */;
struct ec_point_st {
@@ -140,22 +140,22 @@
BIGNUM X;
BIGNUM Y;
- BIGNUM Z; /* Jacobian projective coordinates:
- * (X, Y, Z) represents (X/Z^2, Y/Z^3) if Z != 0 */
+ BIGNUM Z; // Jacobian projective coordinates:
+ // (X, Y, Z) represents (X/Z^2, Y/Z^3) if Z != 0
} /* EC_POINT */;
EC_GROUP *ec_group_new(const EC_METHOD *meth);
int ec_group_copy(EC_GROUP *dest, const EC_GROUP *src);
-/* ec_group_get_order_mont returns a Montgomery context for operations modulo
- * |group|'s order. It may return NULL in the case that |group| is not a
- * built-in group. */
+// ec_group_get_order_mont returns a Montgomery context for operations modulo
+// |group|'s order. It may return NULL in the case that |group| is not a
+// built-in group.
const BN_MONT_CTX *ec_group_get_order_mont(const EC_GROUP *group);
int ec_wNAF_mul(const EC_GROUP *group, EC_POINT *r, const BIGNUM *g_scalar,
const EC_POINT *p, const BIGNUM *p_scalar, BN_CTX *ctx);
-/* method functions in simple.c */
+// method functions in simple.c
int ec_GFp_simple_group_init(EC_GROUP *);
void ec_GFp_simple_group_finish(EC_GROUP *);
int ec_GFp_simple_group_copy(EC_GROUP *, const EC_GROUP *);
@@ -200,7 +200,7 @@
int ec_GFp_simple_field_sqr(const EC_GROUP *, BIGNUM *r, const BIGNUM *a,
BN_CTX *);
-/* method functions in montgomery.c */
+// method functions in montgomery.c
int ec_GFp_mont_group_init(EC_GROUP *);
int ec_GFp_mont_group_set_curve(EC_GROUP *, const BIGNUM *p, const BIGNUM *a,
const BIGNUM *b, BN_CTX *);
@@ -225,8 +225,8 @@
const EC_METHOD *EC_GFp_nistp224_method(void);
const EC_METHOD *EC_GFp_nistp256_method(void);
-/* EC_GFp_nistz256_method is a GFp method using montgomery multiplication, with
- * x86-64 optimized P256. See http://eprint.iacr.org/2013/816. */
+// EC_GFp_nistz256_method is a GFp method using Montgomery multiplication, with
+// x86-64 optimized P256. See http://eprint.iacr.org/2013/816.
const EC_METHOD *EC_GFp_nistz256_method(void);
struct ec_key_st {
@@ -235,8 +235,8 @@
EC_POINT *pub_key;
BIGNUM *priv_key;
- /* fixed_k may contain a specific value of 'k', to be used in ECDSA signing.
- * This is only for the FIPS power-on tests. */
+ // fixed_k may contain a specific value of 'k', to be used in ECDSA signing.
+ // This is only for the FIPS power-on tests.
BIGNUM *fixed_k;
unsigned int enc_flag;
@@ -253,13 +253,13 @@
int nid;
const uint8_t *oid;
uint8_t oid_len;
- /* comment is a human-readable string describing the curve. */
+ // comment is a human-readable string describing the curve.
const char *comment;
- /* param_len is the number of bytes needed to store a field element. */
+ // param_len is the number of bytes needed to store a field element.
uint8_t param_len;
- /* params points to an array of 6*|param_len| bytes which hold the field
- * elements of the following (in big-endian order): prime, a, b, generator x,
- * generator y, order. */
+ // params points to an array of 6*|param_len| bytes which hold the field
+ // elements of the following (in big-endian order): prime, a, b, generator x,
+ // generator y, order.
const uint8_t *params;
const EC_METHOD *method;
};
@@ -270,13 +270,13 @@
struct built_in_curve curves[OPENSSL_NUM_BUILT_IN_CURVES];
};
-/* OPENSSL_built_in_curves returns a pointer to static information about
- * standard curves. The array is terminated with an entry where |nid| is
- * |NID_undef|. */
+// OPENSSL_built_in_curves returns a pointer to static information about
+// standard curves. The array is terminated with an entry where |nid| is
+// |NID_undef|.
const struct built_in_curves *OPENSSL_built_in_curves(void);
#if defined(__cplusplus)
-} /* extern C */
+} // extern C
#endif
-#endif /* OPENSSL_HEADER_EC_INTERNAL_H */
+#endif // OPENSSL_HEADER_EC_INTERNAL_H
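The |params| layout documented above lends itself to simple slicing. A hypothetical helper (not library code, and assuming this internal header is visible for |struct built_in_curve|):

#include <stddef.h>
#include <openssl/bn.h>

// Indices of the six big-endian field elements, each |param_len| bytes.
enum { CURVE_P, CURVE_A, CURVE_B, CURVE_GX, CURVE_GY, CURVE_ORDER };

BIGNUM *get_curve_param(const struct built_in_curve *curve, int i) {
  return BN_bin2bn(curve->params + (size_t)i * curve->param_len,
                   curve->param_len, NULL);  // returns NULL on failure
}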
diff --git a/crypto/fipsmodule/ec/oct.c b/crypto/fipsmodule/ec/oct.c
index 5071c2e..cf51e4b 100644
--- a/crypto/fipsmodule/ec/oct.c
+++ b/crypto/fipsmodule/ec/oct.c
@@ -94,12 +94,12 @@
goto err;
}
- /* ret := required output buffer length */
+ // ret := required output buffer length
field_len = BN_num_bytes(&group->field);
ret =
(form == POINT_CONVERSION_COMPRESSED) ? 1 + field_len : 1 + 2 * field_len;
- /* if 'buf' is NULL, just return required length */
+ // if 'buf' is NULL, just return required length
if (buf != NULL) {
if (len < ret) {
OPENSSL_PUT_ERROR(EC, EC_R_BUFFER_TOO_SMALL);
@@ -299,13 +299,13 @@
goto err;
}
- /* Recover y. We have a Weierstrass equation
- * y^2 = x^3 + a*x + b,
- * so y is one of the square roots of x^3 + a*x + b. */
+ // Recover y. We have a Weierstrass equation
+ // y^2 = x^3 + a*x + b,
+ // so y is one of the square roots of x^3 + a*x + b.
- /* tmp1 := x^3 */
+ // tmp1 := x^3
if (group->meth->field_decode == 0) {
- /* field_{sqr,mul} work on standard representation */
+ // field_{sqr,mul} work on standard representation
if (!group->meth->field_sqr(group, tmp2, x, ctx) ||
!group->meth->field_mul(group, tmp1, tmp2, x, ctx)) {
goto err;
@@ -317,7 +317,7 @@
}
}
- /* tmp1 := tmp1 + a*x */
+ // tmp1 := tmp1 + a*x
if (group->a_is_minus3) {
if (!BN_mod_lshift1_quick(tmp2, x, &group->field) ||
!BN_mod_add_quick(tmp2, tmp2, x, &group->field) ||
@@ -331,7 +331,7 @@
goto err;
}
} else {
- /* field_mul works on standard representation */
+ // field_mul works on standard representation
if (!group->meth->field_mul(group, tmp2, &group->a, x, ctx)) {
goto err;
}
@@ -342,7 +342,7 @@
}
}
- /* tmp1 := tmp1 + b */
+ // tmp1 := tmp1 + b
if (group->meth->field_decode) {
if (!group->meth->field_decode(group, tmp2, &group->b, ctx) ||
!BN_mod_add_quick(tmp1, tmp1, tmp2, &group->field)) {
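The decompression math above can be restated in public BIGNUM calls for a curve in standard (non-Montgomery) representation: given x, compute y as a square root of x^3 + a*x + b mod p. A hedged sketch with error handling elided; the library's version additionally picks the root matching the encoded sign bit:

#include <openssl/bn.h>

int recover_y(BIGNUM *y, const BIGNUM *x, const BIGNUM *a, const BIGNUM *b,
              const BIGNUM *p, BN_CTX *ctx) {
  BIGNUM *rhs = BN_new();
  BIGNUM *tmp = BN_new();
  BN_mod_sqr(tmp, x, p, ctx);         // tmp = x^2
  BN_mod_mul(rhs, tmp, x, p, ctx);    // rhs = x^3
  BN_mod_mul(tmp, a, x, p, ctx);      // tmp = a*x
  BN_mod_add(rhs, rhs, tmp, p, ctx);  // rhs = x^3 + a*x
  BN_mod_add(rhs, rhs, b, p, ctx);    // rhs = x^3 + a*x + b
  int ok = BN_mod_sqrt(y, rhs, p, ctx) != NULL;  // y = sqrt(rhs), if a QR
  BN_free(tmp);
  BN_free(rhs);
  return ok;
}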
diff --git a/crypto/fipsmodule/ec/p224-64.c b/crypto/fipsmodule/ec/p224-64.c
index 67dfcc8..ec5a93d 100644
--- a/crypto/fipsmodule/ec/p224-64.c
+++ b/crypto/fipsmodule/ec/p224-64.c
@@ -12,10 +12,10 @@
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */
-/* A 64-bit implementation of the NIST P-224 elliptic curve point multiplication
- *
- * Inspired by Daniel J. Bernstein's public domain nistp224 implementation
- * and Adam Langley's public domain 64-bit C implementation of curve25519. */
+// A 64-bit implementation of the NIST P-224 elliptic curve point multiplication
+//
+// Inspired by Daniel J. Bernstein's public domain nistp224 implementation
+// and Adam Langley's public domain 64-bit C implementation of curve25519.
#include <openssl/base.h>
@@ -34,18 +34,18 @@
#include "../../internal.h"
-/* Field elements are represented as a_0 + 2^56*a_1 + 2^112*a_2 + 2^168*a_3
- * using 64-bit coefficients called 'limbs', and sometimes (for multiplication
- * results) as b_0 + 2^56*b_1 + 2^112*b_2 + 2^168*b_3 + 2^224*b_4 + 2^280*b_5 +
- * 2^336*b_6 using 128-bit coefficients called 'widelimbs'. A 4-p224_limb
- * representation is an 'p224_felem'; a 7-p224_widelimb representation is a
- * 'p224_widefelem'. Even within felems, bits of adjacent limbs overlap, and we
- * don't always reduce the representations: we ensure that inputs to each
- * p224_felem multiplication satisfy a_i < 2^60, so outputs satisfy b_i <
- * 4*2^60*2^60, and fit into a 128-bit word without overflow. The coefficients
- * are then again partially reduced to obtain an p224_felem satisfying a_i <
- * 2^57. We only reduce to the unique minimal representation at the end of the
- * computation. */
+// Field elements are represented as a_0 + 2^56*a_1 + 2^112*a_2 + 2^168*a_3
+// using 64-bit coefficients called 'limbs', and sometimes (for multiplication
+// results) as b_0 + 2^56*b_1 + 2^112*b_2 + 2^168*b_3 + 2^224*b_4 + 2^280*b_5 +
+// 2^336*b_6 using 128-bit coefficients called 'widelimbs'. A 4-p224_limb
+// representation is an 'p224_felem'; a 7-p224_widelimb representation is a
+// 'p224_widefelem'. Even within felems, bits of adjacent limbs overlap, and we
+// don't always reduce the representations: we ensure that inputs to each
+// p224_felem multiplication satisfy a_i < 2^60, so outputs satisfy b_i <
+// 4*2^60*2^60, and fit into a 128-bit word without overflow. The coefficients
+// are then again partially reduced to obtain a p224_felem satisfying a_i <
+// 2^57. We only reduce to the unique minimal representation at the end of the
+// computation.
typedef uint64_t p224_limb;
typedef uint128_t p224_widelimb;
@@ -53,40 +53,40 @@
typedef p224_limb p224_felem[4];
typedef p224_widelimb p224_widefelem[7];
-/* Field element represented as a byte arrary. 28*8 = 224 bits is also the
- * group order size for the elliptic curve, and we also use this type for
- * scalars for point multiplication. */
+// Field element represented as a byte array. 28*8 = 224 bits is also the
+// group order size for the elliptic curve, and we also use this type for
+// scalars for point multiplication.
typedef uint8_t p224_felem_bytearray[28];
-/* Precomputed multiples of the standard generator
- * Points are given in coordinates (X, Y, Z) where Z normally is 1
- * (0 for the point at infinity).
- * For each field element, slice a_0 is word 0, etc.
- *
- * The table has 2 * 16 elements, starting with the following:
- * index | bits | point
- * ------+---------+------------------------------
- * 0 | 0 0 0 0 | 0G
- * 1 | 0 0 0 1 | 1G
- * 2 | 0 0 1 0 | 2^56G
- * 3 | 0 0 1 1 | (2^56 + 1)G
- * 4 | 0 1 0 0 | 2^112G
- * 5 | 0 1 0 1 | (2^112 + 1)G
- * 6 | 0 1 1 0 | (2^112 + 2^56)G
- * 7 | 0 1 1 1 | (2^112 + 2^56 + 1)G
- * 8 | 1 0 0 0 | 2^168G
- * 9 | 1 0 0 1 | (2^168 + 1)G
- * 10 | 1 0 1 0 | (2^168 + 2^56)G
- * 11 | 1 0 1 1 | (2^168 + 2^56 + 1)G
- * 12 | 1 1 0 0 | (2^168 + 2^112)G
- * 13 | 1 1 0 1 | (2^168 + 2^112 + 1)G
- * 14 | 1 1 1 0 | (2^168 + 2^112 + 2^56)G
- * 15 | 1 1 1 1 | (2^168 + 2^112 + 2^56 + 1)G
- * followed by a copy of this with each element multiplied by 2^28.
- *
- * The reason for this is so that we can clock bits into four different
- * locations when doing simple scalar multiplies against the base point,
- * and then another four locations using the second 16 elements. */
+// Precomputed multiples of the standard generator.
+// Points are given in coordinates (X, Y, Z) where Z normally is 1
+// (0 for the point at infinity).
+// For each field element, slice a_0 is word 0, etc.
+//
+// The table has 2 * 16 elements, starting with the following:
+// index | bits | point
+// ------+---------+------------------------------
+// 0 | 0 0 0 0 | 0G
+// 1 | 0 0 0 1 | 1G
+// 2 | 0 0 1 0 | 2^56G
+// 3 | 0 0 1 1 | (2^56 + 1)G
+// 4 | 0 1 0 0 | 2^112G
+// 5 | 0 1 0 1 | (2^112 + 1)G
+// 6 | 0 1 1 0 | (2^112 + 2^56)G
+// 7 | 0 1 1 1 | (2^112 + 2^56 + 1)G
+// 8 | 1 0 0 0 | 2^168G
+// 9 | 1 0 0 1 | (2^168 + 1)G
+// 10 | 1 0 1 0 | (2^168 + 2^56)G
+// 11 | 1 0 1 1 | (2^168 + 2^56 + 1)G
+// 12 | 1 1 0 0 | (2^168 + 2^112)G
+// 13 | 1 1 0 1 | (2^168 + 2^112 + 1)G
+// 14 | 1 1 1 0 | (2^168 + 2^112 + 2^56)G
+// 15 | 1 1 1 1 | (2^168 + 2^112 + 2^56 + 1)G
+// followed by a copy of this with each element multiplied by 2^28.
+//
+// The reason for this is so that we can clock bits into four different
+// locations when doing simple scalar multiplies against the base point,
+// and then another four locations using the second 16 elements.
static const p224_felem g_p224_pre_comp[2][16][3] = {
{{{0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}},
{{0x3280d6115c1d21, 0xc1d356c2112234, 0x7f321390b94a03, 0xb70e0cbd6bb4bf},
@@ -187,7 +187,7 @@
return ret;
}
-/* Helper functions to convert field elements to/from internal representation */
+// Helper functions to convert field elements to/from internal representation
static void p224_bin28_to_felem(p224_felem out, const uint8_t in[28]) {
out[0] = p224_load_u64(in) & 0x00ffffffffffffff;
out[1] = p224_load_u64(in + 7) & 0x00ffffffffffffff;
@@ -204,16 +204,16 @@
}
}
-/* To preserve endianness when using BN_bn2bin and BN_bin2bn */
+// To preserve endianness when using BN_bn2bin and BN_bin2bn
static void p224_flip_endian(uint8_t *out, const uint8_t *in, size_t len) {
for (size_t i = 0; i < len; ++i) {
out[i] = in[len - 1 - i];
}
}
-/* From OpenSSL BIGNUM to internal representation */
+// From OpenSSL BIGNUM to internal representation
static int p224_BN_to_felem(p224_felem out, const BIGNUM *bn) {
- /* BN_bn2bin eats leading zeroes */
+ // BN_bn2bin eats leading zeroes
p224_felem_bytearray b_out;
OPENSSL_memset(b_out, 0, sizeof(b_out));
size_t num_bytes = BN_num_bytes(bn);
@@ -230,7 +230,7 @@
return 1;
}
-/* From internal representation to OpenSSL BIGNUM */
+// From internal representation to OpenSSL BIGNUM
static BIGNUM *p224_felem_to_BN(BIGNUM *out, const p224_felem in) {
p224_felem_bytearray b_in, b_out;
p224_felem_to_bin28(b_in, in);
@@ -238,10 +238,10 @@
return BN_bin2bn(b_out, sizeof(b_out), out);
}
-/* Field operations, using the internal representation of field elements.
- * NB! These operations are specific to our point multiplication and cannot be
- * expected to be correct in general - e.g., multiplication with a large scalar
- * will cause an overflow. */
+// Field operations, using the internal representation of field elements.
+// NB! These operations are specific to our point multiplication and cannot be
+// expected to be correct in general - e.g., multiplication with a large scalar
+// will cause an overflow.
static void p224_felem_assign(p224_felem out, const p224_felem in) {
out[0] = in[0];
@@ -250,7 +250,7 @@
out[3] = in[3];
}
-/* Sum two field elements: out += in */
+// Sum two field elements: out += in
static void p224_felem_sum(p224_felem out, const p224_felem in) {
out[0] += in[0];
out[1] += in[1];
@@ -258,8 +258,8 @@
out[3] += in[3];
}
-/* Get negative value: out = -in */
-/* Assumes in[i] < 2^57 */
+// Get negative value: out = -in
+// Assumes in[i] < 2^57
static void p224_felem_neg(p224_felem out, const p224_felem in) {
static const p224_limb two58p2 =
(((p224_limb)1) << 58) + (((p224_limb)1) << 2);
@@ -268,15 +268,15 @@
static const p224_limb two58m42m2 =
(((p224_limb)1) << 58) - (((p224_limb)1) << 42) - (((p224_limb)1) << 2);
- /* Set to 0 mod 2^224-2^96+1 to ensure out > in */
+ // Set to 0 mod 2^224-2^96+1 to ensure out > in
out[0] = two58p2 - in[0];
out[1] = two58m42m2 - in[1];
out[2] = two58m2 - in[2];
out[3] = two58m2 - in[3];
}
-/* Subtract field elements: out -= in */
-/* Assumes in[i] < 2^57 */
+// Subtract field elements: out -= in
+// Assumes in[i] < 2^57
static void p224_felem_diff(p224_felem out, const p224_felem in) {
static const p224_limb two58p2 =
(((p224_limb)1) << 58) + (((p224_limb)1) << 2);
@@ -285,7 +285,7 @@
static const p224_limb two58m42m2 =
(((p224_limb)1) << 58) - (((p224_limb)1) << 42) - (((p224_limb)1) << 2);
- /* Add 0 mod 2^224-2^96+1 to ensure out > in */
+ // Add 0 mod 2^224-2^96+1 to ensure out > in
out[0] += two58p2;
out[1] += two58m42m2;
out[2] += two58m2;
@@ -297,8 +297,8 @@
out[3] -= in[3];
}
-/* Subtract in unreduced 128-bit mode: out -= in */
-/* Assumes in[i] < 2^119 */
+// Subtract in unreduced 128-bit mode: out -= in
+// Assumes in[i] < 2^119
static void p224_widefelem_diff(p224_widefelem out, const p224_widefelem in) {
static const p224_widelimb two120 = ((p224_widelimb)1) << 120;
static const p224_widelimb two120m64 =
@@ -307,7 +307,7 @@
(((p224_widelimb)1) << 104) -
(((p224_widelimb)1) << 64);
- /* Add 0 mod 2^224-2^96+1 to ensure out > in */
+ // Add 0 mod 2^224-2^96+1 to ensure out > in
out[0] += two120;
out[1] += two120m64;
out[2] += two120m64;
@@ -325,8 +325,8 @@
out[6] -= in[6];
}
-/* Subtract in mixed mode: out128 -= in64 */
-/* in[i] < 2^63 */
+// Subtract in mixed mode: out128 -= in64
+// in[i] < 2^63
static void p224_felem_diff_128_64(p224_widefelem out, const p224_felem in) {
static const p224_widelimb two64p8 =
(((p224_widelimb)1) << 64) + (((p224_widelimb)1) << 8);
@@ -336,7 +336,7 @@
(((p224_widelimb)1) << 48) -
(((p224_widelimb)1) << 8);
- /* Add 0 mod 2^224-2^96+1 to ensure out > in */
+ // Add 0 mod 2^224-2^96+1 to ensure out > in
out[0] += two64p8;
out[1] += two64m48m8;
out[2] += two64m8;
@@ -348,8 +348,8 @@
out[3] -= in[3];
}
-/* Multiply a field element by a scalar: out = out * scalar
- * The scalars we actually use are small, so results fit without overflow */
+// Multiply a field element by a scalar: out = out * scalar
+// The scalars we actually use are small, so results fit without overflow
static void p224_felem_scalar(p224_felem out, const p224_limb scalar) {
out[0] *= scalar;
out[1] *= scalar;
@@ -357,8 +357,8 @@
out[3] *= scalar;
}
-/* Multiply an unreduced field element by a scalar: out = out * scalar
- * The scalars we actually use are small, so results fit without overflow */
+// Multiply an unreduced field element by a scalar: out = out * scalar
+// The scalars we actually use are small, so results fit without overflow
static void p224_widefelem_scalar(p224_widefelem out,
const p224_widelimb scalar) {
out[0] *= scalar;
@@ -370,7 +370,7 @@
out[6] *= scalar;
}
-/* Square a field element: out = in^2 */
+// Square a field element: out = in^2
static void p224_felem_square(p224_widefelem out, const p224_felem in) {
p224_limb tmp0, tmp1, tmp2;
tmp0 = 2 * in[0];
@@ -385,7 +385,7 @@
out[6] = ((p224_widelimb)in[3]) * in[3];
}
-/* Multiply two field elements: out = in1 * in2 */
+// Multiply two field elements: out = in1 * in2
static void p224_felem_mul(p224_widefelem out, const p224_felem in1,
const p224_felem in2) {
out[0] = ((p224_widelimb)in1[0]) * in2[0];
@@ -400,9 +400,9 @@
out[6] = ((p224_widelimb)in1[3]) * in2[3];
}
-/* Reduce seven 128-bit coefficients to four 64-bit coefficients.
- * Requires in[i] < 2^126,
- * ensures out[0] < 2^56, out[1] < 2^56, out[2] < 2^56, out[3] <= 2^56 + 2^16 */
+// Reduce seven 128-bit coefficients to four 64-bit coefficients.
+// Requires in[i] < 2^126,
+// ensures out[0] < 2^56, out[1] < 2^56, out[2] < 2^56, out[3] <= 2^56 + 2^16
static void p224_felem_reduce(p224_felem out, const p224_widefelem in) {
static const p224_widelimb two127p15 =
(((p224_widelimb)1) << 127) + (((p224_widelimb)1) << 15);
@@ -413,14 +413,14 @@
(((p224_widelimb)1) << 55);
p224_widelimb output[5];
- /* Add 0 mod 2^224-2^96+1 to ensure all differences are positive */
+ // Add 0 mod 2^224-2^96+1 to ensure all differences are positive
output[0] = in[0] + two127p15;
output[1] = in[1] + two127m71m55;
output[2] = in[2] + two127m71;
output[3] = in[3];
output[4] = in[4];
- /* Eliminate in[4], in[5], in[6] */
+ // Eliminate in[4], in[5], in[6]
output[4] += in[6] >> 16;
output[3] += (in[6] & 0xffff) << 40;
output[2] -= in[6];
@@ -433,90 +433,90 @@
output[1] += (output[4] & 0xffff) << 40;
output[0] -= output[4];
- /* Carry 2 -> 3 -> 4 */
+ // Carry 2 -> 3 -> 4
output[3] += output[2] >> 56;
output[2] &= 0x00ffffffffffffff;
output[4] = output[3] >> 56;
output[3] &= 0x00ffffffffffffff;
- /* Now output[2] < 2^56, output[3] < 2^56, output[4] < 2^72 */
+ // Now output[2] < 2^56, output[3] < 2^56, output[4] < 2^72
- /* Eliminate output[4] */
+ // Eliminate output[4]
output[2] += output[4] >> 16;
- /* output[2] < 2^56 + 2^56 = 2^57 */
+ // output[2] < 2^56 + 2^56 = 2^57
output[1] += (output[4] & 0xffff) << 40;
output[0] -= output[4];
- /* Carry 0 -> 1 -> 2 -> 3 */
+ // Carry 0 -> 1 -> 2 -> 3
output[1] += output[0] >> 56;
out[0] = output[0] & 0x00ffffffffffffff;
output[2] += output[1] >> 56;
- /* output[2] < 2^57 + 2^72 */
+ // output[2] < 2^57 + 2^72
out[1] = output[1] & 0x00ffffffffffffff;
output[3] += output[2] >> 56;
- /* output[3] <= 2^56 + 2^16 */
+ // output[3] <= 2^56 + 2^16
out[2] = output[2] & 0x00ffffffffffffff;
- /* out[0] < 2^56, out[1] < 2^56, out[2] < 2^56,
- * out[3] <= 2^56 + 2^16 (due to final carry),
- * so out < 2*p */
+ // out[0] < 2^56, out[1] < 2^56, out[2] < 2^56,
+ // out[3] <= 2^56 + 2^16 (due to final carry),
+ // so out < 2*p
out[3] = output[3];
}
-/* Reduce to unique minimal representation.
- * Requires 0 <= in < 2*p (always call p224_felem_reduce first) */
+// Reduce to unique minimal representation.
+// Requires 0 <= in < 2*p (always call p224_felem_reduce first)
static void p224_felem_contract(p224_felem out, const p224_felem in) {
static const int64_t two56 = ((p224_limb)1) << 56;
- /* 0 <= in < 2*p, p = 2^224 - 2^96 + 1 */
- /* if in > p , reduce in = in - 2^224 + 2^96 - 1 */
+ // 0 <= in < 2*p, p = 2^224 - 2^96 + 1
+ // if in > p, reduce in = in - 2^224 + 2^96 - 1
int64_t tmp[4], a;
tmp[0] = in[0];
tmp[1] = in[1];
tmp[2] = in[2];
tmp[3] = in[3];
- /* Case 1: a = 1 iff in >= 2^224 */
+ // Case 1: a = 1 iff in >= 2^224
a = (in[3] >> 56);
tmp[0] -= a;
tmp[1] += a << 40;
tmp[3] &= 0x00ffffffffffffff;
- /* Case 2: a = 0 iff p <= in < 2^224, i.e., the high 128 bits are all 1 and
- * the lower part is non-zero */
+ // Case 2: a = 0 iff p <= in < 2^224, i.e., the high 128 bits are all 1 and
+ // the lower part is non-zero
a = ((in[3] & in[2] & (in[1] | 0x000000ffffffffff)) + 1) |
(((int64_t)(in[0] + (in[1] & 0x000000ffffffffff)) - 1) >> 63);
a &= 0x00ffffffffffffff;
- /* turn a into an all-one mask (if a = 0) or an all-zero mask */
+ // turn a into an all-one mask (if a = 0) or an all-zero mask
a = (a - 1) >> 63;
- /* subtract 2^224 - 2^96 + 1 if a is all-one */
+ // subtract 2^224 - 2^96 + 1 if a is all-one
tmp[3] &= a ^ 0xffffffffffffffff;
tmp[2] &= a ^ 0xffffffffffffffff;
tmp[1] &= (a ^ 0xffffffffffffffff) | 0x000000ffffffffff;
tmp[0] -= 1 & a;
- /* eliminate negative coefficients: if tmp[0] is negative, tmp[1] must
- * be non-zero, so we only need one step */
+ // eliminate negative coefficients: if tmp[0] is negative, tmp[1] must
+ // be non-zero, so we only need one step
a = tmp[0] >> 63;
tmp[0] += two56 & a;
tmp[1] -= 1 & a;
- /* carry 1 -> 2 -> 3 */
+ // carry 1 -> 2 -> 3
tmp[2] += tmp[1] >> 56;
tmp[1] &= 0x00ffffffffffffff;
tmp[3] += tmp[2] >> 56;
tmp[2] &= 0x00ffffffffffffff;
- /* Now 0 <= out < p */
+ // Now 0 <= out < p
out[0] = tmp[0];
out[1] = tmp[1];
out[2] = tmp[2];
out[3] = tmp[3];
}
-/* Zero-check: returns 1 if input is 0, and 0 otherwise. We know that field
- * elements are reduced to in < 2^225, so we only need to check three cases: 0,
- * 2^224 - 2^96 + 1, and 2^225 - 2^97 + 2 */
+// Zero-check: returns 1 if input is 0, and 0 otherwise. We know that field
+// elements are reduced to in < 2^225, so we only need to check three cases: 0,
+// 2^224 - 2^96 + 1, and 2^225 - 2^97 + 2
static p224_limb p224_felem_is_zero(const p224_felem in) {
p224_limb zero = in[0] | in[1] | in[2] | in[3];
zero = (((int64_t)(zero)-1) >> 63) & 1;
@@ -532,92 +532,92 @@
return (zero | two224m96p1 | two225m97p2);
}
-/* Invert a field element */
-/* Computation chain copied from djb's code */
+// Invert a field element
+// Computation chain copied from djb's code
static void p224_felem_inv(p224_felem out, const p224_felem in) {
p224_felem ftmp, ftmp2, ftmp3, ftmp4;
p224_widefelem tmp;
p224_felem_square(tmp, in);
- p224_felem_reduce(ftmp, tmp); /* 2 */
+ p224_felem_reduce(ftmp, tmp); // 2
p224_felem_mul(tmp, in, ftmp);
- p224_felem_reduce(ftmp, tmp); /* 2^2 - 1 */
+ p224_felem_reduce(ftmp, tmp); // 2^2 - 1
p224_felem_square(tmp, ftmp);
- p224_felem_reduce(ftmp, tmp); /* 2^3 - 2 */
+ p224_felem_reduce(ftmp, tmp); // 2^3 - 2
p224_felem_mul(tmp, in, ftmp);
- p224_felem_reduce(ftmp, tmp); /* 2^3 - 1 */
+ p224_felem_reduce(ftmp, tmp); // 2^3 - 1
p224_felem_square(tmp, ftmp);
- p224_felem_reduce(ftmp2, tmp); /* 2^4 - 2 */
+ p224_felem_reduce(ftmp2, tmp); // 2^4 - 2
p224_felem_square(tmp, ftmp2);
- p224_felem_reduce(ftmp2, tmp); /* 2^5 - 4 */
+ p224_felem_reduce(ftmp2, tmp); // 2^5 - 4
p224_felem_square(tmp, ftmp2);
- p224_felem_reduce(ftmp2, tmp); /* 2^6 - 8 */
+ p224_felem_reduce(ftmp2, tmp); // 2^6 - 8
p224_felem_mul(tmp, ftmp2, ftmp);
- p224_felem_reduce(ftmp, tmp); /* 2^6 - 1 */
+ p224_felem_reduce(ftmp, tmp); // 2^6 - 1
p224_felem_square(tmp, ftmp);
- p224_felem_reduce(ftmp2, tmp); /* 2^7 - 2 */
- for (size_t i = 0; i < 5; ++i) { /* 2^12 - 2^6 */
+ p224_felem_reduce(ftmp2, tmp); // 2^7 - 2
+ for (size_t i = 0; i < 5; ++i) { // 2^12 - 2^6
p224_felem_square(tmp, ftmp2);
p224_felem_reduce(ftmp2, tmp);
}
p224_felem_mul(tmp, ftmp2, ftmp);
- p224_felem_reduce(ftmp2, tmp); /* 2^12 - 1 */
+ p224_felem_reduce(ftmp2, tmp); // 2^12 - 1
p224_felem_square(tmp, ftmp2);
- p224_felem_reduce(ftmp3, tmp); /* 2^13 - 2 */
- for (size_t i = 0; i < 11; ++i) {/* 2^24 - 2^12 */
+ p224_felem_reduce(ftmp3, tmp); // 2^13 - 2
+ for (size_t i = 0; i < 11; ++i) { // 2^24 - 2^12
p224_felem_square(tmp, ftmp3);
p224_felem_reduce(ftmp3, tmp);
}
p224_felem_mul(tmp, ftmp3, ftmp2);
- p224_felem_reduce(ftmp2, tmp); /* 2^24 - 1 */
+ p224_felem_reduce(ftmp2, tmp); // 2^24 - 1
p224_felem_square(tmp, ftmp2);
- p224_felem_reduce(ftmp3, tmp); /* 2^25 - 2 */
- for (size_t i = 0; i < 23; ++i) {/* 2^48 - 2^24 */
+ p224_felem_reduce(ftmp3, tmp); // 2^25 - 2
+ for (size_t i = 0; i < 23; ++i) { // 2^48 - 2^24
p224_felem_square(tmp, ftmp3);
p224_felem_reduce(ftmp3, tmp);
}
p224_felem_mul(tmp, ftmp3, ftmp2);
- p224_felem_reduce(ftmp3, tmp); /* 2^48 - 1 */
+ p224_felem_reduce(ftmp3, tmp); // 2^48 - 1
p224_felem_square(tmp, ftmp3);
- p224_felem_reduce(ftmp4, tmp); /* 2^49 - 2 */
- for (size_t i = 0; i < 47; ++i) {/* 2^96 - 2^48 */
+ p224_felem_reduce(ftmp4, tmp); // 2^49 - 2
+ for (size_t i = 0; i < 47; ++i) { // 2^96 - 2^48
p224_felem_square(tmp, ftmp4);
p224_felem_reduce(ftmp4, tmp);
}
p224_felem_mul(tmp, ftmp3, ftmp4);
- p224_felem_reduce(ftmp3, tmp); /* 2^96 - 1 */
+ p224_felem_reduce(ftmp3, tmp); // 2^96 - 1
p224_felem_square(tmp, ftmp3);
- p224_felem_reduce(ftmp4, tmp); /* 2^97 - 2 */
- for (size_t i = 0; i < 23; ++i) {/* 2^120 - 2^24 */
+ p224_felem_reduce(ftmp4, tmp); // 2^97 - 2
+ for (size_t i = 0; i < 23; ++i) { // 2^120 - 2^24
p224_felem_square(tmp, ftmp4);
p224_felem_reduce(ftmp4, tmp);
}
p224_felem_mul(tmp, ftmp2, ftmp4);
- p224_felem_reduce(ftmp2, tmp); /* 2^120 - 1 */
- for (size_t i = 0; i < 6; ++i) { /* 2^126 - 2^6 */
+ p224_felem_reduce(ftmp2, tmp); // 2^120 - 1
+ for (size_t i = 0; i < 6; ++i) { // 2^126 - 2^6
p224_felem_square(tmp, ftmp2);
p224_felem_reduce(ftmp2, tmp);
}
p224_felem_mul(tmp, ftmp2, ftmp);
- p224_felem_reduce(ftmp, tmp); /* 2^126 - 1 */
+ p224_felem_reduce(ftmp, tmp); // 2^126 - 1
p224_felem_square(tmp, ftmp);
- p224_felem_reduce(ftmp, tmp); /* 2^127 - 2 */
+ p224_felem_reduce(ftmp, tmp); // 2^127 - 2
p224_felem_mul(tmp, ftmp, in);
- p224_felem_reduce(ftmp, tmp); /* 2^127 - 1 */
- for (size_t i = 0; i < 97; ++i) {/* 2^224 - 2^97 */
+ p224_felem_reduce(ftmp, tmp); // 2^127 - 1
+ for (size_t i = 0; i < 97; ++i) { // 2^224 - 2^97
p224_felem_square(tmp, ftmp);
p224_felem_reduce(ftmp, tmp);
}
p224_felem_mul(tmp, ftmp, ftmp3);
- p224_felem_reduce(out, tmp); /* 2^224 - 2^96 - 1 */
+ p224_felem_reduce(out, tmp); // 2^224 - 2^96 - 1
}
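The chain above is a fixed addition chain for the exponent
p - 2 = 2^224 - 2^96 - 1. For orientation, here is a hedged, variable-time
square-and-multiply over a toy word-sized modulus (toy_inv and its
parameters are illustrative, not part of this file), showing the Fermat
inversion the chain implements in constant time:

    #include <stdint.h>

    // Computes a^(p-2) mod p, which is a^-1 mod p when p is prime and
    // p does not divide a (Fermat's little theorem).
    static uint64_t toy_inv(uint64_t a, uint64_t p) {
      uint64_t e = p - 2, r = 1;
      while (e != 0) {
        if (e & 1) {
          r = (uint64_t)(((unsigned __int128)r * a) % p);
        }
        a = (uint64_t)(((unsigned __int128)a * a) % p);
        e >>= 1;
      }
      return r;
    }

The production chain hardcodes every squaring and multiplication, so the
operation count (and hence the timing) is independent of the input.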
-/* Copy in constant time:
- * if icopy == 1, copy in to out,
- * if icopy == 0, copy out to itself. */
+// Copy in constant time:
+// if icopy == 1, copy in to out,
+// if icopy == 0, copy out to itself.
static void p224_copy_conditional(p224_felem out, const p224_felem in,
p224_limb icopy) {
- /* icopy is a (64-bit) 0 or 1, so copy is either all-zero or all-one */
+ // icopy is a (64-bit) 0 or 1, so copy is either all-zero or all-one
const p224_limb copy = -icopy;
for (size_t i = 0; i < 4; ++i) {
const p224_limb tmp = copy & (in[i] ^ out[i]);
@@ -625,19 +625,19 @@
}
}
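The same mask-and-XOR select, compacted into a standalone sketch with
hypothetical names:

    #include <stdint.h>

    // out = icopy ? in : out, without branching. icopy must be exactly 0 or 1.
    static void copy_conditional_sketch(uint64_t out[4], const uint64_t in[4],
                                        uint64_t icopy) {
      uint64_t mask = 0u - icopy;  // 0 -> all-zeros, 1 -> all-ones
      for (int i = 0; i < 4; i++) {
        out[i] ^= mask & (in[i] ^ out[i]);
      }
    }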
-/* ELLIPTIC CURVE POINT OPERATIONS
- *
- * Points are represented in Jacobian projective coordinates:
- * (X, Y, Z) corresponds to the affine point (X/Z^2, Y/Z^3),
- * or to the point at infinity if Z == 0. */
+// ELLIPTIC CURVE POINT OPERATIONS
+//
+// Points are represented in Jacobian projective coordinates:
+// (X, Y, Z) corresponds to the affine point (X/Z^2, Y/Z^3),
+// or to the point at infinity if Z == 0.
-/* Double an elliptic curve point:
- * (X', Y', Z') = 2 * (X, Y, Z), where
- * X' = (3 * (X - Z^2) * (X + Z^2))^2 - 8 * X * Y^2
- * Y' = 3 * (X - Z^2) * (X + Z^2) * (4 * X * Y^2 - X') - 8 * Y^2
- * Z' = (Y + Z)^2 - Y^2 - Z^2 = 2 * Y * Z
- * Outputs can equal corresponding inputs, i.e., x_out == x_in is allowed,
- * while x_out == y_in is not (maybe this works, but it's not tested). */
+// Double an elliptic curve point:
+// (X', Y', Z') = 2 * (X, Y, Z), where
+// X' = (3 * (X - Z^2) * (X + Z^2))^2 - 8 * X * Y^2
+// Y' = 3 * (X - Z^2) * (X + Z^2) * (4 * X * Y^2 - X') - 8 * Y^2
+// Z' = (Y + Z)^2 - Y^2 - Z^2 = 2 * Y * Z
+// Outputs can equal corresponding inputs, i.e., x_out == x_in is allowed,
+// while x_out == y_in is not (maybe this works, but it's not tested).
static void p224_point_double(p224_felem x_out, p224_felem y_out,
p224_felem z_out, const p224_felem x_in,
const p224_felem y_in, const p224_felem z_in) {
@@ -647,82 +647,82 @@
p224_felem_assign(ftmp, x_in);
p224_felem_assign(ftmp2, x_in);
- /* delta = z^2 */
+ // delta = z^2
p224_felem_square(tmp, z_in);
p224_felem_reduce(delta, tmp);
- /* gamma = y^2 */
+ // gamma = y^2
p224_felem_square(tmp, y_in);
p224_felem_reduce(gamma, tmp);
- /* beta = x*gamma */
+ // beta = x*gamma
p224_felem_mul(tmp, x_in, gamma);
p224_felem_reduce(beta, tmp);
- /* alpha = 3*(x-delta)*(x+delta) */
+ // alpha = 3*(x-delta)*(x+delta)
p224_felem_diff(ftmp, delta);
- /* ftmp[i] < 2^57 + 2^58 + 2 < 2^59 */
+ // ftmp[i] < 2^57 + 2^58 + 2 < 2^59
p224_felem_sum(ftmp2, delta);
- /* ftmp2[i] < 2^57 + 2^57 = 2^58 */
+ // ftmp2[i] < 2^57 + 2^57 = 2^58
p224_felem_scalar(ftmp2, 3);
- /* ftmp2[i] < 3 * 2^58 < 2^60 */
+ // ftmp2[i] < 3 * 2^58 < 2^60
p224_felem_mul(tmp, ftmp, ftmp2);
- /* tmp[i] < 2^60 * 2^59 * 4 = 2^121 */
+ // tmp[i] < 2^60 * 2^59 * 4 = 2^121
p224_felem_reduce(alpha, tmp);
- /* x' = alpha^2 - 8*beta */
+ // x' = alpha^2 - 8*beta
p224_felem_square(tmp, alpha);
- /* tmp[i] < 4 * 2^57 * 2^57 = 2^116 */
+ // tmp[i] < 4 * 2^57 * 2^57 = 2^116
p224_felem_assign(ftmp, beta);
p224_felem_scalar(ftmp, 8);
- /* ftmp[i] < 8 * 2^57 = 2^60 */
+ // ftmp[i] < 8 * 2^57 = 2^60
p224_felem_diff_128_64(tmp, ftmp);
- /* tmp[i] < 2^116 + 2^64 + 8 < 2^117 */
+ // tmp[i] < 2^116 + 2^64 + 8 < 2^117
p224_felem_reduce(x_out, tmp);
- /* z' = (y + z)^2 - gamma - delta */
+ // z' = (y + z)^2 - gamma - delta
p224_felem_sum(delta, gamma);
- /* delta[i] < 2^57 + 2^57 = 2^58 */
+ // delta[i] < 2^57 + 2^57 = 2^58
p224_felem_assign(ftmp, y_in);
p224_felem_sum(ftmp, z_in);
- /* ftmp[i] < 2^57 + 2^57 = 2^58 */
+ // ftmp[i] < 2^57 + 2^57 = 2^58
p224_felem_square(tmp, ftmp);
- /* tmp[i] < 4 * 2^58 * 2^58 = 2^118 */
+ // tmp[i] < 4 * 2^58 * 2^58 = 2^118
p224_felem_diff_128_64(tmp, delta);
- /* tmp[i] < 2^118 + 2^64 + 8 < 2^119 */
+ // tmp[i] < 2^118 + 2^64 + 8 < 2^119
p224_felem_reduce(z_out, tmp);
- /* y' = alpha*(4*beta - x') - 8*gamma^2 */
+ // y' = alpha*(4*beta - x') - 8*gamma^2
p224_felem_scalar(beta, 4);
- /* beta[i] < 4 * 2^57 = 2^59 */
+ // beta[i] < 4 * 2^57 = 2^59
p224_felem_diff(beta, x_out);
- /* beta[i] < 2^59 + 2^58 + 2 < 2^60 */
+ // beta[i] < 2^59 + 2^58 + 2 < 2^60
p224_felem_mul(tmp, alpha, beta);
- /* tmp[i] < 4 * 2^57 * 2^60 = 2^119 */
+ // tmp[i] < 4 * 2^57 * 2^60 = 2^119
p224_felem_square(tmp2, gamma);
- /* tmp2[i] < 4 * 2^57 * 2^57 = 2^116 */
+ // tmp2[i] < 4 * 2^57 * 2^57 = 2^116
p224_widefelem_scalar(tmp2, 8);
- /* tmp2[i] < 8 * 2^116 = 2^119 */
+ // tmp2[i] < 8 * 2^116 = 2^119
p224_widefelem_diff(tmp, tmp2);
- /* tmp[i] < 2^119 + 2^120 < 2^121 */
+ // tmp[i] < 2^119 + 2^120 < 2^121
p224_felem_reduce(y_out, tmp);
}
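The bound comments threaded through this function are interval arithmetic:
reduced p224 limbs stay below 2^57, so a product of two reduced elements has
widefelem coefficients below 4 * 2^57 * 2^57 = 2^116, and every intermediate
bound is checked to stay under 2^128 so no 128-bit coefficient can wrap
before the next p224_felem_reduce.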
-/* Add two elliptic curve points:
- * (X_1, Y_1, Z_1) + (X_2, Y_2, Z_2) = (X_3, Y_3, Z_3), where
- * X_3 = (Z_1^3 * Y_2 - Z_2^3 * Y_1)^2 - (Z_1^2 * X_2 - Z_2^2 * X_1)^3 -
- * 2 * Z_2^2 * X_1 * (Z_1^2 * X_2 - Z_2^2 * X_1)^2
- * Y_3 = (Z_1^3 * Y_2 - Z_2^3 * Y_1) * (Z_2^2 * X_1 * (Z_1^2 * X_2 - Z_2^2 *
- * X_1)^2 - X_3) -
- * Z_2^3 * Y_1 * (Z_1^2 * X_2 - Z_2^2 * X_1)^3
- * Z_3 = (Z_1^2 * X_2 - Z_2^2 * X_1) * (Z_1 * Z_2)
- *
- * This runs faster if 'mixed' is set, which requires Z_2 = 1 or Z_2 = 0. */
+// Add two elliptic curve points:
+// (X_1, Y_1, Z_1) + (X_2, Y_2, Z_2) = (X_3, Y_3, Z_3), where
+// X_3 = (Z_1^3 * Y_2 - Z_2^3 * Y_1)^2 - (Z_1^2 * X_2 - Z_2^2 * X_1)^3 -
+// 2 * Z_2^2 * X_1 * (Z_1^2 * X_2 - Z_2^2 * X_1)^2
+// Y_3 = (Z_1^3 * Y_2 - Z_2^3 * Y_1) * (Z_2^2 * X_1 * (Z_1^2 * X_2 - Z_2^2 *
+// X_1)^2 - X_3) -
+// Z_2^3 * Y_1 * (Z_1^2 * X_2 - Z_2^2 * X_1)^3
+// Z_3 = (Z_1^2 * X_2 - Z_2^2 * X_1) * (Z_1 * Z_2)
+//
+// This runs faster if 'mixed' is set, which requires Z_2 = 1 or Z_2 = 0.
-/* This function is not entirely constant-time: it includes a branch for
- * checking whether the two input points are equal, (while not equal to the
- * point at infinity). This case never happens during single point
- * multiplication, so there is no timing leak for ECDH or ECDSA signing. */
+// This function is not entirely constant-time: it includes a branch for
+// checking whether the two input points are equal (while not equal to the
+// point at infinity). This case never happens during single point
+// multiplication, so there is no timing leak for ECDH or ECDSA signing.
static void p224_point_add(p224_felem x3, p224_felem y3, p224_felem z3,
const p224_felem x1, const p224_felem y1,
const p224_felem z1, const int mixed,
@@ -733,136 +733,136 @@
p224_limb z1_is_zero, z2_is_zero, x_equal, y_equal;
if (!mixed) {
- /* ftmp2 = z2^2 */
+ // ftmp2 = z2^2
p224_felem_square(tmp, z2);
p224_felem_reduce(ftmp2, tmp);
- /* ftmp4 = z2^3 */
+ // ftmp4 = z2^3
p224_felem_mul(tmp, ftmp2, z2);
p224_felem_reduce(ftmp4, tmp);
- /* ftmp4 = z2^3*y1 */
+ // ftmp4 = z2^3*y1
p224_felem_mul(tmp2, ftmp4, y1);
p224_felem_reduce(ftmp4, tmp2);
- /* ftmp2 = z2^2*x1 */
+ // ftmp2 = z2^2*x1
p224_felem_mul(tmp2, ftmp2, x1);
p224_felem_reduce(ftmp2, tmp2);
} else {
- /* We'll assume z2 = 1 (special case z2 = 0 is handled later) */
+ // We'll assume z2 = 1 (special case z2 = 0 is handled later)
- /* ftmp4 = z2^3*y1 */
+ // ftmp4 = z2^3*y1
p224_felem_assign(ftmp4, y1);
- /* ftmp2 = z2^2*x1 */
+ // ftmp2 = z2^2*x1
p224_felem_assign(ftmp2, x1);
}
- /* ftmp = z1^2 */
+ // ftmp = z1^2
p224_felem_square(tmp, z1);
p224_felem_reduce(ftmp, tmp);
- /* ftmp3 = z1^3 */
+ // ftmp3 = z1^3
p224_felem_mul(tmp, ftmp, z1);
p224_felem_reduce(ftmp3, tmp);
- /* tmp = z1^3*y2 */
+ // tmp = z1^3*y2
p224_felem_mul(tmp, ftmp3, y2);
- /* tmp[i] < 4 * 2^57 * 2^57 = 2^116 */
+ // tmp[i] < 4 * 2^57 * 2^57 = 2^116
- /* ftmp3 = z1^3*y2 - z2^3*y1 */
+ // ftmp3 = z1^3*y2 - z2^3*y1
p224_felem_diff_128_64(tmp, ftmp4);
- /* tmp[i] < 2^116 + 2^64 + 8 < 2^117 */
+ // tmp[i] < 2^116 + 2^64 + 8 < 2^117
p224_felem_reduce(ftmp3, tmp);
- /* tmp = z1^2*x2 */
+ // tmp = z1^2*x2
p224_felem_mul(tmp, ftmp, x2);
- /* tmp[i] < 4 * 2^57 * 2^57 = 2^116 */
+ // tmp[i] < 4 * 2^57 * 2^57 = 2^116
- /* ftmp = z1^2*x2 - z2^2*x1 */
+ // ftmp = z1^2*x2 - z2^2*x1
p224_felem_diff_128_64(tmp, ftmp2);
- /* tmp[i] < 2^116 + 2^64 + 8 < 2^117 */
+ // tmp[i] < 2^116 + 2^64 + 8 < 2^117
p224_felem_reduce(ftmp, tmp);
- /* the formulae are incorrect if the points are equal
- * so we check for this and do doubling if this happens */
+  // the formulae are incorrect if the points are equal,
+  // so we check for this and do doubling if this happens
x_equal = p224_felem_is_zero(ftmp);
y_equal = p224_felem_is_zero(ftmp3);
z1_is_zero = p224_felem_is_zero(z1);
z2_is_zero = p224_felem_is_zero(z2);
- /* In affine coordinates, (X_1, Y_1) == (X_2, Y_2) */
+ // In affine coordinates, (X_1, Y_1) == (X_2, Y_2)
if (x_equal && y_equal && !z1_is_zero && !z2_is_zero) {
p224_point_double(x3, y3, z3, x1, y1, z1);
return;
}
- /* ftmp5 = z1*z2 */
+ // ftmp5 = z1*z2
if (!mixed) {
p224_felem_mul(tmp, z1, z2);
p224_felem_reduce(ftmp5, tmp);
} else {
- /* special case z2 = 0 is handled later */
+ // special case z2 = 0 is handled later
p224_felem_assign(ftmp5, z1);
}
- /* z_out = (z1^2*x2 - z2^2*x1)*(z1*z2) */
+ // z_out = (z1^2*x2 - z2^2*x1)*(z1*z2)
p224_felem_mul(tmp, ftmp, ftmp5);
p224_felem_reduce(z_out, tmp);
- /* ftmp = (z1^2*x2 - z2^2*x1)^2 */
+ // ftmp = (z1^2*x2 - z2^2*x1)^2
p224_felem_assign(ftmp5, ftmp);
p224_felem_square(tmp, ftmp);
p224_felem_reduce(ftmp, tmp);
- /* ftmp5 = (z1^2*x2 - z2^2*x1)^3 */
+ // ftmp5 = (z1^2*x2 - z2^2*x1)^3
p224_felem_mul(tmp, ftmp, ftmp5);
p224_felem_reduce(ftmp5, tmp);
- /* ftmp2 = z2^2*x1*(z1^2*x2 - z2^2*x1)^2 */
+ // ftmp2 = z2^2*x1*(z1^2*x2 - z2^2*x1)^2
p224_felem_mul(tmp, ftmp2, ftmp);
p224_felem_reduce(ftmp2, tmp);
- /* tmp = z2^3*y1*(z1^2*x2 - z2^2*x1)^3 */
+ // tmp = z2^3*y1*(z1^2*x2 - z2^2*x1)^3
p224_felem_mul(tmp, ftmp4, ftmp5);
- /* tmp[i] < 4 * 2^57 * 2^57 = 2^116 */
+ // tmp[i] < 4 * 2^57 * 2^57 = 2^116
- /* tmp2 = (z1^3*y2 - z2^3*y1)^2 */
+ // tmp2 = (z1^3*y2 - z2^3*y1)^2
p224_felem_square(tmp2, ftmp3);
- /* tmp2[i] < 4 * 2^57 * 2^57 < 2^116 */
+ // tmp2[i] < 4 * 2^57 * 2^57 < 2^116
- /* tmp2 = (z1^3*y2 - z2^3*y1)^2 - (z1^2*x2 - z2^2*x1)^3 */
+ // tmp2 = (z1^3*y2 - z2^3*y1)^2 - (z1^2*x2 - z2^2*x1)^3
p224_felem_diff_128_64(tmp2, ftmp5);
- /* tmp2[i] < 2^116 + 2^64 + 8 < 2^117 */
+ // tmp2[i] < 2^116 + 2^64 + 8 < 2^117
- /* ftmp5 = 2*z2^2*x1*(z1^2*x2 - z2^2*x1)^2 */
+ // ftmp5 = 2*z2^2*x1*(z1^2*x2 - z2^2*x1)^2
p224_felem_assign(ftmp5, ftmp2);
p224_felem_scalar(ftmp5, 2);
- /* ftmp5[i] < 2 * 2^57 = 2^58 */
+ // ftmp5[i] < 2 * 2^57 = 2^58
/* x_out = (z1^3*y2 - z2^3*y1)^2 - (z1^2*x2 - z2^2*x1)^3 -
2*z2^2*x1*(z1^2*x2 - z2^2*x1)^2 */
p224_felem_diff_128_64(tmp2, ftmp5);
- /* tmp2[i] < 2^117 + 2^64 + 8 < 2^118 */
+ // tmp2[i] < 2^117 + 2^64 + 8 < 2^118
p224_felem_reduce(x_out, tmp2);
- /* ftmp2 = z2^2*x1*(z1^2*x2 - z2^2*x1)^2 - x_out */
+ // ftmp2 = z2^2*x1*(z1^2*x2 - z2^2*x1)^2 - x_out
p224_felem_diff(ftmp2, x_out);
- /* ftmp2[i] < 2^57 + 2^58 + 2 < 2^59 */
+ // ftmp2[i] < 2^57 + 2^58 + 2 < 2^59
- /* tmp2 = (z1^3*y2 - z2^3*y1)*(z2^2*x1*(z1^2*x2 - z2^2*x1)^2 - x_out) */
+ // tmp2 = (z1^3*y2 - z2^3*y1)*(z2^2*x1*(z1^2*x2 - z2^2*x1)^2 - x_out)
p224_felem_mul(tmp2, ftmp3, ftmp2);
- /* tmp2[i] < 4 * 2^57 * 2^59 = 2^118 */
+ // tmp2[i] < 4 * 2^57 * 2^59 = 2^118
/* y_out = (z1^3*y2 - z2^3*y1)*(z2^2*x1*(z1^2*x2 - z2^2*x1)^2 - x_out) -
z2^3*y1*(z1^2*x2 - z2^2*x1)^3 */
p224_widefelem_diff(tmp2, tmp);
- /* tmp2[i] < 2^118 + 2^120 < 2^121 */
+ // tmp2[i] < 2^118 + 2^120 < 2^121
p224_felem_reduce(y_out, tmp2);
- /* the result (x_out, y_out, z_out) is incorrect if one of the inputs is
- * the point at infinity, so we need to check for this separately */
+ // the result (x_out, y_out, z_out) is incorrect if one of the inputs is
+ // the point at infinity, so we need to check for this separately
- /* if point 1 is at infinity, copy point 2 to output, and vice versa */
+ // if point 1 is at infinity, copy point 2 to output, and vice versa
p224_copy_conditional(x_out, x2, z1_is_zero);
p224_copy_conditional(x_out, x1, z2_is_zero);
p224_copy_conditional(y_out, y2, z1_is_zero);
@@ -874,8 +874,8 @@
p224_felem_assign(z3, z_out);
}
-/* p224_select_point selects the |idx|th point from a precomputation table and
- * copies it to out. */
+// p224_select_point selects the |idx|th point from a precomputation table and
+// copies it to out.
static void p224_select_point(const uint64_t idx, size_t size,
const p224_felem pre_comp[/*size*/][3],
p224_felem out[3]) {
@@ -896,7 +896,7 @@
}
}
-/* p224_get_bit returns the |i|th bit in |in| */
+// p224_get_bit returns the |i|th bit in |in|
static char p224_get_bit(const p224_felem_bytearray in, size_t i) {
if (i >= 224) {
return 0;
@@ -904,11 +904,11 @@
return (in[i >> 3] >> (i & 7)) & 1;
}
-/* Interleaved point multiplication using precomputed point multiples:
- * The small point multiples 0*P, 1*P, ..., 16*P are in p_pre_comp, the scalars
- * in p_scalar, if non-NULL. If g_scalar is non-NULL, we also add this multiple
- * of the generator, using certain (large) precomputed multiples in
- * g_p224_pre_comp. Output point (X, Y, Z) is stored in x_out, y_out, z_out */
+// Interleaved point multiplication using precomputed point multiples:
+// The small point multiples 0*P, 1*P, ..., 16*P are in p_pre_comp, the scalars
+// in p_scalar, if non-NULL. If g_scalar is non-NULL, we also add this multiple
+// of the generator, using certain (large) precomputed multiples in
+// g_p224_pre_comp. Output point (X, Y, Z) is stored in x_out, y_out, z_out
static void p224_batch_mul(p224_felem x_out, p224_felem y_out, p224_felem z_out,
const uint8_t *p_scalar, const uint8_t *g_scalar,
const p224_felem p_pre_comp[17][3]) {
@@ -916,28 +916,28 @@
uint64_t bits;
uint8_t sign, digit;
- /* set nq to the point at infinity */
+ // set nq to the point at infinity
OPENSSL_memset(nq, 0, 3 * sizeof(p224_felem));
- /* Loop over both scalars msb-to-lsb, interleaving additions of multiples of
- * the generator (two in each of the last 28 rounds) and additions of p (every
- * 5th round). */
- int skip = 1; /* save two point operations in the first round */
+ // Loop over both scalars msb-to-lsb, interleaving additions of multiples of
+ // the generator (two in each of the last 28 rounds) and additions of p (every
+ // 5th round).
+ int skip = 1; // save two point operations in the first round
size_t i = p_scalar != NULL ? 220 : 27;
for (;;) {
- /* double */
+ // double
if (!skip) {
p224_point_double(nq[0], nq[1], nq[2], nq[0], nq[1], nq[2]);
}
- /* add multiples of the generator */
+ // add multiples of the generator
if (g_scalar != NULL && i <= 27) {
- /* first, look 28 bits upwards */
+ // first, look 28 bits upwards
bits = p224_get_bit(g_scalar, i + 196) << 3;
bits |= p224_get_bit(g_scalar, i + 140) << 2;
bits |= p224_get_bit(g_scalar, i + 84) << 1;
bits |= p224_get_bit(g_scalar, i + 28);
- /* select the point to add, in constant time */
+ // select the point to add, in constant time
p224_select_point(bits, 16, g_p224_pre_comp[1], tmp);
if (!skip) {
@@ -948,18 +948,18 @@
skip = 0;
}
- /* second, look at the current position */
+ // second, look at the current position
bits = p224_get_bit(g_scalar, i + 168) << 3;
bits |= p224_get_bit(g_scalar, i + 112) << 2;
bits |= p224_get_bit(g_scalar, i + 56) << 1;
bits |= p224_get_bit(g_scalar, i);
- /* select the point to add, in constant time */
+ // select the point to add, in constant time
p224_select_point(bits, 16, g_p224_pre_comp[0], tmp);
p224_point_add(nq[0], nq[1], nq[2], nq[0], nq[1], nq[2], 1 /* mixed */,
tmp[0], tmp[1], tmp[2]);
}
- /* do other additions every 5 doublings */
+ // do other additions every 5 doublings
if (p_scalar != NULL && i % 5 == 0) {
bits = p224_get_bit(p_scalar, i + 4) << 5;
bits |= p224_get_bit(p_scalar, i + 3) << 4;
@@ -969,9 +969,9 @@
bits |= p224_get_bit(p_scalar, i - 1);
ec_GFp_nistp_recode_scalar_bits(&sign, &digit, bits);
- /* select the point to add or subtract */
+ // select the point to add or subtract
p224_select_point(digit, 17, p_pre_comp, tmp);
- p224_felem_neg(tmp[3], tmp[1]); /* (X, -Y, Z) is the negative point */
+ p224_felem_neg(tmp[3], tmp[1]); // (X, -Y, Z) is the negative point
p224_copy_conditional(tmp[1], tmp[3], sign);
if (!skip) {
@@ -993,8 +993,8 @@
p224_felem_assign(z_out, nq[2]);
}
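To make the comb indexing concrete, a hedged, standalone sketch (get_bit and
comb_index are illustrative stand-ins for the static helpers in this file)
of how one 4-bit index into the low generator table is assembled from scalar
bits 56 positions apart, for 0 <= i <= 27 and a 28-byte scalar:

    #include <stddef.h>
    #include <stdint.h>

    static int get_bit(const uint8_t *in, size_t i) {
      return (in[i >> 3] >> (i & 7)) & 1;
    }

    static uint64_t comb_index(const uint8_t *scalar, size_t i) {
      uint64_t bits = (uint64_t)get_bit(scalar, i + 168) << 3;
      bits |= (uint64_t)get_bit(scalar, i + 112) << 2;
      bits |= (uint64_t)get_bit(scalar, i + 56) << 1;
      bits |= (uint64_t)get_bit(scalar, i);
      return bits;  // selects one of 16 precomputed generator multiples
    }

Because the index depends on secret scalar bits, the table lookup itself is
done with p224_select_point rather than a direct array access.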
-/* Takes the Jacobian coordinates (X, Y, Z) of a point and returns
- * (X', Y') = (X/Z^2, Y/Z^3) */
+// Takes the Jacobian coordinates (X, Y, Z) of a point and returns
+// (X', Y') = (X/Z^2, Y/Z^3)
static int ec_GFp_nistp224_point_get_affine_coordinates(const EC_GROUP *group,
const EC_POINT *point,
BIGNUM *x, BIGNUM *y,
@@ -1065,15 +1065,15 @@
}
if (p != NULL && p_scalar != NULL) {
- /* We treat NULL scalars as 0, and NULL points as points at infinity, i.e.,
- * they contribute nothing to the linear combination. */
+ // We treat NULL scalars as 0, and NULL points as points at infinity, i.e.,
+ // they contribute nothing to the linear combination.
OPENSSL_memset(&p_secret, 0, sizeof(p_secret));
OPENSSL_memset(&p_pre_comp, 0, sizeof(p_pre_comp));
size_t num_bytes;
- /* reduce g_scalar to 0 <= g_scalar < 2^224 */
+    // reduce p_scalar to 0 <= p_scalar < 2^224
if (BN_num_bits(p_scalar) > 224 || BN_is_negative(p_scalar)) {
- /* this is an unusual input, and we don't guarantee
- * constant-timeness */
+ // this is an unusual input, and we don't guarantee
+ // constant-timeness
if (!BN_nnmod(tmp_scalar, p_scalar, &group->order, ctx)) {
OPENSSL_PUT_ERROR(EC, ERR_R_BN_LIB);
goto err;
@@ -1084,7 +1084,7 @@
}
p224_flip_endian(p_secret, tmp, num_bytes);
- /* precompute multiples */
+ // precompute multiples
if (!p224_BN_to_felem(x_out, &p->X) ||
!p224_BN_to_felem(y_out, &p->Y) ||
!p224_BN_to_felem(z_out, &p->Z)) {
@@ -1112,9 +1112,9 @@
if (g_scalar != NULL) {
OPENSSL_memset(g_secret, 0, sizeof(g_secret));
size_t num_bytes;
- /* reduce g_scalar to 0 <= g_scalar < 2^224 */
+ // reduce g_scalar to 0 <= g_scalar < 2^224
if (BN_num_bits(g_scalar) > 224 || BN_is_negative(g_scalar)) {
- /* this is an unusual input, and we don't guarantee constant-timeness */
+ // this is an unusual input, and we don't guarantee constant-timeness
if (!BN_nnmod(tmp_scalar, g_scalar, &group->order, ctx)) {
OPENSSL_PUT_ERROR(EC, ERR_R_BN_LIB);
goto err;
@@ -1130,7 +1130,7 @@
x_out, y_out, z_out, (p != NULL && p_scalar != NULL) ? p_secret : NULL,
g_scalar != NULL ? g_secret : NULL, (const p224_felem(*)[3])p_pre_comp);
- /* reduce the output to its unique minimal representation */
+ // reduce the output to its unique minimal representation
p224_felem_contract(x_in, x_out);
p224_felem_contract(y_in, y_out);
p224_felem_contract(z_in, z_out);
@@ -1162,4 +1162,4 @@
out->field_decode = NULL;
};
-#endif /* 64_BIT && !WINDOWS && !SMALL */
+#endif // 64_BIT && !WINDOWS && !SMALL
diff --git a/crypto/fipsmodule/ec/p256-64.c b/crypto/fipsmodule/ec/p256-64.c
index 8952aa2..f7d1ff1 100644
--- a/crypto/fipsmodule/ec/p256-64.c
+++ b/crypto/fipsmodule/ec/p256-64.c
@@ -12,12 +12,12 @@
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */
-/* A 64-bit implementation of the NIST P-256 elliptic curve point
- * multiplication
- *
- * OpenSSL integration was taken from Emilia Kasper's work in ecp_nistp224.c.
- * Otherwise based on Emilia's P224 work, which was inspired by my curve25519
- * work which got its smarts from Daniel J. Bernstein's work on the same. */
+// A 64-bit implementation of the NIST P-256 elliptic curve point
+// multiplication
+//
+// OpenSSL integration was taken from Emilia Kasper's work in ecp_nistp224.c.
+// Otherwise based on Emilia's P224 work, which was inspired by my curve25519
+// work which got its smarts from Daniel J. Bernstein's work on the same.
#include <openssl/base.h>
@@ -35,29 +35,29 @@
#include "internal.h"
-/* The underlying field. P256 operates over GF(2^256-2^224+2^192+2^96-1). We
- * can serialise an element of this field into 32 bytes. We call this an
- * felem_bytearray. */
+// The underlying field. P256 operates over GF(2^256-2^224+2^192+2^96-1). We
+// can serialise an element of this field into 32 bytes. We call this an
+// felem_bytearray.
typedef uint8_t felem_bytearray[32];
-/* The representation of field elements.
- * ------------------------------------
- *
- * We represent field elements with either four 128-bit values, eight 128-bit
- * values, or four 64-bit values. The field element represented is:
- * v[0]*2^0 + v[1]*2^64 + v[2]*2^128 + v[3]*2^192 (mod p)
- * or:
- * v[0]*2^0 + v[1]*2^64 + v[2]*2^128 + ... + v[8]*2^512 (mod p)
- *
- * 128-bit values are called 'limbs'. Since the limbs are spaced only 64 bits
- * apart, but are 128-bits wide, the most significant bits of each limb overlap
- * with the least significant bits of the next.
- *
- * A field element with four limbs is an 'felem'. One with eight limbs is a
- * 'longfelem'
- *
- * A field element with four, 64-bit values is called a 'smallfelem'. Small
- * values are used as intermediate values before multiplication. */
+// The representation of field elements.
+// ------------------------------------
+//
+// We represent field elements with either four 128-bit values, eight 128-bit
+// values, or four 64-bit values. The field element represented is:
+// v[0]*2^0 + v[1]*2^64 + v[2]*2^128 + v[3]*2^192 (mod p)
+// or:
+// v[0]*2^0 + v[1]*2^64 + v[2]*2^128 + ... + v[8]*2^512 (mod p)
+//
+// 128-bit values are called 'limbs'. Since the limbs are spaced only 64 bits
+// apart, but are 128 bits wide, the most significant bits of each limb overlap
+// with the least significant bits of the next.
+//
+// A field element with four limbs is an 'felem'. One with eight limbs is a
+// 'longfelem'.
+//
+// A field element with four 64-bit values is called a 'smallfelem'. Small
+// values are used as intermediate values before multiplication.
#define NLIMBS 4
@@ -66,7 +66,7 @@
typedef limb longfelem[NLIMBS * 2];
typedef uint64_t smallfelem[NLIMBS];
-/* This is the value of the prime as four 64-bit words, little-endian. */
+// This is the value of the prime as four 64-bit words, little-endian.
static const uint64_t kPrime[4] = {0xfffffffffffffffful, 0xffffffff, 0,
0xffffffff00000001ul};
static const uint64_t bottom63bits = 0x7ffffffffffffffful;
@@ -81,8 +81,8 @@
OPENSSL_memcpy(out, &in, sizeof(in));
}
-/* bin32_to_felem takes a little-endian byte array and converts it into felem
- * form. This assumes that the CPU is little-endian. */
+// bin32_to_felem takes a little-endian byte array and converts it into felem
+// form. This assumes that the CPU is little-endian.
static void bin32_to_felem(felem out, const uint8_t in[32]) {
out[0] = load_u64(&in[0]);
out[1] = load_u64(&in[8]);
@@ -90,8 +90,8 @@
out[3] = load_u64(&in[24]);
}
-/* smallfelem_to_bin32 takes a smallfelem and serialises into a little endian,
- * 32 byte array. This assumes that the CPU is little-endian. */
+// smallfelem_to_bin32 takes a smallfelem and serialises into a little endian,
+// 32 byte array. This assumes that the CPU is little-endian.
static void smallfelem_to_bin32(uint8_t out[32], const smallfelem in) {
store_u64(&out[0], in[0]);
store_u64(&out[8], in[1]);
@@ -99,14 +99,14 @@
store_u64(&out[24], in[3]);
}
-/* To preserve endianness when using BN_bn2bin and BN_bin2bn. */
+// To preserve endianness when using BN_bn2bin and BN_bin2bn.
static void flip_endian(uint8_t *out, const uint8_t *in, size_t len) {
for (size_t i = 0; i < len; ++i) {
out[i] = in[len - 1 - i];
}
}
-/* BN_to_felem converts an OpenSSL BIGNUM into an felem. */
+// BN_to_felem converts an OpenSSL BIGNUM into an felem.
static int BN_to_felem(felem out, const BIGNUM *bn) {
if (BN_is_negative(bn)) {
OPENSSL_PUT_ERROR(EC, EC_R_BIGNUM_OUT_OF_RANGE);
@@ -114,7 +114,7 @@
}
felem_bytearray b_out;
- /* BN_bn2bin eats leading zeroes */
+ // BN_bn2bin eats leading zeroes
OPENSSL_memset(b_out, 0, sizeof(b_out));
size_t num_bytes = BN_num_bytes(bn);
if (num_bytes > sizeof(b_out)) {
@@ -129,7 +129,7 @@
return 1;
}
-/* felem_to_BN converts an felem into an OpenSSL BIGNUM. */
+// smallfelem_to_BN converts a smallfelem into an OpenSSL BIGNUM.
static BIGNUM *smallfelem_to_BN(BIGNUM *out, const smallfelem in) {
felem_bytearray b_in, b_out;
smallfelem_to_bin32(b_in, in);
@@ -137,7 +137,7 @@
return BN_bin2bn(b_out, sizeof(b_out), out);
}
-/* Field operations. */
+// Field operations.
static void felem_assign(felem out, const felem in) {
out[0] = in[0];
@@ -146,7 +146,7 @@
out[3] = in[3];
}
-/* felem_sum sets out = out + in. */
+// felem_sum sets out = out + in.
static void felem_sum(felem out, const felem in) {
out[0] += in[0];
out[1] += in[1];
@@ -154,7 +154,7 @@
out[3] += in[3];
}
-/* felem_small_sum sets out = out + in. */
+// felem_small_sum sets out = out + in.
static void felem_small_sum(felem out, const smallfelem in) {
out[0] += in[0];
out[1] += in[1];
@@ -162,7 +162,7 @@
out[3] += in[3];
}
-/* felem_scalar sets out = out * scalar */
+// felem_scalar sets out = out * scalar
static void felem_scalar(felem out, const uint64_t scalar) {
out[0] *= scalar;
out[1] *= scalar;
@@ -170,7 +170,7 @@
out[3] *= scalar;
}
-/* longfelem_scalar sets out = out * scalar */
+// longfelem_scalar sets out = out * scalar
static void longfelem_scalar(longfelem out, const uint64_t scalar) {
out[0] *= scalar;
out[1] *= scalar;
@@ -186,27 +186,27 @@
#define two105 (((limb)1) << 105)
#define two105m41p9 ((((limb)1) << 105) - (((limb)1) << 41) + (((limb)1) << 9))
-/* zero105 is 0 mod p */
+// zero105 is 0 mod p
static const felem zero105 = {two105m41m9, two105, two105m41p9, two105m41p9};
-/* smallfelem_neg sets |out| to |-small|
- * On exit:
- * out[i] < out[i] + 2^105 */
+// smallfelem_neg sets |out| to |-small|
+// On exit:
+//   out[i] <= 2^105
static void smallfelem_neg(felem out, const smallfelem small) {
- /* In order to prevent underflow, we subtract from 0 mod p. */
+ // In order to prevent underflow, we subtract from 0 mod p.
out[0] = zero105[0] - small[0];
out[1] = zero105[1] - small[1];
out[2] = zero105[2] - small[2];
out[3] = zero105[3] - small[3];
}
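A hedged aside on the zeroNNN constants: each is a precomputed multiple of p
whose every limb sits near 2^NNN, strictly above the stated bound on the
operand being subtracted. Adding one before a subtraction therefore keeps
every limb nonnegative while leaving the represented value unchanged mod p.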
-/* felem_diff subtracts |in| from |out|
- * On entry:
- * in[i] < 2^104
- * On exit:
- * out[i] < out[i] + 2^105. */
+// felem_diff subtracts |in| from |out|
+// On entry:
+// in[i] < 2^104
+// On exit:
+// out[i] < out[i] + 2^105.
static void felem_diff(felem out, const felem in) {
- /* In order to prevent underflow, we add 0 mod p before subtracting. */
+ // In order to prevent underflow, we add 0 mod p before subtracting.
out[0] += zero105[0];
out[1] += zero105[1];
out[2] += zero105[2];
@@ -224,17 +224,17 @@
#define two107m43p11 \
((((limb)1) << 107) - (((limb)1) << 43) + (((limb)1) << 11))
-/* zero107 is 0 mod p */
+// zero107 is 0 mod p
static const felem zero107 = {two107m43m11, two107, two107m43p11, two107m43p11};
-/* An alternative felem_diff for larger inputs |in|
- * felem_diff_zero107 subtracts |in| from |out|
- * On entry:
- * in[i] < 2^106
- * On exit:
- * out[i] < out[i] + 2^107. */
+// An alternative felem_diff for larger inputs |in|
+// felem_diff_zero107 subtracts |in| from |out|
+// On entry:
+// in[i] < 2^106
+// On exit:
+// out[i] < out[i] + 2^107.
static void felem_diff_zero107(felem out, const felem in) {
- /* In order to prevent underflow, we add 0 mod p before subtracting. */
+ // In order to prevent underflow, we add 0 mod p before subtracting.
out[0] += zero107[0];
out[1] += zero107[1];
out[2] += zero107[2];
@@ -246,11 +246,11 @@
out[3] -= in[3];
}
-/* longfelem_diff subtracts |in| from |out|
- * On entry:
- * in[i] < 7*2^67
- * On exit:
- * out[i] < out[i] + 2^70 + 2^40. */
+// longfelem_diff subtracts |in| from |out|
+// On entry:
+// in[i] < 7*2^67
+// On exit:
+// out[i] < out[i] + 2^70 + 2^40.
static void longfelem_diff(longfelem out, const longfelem in) {
static const limb two70m8p6 =
(((limb)1) << 70) - (((limb)1) << 8) + (((limb)1) << 6);
@@ -260,7 +260,7 @@
(((limb)1) << 38) + (((limb)1) << 6);
static const limb two70m6 = (((limb)1) << 70) - (((limb)1) << 6);
- /* add 0 mod p to avoid underflow */
+ // add 0 mod p to avoid underflow
out[0] += two70m8p6;
out[1] += two70p40;
out[2] += two70;
@@ -270,7 +270,7 @@
out[6] += two70m6;
out[7] += two70m6;
- /* in[i] < 7*2^67 < 2^70 - 2^40 - 2^38 + 2^6 */
+ // in[i] < 7*2^67 < 2^70 - 2^40 - 2^38 + 2^6
out[0] -= in[0];
out[1] -= in[1];
out[2] -= in[2];
@@ -286,80 +286,80 @@
#define two64m46 ((((limb)1) << 64) - (((limb)1) << 46))
#define two64m32 ((((limb)1) << 64) - (((limb)1) << 32))
-/* zero110 is 0 mod p. */
+// zero110 is 0 mod p.
static const felem zero110 = {two64m0, two110p32m0, two64m46, two64m32};
-/* felem_shrink converts an felem into a smallfelem. The result isn't quite
- * minimal as the value may be greater than p.
- *
- * On entry:
- * in[i] < 2^109
- * On exit:
- * out[i] < 2^64. */
+// felem_shrink converts an felem into a smallfelem. The result isn't quite
+// minimal as the value may be greater than p.
+//
+// On entry:
+// in[i] < 2^109
+// On exit:
+// out[i] < 2^64.
static void felem_shrink(smallfelem out, const felem in) {
felem tmp;
uint64_t a, b, mask;
int64_t high, low;
static const uint64_t kPrime3Test =
- 0x7fffffff00000001ul; /* 2^63 - 2^32 + 1 */
+ 0x7fffffff00000001ul; // 2^63 - 2^32 + 1
- /* Carry 2->3 */
+ // Carry 2->3
tmp[3] = zero110[3] + in[3] + ((uint64_t)(in[2] >> 64));
- /* tmp[3] < 2^110 */
+ // tmp[3] < 2^110
tmp[2] = zero110[2] + (uint64_t)in[2];
tmp[0] = zero110[0] + in[0];
tmp[1] = zero110[1] + in[1];
- /* tmp[0] < 2**110, tmp[1] < 2^111, tmp[2] < 2**65 */
+  // tmp[0] < 2^110, tmp[1] < 2^111, tmp[2] < 2^65
- /* We perform two partial reductions where we eliminate the high-word of
- * tmp[3]. We don't update the other words till the end. */
- a = tmp[3] >> 64; /* a < 2^46 */
+ // We perform two partial reductions where we eliminate the high-word of
+ // tmp[3]. We don't update the other words till the end.
+ a = tmp[3] >> 64; // a < 2^46
tmp[3] = (uint64_t)tmp[3];
tmp[3] -= a;
tmp[3] += ((limb)a) << 32;
- /* tmp[3] < 2^79 */
+ // tmp[3] < 2^79
b = a;
- a = tmp[3] >> 64; /* a < 2^15 */
- b += a; /* b < 2^46 + 2^15 < 2^47 */
+ a = tmp[3] >> 64; // a < 2^15
+ b += a; // b < 2^46 + 2^15 < 2^47
tmp[3] = (uint64_t)tmp[3];
tmp[3] -= a;
tmp[3] += ((limb)a) << 32;
- /* tmp[3] < 2^64 + 2^47 */
+ // tmp[3] < 2^64 + 2^47
- /* This adjusts the other two words to complete the two partial
- * reductions. */
+ // This adjusts the other two words to complete the two partial
+ // reductions.
tmp[0] += b;
tmp[1] -= (((limb)b) << 32);
- /* In order to make space in tmp[3] for the carry from 2 -> 3, we
- * conditionally subtract kPrime if tmp[3] is large enough. */
+ // In order to make space in tmp[3] for the carry from 2 -> 3, we
+ // conditionally subtract kPrime if tmp[3] is large enough.
high = tmp[3] >> 64;
- /* As tmp[3] < 2^65, high is either 1 or 0 */
+ // As tmp[3] < 2^65, high is either 1 or 0
high = ~(high - 1);
- /* high is:
- * all ones if the high word of tmp[3] is 1
- * all zeros if the high word of tmp[3] if 0 */
+ // high is:
+ // all ones if the high word of tmp[3] is 1
+  //   all zeros if the high word of tmp[3] is 0
low = tmp[3];
mask = low >> 63;
- /* mask is:
- * all ones if the MSB of low is 1
- * all zeros if the MSB of low if 0 */
+ // mask is:
+ // all ones if the MSB of low is 1
+  //   all zeros if the MSB of low is 0
low &= bottom63bits;
low -= kPrime3Test;
- /* if low was greater than kPrime3Test then the MSB is zero */
+ // if low was greater than kPrime3Test then the MSB is zero
low = ~low;
low >>= 63;
- /* low is:
- * all ones if low was > kPrime3Test
- * all zeros if low was <= kPrime3Test */
+ // low is:
+ // all ones if low was > kPrime3Test
+ // all zeros if low was <= kPrime3Test
mask = (mask & low) | high;
tmp[0] -= mask & kPrime[0];
tmp[1] -= mask & kPrime[1];
- /* kPrime[2] is zero, so omitted */
+ // kPrime[2] is zero, so omitted
tmp[3] -= mask & kPrime[3];
- /* tmp[3] < 2**64 - 2**32 + 1 */
+  // tmp[3] < 2^64 - 2^32 + 1
tmp[1] += ((uint64_t)(tmp[0] >> 64));
tmp[0] = (uint64_t)tmp[0];
@@ -367,7 +367,7 @@
tmp[1] = (uint64_t)tmp[1];
tmp[3] += ((uint64_t)(tmp[2] >> 64));
tmp[2] = (uint64_t)tmp[2];
- /* tmp[i] < 2^64 */
+ // tmp[i] < 2^64
out[0] = tmp[0];
out[1] = tmp[1];
@@ -375,7 +375,7 @@
out[3] = tmp[3];
}
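A note on why the partial reductions above are sound: since
p = 2^256 - 2^224 + 2^192 + 2^96 - 1,

    2^256 == 2^224 - 2^192 - 2^96 + 1  (mod p)

Truncating tmp[3] removes a*2^256; the in-place -= a, += a << 32 at limb
offset 2^192 restores the a*2^224 - a*2^192 part, and the deferred
tmp[0] += b, tmp[1] -= b << 32 supplies the remaining +b and -b*2^96 once
both partial reductions have been accumulated into b.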
-/* smallfelem_expand converts a smallfelem to an felem */
+// smallfelem_expand converts a smallfelem to an felem
static void smallfelem_expand(felem out, const smallfelem in) {
out[0] = in[0];
out[1] = in[1];
@@ -383,11 +383,11 @@
out[3] = in[3];
}
-/* smallfelem_square sets |out| = |small|^2
- * On entry:
- * small[i] < 2^64
- * On exit:
- * out[i] < 7 * 2^64 < 2^67 */
+// smallfelem_square sets |out| = |small|^2
+// On entry:
+// small[i] < 2^64
+// On exit:
+// out[i] < 7 * 2^64 < 2^67
static void smallfelem_square(longfelem out, const smallfelem small) {
limb a;
uint64_t high, low;
@@ -459,23 +459,23 @@
out[7] = high;
}
-/*felem_square sets |out| = |in|^2
- * On entry:
- * in[i] < 2^109
- * On exit:
- * out[i] < 7 * 2^64 < 2^67. */
+// felem_square sets |out| = |in|^2
+// On entry:
+// in[i] < 2^109
+// On exit:
+// out[i] < 7 * 2^64 < 2^67.
static void felem_square(longfelem out, const felem in) {
uint64_t small[4];
felem_shrink(small, in);
smallfelem_square(out, small);
}
-/* smallfelem_mul sets |out| = |small1| * |small2|
- * On entry:
- * small1[i] < 2^64
- * small2[i] < 2^64
- * On exit:
- * out[i] < 7 * 2^64 < 2^67. */
+// smallfelem_mul sets |out| = |small1| * |small2|
+// On entry:
+// small1[i] < 2^64
+// small2[i] < 2^64
+// On exit:
+// out[i] < 7 * 2^64 < 2^67.
static void smallfelem_mul(longfelem out, const smallfelem small1,
const smallfelem small2) {
limb a;
@@ -578,12 +578,12 @@
out[7] = high;
}
-/* felem_mul sets |out| = |in1| * |in2|
- * On entry:
- * in1[i] < 2^109
- * in2[i] < 2^109
- * On exit:
- * out[i] < 7 * 2^64 < 2^67 */
+// felem_mul sets |out| = |in1| * |in2|
+// On entry:
+// in1[i] < 2^109
+// in2[i] < 2^109
+// On exit:
+// out[i] < 7 * 2^64 < 2^67
static void felem_mul(longfelem out, const felem in1, const felem in2) {
smallfelem small1, small2;
felem_shrink(small1, in1);
@@ -591,12 +591,12 @@
smallfelem_mul(out, small1, small2);
}
-/* felem_small_mul sets |out| = |small1| * |in2|
- * On entry:
- * small1[i] < 2^64
- * in2[i] < 2^109
- * On exit:
- * out[i] < 7 * 2^64 < 2^67 */
+// felem_small_mul sets |out| = |small1| * |in2|
+// On entry:
+// small1[i] < 2^64
+// in2[i] < 2^109
+// On exit:
+// out[i] < 7 * 2^64 < 2^67
static void felem_small_mul(longfelem out, const smallfelem small1,
const felem in2) {
smallfelem small2;
@@ -608,24 +608,24 @@
#define two100 (((limb)1) << 100)
#define two100m36p4 ((((limb)1) << 100) - (((limb)1) << 36) + (((limb)1) << 4))
-/* zero100 is 0 mod p */
+// zero100 is 0 mod p
static const felem zero100 = {two100m36m4, two100, two100m36p4, two100m36p4};
-/* Internal function for the different flavours of felem_reduce.
- * felem_reduce_ reduces the higher coefficients in[4]-in[7].
- * On entry:
- * out[0] >= in[6] + 2^32*in[6] + in[7] + 2^32*in[7]
- * out[1] >= in[7] + 2^32*in[4]
- * out[2] >= in[5] + 2^32*in[5]
- * out[3] >= in[4] + 2^32*in[5] + 2^32*in[6]
- * On exit:
- * out[0] <= out[0] + in[4] + 2^32*in[5]
- * out[1] <= out[1] + in[5] + 2^33*in[6]
- * out[2] <= out[2] + in[7] + 2*in[6] + 2^33*in[7]
- * out[3] <= out[3] + 2^32*in[4] + 3*in[7] */
+// Internal function for the different flavours of felem_reduce.
+// felem_reduce_ reduces the higher coefficients in[4]-in[7].
+// On entry:
+// out[0] >= in[6] + 2^32*in[6] + in[7] + 2^32*in[7]
+// out[1] >= in[7] + 2^32*in[4]
+// out[2] >= in[5] + 2^32*in[5]
+// out[3] >= in[4] + 2^32*in[5] + 2^32*in[6]
+// On exit:
+// out[0] <= out[0] + in[4] + 2^32*in[5]
+// out[1] <= out[1] + in[5] + 2^33*in[6]
+// out[2] <= out[2] + in[7] + 2*in[6] + 2^33*in[7]
+// out[3] <= out[3] + 2^32*in[4] + 3*in[7]
static void felem_reduce_(felem out, const longfelem in) {
int128_t c;
- /* combine common terms from below */
+ // combine common terms from below
c = in[4] + (in[5] << 32);
out[0] += c;
out[3] -= c;
@@ -634,35 +634,35 @@
out[1] += c;
out[2] -= c;
- /* the remaining terms */
- /* 256: [(0,1),(96,-1),(192,-1),(224,1)] */
+ // the remaining terms
+ // 256: [(0,1),(96,-1),(192,-1),(224,1)]
out[1] -= (in[4] << 32);
out[3] += (in[4] << 32);
- /* 320: [(32,1),(64,1),(128,-1),(160,-1),(224,-1)] */
+ // 320: [(32,1),(64,1),(128,-1),(160,-1),(224,-1)]
out[2] -= (in[5] << 32);
- /* 384: [(0,-1),(32,-1),(96,2),(128,2),(224,-1)] */
+ // 384: [(0,-1),(32,-1),(96,2),(128,2),(224,-1)]
out[0] -= in[6];
out[0] -= (in[6] << 32);
out[1] += (in[6] << 33);
out[2] += (in[6] * 2);
out[3] -= (in[6] << 32);
- /* 448: [(0,-1),(32,-1),(64,-1),(128,1),(160,2),(192,3)] */
+ // 448: [(0,-1),(32,-1),(64,-1),(128,1),(160,2),(192,3)]
out[0] -= in[7];
out[0] -= (in[7] << 32);
out[2] += (in[7] << 33);
out[3] += (in[7] * 3);
}
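The annotations such as 256: [(0,1),(96,-1),(192,-1),(224,1)] are a
reduction table in (bit offset, signed coefficient) form: they record that
2^256 == 2^0 - 2^96 - 2^192 + 2^224 (mod p), and likewise for 2^320, 2^384
and 2^448, which is exactly how each high coefficient in[4]..in[7] is folded
back into the four low limbs.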
-/* felem_reduce converts a longfelem into an felem.
- * To be called directly after felem_square or felem_mul.
- * On entry:
- * in[0] < 2^64, in[1] < 3*2^64, in[2] < 5*2^64, in[3] < 7*2^64
- * in[4] < 7*2^64, in[5] < 5*2^64, in[6] < 3*2^64, in[7] < 2*64
- * On exit:
- * out[i] < 2^101 */
+// felem_reduce converts a longfelem into an felem.
+// To be called directly after felem_square or felem_mul.
+// On entry:
+// in[0] < 2^64, in[1] < 3*2^64, in[2] < 5*2^64, in[3] < 7*2^64
+//   in[4] < 7*2^64, in[5] < 5*2^64, in[6] < 3*2^64, in[7] < 2^64
+// On exit:
+// out[i] < 2^101
static void felem_reduce(felem out, const longfelem in) {
out[0] = zero100[0] + in[0];
out[1] = zero100[1] + in[1];
@@ -671,22 +671,22 @@
felem_reduce_(out, in);
- /* out[0] > 2^100 - 2^36 - 2^4 - 3*2^64 - 3*2^96 - 2^64 - 2^96 > 0
- * out[1] > 2^100 - 2^64 - 7*2^96 > 0
- * out[2] > 2^100 - 2^36 + 2^4 - 5*2^64 - 5*2^96 > 0
- * out[3] > 2^100 - 2^36 + 2^4 - 7*2^64 - 5*2^96 - 3*2^96 > 0
- *
- * out[0] < 2^100 + 2^64 + 7*2^64 + 5*2^96 < 2^101
- * out[1] < 2^100 + 3*2^64 + 5*2^64 + 3*2^97 < 2^101
- * out[2] < 2^100 + 5*2^64 + 2^64 + 3*2^65 + 2^97 < 2^101
- * out[3] < 2^100 + 7*2^64 + 7*2^96 + 3*2^64 < 2^101 */
+ // out[0] > 2^100 - 2^36 - 2^4 - 3*2^64 - 3*2^96 - 2^64 - 2^96 > 0
+ // out[1] > 2^100 - 2^64 - 7*2^96 > 0
+ // out[2] > 2^100 - 2^36 + 2^4 - 5*2^64 - 5*2^96 > 0
+ // out[3] > 2^100 - 2^36 + 2^4 - 7*2^64 - 5*2^96 - 3*2^96 > 0
+ //
+ // out[0] < 2^100 + 2^64 + 7*2^64 + 5*2^96 < 2^101
+ // out[1] < 2^100 + 3*2^64 + 5*2^64 + 3*2^97 < 2^101
+ // out[2] < 2^100 + 5*2^64 + 2^64 + 3*2^65 + 2^97 < 2^101
+ // out[3] < 2^100 + 7*2^64 + 7*2^96 + 3*2^64 < 2^101
}
-/* felem_reduce_zero105 converts a larger longfelem into an felem.
- * On entry:
- * in[0] < 2^71
- * On exit:
- * out[i] < 2^106 */
+// felem_reduce_zero105 converts a larger longfelem into an felem.
+// On entry:
+//   in[i] < 2^71
+// On exit:
+// out[i] < 2^106
static void felem_reduce_zero105(felem out, const longfelem in) {
out[0] = zero105[0] + in[0];
out[1] = zero105[1] + in[1];
@@ -695,19 +695,19 @@
felem_reduce_(out, in);
- /* out[0] > 2^105 - 2^41 - 2^9 - 2^71 - 2^103 - 2^71 - 2^103 > 0
- * out[1] > 2^105 - 2^71 - 2^103 > 0
- * out[2] > 2^105 - 2^41 + 2^9 - 2^71 - 2^103 > 0
- * out[3] > 2^105 - 2^41 + 2^9 - 2^71 - 2^103 - 2^103 > 0
- *
- * out[0] < 2^105 + 2^71 + 2^71 + 2^103 < 2^106
- * out[1] < 2^105 + 2^71 + 2^71 + 2^103 < 2^106
- * out[2] < 2^105 + 2^71 + 2^71 + 2^71 + 2^103 < 2^106
- * out[3] < 2^105 + 2^71 + 2^103 + 2^71 < 2^106 */
+ // out[0] > 2^105 - 2^41 - 2^9 - 2^71 - 2^103 - 2^71 - 2^103 > 0
+ // out[1] > 2^105 - 2^71 - 2^103 > 0
+ // out[2] > 2^105 - 2^41 + 2^9 - 2^71 - 2^103 > 0
+ // out[3] > 2^105 - 2^41 + 2^9 - 2^71 - 2^103 - 2^103 > 0
+ //
+ // out[0] < 2^105 + 2^71 + 2^71 + 2^103 < 2^106
+ // out[1] < 2^105 + 2^71 + 2^71 + 2^103 < 2^106
+ // out[2] < 2^105 + 2^71 + 2^71 + 2^71 + 2^103 < 2^106
+ // out[3] < 2^105 + 2^71 + 2^103 + 2^71 < 2^106
}
-/* subtract_u64 sets *result = *result - v and *carry to one if the
- * subtraction underflowed. */
+// subtract_u64 sets *result = *result - v and *carry to one if the
+// subtraction underflowed.
static void subtract_u64(uint64_t *result, uint64_t *carry, uint64_t v) {
uint128_t r = *result;
r -= v;
@@ -715,28 +715,28 @@
*result = (uint64_t)r;
}
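As a hedged aside, the same borrow can be detected without 128-bit
arithmetic (subtract_u64_alt is hypothetical):

    #include <stdint.h>

    static void subtract_u64_alt(uint64_t *result, uint64_t *carry, uint64_t v) {
      uint64_t r = *result - v;
      *carry = r > *result;  // wraparound occurred iff v > *result
      *result = r;
    }

felem_contract chains these calls below, feeding each limb's borrow into the
next subtraction.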
-/* felem_contract converts |in| to its unique, minimal representation. On
- * entry: in[i] < 2^109. */
+// felem_contract converts |in| to its unique, minimal representation. On
+// entry: in[i] < 2^109.
static void felem_contract(smallfelem out, const felem in) {
uint64_t all_equal_so_far = 0, result = 0;
felem_shrink(out, in);
- /* small is minimal except that the value might be > p */
+  // out is minimal except that the value might be > p
all_equal_so_far--;
- /* We are doing a constant time test if out >= kPrime. We need to compare
- * each uint64_t, from most-significant to least significant. For each one, if
- * all words so far have been equal (m is all ones) then a non-equal
- * result is the answer. Otherwise we continue. */
+  // We are doing a constant-time test if out >= kPrime. We need to compare
+  // each uint64_t, from most-significant to least-significant. For each one,
+  // if all words so far have been equal (all_equal_so_far is all ones) then a
+  // non-equal result is the answer. Otherwise we continue.
for (size_t i = 3; i < 4; i--) {
uint64_t equal;
uint128_t a = ((uint128_t)kPrime[i]) - out[i];
- /* if out[i] > kPrime[i] then a will underflow and the high 64-bits
- * will all be set. */
+ // if out[i] > kPrime[i] then a will underflow and the high 64-bits
+ // will all be set.
result |= all_equal_so_far & ((uint64_t)(a >> 64));
- /* if kPrime[i] == out[i] then |equal| will be all zeros and the
- * decrement will make it all ones. */
+ // if kPrime[i] == out[i] then |equal| will be all zeros and the
+ // decrement will make it all ones.
equal = kPrime[i] ^ out[i];
equal--;
equal &= equal << 32;
@@ -750,11 +750,11 @@
all_equal_so_far &= equal;
}
- /* if all_equal_so_far is still all ones then the two values are equal
- * and so out >= kPrime is true. */
+ // if all_equal_so_far is still all ones then the two values are equal
+ // and so out >= kPrime is true.
result |= all_equal_so_far;
- /* if out >= kPrime then we subtract kPrime. */
+ // if out >= kPrime then we subtract kPrime.
uint64_t carry;
subtract_u64(&out[0], &carry, result & kPrime[0]);
subtract_u64(&out[1], &carry, carry);
@@ -771,10 +771,10 @@
subtract_u64(&out[3], &carry, result & kPrime[3]);
}
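The decrement-and-fold equality test used in the loop can be isolated as
follows (ct_eq_mask is a hypothetical name); it returns an all-ones mask
exactly when the two words are equal, with no branch on either input:

    #include <stdint.h>

    static uint64_t ct_eq_mask(uint64_t a, uint64_t b) {
      uint64_t equal = a ^ b;  // zero iff a == b
      equal--;                 // all-ones iff a == b; otherwise some bit is 0
      equal &= equal << 32;
      equal &= equal << 16;
      equal &= equal << 8;
      equal &= equal << 4;
      equal &= equal << 2;
      equal &= equal << 1;     // the MSB is now the AND of all 64 bits
      return (uint64_t)((int64_t)equal >> 63);  // broadcast the MSB
    }

This matters because felem_contract must not leak how out compares with
kPrime.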
-/* felem_is_zero returns a limb with all bits set if |in| == 0 (mod p) and 0
- * otherwise.
- * On entry:
- * small[i] < 2^64 */
+// smallfelem_is_zero returns a limb with all bits set if |small| == 0 (mod p)
+// and 0 otherwise.
+// On entry:
+//   small[i] < 2^64
static limb smallfelem_is_zero(const smallfelem small) {
limb result;
uint64_t is_p;
@@ -807,118 +807,118 @@
return result;
}
-/* felem_inv calculates |out| = |in|^{-1}
- *
- * Based on Fermat's Little Theorem:
- * a^p = a (mod p)
- * a^{p-1} = 1 (mod p)
- * a^{p-2} = a^{-1} (mod p) */
+// felem_inv calculates |out| = |in|^{-1}
+//
+// Based on Fermat's Little Theorem:
+// a^p = a (mod p)
+// a^{p-1} = 1 (mod p)
+// a^{p-2} = a^{-1} (mod p)
static void felem_inv(felem out, const felem in) {
felem ftmp, ftmp2;
- /* each e_I will hold |in|^{2^I - 1} */
+ // each e_I will hold |in|^{2^I - 1}
felem e2, e4, e8, e16, e32, e64;
longfelem tmp;
felem_square(tmp, in);
- felem_reduce(ftmp, tmp); /* 2^1 */
+ felem_reduce(ftmp, tmp); // 2^1
felem_mul(tmp, in, ftmp);
- felem_reduce(ftmp, tmp); /* 2^2 - 2^0 */
+ felem_reduce(ftmp, tmp); // 2^2 - 2^0
felem_assign(e2, ftmp);
felem_square(tmp, ftmp);
- felem_reduce(ftmp, tmp); /* 2^3 - 2^1 */
+ felem_reduce(ftmp, tmp); // 2^3 - 2^1
felem_square(tmp, ftmp);
- felem_reduce(ftmp, tmp); /* 2^4 - 2^2 */
+ felem_reduce(ftmp, tmp); // 2^4 - 2^2
felem_mul(tmp, ftmp, e2);
- felem_reduce(ftmp, tmp); /* 2^4 - 2^0 */
+ felem_reduce(ftmp, tmp); // 2^4 - 2^0
felem_assign(e4, ftmp);
felem_square(tmp, ftmp);
- felem_reduce(ftmp, tmp); /* 2^5 - 2^1 */
+ felem_reduce(ftmp, tmp); // 2^5 - 2^1
felem_square(tmp, ftmp);
- felem_reduce(ftmp, tmp); /* 2^6 - 2^2 */
+ felem_reduce(ftmp, tmp); // 2^6 - 2^2
felem_square(tmp, ftmp);
- felem_reduce(ftmp, tmp); /* 2^7 - 2^3 */
+ felem_reduce(ftmp, tmp); // 2^7 - 2^3
felem_square(tmp, ftmp);
- felem_reduce(ftmp, tmp); /* 2^8 - 2^4 */
+ felem_reduce(ftmp, tmp); // 2^8 - 2^4
felem_mul(tmp, ftmp, e4);
- felem_reduce(ftmp, tmp); /* 2^8 - 2^0 */
+ felem_reduce(ftmp, tmp); // 2^8 - 2^0
felem_assign(e8, ftmp);
for (size_t i = 0; i < 8; i++) {
felem_square(tmp, ftmp);
felem_reduce(ftmp, tmp);
- } /* 2^16 - 2^8 */
+ } // 2^16 - 2^8
felem_mul(tmp, ftmp, e8);
- felem_reduce(ftmp, tmp); /* 2^16 - 2^0 */
+ felem_reduce(ftmp, tmp); // 2^16 - 2^0
felem_assign(e16, ftmp);
for (size_t i = 0; i < 16; i++) {
felem_square(tmp, ftmp);
felem_reduce(ftmp, tmp);
- } /* 2^32 - 2^16 */
+ } // 2^32 - 2^16
felem_mul(tmp, ftmp, e16);
- felem_reduce(ftmp, tmp); /* 2^32 - 2^0 */
+ felem_reduce(ftmp, tmp); // 2^32 - 2^0
felem_assign(e32, ftmp);
for (size_t i = 0; i < 32; i++) {
felem_square(tmp, ftmp);
felem_reduce(ftmp, tmp);
- } /* 2^64 - 2^32 */
+ } // 2^64 - 2^32
felem_assign(e64, ftmp);
felem_mul(tmp, ftmp, in);
- felem_reduce(ftmp, tmp); /* 2^64 - 2^32 + 2^0 */
+ felem_reduce(ftmp, tmp); // 2^64 - 2^32 + 2^0
for (size_t i = 0; i < 192; i++) {
felem_square(tmp, ftmp);
felem_reduce(ftmp, tmp);
- } /* 2^256 - 2^224 + 2^192 */
+ } // 2^256 - 2^224 + 2^192
felem_mul(tmp, e64, e32);
- felem_reduce(ftmp2, tmp); /* 2^64 - 2^0 */
+ felem_reduce(ftmp2, tmp); // 2^64 - 2^0
for (size_t i = 0; i < 16; i++) {
felem_square(tmp, ftmp2);
felem_reduce(ftmp2, tmp);
- } /* 2^80 - 2^16 */
+ } // 2^80 - 2^16
felem_mul(tmp, ftmp2, e16);
- felem_reduce(ftmp2, tmp); /* 2^80 - 2^0 */
+ felem_reduce(ftmp2, tmp); // 2^80 - 2^0
for (size_t i = 0; i < 8; i++) {
felem_square(tmp, ftmp2);
felem_reduce(ftmp2, tmp);
- } /* 2^88 - 2^8 */
+ } // 2^88 - 2^8
felem_mul(tmp, ftmp2, e8);
- felem_reduce(ftmp2, tmp); /* 2^88 - 2^0 */
+ felem_reduce(ftmp2, tmp); // 2^88 - 2^0
for (size_t i = 0; i < 4; i++) {
felem_square(tmp, ftmp2);
felem_reduce(ftmp2, tmp);
- } /* 2^92 - 2^4 */
+ } // 2^92 - 2^4
felem_mul(tmp, ftmp2, e4);
- felem_reduce(ftmp2, tmp); /* 2^92 - 2^0 */
+ felem_reduce(ftmp2, tmp); // 2^92 - 2^0
felem_square(tmp, ftmp2);
- felem_reduce(ftmp2, tmp); /* 2^93 - 2^1 */
+ felem_reduce(ftmp2, tmp); // 2^93 - 2^1
felem_square(tmp, ftmp2);
- felem_reduce(ftmp2, tmp); /* 2^94 - 2^2 */
+ felem_reduce(ftmp2, tmp); // 2^94 - 2^2
felem_mul(tmp, ftmp2, e2);
- felem_reduce(ftmp2, tmp); /* 2^94 - 2^0 */
+ felem_reduce(ftmp2, tmp); // 2^94 - 2^0
felem_square(tmp, ftmp2);
- felem_reduce(ftmp2, tmp); /* 2^95 - 2^1 */
+ felem_reduce(ftmp2, tmp); // 2^95 - 2^1
felem_square(tmp, ftmp2);
- felem_reduce(ftmp2, tmp); /* 2^96 - 2^2 */
+ felem_reduce(ftmp2, tmp); // 2^96 - 2^2
felem_mul(tmp, ftmp2, in);
- felem_reduce(ftmp2, tmp); /* 2^96 - 3 */
+ felem_reduce(ftmp2, tmp); // 2^96 - 3
felem_mul(tmp, ftmp2, ftmp);
- felem_reduce(out, tmp); /* 2^256 - 2^224 + 2^192 + 2^96 - 3 */
+ felem_reduce(out, tmp); // 2^256 - 2^224 + 2^192 + 2^96 - 3
}
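As with the P-224 chain, the endpoint checks out against Fermat: for
p = 2^256 - 2^224 + 2^192 + 2^96 - 1 the inverse exponent is
p - 2 = 2^256 - 2^224 + 2^192 + 2^96 - 3, and the final multiplication
combines ftmp = in^(2^256 - 2^224 + 2^192) with ftmp2 = in^(2^96 - 3) to
reach exactly that exponent.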
-/* Group operations
- * ----------------
- *
- * Building on top of the field operations we have the operations on the
- * elliptic curve group itself. Points on the curve are represented in Jacobian
- * coordinates. */
+// Group operations
+// ----------------
+//
+// Building on top of the field operations we have the operations on the
+// elliptic curve group itself. Points on the curve are represented in Jacobian
+// coordinates.
-/* point_double calculates 2*(x_in, y_in, z_in)
- *
- * The method is taken from:
- * http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#doubling-dbl-2001-b
- *
- * Outputs can equal corresponding inputs, i.e., x_out == x_in is allowed.
- * while x_out == y_in is not (maybe this works, but it's not tested). */
+// point_double calculates 2*(x_in, y_in, z_in)
+//
+// The method is taken from:
+// http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#doubling-dbl-2001-b
+//
+// Outputs can equal corresponding inputs, i.e., x_out == x_in is allowed,
+// while x_out == y_in is not (maybe this works, but it's not tested).
static void point_double(felem x_out, felem y_out, felem z_out,
const felem x_in, const felem y_in, const felem z_in) {
longfelem tmp, tmp2;
@@ -926,77 +926,77 @@
smallfelem small1, small2;
felem_assign(ftmp, x_in);
- /* ftmp[i] < 2^106 */
+ // ftmp[i] < 2^106
felem_assign(ftmp2, x_in);
- /* ftmp2[i] < 2^106 */
+ // ftmp2[i] < 2^106
- /* delta = z^2 */
+ // delta = z^2
felem_square(tmp, z_in);
felem_reduce(delta, tmp);
- /* delta[i] < 2^101 */
+ // delta[i] < 2^101
- /* gamma = y^2 */
+ // gamma = y^2
felem_square(tmp, y_in);
felem_reduce(gamma, tmp);
- /* gamma[i] < 2^101 */
+ // gamma[i] < 2^101
felem_shrink(small1, gamma);
- /* beta = x*gamma */
+ // beta = x*gamma
felem_small_mul(tmp, small1, x_in);
felem_reduce(beta, tmp);
- /* beta[i] < 2^101 */
+ // beta[i] < 2^101
- /* alpha = 3*(x-delta)*(x+delta) */
+ // alpha = 3*(x-delta)*(x+delta)
felem_diff(ftmp, delta);
- /* ftmp[i] < 2^105 + 2^106 < 2^107 */
+ // ftmp[i] < 2^105 + 2^106 < 2^107
felem_sum(ftmp2, delta);
- /* ftmp2[i] < 2^105 + 2^106 < 2^107 */
+ // ftmp2[i] < 2^105 + 2^106 < 2^107
felem_scalar(ftmp2, 3);
- /* ftmp2[i] < 3 * 2^107 < 2^109 */
+ // ftmp2[i] < 3 * 2^107 < 2^109
felem_mul(tmp, ftmp, ftmp2);
felem_reduce(alpha, tmp);
- /* alpha[i] < 2^101 */
+ // alpha[i] < 2^101
felem_shrink(small2, alpha);
- /* x' = alpha^2 - 8*beta */
+ // x' = alpha^2 - 8*beta
smallfelem_square(tmp, small2);
felem_reduce(x_out, tmp);
felem_assign(ftmp, beta);
felem_scalar(ftmp, 8);
- /* ftmp[i] < 8 * 2^101 = 2^104 */
+ // ftmp[i] < 8 * 2^101 = 2^104
felem_diff(x_out, ftmp);
- /* x_out[i] < 2^105 + 2^101 < 2^106 */
+ // x_out[i] < 2^105 + 2^101 < 2^106
- /* z' = (y + z)^2 - gamma - delta */
+ // z' = (y + z)^2 - gamma - delta
felem_sum(delta, gamma);
- /* delta[i] < 2^101 + 2^101 = 2^102 */
+ // delta[i] < 2^101 + 2^101 = 2^102
felem_assign(ftmp, y_in);
felem_sum(ftmp, z_in);
- /* ftmp[i] < 2^106 + 2^106 = 2^107 */
+ // ftmp[i] < 2^106 + 2^106 = 2^107
felem_square(tmp, ftmp);
felem_reduce(z_out, tmp);
felem_diff(z_out, delta);
- /* z_out[i] < 2^105 + 2^101 < 2^106 */
+ // z_out[i] < 2^105 + 2^101 < 2^106
- /* y' = alpha*(4*beta - x') - 8*gamma^2 */
+ // y' = alpha*(4*beta - x') - 8*gamma^2
felem_scalar(beta, 4);
- /* beta[i] < 4 * 2^101 = 2^103 */
+ // beta[i] < 4 * 2^101 = 2^103
felem_diff_zero107(beta, x_out);
- /* beta[i] < 2^107 + 2^103 < 2^108 */
+ // beta[i] < 2^107 + 2^103 < 2^108
felem_small_mul(tmp, small2, beta);
- /* tmp[i] < 7 * 2^64 < 2^67 */
+ // tmp[i] < 7 * 2^64 < 2^67
smallfelem_square(tmp2, small1);
- /* tmp2[i] < 7 * 2^64 */
+ // tmp2[i] < 7 * 2^64
longfelem_scalar(tmp2, 8);
- /* tmp2[i] < 8 * 7 * 2^64 = 7 * 2^67 */
+ // tmp2[i] < 8 * 7 * 2^64 = 7 * 2^67
longfelem_diff(tmp, tmp2);
- /* tmp[i] < 2^67 + 2^70 + 2^40 < 2^71 */
+ // tmp[i] < 2^67 + 2^70 + 2^40 < 2^71
felem_reduce_zero105(y_out, tmp);
- /* y_out[i] < 2^106 */
+ // y_out[i] < 2^106
}
-/* point_double_small is the same as point_double, except that it operates on
- * smallfelems. */
+// point_double_small is the same as point_double, except that it operates on
+// smallfelems.
static void point_double_small(smallfelem x_out, smallfelem y_out,
smallfelem z_out, const smallfelem x_in,
const smallfelem y_in, const smallfelem z_in) {
@@ -1013,7 +1013,7 @@
felem_shrink(z_out, felem_z_out);
}
-/* p256_copy_conditional copies in to out iff mask is all ones. */
+// p256_copy_conditional copies in to out iff mask is all ones.
static void p256_copy_conditional(felem out, const felem in, limb mask) {
for (size_t i = 0; i < NLIMBS; ++i) {
const limb tmp = mask & (in[i] ^ out[i]);
@@ -1021,7 +1021,7 @@
}
}
-/* copy_small_conditional copies in to out iff mask is all ones. */
+// copy_small_conditional copies in to out iff mask is all ones.
static void copy_small_conditional(felem out, const smallfelem in, limb mask) {
const uint64_t mask64 = mask;
for (size_t i = 0; i < NLIMBS; ++i) {
@@ -1029,16 +1029,16 @@
}
}
-/* point_add calcuates (x1, y1, z1) + (x2, y2, z2)
- *
- * The method is taken from:
- * http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#addition-add-2007-bl,
- * adapted for mixed addition (z2 = 1, or z2 = 0 for the point at infinity).
- *
- * This function includes a branch for checking whether the two input points
- * are equal, (while not equal to the point at infinity). This case never
- * happens during single point multiplication, so there is no timing leak for
- * ECDH or ECDSA signing. */
+// point_add calculates (x1, y1, z1) + (x2, y2, z2)
+//
+// The method is taken from:
+// http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#addition-add-2007-bl,
+// adapted for mixed addition (z2 = 1, or z2 = 0 for the point at infinity).
+//
+// This function includes a branch for checking whether the two input points
+// are equal (while not equal to the point at infinity). This case never
+// happens during single point multiplication, so there is no timing leak for
+// ECDH or ECDSA signing.
static void point_add(felem x3, felem y3, felem z3, const felem x1,
const felem y1, const felem z1, const int mixed,
const smallfelem x2, const smallfelem y2,
@@ -1053,94 +1053,94 @@
z1_is_zero = smallfelem_is_zero(small3);
z2_is_zero = smallfelem_is_zero(z2);
- /* ftmp = z1z1 = z1**2 */
+ // ftmp = z1z1 = z1**2
smallfelem_square(tmp, small3);
felem_reduce(ftmp, tmp);
- /* ftmp[i] < 2^101 */
+ // ftmp[i] < 2^101
felem_shrink(small1, ftmp);
if (!mixed) {
- /* ftmp2 = z2z2 = z2**2 */
+ // ftmp2 = z2z2 = z2**2
smallfelem_square(tmp, z2);
felem_reduce(ftmp2, tmp);
- /* ftmp2[i] < 2^101 */
+ // ftmp2[i] < 2^101
felem_shrink(small2, ftmp2);
felem_shrink(small5, x1);
- /* u1 = ftmp3 = x1*z2z2 */
+ // u1 = ftmp3 = x1*z2z2
smallfelem_mul(tmp, small5, small2);
felem_reduce(ftmp3, tmp);
- /* ftmp3[i] < 2^101 */
+ // ftmp3[i] < 2^101
- /* ftmp5 = z1 + z2 */
+ // ftmp5 = z1 + z2
felem_assign(ftmp5, z1);
felem_small_sum(ftmp5, z2);
- /* ftmp5[i] < 2^107 */
+ // ftmp5[i] < 2^107
- /* ftmp5 = (z1 + z2)**2 - (z1z1 + z2z2) = 2z1z2 */
+ // ftmp5 = (z1 + z2)**2 - (z1z1 + z2z2) = 2z1z2
felem_square(tmp, ftmp5);
felem_reduce(ftmp5, tmp);
- /* ftmp2 = z2z2 + z1z1 */
+ // ftmp2 = z2z2 + z1z1
felem_sum(ftmp2, ftmp);
- /* ftmp2[i] < 2^101 + 2^101 = 2^102 */
+ // ftmp2[i] < 2^101 + 2^101 = 2^102
felem_diff(ftmp5, ftmp2);
- /* ftmp5[i] < 2^105 + 2^101 < 2^106 */
+ // ftmp5[i] < 2^105 + 2^101 < 2^106
- /* ftmp2 = z2 * z2z2 */
+ // ftmp2 = z2 * z2z2
smallfelem_mul(tmp, small2, z2);
felem_reduce(ftmp2, tmp);
- /* s1 = ftmp2 = y1 * z2**3 */
+ // s1 = ftmp2 = y1 * z2**3
felem_mul(tmp, y1, ftmp2);
felem_reduce(ftmp6, tmp);
- /* ftmp6[i] < 2^101 */
+ // ftmp6[i] < 2^101
} else {
- /* We'll assume z2 = 1 (special case z2 = 0 is handled later). */
+ // We'll assume z2 = 1 (special case z2 = 0 is handled later).
- /* u1 = ftmp3 = x1*z2z2 */
+ // u1 = ftmp3 = x1*z2z2
felem_assign(ftmp3, x1);
- /* ftmp3[i] < 2^106 */
+ // ftmp3[i] < 2^106
- /* ftmp5 = 2z1z2 */
+ // ftmp5 = 2z1z2
felem_assign(ftmp5, z1);
felem_scalar(ftmp5, 2);
- /* ftmp5[i] < 2*2^106 = 2^107 */
+ // ftmp5[i] < 2*2^106 = 2^107
- /* s1 = ftmp2 = y1 * z2**3 */
+ // s1 = ftmp2 = y1 * z2**3
felem_assign(ftmp6, y1);
- /* ftmp6[i] < 2^106 */
+ // ftmp6[i] < 2^106
}
- /* u2 = x2*z1z1 */
+ // u2 = x2*z1z1
smallfelem_mul(tmp, x2, small1);
felem_reduce(ftmp4, tmp);
- /* h = ftmp4 = u2 - u1 */
+ // h = ftmp4 = u2 - u1
felem_diff_zero107(ftmp4, ftmp3);
- /* ftmp4[i] < 2^107 + 2^101 < 2^108 */
+ // ftmp4[i] < 2^107 + 2^101 < 2^108
felem_shrink(small4, ftmp4);
x_equal = smallfelem_is_zero(small4);
- /* z_out = ftmp5 * h */
+ // z_out = ftmp5 * h
felem_small_mul(tmp, small4, ftmp5);
felem_reduce(z_out, tmp);
- /* z_out[i] < 2^101 */
+ // z_out[i] < 2^101
- /* ftmp = z1 * z1z1 */
+ // ftmp = z1 * z1z1
smallfelem_mul(tmp, small1, small3);
felem_reduce(ftmp, tmp);
- /* s2 = tmp = y2 * z1**3 */
+ // s2 = tmp = y2 * z1**3
felem_small_mul(tmp, y2, ftmp);
felem_reduce(ftmp5, tmp);
- /* r = ftmp5 = (s2 - s1)*2 */
+ // r = ftmp5 = (s2 - s1)*2
felem_diff_zero107(ftmp5, ftmp6);
- /* ftmp5[i] < 2^107 + 2^107 = 2^108 */
+ // ftmp5[i] < 2^107 + 2^107 = 2^108
felem_scalar(ftmp5, 2);
- /* ftmp5[i] < 2^109 */
+ // ftmp5[i] < 2^109
felem_shrink(small1, ftmp5);
y_equal = smallfelem_is_zero(small1);
@@ -1149,42 +1149,42 @@
return;
}
- /* I = ftmp = (2h)**2 */
+ // I = ftmp = (2h)**2
felem_assign(ftmp, ftmp4);
felem_scalar(ftmp, 2);
- /* ftmp[i] < 2*2^108 = 2^109 */
+ // ftmp[i] < 2*2^108 = 2^109
felem_square(tmp, ftmp);
felem_reduce(ftmp, tmp);
- /* J = ftmp2 = h * I */
+ // J = ftmp2 = h * I
felem_mul(tmp, ftmp4, ftmp);
felem_reduce(ftmp2, tmp);
- /* V = ftmp4 = U1 * I */
+ // V = ftmp4 = U1 * I
felem_mul(tmp, ftmp3, ftmp);
felem_reduce(ftmp4, tmp);
- /* x_out = r**2 - J - 2V */
+ // x_out = r**2 - J - 2V
smallfelem_square(tmp, small1);
felem_reduce(x_out, tmp);
felem_assign(ftmp3, ftmp4);
felem_scalar(ftmp4, 2);
felem_sum(ftmp4, ftmp2);
- /* ftmp4[i] < 2*2^101 + 2^101 < 2^103 */
+ // ftmp4[i] < 2*2^101 + 2^101 < 2^103
felem_diff(x_out, ftmp4);
- /* x_out[i] < 2^105 + 2^101 */
+ // x_out[i] < 2^105 + 2^101
- /* y_out = r(V-x_out) - 2 * s1 * J */
+ // y_out = r(V-x_out) - 2 * s1 * J
felem_diff_zero107(ftmp3, x_out);
- /* ftmp3[i] < 2^107 + 2^101 < 2^108 */
+ // ftmp3[i] < 2^107 + 2^101 < 2^108
felem_small_mul(tmp, small1, ftmp3);
felem_mul(tmp2, ftmp6, ftmp2);
longfelem_scalar(tmp2, 2);
- /* tmp2[i] < 2*2^67 = 2^68 */
+ // tmp2[i] < 2*2^67 = 2^68
longfelem_diff(tmp, tmp2);
- /* tmp[i] < 2^67 + 2^70 + 2^40 < 2^71 */
+ // tmp[i] < 2^67 + 2^70 + 2^40 < 2^71
felem_reduce_zero105(y_out, tmp);
- /* y_out[i] < 2^106 */
+ // y_out[i] < 2^106
copy_small_conditional(x_out, x2, z1_is_zero);
p256_copy_conditional(x_out, x1, z2_is_zero);
@@ -1197,8 +1197,8 @@
felem_assign(z3, z_out);
}
-/* point_add_small is the same as point_add, except that it operates on
- * smallfelems. */
+// point_add_small is the same as point_add, except that it operates on
+// smallfelems.
static void point_add_small(smallfelem x3, smallfelem y3, smallfelem z3,
smallfelem x1, smallfelem y1, smallfelem z1,
smallfelem x2, smallfelem y2, smallfelem z2) {
@@ -1214,42 +1214,42 @@
felem_shrink(z3, felem_z3);
}
-/* Base point precomputation
- * --------------------------
- *
- * Two different sorts of precomputed tables are used in the following code.
- * Each contains various points on the curve, where each point is three field
- * elements (x, y, z).
- *
- * For the base point table, z is usually 1 (0 for the point at infinity).
- * This table has 2 * 16 elements, starting with the following:
- * index | bits | point
- * ------+---------+------------------------------
- * 0 | 0 0 0 0 | 0G
- * 1 | 0 0 0 1 | 1G
- * 2 | 0 0 1 0 | 2^64G
- * 3 | 0 0 1 1 | (2^64 + 1)G
- * 4 | 0 1 0 0 | 2^128G
- * 5 | 0 1 0 1 | (2^128 + 1)G
- * 6 | 0 1 1 0 | (2^128 + 2^64)G
- * 7 | 0 1 1 1 | (2^128 + 2^64 + 1)G
- * 8 | 1 0 0 0 | 2^192G
- * 9 | 1 0 0 1 | (2^192 + 1)G
- * 10 | 1 0 1 0 | (2^192 + 2^64)G
- * 11 | 1 0 1 1 | (2^192 + 2^64 + 1)G
- * 12 | 1 1 0 0 | (2^192 + 2^128)G
- * 13 | 1 1 0 1 | (2^192 + 2^128 + 1)G
- * 14 | 1 1 1 0 | (2^192 + 2^128 + 2^64)G
- * 15 | 1 1 1 1 | (2^192 + 2^128 + 2^64 + 1)G
- * followed by a copy of this with each element multiplied by 2^32.
- *
- * The reason for this is so that we can clock bits into four different
- * locations when doing simple scalar multiplies against the base point,
- * and then another four locations using the second 16 elements.
- *
- * Tables for other points have table[i] = iG for i in 0 .. 16. */
+// Base point precomputation
+// --------------------------
+//
+// Two different sorts of precomputed tables are used in the following code.
+// Each contains various points on the curve, where each point is three field
+// elements (x, y, z).
+//
+// For the base point table, z is usually 1 (0 for the point at infinity).
+// This table has 2 * 16 elements, starting with the following:
+// index | bits | point
+// ------+---------+------------------------------
+// 0 | 0 0 0 0 | 0G
+// 1 | 0 0 0 1 | 1G
+// 2 | 0 0 1 0 | 2^64G
+// 3 | 0 0 1 1 | (2^64 + 1)G
+// 4 | 0 1 0 0 | 2^128G
+// 5 | 0 1 0 1 | (2^128 + 1)G
+// 6 | 0 1 1 0 | (2^128 + 2^64)G
+// 7 | 0 1 1 1 | (2^128 + 2^64 + 1)G
+// 8 | 1 0 0 0 | 2^192G
+// 9 | 1 0 0 1 | (2^192 + 1)G
+// 10 | 1 0 1 0 | (2^192 + 2^64)G
+// 11 | 1 0 1 1 | (2^192 + 2^64 + 1)G
+// 12 | 1 1 0 0 | (2^192 + 2^128)G
+// 13 | 1 1 0 1 | (2^192 + 2^128 + 1)G
+// 14 | 1 1 1 0 | (2^192 + 2^128 + 2^64)G
+// 15 | 1 1 1 1 | (2^192 + 2^128 + 2^64 + 1)G
+// followed by a copy of this with each element multiplied by 2^32.
+//
+// The reason for this is so that we can clock bits into four different
+// locations when doing simple scalar multiplies against the base point,
+// and then another four locations using the second 16 elements.
+//
+// Tables for other points have table[i] = iG for i in 0 .. 16.
-/* g_pre_comp is the table of precomputed base points */
+// g_pre_comp is the table of precomputed base points
static const smallfelem g_pre_comp[2][16][3] = {
{{{0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}},
{{0xf4a13945d898c296, 0x77037d812deb33a0, 0xf8bce6e563a440f2,
@@ -1404,8 +1404,8 @@
0x4ab5b6b2b8753f81},
{1, 0, 0, 0}}}};
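
To make the index encoding concrete: g_pre_comp[j][idx] holds k*G, where bit b of idx contributes 2^(64*b) to k and the second subtable (j = 1) scales everything by 2^32. A hypothetical helper (name ours) reconstructing k:

#include <openssl/bn.h>

// Returns k such that g_pre_comp[j][idx] encodes k*G, per the table above.
static BIGNUM *g_pre_comp_scalar(int j, unsigned idx) {
  BIGNUM *k = BN_new();  // BN_new returns a zero-valued BIGNUM
  if (k == NULL) {
    return NULL;
  }
  for (int b = 0; b < 4; b++) {
    if (idx & (1u << b)) {
      BN_set_bit(k, 64 * b + 32 * j);  // 2^(64*b), shifted by 2^32 when j = 1
    }
  }
  return k;
}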
-/* select_point selects the |idx|th point from a precomputation table and
- * copies it to out. */
+// select_point selects the |idx|th point from a precomputation table and
+// copies it to out.
static void select_point(const uint64_t idx, size_t size,
const smallfelem pre_comp[/*size*/][3],
smallfelem out[3]) {
@@ -1426,7 +1426,7 @@
}
}
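
The body of select_point is elided by this hunk, but the pattern is the usual branch-free scan: touch every table entry and accumulate the requested one under an equality mask, so the memory access pattern is independent of the secret index. A sketch (ours; four limbs stand in for a field element):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

static void select_limbs(uint64_t out[4], const uint64_t table[][4],
                         size_t size, uint64_t idx) {
  memset(out, 0, 4 * sizeof(uint64_t));
  for (size_t i = 0; i < size; i++) {
    uint64_t x = (uint64_t)i ^ idx;
    uint64_t mask = ((x | (0 - x)) >> 63) - 1;  // all ones iff i == idx
    for (size_t j = 0; j < 4; j++) {
      out[j] |= table[i][j] & mask;  // ORs in only the matching entry
    }
  }
}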
-/* get_bit returns the |i|th bit in |in| */
+// get_bit returns the |i|th bit in |in|
static char get_bit(const felem_bytearray in, int i) {
if (i < 0 || i >= 256) {
return 0;
@@ -1434,11 +1434,11 @@
return (in[i >> 3] >> (i & 7)) & 1;
}
-/* Interleaved point multiplication using precomputed point multiples: The
- * small point multiples 0*P, 1*P, ..., 16*P are in p_pre_comp, the scalar
- * in p_scalar, if non-NULL. If g_scalar is non-NULL, we also add this multiple
- * of the generator, using certain (large) precomputed multiples in g_pre_comp.
- * Output point (X, Y, Z) is stored in x_out, y_out, z_out. */
+// Interleaved point multiplication using precomputed point multiples: The
+// small point multiples 0*P, 1*P, ..., 16*P are in p_pre_comp, the scalar
+// in p_scalar, if non-NULL. If g_scalar is non-NULL, we also add this multiple
+// of the generator, using certain (large) precomputed multiples in g_pre_comp.
+// Output point (X, Y, Z) is stored in x_out, y_out, z_out.
static void batch_mul(felem x_out, felem y_out, felem z_out,
const uint8_t *p_scalar, const uint8_t *g_scalar,
const smallfelem p_pre_comp[17][3]) {
@@ -1447,29 +1447,29 @@
uint64_t bits;
uint8_t sign, digit;
- /* set nq to the point at infinity */
+ // set nq to the point at infinity
OPENSSL_memset(nq, 0, 3 * sizeof(felem));
- /* Loop over both scalars msb-to-lsb, interleaving additions of multiples
- * of the generator (two in each of the last 32 rounds) and additions of p
- * (every 5th round). */
+ // Loop over both scalars msb-to-lsb, interleaving additions of multiples
+ // of the generator (two in each of the last 32 rounds) and additions of p
+ // (every 5th round).
- int skip = 1; /* save two point operations in the first round */
+ int skip = 1; // save two point operations in the first round
size_t i = p_scalar != NULL ? 255 : 31;
for (;;) {
- /* double */
+ // double
if (!skip) {
point_double(nq[0], nq[1], nq[2], nq[0], nq[1], nq[2]);
}
- /* add multiples of the generator */
+ // add multiples of the generator
if (g_scalar != NULL && i <= 31) {
- /* first, look 32 bits upwards */
+ // first, look 32 bits upwards
bits = get_bit(g_scalar, i + 224) << 3;
bits |= get_bit(g_scalar, i + 160) << 2;
bits |= get_bit(g_scalar, i + 96) << 1;
bits |= get_bit(g_scalar, i + 32);
- /* select the point to add, in constant time */
+ // select the point to add, in constant time
select_point(bits, 16, g_pre_comp[1], tmp);
if (!skip) {
@@ -1482,18 +1482,18 @@
skip = 0;
}
- /* second, look at the current position */
+ // second, look at the current position
bits = get_bit(g_scalar, i + 192) << 3;
bits |= get_bit(g_scalar, i + 128) << 2;
bits |= get_bit(g_scalar, i + 64) << 1;
bits |= get_bit(g_scalar, i);
- /* select the point to add, in constant time */
+ // select the point to add, in constant time
select_point(bits, 16, g_pre_comp[0], tmp);
point_add(nq[0], nq[1], nq[2], nq[0], nq[1], nq[2], 1 /* mixed */, tmp[0],
tmp[1], tmp[2]);
}
- /* do other additions every 5 doublings */
+ // do other additions every 5 doublings
if (p_scalar != NULL && i % 5 == 0) {
bits = get_bit(p_scalar, i + 4) << 5;
bits |= get_bit(p_scalar, i + 3) << 4;
@@ -1503,10 +1503,10 @@
bits |= get_bit(p_scalar, i - 1);
ec_GFp_nistp_recode_scalar_bits(&sign, &digit, bits);
- /* select the point to add or subtract, in constant time. */
+ // select the point to add or subtract, in constant time.
select_point(digit, 17, p_pre_comp, tmp);
- smallfelem_neg(ftmp, tmp[1]); /* (X, -Y, Z) is the negative
- * point */
+      smallfelem_neg(ftmp, tmp[1]); // (X, -Y, Z) is the negative point
copy_small_conditional(ftmp, tmp[1], (((limb)sign) - 1));
felem_contract(tmp[1], ftmp);
@@ -1531,13 +1531,10 @@
felem_assign(z_out, nq[2]);
}
-/******************************************************************************/
-/*
- * OPENSSL EC_METHOD FUNCTIONS
- */
+// OPENSSL EC_METHOD FUNCTIONS
-/* Takes the Jacobian coordinates (X, Y, Z) of a point and returns (X', Y') =
- * (X/Z^2, Y/Z^3). */
+// Takes the Jacobian coordinates (X, Y, Z) of a point and returns (X', Y') =
+// (X/Z^2, Y/Z^3).
static int ec_GFp_nistp256_point_get_affine_coordinates(const EC_GROUP *group,
const EC_POINT *point,
BIGNUM *x, BIGNUM *y,
@@ -1612,14 +1609,14 @@
}
if (p != NULL && p_scalar != NULL) {
- /* We treat NULL scalars as 0, and NULL points as points at infinity, i.e.,
- * they contribute nothing to the linear combination. */
+ // We treat NULL scalars as 0, and NULL points as points at infinity, i.e.,
+ // they contribute nothing to the linear combination.
OPENSSL_memset(&p_secret, 0, sizeof(p_secret));
OPENSSL_memset(&p_pre_comp, 0, sizeof(p_pre_comp));
size_t num_bytes;
-    /* Reduce p_scalar to 0 <= p_scalar < 2^256. */
+    // Reduce p_scalar to 0 <= p_scalar < 2^256.
if (BN_num_bits(p_scalar) > 256 || BN_is_negative(p_scalar)) {
- /* This is an unusual input, and we don't guarantee constant-timeness. */
+ // This is an unusual input, and we don't guarantee constant-timeness.
if (!BN_nnmod(tmp_scalar, p_scalar, &group->order, ctx)) {
OPENSSL_PUT_ERROR(EC, ERR_R_BN_LIB);
goto err;
@@ -1629,7 +1626,7 @@
num_bytes = BN_bn2bin(p_scalar, tmp);
}
flip_endian(p_secret, tmp, num_bytes);
- /* Precompute multiples. */
+ // Precompute multiples.
if (!BN_to_felem(x_out, &p->X) ||
!BN_to_felem(y_out, &p->Y) ||
!BN_to_felem(z_out, &p->Z)) {
@@ -1657,10 +1654,10 @@
size_t num_bytes;
OPENSSL_memset(g_secret, 0, sizeof(g_secret));
- /* reduce g_scalar to 0 <= g_scalar < 2^256 */
+ // reduce g_scalar to 0 <= g_scalar < 2^256
if (BN_num_bits(g_scalar) > 256 || BN_is_negative(g_scalar)) {
- /* this is an unusual input, and we don't guarantee
- * constant-timeness. */
+ // this is an unusual input, and we don't guarantee
+ // constant-timeness.
if (!BN_nnmod(tmp_scalar, g_scalar, &group->order, ctx)) {
OPENSSL_PUT_ERROR(EC, ERR_R_BN_LIB);
goto err;
@@ -1676,7 +1673,7 @@
g_scalar != NULL ? g_secret : NULL,
(const smallfelem(*)[3]) &p_pre_comp);
- /* reduce the output to its unique minimal representation */
+ // reduce the output to its unique minimal representation
felem_contract(x_in, x_out);
felem_contract(y_in, y_out);
felem_contract(z_in, z_out);
@@ -1708,4 +1705,4 @@
out->field_decode = NULL;
};
-#endif /* 64_BIT && !WINDOWS */
+#endif // 64_BIT && !WINDOWS
diff --git a/crypto/fipsmodule/ec/p256-x86_64-table.h b/crypto/fipsmodule/ec/p256-x86_64-table.h
index e4705f8..575a203 100644
--- a/crypto/fipsmodule/ec/p256-x86_64-table.h
+++ b/crypto/fipsmodule/ec/p256-x86_64-table.h
@@ -12,17 +12,17 @@
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */
-/* This is the precomputed constant time access table for the code in
- * p256-x86_64.c, for the default generator. The table consists of 37
- * subtables, each subtable contains 64 affine points. The affine points are
- * encoded as eight uint64's, four for the x coordinate and four for the y.
- * Both values are in little-endian order. There are 37 tables because a
- * signed, 6-bit wNAF form of the scalar is used and ceil(256/(6 + 1)) = 37.
- * Within each table there are 64 values because the 6-bit wNAF value can take
- * 64 values, ignoring the sign bit, which is implemented by performing a
- * negation of the affine point when required. We would like to align it to 2MB
- * in order to increase the chances of using a large page but that appears to
- * lead to invalid ELF files being produced. */
+// This is the precomputed constant time access table for the code in
+// p256-x86_64.c, for the default generator. The table consists of 37
+// subtables, each subtable contains 64 affine points. The affine points are
+// encoded as eight uint64's, four for the x coordinate and four for the y.
+// Both values are in little-endian order. There are 37 tables because a
+// signed, 6-bit wNAF form of the scalar is used and ceil(256/(6 + 1)) = 37.
+// Within each table there are 64 values because the 6-bit wNAF value can take
+// 64 values, ignoring the sign bit, which is implemented by performing a
+// negation of the affine point when required. We would like to align it to 2MB
+// in order to increase the chances of using a large page but that appears to
+// lead to invalid ELF files being produced.
static const alignas(4096) BN_ULONG
ecp_nistz256_precomputed[37][64 * sizeof(P256_POINT_AFFINE) /
diff --git a/crypto/fipsmodule/ec/p256-x86_64.c b/crypto/fipsmodule/ec/p256-x86_64.c
index de80dca..8b51677 100644
--- a/crypto/fipsmodule/ec/p256-x86_64.c
+++ b/crypto/fipsmodule/ec/p256-x86_64.c
@@ -12,13 +12,13 @@
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */
-/* Developers and authors:
- * Shay Gueron (1, 2), and Vlad Krasnov (1)
- * (1) Intel Corporation, Israel Development Center
- * (2) University of Haifa
- * Reference:
- * S.Gueron and V.Krasnov, "Fast Prime Field Elliptic Curve Cryptography with
- * 256 Bit Primes" */
+// Developers and authors:
+// Shay Gueron (1, 2), and Vlad Krasnov (1)
+// (1) Intel Corporation, Israel Development Center
+// (2) University of Haifa
+// Reference:
+// S.Gueron and V.Krasnov, "Fast Prime Field Elliptic Curve Cryptography with
+// 256 Bit Primes"
#include <openssl/ec.h>
@@ -42,16 +42,16 @@
typedef P256_POINT_AFFINE PRECOMP256_ROW[64];
-/* One converted into the Montgomery domain */
+// One converted into the Montgomery domain
static const BN_ULONG ONE[P256_LIMBS] = {
TOBN(0x00000000, 0x00000001), TOBN(0xffffffff, 0x00000000),
TOBN(0xffffffff, 0xffffffff), TOBN(0x00000000, 0xfffffffe),
};
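
These limbs spell out 2^224 - 2^192 - 2^96 + 1, which is 2^256 mod p: the Montgomery form of 1 for R = 2^256 and the P-256 prime p. A quick cross-check (ours):

#include <stdio.h>
#include <openssl/bn.h>
#include <openssl/crypto.h>

static void print_r_mod_p(void) {
  BN_CTX *ctx = BN_CTX_new();
  BIGNUM *p = NULL, *r = BN_new();
  BN_hex2bn(&p, "ffffffff00000001000000000000000000000000"
                "ffffffffffffffffffffffff");  // the P-256 prime
  BN_set_bit(r, 256);                         // r = 2^256
  BN_mod(r, r, p, ctx);                       // r = 2^256 mod p
  char *hex = BN_bn2hex(r);
  printf("%s\n", hex);  // prints the limbs of ONE, most significant first
  OPENSSL_free(hex);
  BN_free(p);
  BN_free(r);
  BN_CTX_free(ctx);
}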
-/* Precomputed tables for the default generator */
+// Precomputed tables for the default generator
#include "p256-x86_64-table.h"
-/* Recode window to a signed digit, see util-64.c for details */
+// Recode window to a signed digit, see util-64.c for details
static unsigned booth_recode_w5(unsigned in) {
unsigned s, d;
@@ -74,11 +74,11 @@
return (d << 1) + (s & 1);
}
-/* copy_conditional copies |src| to |dst| if |move| is one and leaves it as-is
- * if |move| is zero.
- *
- * WARNING: this breaks the usual convention of constant-time functions
- * returning masks. */
+// copy_conditional copies |src| to |dst| if |move| is one and leaves it as-is
+// if |move| is zero.
+//
+// WARNING: this breaks the usual convention of constant-time functions
+// returning masks.
static void copy_conditional(BN_ULONG dst[P256_LIMBS],
const BN_ULONG src[P256_LIMBS], BN_ULONG move) {
BN_ULONG mask1 = ((BN_ULONG)0) - move;
@@ -96,32 +96,32 @@
}
}
-/* is_not_zero returns one iff in != 0 and zero otherwise.
- *
- * WARNING: this breaks the usual convention of constant-time functions
- * returning masks.
- *
- * (define-fun is_not_zero ((in (_ BitVec 64))) (_ BitVec 64)
- * (bvlshr (bvor in (bvsub #x0000000000000000 in)) #x000000000000003f)
- * )
- *
- * (declare-fun x () (_ BitVec 64))
- *
- * (assert (and (= x #x0000000000000000) (= (is_not_zero x) #x0000000000000001)))
- * (check-sat)
- *
- * (assert (and (not (= x #x0000000000000000)) (= (is_not_zero x) #x0000000000000000)))
- * (check-sat)
- * */
+// is_not_zero returns one iff in != 0 and zero otherwise.
+//
+// WARNING: this breaks the usual convention of constant-time functions
+// returning masks.
+//
+// (define-fun is_not_zero ((in (_ BitVec 64))) (_ BitVec 64)
+// (bvlshr (bvor in (bvsub #x0000000000000000 in)) #x000000000000003f)
+// )
+//
+// (declare-fun x () (_ BitVec 64))
+//
+// (assert (and (= x #x0000000000000000) (= (is_not_zero x) #x0000000000000001)))
+// (check-sat)
+//
+// (assert (and (not (= x #x0000000000000000)) (= (is_not_zero x) #x0000000000000000)))
+// (check-sat)
+//
static BN_ULONG is_not_zero(BN_ULONG in) {
in |= (0 - in);
in >>= BN_BITS2 - 1;
return in;
}
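
Both SMT queries above are unsatisfiable, which pins the function to 0 on zero input and 1 on every nonzero input. The same claims as plain asserts (ours):

#include <assert.h>

static void is_not_zero_check(void) {
  assert(is_not_zero(0) == 0);             // first query: zero maps to 0
  assert(is_not_zero(1) == 1);             // second query: nonzero maps to 1,
  assert(is_not_zero((BN_ULONG)-1) == 1);  // even for an all-ones input
}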
-/* ecp_nistz256_mod_inverse_mont sets |r| to (|in| * 2^-256)^-1 * 2^256 mod p.
- * That is, |r| is the modular inverse of |in| for input and output in the
- * Montgomery domain. */
+// ecp_nistz256_mod_inverse_mont sets |r| to (|in| * 2^-256)^-1 * 2^256 mod p.
+// That is, |r| is the modular inverse of |in| for input and output in the
+// Montgomery domain.
static void ecp_nistz256_mod_inverse_mont(BN_ULONG r[P256_LIMBS],
const BN_ULONG in[P256_LIMBS]) {
/* The poly is ffffffff 00000001 00000000 00000000 00000000 ffffffff ffffffff
@@ -136,29 +136,29 @@
int i;
ecp_nistz256_sqr_mont(res, in);
- ecp_nistz256_mul_mont(p2, res, in); /* 3*p */
+ ecp_nistz256_mul_mont(p2, res, in); // 3*p
ecp_nistz256_sqr_mont(res, p2);
ecp_nistz256_sqr_mont(res, res);
- ecp_nistz256_mul_mont(p4, res, p2); /* f*p */
+ ecp_nistz256_mul_mont(p4, res, p2); // f*p
ecp_nistz256_sqr_mont(res, p4);
ecp_nistz256_sqr_mont(res, res);
ecp_nistz256_sqr_mont(res, res);
ecp_nistz256_sqr_mont(res, res);
- ecp_nistz256_mul_mont(p8, res, p4); /* ff*p */
+ ecp_nistz256_mul_mont(p8, res, p4); // ff*p
ecp_nistz256_sqr_mont(res, p8);
for (i = 0; i < 7; i++) {
ecp_nistz256_sqr_mont(res, res);
}
- ecp_nistz256_mul_mont(p16, res, p8); /* ffff*p */
+ ecp_nistz256_mul_mont(p16, res, p8); // ffff*p
ecp_nistz256_sqr_mont(res, p16);
for (i = 0; i < 15; i++) {
ecp_nistz256_sqr_mont(res, res);
}
- ecp_nistz256_mul_mont(p32, res, p16); /* ffffffff*p */
+ ecp_nistz256_mul_mont(p32, res, p16); // ffffffff*p
ecp_nistz256_sqr_mont(res, p32);
for (i = 0; i < 31; i++) {
@@ -201,8 +201,8 @@
ecp_nistz256_mul_mont(r, res, in);
}
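
Each square-run-then-multiply step above appends a block of one-bits to the exponent being applied: p16 = p8^(2^8) * p8 turns the 8 one-bits of 0xff into the 16 of 0xffff, and the chain continues this way up to the Fermat exponent p - 2. A hypothetical helper (ours) expressing one such step; with it, the step just mentioned would read sqr_n_mul(p16, p8, 8, p8):

// r = a^(2^n) * b: n squarings shift the exponent accumulated in a left by
// n bits, then one multiplication folds in the exponent accumulated in b.
static void sqr_n_mul(BN_ULONG r[P256_LIMBS], const BN_ULONG a[P256_LIMBS],
                      int n, const BN_ULONG b[P256_LIMBS]) {
  ecp_nistz256_sqr_mont(r, a);     // r = a^2
  for (int i = 1; i < n; i++) {
    ecp_nistz256_sqr_mont(r, r);   // after the loop, r = a^(2^n)
  }
  ecp_nistz256_mul_mont(r, r, b);  // r = a^(2^n) * b
}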
-/* ecp_nistz256_bignum_to_field_elem copies the contents of |in| to |out| and
- * returns one if it fits. Otherwise it returns zero. */
+// ecp_nistz256_bignum_to_field_elem copies the contents of |in| to |out| and
+// returns one if it fits. Otherwise it returns zero.
static int ecp_nistz256_bignum_to_field_elem(BN_ULONG out[P256_LIMBS],
const BIGNUM *in) {
if (in->top > P256_LIMBS) {
@@ -214,7 +214,7 @@
return 1;
}
-/* r = p * p_scalar */
+// r = p * p_scalar
static int ecp_nistz256_windowed_mul(const EC_GROUP *group, P256_POINT *r,
const EC_POINT *p, const BIGNUM *p_scalar,
BN_CTX *ctx) {
@@ -224,9 +224,9 @@
static const unsigned kWindowSize = 5;
static const unsigned kMask = (1 << (5 /* kWindowSize */ + 1)) - 1;
- /* A |P256_POINT| is (3 * 32) = 96 bytes, and the 64-byte alignment should
- * add no more than 63 bytes of overhead. Thus, |table| should require
- * ~1599 ((96 * 16) + 63) bytes of stack space. */
+ // A |P256_POINT| is (3 * 32) = 96 bytes, and the 64-byte alignment should
+ // add no more than 63 bytes of overhead. Thus, |table| should require
+ // ~1599 ((96 * 16) + 63) bytes of stack space.
alignas(64) P256_POINT table[16];
uint8_t p_str[33];
@@ -279,9 +279,9 @@
p_str[j] = 0;
}
- /* table[0] is implicitly (0,0,0) (the point at infinity), therefore it is
- * not stored. All other values are actually stored with an offset of -1 in
- * table. */
+ // table[0] is implicitly (0,0,0) (the point at infinity), therefore it is
+ // not stored. All other values are actually stored with an offset of -1 in
+ // table.
P256_POINT *row = table;
if (!ecp_nistz256_bignum_to_field_elem(row[1 - 1].X, &p->X) ||
@@ -341,7 +341,7 @@
ecp_nistz256_point_double(r, r);
}
- /* Final window */
+ // Final window
wvalue = p_str[0];
wvalue = (wvalue << 1) & kMask;
@@ -426,7 +426,7 @@
p_str[i] = 0;
}
- /* First window */
+ // First window
unsigned wvalue = (p_str[0] << 1) & kMask;
unsigned index = kWindowSize;
@@ -439,9 +439,9 @@
ecp_nistz256_neg(p.p.Z, p.p.Y);
copy_conditional(p.p.Y, p.p.Z, wvalue & 1);
- /* Convert |p| from affine to Jacobian coordinates. We set Z to zero if |p|
- * is infinity and |ONE| otherwise. |p| was computed from the table, so it
- * is infinity iff |wvalue >> 1| is zero. */
+ // Convert |p| from affine to Jacobian coordinates. We set Z to zero if |p|
+ // is infinity and |ONE| otherwise. |p| was computed from the table, so it
+ // is infinity iff |wvalue >> 1| is zero.
OPENSSL_memset(p.p.Z, 0, sizeof(p.p.Z));
copy_conditional(p.p.Z, ONE, is_not_zero(wvalue >> 1));
@@ -478,7 +478,7 @@
}
}
- /* Not constant-time, but we're only operating on the public output. */
+ // Not constant-time, but we're only operating on the public output.
if (!bn_set_words(&r->X, p.p.X, P256_LIMBS) ||
!bn_set_words(&r->Y, p.p.Y, P256_LIMBS) ||
!bn_set_words(&r->Z, p.p.Z, P256_LIMBS)) {
@@ -516,10 +516,10 @@
ecp_nistz256_mod_inverse_mont(z_inv3, point_z);
ecp_nistz256_sqr_mont(z_inv2, z_inv3);
- /* Instead of using |ecp_nistz256_from_mont| to convert the |x| coordinate
- * and then calling |ecp_nistz256_from_mont| again to convert the |y|
- * coordinate below, convert the common factor |z_inv2| once now, saving one
- * reduction. */
+ // Instead of using |ecp_nistz256_from_mont| to convert the |x| coordinate
+ // and then calling |ecp_nistz256_from_mont| again to convert the |y|
+ // coordinate below, convert the common factor |z_inv2| once now, saving one
+ // reduction.
ecp_nistz256_from_mont(z_inv2, z_inv2);
if (x != NULL) {
diff --git a/crypto/fipsmodule/ec/p256-x86_64.h b/crypto/fipsmodule/ec/p256-x86_64.h
index 0132348..6a0bebb 100644
--- a/crypto/fipsmodule/ec/p256-x86_64.h
+++ b/crypto/fipsmodule/ec/p256-x86_64.h
@@ -27,30 +27,30 @@
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && \
!defined(OPENSSL_SMALL)
-/* P-256 field operations.
- *
- * An element mod P in P-256 is represented as a little-endian array of
- * |P256_LIMBS| |BN_ULONG|s, spanning the full range of values.
- *
- * The following functions take fully-reduced inputs mod P and give
- * fully-reduced outputs. They may be used in-place. */
+// P-256 field operations.
+//
+// An element mod P in P-256 is represented as a little-endian array of
+// |P256_LIMBS| |BN_ULONG|s, spanning the full range of values.
+//
+// The following functions take fully-reduced inputs mod P and give
+// fully-reduced outputs. They may be used in-place.
#define P256_LIMBS (256 / BN_BITS2)
-/* ecp_nistz256_neg sets |res| to -|a| mod P. */
+// ecp_nistz256_neg sets |res| to -|a| mod P.
void ecp_nistz256_neg(BN_ULONG res[P256_LIMBS], const BN_ULONG a[P256_LIMBS]);
-/* ecp_nistz256_mul_mont sets |res| to |a| * |b| * 2^-256 mod P. */
+// ecp_nistz256_mul_mont sets |res| to |a| * |b| * 2^-256 mod P.
void ecp_nistz256_mul_mont(BN_ULONG res[P256_LIMBS],
const BN_ULONG a[P256_LIMBS],
const BN_ULONG b[P256_LIMBS]);
-/* ecp_nistz256_sqr_mont sets |res| to |a| * |a| * 2^-256 mod P. */
+// ecp_nistz256_sqr_mont sets |res| to |a| * |a| * 2^-256 mod P.
void ecp_nistz256_sqr_mont(BN_ULONG res[P256_LIMBS],
const BN_ULONG a[P256_LIMBS]);
-/* ecp_nistz256_from_mont sets |res| to |in|, converted from Montgomery domain
- * by multiplying with 1. */
+// ecp_nistz256_from_mont sets |res| to |in|, converted from Montgomery domain
+// by multiplying with 1.
static inline void ecp_nistz256_from_mont(BN_ULONG res[P256_LIMBS],
const BN_ULONG in[P256_LIMBS]) {
static const BN_ULONG ONE[P256_LIMBS] = { 1 };
@@ -58,47 +58,47 @@
}
-/* P-256 point operations.
- *
- * The following functions may be used in-place. All coordinates are in the
- * Montgomery domain. */
+// P-256 point operations.
+//
+// The following functions may be used in-place. All coordinates are in the
+// Montgomery domain.
-/* A P256_POINT represents a P-256 point in Jacobian coordinates. */
+// A P256_POINT represents a P-256 point in Jacobian coordinates.
typedef struct {
BN_ULONG X[P256_LIMBS];
BN_ULONG Y[P256_LIMBS];
BN_ULONG Z[P256_LIMBS];
} P256_POINT;
-/* A P256_POINT_AFFINE represents a P-256 point in affine coordinates. Infinity
- * is encoded as (0, 0). */
+// A P256_POINT_AFFINE represents a P-256 point in affine coordinates. Infinity
+// is encoded as (0, 0).
typedef struct {
BN_ULONG X[P256_LIMBS];
BN_ULONG Y[P256_LIMBS];
} P256_POINT_AFFINE;
-/* ecp_nistz256_select_w5 sets |*val| to |in_t[index-1]| if 1 <= |index| <= 16
- * and all zeros (the point at infinity) if |index| is 0. This is done in
- * constant time. */
+// ecp_nistz256_select_w5 sets |*val| to |in_t[index-1]| if 1 <= |index| <= 16
+// and all zeros (the point at infinity) if |index| is 0. This is done in
+// constant time.
void ecp_nistz256_select_w5(P256_POINT *val, const P256_POINT in_t[16],
int index);
-/* ecp_nistz256_select_w7 sets |*val| to |in_t[index-1]| if 1 <= |index| <= 64
- * and all zeros (the point at infinity) if |index| is 0. This is done in
- * constant time. */
+// ecp_nistz256_select_w7 sets |*val| to |in_t[index-1]| if 1 <= |index| <= 64
+// and all zeros (the point at infinity) if |index| is 0. This is done in
+// constant time.
void ecp_nistz256_select_w7(P256_POINT_AFFINE *val,
const P256_POINT_AFFINE in_t[64], int index);
-/* ecp_nistz256_point_double sets |r| to |a| doubled. */
+// ecp_nistz256_point_double sets |r| to |a| doubled.
void ecp_nistz256_point_double(P256_POINT *r, const P256_POINT *a);
-/* ecp_nistz256_point_add adds |a| to |b| and places the result in |r|. */
+// ecp_nistz256_point_add adds |a| to |b| and places the result in |r|.
void ecp_nistz256_point_add(P256_POINT *r, const P256_POINT *a,
const P256_POINT *b);
-/* ecp_nistz256_point_add_affine adds |a| to |b| and places the result in
- * |r|. |a| and |b| must not represent the same point unless they are both
- * infinity. */
+// ecp_nistz256_point_add_affine adds |a| to |b| and places the result in
+// |r|. |a| and |b| must not represent the same point unless they are both
+// infinity.
void ecp_nistz256_point_add_affine(P256_POINT *r, const P256_POINT *a,
const P256_POINT_AFFINE *b);
@@ -107,7 +107,7 @@
#if defined(__cplusplus)
-} /* extern C++ */
+} // extern C++
#endif
-#endif /* OPENSSL_HEADER_EC_P256_X86_64_H */
+#endif // OPENSSL_HEADER_EC_P256_X86_64_H
diff --git a/crypto/fipsmodule/ec/simple.c b/crypto/fipsmodule/ec/simple.c
index 1a03d84..75c06da 100644
--- a/crypto/fipsmodule/ec/simple.c
+++ b/crypto/fipsmodule/ec/simple.c
@@ -77,16 +77,16 @@
#include "../../internal.h"
-/* Most method functions in this file are designed to work with non-trivial
- * representations of field elements if necessary (see ecp_mont.c): while
- * standard modular addition and subtraction are used, the field_mul and
- * field_sqr methods will be used for multiplication, and field_encode and
- * field_decode (if defined) will be used for converting between
- * representations.
- *
- * Functions here specifically assume that if a non-trivial representation is
- * used, it is a Montgomery representation (i.e. 'encoding' means multiplying
- * by some factor R). */
+// Most method functions in this file are designed to work with non-trivial
+// representations of field elements if necessary (see ecp_mont.c): while
+// standard modular addition and subtraction are used, the field_mul and
+// field_sqr methods will be used for multiplication, and field_encode and
+// field_decode (if defined) will be used for converting between
+// representations.
+//
+// Functions here specifically assume that if a non-trivial representation is
+// used, it is a Montgomery representation (i.e. 'encoding' means multiplying
+// by some factor R).
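
As a gloss on that last paragraph: writing enc(x) = x*R mod p for the Montgomery encoding, a field_mul that computes a*b/R keeps encoded values closed under multiplication,

  enc(a) * enc(b) / R = (aR)(bR)/R = (ab)R = enc(a*b)  (mod p)

while addition and subtraction commute with enc() unchanged, which is why plain modular add/sub can be used directly on encoded values.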
int ec_GFp_simple_group_init(EC_GROUP *group) {
BN_init(&group->field);
@@ -123,7 +123,7 @@
BN_CTX *new_ctx = NULL;
BIGNUM *tmp_a;
- /* p must be a prime > 3 */
+ // p must be a prime > 3
if (BN_num_bits(p) <= 2 || !BN_is_odd(p)) {
OPENSSL_PUT_ERROR(EC, EC_R_INVALID_FIELD);
return 0;
@@ -142,13 +142,13 @@
goto err;
}
- /* group->field */
+ // group->field
if (!BN_copy(&group->field, p)) {
goto err;
}
BN_set_negative(&group->field, 0);
- /* group->a */
+ // group->a
if (!BN_nnmod(tmp_a, a, p, ctx)) {
goto err;
}
@@ -160,7 +160,7 @@
goto err;
}
- /* group->b */
+ // group->b
if (!BN_nnmod(&group->b, b, p, ctx)) {
goto err;
}
@@ -169,7 +169,7 @@
goto err;
}
- /* group->a_is_minus3 */
+ // group->a_is_minus3
if (!BN_add_word(tmp_a, 3)) {
goto err;
}
@@ -360,7 +360,7 @@
EC_POINT *point, const BIGNUM *x,
const BIGNUM *y, BN_CTX *ctx) {
if (x == NULL || y == NULL) {
- /* unlike for projective coordinates, we do not tolerate this */
+ // unlike for projective coordinates, we do not tolerate this
OPENSSL_PUT_ERROR(EC, ERR_R_PASSED_NULL_PARAMETER);
return 0;
}
@@ -412,88 +412,87 @@
goto end;
}
- /* Note that in this function we must not read components of 'a' or 'b'
- * once we have written the corresponding components of 'r'.
- * ('r' might be one of 'a' or 'b'.)
- */
+ // Note that in this function we must not read components of 'a' or 'b'
+ // once we have written the corresponding components of 'r'.
+ // ('r' might be one of 'a' or 'b'.)
- /* n1, n2 */
+ // n1, n2
int b_Z_is_one = BN_cmp(&b->Z, &group->one) == 0;
if (b_Z_is_one) {
if (!BN_copy(n1, &a->X) || !BN_copy(n2, &a->Y)) {
goto end;
}
- /* n1 = X_a */
- /* n2 = Y_a */
+ // n1 = X_a
+ // n2 = Y_a
} else {
if (!field_sqr(group, n0, &b->Z, ctx) ||
!field_mul(group, n1, &a->X, n0, ctx)) {
goto end;
}
- /* n1 = X_a * Z_b^2 */
+ // n1 = X_a * Z_b^2
if (!field_mul(group, n0, n0, &b->Z, ctx) ||
!field_mul(group, n2, &a->Y, n0, ctx)) {
goto end;
}
- /* n2 = Y_a * Z_b^3 */
+ // n2 = Y_a * Z_b^3
}
- /* n3, n4 */
+ // n3, n4
int a_Z_is_one = BN_cmp(&a->Z, &group->one) == 0;
if (a_Z_is_one) {
if (!BN_copy(n3, &b->X) || !BN_copy(n4, &b->Y)) {
goto end;
}
- /* n3 = X_b */
- /* n4 = Y_b */
+ // n3 = X_b
+ // n4 = Y_b
} else {
if (!field_sqr(group, n0, &a->Z, ctx) ||
!field_mul(group, n3, &b->X, n0, ctx)) {
goto end;
}
- /* n3 = X_b * Z_a^2 */
+ // n3 = X_b * Z_a^2
if (!field_mul(group, n0, n0, &a->Z, ctx) ||
!field_mul(group, n4, &b->Y, n0, ctx)) {
goto end;
}
- /* n4 = Y_b * Z_a^3 */
+ // n4 = Y_b * Z_a^3
}
- /* n5, n6 */
+ // n5, n6
if (!BN_mod_sub_quick(n5, n1, n3, p) ||
!BN_mod_sub_quick(n6, n2, n4, p)) {
goto end;
}
- /* n5 = n1 - n3 */
- /* n6 = n2 - n4 */
+ // n5 = n1 - n3
+ // n6 = n2 - n4
if (BN_is_zero(n5)) {
if (BN_is_zero(n6)) {
- /* a is the same point as b */
+ // a is the same point as b
BN_CTX_end(ctx);
ret = EC_POINT_dbl(group, r, a, ctx);
ctx = NULL;
goto end;
} else {
- /* a is the inverse of b */
+ // a is the inverse of b
BN_zero(&r->Z);
ret = 1;
goto end;
}
}
- /* 'n7', 'n8' */
+ // 'n7', 'n8'
if (!BN_mod_add_quick(n1, n1, n3, p) ||
!BN_mod_add_quick(n2, n2, n4, p)) {
goto end;
}
- /* 'n7' = n1 + n3 */
- /* 'n8' = n2 + n4 */
+ // 'n7' = n1 + n3
+ // 'n8' = n2 + n4
- /* Z_r */
+ // Z_r
if (a_Z_is_one && b_Z_is_one) {
if (!BN_copy(&r->Z, n5)) {
goto end;
@@ -515,28 +514,28 @@
}
}
- /* Z_r = Z_a * Z_b * n5 */
+ // Z_r = Z_a * Z_b * n5
- /* X_r */
+ // X_r
if (!field_sqr(group, n0, n6, ctx) ||
!field_sqr(group, n4, n5, ctx) ||
!field_mul(group, n3, n1, n4, ctx) ||
!BN_mod_sub_quick(&r->X, n0, n3, p)) {
goto end;
}
- /* X_r = n6^2 - n5^2 * 'n7' */
+ // X_r = n6^2 - n5^2 * 'n7'
- /* 'n9' */
+ // 'n9'
if (!BN_mod_lshift1_quick(n0, &r->X, p) ||
!BN_mod_sub_quick(n0, n3, n0, p)) {
goto end;
}
- /* n9 = n5^2 * 'n7' - 2 * X_r */
+ // n9 = n5^2 * 'n7' - 2 * X_r
- /* Y_r */
+ // Y_r
if (!field_mul(group, n0, n0, n6, ctx) ||
!field_mul(group, n5, n4, n5, ctx)) {
- goto end; /* now n5 is n5^3 */
+ goto end; // now n5 is n5^3
}
if (!field_mul(group, n1, n2, n5, ctx) ||
!BN_mod_sub_quick(n0, n0, n1, p)) {
@@ -545,17 +544,17 @@
if (BN_is_odd(n0) && !BN_add(n0, n0, p)) {
goto end;
}
- /* now 0 <= n0 < 2*p, and n0 is even */
+ // now 0 <= n0 < 2*p, and n0 is even
if (!BN_rshift1(&r->Y, n0)) {
goto end;
}
- /* Y_r = (n6 * 'n9' - 'n8' * 'n5^3') / 2 */
+ // Y_r = (n6 * 'n9' - 'n8' * 'n5^3') / 2
ret = 1;
end:
if (ctx) {
- /* otherwise we already called BN_CTX_end */
+ // otherwise we already called BN_CTX_end
BN_CTX_end(ctx);
}
BN_CTX_free(new_ctx);
@@ -597,12 +596,11 @@
goto err;
}
- /* Note that in this function we must not read components of 'a'
- * once we have written the corresponding components of 'r'.
-   * ('r' might be the same as 'a'.)
- */
+ // Note that in this function we must not read components of 'a'
+ // once we have written the corresponding components of 'r'.
+  // ('r' might be the same as 'a'.)
- /* n1 */
+ // n1
if (BN_cmp(&a->Z, &group->one) == 0) {
if (!field_sqr(group, n0, &a->X, ctx) ||
!BN_mod_lshift1_quick(n1, n0, p) ||
@@ -610,7 +608,7 @@
!BN_mod_add_quick(n1, n0, &group->a, p)) {
goto err;
}
- /* n1 = 3 * X_a^2 + a_curve */
+ // n1 = 3 * X_a^2 + a_curve
} else if (group->a_is_minus3) {
if (!field_sqr(group, n1, &a->Z, ctx) ||
!BN_mod_add_quick(n0, &a->X, n1, p) ||
@@ -620,8 +618,8 @@
!BN_mod_add_quick(n1, n0, n1, p)) {
goto err;
}
- /* n1 = 3 * (X_a + Z_a^2) * (X_a - Z_a^2)
- * = 3 * X_a^2 - 3 * Z_a^4 */
+ // n1 = 3 * (X_a + Z_a^2) * (X_a - Z_a^2)
+ // = 3 * X_a^2 - 3 * Z_a^4
} else {
if (!field_sqr(group, n0, &a->X, ctx) ||
!BN_mod_lshift1_quick(n1, n0, p) ||
@@ -632,10 +630,10 @@
!BN_mod_add_quick(n1, n1, n0, p)) {
goto err;
}
- /* n1 = 3 * X_a^2 + a_curve * Z_a^4 */
+ // n1 = 3 * X_a^2 + a_curve * Z_a^4
}
- /* Z_r */
+ // Z_r
if (BN_cmp(&a->Z, &group->one) == 0) {
if (!BN_copy(n0, &a->Y)) {
goto err;
@@ -646,38 +644,38 @@
if (!BN_mod_lshift1_quick(&r->Z, n0, p)) {
goto err;
}
- /* Z_r = 2 * Y_a * Z_a */
+ // Z_r = 2 * Y_a * Z_a
- /* n2 */
+ // n2
if (!field_sqr(group, n3, &a->Y, ctx) ||
!field_mul(group, n2, &a->X, n3, ctx) ||
!BN_mod_lshift_quick(n2, n2, 2, p)) {
goto err;
}
- /* n2 = 4 * X_a * Y_a^2 */
+ // n2 = 4 * X_a * Y_a^2
- /* X_r */
+ // X_r
if (!BN_mod_lshift1_quick(n0, n2, p) ||
!field_sqr(group, &r->X, n1, ctx) ||
!BN_mod_sub_quick(&r->X, &r->X, n0, p)) {
goto err;
}
- /* X_r = n1^2 - 2 * n2 */
+ // X_r = n1^2 - 2 * n2
- /* n3 */
+ // n3
if (!field_sqr(group, n0, n3, ctx) ||
!BN_mod_lshift_quick(n3, n0, 3, p)) {
goto err;
}
- /* n3 = 8 * Y_a^4 */
+ // n3 = 8 * Y_a^4
- /* Y_r */
+ // Y_r
if (!BN_mod_sub_quick(n0, n2, &r->X, p) ||
!field_mul(group, n0, n1, n0, ctx) ||
!BN_mod_sub_quick(&r->Y, n0, n3, p)) {
goto err;
}
- /* Y_r = n1 * (n2 - X_r) - n3 */
+ // Y_r = n1 * (n2 - X_r) - n3
ret = 1;
@@ -689,7 +687,7 @@
int ec_GFp_simple_invert(const EC_GROUP *group, EC_POINT *point, BN_CTX *ctx) {
if (EC_POINT_is_at_infinity(group, point) || BN_is_zero(&point->Y)) {
- /* point is its own inverse */
+ // point is its own inverse
return 1;
}
@@ -734,17 +732,16 @@
goto err;
}
- /* We have a curve defined by a Weierstrass equation
- * y^2 = x^3 + a*x + b.
- * The point to consider is given in Jacobian projective coordinates
- * where (X, Y, Z) represents (x, y) = (X/Z^2, Y/Z^3).
- * Substituting this and multiplying by Z^6 transforms the above equation
- * into
- * Y^2 = X^3 + a*X*Z^4 + b*Z^6.
- * To test this, we add up the right-hand side in 'rh'.
- */
+ // We have a curve defined by a Weierstrass equation
+ // y^2 = x^3 + a*x + b.
+ // The point to consider is given in Jacobian projective coordinates
+ // where (X, Y, Z) represents (x, y) = (X/Z^2, Y/Z^3).
+ // Substituting this and multiplying by Z^6 transforms the above equation
+ // into
+ // Y^2 = X^3 + a*X*Z^4 + b*Z^6.
+ // To test this, we add up the right-hand side in 'rh'.
- /* rh := X^2 */
+ // rh := X^2
if (!field_sqr(group, rh, &point->X, ctx)) {
goto err;
}
@@ -756,7 +753,7 @@
goto err;
}
- /* rh := (rh + a*Z^4)*X */
+ // rh := (rh + a*Z^4)*X
if (group->a_is_minus3) {
if (!BN_mod_lshift1_quick(tmp, Z4, p) ||
!BN_mod_add_quick(tmp, tmp, Z4, p) ||
@@ -772,24 +769,24 @@
}
}
- /* rh := rh + b*Z^6 */
+ // rh := rh + b*Z^6
if (!field_mul(group, tmp, &group->b, Z6, ctx) ||
!BN_mod_add_quick(rh, rh, tmp, p)) {
goto err;
}
} else {
- /* rh := (rh + a)*X */
+ // rh := (rh + a)*X
if (!BN_mod_add_quick(rh, rh, &group->a, p) ||
!field_mul(group, rh, rh, &point->X, ctx)) {
goto err;
}
- /* rh := rh + b */
+ // rh := rh + b
if (!BN_mod_add_quick(rh, rh, &group->b, p)) {
goto err;
}
}
- /* 'lh' := Y^2 */
+ // 'lh' := Y^2
if (!field_sqr(group, tmp, &point->Y, ctx)) {
goto err;
}
@@ -804,11 +801,10 @@
int ec_GFp_simple_cmp(const EC_GROUP *group, const EC_POINT *a,
const EC_POINT *b, BN_CTX *ctx) {
- /* return values:
- * -1 error
- * 0 equal (in affine coordinates)
- * 1 not equal
- */
+ // return values:
+ // -1 error
+ // 0 equal (in affine coordinates)
+ // 1 not equal
int (*field_mul)(const EC_GROUP *, BIGNUM *, const BIGNUM *, const BIGNUM *,
BN_CTX *);
@@ -852,11 +848,10 @@
goto end;
}
- /* We have to decide whether
- * (X_a/Z_a^2, Y_a/Z_a^3) = (X_b/Z_b^2, Y_b/Z_b^3),
- * or equivalently, whether
- * (X_a*Z_b^2, Y_a*Z_b^3) = (X_b*Z_a^2, Y_b*Z_a^3).
- */
+ // We have to decide whether
+ // (X_a/Z_a^2, Y_a/Z_a^3) = (X_b/Z_b^2, Y_b/Z_b^3),
+ // or equivalently, whether
+ // (X_a*Z_b^2, Y_a*Z_b^3) = (X_b*Z_a^2, Y_b*Z_a^3).
if (!b_Z_is_one) {
if (!field_sqr(group, Zb23, &b->Z, ctx) ||
@@ -877,9 +872,9 @@
tmp2_ = &b->X;
}
- /* compare X_a*Z_b^2 with X_b*Z_a^2 */
+ // compare X_a*Z_b^2 with X_b*Z_a^2
if (BN_cmp(tmp1_, tmp2_) != 0) {
- ret = 1; /* points differ */
+ ret = 1; // points differ
goto end;
}
@@ -889,7 +884,7 @@
!field_mul(group, tmp1, &a->Y, Zb23, ctx)) {
goto end;
}
- /* tmp1_ = tmp1 */
+ // tmp1_ = tmp1
} else {
tmp1_ = &a->Y;
}
@@ -898,18 +893,18 @@
!field_mul(group, tmp2, &b->Y, Za23, ctx)) {
goto end;
}
- /* tmp2_ = tmp2 */
+ // tmp2_ = tmp2
} else {
tmp2_ = &b->Y;
}
- /* compare Y_a*Z_b^3 with Y_b*Z_a^3 */
+ // compare Y_a*Z_b^3 with Y_b*Z_a^3
if (BN_cmp(tmp1_, tmp2_) != 0) {
- ret = 1; /* points differ */
+ ret = 1; // points differ
goto end;
}
- /* points are equal */
+ // points are equal
ret = 0;
end:
@@ -997,8 +992,8 @@
}
}
- /* Set each prod_Z[i] to the product of points[0]->Z .. points[i]->Z,
- * skipping any zero-valued inputs (pretend that they're 1). */
+ // Set each prod_Z[i] to the product of points[0]->Z .. points[i]->Z,
+ // skipping any zero-valued inputs (pretend that they're 1).
if (!BN_is_zero(&points[0]->Z)) {
if (!BN_copy(prod_Z[0], &points[0]->Z)) {
@@ -1023,13 +1018,13 @@
}
}
- /* Now use a single explicit inversion to replace every non-zero points[i]->Z
- * by its inverse. We use |BN_mod_inverse_odd| instead of doing a constant-
- * time inversion using Fermat's Little Theorem because this function is
- * usually only used for converting multiples of a public key point to
- * affine, and a public key point isn't secret. If we were to use Fermat's
- * Little Theorem then the cost of the inversion would usually be so high
- * that converting the multiples to affine would be counterproductive. */
+ // Now use a single explicit inversion to replace every non-zero points[i]->Z
+ // by its inverse. We use |BN_mod_inverse_odd| instead of doing a constant-
+ // time inversion using Fermat's Little Theorem because this function is
+ // usually only used for converting multiples of a public key point to
+ // affine, and a public key point isn't secret. If we were to use Fermat's
+ // Little Theorem then the cost of the inversion would usually be so high
+ // that converting the multiples to affine would be counterproductive.
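
The trick in isolation, on a toy field (our sketch; arithmetic mod a small prime stands in for the EC field, and zero entries are not skipped here):

#include <stdint.h>
#include <stdlib.h>

static const uint64_t kToyP = 1000003;  // small prime standing in for the field

static uint64_t toy_mul(uint64_t a, uint64_t b) { return a * b % kToyP; }

static uint64_t toy_inv(uint64_t a) {  // a^(p-2) mod p, Fermat's Little Theorem
  uint64_t r = 1, e = kToyP - 2;
  while (e != 0) {
    if (e & 1) {
      r = toy_mul(r, a);
    }
    a = toy_mul(a, a);
    e >>= 1;
  }
  return r;
}

// Replaces every x[i] (all nonzero, n >= 1) by 1/x[i] with a single toy_inv
// call, mirroring the prod_Z / tmp walk in the surrounding function.
static void toy_batch_invert(uint64_t *x, size_t n) {
  uint64_t *prefix = malloc(n * sizeof(uint64_t));
  if (prefix == NULL) {
    return;
  }
  prefix[0] = x[0];
  for (size_t i = 1; i < n; i++) {
    prefix[i] = toy_mul(prefix[i - 1], x[i]);  // prefix[i] = x[0] * .. * x[i]
  }
  uint64_t t = toy_inv(prefix[n - 1]);  // the only inversion performed
  for (size_t i = n - 1; i > 0; i--) {
    uint64_t x_inv = toy_mul(prefix[i - 1], t);  // = 1/x[i]
    t = toy_mul(t, x[i]);  // invariant: t = 1/(x[0] * .. * x[i-1])
    x[i] = x_inv;
  }
  x[0] = t;
  free(prefix);
}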
int no_inverse;
if (!BN_mod_inverse_odd(tmp, &no_inverse, prod_Z[num - 1], &group->field,
ctx)) {
@@ -1038,9 +1033,9 @@
}
if (group->meth->field_encode != NULL) {
- /* In the Montgomery case, we just turned R*H (representing H)
- * into 1/(R*H), but we need R*(1/H) (representing 1/H);
- * i.e. we need to multiply by the Montgomery factor twice. */
+ // In the Montgomery case, we just turned R*H (representing H)
+ // into 1/(R*H), but we need R*(1/H) (representing 1/H);
+ // i.e. we need to multiply by the Montgomery factor twice.
if (!group->meth->field_encode(group, tmp, tmp, ctx) ||
!group->meth->field_encode(group, tmp, tmp, ctx)) {
goto err;
@@ -1048,34 +1043,34 @@
}
for (size_t i = num - 1; i > 0; --i) {
- /* Loop invariant: tmp is the product of the inverses of
- * points[0]->Z .. points[i]->Z (zero-valued inputs skipped). */
+ // Loop invariant: tmp is the product of the inverses of
+ // points[0]->Z .. points[i]->Z (zero-valued inputs skipped).
if (BN_is_zero(&points[i]->Z)) {
continue;
}
- /* Set tmp_Z to the inverse of points[i]->Z (as product
- * of Z inverses 0 .. i, Z values 0 .. i - 1). */
+ // Set tmp_Z to the inverse of points[i]->Z (as product
+ // of Z inverses 0 .. i, Z values 0 .. i - 1).
if (!group->meth->field_mul(group, tmp_Z, prod_Z[i - 1], tmp, ctx) ||
- /* Update tmp to satisfy the loop invariant for i - 1. */
+ // Update tmp to satisfy the loop invariant for i - 1.
!group->meth->field_mul(group, tmp, tmp, &points[i]->Z, ctx) ||
- /* Replace points[i]->Z by its inverse. */
+ // Replace points[i]->Z by its inverse.
!BN_copy(&points[i]->Z, tmp_Z)) {
goto err;
}
}
- /* Replace points[0]->Z by its inverse. */
+ // Replace points[0]->Z by its inverse.
if (!BN_is_zero(&points[0]->Z) && !BN_copy(&points[0]->Z, tmp)) {
goto err;
}
- /* Finally, fix up the X and Y coordinates for all points. */
+ // Finally, fix up the X and Y coordinates for all points.
for (size_t i = 0; i < num; i++) {
EC_POINT *p = points[i];
if (!BN_is_zero(&p->Z)) {
- /* turn (X, Y, 1/Z) into (X/Z^2, Y/Z^3, 1). */
+ // turn (X, Y, 1/Z) into (X/Z^2, Y/Z^3, 1).
if (!group->meth->field_sqr(group, tmp, &p->Z, ctx) ||
!group->meth->field_mul(group, &p->X, &p->X, tmp, ctx) ||
!group->meth->field_mul(group, tmp, tmp, &p->Z, ctx) ||
diff --git a/crypto/fipsmodule/ec/util-64.c b/crypto/fipsmodule/ec/util-64.c
index 4006271..0cb117b 100644
--- a/crypto/fipsmodule/ec/util-64.c
+++ b/crypto/fipsmodule/ec/util-64.c
@@ -21,77 +21,77 @@
#include "internal.h"
-/* This function looks at 5+1 scalar bits (5 current, 1 adjacent less
- * significant bit), and recodes them into a signed digit for use in fast point
- * multiplication: the use of signed rather than unsigned digits means that
- * fewer points need to be precomputed, given that point inversion is easy (a
- * precomputed point dP makes -dP available as well).
- *
- * BACKGROUND:
- *
- * Signed digits for multiplication were introduced by Booth ("A signed binary
- * multiplication technique", Quart. Journ. Mech. and Applied Math., vol. IV,
- * pt. 2 (1951), pp. 236-240), in that case for multiplication of integers.
- * Booth's original encoding did not generally improve the density of nonzero
- * digits over the binary representation, and was merely meant to simplify the
- * handling of signed factors given in two's complement; but it has since been
- * shown to be the basis of various signed-digit representations that do have
- * further advantages, including the wNAF, using the following general
- * approach:
- *
- * (1) Given a binary representation
- *
- * b_k ... b_2 b_1 b_0,
- *
- * of a nonnegative integer (b_k in {0, 1}), rewrite it in digits 0, 1, -1
- * by using bit-wise subtraction as follows:
- *
- * b_k b_(k-1) ... b_2 b_1 b_0
- * - b_k ... b_3 b_2 b_1 b_0
- * -------------------------------------
- * s_k b_(k-1) ... s_3 s_2 s_1 s_0
- *
- * A left-shift followed by subtraction of the original value yields a new
- * representation of the same value, using signed bits s_i = b_(i+1) - b_i.
- * This representation from Booth's paper has since appeared in the
- * literature under a variety of different names including "reversed binary
- * form", "alternating greedy expansion", "mutual opposite form", and
- * "sign-alternating {+-1}-representation".
- *
- * An interesting property is that among the nonzero bits, values 1 and -1
- * strictly alternate.
- *
- * (2) Various window schemes can be applied to the Booth representation of
- * integers: for example, right-to-left sliding windows yield the wNAF
- * (a signed-digit encoding independently discovered by various researchers
- * in the 1990s), and left-to-right sliding windows yield a left-to-right
- * equivalent of the wNAF (independently discovered by various researchers
- * around 2004).
- *
- * To prevent leaking information through side channels in point multiplication,
- * we need to recode the given integer into a regular pattern: sliding windows
- * as in wNAFs won't do, we need their fixed-window equivalent -- which is a few
- * decades older: we'll be using the so-called "modified Booth encoding" due to
- * MacSorley ("High-speed arithmetic in binary computers", Proc. IRE, vol. 49
- * (1961), pp. 67-91), in a radix-2^5 setting. That is, we always combine five
- * signed bits into a signed digit:
- *
- *       s_(5j + 4) s_(5j + 3) s_(5j + 2) s_(5j + 1) s_(5j)
- *
- * The sign-alternating property implies that the resulting digit values are
- * integers from -16 to 16.
- *
- * Of course, we don't actually need to compute the signed digits s_i as an
- * intermediate step (that's just a nice way to see how this scheme relates
- * to the wNAF): a direct computation obtains the recoded digit from the
- * six bits b_(5j + 4) ... b_(5j - 1).
- *
- * This function takes those six bits as an integer (0 .. 63), writing the
- * recoded digit to *sign (0 for positive, 1 for negative) and *digit (absolute
- * value, in the range 0 .. 16). Note that this integer essentially provides the
- * input bits "shifted to the left" by one position: for example, the input to
- * compute the least significant recoded digit, given that there's no bit b_-1,
- * has to be b_4 b_3 b_2 b_1 b_0 0. */
+// This function looks at 5+1 scalar bits (5 current, 1 adjacent less
+// significant bit), and recodes them into a signed digit for use in fast point
+// multiplication: the use of signed rather than unsigned digits means that
+// fewer points need to be precomputed, given that point inversion is easy (a
+// precomputed point dP makes -dP available as well).
+//
+// BACKGROUND:
+//
+// Signed digits for multiplication were introduced by Booth ("A signed binary
+// multiplication technique", Quart. Journ. Mech. and Applied Math., vol. IV,
+// pt. 2 (1951), pp. 236-240), in that case for multiplication of integers.
+// Booth's original encoding did not generally improve the density of nonzero
+// digits over the binary representation, and was merely meant to simplify the
+// handling of signed factors given in two's complement; but it has since been
+// shown to be the basis of various signed-digit representations that do have
+// further advantages, including the wNAF, using the following general
+// approach:
+//
+// (1) Given a binary representation
+//
+// b_k ... b_2 b_1 b_0,
+//
+// of a nonnegative integer (b_k in {0, 1}), rewrite it in digits 0, 1, -1
+// by using bit-wise subtraction as follows:
+//
+// b_k b_(k-1) ... b_2 b_1 b_0
+// - b_k ... b_3 b_2 b_1 b_0
+// -------------------------------------
+// s_k b_(k-1) ... s_3 s_2 s_1 s_0
+//
+// A left-shift followed by subtraction of the original value yields a new
+// representation of the same value, using signed bits s_i = b_(i+1) - b_i.
+// This representation from Booth's paper has since appeared in the
+// literature under a variety of different names including "reversed binary
+// form", "alternating greedy expansion", "mutual opposite form", and
+// "sign-alternating {+-1}-representation".
+//
+// An interesting property is that among the nonzero bits, values 1 and -1
+// strictly alternate.
+//
+// (2) Various window schemes can be applied to the Booth representation of
+// integers: for example, right-to-left sliding windows yield the wNAF
+// (a signed-digit encoding independently discovered by various researchers
+// in the 1990s), and left-to-right sliding windows yield a left-to-right
+// equivalent of the wNAF (independently discovered by various researchers
+// around 2004).
+//
+// To prevent leaking information through side channels in point multiplication,
+// we need to recode the given integer into a regular pattern: sliding windows
+// as in wNAFs won't do, we need their fixed-window equivalent -- which is a few
+// decades older: we'll be using the so-called "modified Booth encoding" due to
+// MacSorley ("High-speed arithmetic in binary computers", Proc. IRE, vol. 49
+// (1961), pp. 67-91), in a radix-2^5 setting. That is, we always combine five
+// signed bits into a signed digit:
+//
+//       s_(5j + 4) s_(5j + 3) s_(5j + 2) s_(5j + 1) s_(5j)
+//
+// The sign-alternating property implies that the resulting digit values are
+// integers from -16 to 16.
+//
+// Of course, we don't actually need to compute the signed digits s_i as an
+// intermediate step (that's just a nice way to see how this scheme relates
+// to the wNAF): a direct computation obtains the recoded digit from the
+// six bits b_(5j + 4) ... b_(5j - 1).
+//
+// This function takes those six bits as an integer (0 .. 63), writing the
+// recoded digit to *sign (0 for positive, 1 for negative) and *digit (absolute
+// value, in the range 0 .. 16). Note that this integer essentially provides the
+// input bits "shifted to the left" by one position: for example, the input to
+// compute the least significant recoded digit, given that there's no bit b_-1,
+// has to be b_4 b_3 b_2 b_1 b_0 0.
void ec_GFp_nistp_recode_scalar_bits(uint8_t *sign, uint8_t *digit,
uint8_t in) {
uint8_t s, d;
@@ -106,4 +106,4 @@
*digit = d;
}
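
A brute-force cross-check of the branchless recoding (ours): over all 64 inputs the output should match the arithmetic reading digit = (in >> 1) + (in & 1) - 32*(in >> 5), and the magnitude should stay within the 0 .. 16 range stated above:

#include <assert.h>
#include <stdint.h>

static void recode_check(void) {
  for (unsigned in = 0; in < 64; in++) {
    uint8_t sign, digit;
    ec_GFp_nistp_recode_scalar_bits(&sign, &digit, (uint8_t)in);
    int want = (int)(in >> 1) + (int)(in & 1) - (int)((in >> 5) << 5);
    int got = sign ? -(int)digit : (int)digit;
    assert(got == want);
    assert(digit <= 16);
  }
}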
-#endif /* 64_BIT && !WINDOWS */
+#endif // 64_BIT && !WINDOWS
diff --git a/crypto/fipsmodule/ec/wnaf.c b/crypto/fipsmodule/ec/wnaf.c
index f009469..0e3ee13 100644
--- a/crypto/fipsmodule/ec/wnaf.c
+++ b/crypto/fipsmodule/ec/wnaf.c
@@ -78,19 +78,18 @@
#include "../../internal.h"
-/* This file implements the wNAF-based interleaving multi-exponentiation method
- * at:
- * http://link.springer.com/chapter/10.1007%2F3-540-45537-X_13
- * http://www.bmoeller.de/pdf/TI-01-08.multiexp.pdf */
+// This file implements the wNAF-based interleaving multi-exponentiation method
+// at:
+// http://link.springer.com/chapter/10.1007%2F3-540-45537-X_13
+// http://www.bmoeller.de/pdf/TI-01-08.multiexp.pdf
-/* Determine the modified width-(w+1) Non-Adjacent Form (wNAF) of 'scalar'.
- * This is an array r[] of values that are either zero or odd with an
- * absolute value less than 2^w satisfying
- * scalar = \sum_j r[j]*2^j
- * where at most one of any w+1 consecutive digits is non-zero
- * with the exception that the most significant digit may be only
- * w-1 zeros away from that next non-zero digit.
- */
+// Determine the modified width-(w+1) Non-Adjacent Form (wNAF) of 'scalar'.
+// This is an array r[] of values that are either zero or odd with an
+// absolute value less than 2^w satisfying
+// scalar = \sum_j r[j]*2^j
+// where at most one of any w+1 consecutive digits is non-zero
+// with the exception that the most significant digit may be only
+// w-1 zeros away from that next non-zero digit.
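
Before the implementation, a toy version for a machine-word scalar may help (ours; it omits the "modified" tweak for the most significant digit that compute_wNAF applies below):

#include <stdint.h>

// Plain wNAF of k (< 2^32): each digit is zero or odd with |d| < 2^w.
// Returns the number of digits written; out must hold at least 34 entries.
static size_t toy_wnaf(uint64_t k, int w, int8_t *out) {
  size_t len = 0;
  while (k != 0) {
    int d = 0;
    if (k & 1) {
      d = (int)(k & ((1u << (w + 1)) - 1));  // k mod 2^(w+1)
      if (d >= (1 << w)) {
        d -= 1 << (w + 1);  // choose the negative residue: digit stays odd
      }
      k -= (uint64_t)(int64_t)d;  // now k is divisible by 2
    }
    out[len++] = (int8_t)d;
    k >>= 1;
  }
  return len;  // e.g. k = 7, w = 2 yields digits {-1, 0, 0, 1}: 7 = 8 - 1
}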
static int8_t *compute_wNAF(const BIGNUM *scalar, int w, size_t *ret_len) {
int window_val;
int ok = 0;
@@ -110,14 +109,14 @@
return r;
}
- /* 'int8_t' can represent integers with absolute values less than 2^7. */
+ // 'int8_t' can represent integers with absolute values less than 2^7.
if (w <= 0 || w > 7) {
OPENSSL_PUT_ERROR(EC, ERR_R_INTERNAL_ERROR);
goto err;
}
- bit = 1 << w; /* at most 128 */
- next_bit = bit << 1; /* at most 256 */
- mask = next_bit - 1; /* at most 255 */
+ bit = 1 << w; // at most 128
+ next_bit = bit << 1; // at most 256
+ mask = next_bit - 1; // at most 255
if (BN_is_negative(scalar)) {
sign = -1;
@@ -129,9 +128,9 @@
}
len = BN_num_bits(scalar);
-  /* The modified wNAF may be one digit longer than the binary representation
-   * (*ret_len will be set to the actual length, i.e. at most
-   * BN_num_bits(scalar) + 1). */
+  // The modified wNAF may be one digit longer than the binary representation
+  // (*ret_len will be set to the actual length, i.e. at most
+  // BN_num_bits(scalar) + 1).
r = OPENSSL_malloc(len + 1);
if (r == NULL) {
OPENSSL_PUT_ERROR(EC, ERR_R_MALLOC_FAILURE);
@@ -139,30 +138,30 @@
}
window_val = scalar->d[0] & mask;
j = 0;
- /* If j+w+1 >= len, window_val will not increase. */
+ // If j+w+1 >= len, window_val will not increase.
while (window_val != 0 || j + w + 1 < len) {
int digit = 0;
- /* 0 <= window_val <= 2^(w+1) */
+ // 0 <= window_val <= 2^(w+1)
if (window_val & 1) {
- /* 0 < window_val < 2^(w+1) */
+ // 0 < window_val < 2^(w+1)
if (window_val & bit) {
- digit = window_val - next_bit; /* -2^w < digit < 0 */
+ digit = window_val - next_bit; // -2^w < digit < 0
-#if 1 /* modified wNAF */
+#if 1 // modified wNAF
if (j + w + 1 >= len) {
- /* special case for generating modified wNAFs:
- * no new bits will be added into window_val,
- * so using a positive digit here will decrease
- * the total length of the representation */
+ // special case for generating modified wNAFs:
+ // no new bits will be added into window_val,
+ // so using a positive digit here will decrease
+ // the total length of the representation
- digit = window_val & (mask >> 1); /* 0 < digit < 2^w */
+ digit = window_val & (mask >> 1); // 0 < digit < 2^w
}
#endif
} else {
- digit = window_val; /* 0 < digit < 2^w */
+ digit = window_val; // 0 < digit < 2^w
}
if (digit <= -bit || digit >= bit || !(digit & 1)) {
@@ -172,8 +171,8 @@
window_val -= digit;
- /* Now window_val is 0 or 2^(w+1) in standard wNAF generation;
- * for modified window NAFs, it may also be 2^w. */
+ // Now window_val is 0 or 2^(w+1) in standard wNAF generation;
+ // for modified window NAFs, it may also be 2^w.
if (window_val != 0 && window_val != next_bit && window_val != bit) {
OPENSSL_PUT_ERROR(EC, ERR_R_INTERNAL_ERROR);
goto err;
@@ -210,10 +209,9 @@
}
-/* TODO: table should be optimised for the wNAF-based implementation,
- * sometimes smaller windows will give better performance
- * (thus the boundaries should be increased)
- */
+// TODO: table should be optimised for the wNAF-based implementation,
+// sometimes smaller windows will give better performance
+// (thus the boundaries should be increased)
static size_t window_bits_for_scalar_size(size_t b) {
if (b >= 2000) {
return 6;
@@ -248,14 +246,14 @@
int k;
int r_is_inverted = 0;
int r_is_at_infinity = 1;
- size_t *wsize = NULL; /* individual window sizes */
- int8_t **wNAF = NULL; /* individual wNAFs */
+ size_t *wsize = NULL; // individual window sizes
+ int8_t **wNAF = NULL; // individual wNAFs
size_t *wNAF_len = NULL;
size_t max_len = 0;
size_t num_val = 0;
- EC_POINT **val = NULL; /* precomputation */
+ EC_POINT **val = NULL; // precomputation
EC_POINT **v;
- EC_POINT ***val_sub = NULL; /* pointers to sub-arrays of 'val' */
+ EC_POINT ***val_sub = NULL; // pointers to sub-arrays of 'val'
int ret = 0;
if (ctx == NULL) {
@@ -265,9 +263,9 @@
}
}
- /* TODO: This function used to take |points| and |scalars| as arrays of
- * |num| elements. The code below should be simplified to work in terms of |p|
- * and |p_scalar|. */
+ // TODO: This function used to take |points| and |scalars| as arrays of
+ // |num| elements. The code below should be simplified to work in terms of |p|
+ // and |p_scalar|.
size_t num = p != NULL ? 1 : 0;
const EC_POINT **points = p != NULL ? &p : NULL;
const BIGNUM **scalars = p != NULL ? &p_scalar : NULL;
@@ -281,7 +279,7 @@
goto err;
}
- ++total_num; /* treat 'g_scalar' like 'num'-th element of 'scalars' */
+ ++total_num; // treat 'g_scalar' like 'num'-th element of 'scalars'
}
@@ -290,7 +288,7 @@
wNAF = OPENSSL_malloc(total_num * sizeof(wNAF[0]));
val_sub = OPENSSL_malloc(total_num * sizeof(val_sub[0]));
- /* Ensure wNAF is initialised in case we end up going to err. */
+ // Ensure wNAF is initialised in case we end up going to err.
if (wNAF != NULL) {
OPENSSL_memset(wNAF, 0, total_num * sizeof(wNAF[0]));
}
@@ -300,7 +298,7 @@
goto err;
}
- /* num_val will be the total number of temporarily precomputed points */
+ // num_val will be the total number of temporarily precomputed points
num_val = 0;
for (i = 0; i < total_num; i++) {
@@ -319,8 +317,8 @@
}
}
- /* All points we precompute now go into a single array 'val'. 'val_sub[i]' is
- * a pointer to the subarray for the i-th point. */
+ // All points we precompute now go into a single array 'val'. 'val_sub[i]' is
+ // a pointer to the subarray for the i-th point.
val = OPENSSL_malloc(num_val * sizeof(val[0]));
if (val == NULL) {
OPENSSL_PUT_ERROR(EC, ERR_R_MALLOC_FAILURE);
@@ -328,7 +326,7 @@
}
OPENSSL_memset(val, 0, num_val * sizeof(val[0]));
- /* allocate points for precomputation */
+ // allocate points for precomputation
v = val;
for (i = 0; i < total_num; i++) {
val_sub[i] = v;
@@ -349,12 +347,11 @@
goto err;
}
- /* prepare precomputed values:
- * val_sub[i][0] := points[i]
- * val_sub[i][1] := 3 * points[i]
- * val_sub[i][2] := 5 * points[i]
- * ...
- */
+ // prepare precomputed values:
+ // val_sub[i][0] := points[i]
+ // val_sub[i][1] := 3 * points[i]
+ // val_sub[i][2] := 5 * points[i]
+ // ...
for (i = 0; i < total_num; i++) {
if (i < num) {
if (!EC_POINT_copy(val_sub[i][0], points[i])) {
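
To make the table shape concrete: with a window width of w = 4, each subarray val_sub[i] holds the odd multiples P, 3P, 5P, ..., 15P (2^(w-1) of them, an assumption consistent with the comment above). A wNAF digit d is always odd, so the lookup later in this function is simply val_sub[i][d >> 1], with the point inverted when d is negative.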
@@ -376,7 +373,7 @@
}
}
-#if 1 /* optional; window_bits_for_scalar_size assumes we do this step */
+#if 1 // optional; window_bits_for_scalar_size assumes we do this step
if (!EC_POINTs_make_affine(group, num_val, val, ctx)) {
goto err;
}
@@ -408,7 +405,7 @@
r_is_inverted = !r_is_inverted;
}
- /* digit > 0 */
+ // digit > 0
if (r_is_at_infinity) {
if (!EC_POINT_copy(r, val_sub[i][digit >> 1])) {
diff --git a/crypto/fipsmodule/ecdsa/ecdsa.c b/crypto/fipsmodule/ecdsa/ecdsa.c
index c061ab8..dfa3b67 100644
--- a/crypto/fipsmodule/ecdsa/ecdsa.c
+++ b/crypto/fipsmodule/ecdsa/ecdsa.c
@@ -64,16 +64,16 @@
#include "../../internal.h"
-/* digest_to_bn interprets |digest_len| bytes from |digest| as a big-endian
- * number and sets |out| to that value. It then truncates |out| so that it's,
- * at most, as long as |order|. It returns one on success and zero otherwise. */
+// digest_to_bn interprets |digest_len| bytes from |digest| as a big-endian
+// number and sets |out| to that value. It then truncates |out| so that it's,
+// at most, as long as |order|. It returns one on success and zero otherwise.
static int digest_to_bn(BIGNUM *out, const uint8_t *digest, size_t digest_len,
const BIGNUM *order) {
size_t num_bits;
num_bits = BN_num_bits(order);
- /* Need to truncate digest if it is too long: first truncate whole
- * bytes. */
+ // Need to truncate digest if it is too long: first truncate whole
+ // bytes.
if (8 * digest_len > num_bits) {
digest_len = (num_bits + 7) / 8;
}
@@ -82,7 +82,7 @@
return 0;
}
- /* If still too long truncate remaining bits with a shift */
+ // If still too long truncate remaining bits with a shift
if ((8 * digest_len > num_bits) &&
!BN_rshift(out, out, 8 - (num_bits & 0x7))) {
OPENSSL_PUT_ERROR(ECDSA, ERR_R_BN_LIB);
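
Worked example: with a 163-bit order and a 32-byte SHA-256 digest, 8 * 32 = 256 > 163, so the first step cuts the digest to (163 + 7) / 8 = 21 bytes (168 bits); 168 is still greater than 163, so the value is shifted right by 8 - (163 & 7) = 5 bits, leaving exactly the leftmost 163 bits of the digest.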
@@ -130,7 +130,7 @@
const EC_GROUP *group;
const EC_POINT *pub_key;
- /* check input values */
+ // check input values
if ((group = EC_KEY_get0_group(eckey)) == NULL ||
(pub_key = EC_KEY_get0_public_key(eckey)) == NULL ||
sig == NULL) {
@@ -160,7 +160,7 @@
OPENSSL_PUT_ERROR(ECDSA, ECDSA_R_BAD_SIGNATURE);
goto err;
}
- /* calculate tmp1 = inv(S) mod order */
+  // calculate u2 = inv(S) mod order
int no_inverse;
if (!BN_mod_inverse_odd(u2, &no_inverse, sig->s, order, ctx)) {
OPENSSL_PUT_ERROR(ECDSA, ERR_R_BN_LIB);
@@ -169,12 +169,12 @@
if (!digest_to_bn(m, digest, digest_len, order)) {
goto err;
}
- /* u1 = m * tmp mod order */
+  // u1 = m * u2 mod order
if (!BN_mod_mul(u1, m, u2, order, ctx)) {
OPENSSL_PUT_ERROR(ECDSA, ERR_R_BN_LIB);
goto err;
}
- /* u2 = r * w mod q */
+  // u2 = r * u2 mod order
if (!BN_mod_mul(u2, sig->r, u2, order, ctx)) {
OPENSSL_PUT_ERROR(ECDSA, ERR_R_BN_LIB);
goto err;
@@ -197,7 +197,7 @@
OPENSSL_PUT_ERROR(ECDSA, ERR_R_BN_LIB);
goto err;
}
- /* if the signature is correct u1 is equal to sig->r */
+ // if the signature is correct u1 is equal to sig->r
if (BN_ucmp(u1, sig->r) != 0) {
OPENSSL_PUT_ERROR(ECDSA, ECDSA_R_BAD_SIGNATURE);
goto err;
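
To summarize the verification above in standard ECDSA notation: with digest m, signature (r, s), public key Q and group order n, the code computes s^-1 mod n (into u2), then u1 = m * s^-1 mod n and u2 = r * s^-1 mod n, and accepts iff the x-coordinate of u1*G + u2*Q, reduced mod n, equals r.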
@@ -236,8 +236,8 @@
}
k = BN_new();
- kinv = BN_new(); /* this value is later returned in *kinvp */
- r = BN_new(); /* this value is later returned in *rp */
+ kinv = BN_new(); // this value is later returned in *kinvp
+ r = BN_new(); // this value is later returned in *rp
tmp = BN_new();
if (k == NULL || kinv == NULL || r == NULL || tmp == NULL) {
OPENSSL_PUT_ERROR(ECDSA, ERR_R_MALLOC_FAILURE);
@@ -251,17 +251,17 @@
const BIGNUM *order = EC_GROUP_get0_order(group);
- /* Check that the size of the group order is FIPS compliant (FIPS 186-4
- * B.5.2). */
+ // Check that the size of the group order is FIPS compliant (FIPS 186-4
+ // B.5.2).
if (BN_num_bits(order) < 160) {
OPENSSL_PUT_ERROR(ECDSA, EC_R_INVALID_GROUP_ORDER);
goto err;
}
do {
- /* If possible, we'll include the private key and message digest in the k
- * generation. The |digest| argument is only empty if |ECDSA_sign_setup| is
- * being used. */
+ // If possible, we'll include the private key and message digest in the k
+ // generation. The |digest| argument is only empty if |ECDSA_sign_setup| is
+ // being used.
if (eckey->fixed_k != NULL) {
if (!BN_copy(k, eckey->fixed_k)) {
goto err;
@@ -279,18 +279,18 @@
goto err;
}
- /* Compute the inverse of k. The order is a prime, so use Fermat's Little
- * Theorem. Note |ec_group_get_order_mont| may return NULL but
- * |bn_mod_inverse_prime| allows this. */
+ // Compute the inverse of k. The order is a prime, so use Fermat's Little
+ // Theorem. Note |ec_group_get_order_mont| may return NULL but
+ // |bn_mod_inverse_prime| allows this.
if (!bn_mod_inverse_prime(kinv, k, order, ctx,
ec_group_get_order_mont(group))) {
OPENSSL_PUT_ERROR(ECDSA, ERR_R_BN_LIB);
goto err;
}
- /* We do not want timing information to leak the length of k,
- * so we compute G*k using an equivalent scalar of fixed
- * bit-length. */
+ // We do not want timing information to leak the length of k,
+ // so we compute G*k using an equivalent scalar of fixed
+ // bit-length.
if (!BN_add(k, k, order)) {
goto err;
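
The trick: k and k + order select the same point, since order * G is the identity, so the multiplier can be padded to a fixed BN_num_bits(order) + 1 bits without changing the result. The usual form of this adds |order| once and, if the bit length still has not grown, a second time; the hunk is truncated before that check, so the second addition is an assumption here.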
@@ -301,7 +301,7 @@
}
}
- /* compute r the x-coordinate of generator * k */
+ // compute r the x-coordinate of generator * k
if (!EC_POINT_mul(group, tmp_point, k, NULL, NULL, ctx)) {
OPENSSL_PUT_ERROR(ECDSA, ERR_R_EC_LIB);
goto err;
@@ -318,11 +318,11 @@
}
} while (BN_is_zero(r));
- /* clear old values if necessary */
+ // clear old values if necessary
BN_clear_free(*rp);
BN_clear_free(*kinvp);
- /* save the pre-computed values */
+ // save the pre-computed values
*rp = r;
*kinvp = kinv;
ret = 1;
@@ -417,14 +417,14 @@
goto err;
}
if (BN_is_zero(s)) {
- /* if kinv and r have been supplied by the caller
- * don't to generate new kinv and r values */
+ // if kinv and r have been supplied by the caller
+      // don't generate new kinv and r values
if (in_kinv != NULL && in_r != NULL) {
OPENSSL_PUT_ERROR(ECDSA, ECDSA_R_NEED_NEW_SETUP_VALUES);
goto err;
}
} else {
- /* s != 0 => we have a valid signature */
+ // s != 0 => we have a valid signature
break;
}
}
diff --git a/crypto/fipsmodule/ecdsa/ecdsa_test.cc b/crypto/fipsmodule/ecdsa/ecdsa_test.cc
index e1f109b..de4bc48 100644
--- a/crypto/fipsmodule/ecdsa/ecdsa_test.cc
+++ b/crypto/fipsmodule/ecdsa/ecdsa_test.cc
@@ -242,13 +242,13 @@
SCOPED_TRACE(bits);
size_t order_len = BitsToBytes(bits);
- /* Create the largest possible |ECDSA_SIG| of the given constraints. */
+ // Create the largest possible |ECDSA_SIG| of the given constraints.
bssl::UniquePtr<ECDSA_SIG> sig(ECDSA_SIG_new());
ASSERT_TRUE(sig);
std::vector<uint8_t> bytes(order_len, 0xff);
ASSERT_TRUE(BN_bin2bn(bytes.data(), bytes.size(), sig->r));
ASSERT_TRUE(BN_bin2bn(bytes.data(), bytes.size(), sig->s));
- /* Serialize it. */
+ // Serialize it.
uint8_t *der;
size_t der_len;
ASSERT_TRUE(ECDSA_SIG_to_bytes(&der, &der_len, sig.get()));
diff --git a/crypto/fipsmodule/hmac/hmac.c b/crypto/fipsmodule/hmac/hmac.c
index 3292350..5c098db 100644
--- a/crypto/fipsmodule/hmac/hmac.c
+++ b/crypto/fipsmodule/hmac/hmac.c
@@ -100,13 +100,13 @@
md = ctx->md;
}
- /* If either |key| is non-NULL or |md| has changed, initialize with a new key
- * rather than rewinding the previous one.
- *
- * TODO(davidben,eroman): Passing the previous |md| with a NULL |key| is
- * ambiguous between using the empty key and reusing the previous key. There
- * exist callers which intend the latter, but the former is an awkward edge
- * case. Fix to API to avoid this. */
+ // If either |key| is non-NULL or |md| has changed, initialize with a new key
+ // rather than rewinding the previous one.
+ //
+ // TODO(davidben,eroman): Passing the previous |md| with a NULL |key| is
+ // ambiguous between using the empty key and reusing the previous key. There
+ // exist callers which intend the latter, but the former is an awkward edge
+  // case. Fix the API to avoid this.
if (md != ctx->md || key != NULL) {
uint8_t pad[EVP_MAX_MD_BLOCK_SIZE];
uint8_t key_block[EVP_MAX_MD_BLOCK_SIZE];
@@ -115,7 +115,7 @@
size_t block_size = EVP_MD_block_size(md);
assert(block_size <= sizeof(key_block));
if (block_size < key_len) {
- /* Long keys are hashed. */
+ // Long keys are hashed.
if (!EVP_DigestInit_ex(&ctx->md_ctx, md, impl) ||
!EVP_DigestUpdate(&ctx->md_ctx, key, key_len) ||
!EVP_DigestFinal_ex(&ctx->md_ctx, key_block, &key_block_len)) {
@@ -126,7 +126,7 @@
OPENSSL_memcpy(key_block, key, key_len);
key_block_len = (unsigned)key_len;
}
- /* Keys are then padded with zeros. */
+ // Keys are then padded with zeros.
if (key_block_len != EVP_MAX_MD_BLOCK_SIZE) {
OPENSSL_memset(&key_block[key_block_len], 0, sizeof(key_block) - key_block_len);
}
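
The two hunks above implement the standard HMAC key preprocessing. A minimal standalone sketch (hypothetical helper; the one-shot |EVP_Digest| stands in for the Init/Update/Final sequence the real code uses):

  #include <string.h>

  #include <openssl/digest.h>

  static int prepare_key_block(const EVP_MD *md, const uint8_t *key,
                               size_t key_len,
                               uint8_t block[EVP_MAX_MD_BLOCK_SIZE]) {
    unsigned hashed_len;
    if (key_len > EVP_MD_block_size(md)) {
      // Long keys are hashed down to the digest length.
      if (!EVP_Digest(key, key_len, block, &hashed_len, md, NULL)) {
        return 0;
      }
      key_len = hashed_len;
    } else {
      memcpy(block, key, key_len);
    }
    // Keys are then padded with zeros to the full block buffer.
    memset(block + key_len, 0, EVP_MAX_MD_BLOCK_SIZE - key_len);
    return 1;
  }

The inner and outer hash states are then seeded by XORing this block with 0x36 and 0x5c respectively, per RFC 2104.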
@@ -165,8 +165,8 @@
unsigned int i;
uint8_t buf[EVP_MAX_MD_SIZE];
- /* TODO(davidben): The only thing that can officially fail here is
- * |EVP_MD_CTX_copy_ex|, but even that should be impossible in this case. */
+ // TODO(davidben): The only thing that can officially fail here is
+ // |EVP_MD_CTX_copy_ex|, but even that should be impossible in this case.
if (!EVP_DigestFinal_ex(&ctx->md_ctx, buf, &i) ||
!EVP_MD_CTX_copy_ex(&ctx->md_ctx, &ctx->o_ctx) ||
!EVP_DigestUpdate(&ctx->md_ctx, buf, i) ||
diff --git a/crypto/fipsmodule/is_fips.c b/crypto/fipsmodule/is_fips.c
index bff1a05..4182dfb 100644
--- a/crypto/fipsmodule/is_fips.c
+++ b/crypto/fipsmodule/is_fips.c
@@ -15,8 +15,8 @@
#include <openssl/crypto.h>
-/* This file exists in order to give the fipsmodule target, in non-FIPS mode,
- * something to compile. */
+// This file exists in order to give the fipsmodule target, in non-FIPS mode,
+// something to compile.
int FIPS_mode(void) {
#if defined(BORINGSSL_FIPS) && !defined(OPENSSL_ASAN)
diff --git a/crypto/fipsmodule/md4/md4.c b/crypto/fipsmodule/md4/md4.c
index 3028c8b..f0c1dcd 100644
--- a/crypto/fipsmodule/md4/md4.c
+++ b/crypto/fipsmodule/md4/md4.c
@@ -71,7 +71,7 @@
return out;
}
-/* Implemented from RFC1186 The MD4 Message-Digest Algorithm. */
+// Implemented from RFC1186 The MD4 Message-Digest Algorithm.
int MD4_Init(MD4_CTX *md4) {
OPENSSL_memset(md4, 0, sizeof(MD4_CTX));
@@ -107,9 +107,9 @@
#include "../digest/md32_common.h"
-/* As pointed out by Wei Dai <weidai@eskimo.com>, the above can be
- * simplified to the code below. Wei attributes these optimizations
- * to Peter Gutmann's SHS code, and he attributes it to Rich Schroeppel. */
+// As pointed out by Wei Dai <weidai@eskimo.com>, the above can be
+// simplified to the code below. Wei attributes these optimizations
+// to Peter Gutmann's SHS code, and he attributes it to Rich Schroeppel.
#define F(b, c, d) ((((c) ^ (d)) & (b)) ^ (d))
#define G(b, c, d) (((b) & (c)) | ((b) & (d)) | ((c) & (d)))
#define H(b, c, d) ((b) ^ (c) ^ (d))
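
The point of the optimization: F selects, bit by bit, between c and d according to b. The textbook form (b & c) | (~b & d) takes four operations; ((c ^ d) & b) ^ d takes three. A throwaway exhaustive check (not part of this change):

  #include <assert.h>
  #include <stdint.h>

  int main(void) {
    for (uint32_t b = 0; b <= 1; b++) {
      for (uint32_t c = 0; c <= 1; c++) {
        for (uint32_t d = 0; d <= 1; d++) {
          // The three-op select must agree with the four-op textbook form.
          assert((((c ^ d) & b) ^ d) == ((b & c) | (~b & d)));
        }
      }
    }
    return 0;
  }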
@@ -148,7 +148,7 @@
X0 = l;
HOST_c2l(data, l);
X1 = l;
- /* Round 0 */
+ // Round 0
R0(A, B, C, D, X0, 3, 0);
HOST_c2l(data, l);
X2 = l;
@@ -193,7 +193,7 @@
X15 = l;
R0(C, D, A, B, X14, 11, 0);
R0(B, C, D, A, X15, 19, 0);
- /* Round 1 */
+ // Round 1
R1(A, B, C, D, X0, 3, 0x5A827999L);
R1(D, A, B, C, X4, 5, 0x5A827999L);
R1(C, D, A, B, X8, 9, 0x5A827999L);
@@ -210,7 +210,7 @@
R1(D, A, B, C, X7, 5, 0x5A827999L);
R1(C, D, A, B, X11, 9, 0x5A827999L);
R1(B, C, D, A, X15, 13, 0x5A827999L);
- /* Round 2 */
+ // Round 2
R2(A, B, C, D, X0, 3, 0x6ED9EBA1L);
R2(D, A, B, C, X8, 9, 0x6ED9EBA1L);
R2(C, D, A, B, X4, 11, 0x6ED9EBA1L);
diff --git a/crypto/fipsmodule/md5/md5.c b/crypto/fipsmodule/md5/md5.c
index 15a0f53..32429da 100644
--- a/crypto/fipsmodule/md5/md5.c
+++ b/crypto/fipsmodule/md5/md5.c
@@ -113,10 +113,9 @@
#include "../digest/md32_common.h"
-/* As pointed out by Wei Dai <weidai@eskimo.com>, the above can be
- * simplified to the code below. Wei attributes these optimizations
- * to Peter Gutmann's SHS code, and he attributes it to Rich Schroeppel.
- */
+// As pointed out by Wei Dai <weidai@eskimo.com>, the above can be
+// simplified to the code below. Wei attributes these optimizations
+// to Peter Gutmann's SHS code, and he attributes it to Rich Schroeppel.
#define F(b, c, d) ((((c) ^ (d)) & (b)) ^ (d))
#define G(b, c, d) ((((b) ^ (c)) & (d)) ^ (c))
#define H(b, c, d) ((b) ^ (c) ^ (d))
@@ -172,7 +171,7 @@
X(0) = l;
HOST_c2l(data, l);
X(1) = l;
- /* Round 0 */
+ // Round 0
R0(A, B, C, D, X(0), 7, 0xd76aa478L);
HOST_c2l(data, l);
X(2) = l;
@@ -217,7 +216,7 @@
X(15) = l;
R0(C, D, A, B, X(14), 17, 0xa679438eL);
R0(B, C, D, A, X(15), 22, 0x49b40821L);
- /* Round 1 */
+ // Round 1
R1(A, B, C, D, X(1), 5, 0xf61e2562L);
R1(D, A, B, C, X(6), 9, 0xc040b340L);
R1(C, D, A, B, X(11), 14, 0x265e5a51L);
@@ -234,7 +233,7 @@
R1(D, A, B, C, X(2), 9, 0xfcefa3f8L);
R1(C, D, A, B, X(7), 14, 0x676f02d9L);
R1(B, C, D, A, X(12), 20, 0x8d2a4c8aL);
- /* Round 2 */
+ // Round 2
R2(A, B, C, D, X(5), 4, 0xfffa3942L);
R2(D, A, B, C, X(8), 11, 0x8771f681L);
R2(C, D, A, B, X(11), 16, 0x6d9d6122L);
@@ -251,7 +250,7 @@
R2(D, A, B, C, X(12), 11, 0xe6db99e5L);
R2(C, D, A, B, X(15), 16, 0x1fa27cf8L);
R2(B, C, D, A, X(2), 23, 0xc4ac5665L);
- /* Round 3 */
+ // Round 3
R3(A, B, C, D, X(0), 6, 0xf4292244L);
R3(D, A, B, C, X(7), 10, 0x432aff97L);
R3(C, D, A, B, X(14), 15, 0xab9423a7L);
diff --git a/crypto/fipsmodule/modes/cbc.c b/crypto/fipsmodule/modes/cbc.c
index 12d551c..4b3bdb8 100644
--- a/crypto/fipsmodule/modes/cbc.c
+++ b/crypto/fipsmodule/modes/cbc.c
@@ -120,12 +120,12 @@
const uintptr_t inptr = (uintptr_t) in;
const uintptr_t outptr = (uintptr_t) out;
- /* If |in| and |out| alias, |in| must be ahead. */
+ // If |in| and |out| alias, |in| must be ahead.
assert(inptr >= outptr || inptr + len <= outptr);
if ((inptr >= 32 && outptr <= inptr - 32) || inptr < outptr) {
- /* If |out| is at least two blocks behind |in| or completely disjoint, there
- * is no need to decrypt to a temporary block. */
+ // If |out| is at least two blocks behind |in| or completely disjoint, there
+ // is no need to decrypt to a temporary block.
const uint8_t *iv = ivec;
if (STRICT_ALIGNMENT &&
@@ -140,7 +140,7 @@
in += 16;
out += 16;
}
- } else if (16 % sizeof(size_t) == 0) { /* always true */
+ } else if (16 % sizeof(size_t) == 0) { // always true
while (len >= 16) {
size_t *out_t = (size_t *)out, *iv_t = (size_t *)iv;
@@ -156,9 +156,9 @@
}
OPENSSL_memcpy(ivec, iv, 16);
} else {
- /* |out| is less than two blocks behind |in|. Decrypting an input block
- * directly to |out| would overwrite a ciphertext block before it is used as
- * the next block's IV. Decrypt to a temporary block instead. */
+ // |out| is less than two blocks behind |in|. Decrypting an input block
+ // directly to |out| would overwrite a ciphertext block before it is used as
+ // the next block's IV. Decrypt to a temporary block instead.
if (STRICT_ALIGNMENT &&
((size_t)in | (size_t)out | (size_t)ivec) % sizeof(size_t) != 0) {
uint8_t c;
@@ -173,7 +173,7 @@
in += 16;
out += 16;
}
- } else if (16 % sizeof(size_t) == 0) { /* always true */
+ } else if (16 % sizeof(size_t) == 0) { // always true
while (len >= 16) {
size_t c, *out_t = (size_t *)out, *ivec_t = (size_t *)ivec;
const size_t *in_t = (const size_t *)in;
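
A simplified sketch of this slow path (alignment handling and the size_t-wide loop dropped; |in|, |out|, |len|, |ivec|, |key| and |block| as in the real function): the ciphertext block is saved before the block cipher writes |out|, because with an overlap of less than two blocks that write would destroy the next block's IV.

  uint8_t tmp[16];
  while (len >= 16) {
    memcpy(tmp, in, 16);     // save the ciphertext: it is the next block's IV
    (*block)(in, out, key);  // may clobber |in| where the buffers overlap
    for (unsigned i = 0; i < 16; i++) {
      out[i] ^= ivec[i];     // CBC: XOR with the previous ciphertext block
    }
    memcpy(ivec, tmp, 16);
    len -= 16;
    in += 16;
    out += 16;
  }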
diff --git a/crypto/fipsmodule/modes/cfb.c b/crypto/fipsmodule/modes/cfb.c
index 836eb3f..2775d19 100644
--- a/crypto/fipsmodule/modes/cfb.c
+++ b/crypto/fipsmodule/modes/cfb.c
@@ -166,23 +166,23 @@
return;
}
- /* fill in the first half of the new IV with the current IV */
+ // fill in the first half of the new IV with the current IV
OPENSSL_memcpy(ovec, ivec, 16);
- /* construct the new IV */
+ // construct the new IV
(*block)(ivec, ivec, key);
num = (nbits + 7) / 8;
if (enc) {
- /* encrypt the input */
+ // encrypt the input
for (n = 0; n < num; ++n) {
out[n] = (ovec[16 + n] = in[n] ^ ivec[n]);
}
} else {
- /* decrypt the input */
+ // decrypt the input
for (n = 0; n < num; ++n) {
out[n] = (ovec[16 + n] = in[n]) ^ ivec[n];
}
}
- /* shift ovec left... */
+ // shift ovec left...
rem = nbits % 8;
num = nbits / 8;
if (rem == 0) {
@@ -193,10 +193,10 @@
}
}
- /* it is not necessary to cleanse ovec, since the IV is not secret */
+ // it is not necessary to cleanse ovec, since the IV is not secret
}
-/* N.B. This expects the input to be packed, MS bit first */
+// N.B. This expects the input to be packed, MS bit first
void CRYPTO_cfb128_1_encrypt(const uint8_t *in, uint8_t *out, size_t bits,
const void *key, uint8_t ivec[16], unsigned *num,
int enc, block128_f block) {
diff --git a/crypto/fipsmodule/modes/ctr.c b/crypto/fipsmodule/modes/ctr.c
index a191f39..5a97cf6 100644
--- a/crypto/fipsmodule/modes/ctr.c
+++ b/crypto/fipsmodule/modes/ctr.c
@@ -54,10 +54,10 @@
#include "internal.h"
-/* NOTE: the IV/counter CTR mode is big-endian. The code itself
- * is endian-neutral. */
+// NOTE: the IV/counter CTR mode is big-endian. The code itself
+// is endian-neutral.
-/* increment counter (128-bit int) by 1 */
+// increment counter (128-bit int) by 1
static void ctr128_inc(uint8_t *counter) {
uint32_t n = 16, c = 1;
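
The body is elided by the diff; a sketch of the usual byte-wise big-endian increment it is assumed to contain:

  static void ctr128_inc_sketch(uint8_t counter[16]) {
    uint32_t n = 16, c = 1;
    do {
      --n;
      c += counter[n];          // add the carry to this byte
      counter[n] = (uint8_t)c;  // keep the low byte
      c >>= 8;                  // carry into the next, more significant byte
    } while (n);
  }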
@@ -71,16 +71,16 @@
OPENSSL_COMPILE_ASSERT((16 % sizeof(size_t)) == 0, bad_size_t_size_ctr);
-/* The input encrypted as though 128bit counter mode is being used. The extra
- * state information to record how much of the 128bit block we have used is
- * contained in *num, and the encrypted counter is kept in ecount_buf. Both
- * *num and ecount_buf must be initialised with zeros before the first call to
- * CRYPTO_ctr128_encrypt().
- *
- * This algorithm assumes that the counter is in the x lower bits of the IV
- * (ivec), and that the application has full control over overflow and the rest
- * of the IV. This implementation takes NO responsibility for checking that
- * the counter doesn't overflow into the rest of the IV when incremented. */
+// The input is encrypted as though 128-bit counter mode is being used. The
+// extra state information to record how much of the 128-bit block we have
+// used is contained in *num, and the encrypted counter is kept in
+// ecount_buf. Both
+// *num and ecount_buf must be initialised with zeros before the first call to
+// CRYPTO_ctr128_encrypt().
+//
+// This algorithm assumes that the counter is in the x lower bits of the IV
+// (ivec), and that the application has full control over overflow and the rest
+// of the IV. This implementation takes NO responsibility for checking that
+// the counter doesn't overflow into the rest of the IV when incremented.
void CRYPTO_ctr128_encrypt(const uint8_t *in, uint8_t *out, size_t len,
const void *key, uint8_t ivec[16],
uint8_t ecount_buf[16], unsigned int *num,
@@ -140,7 +140,7 @@
*num = n;
}
-/* increment upper 96 bits of 128-bit counter by 1 */
+// increment upper 96 bits of 128-bit counter by 1
static void ctr96_inc(uint8_t *counter) {
uint32_t n = 12, c = 1;
@@ -174,25 +174,25 @@
ctr32 = GETU32(ivec + 12);
while (len >= 16) {
size_t blocks = len / 16;
- /* 1<<28 is just a not-so-small yet not-so-large number...
- * Below condition is practically never met, but it has to
- * be checked for code correctness. */
+ // 1<<28 is just a not-so-small yet not-so-large number...
+    // The condition below is practically never met, but it has to be
+    // checked for code correctness.
if (sizeof(size_t) > sizeof(unsigned int) && blocks > (1U << 28)) {
blocks = (1U << 28);
}
- /* As (*func) operates on 32-bit counter, caller
- * has to handle overflow. 'if' below detects the
- * overflow, which is then handled by limiting the
- * amount of blocks to the exact overflow point... */
+    // As (*func) operates on a 32-bit counter, the caller has to handle
+    // overflow. The 'if' below detects the overflow, which is then handled
+    // by limiting the number of blocks to the exact overflow point...
ctr32 += (uint32_t)blocks;
if (ctr32 < blocks) {
blocks -= ctr32;
ctr32 = 0;
}
(*func)(in, out, blocks, key, ivec);
- /* (*func) does not update ivec, caller does: */
+ // (*func) does not update ivec, caller does:
PUTU32(ivec + 12, ctr32);
- /* ... overflow was detected, propogate carry. */
+    // ... overflow was detected, propagate carry.
if (ctr32 == 0) {
ctr96_inc(ivec);
}
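
Worked example: if ctr32 is 2^32 - 2 and 5 blocks remain, ctr32 += 5 wraps to 3; since 3 < 5, blocks is cut to 5 - 3 = 2, exactly the blocks that fit before the 32-bit counter wraps. After (*func) runs, ctr32 == 0 triggers ctr96_inc to carry into the upper 96 bits, and the remaining three blocks are handled on the next loop iteration.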
diff --git a/crypto/fipsmodule/modes/gcm.c b/crypto/fipsmodule/modes/gcm.c
index 47b093f..bb5be54 100644
--- a/crypto/fipsmodule/modes/gcm.c
+++ b/crypto/fipsmodule/modes/gcm.c
@@ -177,11 +177,11 @@
Xi[1] = CRYPTO_bswap8(Z.lo);
}
-/* Streamed gcm_mult_4bit, see CRYPTO_gcm128_[en|de]crypt for
- * details... Compiler-generated code doesn't seem to give any
- * performance improvement, at least not on x86[_64]. It's here
- * mostly as reference and a placeholder for possible future
- * non-trivial optimization[s]... */
+// Streamed gcm_mult_4bit, see CRYPTO_gcm128_[en|de]crypt for
+// details... Compiler-generated code doesn't seem to give any
+// performance improvement, at least not on x86[_64]. It's here
+// mostly as reference and a placeholder for possible future
+// non-trivial optimization[s]...
static void gcm_ghash_4bit(uint64_t Xi[2], const u128 Htable[16],
const uint8_t *inp, size_t len) {
u128 Z;
@@ -237,7 +237,7 @@
Xi[1] = CRYPTO_bswap8(Z.lo);
} while (inp += 16, len -= 16);
}
-#else /* GHASH_ASM */
+#else // GHASH_ASM
void gcm_gmult_4bit(uint64_t Xi[2], const u128 Htable[16]);
void gcm_ghash_4bit(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
size_t len);
@@ -246,9 +246,9 @@
#define GCM_MUL(ctx, Xi) gcm_gmult_4bit((ctx)->Xi.u, (ctx)->Htable)
#if defined(GHASH_ASM)
#define GHASH(ctx, in, len) gcm_ghash_4bit((ctx)->Xi.u, (ctx)->Htable, in, len)
-/* GHASH_CHUNK is "stride parameter" missioned to mitigate cache
- * trashing effect. In other words idea is to hash data while it's
- * still in L1 cache after encryption pass... */
+// GHASH_CHUNK is a "stride parameter" intended to mitigate cache-thrashing
+// effects. In other words, the idea is to hash data while it's still in the
+// L1 cache after the encryption pass...
#define GHASH_CHUNK (3 * 1024)
#endif
@@ -298,7 +298,7 @@
size_t len);
#if defined(OPENSSL_ARM)
-/* 32-bit ARM also has support for doing GCM with NEON instructions. */
+// 32-bit ARM also has support for doing GCM with NEON instructions.
static int neon_capable(void) {
return CRYPTO_is_NEON_capable();
}
@@ -308,7 +308,7 @@
void gcm_ghash_neon(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
size_t len);
#else
-/* AArch64 only has the ARMv8 versions of functions. */
+// AArch64 only has the ARMv8 versions of functions.
static int neon_capable(void) {
return 0;
}
@@ -357,7 +357,7 @@
OPENSSL_memcpy(H.c, gcm_key, 16);
- /* H is stored in host byte order */
+ // H is stored in host byte order
H.u[0] = CRYPTO_bswap8(H.u[0]);
H.u[1] = CRYPTO_bswap8(H.u[1]);
@@ -365,7 +365,7 @@
#if defined(GHASH_ASM_X86_64)
if (crypto_gcm_clmul_enabled()) {
- if (((OPENSSL_ia32cap_get()[1] >> 22) & 0x41) == 0x41) { /* AVX+MOVBE */
+ if (((OPENSSL_ia32cap_get()[1] >> 22) & 0x41) == 0x41) { // AVX+MOVBE
gcm_init_avx(out_table, H.u);
*out_mult = gcm_gmult_avx;
*out_hash = gcm_ghash_avx;
@@ -444,8 +444,8 @@
ctx->Yi.u[1] = 0;
ctx->Xi.u[0] = 0;
ctx->Xi.u[1] = 0;
- ctx->len.u[0] = 0; /* AAD length */
- ctx->len.u[1] = 0; /* message length */
+ ctx->len.u[0] = 0; // AAD length
+ ctx->len.u[1] = 0; // message length
ctx->ares = 0;
ctx->mres = 0;
@@ -518,7 +518,7 @@
}
}
- /* Process a whole number of blocks. */
+ // Process a whole number of blocks.
#ifdef GHASH
size_t len_blocks = len & kSizeTWithoutLower4Bits;
if (len_blocks != 0) {
@@ -537,7 +537,7 @@
}
#endif
- /* Process the remainder. */
+ // Process the remainder.
if (len != 0) {
n = (unsigned int)len;
for (size_t i = 0; i < len; ++i) {
@@ -571,7 +571,7 @@
ctx->len.u[1] = mlen;
if (ctx->ares) {
- /* First call to encrypt finalizes GHASH(AAD) */
+ // First call to encrypt finalizes GHASH(AAD)
GCM_MUL(ctx, Xi);
ctx->ares = 0;
}
@@ -701,7 +701,7 @@
ctx->len.u[1] = mlen;
if (ctx->ares) {
- /* First call to decrypt finalizes GHASH(AAD) */
+ // First call to decrypt finalizes GHASH(AAD)
GCM_MUL(ctx, Xi);
ctx->ares = 0;
}
@@ -839,7 +839,7 @@
ctx->len.u[1] = mlen;
if (ctx->ares) {
- /* First call to encrypt finalizes GHASH(AAD) */
+ // First call to encrypt finalizes GHASH(AAD)
GCM_MUL(ctx, Xi);
ctx->ares = 0;
}
@@ -861,8 +861,8 @@
#if defined(AESNI_GCM)
if (ctx->use_aesni_gcm_crypt) {
- /* |aesni_gcm_encrypt| may not process all the input given to it. It may
- * not process *any* of its input if it is deemed too small. */
+ // |aesni_gcm_encrypt| may not process all the input given to it. It may
+ // not process *any* of its input if it is deemed too small.
size_t bulk = aesni_gcm_encrypt(in, out, len, key, ctx->Yi.c, ctx->Xi.u);
in += bulk;
out += bulk;
@@ -940,7 +940,7 @@
ctx->len.u[1] = mlen;
if (ctx->ares) {
- /* First call to decrypt finalizes GHASH(AAD) */
+ // First call to decrypt finalizes GHASH(AAD)
GCM_MUL(ctx, Xi);
ctx->ares = 0;
}
@@ -964,8 +964,8 @@
#if defined(AESNI_GCM)
if (ctx->use_aesni_gcm_crypt) {
- /* |aesni_gcm_decrypt| may not process all the input given to it. It may
- * not process *any* of its input if it is deemed too small. */
+ // |aesni_gcm_decrypt| may not process all the input given to it. It may
+ // not process *any* of its input if it is deemed too small.
size_t bulk = aesni_gcm_decrypt(in, out, len, key, ctx->Yi.c, ctx->Xi.u);
in += bulk;
out += bulk;
@@ -1065,8 +1065,8 @@
int crypto_gcm_clmul_enabled(void) {
#ifdef GHASH_ASM
const uint32_t *ia32cap = OPENSSL_ia32cap_get();
- return (ia32cap[0] & (1 << 24)) && /* check FXSR bit */
- (ia32cap[1] & (1 << 1)); /* check PCLMULQDQ bit */
+ return (ia32cap[0] & (1 << 24)) && // check FXSR bit
+ (ia32cap[1] & (1 << 1)); // check PCLMULQDQ bit
#else
return 0;
#endif
diff --git a/crypto/fipsmodule/modes/gcm_test.cc b/crypto/fipsmodule/modes/gcm_test.cc
index bfd4275..5988945 100644
--- a/crypto/fipsmodule/modes/gcm_test.cc
+++ b/crypto/fipsmodule/modes/gcm_test.cc
@@ -46,9 +46,9 @@
* OF THE POSSIBILITY OF SUCH DAMAGE.
* ==================================================================== */
-/* Per C99, various stdint.h and inttypes.h macros (the latter used by
- * internal.h) are unavailable in C++ unless some macros are defined. C++11
- * overruled this decision, but older Android NDKs still require it. */
+// Per C99, various stdint.h and inttypes.h macros (the latter used by
+// internal.h) are unavailable in C++ unless some macros are defined. C++11
+// overruled this decision, but older Android NDKs still require it.
#if !defined(__STDC_CONSTANT_MACROS)
#define __STDC_CONSTANT_MACROS
#endif
diff --git a/crypto/fipsmodule/modes/internal.h b/crypto/fipsmodule/modes/internal.h
index 227f704..6a5ff99 100644
--- a/crypto/fipsmodule/modes/internal.h
+++ b/crypto/fipsmodule/modes/internal.h
@@ -109,28 +109,28 @@
OPENSSL_memcpy(out, &v, sizeof(v));
}
-/* block128_f is the type of a 128-bit, block cipher. */
+// block128_f is the type of a 128-bit, block cipher.
typedef void (*block128_f)(const uint8_t in[16], uint8_t out[16],
const void *key);
-/* GCM definitions */
+// GCM definitions
typedef struct { uint64_t hi,lo; } u128;
-/* gmult_func multiplies |Xi| by the GCM key and writes the result back to
- * |Xi|. */
+// gmult_func multiplies |Xi| by the GCM key and writes the result back to
+// |Xi|.
typedef void (*gmult_func)(uint64_t Xi[2], const u128 Htable[16]);
-/* ghash_func repeatedly multiplies |Xi| by the GCM key and adds in blocks from
- * |inp|. The result is written back to |Xi| and the |len| argument must be a
- * multiple of 16. */
+// ghash_func repeatedly multiplies |Xi| by the GCM key and adds in blocks from
+// |inp|. The result is written back to |Xi| and the |len| argument must be a
+// multiple of 16.
typedef void (*ghash_func)(uint64_t Xi[2], const u128 Htable[16],
const uint8_t *inp, size_t len);
-/* This differs from upstream's |gcm128_context| in that it does not have the
- * |key| pointer, in order to make it |memcpy|-friendly. Rather the key is
- * passed into each call that needs it. */
+// This differs from upstream's |gcm128_context| in that it does not have the
+// |key| pointer, in order to make it |memcpy|-friendly. Rather the key is
+// passed into each call that needs it.
struct gcm128_context {
- /* Following 6 names follow names in GCM specification */
+  // The following 6 names follow the naming in the GCM specification
union {
uint64_t u[2];
uint32_t d[4];
@@ -138,8 +138,8 @@
size_t t[16 / sizeof(size_t)];
} Yi, EKi, EK0, len, Xi;
- /* Note that the order of |Xi|, |H| and |Htable| is fixed by the MOVBE-based,
- * x86-64, GHASH assembly. */
+ // Note that the order of |Xi|, |H| and |Htable| is fixed by the MOVBE-based,
+ // x86-64, GHASH assembly.
u128 H;
u128 Htable[16];
gmult_func gmult;
@@ -148,39 +148,39 @@
unsigned int mres, ares;
block128_f block;
- /* use_aesni_gcm_crypt is true if this context should use the assembly
- * functions |aesni_gcm_encrypt| and |aesni_gcm_decrypt| to process data. */
+ // use_aesni_gcm_crypt is true if this context should use the assembly
+ // functions |aesni_gcm_encrypt| and |aesni_gcm_decrypt| to process data.
unsigned use_aesni_gcm_crypt:1;
};
#if defined(OPENSSL_X86) || defined(OPENSSL_X86_64)
-/* crypto_gcm_clmul_enabled returns one if the CLMUL implementation of GCM is
- * used. */
+// crypto_gcm_clmul_enabled returns one if the CLMUL implementation of GCM is
+// used.
int crypto_gcm_clmul_enabled(void);
#endif
-/* CTR. */
+// CTR.
-/* ctr128_f is the type of a function that performs CTR-mode encryption. */
+// ctr128_f is the type of a function that performs CTR-mode encryption.
typedef void (*ctr128_f)(const uint8_t *in, uint8_t *out, size_t blocks,
const void *key, const uint8_t ivec[16]);
-/* CRYPTO_ctr128_encrypt encrypts (or decrypts, it's the same in CTR mode)
- * |len| bytes from |in| to |out| using |block| in counter mode. There's no
- * requirement that |len| be a multiple of any value and any partial blocks are
- * stored in |ecount_buf| and |*num|, which must be zeroed before the initial
- * call. The counter is a 128-bit, big-endian value in |ivec| and is
- * incremented by this function. */
+// CRYPTO_ctr128_encrypt encrypts (or decrypts, it's the same in CTR mode)
+// |len| bytes from |in| to |out| using |block| in counter mode. There's no
+// requirement that |len| be a multiple of any value and any partial blocks are
+// stored in |ecount_buf| and |*num|, which must be zeroed before the initial
+// call. The counter is a 128-bit, big-endian value in |ivec| and is
+// incremented by this function.
void CRYPTO_ctr128_encrypt(const uint8_t *in, uint8_t *out, size_t len,
const void *key, uint8_t ivec[16],
uint8_t ecount_buf[16], unsigned *num,
block128_f block);
-/* CRYPTO_ctr128_encrypt_ctr32 acts like |CRYPTO_ctr128_encrypt| but takes
- * |ctr|, a function that performs CTR mode but only deals with the lower 32
- * bits of the counter. This is useful when |ctr| can be an optimised
- * function. */
+// CRYPTO_ctr128_encrypt_ctr32 acts like |CRYPTO_ctr128_encrypt| but takes
+// |ctr|, a function that performs CTR mode but only deals with the lower 32
+// bits of the counter. This is useful when |ctr| can be an optimised
+// function.
void CRYPTO_ctr128_encrypt_ctr32(const uint8_t *in, uint8_t *out, size_t len,
const void *key, uint8_t ivec[16],
uint8_t ecount_buf[16], unsigned *num,
@@ -193,137 +193,137 @@
#endif
-/* GCM.
- *
- * This API differs from the upstream API slightly. The |GCM128_CONTEXT| does
- * not have a |key| pointer that points to the key as upstream's version does.
- * Instead, every function takes a |key| parameter. This way |GCM128_CONTEXT|
- * can be safely copied. */
+// GCM.
+//
+// This API differs from the upstream API slightly. The |GCM128_CONTEXT| does
+// not have a |key| pointer that points to the key as upstream's version does.
+// Instead, every function takes a |key| parameter. This way |GCM128_CONTEXT|
+// can be safely copied.
typedef struct gcm128_context GCM128_CONTEXT;
-/* CRYPTO_ghash_init writes a precomputed table of powers of |gcm_key| to
- * |out_table| and sets |*out_mult| and |*out_hash| to (potentially hardware
- * accelerated) functions for performing operations in the GHASH field. If the
- * AVX implementation was used |*out_is_avx| will be true. */
+// CRYPTO_ghash_init writes a precomputed table of powers of |gcm_key| to
+// |out_table| and sets |*out_mult| and |*out_hash| to (potentially hardware
+// accelerated) functions for performing operations in the GHASH field. If the
+// AVX implementation was used |*out_is_avx| will be true.
void CRYPTO_ghash_init(gmult_func *out_mult, ghash_func *out_hash,
u128 *out_key, u128 out_table[16], int *out_is_avx,
const uint8_t *gcm_key);
-/* CRYPTO_gcm128_init initialises |ctx| to use |block| (typically AES) with
- * the given key. |is_aesni_encrypt| is one if |block| is |aesni_encrypt|. */
+// CRYPTO_gcm128_init initialises |ctx| to use |block| (typically AES) with
+// the given key. |is_aesni_encrypt| is one if |block| is |aesni_encrypt|.
OPENSSL_EXPORT void CRYPTO_gcm128_init(GCM128_CONTEXT *ctx, const void *key,
block128_f block, int is_aesni_encrypt);
-/* CRYPTO_gcm128_setiv sets the IV (nonce) for |ctx|. The |key| must be the
- * same key that was passed to |CRYPTO_gcm128_init|. */
+// CRYPTO_gcm128_setiv sets the IV (nonce) for |ctx|. The |key| must be the
+// same key that was passed to |CRYPTO_gcm128_init|.
OPENSSL_EXPORT void CRYPTO_gcm128_setiv(GCM128_CONTEXT *ctx, const void *key,
const uint8_t *iv, size_t iv_len);
-/* CRYPTO_gcm128_aad sets the authenticated data for an instance of GCM.
- * This must be called before and data is encrypted. It returns one on success
- * and zero otherwise. */
+// CRYPTO_gcm128_aad sets the authenticated data for an instance of GCM.
+// This must be called before any data is encrypted. It returns one on success
+// and zero otherwise.
OPENSSL_EXPORT int CRYPTO_gcm128_aad(GCM128_CONTEXT *ctx, const uint8_t *aad,
size_t len);
-/* CRYPTO_gcm128_encrypt encrypts |len| bytes from |in| to |out|. The |key|
- * must be the same key that was passed to |CRYPTO_gcm128_init|. It returns one
- * on success and zero otherwise. */
+// CRYPTO_gcm128_encrypt encrypts |len| bytes from |in| to |out|. The |key|
+// must be the same key that was passed to |CRYPTO_gcm128_init|. It returns one
+// on success and zero otherwise.
OPENSSL_EXPORT int CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx, const void *key,
const uint8_t *in, uint8_t *out,
size_t len);
-/* CRYPTO_gcm128_decrypt decrypts |len| bytes from |in| to |out|. The |key|
- * must be the same key that was passed to |CRYPTO_gcm128_init|. It returns one
- * on success and zero otherwise. */
+// CRYPTO_gcm128_decrypt decrypts |len| bytes from |in| to |out|. The |key|
+// must be the same key that was passed to |CRYPTO_gcm128_init|. It returns one
+// on success and zero otherwise.
OPENSSL_EXPORT int CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx, const void *key,
const uint8_t *in, uint8_t *out,
size_t len);
-/* CRYPTO_gcm128_encrypt_ctr32 encrypts |len| bytes from |in| to |out| using
- * a CTR function that only handles the bottom 32 bits of the nonce, like
- * |CRYPTO_ctr128_encrypt_ctr32|. The |key| must be the same key that was
- * passed to |CRYPTO_gcm128_init|. It returns one on success and zero
- * otherwise. */
+// CRYPTO_gcm128_encrypt_ctr32 encrypts |len| bytes from |in| to |out| using
+// a CTR function that only handles the bottom 32 bits of the nonce, like
+// |CRYPTO_ctr128_encrypt_ctr32|. The |key| must be the same key that was
+// passed to |CRYPTO_gcm128_init|. It returns one on success and zero
+// otherwise.
OPENSSL_EXPORT int CRYPTO_gcm128_encrypt_ctr32(GCM128_CONTEXT *ctx,
const void *key,
const uint8_t *in, uint8_t *out,
size_t len, ctr128_f stream);
-/* CRYPTO_gcm128_decrypt_ctr32 decrypts |len| bytes from |in| to |out| using
- * a CTR function that only handles the bottom 32 bits of the nonce, like
- * |CRYPTO_ctr128_encrypt_ctr32|. The |key| must be the same key that was
- * passed to |CRYPTO_gcm128_init|. It returns one on success and zero
- * otherwise. */
+// CRYPTO_gcm128_decrypt_ctr32 decrypts |len| bytes from |in| to |out| using
+// a CTR function that only handles the bottom 32 bits of the nonce, like
+// |CRYPTO_ctr128_encrypt_ctr32|. The |key| must be the same key that was
+// passed to |CRYPTO_gcm128_init|. It returns one on success and zero
+// otherwise.
OPENSSL_EXPORT int CRYPTO_gcm128_decrypt_ctr32(GCM128_CONTEXT *ctx,
const void *key,
const uint8_t *in, uint8_t *out,
size_t len, ctr128_f stream);
-/* CRYPTO_gcm128_finish calculates the authenticator and compares it against
- * |len| bytes of |tag|. It returns one on success and zero otherwise. */
+// CRYPTO_gcm128_finish calculates the authenticator and compares it against
+// |len| bytes of |tag|. It returns one on success and zero otherwise.
OPENSSL_EXPORT int CRYPTO_gcm128_finish(GCM128_CONTEXT *ctx, const uint8_t *tag,
size_t len);
-/* CRYPTO_gcm128_tag calculates the authenticator and copies it into |tag|.
- * The minimum of |len| and 16 bytes are copied into |tag|. */
+// CRYPTO_gcm128_tag calculates the authenticator and copies it into |tag|.
+// The minimum of |len| and 16 bytes is copied into |tag|.
OPENSSL_EXPORT void CRYPTO_gcm128_tag(GCM128_CONTEXT *ctx, uint8_t *tag,
size_t len);
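
A hypothetical one-shot seal using the API above (error checks elided; |key_bytes|, |nonce|, |aad| and the buffers are assumptions, and the function-pointer cast on AES_encrypt is a sketch-level shortcut):

  AES_KEY aes;
  AES_set_encrypt_key(key_bytes, 128, &aes);

  GCM128_CONTEXT gcm;
  CRYPTO_gcm128_init(&gcm, &aes, (block128_f)AES_encrypt, 0);
  CRYPTO_gcm128_setiv(&gcm, &aes, nonce, 12);
  CRYPTO_gcm128_aad(&gcm, aad, aad_len);
  CRYPTO_gcm128_encrypt(&gcm, &aes, plaintext, ciphertext, plaintext_len);

  uint8_t tag[16];
  CRYPTO_gcm128_tag(&gcm, tag, sizeof(tag));  // authenticator over AAD and ciphertext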
-/* CBC. */
+// CBC.
-/* cbc128_f is the type of a function that performs CBC-mode encryption. */
+// cbc128_f is the type of a function that performs CBC-mode encryption.
typedef void (*cbc128_f)(const uint8_t *in, uint8_t *out, size_t len,
const void *key, uint8_t ivec[16], int enc);
-/* CRYPTO_cbc128_encrypt encrypts |len| bytes from |in| to |out| using the
- * given IV and block cipher in CBC mode. The input need not be a multiple of
- * 128 bits long, but the output will round up to the nearest 128 bit multiple,
- * zero padding the input if needed. The IV will be updated on return. */
+// CRYPTO_cbc128_encrypt encrypts |len| bytes from |in| to |out| using the
+// given IV and block cipher in CBC mode. The input need not be a multiple of
+// 128 bits long, but the output will round up to the nearest 128 bit multiple,
+// zero padding the input if needed. The IV will be updated on return.
void CRYPTO_cbc128_encrypt(const uint8_t *in, uint8_t *out, size_t len,
const void *key, uint8_t ivec[16], block128_f block);
-/* CRYPTO_cbc128_decrypt decrypts |len| bytes from |in| to |out| using the
- * given IV and block cipher in CBC mode. If |len| is not a multiple of 128
- * bits then only that many bytes will be written, but a multiple of 128 bits
- * is always read from |in|. The IV will be updated on return. */
+// CRYPTO_cbc128_decrypt decrypts |len| bytes from |in| to |out| using the
+// given IV and block cipher in CBC mode. If |len| is not a multiple of 128
+// bits then only that many bytes will be written, but a multiple of 128 bits
+// is always read from |in|. The IV will be updated on return.
void CRYPTO_cbc128_decrypt(const uint8_t *in, uint8_t *out, size_t len,
const void *key, uint8_t ivec[16], block128_f block);
-/* OFB. */
+// OFB.
-/* CRYPTO_ofb128_encrypt encrypts (or decrypts, it's the same with OFB mode)
- * |len| bytes from |in| to |out| using |block| in OFB mode. There's no
- * requirement that |len| be a multiple of any value and any partial blocks are
- * stored in |ivec| and |*num|, the latter must be zero before the initial
- * call. */
+// CRYPTO_ofb128_encrypt encrypts (or decrypts, it's the same with OFB mode)
+// |len| bytes from |in| to |out| using |block| in OFB mode. There's no
+// requirement that |len| be a multiple of any value and any partial blocks are
+// stored in |ivec| and |*num|, the latter must be zero before the initial
+// call.
void CRYPTO_ofb128_encrypt(const uint8_t *in, uint8_t *out,
size_t len, const void *key, uint8_t ivec[16],
unsigned *num, block128_f block);
-/* CFB. */
+// CFB.
-/* CRYPTO_cfb128_encrypt encrypts (or decrypts, if |enc| is zero) |len| bytes
- * from |in| to |out| using |block| in CFB mode. There's no requirement that
- * |len| be a multiple of any value and any partial blocks are stored in |ivec|
- * and |*num|, the latter must be zero before the initial call. */
+// CRYPTO_cfb128_encrypt encrypts (or decrypts, if |enc| is zero) |len| bytes
+// from |in| to |out| using |block| in CFB mode. There's no requirement that
+// |len| be a multiple of any value and any partial blocks are stored in |ivec|
+// and |*num|, the latter must be zero before the initial call.
void CRYPTO_cfb128_encrypt(const uint8_t *in, uint8_t *out, size_t len,
const void *key, uint8_t ivec[16], unsigned *num,
int enc, block128_f block);
-/* CRYPTO_cfb128_8_encrypt encrypts (or decrypts, if |enc| is zero) |len| bytes
- * from |in| to |out| using |block| in CFB-8 mode. Prior to the first call
- * |num| should be set to zero. */
+// CRYPTO_cfb128_8_encrypt encrypts (or decrypts, if |enc| is zero) |len| bytes
+// from |in| to |out| using |block| in CFB-8 mode. Prior to the first call
+// |num| should be set to zero.
void CRYPTO_cfb128_8_encrypt(const uint8_t *in, uint8_t *out, size_t len,
const void *key, uint8_t ivec[16], unsigned *num,
int enc, block128_f block);
-/* CRYPTO_cfb128_1_encrypt encrypts (or decrypts, if |enc| is zero) |len| bytes
- * from |in| to |out| using |block| in CFB-1 mode. Prior to the first call
- * |num| should be set to zero. */
+// CRYPTO_cfb128_1_encrypt encrypts (or decrypts, if |enc| is zero) |len| bytes
+// from |in| to |out| using |block| in CFB-1 mode. Prior to the first call
+// |num| should be set to zero.
void CRYPTO_cfb128_1_encrypt(const uint8_t *in, uint8_t *out, size_t bits,
const void *key, uint8_t ivec[16], unsigned *num,
int enc, block128_f block);
@@ -333,11 +333,11 @@
block128_f block);
-/* POLYVAL.
- *
- * POLYVAL is a polynomial authenticator that operates over a field very
- * similar to the one that GHASH uses. See
- * https://tools.ietf.org/html/draft-irtf-cfrg-gcmsiv-02#section-3. */
+// POLYVAL.
+//
+// POLYVAL is a polynomial authenticator that operates over a field very
+// similar to the one that GHASH uses. See
+// https://tools.ietf.org/html/draft-irtf-cfrg-gcmsiv-02#section-3.
typedef union {
uint64_t u[2];
@@ -345,8 +345,8 @@
} polyval_block;
struct polyval_ctx {
- /* Note that the order of |S|, |H| and |Htable| is fixed by the MOVBE-based,
- * x86-64, GHASH assembly. */
+ // Note that the order of |S|, |H| and |Htable| is fixed by the MOVBE-based,
+ // x86-64, GHASH assembly.
polyval_block S;
u128 H;
u128 Htable[16];
@@ -354,21 +354,21 @@
ghash_func ghash;
};
-/* CRYPTO_POLYVAL_init initialises |ctx| using |key|. */
+// CRYPTO_POLYVAL_init initialises |ctx| using |key|.
void CRYPTO_POLYVAL_init(struct polyval_ctx *ctx, const uint8_t key[16]);
-/* CRYPTO_POLYVAL_update_blocks updates the accumulator in |ctx| given the
- * blocks from |in|. Only a whole number of blocks can be processed so |in_len|
- * must be a multiple of 16. */
+// CRYPTO_POLYVAL_update_blocks updates the accumulator in |ctx| given the
+// blocks from |in|. Only a whole number of blocks can be processed so |in_len|
+// must be a multiple of 16.
void CRYPTO_POLYVAL_update_blocks(struct polyval_ctx *ctx, const uint8_t *in,
size_t in_len);
-/* CRYPTO_POLYVAL_finish writes the accumulator from |ctx| to |out|. */
+// CRYPTO_POLYVAL_finish writes the accumulator from |ctx| to |out|.
void CRYPTO_POLYVAL_finish(const struct polyval_ctx *ctx, uint8_t out[16]);
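
A hypothetical use of the three calls above over two 16-byte blocks (|key| and |blocks| are assumptions; the input length must be a multiple of 16):

  struct polyval_ctx ctx;
  CRYPTO_POLYVAL_init(&ctx, key);
  CRYPTO_POLYVAL_update_blocks(&ctx, blocks, 32);  // two whole blocks
  uint8_t acc[16];
  CRYPTO_POLYVAL_finish(&ctx, acc);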
#if defined(__cplusplus)
-} /* extern C */
+} // extern C
#endif
-#endif /* OPENSSL_HEADER_MODES_INTERNAL_H */
+#endif // OPENSSL_HEADER_MODES_INTERNAL_H
diff --git a/crypto/fipsmodule/modes/polyval.c b/crypto/fipsmodule/modes/polyval.c
index 392e2d8..857dc0e 100644
--- a/crypto/fipsmodule/modes/polyval.c
+++ b/crypto/fipsmodule/modes/polyval.c
@@ -21,16 +21,16 @@
#include "../../internal.h"
-/* byte_reverse reverses the order of the bytes in |b->c|. */
+// byte_reverse reverses the order of the bytes in |b->c|.
static void byte_reverse(polyval_block *b) {
const uint64_t t = CRYPTO_bswap8(b->u[0]);
b->u[0] = CRYPTO_bswap8(b->u[1]);
b->u[1] = t;
}
-/* reverse_and_mulX_ghash interprets the bytes |b->c| as a reversed element of
- * the GHASH field, multiplies that by 'x' and serialises the result back into
- * |b|, but with GHASH's backwards bit ordering. */
+// reverse_and_mulX_ghash interprets the bytes |b->c| as a reversed element of
+// the GHASH field, multiplies that by 'x' and serialises the result back into
+// |b|, but with GHASH's backwards bit ordering.
static void reverse_and_mulX_ghash(polyval_block *b) {
uint64_t hi = b->u[0];
uint64_t lo = b->u[1];
@@ -44,11 +44,11 @@
b->u[1] = CRYPTO_bswap8(hi);
}
-/* POLYVAL(H, X_1, ..., X_n) =
- * ByteReverse(GHASH(mulX_GHASH(ByteReverse(H)), ByteReverse(X_1), ...,
- * ByteReverse(X_n))).
- *
- * See https://tools.ietf.org/html/draft-irtf-cfrg-gcmsiv-02#appendix-A. */
+// POLYVAL(H, X_1, ..., X_n) =
+// ByteReverse(GHASH(mulX_GHASH(ByteReverse(H)), ByteReverse(X_1), ...,
+// ByteReverse(X_n))).
+//
+// See https://tools.ietf.org/html/draft-irtf-cfrg-gcmsiv-02#appendix-A.
void CRYPTO_POLYVAL_init(struct polyval_ctx *ctx, const uint8_t key[16]) {
polyval_block H;
diff --git a/crypto/fipsmodule/rand/ctrdrbg.c b/crypto/fipsmodule/rand/ctrdrbg.c
index 2b22f5d..9f8be66 100644
--- a/crypto/fipsmodule/rand/ctrdrbg.c
+++ b/crypto/fipsmodule/rand/ctrdrbg.c
@@ -21,16 +21,16 @@
#include "../cipher/internal.h"
-/* Section references in this file refer to SP 800-90Ar1:
- * http://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-90Ar1.pdf */
+// Section references in this file refer to SP 800-90Ar1:
+// http://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-90Ar1.pdf
-/* See table 3. */
+// See table 3.
static const uint64_t kMaxReseedCount = UINT64_C(1) << 48;
int CTR_DRBG_init(CTR_DRBG_STATE *drbg,
const uint8_t entropy[CTR_DRBG_ENTROPY_LEN],
const uint8_t *personalization, size_t personalization_len) {
- /* Section 10.2.1.3.1 */
+ // Section 10.2.1.3.1
if (personalization_len > CTR_DRBG_ENTROPY_LEN) {
return 0;
}
@@ -42,10 +42,10 @@
seed_material[i] ^= personalization[i];
}
- /* Section 10.2.1.2 */
+ // Section 10.2.1.2
- /* kInitMask is the result of encrypting blocks with big-endian value 1, 2
- * and 3 with the all-zero AES-256 key. */
+ // kInitMask is the result of encrypting blocks with big-endian value 1, 2
+ // and 3 with the all-zero AES-256 key.
static const uint8_t kInitMask[CTR_DRBG_ENTROPY_LEN] = {
0x53, 0x0f, 0x8a, 0xfb, 0xc7, 0x45, 0x36, 0xb9, 0xa9, 0x63, 0xb4, 0xf1,
0xc4, 0xcb, 0x73, 0x8b, 0xce, 0xa7, 0x40, 0x3d, 0x4d, 0x60, 0x6b, 0x6e,
@@ -67,8 +67,8 @@
OPENSSL_COMPILE_ASSERT(CTR_DRBG_ENTROPY_LEN % AES_BLOCK_SIZE == 0,
not_a_multiple_of_block_size);
-/* ctr_inc adds |n| to the last four bytes of |drbg->counter|, treated as a
- * big-endian number. */
+// ctr32_add adds |n| to the last four bytes of |drbg->counter|, treated as a
+// big-endian number.
static void ctr32_add(CTR_DRBG_STATE *drbg, uint32_t n) {
drbg->counter.words[3] =
CRYPTO_bswap4(CRYPTO_bswap4(drbg->counter.words[3]) + n);
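
Worked example, assuming a little-endian host: if the counter's last four bytes are 00 00 00 ff (the big-endian value 255), drbg->counter.words[3] reads as 0xff000000; CRYPTO_bswap4 turns that into 255, adding n = 1 gives 256, and the second bswap stores the bytes back as 00 00 01 00.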
@@ -76,9 +76,9 @@
static int CTR_DRBG_update(CTR_DRBG_STATE *drbg, const uint8_t *data,
size_t data_len) {
- /* Section 10.2.1.2. A value of |data_len| which less than
- * |CTR_DRBG_ENTROPY_LEN| is permitted and acts the same as right-padding
- * with zeros. This can save a copy. */
+  // Section 10.2.1.2. A value of |data_len| which is less than
+ // |CTR_DRBG_ENTROPY_LEN| is permitted and acts the same as right-padding
+ // with zeros. This can save a copy.
if (data_len > CTR_DRBG_ENTROPY_LEN) {
return 0;
}
@@ -103,7 +103,7 @@
const uint8_t entropy[CTR_DRBG_ENTROPY_LEN],
const uint8_t *additional_data,
size_t additional_data_len) {
- /* Section 10.2.1.4 */
+ // Section 10.2.1.4
uint8_t entropy_copy[CTR_DRBG_ENTROPY_LEN];
if (additional_data_len > 0) {
@@ -131,12 +131,12 @@
int CTR_DRBG_generate(CTR_DRBG_STATE *drbg, uint8_t *out, size_t out_len,
const uint8_t *additional_data,
size_t additional_data_len) {
- /* See 9.3.1 */
+ // See 9.3.1
if (out_len > CTR_DRBG_MAX_GENERATE_LENGTH) {
return 0;
}
- /* See 10.2.1.5.1 */
+ // See 10.2.1.5.1
if (drbg->reseed_counter > kMaxReseedCount) {
return 0;
}
@@ -146,12 +146,12 @@
return 0;
}
- /* kChunkSize is used to interact better with the cache. Since the AES-CTR
- * code assumes that it's encrypting rather than just writing keystream, the
- * buffer has to be zeroed first. Without chunking, large reads would zero
- * the whole buffer, flushing the L1 cache, and then do another pass (missing
- * the cache every time) to “encrypt” it. The code can avoid this by
- * chunking. */
+ // kChunkSize is used to interact better with the cache. Since the AES-CTR
+ // code assumes that it's encrypting rather than just writing keystream, the
+ // buffer has to be zeroed first. Without chunking, large reads would zero
+ // the whole buffer, flushing the L1 cache, and then do another pass (missing
+ // the cache every time) to “encrypt” it. The code can avoid this by
+ // chunking.
static const size_t kChunkSize = 8 * 1024;
while (out_len >= AES_BLOCK_SIZE) {
diff --git a/crypto/fipsmodule/rand/internal.h b/crypto/fipsmodule/rand/internal.h
index f569c38..c0812ee 100644
--- a/crypto/fipsmodule/rand/internal.h
+++ b/crypto/fipsmodule/rand/internal.h
@@ -25,21 +25,21 @@
#endif
-/* RAND_bytes_with_additional_data samples from the RNG after mixing 32 bytes
- * from |user_additional_data| in. */
+// RAND_bytes_with_additional_data samples from the RNG after mixing 32 bytes
+// from |user_additional_data| in.
void RAND_bytes_with_additional_data(uint8_t *out, size_t out_len,
const uint8_t user_additional_data[32]);
-/* CRYPTO_sysrand fills |len| bytes at |buf| with entropy from the operating
- * system. */
+// CRYPTO_sysrand fills |len| bytes at |buf| with entropy from the operating
+// system.
void CRYPTO_sysrand(uint8_t *buf, size_t len);
-/* rand_fork_unsafe_buffering_enabled returns whether fork-unsafe buffering has
- * been enabled via |RAND_enable_fork_unsafe_buffering|. */
+// rand_fork_unsafe_buffering_enabled returns whether fork-unsafe buffering has
+// been enabled via |RAND_enable_fork_unsafe_buffering|.
int rand_fork_unsafe_buffering_enabled(void);
-/* CTR_DRBG_STATE contains the state of a CTR_DRBG based on AES-256. See SP
- * 800-90Ar1. */
+// CTR_DRBG_STATE contains the state of a CTR_DRBG based on AES-256. See SP
+// 800-90Ar1.
typedef struct {
alignas(16) AES_KEY ks;
block128_f block;
@@ -51,42 +51,42 @@
uint64_t reseed_counter;
} CTR_DRBG_STATE;
-/* See SP 800-90Ar1, table 3. */
+// See SP 800-90Ar1, table 3.
#define CTR_DRBG_ENTROPY_LEN 48
#define CTR_DRBG_MAX_GENERATE_LENGTH 65536
-/* CTR_DRBG_init initialises |*drbg| given |CTR_DRBG_ENTROPY_LEN| bytes of
- * entropy in |entropy| and, optionally, a personalization string up to
- * |CTR_DRBG_ENTROPY_LEN| bytes in length. It returns one on success and zero
- * on error. */
+// CTR_DRBG_init initialises |*drbg| given |CTR_DRBG_ENTROPY_LEN| bytes of
+// entropy in |entropy| and, optionally, a personalization string up to
+// |CTR_DRBG_ENTROPY_LEN| bytes in length. It returns one on success and zero
+// on error.
OPENSSL_EXPORT int CTR_DRBG_init(CTR_DRBG_STATE *drbg,
const uint8_t entropy[CTR_DRBG_ENTROPY_LEN],
const uint8_t *personalization,
size_t personalization_len);
-/* CTR_DRBG_reseed reseeds |drbg| given |CTR_DRBG_ENTROPY_LEN| bytes of entropy
- * in |entropy| and, optionally, up to |CTR_DRBG_ENTROPY_LEN| bytes of
- * additional data. It returns one on success or zero on error. */
+// CTR_DRBG_reseed reseeds |drbg| given |CTR_DRBG_ENTROPY_LEN| bytes of entropy
+// in |entropy| and, optionally, up to |CTR_DRBG_ENTROPY_LEN| bytes of
+// additional data. It returns one on success or zero on error.
OPENSSL_EXPORT int CTR_DRBG_reseed(CTR_DRBG_STATE *drbg,
const uint8_t entropy[CTR_DRBG_ENTROPY_LEN],
const uint8_t *additional_data,
size_t additional_data_len);
-/* CTR_DRBG_generate processes to up |CTR_DRBG_ENTROPY_LEN| bytes of additional
- * data (if any) and then writes |out_len| random bytes to |out|, where
- * |out_len| <= |CTR_DRBG_MAX_GENERATE_LENGTH|. It returns one on success or
- * zero on error. */
+// CTR_DRBG_generate processes up to |CTR_DRBG_ENTROPY_LEN| bytes of additional
+// data (if any) and then writes |out_len| random bytes to |out|, where
+// |out_len| <= |CTR_DRBG_MAX_GENERATE_LENGTH|. It returns one on success or
+// zero on error.
OPENSSL_EXPORT int CTR_DRBG_generate(CTR_DRBG_STATE *drbg, uint8_t *out,
size_t out_len,
const uint8_t *additional_data,
size_t additional_data_len);
-/* CTR_DRBG_clear zeroises the state of |drbg|. */
+// CTR_DRBG_clear zeroises the state of |drbg|.
OPENSSL_EXPORT void CTR_DRBG_clear(CTR_DRBG_STATE *drbg);
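
A hypothetical use of the API above (|entropy|, 48 fresh bytes from the system, is an assumption; error handling simplified to abort):

  CTR_DRBG_STATE drbg;
  uint8_t block[16];
  if (!CTR_DRBG_init(&drbg, entropy, NULL, 0) ||
      !CTR_DRBG_generate(&drbg, block, sizeof(block), NULL, 0)) {
    abort();  // bad input lengths, or the reseed interval was exceeded
  }
  CTR_DRBG_clear(&drbg);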
#if defined(__cplusplus)
-} /* extern C */
+} // extern C
#endif
-#endif /* OPENSSL_HEADER_CRYPTO_RAND_INTERNAL_H */
+#endif // OPENSSL_HEADER_CRYPTO_RAND_INTERNAL_H
diff --git a/crypto/fipsmodule/rand/rand.c b/crypto/fipsmodule/rand/rand.c
index 9480ddb..dafc91f 100644
--- a/crypto/fipsmodule/rand/rand.c
+++ b/crypto/fipsmodule/rand/rand.c
@@ -31,53 +31,53 @@
#include "../delocate.h"
-/* It's assumed that the operating system always has an unfailing source of
- * entropy which is accessed via |CRYPTO_sysrand|. (If the operating system
- * entropy source fails, it's up to |CRYPTO_sysrand| to abort the process—we
- * don't try to handle it.)
- *
- * In addition, the hardware may provide a low-latency RNG. Intel's rdrand
- * instruction is the canonical example of this. When a hardware RNG is
- * available we don't need to worry about an RNG failure arising from fork()ing
- * the process or moving a VM, so we can keep thread-local RNG state and use it
- * as an additional-data input to CTR-DRBG.
- *
- * (We assume that the OS entropy is safe from fork()ing and VM duplication.
- * This might be a bit of a leap of faith, esp on Windows, but there's nothing
- * that we can do about it.) */
+// It's assumed that the operating system always has an unfailing source of
+// entropy which is accessed via |CRYPTO_sysrand|. (If the operating system
+// entropy source fails, it's up to |CRYPTO_sysrand| to abort the process—we
+// don't try to handle it.)
+//
+// In addition, the hardware may provide a low-latency RNG. Intel's rdrand
+// instruction is the canonical example of this. When a hardware RNG is
+// available we don't need to worry about an RNG failure arising from fork()ing
+// the process or moving a VM, so we can keep thread-local RNG state and use it
+// as an additional-data input to CTR-DRBG.
+//
+// (We assume that the OS entropy is safe from fork()ing and VM duplication.
+// This might be a bit of a leap of faith, esp on Windows, but there's nothing
+// that we can do about it.)
-/* kReseedInterval is the number of generate calls made to CTR-DRBG before
- * reseeding. */
+// kReseedInterval is the number of generate calls made to CTR-DRBG before
+// reseeding.
static const unsigned kReseedInterval = 4096;
-/* CRNGT_BLOCK_SIZE is the number of bytes in a “block” for the purposes of the
- * continuous random number generator test in FIPS 140-2, section 4.9.2. */
+// CRNGT_BLOCK_SIZE is the number of bytes in a “block” for the purposes of the
+// continuous random number generator test in FIPS 140-2, section 4.9.2.
#define CRNGT_BLOCK_SIZE 16
-/* rand_thread_state contains the per-thread state for the RNG. */
+// rand_thread_state contains the per-thread state for the RNG.
struct rand_thread_state {
CTR_DRBG_STATE drbg;
- /* calls is the number of generate calls made on |drbg| since it was last
- * (re)seeded. This is bound by |kReseedInterval|. */
+ // calls is the number of generate calls made on |drbg| since it was last
+ // (re)seeded. This is bound by |kReseedInterval|.
unsigned calls;
- /* last_block_valid is non-zero iff |last_block| contains data from
- * |CRYPTO_sysrand|. */
+ // last_block_valid is non-zero iff |last_block| contains data from
+ // |CRYPTO_sysrand|.
int last_block_valid;
#if defined(BORINGSSL_FIPS)
- /* last_block contains the previous block from |CRYPTO_sysrand|. */
+ // last_block contains the previous block from |CRYPTO_sysrand|.
uint8_t last_block[CRNGT_BLOCK_SIZE];
- /* next and prev form a NULL-terminated, double-linked list of all states in
- * a process. */
+ // next and prev form a NULL-terminated, double-linked list of all states in
+ // a process.
struct rand_thread_state *next, *prev;
#endif
};
#if defined(BORINGSSL_FIPS)
-/* thread_states_list is the head of a linked-list of all |rand_thread_state|
- * objects in the process, one per thread. This is needed because FIPS requires
- * that they be zeroed on process exit, but thread-local destructors aren't
- * called when the whole process is exiting. */
+// thread_states_list is the head of a linked-list of all |rand_thread_state|
+// objects in the process, one per thread. This is needed because FIPS requires
+// that they be zeroed on process exit, but thread-local destructors aren't
+// called when the whole process is exiting.
DEFINE_BSS_GET(struct rand_thread_state *, thread_states_list);
DEFINE_STATIC_MUTEX(thread_states_list_lock);
@@ -88,13 +88,13 @@
cur != NULL; cur = cur->next) {
CTR_DRBG_clear(&cur->drbg);
}
- /* |thread_states_list_lock is deliberately left locked so that any threads
- * that are still running will hang if they try to call |RAND_bytes|. */
+  // |thread_states_list_lock| is deliberately left locked so that any threads
+ // that are still running will hang if they try to call |RAND_bytes|.
}
#endif
-/* rand_thread_state_free frees a |rand_thread_state|. This is called when a
- * thread exits. */
+// rand_thread_state_free frees a |rand_thread_state|. This is called when a
+// thread exits.
static void rand_thread_state_free(void *state_in) {
struct rand_thread_state *state = state_in;
@@ -126,7 +126,7 @@
#if defined(OPENSSL_X86_64) && !defined(OPENSSL_NO_ASM) && \
!defined(BORINGSSL_UNSAFE_DETERMINISTIC_MODE)
-/* These functions are defined in asm/rdrand-x86_64.pl */
+// These functions are defined in asm/rdrand-x86_64.pl
extern int CRYPTO_rdrand(uint8_t out[8]);
extern int CRYPTO_rdrand_multiple8_buf(uint8_t *buf, size_t len);
@@ -183,8 +183,8 @@
state->last_block_valid = 1;
}
- /* We overread from /dev/urandom or RDRAND by a factor of 10 and XOR to
- * whiten. */
+ // We overread from /dev/urandom or RDRAND by a factor of 10 and XOR to
+ // whiten.
#define FIPS_OVERREAD 10
uint8_t entropy[CTR_DRBG_ENTROPY_LEN * FIPS_OVERREAD];
@@ -192,9 +192,9 @@
CRYPTO_sysrand(entropy, sizeof(entropy));
}
- /* See FIPS 140-2, section 4.9.2. This is the “continuous random number
- * generator test” which causes the program to randomly abort. Hopefully the
- * rate of failure is small enough not to be a problem in practice. */
+ // See FIPS 140-2, section 4.9.2. This is the “continuous random number
+ // generator test” which causes the program to randomly abort. Hopefully the
+ // rate of failure is small enough not to be a problem in practice.
if (CRYPTO_memcmp(state->last_block, entropy, CRNGT_BLOCK_SIZE) == 0) {
printf("CRNGT failed.\n");
BORINGSSL_FIPS_abort();
@@ -225,8 +225,8 @@
static void rand_get_seed(struct rand_thread_state *state,
uint8_t seed[CTR_DRBG_ENTROPY_LEN]) {
- /* If not in FIPS mode, we don't overread from the system entropy source and
- * we don't depend only on the hardware RDRAND. */
+ // If not in FIPS mode, we don't overread from the system entropy source and
+ // we don't depend only on the hardware RDRAND.
CRYPTO_sysrand(seed, CTR_DRBG_ENTROPY_LEN);
}
@@ -238,16 +238,16 @@
return;
}
- /* Additional data is mixed into every CTR-DRBG call to protect, as best we
- * can, against forks & VM clones. We do not over-read this information and
- * don't reseed with it so, from the point of view of FIPS, this doesn't
- * provide “prediction resistance”. But, in practice, it does. */
+ // Additional data is mixed into every CTR-DRBG call to protect, as best we
+ // can, against forks & VM clones. We do not over-read this information and
+ // don't reseed with it so, from the point of view of FIPS, this doesn't
+ // provide “prediction resistance”. But, in practice, it does.
uint8_t additional_data[32];
if (!hwrand(additional_data, sizeof(additional_data))) {
- /* Without a hardware RNG to save us from address-space duplication, the OS
- * entropy is used. This can be expensive (one read per |RAND_bytes| call)
- * and so can be disabled by applications that we have ensured don't fork
- * and aren't at risk of VM cloning. */
+ // Without a hardware RNG to save us from address-space duplication, the OS
+ // entropy is used. This can be expensive (one read per |RAND_bytes| call)
+ // and so can be disabled by applications that we have ensured don't fork
+ // and aren't at risk of VM cloning.
if (!rand_fork_unsafe_buffering_enabled()) {
CRYPTO_sysrand(additional_data, sizeof(additional_data));
} else {
@@ -268,8 +268,8 @@
if (state == NULL ||
!CRYPTO_set_thread_local(OPENSSL_THREAD_LOCAL_RAND, state,
rand_thread_state_free)) {
- /* If the system is out of memory, use an ephemeral state on the
- * stack. */
+ // If the system is out of memory, use an ephemeral state on the
+ // stack.
state = &stack_state;
}
@@ -300,14 +300,14 @@
uint8_t seed[CTR_DRBG_ENTROPY_LEN];
rand_get_seed(state, seed);
#if defined(BORINGSSL_FIPS)
- /* Take a read lock around accesses to |state->drbg|. This is needed to
- * avoid returning bad entropy if we race with
- * |rand_thread_state_clear_all|.
- *
- * This lock must be taken after any calls to |CRYPTO_sysrand| to avoid a
- * bug on ppc64le. glibc may implement pthread locks by wrapping user code
- * in a hardware transaction, but, on some older versions of glibc and the
- * kernel, syscalls made with |syscall| did not abort the transaction. */
+ // Take a read lock around accesses to |state->drbg|. This is needed to
+ // avoid returning bad entropy if we race with
+ // |rand_thread_state_clear_all|.
+ //
+ // This lock must be taken after any calls to |CRYPTO_sysrand| to avoid a
+ // bug on ppc64le. glibc may implement pthread locks by wrapping user code
+ // in a hardware transaction, but, on some older versions of glibc and the
+ // kernel, syscalls made with |syscall| did not abort the transaction.
CRYPTO_STATIC_MUTEX_lock_read(thread_states_list_lock_bss_get());
#endif
if (!CTR_DRBG_reseed(&state->drbg, seed, NULL, 0)) {
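A minimal sketch of the overread-and-XOR whitening step described in this
file, with |CTR_DRBG_ENTROPY_LEN| and |FIPS_OVERREAD| replaced by local
stand-ins; the raw bytes are assumed to come from |CRYPTO_sysrand| or RDRAND
as in the code above:

```c
#include <stdint.h>
#include <string.h>

#define ENTROPY_LEN 48  // stand-in for CTR_DRBG_ENTROPY_LEN
#define OVERREAD 10     // stand-in for FIPS_OVERREAD

// Compress a 10x overread of raw entropy into one seed by XORing the ten
// chunks together, as the FIPS path above describes.
static void whiten(uint8_t seed[ENTROPY_LEN],
                   const uint8_t raw[ENTROPY_LEN * OVERREAD]) {
  memcpy(seed, raw, ENTROPY_LEN);
  for (size_t i = 1; i < OVERREAD; i++) {
    for (size_t j = 0; j < ENTROPY_LEN; j++) {
      seed[j] ^= raw[i * ENTROPY_LEN + j];
    }
  }
}
```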
diff --git a/crypto/fipsmodule/rand/urandom.c b/crypto/fipsmodule/rand/urandom.c
index 8cbf727..5430968 100644
--- a/crypto/fipsmodule/rand/urandom.c
+++ b/crypto/fipsmodule/rand/urandom.c
@@ -13,7 +13,7 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */
#if !defined(_GNU_SOURCE)
-#define _GNU_SOURCE /* needed for syscall() on Linux. */
+#define _GNU_SOURCE // needed for syscall() on Linux.
#endif
#include <openssl/rand.h>
@@ -65,40 +65,40 @@
#error "system call number for getrandom is not the expected value"
#endif
-#else /* __NR_getrandom */
+#else // __NR_getrandom
#define __NR_getrandom EXPECTED_NR_getrandom
-#endif /* __NR_getrandom */
+#endif // __NR_getrandom
-#endif /* EXPECTED_NR_getrandom */
+#endif // EXPECTED_NR_getrandom
#if !defined(GRND_NONBLOCK)
#define GRND_NONBLOCK 1
#endif
-#endif /* OPENSSL_LINUX */
+#endif // OPENSSL_LINUX
-/* rand_lock is used to protect the |*_requested| variables. */
+// rand_lock is used to protect the |*_requested| variables.
DEFINE_STATIC_MUTEX(rand_lock);
-/* The following constants are magic values of |urandom_fd|. */
+// The following constants are magic values of |urandom_fd|.
static const int kUnset = 0;
static const int kHaveGetrandom = -3;
-/* urandom_fd_requested is set by |RAND_set_urandom_fd|. It's protected by
- * |rand_lock|. */
+// urandom_fd_requested is set by |RAND_set_urandom_fd|. It's protected by
+// |rand_lock|.
DEFINE_BSS_GET(int, urandom_fd_requested);
-/* urandom_fd is a file descriptor to /dev/urandom. It's protected by |once|. */
+// urandom_fd is a file descriptor to /dev/urandom. It's protected by |once|.
DEFINE_BSS_GET(int, urandom_fd);
DEFINE_STATIC_ONCE(rand_once);
#if defined(USE_NR_getrandom) || defined(BORINGSSL_FIPS)
-/* message writes |msg| to stderr. We use this because referencing |stderr|
- * with |fprintf| generates relocations, which is a problem inside the FIPS
- * module. */
+// message writes |msg| to stderr. We use this because referencing |stderr|
+// with |fprintf| generates relocations, which is a problem inside the FIPS
+// module.
static void message(const char *msg) {
ssize_t r;
do {
@@ -107,10 +107,10 @@
}
#endif
-/* init_once initializes the state of this module to values previously
- * requested. This is the only function that modifies |urandom_fd| and
- * |urandom_buffering|, whose values may be read safely after calling the
- * once. */
+// init_once initializes the state of this module to values previously
+// requested. This is the only function that modifies |urandom_fd| and
+// |urandom_buffering|, whose values may be read safely after calling the
+// once.
static void init_once(void) {
CRYPTO_STATIC_MUTEX_lock_read(rand_lock_bss_get());
int fd = *urandom_fd_requested_bss_get();
@@ -140,7 +140,7 @@
return;
}
}
-#endif /* USE_NR_getrandom */
+#endif // USE_NR_getrandom
if (fd == kUnset) {
do {
@@ -154,9 +154,9 @@
assert(kUnset == 0);
if (fd == kUnset) {
- /* Because we want to keep |urandom_fd| in the BSS, we have to initialise
- * it to zero. But zero is a valid file descriptor too. Thus if open
- * returns zero for /dev/urandom, we dup it to get a non-zero number. */
+ // Because we want to keep |urandom_fd| in the BSS, we have to initialise
+ // it to zero. But zero is a valid file descriptor too. Thus if open
+ // returns zero for /dev/urandom, we dup it to get a non-zero number.
fd = dup(fd);
close(kUnset);
@@ -166,10 +166,10 @@
}
#if defined(BORINGSSL_FIPS)
- /* In FIPS mode we ensure that the kernel has sufficient entropy before
- * continuing. This is automatically handled by getrandom, which requires
- * that the entropy pool has been initialised, but for urandom we have to
- * poll. */
+ // In FIPS mode we ensure that the kernel has sufficient entropy before
+ // continuing. This is automatically handled by getrandom, which requires
+ // that the entropy pool has been initialised, but for urandom we have to
+ // poll.
for (;;) {
int entropy_bits;
if (ioctl(fd, RNDGETENTCNT, &entropy_bits)) {
@@ -190,7 +190,7 @@
int flags = fcntl(fd, F_GETFD);
if (flags == -1) {
- /* Native Client doesn't implement |fcntl|. */
+ // Native Client doesn't implement |fcntl|.
if (errno != ENOSYS) {
abort();
}
@@ -211,9 +211,9 @@
assert(kUnset == 0);
if (fd == kUnset) {
- /* Because we want to keep |urandom_fd| in the BSS, we have to initialise
- * it to zero. But zero is a valid file descriptor too. Thus if dup
- * returned zero we dup it again to get a non-zero number. */
+ // Because we want to keep |urandom_fd| in the BSS, we have to initialise
+ // it to zero. But zero is a valid file descriptor too. Thus if dup
+ // returned zero we dup it again to get a non-zero number.
fd = dup(fd);
close(kUnset);
@@ -238,8 +238,8 @@
void __msan_unpoison(void *, size_t);
#endif
-/* fill_with_entropy writes |len| bytes of entropy into |out|. It returns one
- * on success and zero on error. */
+// fill_with_entropy writes |len| bytes of entropy into |out|. It returns one
+// on success and zero on error.
static char fill_with_entropy(uint8_t *out, size_t len) {
while (len > 0) {
ssize_t r;
@@ -252,13 +252,13 @@
#if defined(OPENSSL_MSAN)
if (r > 0) {
- /* MSAN doesn't recognise |syscall| and thus doesn't notice that we
- * have initialised the output buffer. */
+ // MSAN doesn't recognise |syscall| and thus doesn't notice that we
+ // have initialised the output buffer.
__msan_unpoison(out, r);
}
-#endif /* OPENSSL_MSAN */
+#endif // OPENSSL_MSAN
-#else /* USE_NR_getrandom */
+#else // USE_NR_getrandom
abort();
#endif
} else {
@@ -277,7 +277,7 @@
return 1;
}
-/* CRYPTO_sysrand puts |requested| random bytes into |out|. */
+// CRYPTO_sysrand puts |requested| random bytes into |out|.
void CRYPTO_sysrand(uint8_t *out, size_t requested) {
if (requested == 0) {
return;
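For reference, a simplified sketch of the shape of |fill_with_entropy| with
only the file-descriptor branch: retry |read| on EINTR and loop on short
reads. |fd| is assumed to be the already-initialized urandom descriptor:

```c
#include <errno.h>
#include <stdint.h>
#include <unistd.h>

// Read exactly |len| bytes from |fd| into |out|, retrying interrupted and
// short reads. Returns one on success and zero on error.
static int read_full(int fd, uint8_t *out, size_t len) {
  while (len > 0) {
    ssize_t r;
    do {
      r = read(fd, out, len);
    } while (r == -1 && errno == EINTR);
    if (r <= 0) {
      return 0;
    }
    out += r;
    len -= (size_t)r;
  }
  return 1;
}
```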
diff --git a/crypto/fipsmodule/rsa/blinding.c b/crypto/fipsmodule/rsa/blinding.c
index 71feb3b..d956057 100644
--- a/crypto/fipsmodule/rsa/blinding.c
+++ b/crypto/fipsmodule/rsa/blinding.c
@@ -121,8 +121,8 @@
#define BN_BLINDING_COUNTER 32
struct bn_blinding_st {
- BIGNUM *A; /* The base blinding factor, Montgomery-encoded. */
- BIGNUM *Ai; /* The inverse of the blinding factor, Montgomery-encoded. */
+ BIGNUM *A; // The base blinding factor, Montgomery-encoded.
+ BIGNUM *Ai; // The inverse of the blinding factor, Montgomery-encoded.
unsigned counter;
};
@@ -147,7 +147,7 @@
goto err;
}
- /* The blinding values need to be created before this blinding can be used. */
+ // The blinding values need to be created before this blinding can be used.
ret->counter = BN_BLINDING_COUNTER - 1;
return ret;
@@ -170,7 +170,7 @@
static int bn_blinding_update(BN_BLINDING *b, const BIGNUM *e,
const BN_MONT_CTX *mont, BN_CTX *ctx) {
if (++b->counter == BN_BLINDING_COUNTER) {
- /* re-create blinding parameters */
+ // re-create blinding parameters
if (!bn_blinding_create_param(b, e, mont, ctx)) {
goto err;
}
@@ -185,10 +185,10 @@
return 1;
err:
- /* |A| and |Ai| may be in an inconsistent state so they both need to be
- * replaced the next time this blinding is used. Note that this is only
- * sufficient because support for |BN_BLINDING_NO_UPDATE| and
- * |BN_BLINDING_NO_RECREATE| was previously dropped. */
+ // |A| and |Ai| may be in an inconsistent state so they both need to be
+ // replaced the next time this blinding is used. Note that this is only
+ // sufficient because support for |BN_BLINDING_NO_UPDATE| and
+ // |BN_BLINDING_NO_RECREATE| was previously dropped.
b->counter = BN_BLINDING_COUNTER - 1;
return 0;
@@ -196,9 +196,8 @@
int BN_BLINDING_convert(BIGNUM *n, BN_BLINDING *b, const BIGNUM *e,
const BN_MONT_CTX *mont, BN_CTX *ctx) {
- /* |n| is not Montgomery-encoded and |b->A| is. |BN_mod_mul_montgomery|
- * cancels one Montgomery factor, so the resulting value of |n| is unencoded.
- */
+ // |n| is not Montgomery-encoded and |b->A| is. |BN_mod_mul_montgomery|
+ // cancels one Montgomery factor, so the resulting value of |n| is unencoded.
if (!bn_blinding_update(b, e, mont, ctx) ||
!BN_mod_mul_montgomery(n, n, b->A, mont, ctx)) {
return 0;
@@ -209,9 +208,8 @@
int BN_BLINDING_invert(BIGNUM *n, const BN_BLINDING *b, BN_MONT_CTX *mont,
BN_CTX *ctx) {
- /* |n| is not Montgomery-encoded and |b->A| is. |BN_mod_mul_montgomery|
- * cancels one Montgomery factor, so the resulting value of |n| is unencoded.
- */
+ // |n| is not Montgomery-encoded and |b->A| is. |BN_mod_mul_montgomery|
+ // cancels one Montgomery factor, so the resulting value of |n| is unencoded.
return BN_mod_mul_montgomery(n, n, b->Ai, mont, ctx);
}
@@ -225,8 +223,8 @@
return 0;
}
- /* |BN_from_montgomery| + |BN_mod_inverse_blinded| is equivalent to, but
- * more efficient than, |BN_mod_inverse_blinded| + |BN_to_montgomery|. */
+ // |BN_from_montgomery| + |BN_mod_inverse_blinded| is equivalent to, but
+ // more efficient than, |BN_mod_inverse_blinded| + |BN_to_montgomery|.
if (!BN_from_montgomery(b->Ai, b->A, mont, ctx)) {
OPENSSL_PUT_ERROR(RSA, ERR_R_INTERNAL_ERROR);
return 0;
@@ -242,8 +240,8 @@
return 0;
}
- /* For reasonably-sized RSA keys, it should almost never be the case that a
- * random value doesn't have an inverse. */
+ // For reasonably-sized RSA keys, it should almost never be the case that a
+ // random value doesn't have an inverse.
if (retry_counter-- == 0) {
OPENSSL_PUT_ERROR(RSA, RSA_R_TOO_MANY_ITERATIONS);
return 0;
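The Montgomery details above can obscure the basic blinding idea. A toy
round-trip with machine integers and a textbook-sized key (all values are
illustrative; no Montgomery encoding, not constant-time):

```c
#include <assert.h>
#include <stdint.h>

// Square-and-multiply modular exponentiation for toy-sized values.
static uint64_t modexp(uint64_t b, uint64_t e, uint64_t n) {
  uint64_t r = 1;
  b %= n;
  while (e > 0) {
    if (e & 1) r = (r * b) % n;
    b = (b * b) % n;
    e >>= 1;
  }
  return r;
}

int main(void) {
  const uint64_t n = 3233, e = 17, d = 2753;  // textbook RSA toy key
  const uint64_t m = 65, r = 7, r_inv = 462;  // r * r_inv = 1 (mod n)
  assert((r * r_inv) % n == 1);
  uint64_t blinded = (m * modexp(r, e, n)) % n;  // BN_BLINDING_convert
  uint64_t sig = modexp(blinded, d, n);          // private operation
  uint64_t unblinded = (sig * r_inv) % n;        // BN_BLINDING_invert
  assert(unblinded == modexp(m, d, n));  // same result; |r| masks the input
  return 0;
}
```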
diff --git a/crypto/fipsmodule/rsa/internal.h b/crypto/fipsmodule/rsa/internal.h
index fb5ffff..67f2cb9 100644
--- a/crypto/fipsmodule/rsa/internal.h
+++ b/crypto/fipsmodule/rsa/internal.h
@@ -67,7 +67,7 @@
#endif
-/* Default implementations of RSA operations. */
+// Default implementations of RSA operations.
const RSA_METHOD *RSA_default_method(void);
@@ -107,29 +107,29 @@
int RSA_padding_add_none(uint8_t *to, size_t to_len, const uint8_t *from,
size_t from_len);
-/* RSA_private_transform calls either the method-specific |private_transform|
- * function (if given) or the generic one. See the comment for
- * |private_transform| in |rsa_meth_st|. */
+// RSA_private_transform calls either the method-specific |private_transform|
+// function (if given) or the generic one. See the comment for
+// |private_transform| in |rsa_meth_st|.
int RSA_private_transform(RSA *rsa, uint8_t *out, const uint8_t *in,
size_t len);
-/* The following utility functions are exported for test purposes. */
+// The following utility functions are exported for test purposes.
extern const BN_ULONG kBoringSSLRSASqrtTwo[];
extern const size_t kBoringSSLRSASqrtTwoLen;
-/* rsa_less_than_words returns one if |a| < |b| and zero otherwise, where |a|
- * and |b| both are |len| words long. It runs in constant time. */
+// rsa_less_than_words returns one if |a| < |b| and zero otherwise, where |a|
+// and |b| both are |len| words long. It runs in constant time.
int rsa_less_than_words(const BN_ULONG *a, const BN_ULONG *b, size_t len);
-/* rsa_greater_than_pow2 returns one if |b| is greater than 2^|n| and zero
- * otherwise. */
+// rsa_greater_than_pow2 returns one if |b| is greater than 2^|n| and zero
+// otherwise.
int rsa_greater_than_pow2(const BIGNUM *b, int n);
#if defined(__cplusplus)
-} /* extern C */
+} // extern C
#endif
-#endif /* OPENSSL_HEADER_RSA_INTERNAL_H */
+#endif // OPENSSL_HEADER_RSA_INTERNAL_H
diff --git a/crypto/fipsmodule/rsa/padding.c b/crypto/fipsmodule/rsa/padding.c
index 9f002d2..9d88dba 100644
--- a/crypto/fipsmodule/rsa/padding.c
+++ b/crypto/fipsmodule/rsa/padding.c
@@ -74,7 +74,7 @@
int RSA_padding_add_PKCS1_type_1(uint8_t *to, size_t to_len,
const uint8_t *from, size_t from_len) {
- /* See RFC 8017, section 9.2. */
+ // See RFC 8017, section 9.2.
if (to_len < RSA_PKCS1_PADDING_SIZE) {
OPENSSL_PUT_ERROR(RSA, RSA_R_KEY_SIZE_TOO_SMALL);
return 0;
@@ -96,20 +96,20 @@
int RSA_padding_check_PKCS1_type_1(uint8_t *out, size_t *out_len,
size_t max_out, const uint8_t *from,
size_t from_len) {
- /* See RFC 8017, section 9.2. This is part of signature verification and thus
- * does not need to run in constant-time. */
+ // See RFC 8017, section 9.2. This is part of signature verification and thus
+ // does not need to run in constant-time.
if (from_len < 2) {
OPENSSL_PUT_ERROR(RSA, RSA_R_DATA_TOO_SMALL);
return 0;
}
- /* Check the header. */
+ // Check the header.
if (from[0] != 0 || from[1] != 1) {
OPENSSL_PUT_ERROR(RSA, RSA_R_BLOCK_TYPE_IS_NOT_01);
return 0;
}
- /* Scan over padded data, looking for the 00. */
+ // Scan over padded data, looking for the 00.
size_t pad;
for (pad = 2 /* header */; pad < from_len; pad++) {
if (from[pad] == 0x00) {
@@ -132,7 +132,7 @@
return 0;
}
- /* Skip over the 00. */
+ // Skip over the 00.
pad++;
if (from_len - pad > max_out) {
@@ -163,7 +163,7 @@
int RSA_padding_add_PKCS1_type_2(uint8_t *to, size_t to_len,
const uint8_t *from, size_t from_len) {
- /* See RFC 8017, section 7.2.1. */
+ // See RFC 8017, section 7.2.1.
if (to_len < RSA_PKCS1_PADDING_SIZE) {
OPENSSL_PUT_ERROR(RSA, RSA_R_KEY_SIZE_TOO_SMALL);
return 0;
@@ -195,11 +195,11 @@
return 0;
}
- /* PKCS#1 v1.5 decryption. See "PKCS #1 v2.2: RSA Cryptography
- * Standard", section 7.2.2. */
+ // PKCS#1 v1.5 decryption. See "PKCS #1 v2.2: RSA Cryptography
+ // Standard", section 7.2.2.
if (from_len < RSA_PKCS1_PADDING_SIZE) {
- /* |from| is zero-padded to the size of the RSA modulus, a public value, so
- * this can be rejected in non-constant time. */
+ // |from| is zero-padded to the size of the RSA modulus, a public value, so
+ // this can be rejected in non-constant time.
OPENSSL_PUT_ERROR(RSA, RSA_R_KEY_SIZE_TOO_SMALL);
return 0;
}
@@ -215,24 +215,24 @@
looking_for_index = constant_time_select_w(equals0, 0, looking_for_index);
}
- /* The input must begin with 00 02. */
+ // The input must begin with 00 02.
crypto_word_t valid_index = first_byte_is_zero;
valid_index &= second_byte_is_two;
- /* We must have found the end of PS. */
+ // We must have found the end of PS.
valid_index &= ~looking_for_index;
- /* PS must be at least 8 bytes long, and it starts two bytes into |from|. */
+ // PS must be at least 8 bytes long, and it starts two bytes into |from|.
valid_index &= constant_time_ge_w(zero_index, 2 + 8);
- /* Skip the zero byte. */
+ // Skip the zero byte.
zero_index++;
- /* NOTE: Although this logic attempts to be constant time, the API contracts
- * of this function and |RSA_decrypt| with |RSA_PKCS1_PADDING| make it
- * impossible to completely avoid Bleichenbacher's attack. Consumers should
- * use |RSA_PADDING_NONE| and perform the padding check in constant-time
- * combined with a swap to a random session key or other mitigation. */
+ // NOTE: Although this logic attempts to be constant time, the API contracts
+ // of this function and |RSA_decrypt| with |RSA_PKCS1_PADDING| make it
+ // impossible to completely avoid Bleichenbacher's attack. Consumers should
+ // use |RSA_PADDING_NONE| and perform the padding check in constant-time
+ // combined with a swap to a random session key or other mitigation.
if (!valid_index) {
OPENSSL_PUT_ERROR(RSA, RSA_R_PKCS_DECODING_ERROR);
return 0;
@@ -240,8 +240,8 @@
const size_t msg_len = from_len - zero_index;
if (msg_len > max_out) {
- /* This shouldn't happen because this function is always called with
- * |max_out| as the key size and |from_len| is bounded by the key size. */
+ // This shouldn't happen because this function is always called with
+ // |max_out| as the key size and |from_len| is bounded by the key size.
OPENSSL_PUT_ERROR(RSA, RSA_R_PKCS_DECODING_ERROR);
return 0;
}
@@ -397,12 +397,12 @@
size_t mdlen = EVP_MD_size(md);
- /* The encoded message is one byte smaller than the modulus to ensure that it
- * doesn't end up greater than the modulus. Thus there's an extra "+1" here
- * compared to https://tools.ietf.org/html/rfc2437#section-9.1.1.2. */
+ // The encoded message is one byte smaller than the modulus to ensure that it
+ // doesn't end up greater than the modulus. Thus there's an extra "+1" here
+ // compared to https://tools.ietf.org/html/rfc2437#section-9.1.1.2.
if (from_len < 1 + 2*mdlen + 1) {
- /* 'from_len' is the length of the modulus, i.e. does not depend on the
- * particular ciphertext. */
+ // 'from_len' is the length of the modulus, i.e. does not depend on the
+ // particular ciphertext.
goto decoding_err;
}
@@ -470,8 +470,8 @@
return 1;
decoding_err:
- /* to avoid chosen ciphertext attacks, the error message should not reveal
- * which kind of decoding error happened */
+ // to avoid chosen ciphertext attacks, the error message should not reveal
+ // which kind of decoding error happened
OPENSSL_PUT_ERROR(RSA, RSA_R_OAEP_DECODING_ERROR);
err:
OPENSSL_free(db);
@@ -499,10 +499,10 @@
hLen = EVP_MD_size(Hash);
- /* Negative sLen has special meanings:
- * -1 sLen == hLen
- * -2 salt length is autorecovered from signature
- * -N reserved */
+ // Negative sLen has special meanings:
+ // -1 sLen == hLen
+ // -2 salt length is autorecovered from signature
+ // -N reserved
if (sLen == -1) {
sLen = hLen;
} else if (sLen == -2) {
@@ -523,7 +523,7 @@
emLen--;
}
if (emLen < (int)hLen + 2 || emLen < ((int)hLen + sLen + 2)) {
- /* sLen can be small negative */
+    // sLen can be a small negative number
OPENSSL_PUT_ERROR(RSA, RSA_R_DATA_TOO_LARGE);
goto err;
}
@@ -612,10 +612,10 @@
goto err;
}
- /* Negative sLenRequested has special meanings:
- * -1 sLen == hLen
- * -2 salt length is maximized
- * -N reserved */
+ // Negative sLenRequested has special meanings:
+ // -1 sLen == hLen
+ // -2 salt length is maximized
+ // -N reserved
size_t sLen;
if (sLenRequested == -1) {
sLen = hLen;
@@ -658,16 +658,16 @@
goto err;
}
- /* Generate dbMask in place then perform XOR on it */
+ // Generate dbMask in place then perform XOR on it
if (!PKCS1_MGF1(EM, maskedDBLen, H, hLen, mgf1Hash)) {
goto err;
}
p = EM;
- /* Initial PS XORs with all zeroes which is a NOP so just update
- * pointer. Note from a test above this value is guaranteed to
- * be non-negative. */
+  // Initial PS XORs with all zeroes, which is a NOP, so just update the
+  // pointer. Note from a test above this value is guaranteed to
+  // be non-negative.
p += emLen - sLen - hLen - 2;
*p++ ^= 0x1;
if (sLen > 0) {
@@ -679,7 +679,7 @@
EM[0] &= 0xFF >> (8 - MSBits);
}
- /* H is already in place so just set final 0xbc */
+ // H is already in place so just set final 0xbc
EM[emLen - 1] = 0xbc;
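The checks in this file walk the EMSA-PKCS1-v1_5 layout from RFC 8017,
section 9.2. A sketch of the corresponding padding construction,
EM = 0x00 || 0x01 || PS || 0x00 || T with at least eight 0xff bytes of PS
(the helper is illustrative, not the function used here):

```c
#include <stdint.h>
#include <string.h>

// Build EM = 0x00 || 0x01 || PS (0xff bytes) || 0x00 || T in |em|, where |t|
// is the DER DigestInfo. Returns zero if |t| does not fit.
static int pkcs1_type_1_pad(uint8_t *em, size_t em_len,
                            const uint8_t *t, size_t t_len) {
  if (em_len < t_len + 11) {  // 3 fixed bytes + at least 8 bytes of PS
    return 0;
  }
  em[0] = 0x00;
  em[1] = 0x01;
  memset(em + 2, 0xff, em_len - t_len - 3);  // PS
  em[em_len - t_len - 1] = 0x00;             // separator
  memcpy(em + em_len - t_len, t, t_len);     // T
  return 1;
}
```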
diff --git a/crypto/fipsmodule/rsa/rsa.c b/crypto/fipsmodule/rsa/rsa.c
index a434cb1..17348c1 100644
--- a/crypto/fipsmodule/rsa/rsa.c
+++ b/crypto/fipsmodule/rsa/rsa.c
@@ -301,25 +301,25 @@
return CRYPTO_get_ex_data(&rsa->ex_data, idx);
}
-/* SSL_SIG_LENGTH is the size of an SSL/TLS (prior to TLS 1.2) signature: it's
- * the length of an MD5 and SHA1 hash. */
+// SSL_SIG_LENGTH is the size of an SSL/TLS (prior to TLS 1.2) signature: it's
+// the length of an MD5 and SHA1 hash.
static const unsigned SSL_SIG_LENGTH = 36;
-/* pkcs1_sig_prefix contains the ASN.1, DER encoded prefix for a hash that is
- * to be signed with PKCS#1. */
+// pkcs1_sig_prefix contains the ASN.1, DER encoded prefix for a hash that is
+// to be signed with PKCS#1.
struct pkcs1_sig_prefix {
- /* nid identifies the hash function. */
+ // nid identifies the hash function.
int nid;
- /* hash_len is the expected length of the hash function. */
+ // hash_len is the expected length of the hash function.
uint8_t hash_len;
- /* len is the number of bytes of |bytes| which are valid. */
+ // len is the number of bytes of |bytes| which are valid.
uint8_t len;
- /* bytes contains the DER bytes. */
+ // bytes contains the DER bytes.
uint8_t bytes[19];
};
-/* kPKCS1SigPrefixes contains the ASN.1 prefixes for PKCS#1 signatures with
- * different hash functions. */
+// kPKCS1SigPrefixes contains the ASN.1 prefixes for PKCS#1 signatures with
+// different hash functions.
static const struct pkcs1_sig_prefix kPKCS1SigPrefixes[] = {
{
NID_md5,
@@ -374,7 +374,7 @@
unsigned i;
if (hash_nid == NID_md5_sha1) {
- /* Special case: SSL signature, just check the length. */
+ // Special case: SSL signature, just check the length.
if (msg_len != SSL_SIG_LENGTH) {
OPENSSL_PUT_ERROR(RSA, RSA_R_INVALID_MESSAGE_LENGTH);
return 0;
@@ -516,8 +516,8 @@
goto out;
}
- /* Check that no other information follows the hash value (FIPS 186-4 Section
- * 5.5) and it matches the expected hash. */
+ // Check that no other information follows the hash value (FIPS 186-4 Section
+ // 5.5) and it matches the expected hash.
if (len != signed_msg_len || OPENSSL_memcmp(buf, signed_msg, len) != 0) {
OPENSSL_PUT_ERROR(RSA, RSA_R_BAD_SIGNATURE);
goto out;
@@ -571,7 +571,7 @@
int ok = 0, has_crt_values;
if (RSA_is_opaque(key)) {
- /* Opaque keys can't be checked. */
+ // Opaque keys can't be checked.
return 1;
}
@@ -586,8 +586,8 @@
}
if (!key->d || !key->p) {
- /* For a public key, or without p and q, there's nothing that can be
- * checked. */
+ // For a public key, or without p and q, there's nothing that can be
+ // checked.
return 1;
}
@@ -608,7 +608,7 @@
BN_init(&iqmp_times_q);
if (!BN_mul(&n, key->p, key->q, ctx) ||
- /* lcm = lcm(p, q) */
+ // lcm = lcm(p, q)
!BN_sub(&pm1, key->p, BN_value_one()) ||
!BN_sub(&qm1, key->q, BN_value_one()) ||
!BN_mul(&lcm, &pm1, &qm1, ctx) ||
@@ -619,7 +619,7 @@
if (!BN_div(&lcm, NULL, &lcm, &gcd, ctx) ||
!BN_gcd(&gcd, &pm1, &qm1, ctx) ||
- /* de = d*e mod lcm(p, q). */
+ // de = d*e mod lcm(p, q).
!BN_mod_mul(&de, key->d, key->e, &lcm, ctx)) {
OPENSSL_PUT_ERROR(RSA, ERR_LIB_BN);
goto out;
@@ -643,11 +643,11 @@
}
if (has_crt_values) {
- if (/* dmp1 = d mod (p-1) */
+ if (// dmp1 = d mod (p-1)
!BN_mod(&dmp1, key->d, &pm1, ctx) ||
- /* dmq1 = d mod (q-1) */
+ // dmq1 = d mod (q-1)
!BN_mod(&dmq1, key->d, &qm1, ctx) ||
- /* iqmp = q^-1 mod p */
+ // iqmp = q^-1 mod p
!BN_mod_mul(&iqmp_times_q, key->iqmp, key->q, key->p, ctx)) {
OPENSSL_PUT_ERROR(RSA, ERR_LIB_BN);
goto out;
@@ -680,7 +680,7 @@
}
-/* This is the product of the 132 smallest odd primes, from 3 to 751. */
+// This is the product of the 132 smallest odd primes, from 3 to 751.
static const BN_ULONG kSmallFactorsLimbs[] = {
TOBN(0xc4309333, 0x3ef4e3e1), TOBN(0x71161eb6, 0xcd2d655f),
TOBN(0x95e2238c, 0x0bf94862), TOBN(0x3eb233d3, 0x24f7912b),
@@ -703,7 +703,7 @@
int RSA_check_fips(RSA *key) {
if (RSA_is_opaque(key)) {
- /* Opaque keys can't be checked. */
+ // Opaque keys can't be checked.
OPENSSL_PUT_ERROR(RSA, RSA_R_PUBLIC_KEY_VALIDATION_FAILED);
return 0;
}
@@ -723,7 +723,7 @@
int ret = 1;
- /* Perform partial public key validation of RSA keys (SP 800-89 5.3.3). */
+ // Perform partial public key validation of RSA keys (SP 800-89 5.3.3).
enum bn_primality_result_t primality_result;
if (BN_num_bits(key->e) <= 16 ||
BN_num_bits(key->e) > 256 ||
@@ -742,15 +742,15 @@
BN_CTX_free(ctx);
if (!ret || key->d == NULL || key->p == NULL) {
- /* On a failure or on only a public key, there's nothing else can be
- * checked. */
+    // On a failure or on only a public key, there's nothing else that can be
+ // checked.
return ret;
}
- /* FIPS pairwise consistency test (FIPS 140-2 4.9.2). Per FIPS 140-2 IG,
- * section 9.9, it is not known whether |rsa| will be used for signing or
- * encryption, so either pair-wise consistency self-test is acceptable. We
- * perform a signing test. */
+ // FIPS pairwise consistency test (FIPS 140-2 4.9.2). Per FIPS 140-2 IG,
+ // section 9.9, it is not known whether |rsa| will be used for signing or
+ // encryption, so either pair-wise consistency self-test is acceptable. We
+ // perform a signing test.
uint8_t data[32] = {0};
unsigned sig_len = RSA_size(key);
uint8_t *sig = OPENSSL_malloc(sig_len);
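Conceptually, the |kPKCS1SigPrefixes| table above supplies the DER DigestInfo
prefix that is concatenated with the raw hash to form the message that gets
padded and signed. A sketch for the SHA-256 case (the prefix bytes are the
standard DER encoding; the helper itself is hypothetical, and |out| is assumed
large enough):

```c
#include <stdint.h>
#include <string.h>

// Standard DER DigestInfo prefix for SHA-256 (19 bytes).
static const uint8_t kSHA256Prefix[] = {
    0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01,
    0x65, 0x03, 0x04, 0x02, 0x01, 0x05, 0x00, 0x04, 0x20};

// Write prefix || hash into |out| and return the total length (19 + 32).
static size_t build_signed_msg(uint8_t *out, const uint8_t hash[32]) {
  memcpy(out, kSHA256Prefix, sizeof(kSHA256Prefix));
  memcpy(out + sizeof(kSHA256Prefix), hash, 32);
  return sizeof(kSHA256Prefix) + 32;
}
```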
diff --git a/crypto/fipsmodule/rsa/rsa_impl.c b/crypto/fipsmodule/rsa/rsa_impl.c
index b126164..f8cb9e3 100644
--- a/crypto/fipsmodule/rsa/rsa_impl.c
+++ b/crypto/fipsmodule/rsa/rsa_impl.c
@@ -80,15 +80,15 @@
return 0;
}
- /* Mitigate DoS attacks by limiting the exponent size. 33 bits was chosen as
- * the limit based on the recommendations in [1] and [2]. Windows CryptoAPI
- * doesn't support values larger than 32 bits [3], so it is unlikely that
- * exponents larger than 32 bits are being used for anything Windows commonly
- * does.
- *
- * [1] https://www.imperialviolet.org/2012/03/16/rsae.html
- * [2] https://www.imperialviolet.org/2012/03/17/rsados.html
- * [3] https://msdn.microsoft.com/en-us/library/aa387685(VS.85).aspx */
+ // Mitigate DoS attacks by limiting the exponent size. 33 bits was chosen as
+ // the limit based on the recommendations in [1] and [2]. Windows CryptoAPI
+ // doesn't support values larger than 32 bits [3], so it is unlikely that
+ // exponents larger than 32 bits are being used for anything Windows commonly
+ // does.
+ //
+ // [1] https://www.imperialviolet.org/2012/03/16/rsae.html
+ // [2] https://www.imperialviolet.org/2012/03/17/rsados.html
+ // [3] https://msdn.microsoft.com/en-us/library/aa387685(VS.85).aspx
static const unsigned kMaxExponentBits = 33;
if (BN_num_bits(rsa->e) > kMaxExponentBits) {
@@ -96,10 +96,10 @@
return 0;
}
- /* Verify |n > e|. Comparing |rsa_bits| to |kMaxExponentBits| is a small
- * shortcut to comparing |n| and |e| directly. In reality, |kMaxExponentBits|
- * is much smaller than the minimum RSA key size that any application should
- * accept. */
+ // Verify |n > e|. Comparing |rsa_bits| to |kMaxExponentBits| is a small
+ // shortcut to comparing |n| and |e| directly. In reality, |kMaxExponentBits|
+ // is much smaller than the minimum RSA key size that any application should
+ // accept.
if (rsa_bits <= kMaxExponentBits) {
OPENSSL_PUT_ERROR(RSA, RSA_R_KEY_SIZE_TOO_SMALL);
return 0;
@@ -154,7 +154,7 @@
i = RSA_padding_add_PKCS1_type_2(buf, rsa_size, in, in_len);
break;
case RSA_PKCS1_OAEP_PADDING:
- /* Use the default parameters: SHA-1 for both hashes and no label. */
+ // Use the default parameters: SHA-1 for both hashes and no label.
i = RSA_padding_add_PKCS1_OAEP_mgf1(buf, rsa_size, in, in_len,
NULL, 0, NULL, NULL);
break;
@@ -175,7 +175,7 @@
}
if (BN_ucmp(f, rsa->n) >= 0) {
- /* usually the padding functions would catch this */
+ // usually the padding functions would catch this
OPENSSL_PUT_ERROR(RSA, RSA_R_DATA_TOO_LARGE);
goto err;
}
@@ -185,8 +185,8 @@
goto err;
}
- /* put in leading 0 bytes if the number is less than the length of the
- * modulus */
+ // put in leading 0 bytes if the number is less than the length of the
+ // modulus
if (!BN_bn2bin_padded(out, rsa_size, result)) {
OPENSSL_PUT_ERROR(RSA, ERR_R_INTERNAL_ERROR);
goto err;
@@ -208,18 +208,18 @@
return ret;
}
-/* MAX_BLINDINGS_PER_RSA defines the maximum number of cached BN_BLINDINGs per
- * RSA*. Then this limit is exceeded, BN_BLINDING objects will be created and
- * destroyed as needed. */
+// MAX_BLINDINGS_PER_RSA defines the maximum number of cached BN_BLINDINGs per
+// RSA*. When this limit is exceeded, BN_BLINDING objects will be created and
+// destroyed as needed.
#define MAX_BLINDINGS_PER_RSA 1024
-/* rsa_blinding_get returns a BN_BLINDING to use with |rsa|. It does this by
- * allocating one of the cached BN_BLINDING objects in |rsa->blindings|. If
- * none are free, the cache will be extended by a extra element and the new
- * BN_BLINDING is returned.
- *
- * On success, the index of the assigned BN_BLINDING is written to
- * |*index_used| and must be passed to |rsa_blinding_release| when finished. */
+// rsa_blinding_get returns a BN_BLINDING to use with |rsa|. It does this by
+// allocating one of the cached BN_BLINDING objects in |rsa->blindings|. If
+// none are free, the cache will be extended by an extra element and the new
+// BN_BLINDING is returned.
+//
+// On success, the index of the assigned BN_BLINDING is written to
+// |*index_used| and must be passed to |rsa_blinding_release| when finished.
static BN_BLINDING *rsa_blinding_get(RSA *rsa, unsigned *index_used,
BN_CTX *ctx) {
assert(ctx != NULL);
@@ -249,8 +249,8 @@
overflow = rsa->num_blindings >= MAX_BLINDINGS_PER_RSA;
- /* We didn't find a free BN_BLINDING to use so increase the length of
- * the arrays by one and use the newly created element. */
+ // We didn't find a free BN_BLINDING to use so increase the length of
+ // the arrays by one and use the newly created element.
CRYPTO_MUTEX_unlock_write(&rsa->lock);
ret = BN_BLINDING_new();
@@ -259,8 +259,8 @@
}
if (overflow) {
- /* We cannot add any more cached BN_BLINDINGs so we use |ret|
- * and mark it for destruction in |rsa_blinding_release|. */
+ // We cannot add any more cached BN_BLINDINGs so we use |ret|
+ // and mark it for destruction in |rsa_blinding_release|.
*index_used = MAX_BLINDINGS_PER_RSA;
return ret;
}
@@ -302,12 +302,12 @@
return NULL;
}
-/* rsa_blinding_release marks the cached BN_BLINDING at the given index as free
- * for other threads to use. */
+// rsa_blinding_release marks the cached BN_BLINDING at the given index as free
+// for other threads to use.
static void rsa_blinding_release(RSA *rsa, BN_BLINDING *blinding,
unsigned blinding_index) {
if (blinding_index == MAX_BLINDINGS_PER_RSA) {
- /* This blinding wasn't cached. */
+ // This blinding wasn't cached.
BN_BLINDING_free(blinding);
return;
}
@@ -317,7 +317,7 @@
CRYPTO_MUTEX_unlock_write(&rsa->lock);
}
-/* signing */
+// signing
int rsa_default_sign_raw(RSA *rsa, size_t *out_len, uint8_t *out,
size_t max_out, const uint8_t *in, size_t in_len,
int padding) {
@@ -382,7 +382,7 @@
if (padding == RSA_NO_PADDING) {
buf = out;
} else {
- /* Allocate a temporary buffer to hold the padded plaintext. */
+ // Allocate a temporary buffer to hold the padded plaintext.
buf = OPENSSL_malloc(rsa_size);
if (buf == NULL) {
OPENSSL_PUT_ERROR(RSA, ERR_R_MALLOC_FAILURE);
@@ -405,7 +405,7 @@
RSA_padding_check_PKCS1_type_2(out, out_len, rsa_size, buf, rsa_size);
break;
case RSA_PKCS1_OAEP_PADDING:
- /* Use the default parameters: SHA-1 for both hashes and no label. */
+ // Use the default parameters: SHA-1 for both hashes and no label.
ret = RSA_padding_check_PKCS1_OAEP_mgf1(out, out_len, rsa_size, buf,
rsa_size, NULL, 0, NULL, NULL);
break;
@@ -476,7 +476,7 @@
if (padding == RSA_NO_PADDING) {
buf = out;
} else {
- /* Allocate a temporary buffer to hold the padded plaintext. */
+ // Allocate a temporary buffer to hold the padded plaintext.
buf = OPENSSL_malloc(rsa_size);
if (buf == NULL) {
OPENSSL_PUT_ERROR(RSA, ERR_R_MALLOC_FAILURE);
@@ -562,7 +562,7 @@
}
if (BN_ucmp(f, rsa->n) >= 0) {
- /* Usually the padding functions would catch this. */
+ // Usually the padding functions would catch this.
OPENSSL_PUT_ERROR(RSA, RSA_R_DATA_TOO_LARGE);
goto err;
}
@@ -575,10 +575,10 @@
const int do_blinding = (rsa->flags & RSA_FLAG_NO_BLINDING) == 0;
if (rsa->e == NULL && do_blinding) {
- /* We cannot do blinding or verification without |e|, and continuing without
- * those countermeasures is dangerous. However, the Java/Android RSA API
- * requires support for keys where only |d| and |n| (and not |e|) are known.
- * The callers that require that bad behavior set |RSA_FLAG_NO_BLINDING|. */
+ // We cannot do blinding or verification without |e|, and continuing without
+ // those countermeasures is dangerous. However, the Java/Android RSA API
+ // requires support for keys where only |d| and |n| (and not |e|) are known.
+ // The callers that require that bad behavior set |RSA_FLAG_NO_BLINDING|.
OPENSSL_PUT_ERROR(RSA, RSA_R_NO_PUBLIC_EXPONENT);
goto err;
}
@@ -604,15 +604,15 @@
goto err;
}
- /* Verify the result to protect against fault attacks as described in the
- * 1997 paper "On the Importance of Checking Cryptographic Protocols for
- * Faults" by Dan Boneh, Richard A. DeMillo, and Richard J. Lipton. Some
- * implementations do this only when the CRT is used, but we do it in all
- * cases. Section 6 of the aforementioned paper describes an attack that
- * works when the CRT isn't used. That attack is much less likely to succeed
- * than the CRT attack, but there have likely been improvements since 1997.
- *
- * This check is cheap assuming |e| is small; it almost always is. */
+ // Verify the result to protect against fault attacks as described in the
+ // 1997 paper "On the Importance of Checking Cryptographic Protocols for
+ // Faults" by Dan Boneh, Richard A. DeMillo, and Richard J. Lipton. Some
+ // implementations do this only when the CRT is used, but we do it in all
+ // cases. Section 6 of the aforementioned paper describes an attack that
+ // works when the CRT isn't used. That attack is much less likely to succeed
+ // than the CRT attack, but there have likely been improvements since 1997.
+ //
+ // This check is cheap assuming |e| is small; it almost always is.
if (rsa->e != NULL) {
BIGNUM *vrfy = BN_CTX_get(ctx);
if (vrfy == NULL ||
@@ -682,22 +682,22 @@
goto err;
}
- /* compute I mod q */
+ // compute I mod q
if (!BN_mod(r1, I, rsa->q, ctx)) {
goto err;
}
- /* compute r1^dmq1 mod q */
+ // compute r1^dmq1 mod q
if (!BN_mod_exp_mont_consttime(m1, r1, rsa->dmq1, rsa->q, ctx, rsa->mont_q)) {
goto err;
}
- /* compute I mod p */
+ // compute I mod p
if (!BN_mod(r1, I, rsa->p, ctx)) {
goto err;
}
- /* compute r1^dmp1 mod p */
+ // compute r1^dmp1 mod p
if (!BN_mod_exp_mont_consttime(r0, r1, rsa->dmp1, rsa->p, ctx, rsa->mont_p)) {
goto err;
}
@@ -705,8 +705,8 @@
if (!BN_sub(r0, r0, m1)) {
goto err;
}
- /* This will help stop the size of r0 increasing, which does
- * affect the multiply if it optimised for a power of 2 size */
+ // This will help stop the size of r0 increasing, which does
+  // affect the multiply if it is optimised for a power of 2 size
if (BN_is_negative(r0)) {
if (!BN_add(r0, r0, rsa->p)) {
goto err;
@@ -721,12 +721,12 @@
goto err;
}
- /* If p < q it is occasionally possible for the correction of
- * adding 'p' if r0 is negative above to leave the result still
- * negative. This can break the private key operations: the following
- * second correction should *always* correct this rare occurrence.
- * This will *never* happen with OpenSSL generated keys because
- * they ensure p > q [steve] */
+ // If p < q it is occasionally possible for the correction of
+ // adding 'p' if r0 is negative above to leave the result still
+ // negative. This can break the private key operations: the following
+ // second correction should *always* correct this rare occurrence.
+ // This will *never* happen with OpenSSL generated keys because
+ // they ensure p > q [steve]
if (BN_is_negative(r0)) {
if (!BN_add(r0, r0, rsa->p)) {
goto err;
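A toy walk-through of the CRT recombination above with textbook numbers, to
make the m1/r0 bookkeeping concrete (illustrative values only; the real code
operates on BIGNUMs with Montgomery contexts and blinding, and the +p before
the reduction mirrors the negative-value fixups above):

```c
#include <assert.h>
#include <stdint.h>

// Square-and-multiply modular exponentiation for toy-sized values.
static uint64_t modexp(uint64_t b, uint64_t e, uint64_t n) {
  uint64_t r = 1;
  b %= n;
  while (e > 0) {
    if (e & 1) r = (r * b) % n;
    b = (b * b) % n;
    e >>= 1;
  }
  return r;
}

int main(void) {
  const uint64_t p = 61, q = 53, n = p * q;  // toy key, n = 3233, e = 17
  const uint64_t d = 2753;
  const uint64_t dmp1 = d % (p - 1), dmq1 = d % (q - 1);
  const uint64_t iqmp = 38;              // 53 * 38 = 2014 = 1 (mod 61)
  const uint64_t c = 2790;               // ciphertext of m = 65
  uint64_t m1 = modexp(c % q, dmq1, q);  // I mod q, then ^dmq1 mod q
  uint64_t r0 = modexp(c % p, dmp1, p);  // I mod p, then ^dmp1 mod p
  uint64_t h = ((r0 + p - m1 % p) % p * iqmp) % p;  // (r0 - m1) * iqmp mod p
  uint64_t m = m1 + h * q;               // recombine
  assert(m == modexp(c, d, n) && m == 65);  // matches the non-CRT path
  return 0;
}
```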
@@ -753,20 +753,19 @@
return *out != NULL;
}
-/* kBoringSSLRSASqrtTwo is the BIGNUM representation of ⌊2¹⁵³⁵×√2⌋. This is
- * chosen to give enough precision for 3072-bit RSA, the largest key size FIPS
- * specifies. Key sizes beyond this will round up.
- *
- * To verify this number, check that n² < 2³⁰⁷¹ < (n+1)², where n is value
- * represented here. Note the components are listed in little-endian order. Here
- * is some sample Python code to check:
- *
- * >>> TOBN = lambda a, b: a << 32 | b
- * >>> l = [ <paste the contents of kSqrtTwo> ]
- * >>> n = sum(a * 2**(64*i) for i, a in enumerate(l))
- * >>> n**2 < 2**3071 < (n+1)**2
- * True
- */
+// kBoringSSLRSASqrtTwo is the BIGNUM representation of ⌊2¹⁵³⁵×√2⌋. This is
+// chosen to give enough precision for 3072-bit RSA, the largest key size FIPS
+// specifies. Key sizes beyond this will round up.
+//
+// To verify this number, check that n² < 2³⁰⁷¹ < (n+1)², where n is the value
+// represented here. Note the components are listed in little-endian order. Here
+// is some sample Python code to check:
+//
+// >>> TOBN = lambda a, b: a << 32 | b
+// >>> l = [ <paste the contents of kSqrtTwo> ]
+// >>> n = sum(a * 2**(64*i) for i, a in enumerate(l))
+// >>> n**2 < 2**3071 < (n+1)**2
+// True
const BN_ULONG kBoringSSLRSASqrtTwo[] = {
TOBN(0xdea06241, 0xf7aa81c2), TOBN(0xf6a1be3f, 0xca221307),
TOBN(0x332a5e9f, 0x7bda1ebf), TOBN(0x0104dc01, 0xfe32352f),
@@ -787,7 +786,7 @@
OPENSSL_COMPILE_ASSERT(sizeof(BN_ULONG) <= sizeof(crypto_word_t),
crypto_word_t_too_small);
int ret = 0;
- /* Process the words in little-endian order. */
+ // Process the words in little-endian order.
for (size_t i = 0; i < len; i++) {
crypto_word_t eq = constant_time_eq_w(a[i], b[i]);
crypto_word_t lt = constant_time_lt_w(a[i], b[i]);
@@ -805,9 +804,9 @@
return b_bits > n + 1 || (b_bits == n + 1 && !BN_is_pow2(b));
}
-/* generate_prime sets |out| to a prime with length |bits| such that |out|-1 is
- * relatively prime to |e|. If |p| is non-NULL, |out| will also not be close to
- * |p|. */
+// generate_prime sets |out| to a prime with length |bits| such that |out|-1 is
+// relatively prime to |e|. If |p| is non-NULL, |out| will also not be close to
+// |p|.
static int generate_prime(BIGNUM *out, int bits, const BIGNUM *e,
const BIGNUM *p, BN_CTX *ctx, BN_GENCB *cb) {
if (bits < 128 || (bits % BN_BITS2) != 0) {
@@ -815,7 +814,7 @@
return 0;
}
- /* Ensure the bound on |tries| does not overflow. */
+ // Ensure the bound on |tries| does not overflow.
if (bits >= INT_MAX/5) {
OPENSSL_PUT_ERROR(RSA, RSA_R_MODULUS_TOO_LARGE);
return 0;
@@ -828,19 +827,19 @@
goto err;
}
- /* See FIPS 186-4 appendix B.3.3, steps 4 and 5. Note |bits| here is
- * nlen/2. */
+ // See FIPS 186-4 appendix B.3.3, steps 4 and 5. Note |bits| here is
+ // nlen/2.
for (;;) {
- /* Generate a random number of length |bits| where the bottom bit is set
- * (steps 4.2, 4.3, 5.2 and 5.3) and the top bit is set (implied by the
- * bound checked below in steps 4.4 and 5.5). */
+ // Generate a random number of length |bits| where the bottom bit is set
+ // (steps 4.2, 4.3, 5.2 and 5.3) and the top bit is set (implied by the
+ // bound checked below in steps 4.4 and 5.5).
if (!BN_rand(out, bits, BN_RAND_TOP_ONE, BN_RAND_BOTTOM_ODD) ||
!BN_GENCB_call(cb, BN_GENCB_GENERATED, rand_tries++)) {
goto err;
}
if (p != NULL) {
- /* If |p| and |out| are too close, try again (step 5.4). */
+ // If |p| and |out| are too close, try again (step 5.4).
if (!BN_sub(tmp, out, p)) {
goto err;
}
@@ -850,21 +849,21 @@
}
}
- /* If out < 2^(bits-1)×√2, try again (steps 4.4 and 5.5).
- *
- * We check the most significant words, so we retry if ⌊out/2^k⌋ <= ⌊b/2^k⌋,
- * where b = 2^(bits-1)×√2 and k = max(0, bits - 1536). For key sizes up to
- * 3072 (bits = 1536), k = 0, so we are testing that ⌊out⌋ <= ⌊b⌋. out is an
- * integer and b is not, so this is equivalent to out < b. That is, the
- * comparison is exact for FIPS key sizes.
- *
- * For larger keys, the comparison is approximate, leaning towards
- * retrying. That is, we reject a negligible fraction of primes that are
- * within the FIPS bound, but we will never accept a prime outside the
- * bound, ensuring the resulting RSA key is the right size. Specifically, if
- * the FIPS bound holds, we have ⌊out/2^k⌋ < out/2^k < b/2^k. This implies
- * ⌊out/2^k⌋ <= ⌊b/2^k⌋. That is, the FIPS bound implies our bound and so we
- * are slightly tighter. */
+ // If out < 2^(bits-1)×√2, try again (steps 4.4 and 5.5).
+ //
+ // We check the most significant words, so we retry if ⌊out/2^k⌋ <= ⌊b/2^k⌋,
+ // where b = 2^(bits-1)×√2 and k = max(0, bits - 1536). For key sizes up to
+ // 3072 (bits = 1536), k = 0, so we are testing that ⌊out⌋ <= ⌊b⌋. out is an
+ // integer and b is not, so this is equivalent to out < b. That is, the
+ // comparison is exact for FIPS key sizes.
+ //
+ // For larger keys, the comparison is approximate, leaning towards
+ // retrying. That is, we reject a negligible fraction of primes that are
+ // within the FIPS bound, but we will never accept a prime outside the
+ // bound, ensuring the resulting RSA key is the right size. Specifically, if
+ // the FIPS bound holds, we have ⌊out/2^k⌋ < out/2^k < b/2^k. This implies
+ // ⌊out/2^k⌋ <= ⌊b/2^k⌋. That is, the FIPS bound implies our bound and so we
+ // are slightly tighter.
size_t out_len = (size_t)out->top;
assert(out_len == (size_t)bits / BN_BITS2);
size_t to_check = kBoringSSLRSASqrtTwoLen;
@@ -877,13 +876,13 @@
continue;
}
- /* Check gcd(out-1, e) is one (steps 4.5 and 5.6). */
+ // Check gcd(out-1, e) is one (steps 4.5 and 5.6).
if (!BN_sub(tmp, out, BN_value_one()) ||
!BN_gcd(tmp, tmp, e, ctx)) {
goto err;
}
if (BN_is_one(tmp)) {
- /* Test |out| for primality (steps 4.5.1 and 5.6.1). */
+ // Test |out| for primality (steps 4.5.1 and 5.6.1).
int is_probable_prime;
if (!BN_primality_test(&is_probable_prime, out, BN_prime_checks, ctx, 1,
cb)) {
@@ -895,8 +894,8 @@
}
}
- /* If we've tried too many times to find a prime, abort (steps 4.7 and
- * 5.8). */
+ // If we've tried too many times to find a prime, abort (steps 4.7 and
+ // 5.8).
tries++;
if (tries >= bits * 5) {
OPENSSL_PUT_ERROR(RSA, RSA_R_TOO_MANY_ITERATIONS);
@@ -913,15 +912,15 @@
}
int RSA_generate_key_ex(RSA *rsa, int bits, BIGNUM *e_value, BN_GENCB *cb) {
- /* See FIPS 186-4 appendix B.3. This function implements a generalized version
- * of the FIPS algorithm. |RSA_generate_key_fips| performs additional checks
- * for FIPS-compliant key generation. */
+ // See FIPS 186-4 appendix B.3. This function implements a generalized version
+ // of the FIPS algorithm. |RSA_generate_key_fips| performs additional checks
+ // for FIPS-compliant key generation.
- /* Always generate RSA keys which are a multiple of 128 bits. Round |bits|
- * down as needed. */
+ // Always generate RSA keys which are a multiple of 128 bits. Round |bits|
+ // down as needed.
bits &= ~127;
- /* Reject excessively small keys. */
+ // Reject excessively small keys.
if (bits < 256) {
OPENSSL_PUT_ERROR(RSA, RSA_R_KEY_SIZE_TOO_SMALL);
return 0;
@@ -941,7 +940,7 @@
goto bn_err;
}
- /* We need the RSA components non-NULL. */
+ // We need the RSA components non-NULL.
if (!ensure_bignum(&rsa->n) ||
!ensure_bignum(&rsa->d) ||
!ensure_bignum(&rsa->e) ||
@@ -959,8 +958,8 @@
int prime_bits = bits / 2;
do {
- /* Generate p and q, each of size |prime_bits|, using the steps outlined in
- * appendix FIPS 186-4 appendix B.3.3. */
+ // Generate p and q, each of size |prime_bits|, using the steps outlined in
+    // FIPS 186-4 appendix B.3.3.
if (!generate_prime(rsa->p, prime_bits, rsa->e, NULL, ctx, cb) ||
!BN_GENCB_call(cb, 3, 0) ||
!generate_prime(rsa->q, prime_bits, rsa->e, rsa->p, ctx, cb) ||
@@ -974,13 +973,13 @@
rsa->q = tmp;
}
- /* Calculate d = e^(-1) (mod lcm(p-1, q-1)), per FIPS 186-4. This differs
- * from typical RSA implementations which use (p-1)*(q-1).
- *
- * Note this means the size of d might reveal information about p-1 and
- * q-1. However, we do operations with Chinese Remainder Theorem, so we only
- * use d (mod p-1) and d (mod q-1) as exponents. Using a minimal totient
- * does not affect those two values. */
+ // Calculate d = e^(-1) (mod lcm(p-1, q-1)), per FIPS 186-4. This differs
+ // from typical RSA implementations which use (p-1)*(q-1).
+ //
+ // Note this means the size of d might reveal information about p-1 and
+ // q-1. However, we do operations with Chinese Remainder Theorem, so we only
+ // use d (mod p-1) and d (mod q-1) as exponents. Using a minimal totient
+ // does not affect those two values.
if (!BN_sub(pm1, rsa->p, BN_value_one()) ||
!BN_sub(qm1, rsa->q, BN_value_one()) ||
!BN_mul(totient, pm1, qm1, ctx) ||
@@ -990,39 +989,39 @@
goto bn_err;
}
- /* Check that |rsa->d| > 2^|prime_bits| and try again if it fails. See
- * appendix B.3.1's guidance on values for d. */
+ // Check that |rsa->d| > 2^|prime_bits| and try again if it fails. See
+ // appendix B.3.1's guidance on values for d.
} while (!rsa_greater_than_pow2(rsa->d, prime_bits));
- if (/* Calculate n. */
+ if (// Calculate n.
!BN_mul(rsa->n, rsa->p, rsa->q, ctx) ||
- /* Calculate d mod (p-1). */
+ // Calculate d mod (p-1).
!BN_mod(rsa->dmp1, rsa->d, pm1, ctx) ||
- /* Calculate d mod (q-1) */
+ // Calculate d mod (q-1)
!BN_mod(rsa->dmq1, rsa->d, qm1, ctx)) {
goto bn_err;
}
- /* Sanity-check that |rsa->n| has the specified size. This is implied by
- * |generate_prime|'s bounds. */
+ // Sanity-check that |rsa->n| has the specified size. This is implied by
+ // |generate_prime|'s bounds.
if (BN_num_bits(rsa->n) != (unsigned)bits) {
OPENSSL_PUT_ERROR(RSA, ERR_R_INTERNAL_ERROR);
goto err;
}
- /* Calculate inverse of q mod p. Note that although RSA key generation is far
- * from constant-time, |bn_mod_inverse_secret_prime| uses the same modular
- * exponentation logic as in RSA private key operations and, if the RSAZ-1024
- * code is enabled, will be optimized for common RSA prime sizes. */
+ // Calculate inverse of q mod p. Note that although RSA key generation is far
+ // from constant-time, |bn_mod_inverse_secret_prime| uses the same modular
+  // exponentiation logic as in RSA private key operations and, if the RSAZ-1024
+ // code is enabled, will be optimized for common RSA prime sizes.
if (!BN_MONT_CTX_set_locked(&rsa->mont_p, &rsa->lock, rsa->p, ctx) ||
!bn_mod_inverse_secret_prime(rsa->iqmp, rsa->q, rsa->p, ctx,
rsa->mont_p)) {
goto bn_err;
}
- /* The key generation process is complex and thus error-prone. It could be
- * disastrous to generate and then use a bad key so double-check that the key
- * makes sense. */
+ // The key generation process is complex and thus error-prone. It could be
+ // disastrous to generate and then use a bad key so double-check that the key
+ // makes sense.
if (!RSA_check_key(rsa)) {
OPENSSL_PUT_ERROR(RSA, RSA_R_INTERNAL_ERROR);
goto err;
@@ -1043,8 +1042,8 @@
}
int RSA_generate_key_fips(RSA *rsa, int bits, BN_GENCB *cb) {
- /* FIPS 186-4 allows 2048-bit and 3072-bit RSA keys (1024-bit and 1536-bit
- * primes, respectively) with the prime generation method we use. */
+ // FIPS 186-4 allows 2048-bit and 3072-bit RSA keys (1024-bit and 1536-bit
+ // primes, respectively) with the prime generation method we use.
if (bits != 2048 && bits != 3072) {
OPENSSL_PUT_ERROR(RSA, RSA_R_BAD_RSA_PARAMETERS);
return 0;
@@ -1060,9 +1059,9 @@
}
DEFINE_METHOD_FUNCTION(RSA_METHOD, RSA_default_method) {
- /* All of the methods are NULL to make it easier for the compiler/linker to
- * drop unused functions. The wrapper functions will select the appropriate
- * |rsa_default_*| implementation. */
+ // All of the methods are NULL to make it easier for the compiler/linker to
+ // drop unused functions. The wrapper functions will select the appropriate
+ // |rsa_default_*| implementation.
OPENSSL_memset(out, 0, sizeof(RSA_METHOD));
out->common.is_static = 1;
out->flags = RSA_FLAG_CACHE_PUBLIC | RSA_FLAG_CACHE_PRIVATE;
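To make the lcm(p-1, q-1) totient choice above concrete, a toy computation
with the same textbook primes (illustrative only; a brute-force inverse
stands in for the BIGNUM modular inverse):

```c
#include <assert.h>
#include <stdint.h>

static uint64_t gcd(uint64_t a, uint64_t b) {
  while (b != 0) {
    uint64_t t = a % b;
    a = b;
    b = t;
  }
  return a;
}

int main(void) {
  const uint64_t p = 61, q = 53, e = 17;
  // lcm(60, 52) = 780, versus (p-1)(q-1) = 3120.
  const uint64_t lcm = (p - 1) * (q - 1) / gcd(p - 1, q - 1);
  uint64_t d = 1;
  while ((d * e) % lcm != 1) {  // brute-force inverse; fine for a toy
    d++;
  }
  // The minimal d is smaller with the lcm totient: 413 instead of 2753.
  assert(lcm == 780 && d == 413);
  return 0;
}
```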
diff --git a/crypto/fipsmodule/sha/sha1-altivec.c b/crypto/fipsmodule/sha/sha1-altivec.c
index 14e2bae..3152827 100644
--- a/crypto/fipsmodule/sha/sha1-altivec.c
+++ b/crypto/fipsmodule/sha/sha1-altivec.c
@@ -54,14 +54,14 @@
* copied and put under another distribution licence
* [including the GNU Public Licence.] */
-/* Altivec-optimized SHA1 in C. This is tested on ppc64le only.
- *
- * References:
- * https://software.intel.com/en-us/articles/improving-the-performance-of-the-secure-hash-algorithm-1
- * http://arctic.org/~dean/crypto/sha1.html
- *
- * This code used the generic SHA-1 from OpenSSL as a basis and AltiVec
- * optimisations were added on top. */
+// Altivec-optimized SHA1 in C. This is tested on ppc64le only.
+//
+// References:
+// https://software.intel.com/en-us/articles/improving-the-performance-of-the-secure-hash-algorithm-1
+// http://arctic.org/~dean/crypto/sha1.html
+//
+// This code used the generic SHA-1 from OpenSSL as a basis and AltiVec
+// optimisations were added on top.
#include <openssl/sha.h>
@@ -76,11 +76,11 @@
typedef vector unsigned int vec_uint32_t;
typedef vector unsigned char vec_uint8_t;
-/* Vector constants */
+// Vector constants
static const vec_uint8_t k_swap_endianness = {3, 2, 1, 0, 7, 6, 5, 4,
11, 10, 9, 8, 15, 14, 13, 12};
-/* Shift amounts for byte and bit shifts and rotations */
+// Shift amounts for byte and bit shifts and rotations
static const vec_uint8_t k_4_bytes = {32, 32, 32, 32, 32, 32, 32, 32,
32, 32, 32, 32, 32, 32, 32, 32};
static const vec_uint8_t k_12_bytes = {96, 96, 96, 96, 96, 96, 96, 96,
@@ -91,18 +91,18 @@
#define K_40_59 0x8f1bbcdcUL
#define K_60_79 0xca62c1d6UL
-/* Vector versions of the above. */
+// Vector versions of the above.
static const vec_uint32_t K_00_19_x_4 = {K_00_19, K_00_19, K_00_19, K_00_19};
static const vec_uint32_t K_20_39_x_4 = {K_20_39, K_20_39, K_20_39, K_20_39};
static const vec_uint32_t K_40_59_x_4 = {K_40_59, K_40_59, K_40_59, K_40_59};
static const vec_uint32_t K_60_79_x_4 = {K_60_79, K_60_79, K_60_79, K_60_79};
-/* vector message scheduling: compute message schedule for round i..i+3 where i
- * is divisible by 4. We return the schedule w[i..i+3] as a vector. In
- * addition, we also precompute sum w[i..+3] and an additive constant K. This
- * is done to offload some computation of f() in the integer execution units.
- *
- * Byte shifting code below may not be correct for big-endian systems. */
+// vector message scheduling: compute message schedule for round i..i+3 where i
+// is divisible by 4. We return the schedule w[i..i+3] as a vector. In
+// addition, we also precompute sum w[i..i+3] and an additive constant K. This
+// is done to offload some computation of f() in the integer execution units.
+//
+// Byte shifting code below may not be correct for big-endian systems.
static vec_uint32_t sched_00_15(vec_uint32_t *pre_added, const void *data,
vec_uint32_t k) {
const vector unsigned char unaligned_data =
@@ -113,17 +113,17 @@
return w;
}
-/* Compute w[i..i+3] using these steps for i in [16, 20, 24, 28]
- *
- * w'[i ] = (w[i-3] ^ w[i-8] ^ w[i-14] ^ w[i-16]) <<< 1
- * w'[i+1] = (w[i-2] ^ w[i-7] ^ w[i-13] ^ w[i-15]) <<< 1
- * w'[i+2] = (w[i-1] ^ w[i-6] ^ w[i-12] ^ w[i-14]) <<< 1
- * w'[i+3] = ( 0 ^ w[i-5] ^ w[i-11] ^ w[i-13]) <<< 1
- *
- * w[ i] = w'[ i]
- * w[i+1] = w'[i+1]
- * w[i+2] = w'[i+2]
- * w[i+3] = w'[i+3] ^ (w'[i] <<< 1) */
+// Compute w[i..i+3] using these steps for i in [16, 20, 24, 28]
+//
+// w'[i ] = (w[i-3] ^ w[i-8] ^ w[i-14] ^ w[i-16]) <<< 1
+// w'[i+1] = (w[i-2] ^ w[i-7] ^ w[i-13] ^ w[i-15]) <<< 1
+// w'[i+2] = (w[i-1] ^ w[i-6] ^ w[i-12] ^ w[i-14]) <<< 1
+// w'[i+3] = ( 0 ^ w[i-5] ^ w[i-11] ^ w[i-13]) <<< 1
+//
+// w[ i] = w'[ i]
+// w[i+1] = w'[i+1]
+// w[i+2] = w'[i+2]
+// w[i+3] = w'[i+3] ^ (w'[i] <<< 1)
static vec_uint32_t sched_16_31(vec_uint32_t *pre_added, vec_uint32_t minus_4,
vec_uint32_t minus_8, vec_uint32_t minus_12,
vec_uint32_t minus_16, vec_uint32_t k) {
@@ -138,8 +138,8 @@
return w;
}
-/* Compute w[i..i+3] using this relation for i in [32, 36, 40 ... 76]
- * w[i] = (w[i-6] ^ w[i-16] ^ w[i-28] ^ w[i-32]), 2) <<< 2 */
+// Compute w[i..i+3] using this relation for i in [32, 36, 40, ..., 76]:
+// w[i] = (w[i-6] ^ w[i-16] ^ w[i-28] ^ w[i-32]) <<< 2
static vec_uint32_t sched_32_79(vec_uint32_t *pre_added, vec_uint32_t minus_4,
vec_uint32_t minus_8, vec_uint32_t minus_16,
vec_uint32_t minus_28, vec_uint32_t minus_32,
@@ -152,17 +152,17 @@
return w;
}
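
This widened relation follows from expanding each term of the standard recurrence w[i] = (w[i-3] ^ w[i-8] ^ w[i-14] ^ w[i-16]) <<< 1 once more and cancelling duplicated terms, which is valid for i >= 32 (see the Intel article referenced at the top of the file). Since the closest back-reference is now six positions, all four lanes are independent. A scalar sketch (hypothetical helper; ROTL32 as in the previous sketch):

    static void sched_32_79_scalar(uint32_t w[80], int i) {
      for (int j = 0; j < 4; j++) {
        // Closest back-reference is w[i + 3 - 6] = w[i - 3], so none of the
        // four new words depends on another word computed in this step.
        w[i + j] = ROTL32(
            w[i + j - 6] ^ w[i + j - 16] ^ w[i + j - 28] ^ w[i + j - 32], 2);
      }
    }
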
-/* As pointed out by Wei Dai <weidai@eskimo.com>, F() below can be simplified
- * to the code in F_00_19. Wei attributes these optimisations to Peter
- * Gutmann's SHS code, and he attributes it to Rich Schroeppel. #define
- * F(x,y,z) (((x) & (y)) | ((~(x)) & (z))) I've just become aware of another
- * tweak to be made, again from Wei Dai, in F_40_59, (x&a)|(y&a) -> (x|y)&a */
+// As pointed out by Wei Dai <weidai@eskimo.com>, F() below can be simplified
+// to the code in F_00_19. Wei attributes these optimisations to Peter
+// Gutmann's SHS code, and he attributes it to Rich Schroeppel.
+//   #define F(x, y, z) (((x) & (y)) | ((~(x)) & (z)))
+// Another tweak, again from Wei Dai, in F_40_59: (x&a)|(y&a) -> (x|y)&a
#define F_00_19(b, c, d) ((((c) ^ (d)) & (b)) ^ (d))
#define F_20_39(b, c, d) ((b) ^ (c) ^ (d))
#define F_40_59(b, c, d) (((b) & (c)) | (((b) | (c)) & (d)))
#define F_60_79(b, c, d) F_20_39(b, c, d)
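
These simplifications are easy to verify exhaustively: bitwise functions only need checking on single bits. A standalone sanity check, assuming nothing beyond the definitions above:

    #include <assert.h>
    #include <stdint.h>

    int main(void) {
      for (uint32_t b = 0; b <= 1; b++) {
        for (uint32_t c = 0; c <= 1; c++) {
          for (uint32_t d = 0; d <= 1; d++) {
            // Choice function, rounds 0-19: (b & c) | (~b & d).
            assert(((b & c) | (~b & d)) == ((((c) ^ (d)) & (b)) ^ (d)));
            // Majority function, rounds 40-59: (b & c) | (b & d) | (c & d).
            assert(((b & c) | (b & d) | (c & d)) ==
                   (((b) & (c)) | (((b) | (c)) & (d))));
          }
        }
      }
      return 0;
    }
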
-/* We pre-added the K constants during message scheduling. */
+// We pre-added the K constants during message scheduling.
#define BODY_00_19(i, a, b, c, d, e, f) \
do { \
(f) = w[i] + (e) + rotate((a), 5) + F_00_19((b), (c), (d)); \
@@ -318,7 +318,7 @@
BODY_60_79(74, E, T, A, B, C, D);
BODY_60_79(75, D, E, T, A, B, C);
- /* We don't use the last value */
+ // We don't use the last value
(void)sched_32_79(vw + 19, w72, w68, w60, w48, w44, k);
BODY_60_79(76, C, D, E, T, A, B);
BODY_60_79(77, B, C, D, E, T, A);
@@ -345,7 +345,7 @@
}
}
-#endif /* OPENSSL_PPC64LE */
+#endif // OPENSSL_PPC64LE
#undef K_00_19
#undef K_20_39
diff --git a/crypto/fipsmodule/sha/sha1.c b/crypto/fipsmodule/sha/sha1.c
index 7b44563..7ce0193 100644
--- a/crypto/fipsmodule/sha/sha1.c
+++ b/crypto/fipsmodule/sha/sha1.c
@@ -131,11 +131,11 @@
#define K_40_59 0x8f1bbcdcUL
#define K_60_79 0xca62c1d6UL
-/* As pointed out by Wei Dai <weidai@eskimo.com>, F() below can be simplified
- * to the code in F_00_19. Wei attributes these optimisations to Peter
- * Gutmann's SHS code, and he attributes it to Rich Schroeppel. #define
- * F(x,y,z) (((x) & (y)) | ((~(x)) & (z))) I've just become aware of another
- * tweak to be made, again from Wei Dai, in F_40_59, (x&a)|(y&a) -> (x|y)&a */
+// As pointed out by Wei Dai <weidai@eskimo.com>, F() below can be simplified
+// to the code in F_00_19. Wei attributes these optimisations to Peter
+// Gutmann's SHS code, and he attributes it to Rich Schroeppel.
+//   #define F(x, y, z) (((x) & (y)) | ((~(x)) & (z)))
+// Another tweak, again from Wei Dai, in F_40_59: (x&a)|(y&a) -> (x|y)&a
#define F_00_19(b, c, d) ((((c) ^ (d)) & (b)) ^ (d))
#define F_20_39(b, c, d) ((b) ^ (c) ^ (d))
#define F_40_59(b, c, d) (((b) & (c)) | (((b) | (c)) & (d)))
diff --git a/crypto/fipsmodule/sha/sha256.c b/crypto/fipsmodule/sha/sha256.c
index cd6becb..6d709a6 100644
--- a/crypto/fipsmodule/sha/sha256.c
+++ b/crypto/fipsmodule/sha/sha256.c
@@ -128,15 +128,15 @@
#define HASH_CTX SHA256_CTX
#define HASH_CBLOCK 64
-/* Note that FIPS180-2 discusses "Truncation of the Hash Function Output."
- * default: case below covers for it. It's not clear however if it's permitted
- * to truncate to amount of bytes not divisible by 4. I bet not, but if it is,
- * then default: case shall be extended. For reference. Idea behind separate
- * cases for pre-defined lenghts is to let the compiler decide if it's
- * appropriate to unroll small loops.
- *
- * TODO(davidben): The small |md_len| case is one of the few places a low-level
- * hash 'final' function can fail. This should never happen. */
+// Note that FIPS180-2 discusses "Truncation of the Hash Function Output";
+// the default: case below covers for it. It's not clear, however, whether
+// truncating to a number of bytes not divisible by 4 is permitted. Probably
+// not, but if it is, the default: case would need to be extended. The idea
+// behind the separate cases for pre-defined lengths is to let the compiler
+// decide whether it's appropriate to unroll small loops.
+//
+// TODO(davidben): The small |md_len| case is one of the few places a
+// low-level hash 'final' function can fail. This should never happen.
#define HASH_MAKE_STRING(c, s) \
do { \
uint32_t ll; \
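
A scalar sketch of what such a truncating output step looks like (hypothetical helper, not the BoringSSL API; assumes md_len is a multiple of 4, per the comment above):

    #include <stddef.h>
    #include <stdint.h>

    // Serialize the leading md_len bytes of the big-endian 32-bit state
    // words; SHA-224 reuses the SHA-256 machinery this way with md_len = 28.
    static void hash_make_string_sketch(const uint32_t *h, uint8_t *out,
                                        size_t md_len) {
      for (size_t i = 0; i < md_len / 4; i++) {
        uint32_t t = h[i];
        *out++ = (uint8_t)(t >> 24);
        *out++ = (uint8_t)(t >> 16);
        *out++ = (uint8_t)(t >> 8);
        *out++ = (uint8_t)t;
      }
    }
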
@@ -196,9 +196,9 @@
#define ROTATE(a, n) (((a) << (n)) | ((a) >> (32 - (n))))
-/* FIPS specification refers to right rotations, while our ROTATE macro
- * is left one. This is why you might notice that rotation coefficients
- * differ from those observed in FIPS document by 32-N... */
+// The FIPS specification refers to right rotations, while our ROTATE macro
+// is a left rotation. This is why the rotation coefficients here differ
+// from those in the FIPS document by 32-N.
#define Sigma0(x) (ROTATE((x), 30) ^ ROTATE((x), 19) ^ ROTATE((x), 10))
#define Sigma1(x) (ROTATE((x), 26) ^ ROTATE((x), 21) ^ ROTATE((x), 7))
#define sigma0(x) (ROTATE((x), 25) ^ ROTATE((x), 14) ^ ((x) >> 3))
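
Concretely, a right rotation by n equals a left rotation by 32 - n, so FIPS's Sigma0 coefficients 2, 13, 22 become 30, 19, 10 above. A standalone check (sketch; the macros are local to this example, not the file's):

    #include <assert.h>
    #include <stdint.h>

    #define ROTL(a, n) (((a) << (n)) | ((a) >> (32 - (n))))
    #define ROTR(a, n) (((a) >> (n)) | ((a) << (32 - (n))))

    int main(void) {
      const uint32_t x = 0x9e3779b9u;
      assert(ROTR(x, 2) == ROTL(x, 30));   // Sigma0's first term
      assert(ROTR(x, 13) == ROTL(x, 19));  // second
      assert(ROTR(x, 22) == ROTL(x, 10));  // third
      return 0;
    }
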
@@ -314,7 +314,7 @@
}
}
-#endif /* !SHA256_ASM */
+#endif // !SHA256_ASM
#undef DATA_ORDER_IS_BIG_ENDIAN
#undef HASH_CTX
diff --git a/crypto/fipsmodule/sha/sha512.c b/crypto/fipsmodule/sha/sha512.c
index 6e1f79b..3902f50 100644
--- a/crypto/fipsmodule/sha/sha512.c
+++ b/crypto/fipsmodule/sha/sha512.c
@@ -63,17 +63,17 @@
#include "../../internal.h"
-/* IMPLEMENTATION NOTES.
- *
- * The 32-bit hash algorithms share a common byte-order neutral collector and
- * padding function implementations that operate on unaligned data,
- * ../md32_common.h. This SHA-512 implementation does not. Reasons
- * [in reverse order] are:
- *
- * - It's the only 64-bit hash algorithm for the moment of this writing,
- * there is no need for common collector/padding implementation [yet];
- * - By supporting only a transform function that operates on *aligned* data
- * the collector/padding function is simpler and easier to optimize. */
+// IMPLEMENTATION NOTES.
+//
+// The 32-bit hash algorithms share a common byte-order-neutral collector
+// and padding implementation that operates on unaligned data, in
+// ../md32_common.h. This SHA-512 implementation does not. The reasons
+// [in reverse order] are:
+//
+// - It's the only 64-bit hash algorithm at the time of this writing, so
+//   there is no need for a common collector/padding implementation [yet];
+// - By supporting only a transform function that operates on *aligned*
+//   data, the collector/padding function is simpler and easier to optimize.
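
A sketch of what the aligned-only transform buys (hypothetical helper; the real update path is more involved): unaligned input is staged through an aligned scratch block rather than making the transform handle arbitrary pointers.

    #include <stdint.h>
    #include <string.h>

    static void feed_blocks(uint64_t state[8], const uint8_t *in,
                            size_t blocks,
                            void (*transform)(uint64_t *, const uint64_t *,
                                              size_t)) {
      if (((uintptr_t)in & 7) == 0) {
        // Caller's buffer is already 8-byte aligned: feed it straight through.
        transform(state, (const uint64_t *)in, blocks);
      } else {
        // Stage each 128-byte block through an aligned scratch buffer.
        uint64_t aligned[16];
        for (size_t i = 0; i < blocks; i++) {
          memcpy(aligned, in + 128 * i, sizeof(aligned));
          transform(state, aligned, 1);
        }
      }
    }
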
#if !defined(OPENSSL_NO_ASM) && \
(defined(OPENSSL_X86) || defined(OPENSSL_X86_64) || \
@@ -227,7 +227,7 @@
uint8_t *p = (uint8_t *)sha->u.p;
size_t n = sha->num;
- p[n] = 0x80; /* There always is a room for one */
+ p[n] = 0x80; // There is always room for one
n++;
if (n > (sizeof(sha->u) - 16)) {
OPENSSL_memset(p + n, 0, sizeof(sha->u) - n);
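
The padding layout behind this: 0x80, then zeros, then the bit length as a 128-bit big-endian integer filling the last 16 bytes of a block. Since the collector flushes full 128-byte blocks as they fill, num is below 128 on entry, so the 0x80 byte always fits. A sketch of the zero count (hypothetical helper):

    #include <stddef.h>

    // Zero bytes needed so that message || 0x80 || zeros || 16-byte length
    // ends exactly on a 128-byte block boundary.
    static size_t sha512_pad_zeros(size_t msg_len) {
      size_t used = (msg_len + 1) % 128;  // bytes occupied once 0x80 is added
      return used <= 112 ? 112 - used : (112 + 128) - used;
    }
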
@@ -256,13 +256,13 @@
sha512_block_data_order(sha->h, (uint64_t *)p, 1);
if (md == NULL) {
- /* TODO(davidben): This NULL check is absent in other low-level hash 'final'
- * functions and is one of the few places one can fail. */
+ // TODO(davidben): This NULL check is absent in other low-level hash 'final'
+ // functions and is one of the few places one can fail.
return 0;
}
switch (sha->md_len) {
- /* Let compiler decide if it's appropriate to unroll... */
+ // Let the compiler decide if it's appropriate to unroll...
case SHA384_DIGEST_LENGTH:
for (n = 0; n < SHA384_DIGEST_LENGTH / 8; n++) {
uint64_t t = sha->h[n];
@@ -291,10 +291,10 @@
*(md++) = (uint8_t)(t);
}
break;
- /* ... as well as make sure md_len is not abused. */
+ // ... as well as make sure md_len is not abused.
default:
- /* TODO(davidben): This bad |md_len| case is one of the few places a
- * low-level hash 'final' function can fail. This should never happen. */
+ // TODO(davidben): This bad |md_len| case is one of the few places a
+ // low-level hash 'final' function can fail. This should never happen.
return 0;
}
@@ -392,7 +392,7 @@
#endif
#endif
#elif defined(_MSC_VER)
-#if defined(_WIN64) /* applies to both IA-64 and AMD64 */
+#if defined(_WIN64) // applies to both IA-64 and AMD64
#pragma intrinsic(_rotr64)
#define ROTR(a, n) _rotr64((a), n)
#endif
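
On toolchains with neither inline assembly nor a rotate intrinsic, a plain-C definition covers the remaining cases; a sketch of such a fallback (the name is local to this example):

    #include <stdint.h>

    // Right-rotate by n for n in [1, 63]; modern compilers recognize the
    // pattern and emit a single rotate instruction.
    #define ROTR64_SKETCH(a, n) \
      ((((uint64_t)(a)) >> (n)) | (((uint64_t)(a)) << (64 - (n))))
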
@@ -432,10 +432,8 @@
#if defined(__i386) || defined(__i386__) || defined(_M_IX86)
-/*
- * This code should give better results on 32-bit CPU with less than
- * ~24 registers, both size and performance wise...
- */
+// This code should give better results on a 32-bit CPU with fewer than ~24
+// registers, both size- and performance-wise.
static void sha512_block_data_order(uint64_t *state, const uint64_t *W,
size_t num) {
uint64_t A, E, T;
@@ -593,7 +591,7 @@
#endif
-#endif /* !SHA512_ASM */
+#endif // !SHA512_ASM
#undef ROTR
#undef PULL64