Remove |EC_POINTs_mul| & simplify p256-x86_64.
Without |EC_POINTs_mul|, there's never more than one variable point
passed to a |EC_METHOD|'s |mul| method. This allows them to be
simplified considerably. In this commit, the p256-x86_64 implementation
has been simplified to eliminate the heap allocation and looping
that were previously necessary to deal with the possibility of
there being multiple input points. The other implementations were left
mostly as-is; they should be similarly simplified in the future.
Change-Id: I70751d1d5296be2562af0730e7ccefdba7a1acae
Reviewed-on: https://boringssl-review.googlesource.com/6493
Reviewed-by: Adam Langley <agl@google.com>
diff --git a/crypto/ec/p256-64.c b/crypto/ec/p256-64.c
index d91d3ae..613c8dd 100644
--- a/crypto/ec/p256-64.c
+++ b/crypto/ec/p256-64.c
@@ -1701,12 +1701,16 @@
(void (*)(void *, const void *))smallfelem_assign);
}
-/* Computes scalar*generator + \sum scalars[i]*points[i], ignoring NULL
- * values Result is stored in r (r can equal one of the inputs). */
int ec_GFp_nistp256_points_mul(const EC_GROUP *group, EC_POINT *r,
- const BIGNUM *scalar, size_t num,
- const EC_POINT *points[],
- const BIGNUM *scalars[], BN_CTX *ctx) {
+ const BIGNUM *g_scalar, const EC_POINT *p_,
+ const BIGNUM *p_scalar_, BN_CTX *ctx) {
+ /* TODO: This function used to take |points| and |scalars| as arrays of
+ * |num| elements. The code below should be simplified to work in terms of |p|
+ * and |p_scalar|. */
+ size_t num = p_ != NULL ? 1 : 0;
+ const EC_POINT **points = p_ != NULL ? &p_ : NULL;
+ BIGNUM const *const *scalars = p_ != NULL ? &p_scalar_ : NULL;
+
int ret = 0;
int j;
int mixed = 0;
@@ -1765,14 +1769,14 @@
if (i == num) {
/* we didn't have a valid precomputation, so we pick the generator. */
p = EC_GROUP_get0_generator(group);
- p_scalar = scalar;
+ p_scalar = g_scalar;
} else {
/* the i^th point */
p = points[i];
p_scalar = scalars[i];
}
if (p_scalar != NULL && p != NULL) {
- /* reduce scalar to 0 <= scalar < 2^256 */
+      /* reduce p_scalar to 0 <= p_scalar < 2^256 */
if (BN_num_bits(p_scalar) > 256 || BN_is_negative(p_scalar)) {
/* this is an unusual input, and we don't guarantee
* constant-timeness. */
@@ -1814,25 +1818,24 @@
}
}
- /* the scalar for the generator */
- if (scalar != NULL) {
+ if (g_scalar != NULL) {
memset(g_secret, 0, sizeof(g_secret));
- /* reduce scalar to 0 <= scalar < 2^256 */
- if (BN_num_bits(scalar) > 256 || BN_is_negative(scalar)) {
+ /* reduce g_scalar to 0 <= g_scalar < 2^256 */
+ if (BN_num_bits(g_scalar) > 256 || BN_is_negative(g_scalar)) {
/* this is an unusual input, and we don't guarantee
* constant-timeness. */
- if (!BN_nnmod(tmp_scalar, scalar, &group->order, ctx)) {
+ if (!BN_nnmod(tmp_scalar, g_scalar, &group->order, ctx)) {
OPENSSL_PUT_ERROR(EC, ERR_R_BN_LIB);
goto err;
}
num_bytes = BN_bn2bin(tmp_scalar, tmp);
} else {
- num_bytes = BN_bn2bin(scalar, tmp);
+ num_bytes = BN_bn2bin(g_scalar, tmp);
}
flip_endian(g_secret, tmp, num_bytes);
}
batch_mul(x_out, y_out, z_out, (const felem_bytearray(*))secrets,
- num_points, scalar != NULL ? g_secret : NULL, mixed,
+ num_points, g_scalar != NULL ? g_secret : NULL, mixed,
(const smallfelem(*)[17][3])pre_comp);
/* reduce the output to its unique minimal representation */