Harden ASN.1 BIO handling of large amounts of data.
If the ASN.1 BIO is presented with a large length field, read it in
chunks of increasing size, checking for EOF on each read. This prevents
small files from causing excessive amounts of memory to be allocated.
CVE-2016-2109
Thanks to Brian Carpenter for reporting this issue.
(Imported from upstream's f32774087f7b3db1f789688368d16d917757421e)
Change-Id: Id1b0d4436c4879d0ba7d3b7482b937cafffa28f7
Reviewed-on: https://boringssl-review.googlesource.com/7741
Reviewed-by: David Benjamin <davidben@google.com>
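
The core of the fix, sketched below as a standalone C helper for illustration
(a minimal sketch only, not the patch itself: the function name
read_declared_len and the use of stdio in place of BIO/BUF_MEM are
assumptions; the 16 KiB starting chunk size mirrors ASN1_CHUNK_INITIAL_SIZE
in the diff):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define CHUNK_INITIAL_SIZE (16 * 1024) /* mirrors ASN1_CHUNK_INITIAL_SIZE */

/*
 * Illustrative helper (not part of the patch): read exactly |declared_len|
 * bytes from |in|, growing the buffer one chunk at a time instead of
 * allocating |declared_len| up front.  A stream that hits EOF early fails
 * after allocating at most one chunk beyond the data it actually delivered.
 */
static unsigned char *read_declared_len(FILE *in, size_t declared_len) {
    unsigned char *buf = NULL;
    size_t have = 0;
    size_t chunk_max = CHUNK_INITIAL_SIZE;

    while (have < declared_len) {
        size_t want = declared_len - have;
        size_t chunk = want > chunk_max ? chunk_max : want;

        /* Grow the buffer by one chunk only. */
        unsigned char *tmp = realloc(buf, have + chunk);
        if (tmp == NULL) {
            free(buf);
            return NULL;
        }
        buf = tmp;

        /* EOF (or a read error) before the declared length means the
         * length field promised more data than the stream holds. */
        size_t n = fread(buf + have, 1, chunk, in);
        if (n == 0) {
            free(buf);
            return NULL;
        }
        have += n;

        /* Double the chunk size so genuinely large inputs still need
         * only a small number of growth steps. */
        if (chunk_max < INT_MAX / 2)
            chunk_max *= 2;
    }
    return buf;
}

Doubling the chunk size keeps the number of reallocations small for honest
large inputs, while a truncated or lying input never costs more than one
extra chunk of memory before the EOF check fails it.
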
diff --git a/crypto/asn1/a_d2i_fp.c b/crypto/asn1/a_d2i_fp.c
index f8845d8..b544971 100644
--- a/crypto/asn1/a_d2i_fp.c
+++ b/crypto/asn1/a_d2i_fp.c
@@ -141,6 +141,7 @@
 #endif
 
 #define HEADER_SIZE 8
+#define ASN1_CHUNK_INITIAL_SIZE (16 * 1024)
 static int asn1_d2i_read_bio(BIO *in, BUF_MEM **pb)
 {
     BUF_MEM *b;
@@ -217,28 +218,42 @@
             /* suck in c.slen bytes of data */
             want = c.slen;
             if (want > (len - off)) {
+                size_t chunk_max = ASN1_CHUNK_INITIAL_SIZE;
                 want -= (len - off);
                 if (want > INT_MAX /* BIO_read takes an int length */ ||
                     len + want < len) {
                     OPENSSL_PUT_ERROR(ASN1, ASN1_R_TOO_LONG);
                     goto err;
                 }
-                if (!BUF_MEM_grow_clean(b, len + want)) {
-                    OPENSSL_PUT_ERROR(ASN1, ERR_R_MALLOC_FAILURE);
-                    goto err;
-                }
                 while (want > 0) {
-                    i = BIO_read(in, &(b->data[len]), want);
-                    if (i <= 0) {
-                        OPENSSL_PUT_ERROR(ASN1, ASN1_R_NOT_ENOUGH_DATA);
+                    /*
+                     * Read content in chunks of increasing size
+                     * so we can return an error for EOF without
+                     * having to allocate the entire content length
+                     * in one go.
+                     */
+                    size_t chunk = want > chunk_max ? chunk_max : want;
+
+                    if (!BUF_MEM_grow_clean(b, len + chunk)) {
+                        OPENSSL_PUT_ERROR(ASN1, ERR_R_MALLOC_FAILURE);
                         goto err;
                     }
-                    /*
-                     * This can't overflow because |len+want| didn't
-                     * overflow.
-                     */
-                    len += i;
-                    want -= i;
+                    want -= chunk;
+                    while (chunk > 0) {
+                        i = BIO_read(in, &(b->data[len]), chunk);
+                        if (i <= 0) {
+                            OPENSSL_PUT_ERROR(ASN1, ASN1_R_NOT_ENOUGH_DATA);
+                            goto err;
+                        }
+                        /*
+                         * This can't overflow because |len+want| didn't
+                         * overflow.
+                         */
+                        len += i;
+                        chunk -= i;
+                    }
+                    if (chunk_max < INT_MAX/2)
+                        chunk_max *= 2;
                 }
             }
             if (off + c.slen < off) {