bn/asm/x86[_64]-mont*.pl: complement alloca with page-walking.

(Imports upstream's adc4f1fc25b2cac90076f1e1695b05b7aeeae501.)

Some OSes, *cough*-dows, insist that the stack be "wired" to
physical memory in strictly sequential order, i.e. if a stack
allocation spans two pages, a reference to the farther one can
be punished with a SEGV. But page walking can do good even on
other OSes, because it guarantees that a rogue thread hits the
guard page before it can do damage to an innocent one...

Change-Id: Ie1e278eb5982f26e596783b3d7820a71295688ec
Reviewed-on: https://boringssl-review.googlesource.com/13768
Commit-Queue: Adam Langley <agl@google.com>
Reviewed-by: David Benjamin <davidben@google.com>
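
For reference, the technique being added is simple: before touching a freshly
grown stack region, read one word from each 4096-byte page of it, starting
near the previously committed end and stepping toward the new stack pointer,
so the guard page is faulted in one page at a time. The C sketch below only
illustrates that access pattern; PAGE_SIZE and stack_probe() are illustrative
names, not anything in the patch, which performs the walk inline in assembly.

    #include <stddef.h>

    /*
     * Illustrative sketch only; assumes a 4096-byte page size and a
     * downward-growing stack. The real code does this inline.
     */
    #define PAGE_SIZE 4096

    static void stack_probe(volatile unsigned char *new_sp, size_t span) {
      /* Highest page-aligned offset inside the newly allocated span,
       * mirroring "sub %rsp,%r11; and $-4096,%r11" in the assembly. */
      size_t offset = span & ~(size_t)(PAGE_SIZE - 1);

      /* Mirror the .L*_page_walk loops: load, step down one page, repeat
       * until the offset would go negative (the "jnc" in the assembly). */
      for (;;) {
        (void)new_sp[offset];   /* touch the page; the asm uses a mov load */
        if (offset < PAGE_SIZE)
          break;
        offset -= PAGE_SIZE;
      }
    }

Note that the assembly must do this walk inline, before anything is written
into the newly reserved area: a call into a helper like the one above would
itself push data below the unprobed region. The C version is only meant to
show the order in which pages are touched.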
diff --git a/crypto/bn/asm/x86-mont.pl b/crypto/bn/asm/x86-mont.pl
old mode 100644
new mode 100755
index 4b5d05d..1f86c2b
--- a/crypto/bn/asm/x86-mont.pl
+++ b/crypto/bn/asm/x86-mont.pl
@@ -88,6 +88,21 @@
 
 	&and	("esp",-64);		# align to cache line
 
+	# Some OSes, *cough*-dows, insist on stack being "wired" to
+	# physical memory in strictly sequential manner, i.e. if stack
+	# allocation spans two pages, then reference to farmost one can
+	# be punishable by SEGV. But page walking can do good even on
+	# other OSes, because it guarantees that villain thread hits
+	# the guard page before it can make damage to innocent one...
+	&mov	("eax","ebp");
+	&sub	("eax","esp");
+	&and	("eax",-4096);
+&set_label("page_walk");
+	&mov	("edx",&DWP(0,"esp","eax"));
+	&sub	("eax",4096);
+	&data_byte(0x2e);
+	&jnc	(&label("page_walk"));
+
 	################################# load argument block...
 	&mov	("eax",&DWP(0*4,"esi"));# BN_ULONG *rp
 	&mov	("ebx",&DWP(1*4,"esi"));# const BN_ULONG *ap
diff --git a/crypto/bn/asm/x86_64-mont.pl b/crypto/bn/asm/x86_64-mont.pl
index 60e0111..fde8fc4 100755
--- a/crypto/bn/asm/x86_64-mont.pl
+++ b/crypto/bn/asm/x86_64-mont.pl
@@ -117,6 +117,20 @@
 
 	mov	%r11,8(%rsp,$num,8)	# tp[num+1]=%rsp
 .Lmul_body:
+	# Some OSes, *cough*-dows, insist on stack being "wired" to
+	# physical memory in strictly sequential manner, i.e. if stack
+	# allocation spans two pages, then reference to farmost one can
+	# be punishable by SEGV. But page walking can do good even on
+	# other OSes, because it guarantees that villain thread hits
+	# the guard page before it can make damage to innocent one...
+	sub	%rsp,%r11
+	and	\$-4096,%r11
+.Lmul_page_walk:
+	mov	(%rsp,%r11),%r10
+	sub	\$4096,%r11
+	.byte	0x66,0x2e		# predict non-taken
+	jnc	.Lmul_page_walk
+
 	mov	$bp,%r12		# reassign $bp
 ___
 		$bp="%r12";
@@ -328,6 +342,14 @@
 
 	mov	%r11,8(%rsp,$num,8)	# tp[num+1]=%rsp
 .Lmul4x_body:
+	sub	%rsp,%r11
+	and	\$-4096,%r11
+.Lmul4x_page_walk:
+	mov	(%rsp,%r11),%r10
+	sub	\$4096,%r11
+	.byte	0x2e			# predict non-taken
+	jnc	.Lmul4x_page_walk
+
 	mov	$rp,16(%rsp,$num,8)	# tp[num+2]=$rp
 	mov	%rdx,%r12		# reassign $bp
 ___
@@ -781,6 +803,15 @@
 	sub	%r11,%rsp
 .Lsqr8x_sp_done:
 	and	\$-64,%rsp
+	mov	%rax,%r11
+	sub	%rsp,%r11
+	and	\$-4096,%r11
+.Lsqr8x_page_walk:
+	mov	(%rsp,%r11),%r10
+	sub	\$4096,%r11
+	.byte	0x2e			# predict non-taken
+	jnc	.Lsqr8x_page_walk
+
 	mov	$num,%r10
 	neg	$num
 
@@ -918,8 +949,17 @@
 	sub	$num,%r10		# -$num
 	mov	($n0),$n0		# *n0
 	lea	-72(%rsp,%r10),%rsp	# alloca(frame+$num+8)
-	lea	($bp,$num),%r10
 	and	\$-128,%rsp
+	mov	%rax,%r11
+	sub	%rsp,%r11
+	and	\$-4096,%r11
+.Lmulx4x_page_walk:
+	mov	(%rsp,%r11),%r10
+	sub	\$4096,%r11
+	.byte	0x66,0x2e		# predict non-taken
+	jnc	.Lmulx4x_page_walk
+
+	lea	($bp,$num),%r10
 	##############################################################
 	# Stack layout
 	# +0	num
diff --git a/crypto/bn/asm/x86_64-mont5.pl b/crypto/bn/asm/x86_64-mont5.pl
index 61fde2d..6670215 100755
--- a/crypto/bn/asm/x86_64-mont5.pl
+++ b/crypto/bn/asm/x86_64-mont5.pl
@@ -102,6 +102,20 @@
 
 	mov	%rax,8(%rsp,$num,8)	# tp[num+1]=%rsp
 .Lmul_body:
+	# Some OSes, *cough*-dows, insist on stack being "wired" to
+	# physical memory in strictly sequential manner, i.e. if stack
+	# allocation spans two pages, then reference to farmost one can
+	# be punishable by SEGV. But page walking can do good even on
+	# other OSes, because it guarantees that villain thread hits
+	# the guard page before it can make damage to innocent one...
+	sub	%rsp,%rax
+	and	\$-4096,%rax
+.Lmul_page_walk:
+	mov	(%rsp,%rax),%r11
+	sub	\$4096,%rax
+	.byte	0x2e			# predict non-taken
+	jnc	.Lmul_page_walk
+
 	lea	128($bp),%r12		# reassign $bp (+size optimization)
 ___
 		$bp="%r12";
@@ -455,6 +469,15 @@
 	sub	%r11,%rsp
 .Lmul4xsp_done:
 	and	\$-64,%rsp
+	mov	%rax,%r11
+	sub	%rsp,%r11
+	and	\$-4096,%r11
+.Lmul4x_page_walk:
+	mov	(%rsp,%r11),%r10
+	sub	\$4096,%r11
+	.byte	0x2e			# predict non-taken
+	jnc	.Lmul4x_page_walk
+
 	neg	$num
 
 	mov	%rax,40(%rsp)
@@ -1044,6 +1067,15 @@
 	sub	%r11,%rsp
 .Lpwr_sp_done:
 	and	\$-64,%rsp
+	mov	%rax,%r11
+	sub	%rsp,%r11
+	and	\$-4096,%r11
+.Lpwr_page_walk:
+	mov	(%rsp,%r11),%r10
+	sub	\$4096,%r11
+	.byte	0x2e			# predict non-taken
+	jnc	.Lpwr_page_walk
+
 	mov	$num,%r10	
 	neg	$num
 
@@ -2013,7 +2045,16 @@
 	sub	%r11,%rsp
 .Lfrom_sp_done:
 	and	\$-64,%rsp
-	mov	$num,%r10	
+	mov	%rax,%r11
+	sub	%rsp,%r11
+	and	\$-4096,%r11
+.Lfrom_page_walk:
+	mov	(%rsp,%r11),%r10
+	sub	\$4096,%r11
+	.byte	0x2e			# predict non-taken
+	jnc	.Lfrom_page_walk
+
+	mov	$num,%r10
 	neg	$num
 
 	##############################################################
@@ -2158,6 +2199,15 @@
 	sub	%r11,%rsp
 .Lmulx4xsp_done:	
 	and	\$-64,%rsp		# ensure alignment
+	mov	%rax,%r11
+	sub	%rsp,%r11
+	and	\$-4096,%r11
+.Lmulx4x_page_walk:
+	mov	(%rsp,%r11),%r10
+	sub	\$4096,%r11
+	.byte	0x2e			# predict non-taken
+	jnc	.Lmulx4x_page_walk
+
 	##############################################################
 	# Stack layout
 	# +0	-num
@@ -2604,6 +2654,15 @@
 	sub	%r11,%rsp
 .Lpwrx_sp_done:
 	and	\$-64,%rsp
+	mov	%rax,%r11
+	sub	%rsp,%r11
+	and	\$-4096,%r11
+.Lpwrx_page_walk:
+	mov	(%rsp,%r11),%r10
+	sub	\$4096,%r11
+	.byte	0x2e			# predict non-taken
+	jnc	.Lpwrx_page_walk
+
 	mov	$num,%r10	
 	neg	$num