Don't make assumptions about GCM128_CONTEXT layout in aesni-gcm-x86_64.pl

This is a little trickier because x86-64 is so inconveniently
register-starved: this code was already using every general-purpose
register. However, since Xi is only needed at the start and end of the
function, I just swapped the Xi and Htable parameters. As the seventh
parameter, Xi is now passed on the stack in both ABIs, so we don't need
to explicitly spill it.

Change-Id: I2ef4552fc181a5350c9b1c733cf2319377a06b74
Reviewed-on: https://boringssl-review.googlesource.com/c/boringssl/+/59525
Reviewed-by: Adam Langley <agl@google.com>
Commit-Queue: David Benjamin <davidben@google.com>
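
For context, a minimal C sketch of the layout assumption being removed.
The struct mirrors the old "struct { u128 Xi,H,Htbl[9]; } *Xip" comment
in the perlasm; the u128 stand-in and the main harness are illustrative
only, not BoringSSL's actual definitions.

    #include <stddef.h>
    #include <stdio.h>

    /* Stand-in for BoringSSL's u128; only the size matters here. */
    typedef struct { unsigned char b[16]; } u128;

    /* The old entry points took a pointer to Xi and reached the Htbl
     * powers at fixed offsets from it (the removed "lea 0x20+0x20($Xip)"
     * bias and the 0xNN-0x20 loads), which is only sound if Xi, H, and
     * Htbl sit back to back exactly like this. */
    struct old_layout {
      u128 Xi;       /* GHASH accumulator, offset 0x00 */
      u128 H;        /* hash key, offset 0x10 */
      u128 Htbl[9];  /* powers of H, offset 0x20 */
    };

    int main(void) {
      /* Htbl lives 0x20 past Xi; the old perlasm biased Xip by 0x40 and
       * then addressed Htbl[0] as 0x00-0x20 off the biased pointer. */
      printf("offsetof(Htbl) = 0x%zx\n", offsetof(struct old_layout, Htbl));
      return 0;
    }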
diff --git a/crypto/fipsmodule/modes/asm/aesni-gcm-x86_64.pl b/crypto/fipsmodule/modes/asm/aesni-gcm-x86_64.pl
index b0b65bf..7c235f8 100644
--- a/crypto/fipsmodule/modes/asm/aesni-gcm-x86_64.pl
+++ b/crypto/fipsmodule/modes/asm/aesni-gcm-x86_64.pl
@@ -73,10 +73,19 @@
 
 # On Windows, only four parameters are passed in registers. The last two
 # parameters will be manually loaded into %rdi and %rsi.
-my ($inp, $out, $len, $key, $ivp, $Xip) =
+my ($inp, $out, $len, $key, $ivp, $Htable) =
     $win64 ? ("%rcx", "%rdx", "%r8", "%r9", "%rdi", "%rsi") :
              ("%rdi", "%rsi", "%rdx", "%rcx", "%r8", "%r9");
 
+# The offset from %rbp to the Xip parameter. On Windows, all parameters have
+# corresponding stack positions, not just the ones passed on the stack.
+# (0x40 = 6*8 + 0x10)
+#
+# Xip only needs to be accessed at the beginning and end of the function, and
+# this function is short on registers, so we make it the last parameter for
+# convenience.
+my $Xip_offset = $win64 ? 0x40 : 0x10;
+
 ($Ii,$T1,$T2,$Hkey,
  $Z0,$Z1,$Z2,$Z3,$Xi) = map("%xmm$_",(0..8));
 
@@ -108,7 +117,7 @@
 .Loop6x:
 	add		\$`6<<24`,$counter
 	jc		.Lhandle_ctr32		# discard $inout[1-5]?
-	vmovdqu		0x00-0x20($Xip),$Hkey	# $Hkey^1
+	vmovdqu		0x00-0x20($Htable),$Hkey	# $Hkey^1
 	  vpaddb	$T2,$inout5,$T1		# next counter value
 	  vpxor		$rndkey,$inout1,$inout1
 	  vpxor		$rndkey,$inout2,$inout2
@@ -148,7 +157,7 @@
 	setnc		%r12b
 	vpclmulqdq	\$0x11,$Hkey,$Z3,$Z3
 	  vaesenc	$T2,$inout2,$inout2
-	vmovdqu		0x10-0x20($Xip),$Hkey	# $Hkey^2
+	vmovdqu		0x10-0x20($Htable),$Hkey	# $Hkey^2
 	neg		%r12
 	  vaesenc	$T2,$inout3,$inout3
 	 vpxor		$Z1,$Z2,$Z2
@@ -175,7 +184,7 @@
 	mov		%r13,0x20+8(%rsp)
 	  vaesenc	$rndkey,$inout4,$inout4
 	mov		%r12,0x28+8(%rsp)
-	vmovdqu		0x30-0x20($Xip),$Z1	# borrow $Z1 for $Hkey^3
+	vmovdqu		0x30-0x20($Htable),$Z1	# borrow $Z1 for $Hkey^3
 	  vaesenc	$rndkey,$inout5,$inout5
 
 	  vmovups	0x30-0x80($key),$rndkey
@@ -193,7 +202,7 @@
 	  vaesenc	$rndkey,$inout3,$inout3
 	  vaesenc	$rndkey,$inout4,$inout4
 	 vpxor		$T1,$Z0,$Z0
-	vmovdqu		0x40-0x20($Xip),$T1	# borrow $T1 for $Hkey^4
+	vmovdqu		0x40-0x20($Htable),$T1	# borrow $T1 for $Hkey^4
 	  vaesenc	$rndkey,$inout5,$inout5
 
 	  vmovups	0x40-0x80($key),$rndkey
@@ -215,7 +224,7 @@
 	  vaesenc	$rndkey,$inout4,$inout4
 	mov		%r12,0x38+8(%rsp)
 	 vpxor		$T2,$Z0,$Z0
-	vmovdqu		0x60-0x20($Xip),$T2	# borrow $T2 for $Hkey^5
+	vmovdqu		0x60-0x20($Htable),$T2	# borrow $T2 for $Hkey^5
 	  vaesenc	$rndkey,$inout5,$inout5
 
 	  vmovups	0x50-0x80($key),$rndkey
@@ -237,7 +246,7 @@
 	  vaesenc	$rndkey,$inout4,$inout4
 	mov		%r12,0x48+8(%rsp)
 	 vpxor		$Hkey,$Z0,$Z0
-	 vmovdqu	0x70-0x20($Xip),$Hkey	# $Hkey^6
+	 vmovdqu	0x70-0x20($Htable),$Hkey	# $Hkey^6
 	  vaesenc	$rndkey,$inout5,$inout5
 
 	  vmovups	0x60-0x80($key),$rndkey
@@ -339,7 +348,7 @@
 	  vmovdqu	0x30($const),$Z1	# borrow $Z1, .Ltwo_lsb
 	  vpaddd	0x40($const),$Z2,$inout1	# .Lone_lsb
 	  vpaddd	$Z1,$Z2,$inout2
-	vmovdqu		0x00-0x20($Xip),$Hkey	# $Hkey^1
+	vmovdqu		0x00-0x20($Htable),$Hkey	# $Hkey^1
 	  vpaddd	$Z1,$inout1,$inout3
 	  vpshufb	$Ii,$inout1,$inout1
 	  vpaddd	$Z1,$inout2,$inout4
@@ -424,8 +433,8 @@
 ######################################################################
 #
 # size_t aesni_gcm_[en|de]crypt(const void *inp, void *out, size_t len,
-#		const AES_KEY *key, unsigned char iv[16],
-#		struct { u128 Xi,H,Htbl[9]; } *Xip);
+#		const AES_KEY *key, unsigned char iv[16], const u128 Htable[16],
+#		u128 *Xip);
 $code.=<<___;
 .globl	aesni_gcm_decrypt
 .type	aesni_gcm_decrypt,\@abi-omnipotent
@@ -474,7 +483,7 @@
 	mov	%rsi, 0x18(%rbp)
 .seh_savereg	%rsi, 0xa8+5*8+0x18
 	mov	0x30(%rbp), $ivp
-	mov	0x38(%rbp), $Xip
+	mov	0x38(%rbp), $Htable
 	# Save non-volatile XMM registers.
 	movaps	%xmm6,-0xd0(%rbp)
 .seh_savexmm128	%xmm6, 0xa8+5*8-0xd0
@@ -501,17 +510,18 @@
 $code.=<<___;
 	vzeroupper
 
+	mov		$Xip_offset(%rbp), %r12
 	vmovdqu		($ivp),$T1		# input counter value
 	add		\$-128,%rsp
 	mov		12($ivp),$counter
 	lea		.Lbswap_mask(%rip),$const
 	lea		-0x80($key),$in0	# borrow $in0
 	mov		\$0xf80,$end0		# borrow $end0
-	vmovdqu		($Xip),$Xi		# load Xi
+	vmovdqu		(%r12),$Xi		# load Xi
 	and		\$-128,%rsp		# ensure stack alignment
 	vmovdqu		($const),$Ii		# borrow $Ii for .Lbswap_mask
 	lea		0x80($key),$key		# size optimization
-	lea		0x20+0x20($Xip),$Xip	# size optimization
+	lea		0x20($Htable),$Htable	# size optimization
 	mov		0xf0-0x80($key),$rounds
 	vpshufb		$Ii,$Xi,$Xi
 
@@ -556,6 +566,7 @@
 
 	call		_aesni_ctr32_ghash_6x
 
+	mov		$Xip_offset(%rbp), %r12
 	vmovups		$inout0,-0x60($out)	# save output
 	vmovups		$inout1,-0x50($out)
 	vmovups		$inout2,-0x40($out)
@@ -564,7 +575,7 @@
 	vmovups		$inout5,-0x10($out)
 
 	vpshufb		($const),$Xi,$Xi	# .Lbswap_mask
-	vmovdqu		$Xi,-0x40($Xip)		# output Xi
+	vmovdqu		$Xi,(%r12)		# output Xi
 
 	vzeroupper
 ___
@@ -750,7 +761,7 @@
 	mov	%rsi, 0x18(%rbp)
 .seh_savereg	%rsi, 0xa8+5*8+0x18
 	mov	0x30(%rbp), $ivp
-	mov	0x38(%rbp), $Xip
+	mov	0x38(%rbp), $Htable
 	# Save non-volatile XMM registers.
 	movaps	%xmm6,-0xd0(%rbp)
 .seh_savexmm128	%xmm6, 0xa8+5*8-0xd0
@@ -825,8 +836,9 @@
 
 	call		_aesni_ctr32_6x
 
-	vmovdqu		($Xip),$Xi		# load Xi
-	lea		0x20+0x20($Xip),$Xip	# size optimization
+	mov		$Xip_offset(%rbp), %r12
+	lea		0x20($Htable),$Htable	# size optimization
+	vmovdqu		(%r12),$Xi		# load Xi
 	sub		\$12,$len
 	mov		\$0x60*2,%rax
 	vpshufb		$Ii,$Xi,$Xi
@@ -834,9 +846,9 @@
 	call		_aesni_ctr32_ghash_6x
 	vmovdqu		0x20(%rsp),$Z3		# I[5]
 	 vmovdqu	($const),$Ii		# borrow $Ii for .Lbswap_mask
-	vmovdqu		0x00-0x20($Xip),$Hkey	# $Hkey^1
+	vmovdqu		0x00-0x20($Htable),$Hkey	# $Hkey^1
 	vpunpckhqdq	$Z3,$Z3,$T1
-	vmovdqu		0x20-0x20($Xip),$rndkey	# borrow $rndkey for $HK
+	vmovdqu		0x20-0x20($Htable),$rndkey	# borrow $rndkey for $HK
 	 vmovups	$inout0,-0x60($out)	# save output
 	 vpshufb	$Ii,$inout0,$inout0	# but keep bswapped copy
 	vpxor		$Z3,$T1,$T1
@@ -856,7 +868,7 @@
 
 $code.=<<___;
 	 vmovdqu	0x30(%rsp),$Z2		# I[4]
-	 vmovdqu	0x10-0x20($Xip),$Ii	# borrow $Ii for $Hkey^2
+	 vmovdqu	0x10-0x20($Htable),$Ii	# borrow $Ii for $Hkey^2
 	 vpunpckhqdq	$Z2,$Z2,$T2
 	vpclmulqdq	\$0x00,$Hkey,$Z3,$Z1
 	 vpxor		$Z2,$T2,$T2
@@ -865,19 +877,19 @@
 
 	 vmovdqu	0x40(%rsp),$T3		# I[3]
 	vpclmulqdq	\$0x00,$Ii,$Z2,$Z0
-	 vmovdqu	0x30-0x20($Xip),$Hkey	# $Hkey^3
+	 vmovdqu	0x30-0x20($Htable),$Hkey	# $Hkey^3
 	vpxor		$Z1,$Z0,$Z0
 	 vpunpckhqdq	$T3,$T3,$Z1
 	vpclmulqdq	\$0x11,$Ii,$Z2,$Z2
 	 vpxor		$T3,$Z1,$Z1
 	vpxor		$Z3,$Z2,$Z2
 	vpclmulqdq	\$0x10,$HK,$T2,$T2
-	 vmovdqu	0x50-0x20($Xip),$HK
+	 vmovdqu	0x50-0x20($Htable),$HK
 	vpxor		$T1,$T2,$T2
 
 	 vmovdqu	0x50(%rsp),$T1		# I[2]
 	vpclmulqdq	\$0x00,$Hkey,$T3,$Z3
-	 vmovdqu	0x40-0x20($Xip),$Ii	# borrow $Ii for $Hkey^4
+	 vmovdqu	0x40-0x20($Htable),$Ii	# borrow $Ii for $Hkey^4
 	vpxor		$Z0,$Z3,$Z3
 	 vpunpckhqdq	$T1,$T1,$Z0
 	vpclmulqdq	\$0x11,$Hkey,$T3,$T3
@@ -888,19 +900,19 @@
 
 	 vmovdqu	0x60(%rsp),$T2		# I[1]
 	vpclmulqdq	\$0x00,$Ii,$T1,$Z2
-	 vmovdqu	0x60-0x20($Xip),$Hkey	# $Hkey^5
+	 vmovdqu	0x60-0x20($Htable),$Hkey	# $Hkey^5
 	vpxor		$Z3,$Z2,$Z2
 	 vpunpckhqdq	$T2,$T2,$Z3
 	vpclmulqdq	\$0x11,$Ii,$T1,$T1
 	 vpxor		$T2,$Z3,$Z3
 	vpxor		$T3,$T1,$T1
 	vpclmulqdq	\$0x10,$HK,$Z0,$Z0
-	 vmovdqu	0x80-0x20($Xip),$HK
+	 vmovdqu	0x80-0x20($Htable),$HK
 	vpxor		$Z1,$Z0,$Z0
 
 	 vpxor		0x70(%rsp),$Xi,$Xi	# accumulate I[0]
 	vpclmulqdq	\$0x00,$Hkey,$T2,$Z1
-	 vmovdqu	0x70-0x20($Xip),$Ii	# borrow $Ii for $Hkey^6
+	 vmovdqu	0x70-0x20($Htable),$Ii	# borrow $Ii for $Hkey^6
 	 vpunpckhqdq	$Xi,$Xi,$T3
 	vpxor		$Z2,$Z1,$Z1
 	vpclmulqdq	\$0x11,$Hkey,$T2,$T2
@@ -910,17 +922,17 @@
 	vpxor		$Z0,$Z3,$Z0
 
 	vpclmulqdq	\$0x00,$Ii,$Xi,$Z2
-	 vmovdqu	0x00-0x20($Xip),$Hkey	# $Hkey^1
+	 vmovdqu	0x00-0x20($Htable),$Hkey	# $Hkey^1
 	 vpunpckhqdq	$inout5,$inout5,$T1
 	vpclmulqdq	\$0x11,$Ii,$Xi,$Xi
 	 vpxor		$inout5,$T1,$T1
 	vpxor		$Z1,$Z2,$Z1
 	vpclmulqdq	\$0x10,$HK,$T3,$T3
-	 vmovdqu	0x20-0x20($Xip),$HK
+	 vmovdqu	0x20-0x20($Htable),$HK
 	vpxor		$T2,$Xi,$Z3
 	vpxor		$Z0,$T3,$Z2
 
-	 vmovdqu	0x10-0x20($Xip),$Ii	# borrow $Ii for $Hkey^2
+	 vmovdqu	0x10-0x20($Htable),$Ii	# borrow $Ii for $Hkey^2
 	  vpxor		$Z1,$Z3,$T3		# aggregated Karatsuba post-processing
 	vpclmulqdq	\$0x00,$Hkey,$inout5,$Z0
 	  vpxor		$T3,$Z2,$Z2
@@ -934,7 +946,7 @@
 	  vpxor		$Z2,$Z3,$Z3
 
 	vpclmulqdq	\$0x00,$Ii,$inout4,$Z1
-	 vmovdqu	0x30-0x20($Xip),$Hkey	# $Hkey^3
+	 vmovdqu	0x30-0x20($Htable),$Hkey	# $Hkey^3
 	vpxor		$Z0,$Z1,$Z1
 	 vpunpckhqdq	$inout3,$inout3,$T3
 	vpclmulqdq	\$0x11,$Ii,$inout4,$inout4
@@ -942,11 +954,11 @@
 	vpxor		$inout5,$inout4,$inout4
 	  vpalignr	\$8,$Xi,$Xi,$inout5	# 1st phase
 	vpclmulqdq	\$0x10,$HK,$T2,$T2
-	 vmovdqu	0x50-0x20($Xip),$HK
+	 vmovdqu	0x50-0x20($Htable),$HK
 	vpxor		$T1,$T2,$T2
 
 	vpclmulqdq	\$0x00,$Hkey,$inout3,$Z0
-	 vmovdqu	0x40-0x20($Xip),$Ii	# borrow $Ii for $Hkey^4
+	 vmovdqu	0x40-0x20($Htable),$Ii	# borrow $Ii for $Hkey^4
 	vpxor		$Z1,$Z0,$Z0
 	 vpunpckhqdq	$inout2,$inout2,$T1
 	vpclmulqdq	\$0x11,$Hkey,$inout3,$inout3
@@ -960,7 +972,7 @@
 	  vxorps	$inout5,$Xi,$Xi
 
 	vpclmulqdq	\$0x00,$Ii,$inout2,$Z1
-	 vmovdqu	0x60-0x20($Xip),$Hkey	# $Hkey^5
+	 vmovdqu	0x60-0x20($Htable),$Hkey	# $Hkey^5
 	vpxor		$Z0,$Z1,$Z1
 	 vpunpckhqdq	$inout1,$inout1,$T2
 	vpclmulqdq	\$0x11,$Ii,$inout2,$inout2
@@ -968,7 +980,7 @@
 	  vpalignr	\$8,$Xi,$Xi,$inout5	# 2nd phase
 	vpxor		$inout3,$inout2,$inout2
 	vpclmulqdq	\$0x10,$HK,$T1,$T1
-	 vmovdqu	0x80-0x20($Xip),$HK
+	 vmovdqu	0x80-0x20($Htable),$HK
 	vpxor		$T3,$T1,$T1
 
 	  vxorps	$Z3,$inout5,$inout5
@@ -976,7 +988,7 @@
 	  vxorps	$inout5,$Xi,$Xi
 
 	vpclmulqdq	\$0x00,$Hkey,$inout1,$Z0
-	 vmovdqu	0x70-0x20($Xip),$Ii	# borrow $Ii for $Hkey^6
+	 vmovdqu	0x70-0x20($Htable),$Ii	# borrow $Ii for $Hkey^6
 	vpxor		$Z1,$Z0,$Z0
 	 vpunpckhqdq	$Xi,$Xi,$T3
 	vpclmulqdq	\$0x11,$Hkey,$inout1,$inout1
@@ -1011,8 +1023,9 @@
 ___
 }
 $code.=<<___;
+	mov		$Xip_offset(%rbp), %r12
 	vpshufb		($const),$Xi,$Xi	# .Lbswap_mask
-	vmovdqu		$Xi,-0x40($Xip)		# output Xi
+	vmovdqu		$Xi,(%r12)		# output Xi
 
 	vzeroupper
 ___
diff --git a/crypto/fipsmodule/modes/gcm.c b/crypto/fipsmodule/modes/gcm.c
index 33374d0..5932d60 100644
--- a/crypto/fipsmodule/modes/gcm.c
+++ b/crypto/fipsmodule/modes/gcm.c
@@ -136,17 +136,13 @@
 static size_t hw_gcm_encrypt(const uint8_t *in, uint8_t *out, size_t len,
                              const AES_KEY *key, uint8_t ivec[16], uint64_t *Xi,
                              const u128 Htable[16]) {
-  // TODO(davidben): |aesni_gcm_encrypt| accesses |Htable| but does so assuming
-  // it is a known offset from |Xi|.
-  return aesni_gcm_encrypt(in, out, len, key, ivec, Xi);
+  return aesni_gcm_encrypt(in, out, len, key, ivec, Htable, Xi);
 }
 
 static size_t hw_gcm_decrypt(const uint8_t *in, uint8_t *out, size_t len,
                              const AES_KEY *key, uint8_t ivec[16], uint64_t *Xi,
                              const u128 Htable[16]) {
-  // TODO(davidben): |aesni_gcm_decrypt| accesses |Htable| but does so assuming
-  // it is a known offset from |Xi|.
-  return aesni_gcm_decrypt(in, out, len, key, ivec, Xi);
+  return aesni_gcm_decrypt(in, out, len, key, ivec, Htable, Xi);
 }
 #endif  // HW_GCM && X86_64
 
diff --git a/crypto/fipsmodule/modes/gcm_test.cc b/crypto/fipsmodule/modes/gcm_test.cc
index 6ed3e72..cfdccb2 100644
--- a/crypto/fipsmodule/modes/gcm_test.cc
+++ b/crypto/fipsmodule/modes/gcm_test.cc
@@ -162,28 +162,21 @@
       if (hwaes_capable()) {
         AES_KEY aes_key;
         static const uint8_t kKey[16] = {0};
-
-        // aesni_gcm_* makes assumptions about |GCM128_CONTEXT|'s layout.
-        GCM128_CONTEXT gcm;
-        memset(&gcm, 0, sizeof(gcm));
-        memcpy(&gcm.gcm_key.H, kH, sizeof(kH));
-        memcpy(&gcm.gcm_key.Htable, Htable, sizeof(Htable));
-        memcpy(&gcm.Xi, X, sizeof(X));
         uint8_t iv[16] = {0};
 
         aes_hw_set_encrypt_key(kKey, 128, &aes_key);
         for (size_t blocks : kBlockCounts) {
           CHECK_ABI_SEH(aesni_gcm_encrypt, buf, buf, blocks * 16, &aes_key, iv,
-                        gcm.Xi.u);
+                        Htable, X);
           CHECK_ABI_SEH(aesni_gcm_encrypt, buf, buf, blocks * 16 + 7, &aes_key,
-                        iv, gcm.Xi.u);
+                        iv, Htable, X);
         }
         aes_hw_set_decrypt_key(kKey, 128, &aes_key);
         for (size_t blocks : kBlockCounts) {
           CHECK_ABI_SEH(aesni_gcm_decrypt, buf, buf, blocks * 16, &aes_key, iv,
-                        gcm.Xi.u);
+                        Htable, X);
           CHECK_ABI_SEH(aesni_gcm_decrypt, buf, buf, blocks * 16 + 7, &aes_key,
-                        iv, gcm.Xi.u);
+                        iv, Htable, X);
         }
       }
     }
diff --git a/crypto/fipsmodule/modes/internal.h b/crypto/fipsmodule/modes/internal.h
index 0daad55..3a28591 100644
--- a/crypto/fipsmodule/modes/internal.h
+++ b/crypto/fipsmodule/modes/internal.h
@@ -269,9 +269,11 @@
 
 #define HW_GCM
 size_t aesni_gcm_encrypt(const uint8_t *in, uint8_t *out, size_t len,
-                         const AES_KEY *key, uint8_t ivec[16], uint64_t *Xi);
+                         const AES_KEY *key, uint8_t ivec[16],
+                         const u128 Htable[16], uint64_t *Xi);
 size_t aesni_gcm_decrypt(const uint8_t *in, uint8_t *out, size_t len,
-                         const AES_KEY *key, uint8_t ivec[16], uint64_t *Xi);
+                         const AES_KEY *key, uint8_t ivec[16],
+                         const u128 Htable[16], uint64_t *Xi);
 #endif  // OPENSSL_X86_64
 
 #if defined(OPENSSL_X86)
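
As a sanity check on the $Xip_offset arithmetic in the perlasm hunk
above (0x40 = 6*8 + 0x10), here is a small standalone C program that
recomputes both constants. The frame-layout comments restate the
perlasm's reasoning under the usual push-%rbp prologue; none of this is
BoringSSL code.

    #include <assert.h>
    #include <stdio.h>

    int main(void) {
      /* After a push-%rbp prologue, (%rbp) holds the saved %rbp and
       * 8(%rbp) the return address, so caller-pushed arguments start
       * at 0x10(%rbp). */
      const int stack_base = 0x10;

      /* SysV passes the first six integer arguments in registers, so
       * Xip, the seventh, is the first stack slot. */
      const int sysv_xip = stack_base;

      /* Win64 gives every parameter a stack slot: home slots for the
       * four register parameters, then the fifth (ivp) and sixth
       * (Htable), so Xip follows six 8-byte slots. */
      const int win64_xip = stack_base + 6 * 8;

      assert(sysv_xip == 0x10);   /* $win64 ? 0x40 : 0x10 */
      assert(win64_xip == 0x40);
      printf("sysv 0x%x, win64 0x%x\n", sysv_xip, win64_xip);
      return 0;
    }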