Make vpaes-armv8.pl compatible with XOM.

On targets with execute-only memory (XOM), the code segment may not be
read as data, so loading the vpaes lookup tables that currently live in
.text would fault. Move the tables into .rodata and materialize their
addresses with adrp/add pairs instead of adr: adr is limited to a
+/-1MiB PC-relative range, while adrp plus a :lo12: add reaches
+/-4GiB, which is needed now that the tables sit in a different
section from the code.

Change-Id: I27413467e5cac4e16ecbbb8d9a238ba5a8bcb9e7
Reviewed-on: https://boringssl-review.googlesource.com/c/boringssl/+/35284
Commit-Queue: Adam Langley <agl@google.com>
Reviewed-by: Adam Langley <agl@google.com>
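
For reference, a minimal sketch of the pattern applied throughout the
patch (table contents elided and zero-filled here; .Lk_inv stands in
for all of the _vpaes_consts labels):

	// Before: constants in .text, loaded with a single adr.
	.text
	.Lk_inv:
		.quad	0, 0, 0, 0		// table data elided
	_vpaes_encrypt_preheat:
		adr	x10, .Lk_inv		// PC-relative, +/-1MiB
		ld1	{v18.2d-v19.2d}, [x10]	// faults if .text is execute-only

	// After: constants in .rodata, addressed page-wise.
	.section	.rodata
	.Lk_inv:
		.quad	0, 0, 0, 0		// same table data
	.text
	_vpaes_encrypt_preheat:
		adrp	x10, :pg_hi21:.Lk_inv	// 4KiB page of .Lk_inv, +/-4GiB
		add	x10, x10, :lo12:.Lk_inv	// low 12 bits of the offset
		ld1	{v18.2d-v19.2d}, [x10]	// loads from a readable page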
diff --git a/crypto/fipsmodule/aes/asm/vpaes-armv8.pl b/crypto/fipsmodule/aes/asm/vpaes-armv8.pl
index 49eaf0d..5fa06d8 100755
--- a/crypto/fipsmodule/aes/asm/vpaes-armv8.pl
+++ b/crypto/fipsmodule/aes/asm/vpaes-armv8.pl
@@ -49,7 +49,7 @@
 *STDOUT=*OUT;
 
 $code.=<<___;
-.text
+.section	.rodata
 
 .type	_vpaes_consts,%object
 .align	7	// totally strategic alignment
@@ -140,6 +140,8 @@
 .asciz  "Vector Permutation AES for ARMv8, Mike Hamburg (Stanford University)"
 .size	_vpaes_consts,.-_vpaes_consts
 .align	6
+
+.text
 ___
 
 {
@@ -159,7 +161,8 @@
 .type	_vpaes_encrypt_preheat,%function
 .align	4
 _vpaes_encrypt_preheat:
-	adr	x10, .Lk_inv
+	adrp	x10, :pg_hi21:.Lk_inv
+	add	x10, x10, :lo12:.Lk_inv
 	movi	v17.16b, #0x0f
 	ld1	{v18.2d-v19.2d}, [x10],#32	// .Lk_inv
 	ld1	{v20.2d-v23.2d}, [x10],#64	// .Lk_ipt, .Lk_sbo
@@ -187,7 +190,8 @@
 _vpaes_encrypt_core:
 	mov	x9, $key
 	ldr	w8, [$key,#240]			// pull rounds
-	adr	x11, .Lk_mc_forward+16
+	adrp	x11, :pg_hi21:.Lk_mc_forward+16
+	add	x11, x11, :lo12:.Lk_mc_forward+16
 						// vmovdqa	.Lk_ipt(%rip),	%xmm2	# iptlo
 	ld1	{v16.2d}, [x9], #16		// vmovdqu	(%r9),	%xmm5		# round0 key
 	and	v1.16b, v7.16b, v17.16b		// vpand	%xmm9,	%xmm0,	%xmm1
@@ -272,7 +276,8 @@
 _vpaes_encrypt_2x:
 	mov	x9, $key
 	ldr	w8, [$key,#240]			// pull rounds
-	adr	x11, .Lk_mc_forward+16
+	adrp	x11, :pg_hi21:.Lk_mc_forward+16
+	add	x11, x11, :lo12:.Lk_mc_forward+16
 						// vmovdqa	.Lk_ipt(%rip),	%xmm2	# iptlo
 	ld1	{v16.2d}, [x9], #16		// vmovdqu	(%r9),	%xmm5		# round0 key
 	and	v1.16b,  v14.16b,  v17.16b	// vpand	%xmm9,	%xmm0,	%xmm1
@@ -375,9 +380,11 @@
 .type	_vpaes_decrypt_preheat,%function
 .align	4
 _vpaes_decrypt_preheat:
-	adr	x10, .Lk_inv
+	adrp	x10, :pg_hi21:.Lk_inv
+	add	x10, x10, :lo12:.Lk_inv
 	movi	v17.16b, #0x0f
-	adr	x11, .Lk_dipt
+	adrp	x11, :pg_hi21:.Lk_dipt
+	add	x11, x11, :lo12:.Lk_dipt
 	ld1	{v18.2d-v19.2d}, [x10],#32	// .Lk_inv
 	ld1	{v20.2d-v23.2d}, [x11],#64	// .Lk_dipt, .Lk_dsbo
 	ld1	{v24.2d-v27.2d}, [x11],#64	// .Lk_dsb9, .Lk_dsbd
@@ -399,10 +406,12 @@
 						// vmovdqa	.Lk_dipt(%rip), %xmm2	# iptlo
 	lsl	x11, x8, #4			// mov	%rax,	%r11;	shl	\$4, %r11
 	eor	x11, x11, #0x30			// xor		\$0x30,	%r11
-	adr	x10, .Lk_sr
+	adrp	x10, :pg_hi21:.Lk_sr
+	add	x10, x10, :lo12:.Lk_sr
 	and	x11, x11, #0x30			// and		\$0x30,	%r11
 	add	x11, x11, x10
-	adr	x10, .Lk_mc_forward+48
+	adrp	x10, :pg_hi21:.Lk_mc_forward+48
+	add	x10, x10, :lo12:.Lk_mc_forward+48
 
 	ld1	{v16.2d}, [x9],#16		// vmovdqu	(%r9),	%xmm4		# round0 key
 	and	v1.16b, v7.16b, v17.16b		// vpand	%xmm9,	%xmm0,	%xmm1
@@ -508,10 +517,12 @@
 						// vmovdqa	.Lk_dipt(%rip), %xmm2	# iptlo
 	lsl	x11, x8, #4			// mov	%rax,	%r11;	shl	\$4, %r11
 	eor	x11, x11, #0x30			// xor		\$0x30,	%r11
-	adr	x10, .Lk_sr
+	adrp	x10, :pg_hi21:.Lk_sr
+	add	x10, x10, :lo12:.Lk_sr
 	and	x11, x11, #0x30			// and		\$0x30,	%r11
 	add	x11, x11, x10
-	adr	x10, .Lk_mc_forward+48
+	adrp	x10, :pg_hi21:.Lk_mc_forward+48
+	add	x10, x10, :lo12:.Lk_mc_forward+48
 
 	ld1	{v16.2d}, [x9],#16		// vmovdqu	(%r9),	%xmm4		# round0 key
 	and	v1.16b,  v14.16b, v17.16b	// vpand	%xmm9,	%xmm0,	%xmm1
@@ -647,14 +658,18 @@
 .type	_vpaes_key_preheat,%function
 .align	4
 _vpaes_key_preheat:
-	adr	x10, .Lk_inv
+	adrp	x10, :pg_hi21:.Lk_inv
+	add	x10, x10, :lo12:.Lk_inv
 	movi	v16.16b, #0x5b			// .Lk_s63
-	adr	x11, .Lk_sb1
+	adrp	x11, :pg_hi21:.Lk_sb1
+	add	x11, x11, :lo12:.Lk_sb1
 	movi	v17.16b, #0x0f			// .Lk_s0F
 	ld1	{v18.2d-v21.2d}, [x10]		// .Lk_inv, .Lk_ipt
-	adr	x10, .Lk_dksd
+	adrp	x10, :pg_hi21:.Lk_dksd
+	add	x10, x10, :lo12:.Lk_dksd
 	ld1	{v22.2d-v23.2d}, [x11]		// .Lk_sb1
-	adr	x11, .Lk_mc_forward
+	adrp	x11, :pg_hi21:.Lk_mc_forward
+	add	x11, x11, :lo12:.Lk_mc_forward
 	ld1	{v24.2d-v27.2d}, [x10],#64	// .Lk_dksd, .Lk_dksb
 	ld1	{v28.2d-v31.2d}, [x10],#64	// .Lk_dkse, .Lk_dks9
 	ld1	{v8.2d}, [x10]			// .Lk_rcon
@@ -677,7 +692,9 @@
 	bl	_vpaes_schedule_transform
 	mov	v7.16b, v0.16b			// vmovdqa	%xmm0,	%xmm7
 
-	adr	x10, .Lk_sr			// lea	.Lk_sr(%rip),%r10
+	adrp	x10, :pg_hi21:.Lk_sr		// lea	.Lk_sr(%rip),%r10
+	add	x10, x10, :lo12:.Lk_sr
+
 	add	x8, x8, x10
 	cbnz	$dir, .Lschedule_am_decrypting
 
@@ -803,12 +820,15 @@
 .align	4
 .Lschedule_mangle_last:
 	// schedule last round key from xmm0
-	adr	x11, .Lk_deskew			// lea	.Lk_deskew(%rip),%r11	# prepare to deskew
+	adrp	x11, :pg_hi21:.Lk_deskew	// lea	.Lk_deskew(%rip),%r11	# prepare to deskew
+	add	x11, x11, :lo12:.Lk_deskew
+
 	cbnz	$dir, .Lschedule_mangle_last_dec
 
 	// encrypting
 	ld1	{v1.2d}, [x8]			// vmovdqa	(%r8,%r10),%xmm1
-	adr	x11, .Lk_opt			// lea	.Lk_opt(%rip),	%r11		# prepare to output transform
+	adrp	x11, :pg_hi21:.Lk_opt		// lea	.Lk_opt(%rip),	%r11		# prepare to output transform
+	add	x11, x11, :lo12:.Lk_opt
 	add	$out, $out, #32			// add	\$32,	%rdx
 	tbl	v0.16b, {v0.16b}, v1.16b	// vpshufb	%xmm1,	%xmm0,	%xmm0		# output permute