Remove explicit .hiddens from x86_64 perlasm files.
This reverts the non-ARM portions of 97999919bbe85ab213d283e18e597e028f8685d1.
x86_64 perlasm already makes .globl imply .hidden. (Confusingly, ARM does not.)
Since the explicit directives are unnecessary there, revert them to minimize
divergence from upstream.
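
For context, the implication lives in the shared x86_64 translator
(crypto/perlasm/x86_64-xlate.pl), which rewrites directives when emitting
ELF output. As a minimal sketch of the effect (a stand-alone filter written
for illustration, not the translator's actual code):

    #!/usr/bin/env perl
    # Illustration only: mimic the x86_64 perlasm rule that every .globl
    # symbol is also emitted as .hidden on ELF targets. The real translator
    # applies this while rewriting directives; this filter just shows the
    # effect on an assembly listing read from stdin.
    use strict;
    use warnings;

    my $flavour = $ARGV[0] // "elf";
    while (my $line = <STDIN>) {
        print $line;
        if ($flavour eq "elf" && $line =~ /^\s*\.globl\s+(\S+)/) {
            print ".hidden\t$1\n";    # .globl implies .hidden
        }
    }

With that rule in place, every exported symbol still picks up a .hidden
even after the explicit per-symbol directives below are removed.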
Change-Id: I2d205cfb1183e65d4f18a62bde187d206b1a96de
Reviewed-on: https://boringssl-review.googlesource.com/3610
Reviewed-by: Adam Langley <agl@google.com>
diff --git a/crypto/aes/asm/aes-x86_64.pl b/crypto/aes/asm/aes-x86_64.pl
index f1afbfb..4b6e1b4 100644
--- a/crypto/aes/asm/aes-x86_64.pl
+++ b/crypto/aes/asm/aes-x86_64.pl
@@ -1283,7 +1283,6 @@
.align 16
.globl asm_AES_set_encrypt_key
.type asm_AES_set_encrypt_key,\@function,3
-.hidden asm_AES_set_encrypt_key
asm_AES_set_encrypt_key:
push %rbx
push %rbp
@@ -1549,7 +1548,6 @@
.align 16
.globl asm_AES_set_decrypt_key
.type asm_AES_set_decrypt_key,\@function,3
-.hidden asm_AES_set_decrypt_key
asm_AES_set_decrypt_key:
push %rbx
push %rbp
diff --git a/crypto/aes/asm/aesni-x86_64.pl b/crypto/aes/asm/aesni-x86_64.pl
index 918e125..5f61746 100644
--- a/crypto/aes/asm/aesni-x86_64.pl
+++ b/crypto/aes/asm/aesni-x86_64.pl
@@ -256,7 +256,6 @@
$code.=<<___;
.globl ${PREFIX}_encrypt
.type ${PREFIX}_encrypt,\@abi-omnipotent
-.hidden ${PREFIX}_encrypt
.align 16
${PREFIX}_encrypt:
movups ($inp),$inout0 # load input
@@ -270,7 +269,6 @@
.globl ${PREFIX}_decrypt
.type ${PREFIX}_decrypt,\@abi-omnipotent
-.hidden ${PREFIX}_decrypt
.align 16
${PREFIX}_decrypt:
movups ($inp),$inout0 # load input
@@ -584,7 +582,6 @@
$code.=<<___;
.globl aesni_ecb_encrypt
.type aesni_ecb_encrypt,\@function,5
-.hidden aesni_ecb_encrypt
.align 16
aesni_ecb_encrypt:
___
@@ -909,7 +906,6 @@
$code.=<<___;
.globl aesni_ccm64_encrypt_blocks
.type aesni_ccm64_encrypt_blocks,\@function,6
-.hidden aesni_ccm64_encrypt_blocks
.align 16
aesni_ccm64_encrypt_blocks:
___
@@ -990,7 +986,6 @@
$code.=<<___;
.globl aesni_ccm64_decrypt_blocks
.type aesni_ccm64_decrypt_blocks,\@function,6
-.hidden aesni_ccm64_decrypt_blocks
.align 16
aesni_ccm64_decrypt_blocks:
___
@@ -1105,7 +1100,6 @@
$code.=<<___;
.globl aesni_ctr32_encrypt_blocks
.type aesni_ctr32_encrypt_blocks,\@function,5
-.hidden aesni_ctr32_encrypt_blocks
.align 16
aesni_ctr32_encrypt_blocks:
lea (%rsp),%rax
@@ -1617,7 +1611,6 @@
$code.=<<___;
.globl aesni_xts_encrypt
.type aesni_xts_encrypt,\@function,6
-.hidden aesni_xts_encrypt
.align 16
aesni_xts_encrypt:
lea (%rsp),%rax
@@ -2052,7 +2045,6 @@
$code.=<<___;
.globl aesni_xts_decrypt
.type aesni_xts_decrypt,\@function,6
-.hidden aesni_xts_decrypt
.align 16
aesni_xts_decrypt:
lea (%rsp),%rax
@@ -2524,7 +2516,6 @@
$code.=<<___;
.globl ${PREFIX}_cbc_encrypt
.type ${PREFIX}_cbc_encrypt,\@function,6
-.hidden ${PREFIX}_cbc_encrypt
.align 16
${PREFIX}_cbc_encrypt:
test $len,$len # check length
@@ -2982,7 +2973,6 @@
$code.=<<___;
.globl ${PREFIX}_set_decrypt_key
.type ${PREFIX}_set_decrypt_key,\@abi-omnipotent
-.hidden ${PREFIX}_set_decrypt_key
.align 16
${PREFIX}_set_decrypt_key:
.byte 0x48,0x83,0xEC,0x08 # sub rsp,8
@@ -3033,7 +3023,6 @@
$code.=<<___;
.globl ${PREFIX}_set_encrypt_key
.type ${PREFIX}_set_encrypt_key,\@abi-omnipotent
-.hidden ${PREFIX}_set_encrypt_key
.align 16
${PREFIX}_set_encrypt_key:
__aesni_set_encrypt_key:
diff --git a/crypto/aes/asm/bsaes-x86_64.pl b/crypto/aes/asm/bsaes-x86_64.pl
index 8c6e66a..3f7d33c 100644
--- a/crypto/aes/asm/bsaes-x86_64.pl
+++ b/crypto/aes/asm/bsaes-x86_64.pl
@@ -1049,7 +1049,6 @@
$code.=<<___;
.globl bsaes_enc_key_convert
.type bsaes_enc_key_convert,\@function,2
-.hidden bsaes_enc_key_convert
.align 16
bsaes_enc_key_convert:
mov 240($inp),%r10d # pass rounds
@@ -1063,7 +1062,6 @@
.globl bsaes_encrypt_128
.type bsaes_encrypt_128,\@function,4
-.hidden bsaes_encrypt_128
.align 16
bsaes_encrypt_128:
.Lenc128_loop:
@@ -1097,7 +1095,6 @@
.globl bsaes_dec_key_convert
.type bsaes_dec_key_convert,\@function,2
-.hidden bsaes_dec_key_convert
.align 16
bsaes_dec_key_convert:
mov 240($inp),%r10d # pass rounds
@@ -1112,7 +1109,6 @@
.globl bsaes_decrypt_128
.type bsaes_decrypt_128,\@function,4
-.hidden bsaes_decrypt_128
.align 16
bsaes_decrypt_128:
.Ldec128_loop:
@@ -1158,7 +1154,6 @@
$code.=<<___;
.globl bsaes_ecb_encrypt_blocks
.type bsaes_ecb_encrypt_blocks,\@abi-omnipotent
-.hidden bsaes_ecb_encrypt_blocks
.align 16
bsaes_ecb_encrypt_blocks:
mov %rsp, %rax
@@ -1360,7 +1355,6 @@
.globl bsaes_ecb_decrypt_blocks
.type bsaes_ecb_decrypt_blocks,\@abi-omnipotent
-.hidden bsaes_ecb_decrypt_blocks
.align 16
bsaes_ecb_decrypt_blocks:
mov %rsp, %rax
@@ -1566,7 +1560,6 @@
.extern asm_AES_cbc_encrypt
.globl bsaes_cbc_encrypt
.type bsaes_cbc_encrypt,\@abi-omnipotent
-.hidden bsaes_cbc_encrypt
.align 16
bsaes_cbc_encrypt:
___
@@ -1854,7 +1847,6 @@
.globl bsaes_ctr32_encrypt_blocks
.type bsaes_ctr32_encrypt_blocks,\@abi-omnipotent
-.hidden bsaes_ctr32_encrypt_blocks
.align 16
bsaes_ctr32_encrypt_blocks:
mov %rsp, %rax
@@ -2096,7 +2088,6 @@
$code.=<<___;
.globl bsaes_xts_encrypt
.type bsaes_xts_encrypt,\@abi-omnipotent
-.hidden bsaes_xts_encrypt
.align 16
bsaes_xts_encrypt:
mov %rsp, %rax
@@ -2478,7 +2469,6 @@
.globl bsaes_xts_decrypt
.type bsaes_xts_decrypt,\@abi-omnipotent
-.hidden bsaes_xts_decrypt
.align 16
bsaes_xts_decrypt:
mov %rsp, %rax
diff --git a/crypto/aes/asm/vpaes-x86_64.pl b/crypto/aes/asm/vpaes-x86_64.pl
index a647b92..f2ef318 100644
--- a/crypto/aes/asm/vpaes-x86_64.pl
+++ b/crypto/aes/asm/vpaes-x86_64.pl
@@ -671,7 +671,6 @@
#
.globl ${PREFIX}_set_encrypt_key
.type ${PREFIX}_set_encrypt_key,\@function,3
-.hidden ${PREFIX}_set_encrypt_key
.align 16
${PREFIX}_set_encrypt_key:
___
@@ -720,7 +719,6 @@
.globl ${PREFIX}_set_decrypt_key
.type ${PREFIX}_set_decrypt_key,\@function,3
-.hidden ${PREFIX}_set_decrypt_key
.align 16
${PREFIX}_set_decrypt_key:
___
@@ -774,7 +772,6 @@
.globl ${PREFIX}_encrypt
.type ${PREFIX}_encrypt,\@function,3
-.hidden ${PREFIX}_encrypt
.align 16
${PREFIX}_encrypt:
___
@@ -818,7 +815,6 @@
.globl ${PREFIX}_decrypt
.type ${PREFIX}_decrypt,\@function,3
-.hidden ${PREFIX}_decrypt
.align 16
${PREFIX}_decrypt:
___
@@ -868,7 +864,6 @@
$code.=<<___;
.globl ${PREFIX}_cbc_encrypt
.type ${PREFIX}_cbc_encrypt,\@function,6
-.hidden ${PREFIX}_cbc_encrypt
.align 16
${PREFIX}_cbc_encrypt:
xchg $key,$len
diff --git a/crypto/bn/asm/rsaz-avx2.pl b/crypto/bn/asm/rsaz-avx2.pl
index 9a9223b..3b6ccf8 100644
--- a/crypto/bn/asm/rsaz-avx2.pl
+++ b/crypto/bn/asm/rsaz-avx2.pl
@@ -159,7 +159,6 @@
.globl rsaz_1024_sqr_avx2
.type rsaz_1024_sqr_avx2,\@function,5
-.hidden rsaz_1024_sqr_avx2
.align 64
rsaz_1024_sqr_avx2: # 702 cycles, 14% faster than rsaz_1024_mul_avx2
lea (%rsp), %rax
@@ -892,7 +891,6 @@
$code.=<<___;
.globl rsaz_1024_mul_avx2
.type rsaz_1024_mul_avx2,\@function,5
-.hidden rsaz_1024_mul_avx2
.align 64
rsaz_1024_mul_avx2:
lea (%rsp), %rax
@@ -1486,7 +1484,6 @@
$code.=<<___;
.globl rsaz_1024_red2norm_avx2
.type rsaz_1024_red2norm_avx2,\@abi-omnipotent
-.hidden rsaz_1024_red2norm_avx2
.align 32
rsaz_1024_red2norm_avx2:
sub \$-128,$inp # size optimization
@@ -1526,7 +1523,6 @@
.globl rsaz_1024_norm2red_avx2
.type rsaz_1024_norm2red_avx2,\@abi-omnipotent
-.hidden rsaz_1024_norm2red_avx2
.align 32
rsaz_1024_norm2red_avx2:
sub \$-128,$out # size optimization
@@ -1569,7 +1565,6 @@
$code.=<<___;
.globl rsaz_1024_scatter5_avx2
.type rsaz_1024_scatter5_avx2,\@abi-omnipotent
-.hidden rsaz_1024_scatter5_avx2
.align 32
rsaz_1024_scatter5_avx2:
vzeroupper
@@ -1595,7 +1590,6 @@
.globl rsaz_1024_gather5_avx2
.type rsaz_1024_gather5_avx2,\@abi-omnipotent
-.hidden rsaz_1024_gather5_avx2
.align 32
rsaz_1024_gather5_avx2:
___
@@ -1690,7 +1684,6 @@
.extern OPENSSL_ia32cap_P
.globl rsaz_avx2_eligible
.type rsaz_avx2_eligible,\@abi-omnipotent
-.hidden rsaz_avx2_eligible
.align 32
rsaz_avx2_eligible:
mov OPENSSL_ia32cap_P+8(%rip),%eax
@@ -1878,7 +1871,6 @@
.globl rsaz_avx2_eligible
.type rsaz_avx2_eligible,\@abi-omnipotent
-.hidden rsaz_avx2_eligible
rsaz_avx2_eligible:
xor %eax,%eax
ret
@@ -1890,12 +1882,6 @@
.globl rsaz_1024_red2norm_avx2
.globl rsaz_1024_scatter5_avx2
.globl rsaz_1024_gather5_avx2
-.hidden rsaz_1024_sqr_avx2
-.hidden rsaz_1024_mul_avx2
-.hidden rsaz_1024_norm2red_avx2
-.hidden rsaz_1024_red2norm_avx2
-.hidden rsaz_1024_scatter5_avx2
-.hidden rsaz_1024_gather5_avx2
.type rsaz_1024_sqr_avx2,\@abi-omnipotent
rsaz_1024_sqr_avx2:
rsaz_1024_mul_avx2:
diff --git a/crypto/bn/asm/x86_64-mont.pl b/crypto/bn/asm/x86_64-mont.pl
index 38af80a..39476ab 100644
--- a/crypto/bn/asm/x86_64-mont.pl
+++ b/crypto/bn/asm/x86_64-mont.pl
@@ -90,7 +90,6 @@
.globl bn_mul_mont
.type bn_mul_mont,\@function,6
-.hidden bn_mul_mont
.align 16
bn_mul_mont:
test \$3,${num}d
diff --git a/crypto/cpu-x86_64-asm.pl b/crypto/cpu-x86_64-asm.pl
index af1c7a5..89d7a6c 100644
--- a/crypto/cpu-x86_64-asm.pl
+++ b/crypto/cpu-x86_64-asm.pl
@@ -22,7 +22,6 @@
.globl OPENSSL_ia32_cpuid
.type OPENSSL_ia32_cpuid,\@function,1
-.hidden OPENSSL_ia32_cpuid
.align 16
OPENSSL_ia32_cpuid:
# On Windows, $arg1 is rcx, but that will be clobbered. So make Windows
diff --git a/crypto/md5/asm/md5-x86_64.pl b/crypto/md5/asm/md5-x86_64.pl
index 45f23c0..77a6e01 100644
--- a/crypto/md5/asm/md5-x86_64.pl
+++ b/crypto/md5/asm/md5-x86_64.pl
@@ -129,7 +129,6 @@
.globl md5_block_asm_data_order
.type md5_block_asm_data_order,\@function,3
-.hidden md5_block_asm_data_order
md5_block_asm_data_order:
push %rbp
push %rbx
diff --git a/crypto/modes/asm/aesni-gcm-x86_64.pl b/crypto/modes/asm/aesni-gcm-x86_64.pl
index f4ff1f5..7e4e04e 100644
--- a/crypto/modes/asm/aesni-gcm-x86_64.pl
+++ b/crypto/modes/asm/aesni-gcm-x86_64.pl
@@ -397,7 +397,6 @@
$code.=<<___;
.globl aesni_gcm_decrypt
.type aesni_gcm_decrypt,\@function,6
-.hidden aesni_gcm_decrypt
.align 32
aesni_gcm_decrypt:
xor $ret,$ret
@@ -608,7 +607,6 @@
.globl aesni_gcm_encrypt
.type aesni_gcm_encrypt,\@function,6
-.hidden aesni_gcm_encrypt
.align 32
aesni_gcm_encrypt:
xor $ret,$ret
@@ -1038,7 +1036,6 @@
.globl aesni_gcm_encrypt
.type aesni_gcm_encrypt,\@abi-omnipotent
-.hidden aesni_gcm_encrypt
aesni_gcm_encrypt:
xor %eax,%eax
ret
@@ -1046,7 +1043,6 @@
.globl aesni_gcm_decrypt
.type aesni_gcm_decrypt,\@abi-omnipotent
-.hidden aesni_gcm_decrypt
aesni_gcm_decrypt:
xor %eax,%eax
ret
diff --git a/crypto/modes/asm/ghash-x86_64.pl b/crypto/modes/asm/ghash-x86_64.pl
index aacce2d..6e656ca 100644
--- a/crypto/modes/asm/ghash-x86_64.pl
+++ b/crypto/modes/asm/ghash-x86_64.pl
@@ -225,7 +225,6 @@
.globl gcm_gmult_4bit
.type gcm_gmult_4bit,\@function,2
-.hidden gcm_gmult_4bit
.align 16
gcm_gmult_4bit:
push %rbx
@@ -256,7 +255,6 @@
$code.=<<___;
.globl gcm_ghash_4bit
.type gcm_ghash_4bit,\@function,4
-.hidden gcm_ghash_4bit
.align 16
gcm_ghash_4bit:
push %rbx
@@ -484,7 +482,6 @@
$code.=<<___;
.globl gcm_init_clmul
.type gcm_init_clmul,\@abi-omnipotent
-.hidden gcm_init_clmul
.align 16
gcm_init_clmul:
.L_init_clmul:
@@ -565,7 +562,6 @@
$code.=<<___;
.globl gcm_gmult_clmul
.type gcm_gmult_clmul,\@abi-omnipotent
-.hidden gcm_gmult_clmul
.align 16
gcm_gmult_clmul:
.L_gmult_clmul:
@@ -615,7 +611,6 @@
$code.=<<___;
.globl gcm_ghash_clmul
.type gcm_ghash_clmul,\@abi-omnipotent
-.hidden gcm_ghash_clmul
.align 32
gcm_ghash_clmul:
.L_ghash_clmul:
@@ -972,7 +967,6 @@
$code.=<<___;
.globl gcm_init_avx
.type gcm_init_avx,\@abi-omnipotent
-.hidden gcm_init_avx
.align 32
gcm_init_avx:
___
@@ -1115,7 +1109,6 @@
$code.=<<___;
.globl gcm_gmult_avx
.type gcm_gmult_avx,\@abi-omnipotent
-.hidden gcm_gmult_avx
.align 32
gcm_gmult_avx:
jmp .L_gmult_clmul
@@ -1125,7 +1118,6 @@
$code.=<<___;
.globl gcm_ghash_avx
.type gcm_ghash_avx,\@abi-omnipotent
-.hidden gcm_ghash_avx
.align 32
gcm_ghash_avx:
___
diff --git a/crypto/rc4/asm/rc4-md5-x86_64.pl b/crypto/rc4/asm/rc4-md5-x86_64.pl
index 8ebf405..272fa91 100644
--- a/crypto/rc4/asm/rc4-md5-x86_64.pl
+++ b/crypto/rc4/asm/rc4-md5-x86_64.pl
@@ -110,7 +110,6 @@
.globl $func
.type $func,\@function,$nargs
-.hidden $func
$func:
cmp \$0,$len
je .Labort
diff --git a/crypto/rc4/asm/rc4-x86_64.pl b/crypto/rc4/asm/rc4-x86_64.pl
index 14e4da1..db46242 100644
--- a/crypto/rc4/asm/rc4-x86_64.pl
+++ b/crypto/rc4/asm/rc4-x86_64.pl
@@ -127,7 +127,6 @@
.globl asm_RC4
.type asm_RC4,\@function,4
-.hidden asm_RC4
.align 16
asm_RC4:
or $len,$len
@@ -434,7 +433,6 @@
$code.=<<___;
.globl asm_RC4_set_key
.type asm_RC4_set_key,\@function,3
-.hidden asm_RC4_set_key
.align 16
asm_RC4_set_key:
lea 8($dat),$dat
diff --git a/crypto/sha/asm/sha512-x86_64.pl b/crypto/sha/asm/sha512-x86_64.pl
index 93f0c9c..6660a88 100644
--- a/crypto/sha/asm/sha512-x86_64.pl
+++ b/crypto/sha/asm/sha512-x86_64.pl
@@ -258,7 +258,6 @@
.extern OPENSSL_ia32cap_P
.globl $func
.type $func,\@function,3
-.hidden $func
.align 16
$func:
___