Rename avx10 and avx10_512 to avx512

Intel has made a late change to the AVX10 specification that removes
support for maximum vector lengths other than 512 bits.  Therefore,
there won't be any such thing as AVX10/256.  AVX10 will simply be what
was originally planned to be called AVX10/512, effectively consolidating
the AVX512 CPUID feature bits.

In light of this development and the fact that the 256-bit support in
aes-gcm-avx10-x86_64.pl didn't end up being used, the early adoption of
the "avx10", "avx10_256", and "avx10_512" names no longer makes sense.
So let's just use "avx512" instead.

Rename file names and function names accordingly, and update some
comments.  No functional changes.
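
For reference, a minimal sketch (not part of this change) of the
capability check that now gates the renamed code path.  It is a
composite of the two conditions visible in gcm.cc.inc below, and the
helper name is hypothetical:

    // Hypothetical composite of the dispatch conditions in gcm.cc.inc:
    // the avx512 functions require VAES, VPCLMULQDQ, AVX512BW, AVX512VL,
    // and BMI2, and are skipped on CPUs that penalize ZMM use.
    static bool use_vaes_avx512(void) {
      return CRYPTO_is_VAES_capable() && CRYPTO_is_VPCLMULQDQ_capable() &&
             CRYPTO_is_AVX512BW_capable() && CRYPTO_is_AVX512VL_capable() &&
             CRYPTO_is_BMI2_capable() && !CRYPTO_cpu_avoid_zmm_registers();
    }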

Change-Id: I2d59912b72d5ca0679b0ea54ae770a672ca36dea
Reviewed-on: https://boringssl-review.googlesource.com/c/boringssl/+/77847
Reviewed-by: David Benjamin <davidben@google.com>
Commit-Queue: David Benjamin <davidben@google.com>
diff --git a/build.json b/build.json
index 63dca5c..78ae3bd 100644
--- a/build.json
+++ b/build.json
@@ -142,8 +142,8 @@
         ],
         "perlasm_x86_64": [
             {"src": "crypto/fipsmodule/aes/asm/aesni-gcm-x86_64.pl"},
-            {"src": "crypto/fipsmodule/aes/asm/aes-gcm-avx10-x86_64.pl"},
             {"src": "crypto/fipsmodule/aes/asm/aes-gcm-avx2-x86_64.pl"},
+            {"src": "crypto/fipsmodule/aes/asm/aes-gcm-avx512-x86_64.pl"},
             {"src": "crypto/fipsmodule/aes/asm/aesni-x86_64.pl"},
             {"src": "crypto/fipsmodule/aes/asm/ghash-ssse3-x86_64.pl"},
             {"src": "crypto/fipsmodule/aes/asm/ghash-x86_64.pl"},
diff --git a/crypto/cpu_intel.cc b/crypto/cpu_intel.cc
index 05acad2..0648bdb 100644
--- a/crypto/cpu_intel.cc
+++ b/crypto/cpu_intel.cc
@@ -207,10 +207,6 @@
     // 128-bit or 256-bit vectors, and also volume 2a section 2.7.11 ("#UD
     // Equations for EVEX") which says that all EVEX-coded instructions raise an
     // undefined-instruction exception if any of these XCR0 bits is zero.
-    //
-    // AVX10 fixes this by reorganizing the features that used to be part of
-    // "AVX512" and allowing them to be used independently of 512-bit support.
-    // TODO: add AVX10 detection.
     extended_features[0] &= ~(1u << 16);  // AVX512F
     extended_features[0] &= ~(1u << 17);  // AVX512DQ
     extended_features[0] &= ~(1u << 21);  // AVX512IFMA
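
(For context, a standalone sketch of the masking that this hunk sits
in; the helper name is hypothetical.  Per the SDM sections cited above,
EVEX-coded instructions raise #UD unless XCR0 bits 5 (opmask), 6
(ZMM_Hi256), and 7 (Hi16_ZMM) are all set, so the AVX512 feature bits
are cleared when they are not.)

    // Sketch: clear the AVX512 CPUID feature bits when the OS has not
    // enabled the opmask/ZMM XCR0 state, since EVEX-coded instructions
    // would fault.
    static void mask_avx512_features(uint64_t xcr0,
                                     uint32_t extended_features[2]) {
      const uint64_t kZmmXcr0 = (1u << 5) | (1u << 6) | (1u << 7);
      if ((xcr0 & kZmmXcr0) != kZmmXcr0) {
        extended_features[0] &= ~(1u << 16);  // AVX512F
        extended_features[0] &= ~(1u << 17);  // AVX512DQ
        extended_features[0] &= ~(1u << 21);  // AVX512IFMA
        // ...and the remaining AVX512 bits, as in the context above.
      }
    }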
diff --git a/crypto/fipsmodule/aes/asm/aes-gcm-avx2-x86_64.pl b/crypto/fipsmodule/aes/asm/aes-gcm-avx2-x86_64.pl
index 76d1152..7a9d228 100644
--- a/crypto/fipsmodule/aes/asm/aes-gcm-avx2-x86_64.pl
+++ b/crypto/fipsmodule/aes/asm/aes-gcm-avx2-x86_64.pl
@@ -15,14 +15,15 @@
 #
 #------------------------------------------------------------------------------
 #
-# VAES and VPCLMULQDQ optimized AES-GCM for x86_64 (AVX2 version)
+# This is an AES-GCM implementation for x86_64 CPUs that support the following
+# CPU features: VAES && VPCLMULQDQ && AVX2.
 #
-# This is similar to aes-gcm-avx10-x86_64.pl, but it uses AVX2 instead of AVX512
-# / AVX10.  This means it can only use 16 vector registers instead of 32, the
+# This is similar to aes-gcm-avx512-x86_64.pl, but it uses AVX2 instead of
+# AVX512.  This means it can only use 16 vector registers instead of 32, the
 # maximum vector length is 32 bytes, and some instructions such as vpternlogd
 # and masked loads/stores are unavailable.  However, it is able to run on CPUs
-# that have VAES without AVX512 / AVX10, namely AMD Zen 3 (including "Milan"
-# server processors) and some Intel client CPUs such as Alder Lake.
+# that have VAES without AVX512, namely AMD Zen 3 (including "Milan" server
+# processors) and some Intel client CPUs such as Alder Lake.
 #
 # This implementation also uses Karatsuba multiplication instead of schoolbook
 # multiplication for GHASH in its main loop.  This does not help much on Intel,
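
(To make the Karatsuba remark concrete: in GF(2)[x] addition is XOR, so
a 128x128-bit carryless product needs three 64-bit carryless multiplies
instead of schoolbook's four.  A portable sketch, where clmul64 is a
bit-by-bit stand-in for one vpclmulqdq lane and all names are
illustrative:)

    #include <stdint.h>

    static unsigned __int128 clmul64(uint64_t a, uint64_t b) {
      unsigned __int128 acc = 0;  // 64x64 carryless product fits in 127 bits
      for (int i = 0; i < 64; i++) {
        if ((b >> i) & 1) acc ^= (unsigned __int128)a << i;
      }
      return acc;
    }

    // With a = a1*x^64 + a0 and b = b1*x^64 + b0 over GF(2):
    //   a*b = A*x^128 ^ M*x^64 ^ B
    // where A = a1*b1, B = a0*b0, M = (a1^a0)*(b1^b0) ^ A ^ B.
    static void clmul128_karatsuba(uint64_t a1, uint64_t a0, uint64_t b1,
                                   uint64_t b0, unsigned __int128 out[2]) {
      unsigned __int128 A = clmul64(a1, b1);
      unsigned __int128 B = clmul64(a0, b0);
      unsigned __int128 M = clmul64(a1 ^ a0, b1 ^ b0) ^ A ^ B;
      out[0] = B ^ (M << 64);  // low 128 bits of the product
      out[1] = A ^ (M >> 64);  // high 128 bits
    }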
diff --git a/crypto/fipsmodule/aes/asm/aes-gcm-avx10-x86_64.pl b/crypto/fipsmodule/aes/asm/aes-gcm-avx512-x86_64.pl
similarity index 95%
rename from crypto/fipsmodule/aes/asm/aes-gcm-avx10-x86_64.pl
rename to crypto/fipsmodule/aes/asm/aes-gcm-avx512-x86_64.pl
index ccc15b6..99c2c27 100644
--- a/crypto/fipsmodule/aes/asm/aes-gcm-avx10-x86_64.pl
+++ b/crypto/fipsmodule/aes/asm/aes-gcm-avx512-x86_64.pl
@@ -15,7 +15,8 @@
 #
 #------------------------------------------------------------------------------
 #
-# VAES and VPCLMULQDQ optimized AES-GCM for x86_64
+# This is an AES-GCM implementation for x86_64 CPUs that support the following
+# CPU features: VAES && VPCLMULQDQ && AVX512BW && AVX512VL && BMI2.
 #
 # This file is based on aes-gcm-avx10-x86_64.S from the Linux kernel
 # (https://git.kernel.org/linus/b06affb1cb580e13).  The following notable
@@ -38,38 +39,7 @@
 #
 # - Added optimization for large amounts of AAD.
 #
-#------------------------------------------------------------------------------
-#
-# This file implements AES-GCM (Galois/Counter Mode) for x86_64 CPUs that
-# support VAES (vector AES), VPCLMULQDQ (vector carryless multiplication), and
-# either AVX512 or AVX10.  Some of the functions, notably the encryption and
-# decryption update functions which are the most performance-critical, are
-# provided in two variants generated from a macro: one using 256-bit vectors
-# (suffix: vaes_avx10_256) and one using 512-bit vectors (vaes_avx10_512).  The
-# other, "shared" functions (vaes_avx10) use at most 256-bit vectors.
-#
-# The functions that use 512-bit vectors are intended for CPUs that support
-# 512-bit vectors *and* where using them doesn't cause significant
-# downclocking.  They require the following CPU features:
-#
-#       VAES && VPCLMULQDQ && BMI2 && ((AVX512BW && AVX512VL) || AVX10/512)
-#
-# The other functions require the following CPU features:
-#
-#       VAES && VPCLMULQDQ && BMI2 && ((AVX512BW && AVX512VL) || AVX10/256)
-#
-# Note that we use "avx10" in the names of the functions as a shorthand to
-# really mean "AVX10 or a certain set of AVX512 features".  Due to Intel's
-# introduction of AVX512 and then its replacement by AVX10, there doesn't seem
-# to be a simple way to name things that makes sense on all CPUs.
-#
-# Note that the macros that support both 256-bit and 512-bit vectors could
-# fairly easily be changed to support 128-bit too.  However, this would *not*
-# be sufficient to allow the code to run on CPUs without AVX512 or AVX10,
-# because the code heavily uses several features of these extensions other than
-# the vector length: the increase in the number of SIMD registers from 16 to
-# 32, masking support, and new instructions such as vpternlogd (which can do a
-# three-argument XOR).  These features are very useful for AES-GCM.
+# - Removed support for maximum vector lengths other than 512 bits.
 
 use strict;
 
@@ -708,8 +678,8 @@
     return $code;
 }
 
-# void gcm_gmult_vpclmulqdq_avx10(uint8_t Xi[16], const u128 Htable[16]);
-$code .= _begin_func "gcm_gmult_vpclmulqdq_avx10", 1;
+# void gcm_gmult_vpclmulqdq_avx512(uint8_t Xi[16], const u128 Htable[16]);
+$code .= _begin_func "gcm_gmult_vpclmulqdq_avx512", 1;
 {
     my ( $GHASH_ACC_PTR, $HTABLE ) = @argregs[ 0 .. 1 ];
     my ( $GHASH_ACC, $BSWAP_MASK, $H_POW1, $GFPOLY, $T0, $T1, $T2 ) =
@@ -1406,19 +1376,19 @@
 
 _set_veclen 64;
 
-$code .= _begin_func "gcm_init_vpclmulqdq_avx10_512", 0;
+$code .= _begin_func "gcm_init_vpclmulqdq_avx512", 0;
 $code .= _aes_gcm_init;
 $code .= _end_func;
 
-$code .= _begin_func "gcm_ghash_vpclmulqdq_avx10_512", 1;
+$code .= _begin_func "gcm_ghash_vpclmulqdq_avx512", 1;
 $code .= _ghash_update;
 $code .= _end_func;
 
-$code .= _begin_func "aes_gcm_enc_update_vaes_avx10_512", 1;
+$code .= _begin_func "aes_gcm_enc_update_vaes_avx512", 1;
 $code .= _aes_gcm_update 1;
 $code .= _end_func;
 
-$code .= _begin_func "aes_gcm_dec_update_vaes_avx10_512", 1;
+$code .= _begin_func "aes_gcm_dec_update_vaes_avx512", 1;
 $code .= _aes_gcm_update 0;
 $code .= _end_func;
 
diff --git a/crypto/fipsmodule/aes/gcm.cc.inc b/crypto/fipsmodule/aes/gcm.cc.inc
index 0b9b2ac..30855ee 100644
--- a/crypto/fipsmodule/aes/gcm.cc.inc
+++ b/crypto/fipsmodule/aes/gcm.cc.inc
@@ -109,9 +109,9 @@
       aes_gcm_enc_update_vaes_avx2(in, out, len, key, ivec, Htable, Xi);
       CRYPTO_store_u32_be(&ivec[12], CRYPTO_load_u32_be(&ivec[12]) + len / 16);
       return len;
-    case gcm_x86_vaes_avx10_512:
+    case gcm_x86_vaes_avx512:
       len &= kSizeTWithoutLower4Bits;
-      aes_gcm_enc_update_vaes_avx10_512(in, out, len, key, ivec, Htable, Xi);
+      aes_gcm_enc_update_vaes_avx512(in, out, len, key, ivec, Htable, Xi);
       CRYPTO_store_u32_be(&ivec[12], CRYPTO_load_u32_be(&ivec[12]) + len / 16);
       return len;
     default:
@@ -129,9 +129,9 @@
       aes_gcm_dec_update_vaes_avx2(in, out, len, key, ivec, Htable, Xi);
       CRYPTO_store_u32_be(&ivec[12], CRYPTO_load_u32_be(&ivec[12]) + len / 16);
       return len;
-    case gcm_x86_vaes_avx10_512:
+    case gcm_x86_vaes_avx512:
       len &= kSizeTWithoutLower4Bits;
-      aes_gcm_dec_update_vaes_avx10_512(in, out, len, key, ivec, Htable, Xi);
+      aes_gcm_dec_update_vaes_avx512(in, out, len, key, ivec, Htable, Xi);
       CRYPTO_store_u32_be(&ivec[12], CRYPTO_load_u32_be(&ivec[12]) + len / 16);
       return len;
     default:
@@ -179,9 +179,9 @@
     if (CRYPTO_is_VPCLMULQDQ_capable() && CRYPTO_is_AVX2_capable()) {
       if (CRYPTO_is_AVX512BW_capable() && CRYPTO_is_AVX512VL_capable() &&
           CRYPTO_is_BMI2_capable() && !CRYPTO_cpu_avoid_zmm_registers()) {
-        gcm_init_vpclmulqdq_avx10_512(out_table, H);
-        *out_mult = gcm_gmult_vpclmulqdq_avx10;
-        *out_hash = gcm_ghash_vpclmulqdq_avx10_512;
+        gcm_init_vpclmulqdq_avx512(out_table, H);
+        *out_mult = gcm_gmult_vpclmulqdq_avx512;
+        *out_hash = gcm_ghash_vpclmulqdq_avx512;
         return;
       }
       gcm_init_vpclmulqdq_avx2(out_table, H);
@@ -266,9 +266,9 @@
 
 #if !defined(OPENSSL_NO_ASM)
 #if defined(OPENSSL_X86_64)
-  if (gcm_key->ghash == gcm_ghash_vpclmulqdq_avx10_512 &&
+  if (gcm_key->ghash == gcm_ghash_vpclmulqdq_avx512 &&
       CRYPTO_is_VAES_capable()) {
-    gcm_key->impl = gcm_x86_vaes_avx10_512;
+    gcm_key->impl = gcm_x86_vaes_avx512;
   } else if (gcm_key->ghash == gcm_ghash_vpclmulqdq_avx2 &&
              CRYPTO_is_VAES_capable()) {
     gcm_key->impl = gcm_x86_vaes_avx2;
diff --git a/crypto/fipsmodule/aes/gcm_test.cc b/crypto/fipsmodule/aes/gcm_test.cc
index 5b8ce5e..4a40fad 100644
--- a/crypto/fipsmodule/aes/gcm_test.cc
+++ b/crypto/fipsmodule/aes/gcm_test.cc
@@ -111,26 +111,25 @@
       static const uint8_t kKey[16] = {0};
       uint8_t iv[16] = {0};
 
-      CHECK_ABI_SEH(gcm_init_vpclmulqdq_avx10_512, Htable, kH);
-      CHECK_ABI_SEH(gcm_gmult_vpclmulqdq_avx10, X, Htable);
+      CHECK_ABI_SEH(gcm_init_vpclmulqdq_avx512, Htable, kH);
+      CHECK_ABI_SEH(gcm_gmult_vpclmulqdq_avx512, X, Htable);
       for (size_t blocks : kBlockCounts) {
-        CHECK_ABI_SEH(gcm_ghash_vpclmulqdq_avx10_512, X, Htable, buf,
-                      16 * blocks);
+        CHECK_ABI_SEH(gcm_ghash_vpclmulqdq_avx512, X, Htable, buf, 16 * blocks);
       }
 
       aes_hw_set_encrypt_key(kKey, 128, &aes_key);
       for (size_t blocks : kBlockCounts) {
-        CHECK_ABI_SEH(aes_gcm_enc_update_vaes_avx10_512, buf, buf, blocks * 16,
+        CHECK_ABI_SEH(aes_gcm_enc_update_vaes_avx512, buf, buf, blocks * 16,
                       &aes_key, iv, Htable, X);
-        CHECK_ABI_SEH(aes_gcm_enc_update_vaes_avx10_512, buf, buf,
-                      blocks * 16 + 7, &aes_key, iv, Htable, X);
+        CHECK_ABI_SEH(aes_gcm_enc_update_vaes_avx512, buf, buf, blocks * 16 + 7,
+                      &aes_key, iv, Htable, X);
       }
       aes_hw_set_decrypt_key(kKey, 128, &aes_key);
       for (size_t blocks : kBlockCounts) {
-        CHECK_ABI_SEH(aes_gcm_dec_update_vaes_avx10_512, buf, buf, blocks * 16,
+        CHECK_ABI_SEH(aes_gcm_dec_update_vaes_avx512, buf, buf, blocks * 16,
                       &aes_key, iv, Htable, X);
-        CHECK_ABI_SEH(aes_gcm_dec_update_vaes_avx10_512, buf, buf,
-                      blocks * 16 + 7, &aes_key, iv, Htable, X);
+        CHECK_ABI_SEH(aes_gcm_dec_update_vaes_avx512, buf, buf, blocks * 16 + 7,
+                      &aes_key, iv, Htable, X);
       }
     }
 #endif  // GHASH_ASM_X86_64
diff --git a/crypto/fipsmodule/aes/internal.h b/crypto/fipsmodule/aes/internal.h
index 6bb6e48..b71c20d 100644
--- a/crypto/fipsmodule/aes/internal.h
+++ b/crypto/fipsmodule/aes/internal.h
@@ -313,7 +313,7 @@
   gcm_separate = 0,  // No combined AES-GCM, but may have AES-CTR and GHASH.
   gcm_x86_aesni,
   gcm_x86_vaes_avx2,
-  gcm_x86_vaes_avx10_512,
+  gcm_x86_vaes_avx512,
   gcm_arm64_aes,
 };
 
@@ -454,18 +454,16 @@
                                   const AES_KEY *key, const uint8_t ivec[16],
                                   const u128 Htable[16], uint8_t Xi[16]);
 
-void gcm_init_vpclmulqdq_avx10_512(u128 Htable[16], const uint64_t H[2]);
-void gcm_gmult_vpclmulqdq_avx10(uint8_t Xi[16], const u128 Htable[16]);
-void gcm_ghash_vpclmulqdq_avx10_512(uint8_t Xi[16], const u128 Htable[16],
-                                    const uint8_t *in, size_t len);
-void aes_gcm_enc_update_vaes_avx10_512(const uint8_t *in, uint8_t *out,
-                                       size_t len, const AES_KEY *key,
-                                       const uint8_t ivec[16],
-                                       const u128 Htable[16], uint8_t Xi[16]);
-void aes_gcm_dec_update_vaes_avx10_512(const uint8_t *in, uint8_t *out,
-                                       size_t len, const AES_KEY *key,
-                                       const uint8_t ivec[16],
-                                       const u128 Htable[16], uint8_t Xi[16]);
+void gcm_init_vpclmulqdq_avx512(u128 Htable[16], const uint64_t H[2]);
+void gcm_gmult_vpclmulqdq_avx512(uint8_t Xi[16], const u128 Htable[16]);
+void gcm_ghash_vpclmulqdq_avx512(uint8_t Xi[16], const u128 Htable[16],
+                                 const uint8_t *in, size_t len);
+void aes_gcm_enc_update_vaes_avx512(const uint8_t *in, uint8_t *out, size_t len,
+                                    const AES_KEY *key, const uint8_t ivec[16],
+                                    const u128 Htable[16], uint8_t Xi[16]);
+void aes_gcm_dec_update_vaes_avx512(const uint8_t *in, uint8_t *out, size_t len,
+                                    const AES_KEY *key, const uint8_t ivec[16],
+                                    const u128 Htable[16], uint8_t Xi[16]);
 
 #endif  // OPENSSL_X86_64
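
(A minimal usage sketch for the renamed entry points, assuming a CPU
that passes the dispatch checks; the helper name is illustrative and
len must be a multiple of 16.)

    static void ghash_avx512_example(const uint64_t H[2], const uint8_t *in,
                                     size_t len, uint8_t Xi[16]) {
      u128 Htable[16];
      gcm_init_vpclmulqdq_avx512(Htable, H);    // precompute powers of H
      gcm_ghash_vpclmulqdq_avx512(Xi, Htable, in, len);
      gcm_gmult_vpclmulqdq_avx512(Xi, Htable);  // one extra mult of Xi by H
    }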
 
diff --git a/crypto/impl_dispatch_test.cc b/crypto/impl_dispatch_test.cc
index 7172a3c..60afa81 100644
--- a/crypto/impl_dispatch_test.cc
+++ b/crypto/impl_dispatch_test.cc
@@ -38,8 +38,8 @@
     ssse3_ = CRYPTO_is_SSSE3_capable();
     vaes_ = CRYPTO_is_VAES_capable() && CRYPTO_is_VPCLMULQDQ_capable() &&
             CRYPTO_is_AVX2_capable();
-    avx10_ = CRYPTO_is_AVX512BW_capable() && CRYPTO_is_AVX512VL_capable() &&
-             CRYPTO_is_BMI2_capable();
+    avx512_ = CRYPTO_is_AVX512BW_capable() && CRYPTO_is_AVX512VL_capable() &&
+              CRYPTO_is_BMI2_capable();
     avoid_zmm_ = CRYPTO_cpu_avoid_zmm_registers();
     is_x86_64_ =
 #if defined(OPENSSL_X86_64)
@@ -81,7 +81,7 @@
   bool ssse3_ = false;
   bool is_x86_64_ = false;
   bool vaes_ = false;
-  bool avx10_ = false;
+  bool avx512_ = false;
   bool avoid_zmm_ = false;
 #endif
 };
@@ -95,7 +95,7 @@
 constexpr size_t kFlag_aes_hw_set_encrypt_key = 3;
 constexpr size_t kFlag_vpaes_encrypt = 4;
 constexpr size_t kFlag_vpaes_set_encrypt_key = 5;
-constexpr size_t kFlag_aes_gcm_enc_update_vaes_avx10_512 = 7;
+constexpr size_t kFlag_aes_gcm_enc_update_vaes_avx512 = 7;
 constexpr size_t kFlag_aes_gcm_enc_update_vaes_avx2 = 8;
 
 TEST_F(ImplDispatchTest, AEAD_AES_GCM) {
@@ -108,10 +108,10 @@
            is_x86_64_ && aesni_ && avx_movbe_ && !vaes_},
           {kFlag_vpaes_encrypt, ssse3_ && !aesni_},
           {kFlag_vpaes_set_encrypt_key, ssse3_ && !aesni_},
-          {kFlag_aes_gcm_enc_update_vaes_avx10_512,
-           is_x86_64_ && vaes_ && avx10_ && !avoid_zmm_},
+          {kFlag_aes_gcm_enc_update_vaes_avx512,
+           is_x86_64_ && vaes_ && avx512_ && !avoid_zmm_},
           {kFlag_aes_gcm_enc_update_vaes_avx2,
-           is_x86_64_ && vaes_ && !(avx10_ && !avoid_zmm_)},
+           is_x86_64_ && vaes_ && !(avx512_ && !avoid_zmm_)},
       },
       [] {
         const uint8_t kZeros[16] = {0};
diff --git a/crypto/internal.h b/crypto/internal.h
index c41fa1b..5c57827 100644
--- a/crypto/internal.h
+++ b/crypto/internal.h
@@ -1416,7 +1416,7 @@
 //   4: vpaes_encrypt
 //   5: vpaes_set_encrypt_key
 //   6: aes_gcm_enc_update_vaes_avx10_256 [reserved]
-//   7: aes_gcm_enc_update_vaes_avx10_512
+//   7: aes_gcm_enc_update_vaes_avx512
 //   8: aes_gcm_enc_update_vaes_avx2
 extern uint8_t BORINGSSL_function_hit[9];
 #endif  // BORINGSSL_DISPATCH_TEST
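
(A minimal sketch of how a test consumes this array under
BORINGSSL_DISPATCH_TEST; the real harness is impl_dispatch_test.cc
above, and RunOneAesGcmSeal is a hypothetical helper performing a
single AES-GCM seal.  The assembly sets the slot on entry, as in the
`mov BYTE[((BORINGSSL_function_hit+7))],1` visible in the win.asm hunk
below.)

    // Zero the flags, run one operation, then check which path fired.
    OPENSSL_memset(BORINGSSL_function_hit, 0, sizeof(BORINGSSL_function_hit));
    RunOneAesGcmSeal();  // hypothetical: one EVP_AEAD seal with AES-128-GCM
    EXPECT_TRUE(BORINGSSL_function_hit[7] ||  // aes_gcm_enc_update_vaes_avx512
                BORINGSSL_function_hit[8]);   // aes_gcm_enc_update_vaes_avx2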
diff --git a/gen/bcm/aes-gcm-avx10-x86_64-apple.S b/gen/bcm/aes-gcm-avx512-x86_64-apple.S
similarity index 97%
rename from gen/bcm/aes-gcm-avx10-x86_64-apple.S
rename to gen/bcm/aes-gcm-avx512-x86_64-apple.S
index 9106419..157feb5 100644
--- a/gen/bcm/aes-gcm-avx10-x86_64-apple.S
+++ b/gen/bcm/aes-gcm-avx512-x86_64-apple.S
@@ -39,11 +39,11 @@
 .quad	4, 0
 
 .text	
-.globl	_gcm_gmult_vpclmulqdq_avx10
-.private_extern _gcm_gmult_vpclmulqdq_avx10
+.globl	_gcm_gmult_vpclmulqdq_avx512
+.private_extern _gcm_gmult_vpclmulqdq_avx512
 
 .p2align	5
-_gcm_gmult_vpclmulqdq_avx10:
+_gcm_gmult_vpclmulqdq_avx512:
 
 
 _CET_ENDBR
@@ -77,11 +77,11 @@
 
 
 
-.globl	_gcm_init_vpclmulqdq_avx10_512
-.private_extern _gcm_init_vpclmulqdq_avx10_512
+.globl	_gcm_init_vpclmulqdq_avx512
+.private_extern _gcm_init_vpclmulqdq_avx512
 
 .p2align	5
-_gcm_init_vpclmulqdq_avx10_512:
+_gcm_init_vpclmulqdq_avx512:
 
 
 _CET_ENDBR
@@ -184,11 +184,11 @@
 
 
 
-.globl	_gcm_ghash_vpclmulqdq_avx10_512
-.private_extern _gcm_ghash_vpclmulqdq_avx10_512
+.globl	_gcm_ghash_vpclmulqdq_avx512
+.private_extern _gcm_ghash_vpclmulqdq_avx512
 
 .p2align	5
-_gcm_ghash_vpclmulqdq_avx10_512:
+_gcm_ghash_vpclmulqdq_avx512:
 
 
 _CET_ENDBR
@@ -344,11 +344,11 @@
 
 
 
-.globl	_aes_gcm_enc_update_vaes_avx10_512
-.private_extern _aes_gcm_enc_update_vaes_avx10_512
+.globl	_aes_gcm_enc_update_vaes_avx512
+.private_extern _aes_gcm_enc_update_vaes_avx512
 
 .p2align	5
-_aes_gcm_enc_update_vaes_avx10_512:
+_aes_gcm_enc_update_vaes_avx512:
 
 
 _CET_ENDBR
@@ -835,11 +835,11 @@
 
 
 
-.globl	_aes_gcm_dec_update_vaes_avx10_512
-.private_extern _aes_gcm_dec_update_vaes_avx10_512
+.globl	_aes_gcm_dec_update_vaes_avx512
+.private_extern _aes_gcm_dec_update_vaes_avx512
 
 .p2align	5
-_aes_gcm_dec_update_vaes_avx10_512:
+_aes_gcm_dec_update_vaes_avx512:
 
 
 _CET_ENDBR
diff --git a/gen/bcm/aes-gcm-avx10-x86_64-linux.S b/gen/bcm/aes-gcm-avx512-x86_64-linux.S
similarity index 95%
rename from gen/bcm/aes-gcm-avx10-x86_64-linux.S
rename to gen/bcm/aes-gcm-avx512-x86_64-linux.S
index ac95ba5..56f1a44 100644
--- a/gen/bcm/aes-gcm-avx10-x86_64-linux.S
+++ b/gen/bcm/aes-gcm-avx512-x86_64-linux.S
@@ -39,11 +39,11 @@
 .quad	4, 0
 
 .text	
-.globl	gcm_gmult_vpclmulqdq_avx10
-.hidden gcm_gmult_vpclmulqdq_avx10
-.type	gcm_gmult_vpclmulqdq_avx10,@function
+.globl	gcm_gmult_vpclmulqdq_avx512
+.hidden gcm_gmult_vpclmulqdq_avx512
+.type	gcm_gmult_vpclmulqdq_avx512,@function
 .align	32
-gcm_gmult_vpclmulqdq_avx10:
+gcm_gmult_vpclmulqdq_avx512:
 .cfi_startproc	
 
 _CET_ENDBR
@@ -76,12 +76,12 @@
 	ret
 
 .cfi_endproc	
-.size	gcm_gmult_vpclmulqdq_avx10, . - gcm_gmult_vpclmulqdq_avx10
-.globl	gcm_init_vpclmulqdq_avx10_512
-.hidden gcm_init_vpclmulqdq_avx10_512
-.type	gcm_init_vpclmulqdq_avx10_512,@function
+.size	gcm_gmult_vpclmulqdq_avx512, . - gcm_gmult_vpclmulqdq_avx512
+.globl	gcm_init_vpclmulqdq_avx512
+.hidden gcm_init_vpclmulqdq_avx512
+.type	gcm_init_vpclmulqdq_avx512,@function
 .align	32
-gcm_init_vpclmulqdq_avx10_512:
+gcm_init_vpclmulqdq_avx512:
 .cfi_startproc	
 
 _CET_ENDBR
@@ -183,12 +183,12 @@
 	ret
 
 .cfi_endproc	
-.size	gcm_init_vpclmulqdq_avx10_512, . - gcm_init_vpclmulqdq_avx10_512
-.globl	gcm_ghash_vpclmulqdq_avx10_512
-.hidden gcm_ghash_vpclmulqdq_avx10_512
-.type	gcm_ghash_vpclmulqdq_avx10_512,@function
+.size	gcm_init_vpclmulqdq_avx512, . - gcm_init_vpclmulqdq_avx512
+.globl	gcm_ghash_vpclmulqdq_avx512
+.hidden gcm_ghash_vpclmulqdq_avx512
+.type	gcm_ghash_vpclmulqdq_avx512,@function
 .align	32
-gcm_ghash_vpclmulqdq_avx10_512:
+gcm_ghash_vpclmulqdq_avx512:
 .cfi_startproc	
 
 _CET_ENDBR
@@ -343,12 +343,12 @@
 	ret
 
 .cfi_endproc	
-.size	gcm_ghash_vpclmulqdq_avx10_512, . - gcm_ghash_vpclmulqdq_avx10_512
-.globl	aes_gcm_enc_update_vaes_avx10_512
-.hidden aes_gcm_enc_update_vaes_avx10_512
-.type	aes_gcm_enc_update_vaes_avx10_512,@function
+.size	gcm_ghash_vpclmulqdq_avx512, . - gcm_ghash_vpclmulqdq_avx512
+.globl	aes_gcm_enc_update_vaes_avx512
+.hidden aes_gcm_enc_update_vaes_avx512
+.type	aes_gcm_enc_update_vaes_avx512,@function
 .align	32
-aes_gcm_enc_update_vaes_avx10_512:
+aes_gcm_enc_update_vaes_avx512:
 .cfi_startproc	
 
 _CET_ENDBR
@@ -837,12 +837,12 @@
 	ret
 
 .cfi_endproc	
-.size	aes_gcm_enc_update_vaes_avx10_512, . - aes_gcm_enc_update_vaes_avx10_512
-.globl	aes_gcm_dec_update_vaes_avx10_512
-.hidden aes_gcm_dec_update_vaes_avx10_512
-.type	aes_gcm_dec_update_vaes_avx10_512,@function
+.size	aes_gcm_enc_update_vaes_avx512, . - aes_gcm_enc_update_vaes_avx512
+.globl	aes_gcm_dec_update_vaes_avx512
+.hidden aes_gcm_dec_update_vaes_avx512
+.type	aes_gcm_dec_update_vaes_avx512,@function
 .align	32
-aes_gcm_dec_update_vaes_avx10_512:
+aes_gcm_dec_update_vaes_avx512:
 .cfi_startproc	
 
 _CET_ENDBR
@@ -1240,5 +1240,5 @@
 	ret
 
 .cfi_endproc	
-.size	aes_gcm_dec_update_vaes_avx10_512, . - aes_gcm_dec_update_vaes_avx10_512
+.size	aes_gcm_dec_update_vaes_avx512, . - aes_gcm_dec_update_vaes_avx512
 #endif
diff --git a/gen/bcm/aes-gcm-avx10-x86_64-win.asm b/gen/bcm/aes-gcm-avx512-x86_64-win.asm
similarity index 75%
rename from gen/bcm/aes-gcm-avx10-x86_64-win.asm
rename to gen/bcm/aes-gcm-avx512-x86_64-win.asm
index 6e85536..6e06094 100644
--- a/gen/bcm/aes-gcm-avx10-x86_64-win.asm
+++ b/gen/bcm/aes-gcm-avx512-x86_64-win.asm
@@ -47,19 +47,19 @@
 
 section	.text code align=64
 
-global	gcm_gmult_vpclmulqdq_avx10
+global	gcm_gmult_vpclmulqdq_avx512
 
 ALIGN	32
-gcm_gmult_vpclmulqdq_avx10:
+gcm_gmult_vpclmulqdq_avx512:
 
-$L$SEH_begin_gcm_gmult_vpclmulqdq_avx10_1:
+$L$SEH_begin_gcm_gmult_vpclmulqdq_avx512_1:
 _CET_ENDBR
 	sub	rsp,24
-$L$SEH_prologue_gcm_gmult_vpclmulqdq_avx10_2:
+$L$SEH_prologue_gcm_gmult_vpclmulqdq_avx512_2:
 	vmovdqa	XMMWORD[rsp],xmm6
-$L$SEH_prologue_gcm_gmult_vpclmulqdq_avx10_3:
+$L$SEH_prologue_gcm_gmult_vpclmulqdq_avx512_3:
 
-$L$SEH_endprologue_gcm_gmult_vpclmulqdq_avx10_4:
+$L$SEH_endprologue_gcm_gmult_vpclmulqdq_avx512_4:
 
 	vmovdqu	xmm0,XMMWORD[rcx]
 	vmovdqu	xmm1,XMMWORD[$L$bswap_mask]
@@ -87,13 +87,13 @@
 	vmovdqa	xmm6,XMMWORD[rsp]
 	add	rsp,24
 	ret
-$L$SEH_end_gcm_gmult_vpclmulqdq_avx10_5:
+$L$SEH_end_gcm_gmult_vpclmulqdq_avx512_5:
 
 
-global	gcm_init_vpclmulqdq_avx10_512
+global	gcm_init_vpclmulqdq_avx512
 
 ALIGN	32
-gcm_init_vpclmulqdq_avx10_512:
+gcm_init_vpclmulqdq_avx512:
 
 
 _CET_ENDBR
@@ -196,33 +196,33 @@
 
 
 
-global	gcm_ghash_vpclmulqdq_avx10_512
+global	gcm_ghash_vpclmulqdq_avx512
 
 ALIGN	32
-gcm_ghash_vpclmulqdq_avx10_512:
+gcm_ghash_vpclmulqdq_avx512:
 
-$L$SEH_begin_gcm_ghash_vpclmulqdq_avx10_512_1:
+$L$SEH_begin_gcm_ghash_vpclmulqdq_avx512_1:
 _CET_ENDBR
 	sub	rsp,136
-$L$SEH_prologue_gcm_ghash_vpclmulqdq_avx10_512_2:
+$L$SEH_prologue_gcm_ghash_vpclmulqdq_avx512_2:
 	vmovdqa	XMMWORD[rsp],xmm6
-$L$SEH_prologue_gcm_ghash_vpclmulqdq_avx10_512_3:
+$L$SEH_prologue_gcm_ghash_vpclmulqdq_avx512_3:
 	vmovdqa	XMMWORD[16+rsp],xmm7
-$L$SEH_prologue_gcm_ghash_vpclmulqdq_avx10_512_4:
+$L$SEH_prologue_gcm_ghash_vpclmulqdq_avx512_4:
 	vmovdqa	XMMWORD[32+rsp],xmm8
-$L$SEH_prologue_gcm_ghash_vpclmulqdq_avx10_512_5:
+$L$SEH_prologue_gcm_ghash_vpclmulqdq_avx512_5:
 	vmovdqa	XMMWORD[48+rsp],xmm9
-$L$SEH_prologue_gcm_ghash_vpclmulqdq_avx10_512_6:
+$L$SEH_prologue_gcm_ghash_vpclmulqdq_avx512_6:
 	vmovdqa	XMMWORD[64+rsp],xmm10
-$L$SEH_prologue_gcm_ghash_vpclmulqdq_avx10_512_7:
+$L$SEH_prologue_gcm_ghash_vpclmulqdq_avx512_7:
 	vmovdqa	XMMWORD[80+rsp],xmm11
-$L$SEH_prologue_gcm_ghash_vpclmulqdq_avx10_512_8:
+$L$SEH_prologue_gcm_ghash_vpclmulqdq_avx512_8:
 	vmovdqa	XMMWORD[96+rsp],xmm12
-$L$SEH_prologue_gcm_ghash_vpclmulqdq_avx10_512_9:
+$L$SEH_prologue_gcm_ghash_vpclmulqdq_avx512_9:
 	vmovdqa	XMMWORD[112+rsp],xmm13
-$L$SEH_prologue_gcm_ghash_vpclmulqdq_avx10_512_10:
+$L$SEH_prologue_gcm_ghash_vpclmulqdq_avx512_10:
 
-$L$SEH_endprologue_gcm_ghash_vpclmulqdq_avx10_512_11:
+$L$SEH_endprologue_gcm_ghash_vpclmulqdq_avx512_11:
 
 
 
@@ -379,50 +379,50 @@
 	vmovdqa	xmm13,XMMWORD[112+rsp]
 	add	rsp,136
 	ret
-$L$SEH_end_gcm_ghash_vpclmulqdq_avx10_512_12:
+$L$SEH_end_gcm_ghash_vpclmulqdq_avx512_12:
 
 
-global	aes_gcm_enc_update_vaes_avx10_512
+global	aes_gcm_enc_update_vaes_avx512
 
 ALIGN	32
-aes_gcm_enc_update_vaes_avx10_512:
+aes_gcm_enc_update_vaes_avx512:
 
-$L$SEH_begin_aes_gcm_enc_update_vaes_avx10_512_1:
+$L$SEH_begin_aes_gcm_enc_update_vaes_avx512_1:
 _CET_ENDBR
 	push	rsi
-$L$SEH_prologue_aes_gcm_enc_update_vaes_avx10_512_2:
+$L$SEH_prologue_aes_gcm_enc_update_vaes_avx512_2:
 	push	rdi
-$L$SEH_prologue_aes_gcm_enc_update_vaes_avx10_512_3:
+$L$SEH_prologue_aes_gcm_enc_update_vaes_avx512_3:
 	push	r12
-$L$SEH_prologue_aes_gcm_enc_update_vaes_avx10_512_4:
+$L$SEH_prologue_aes_gcm_enc_update_vaes_avx512_4:
 
 	mov	rsi,QWORD[64+rsp]
 	mov	rdi,QWORD[72+rsp]
 	mov	r12,QWORD[80+rsp]
 	sub	rsp,160
-$L$SEH_prologue_aes_gcm_enc_update_vaes_avx10_512_5:
+$L$SEH_prologue_aes_gcm_enc_update_vaes_avx512_5:
 	vmovdqa	XMMWORD[rsp],xmm6
-$L$SEH_prologue_aes_gcm_enc_update_vaes_avx10_512_6:
+$L$SEH_prologue_aes_gcm_enc_update_vaes_avx512_6:
 	vmovdqa	XMMWORD[16+rsp],xmm7
-$L$SEH_prologue_aes_gcm_enc_update_vaes_avx10_512_7:
+$L$SEH_prologue_aes_gcm_enc_update_vaes_avx512_7:
 	vmovdqa	XMMWORD[32+rsp],xmm8
-$L$SEH_prologue_aes_gcm_enc_update_vaes_avx10_512_8:
+$L$SEH_prologue_aes_gcm_enc_update_vaes_avx512_8:
 	vmovdqa	XMMWORD[48+rsp],xmm9
-$L$SEH_prologue_aes_gcm_enc_update_vaes_avx10_512_9:
+$L$SEH_prologue_aes_gcm_enc_update_vaes_avx512_9:
 	vmovdqa	XMMWORD[64+rsp],xmm10
-$L$SEH_prologue_aes_gcm_enc_update_vaes_avx10_512_10:
+$L$SEH_prologue_aes_gcm_enc_update_vaes_avx512_10:
 	vmovdqa	XMMWORD[80+rsp],xmm11
-$L$SEH_prologue_aes_gcm_enc_update_vaes_avx10_512_11:
+$L$SEH_prologue_aes_gcm_enc_update_vaes_avx512_11:
 	vmovdqa	XMMWORD[96+rsp],xmm12
-$L$SEH_prologue_aes_gcm_enc_update_vaes_avx10_512_12:
+$L$SEH_prologue_aes_gcm_enc_update_vaes_avx512_12:
 	vmovdqa	XMMWORD[112+rsp],xmm13
-$L$SEH_prologue_aes_gcm_enc_update_vaes_avx10_512_13:
+$L$SEH_prologue_aes_gcm_enc_update_vaes_avx512_13:
 	vmovdqa	XMMWORD[128+rsp],xmm14
-$L$SEH_prologue_aes_gcm_enc_update_vaes_avx10_512_14:
+$L$SEH_prologue_aes_gcm_enc_update_vaes_avx512_14:
 	vmovdqa	XMMWORD[144+rsp],xmm15
-$L$SEH_prologue_aes_gcm_enc_update_vaes_avx10_512_15:
+$L$SEH_prologue_aes_gcm_enc_update_vaes_avx512_15:
 
-$L$SEH_endprologue_aes_gcm_enc_update_vaes_avx10_512_16:
+$L$SEH_endprologue_aes_gcm_enc_update_vaes_avx512_16:
 %ifdef BORINGSSL_DISPATCH_TEST
 EXTERN	BORINGSSL_function_hit
 	mov	BYTE[((BORINGSSL_function_hit+7))],1
@@ -911,50 +911,50 @@
 	pop	rdi
 	pop	rsi
 	ret
-$L$SEH_end_aes_gcm_enc_update_vaes_avx10_512_17:
+$L$SEH_end_aes_gcm_enc_update_vaes_avx512_17:
 
 
-global	aes_gcm_dec_update_vaes_avx10_512
+global	aes_gcm_dec_update_vaes_avx512
 
 ALIGN	32
-aes_gcm_dec_update_vaes_avx10_512:
+aes_gcm_dec_update_vaes_avx512:
 
-$L$SEH_begin_aes_gcm_dec_update_vaes_avx10_512_1:
+$L$SEH_begin_aes_gcm_dec_update_vaes_avx512_1:
 _CET_ENDBR
 	push	rsi
-$L$SEH_prologue_aes_gcm_dec_update_vaes_avx10_512_2:
+$L$SEH_prologue_aes_gcm_dec_update_vaes_avx512_2:
 	push	rdi
-$L$SEH_prologue_aes_gcm_dec_update_vaes_avx10_512_3:
+$L$SEH_prologue_aes_gcm_dec_update_vaes_avx512_3:
 	push	r12
-$L$SEH_prologue_aes_gcm_dec_update_vaes_avx10_512_4:
+$L$SEH_prologue_aes_gcm_dec_update_vaes_avx512_4:
 
 	mov	rsi,QWORD[64+rsp]
 	mov	rdi,QWORD[72+rsp]
 	mov	r12,QWORD[80+rsp]
 	sub	rsp,160
-$L$SEH_prologue_aes_gcm_dec_update_vaes_avx10_512_5:
+$L$SEH_prologue_aes_gcm_dec_update_vaes_avx512_5:
 	vmovdqa	XMMWORD[rsp],xmm6
-$L$SEH_prologue_aes_gcm_dec_update_vaes_avx10_512_6:
+$L$SEH_prologue_aes_gcm_dec_update_vaes_avx512_6:
 	vmovdqa	XMMWORD[16+rsp],xmm7
-$L$SEH_prologue_aes_gcm_dec_update_vaes_avx10_512_7:
+$L$SEH_prologue_aes_gcm_dec_update_vaes_avx512_7:
 	vmovdqa	XMMWORD[32+rsp],xmm8
-$L$SEH_prologue_aes_gcm_dec_update_vaes_avx10_512_8:
+$L$SEH_prologue_aes_gcm_dec_update_vaes_avx512_8:
 	vmovdqa	XMMWORD[48+rsp],xmm9
-$L$SEH_prologue_aes_gcm_dec_update_vaes_avx10_512_9:
+$L$SEH_prologue_aes_gcm_dec_update_vaes_avx512_9:
 	vmovdqa	XMMWORD[64+rsp],xmm10
-$L$SEH_prologue_aes_gcm_dec_update_vaes_avx10_512_10:
+$L$SEH_prologue_aes_gcm_dec_update_vaes_avx512_10:
 	vmovdqa	XMMWORD[80+rsp],xmm11
-$L$SEH_prologue_aes_gcm_dec_update_vaes_avx10_512_11:
+$L$SEH_prologue_aes_gcm_dec_update_vaes_avx512_11:
 	vmovdqa	XMMWORD[96+rsp],xmm12
-$L$SEH_prologue_aes_gcm_dec_update_vaes_avx10_512_12:
+$L$SEH_prologue_aes_gcm_dec_update_vaes_avx512_12:
 	vmovdqa	XMMWORD[112+rsp],xmm13
-$L$SEH_prologue_aes_gcm_dec_update_vaes_avx10_512_13:
+$L$SEH_prologue_aes_gcm_dec_update_vaes_avx512_13:
 	vmovdqa	XMMWORD[128+rsp],xmm14
-$L$SEH_prologue_aes_gcm_dec_update_vaes_avx10_512_14:
+$L$SEH_prologue_aes_gcm_dec_update_vaes_avx512_14:
 	vmovdqa	XMMWORD[144+rsp],xmm15
-$L$SEH_prologue_aes_gcm_dec_update_vaes_avx10_512_15:
+$L$SEH_prologue_aes_gcm_dec_update_vaes_avx512_15:
 
-$L$SEH_endprologue_aes_gcm_dec_update_vaes_avx10_512_16:
+$L$SEH_endprologue_aes_gcm_dec_update_vaes_avx512_16:
 
 	vbroadcasti32x4	zmm8,ZMMWORD[$L$bswap_mask]
 	vbroadcasti32x4	zmm31,ZMMWORD[$L$gfpoly]
@@ -1353,164 +1353,164 @@
 	pop	rdi
 	pop	rsi
 	ret
-$L$SEH_end_aes_gcm_dec_update_vaes_avx10_512_17:
+$L$SEH_end_aes_gcm_dec_update_vaes_avx512_17:
 
 
 section	.pdata rdata align=4
 ALIGN	4
-	DD	$L$SEH_begin_gcm_gmult_vpclmulqdq_avx10_1 wrt ..imagebase
-	DD	$L$SEH_end_gcm_gmult_vpclmulqdq_avx10_5 wrt ..imagebase
-	DD	$L$SEH_info_gcm_gmult_vpclmulqdq_avx10_0 wrt ..imagebase
+	DD	$L$SEH_begin_gcm_gmult_vpclmulqdq_avx512_1 wrt ..imagebase
+	DD	$L$SEH_end_gcm_gmult_vpclmulqdq_avx512_5 wrt ..imagebase
+	DD	$L$SEH_info_gcm_gmult_vpclmulqdq_avx512_0 wrt ..imagebase
 
-	DD	$L$SEH_begin_gcm_ghash_vpclmulqdq_avx10_512_1 wrt ..imagebase
-	DD	$L$SEH_end_gcm_ghash_vpclmulqdq_avx10_512_12 wrt ..imagebase
-	DD	$L$SEH_info_gcm_ghash_vpclmulqdq_avx10_512_0 wrt ..imagebase
+	DD	$L$SEH_begin_gcm_ghash_vpclmulqdq_avx512_1 wrt ..imagebase
+	DD	$L$SEH_end_gcm_ghash_vpclmulqdq_avx512_12 wrt ..imagebase
+	DD	$L$SEH_info_gcm_ghash_vpclmulqdq_avx512_0 wrt ..imagebase
 
-	DD	$L$SEH_begin_aes_gcm_enc_update_vaes_avx10_512_1 wrt ..imagebase
-	DD	$L$SEH_end_aes_gcm_enc_update_vaes_avx10_512_17 wrt ..imagebase
-	DD	$L$SEH_info_aes_gcm_enc_update_vaes_avx10_512_0 wrt ..imagebase
+	DD	$L$SEH_begin_aes_gcm_enc_update_vaes_avx512_1 wrt ..imagebase
+	DD	$L$SEH_end_aes_gcm_enc_update_vaes_avx512_17 wrt ..imagebase
+	DD	$L$SEH_info_aes_gcm_enc_update_vaes_avx512_0 wrt ..imagebase
 
-	DD	$L$SEH_begin_aes_gcm_dec_update_vaes_avx10_512_1 wrt ..imagebase
-	DD	$L$SEH_end_aes_gcm_dec_update_vaes_avx10_512_17 wrt ..imagebase
-	DD	$L$SEH_info_aes_gcm_dec_update_vaes_avx10_512_0 wrt ..imagebase
+	DD	$L$SEH_begin_aes_gcm_dec_update_vaes_avx512_1 wrt ..imagebase
+	DD	$L$SEH_end_aes_gcm_dec_update_vaes_avx512_17 wrt ..imagebase
+	DD	$L$SEH_info_aes_gcm_dec_update_vaes_avx512_0 wrt ..imagebase
 
 
 section	.xdata rdata align=8
 ALIGN	4
-$L$SEH_info_gcm_gmult_vpclmulqdq_avx10_0:
+$L$SEH_info_gcm_gmult_vpclmulqdq_avx512_0:
 	DB	1
-	DB	$L$SEH_endprologue_gcm_gmult_vpclmulqdq_avx10_4-$L$SEH_begin_gcm_gmult_vpclmulqdq_avx10_1
+	DB	$L$SEH_endprologue_gcm_gmult_vpclmulqdq_avx512_4-$L$SEH_begin_gcm_gmult_vpclmulqdq_avx512_1
 	DB	3
 	DB	0
-	DB	$L$SEH_prologue_gcm_gmult_vpclmulqdq_avx10_3-$L$SEH_begin_gcm_gmult_vpclmulqdq_avx10_1
+	DB	$L$SEH_prologue_gcm_gmult_vpclmulqdq_avx512_3-$L$SEH_begin_gcm_gmult_vpclmulqdq_avx512_1
 	DB	104
 	DW	0
-	DB	$L$SEH_prologue_gcm_gmult_vpclmulqdq_avx10_2-$L$SEH_begin_gcm_gmult_vpclmulqdq_avx10_1
+	DB	$L$SEH_prologue_gcm_gmult_vpclmulqdq_avx512_2-$L$SEH_begin_gcm_gmult_vpclmulqdq_avx512_1
 	DB	34
 
 	DW	0
-$L$SEH_info_gcm_ghash_vpclmulqdq_avx10_512_0:
+$L$SEH_info_gcm_ghash_vpclmulqdq_avx512_0:
 	DB	1
-	DB	$L$SEH_endprologue_gcm_ghash_vpclmulqdq_avx10_512_11-$L$SEH_begin_gcm_ghash_vpclmulqdq_avx10_512_1
+	DB	$L$SEH_endprologue_gcm_ghash_vpclmulqdq_avx512_11-$L$SEH_begin_gcm_ghash_vpclmulqdq_avx512_1
 	DB	18
 	DB	0
-	DB	$L$SEH_prologue_gcm_ghash_vpclmulqdq_avx10_512_10-$L$SEH_begin_gcm_ghash_vpclmulqdq_avx10_512_1
+	DB	$L$SEH_prologue_gcm_ghash_vpclmulqdq_avx512_10-$L$SEH_begin_gcm_ghash_vpclmulqdq_avx512_1
 	DB	216
 	DW	7
-	DB	$L$SEH_prologue_gcm_ghash_vpclmulqdq_avx10_512_9-$L$SEH_begin_gcm_ghash_vpclmulqdq_avx10_512_1
+	DB	$L$SEH_prologue_gcm_ghash_vpclmulqdq_avx512_9-$L$SEH_begin_gcm_ghash_vpclmulqdq_avx512_1
 	DB	200
 	DW	6
-	DB	$L$SEH_prologue_gcm_ghash_vpclmulqdq_avx10_512_8-$L$SEH_begin_gcm_ghash_vpclmulqdq_avx10_512_1
+	DB	$L$SEH_prologue_gcm_ghash_vpclmulqdq_avx512_8-$L$SEH_begin_gcm_ghash_vpclmulqdq_avx512_1
 	DB	184
 	DW	5
-	DB	$L$SEH_prologue_gcm_ghash_vpclmulqdq_avx10_512_7-$L$SEH_begin_gcm_ghash_vpclmulqdq_avx10_512_1
+	DB	$L$SEH_prologue_gcm_ghash_vpclmulqdq_avx512_7-$L$SEH_begin_gcm_ghash_vpclmulqdq_avx512_1
 	DB	168
 	DW	4
-	DB	$L$SEH_prologue_gcm_ghash_vpclmulqdq_avx10_512_6-$L$SEH_begin_gcm_ghash_vpclmulqdq_avx10_512_1
+	DB	$L$SEH_prologue_gcm_ghash_vpclmulqdq_avx512_6-$L$SEH_begin_gcm_ghash_vpclmulqdq_avx512_1
 	DB	152
 	DW	3
-	DB	$L$SEH_prologue_gcm_ghash_vpclmulqdq_avx10_512_5-$L$SEH_begin_gcm_ghash_vpclmulqdq_avx10_512_1
+	DB	$L$SEH_prologue_gcm_ghash_vpclmulqdq_avx512_5-$L$SEH_begin_gcm_ghash_vpclmulqdq_avx512_1
 	DB	136
 	DW	2
-	DB	$L$SEH_prologue_gcm_ghash_vpclmulqdq_avx10_512_4-$L$SEH_begin_gcm_ghash_vpclmulqdq_avx10_512_1
+	DB	$L$SEH_prologue_gcm_ghash_vpclmulqdq_avx512_4-$L$SEH_begin_gcm_ghash_vpclmulqdq_avx512_1
 	DB	120
 	DW	1
-	DB	$L$SEH_prologue_gcm_ghash_vpclmulqdq_avx10_512_3-$L$SEH_begin_gcm_ghash_vpclmulqdq_avx10_512_1
+	DB	$L$SEH_prologue_gcm_ghash_vpclmulqdq_avx512_3-$L$SEH_begin_gcm_ghash_vpclmulqdq_avx512_1
 	DB	104
 	DW	0
-	DB	$L$SEH_prologue_gcm_ghash_vpclmulqdq_avx10_512_2-$L$SEH_begin_gcm_ghash_vpclmulqdq_avx10_512_1
+	DB	$L$SEH_prologue_gcm_ghash_vpclmulqdq_avx512_2-$L$SEH_begin_gcm_ghash_vpclmulqdq_avx512_1
 	DB	1
 	DW	17
 
-$L$SEH_info_aes_gcm_enc_update_vaes_avx10_512_0:
+$L$SEH_info_aes_gcm_enc_update_vaes_avx512_0:
 	DB	1
-	DB	$L$SEH_endprologue_aes_gcm_enc_update_vaes_avx10_512_16-$L$SEH_begin_aes_gcm_enc_update_vaes_avx10_512_1
+	DB	$L$SEH_endprologue_aes_gcm_enc_update_vaes_avx512_16-$L$SEH_begin_aes_gcm_enc_update_vaes_avx512_1
 	DB	25
 	DB	0
-	DB	$L$SEH_prologue_aes_gcm_enc_update_vaes_avx10_512_15-$L$SEH_begin_aes_gcm_enc_update_vaes_avx10_512_1
+	DB	$L$SEH_prologue_aes_gcm_enc_update_vaes_avx512_15-$L$SEH_begin_aes_gcm_enc_update_vaes_avx512_1
 	DB	248
 	DW	9
-	DB	$L$SEH_prologue_aes_gcm_enc_update_vaes_avx10_512_14-$L$SEH_begin_aes_gcm_enc_update_vaes_avx10_512_1
+	DB	$L$SEH_prologue_aes_gcm_enc_update_vaes_avx512_14-$L$SEH_begin_aes_gcm_enc_update_vaes_avx512_1
 	DB	232
 	DW	8
-	DB	$L$SEH_prologue_aes_gcm_enc_update_vaes_avx10_512_13-$L$SEH_begin_aes_gcm_enc_update_vaes_avx10_512_1
+	DB	$L$SEH_prologue_aes_gcm_enc_update_vaes_avx512_13-$L$SEH_begin_aes_gcm_enc_update_vaes_avx512_1
 	DB	216
 	DW	7
-	DB	$L$SEH_prologue_aes_gcm_enc_update_vaes_avx10_512_12-$L$SEH_begin_aes_gcm_enc_update_vaes_avx10_512_1
+	DB	$L$SEH_prologue_aes_gcm_enc_update_vaes_avx512_12-$L$SEH_begin_aes_gcm_enc_update_vaes_avx512_1
 	DB	200
 	DW	6
-	DB	$L$SEH_prologue_aes_gcm_enc_update_vaes_avx10_512_11-$L$SEH_begin_aes_gcm_enc_update_vaes_avx10_512_1
+	DB	$L$SEH_prologue_aes_gcm_enc_update_vaes_avx512_11-$L$SEH_begin_aes_gcm_enc_update_vaes_avx512_1
 	DB	184
 	DW	5
-	DB	$L$SEH_prologue_aes_gcm_enc_update_vaes_avx10_512_10-$L$SEH_begin_aes_gcm_enc_update_vaes_avx10_512_1
+	DB	$L$SEH_prologue_aes_gcm_enc_update_vaes_avx512_10-$L$SEH_begin_aes_gcm_enc_update_vaes_avx512_1
 	DB	168
 	DW	4
-	DB	$L$SEH_prologue_aes_gcm_enc_update_vaes_avx10_512_9-$L$SEH_begin_aes_gcm_enc_update_vaes_avx10_512_1
+	DB	$L$SEH_prologue_aes_gcm_enc_update_vaes_avx512_9-$L$SEH_begin_aes_gcm_enc_update_vaes_avx512_1
 	DB	152
 	DW	3
-	DB	$L$SEH_prologue_aes_gcm_enc_update_vaes_avx10_512_8-$L$SEH_begin_aes_gcm_enc_update_vaes_avx10_512_1
+	DB	$L$SEH_prologue_aes_gcm_enc_update_vaes_avx512_8-$L$SEH_begin_aes_gcm_enc_update_vaes_avx512_1
 	DB	136
 	DW	2
-	DB	$L$SEH_prologue_aes_gcm_enc_update_vaes_avx10_512_7-$L$SEH_begin_aes_gcm_enc_update_vaes_avx10_512_1
+	DB	$L$SEH_prologue_aes_gcm_enc_update_vaes_avx512_7-$L$SEH_begin_aes_gcm_enc_update_vaes_avx512_1
 	DB	120
 	DW	1
-	DB	$L$SEH_prologue_aes_gcm_enc_update_vaes_avx10_512_6-$L$SEH_begin_aes_gcm_enc_update_vaes_avx10_512_1
+	DB	$L$SEH_prologue_aes_gcm_enc_update_vaes_avx512_6-$L$SEH_begin_aes_gcm_enc_update_vaes_avx512_1
 	DB	104
 	DW	0
-	DB	$L$SEH_prologue_aes_gcm_enc_update_vaes_avx10_512_5-$L$SEH_begin_aes_gcm_enc_update_vaes_avx10_512_1
+	DB	$L$SEH_prologue_aes_gcm_enc_update_vaes_avx512_5-$L$SEH_begin_aes_gcm_enc_update_vaes_avx512_1
 	DB	1
 	DW	20
-	DB	$L$SEH_prologue_aes_gcm_enc_update_vaes_avx10_512_4-$L$SEH_begin_aes_gcm_enc_update_vaes_avx10_512_1
+	DB	$L$SEH_prologue_aes_gcm_enc_update_vaes_avx512_4-$L$SEH_begin_aes_gcm_enc_update_vaes_avx512_1
 	DB	192
-	DB	$L$SEH_prologue_aes_gcm_enc_update_vaes_avx10_512_3-$L$SEH_begin_aes_gcm_enc_update_vaes_avx10_512_1
+	DB	$L$SEH_prologue_aes_gcm_enc_update_vaes_avx512_3-$L$SEH_begin_aes_gcm_enc_update_vaes_avx512_1
 	DB	112
-	DB	$L$SEH_prologue_aes_gcm_enc_update_vaes_avx10_512_2-$L$SEH_begin_aes_gcm_enc_update_vaes_avx10_512_1
+	DB	$L$SEH_prologue_aes_gcm_enc_update_vaes_avx512_2-$L$SEH_begin_aes_gcm_enc_update_vaes_avx512_1
 	DB	96
 
 	DW	0
-$L$SEH_info_aes_gcm_dec_update_vaes_avx10_512_0:
+$L$SEH_info_aes_gcm_dec_update_vaes_avx512_0:
 	DB	1
-	DB	$L$SEH_endprologue_aes_gcm_dec_update_vaes_avx10_512_16-$L$SEH_begin_aes_gcm_dec_update_vaes_avx10_512_1
+	DB	$L$SEH_endprologue_aes_gcm_dec_update_vaes_avx512_16-$L$SEH_begin_aes_gcm_dec_update_vaes_avx512_1
 	DB	25
 	DB	0
-	DB	$L$SEH_prologue_aes_gcm_dec_update_vaes_avx10_512_15-$L$SEH_begin_aes_gcm_dec_update_vaes_avx10_512_1
+	DB	$L$SEH_prologue_aes_gcm_dec_update_vaes_avx512_15-$L$SEH_begin_aes_gcm_dec_update_vaes_avx512_1
 	DB	248
 	DW	9
-	DB	$L$SEH_prologue_aes_gcm_dec_update_vaes_avx10_512_14-$L$SEH_begin_aes_gcm_dec_update_vaes_avx10_512_1
+	DB	$L$SEH_prologue_aes_gcm_dec_update_vaes_avx512_14-$L$SEH_begin_aes_gcm_dec_update_vaes_avx512_1
 	DB	232
 	DW	8
-	DB	$L$SEH_prologue_aes_gcm_dec_update_vaes_avx10_512_13-$L$SEH_begin_aes_gcm_dec_update_vaes_avx10_512_1
+	DB	$L$SEH_prologue_aes_gcm_dec_update_vaes_avx512_13-$L$SEH_begin_aes_gcm_dec_update_vaes_avx512_1
 	DB	216
 	DW	7
-	DB	$L$SEH_prologue_aes_gcm_dec_update_vaes_avx10_512_12-$L$SEH_begin_aes_gcm_dec_update_vaes_avx10_512_1
+	DB	$L$SEH_prologue_aes_gcm_dec_update_vaes_avx512_12-$L$SEH_begin_aes_gcm_dec_update_vaes_avx512_1
 	DB	200
 	DW	6
-	DB	$L$SEH_prologue_aes_gcm_dec_update_vaes_avx10_512_11-$L$SEH_begin_aes_gcm_dec_update_vaes_avx10_512_1
+	DB	$L$SEH_prologue_aes_gcm_dec_update_vaes_avx512_11-$L$SEH_begin_aes_gcm_dec_update_vaes_avx512_1
 	DB	184
 	DW	5
-	DB	$L$SEH_prologue_aes_gcm_dec_update_vaes_avx10_512_10-$L$SEH_begin_aes_gcm_dec_update_vaes_avx10_512_1
+	DB	$L$SEH_prologue_aes_gcm_dec_update_vaes_avx512_10-$L$SEH_begin_aes_gcm_dec_update_vaes_avx512_1
 	DB	168
 	DW	4
-	DB	$L$SEH_prologue_aes_gcm_dec_update_vaes_avx10_512_9-$L$SEH_begin_aes_gcm_dec_update_vaes_avx10_512_1
+	DB	$L$SEH_prologue_aes_gcm_dec_update_vaes_avx512_9-$L$SEH_begin_aes_gcm_dec_update_vaes_avx512_1
 	DB	152
 	DW	3
-	DB	$L$SEH_prologue_aes_gcm_dec_update_vaes_avx10_512_8-$L$SEH_begin_aes_gcm_dec_update_vaes_avx10_512_1
+	DB	$L$SEH_prologue_aes_gcm_dec_update_vaes_avx512_8-$L$SEH_begin_aes_gcm_dec_update_vaes_avx512_1
 	DB	136
 	DW	2
-	DB	$L$SEH_prologue_aes_gcm_dec_update_vaes_avx10_512_7-$L$SEH_begin_aes_gcm_dec_update_vaes_avx10_512_1
+	DB	$L$SEH_prologue_aes_gcm_dec_update_vaes_avx512_7-$L$SEH_begin_aes_gcm_dec_update_vaes_avx512_1
 	DB	120
 	DW	1
-	DB	$L$SEH_prologue_aes_gcm_dec_update_vaes_avx10_512_6-$L$SEH_begin_aes_gcm_dec_update_vaes_avx10_512_1
+	DB	$L$SEH_prologue_aes_gcm_dec_update_vaes_avx512_6-$L$SEH_begin_aes_gcm_dec_update_vaes_avx512_1
 	DB	104
 	DW	0
-	DB	$L$SEH_prologue_aes_gcm_dec_update_vaes_avx10_512_5-$L$SEH_begin_aes_gcm_dec_update_vaes_avx10_512_1
+	DB	$L$SEH_prologue_aes_gcm_dec_update_vaes_avx512_5-$L$SEH_begin_aes_gcm_dec_update_vaes_avx512_1
 	DB	1
 	DW	20
-	DB	$L$SEH_prologue_aes_gcm_dec_update_vaes_avx10_512_4-$L$SEH_begin_aes_gcm_dec_update_vaes_avx10_512_1
+	DB	$L$SEH_prologue_aes_gcm_dec_update_vaes_avx512_4-$L$SEH_begin_aes_gcm_dec_update_vaes_avx512_1
 	DB	192
-	DB	$L$SEH_prologue_aes_gcm_dec_update_vaes_avx10_512_3-$L$SEH_begin_aes_gcm_dec_update_vaes_avx10_512_1
+	DB	$L$SEH_prologue_aes_gcm_dec_update_vaes_avx512_3-$L$SEH_begin_aes_gcm_dec_update_vaes_avx512_1
 	DB	112
-	DB	$L$SEH_prologue_aes_gcm_dec_update_vaes_avx10_512_2-$L$SEH_begin_aes_gcm_dec_update_vaes_avx10_512_1
+	DB	$L$SEH_prologue_aes_gcm_dec_update_vaes_avx512_2-$L$SEH_begin_aes_gcm_dec_update_vaes_avx512_1
 	DB	96
 
 	DW	0
diff --git a/gen/sources.bzl b/gen/sources.bzl
index 6bfc0ad..95e3f89 100644
--- a/gen/sources.bzl
+++ b/gen/sources.bzl
@@ -104,10 +104,10 @@
 ]
 
 bcm_sources_asm = [
-    "gen/bcm/aes-gcm-avx10-x86_64-apple.S",
-    "gen/bcm/aes-gcm-avx10-x86_64-linux.S",
     "gen/bcm/aes-gcm-avx2-x86_64-apple.S",
     "gen/bcm/aes-gcm-avx2-x86_64-linux.S",
+    "gen/bcm/aes-gcm-avx512-x86_64-apple.S",
+    "gen/bcm/aes-gcm-avx512-x86_64-linux.S",
     "gen/bcm/aesni-gcm-x86_64-apple.S",
     "gen/bcm/aesni-gcm-x86_64-linux.S",
     "gen/bcm/aesni-x86-apple.S",
@@ -206,8 +206,8 @@
 ]
 
 bcm_sources_nasm = [
-    "gen/bcm/aes-gcm-avx10-x86_64-win.asm",
     "gen/bcm/aes-gcm-avx2-x86_64-win.asm",
+    "gen/bcm/aes-gcm-avx512-x86_64-win.asm",
     "gen/bcm/aesni-gcm-x86_64-win.asm",
     "gen/bcm/aesni-x86-win.asm",
     "gen/bcm/aesni-x86_64-win.asm",
diff --git a/gen/sources.cmake b/gen/sources.cmake
index 25fe1ba..647f2c7 100644
--- a/gen/sources.cmake
+++ b/gen/sources.cmake
@@ -110,10 +110,10 @@
 set(
   BCM_SOURCES_ASM
 
-  gen/bcm/aes-gcm-avx10-x86_64-apple.S
-  gen/bcm/aes-gcm-avx10-x86_64-linux.S
   gen/bcm/aes-gcm-avx2-x86_64-apple.S
   gen/bcm/aes-gcm-avx2-x86_64-linux.S
+  gen/bcm/aes-gcm-avx512-x86_64-apple.S
+  gen/bcm/aes-gcm-avx512-x86_64-linux.S
   gen/bcm/aesni-gcm-x86_64-apple.S
   gen/bcm/aesni-gcm-x86_64-linux.S
   gen/bcm/aesni-x86-apple.S
@@ -214,8 +214,8 @@
 set(
   BCM_SOURCES_NASM
 
-  gen/bcm/aes-gcm-avx10-x86_64-win.asm
   gen/bcm/aes-gcm-avx2-x86_64-win.asm
+  gen/bcm/aes-gcm-avx512-x86_64-win.asm
   gen/bcm/aesni-gcm-x86_64-win.asm
   gen/bcm/aesni-x86-win.asm
   gen/bcm/aesni-x86_64-win.asm
diff --git a/gen/sources.gni b/gen/sources.gni
index 0d13eee..34b1109 100644
--- a/gen/sources.gni
+++ b/gen/sources.gni
@@ -104,10 +104,10 @@
 ]
 
 bcm_sources_asm = [
-  "gen/bcm/aes-gcm-avx10-x86_64-apple.S",
-  "gen/bcm/aes-gcm-avx10-x86_64-linux.S",
   "gen/bcm/aes-gcm-avx2-x86_64-apple.S",
   "gen/bcm/aes-gcm-avx2-x86_64-linux.S",
+  "gen/bcm/aes-gcm-avx512-x86_64-apple.S",
+  "gen/bcm/aes-gcm-avx512-x86_64-linux.S",
   "gen/bcm/aesni-gcm-x86_64-apple.S",
   "gen/bcm/aesni-gcm-x86_64-linux.S",
   "gen/bcm/aesni-x86-apple.S",
@@ -206,8 +206,8 @@
 ]
 
 bcm_sources_nasm = [
-  "gen/bcm/aes-gcm-avx10-x86_64-win.asm",
   "gen/bcm/aes-gcm-avx2-x86_64-win.asm",
+  "gen/bcm/aes-gcm-avx512-x86_64-win.asm",
   "gen/bcm/aesni-gcm-x86_64-win.asm",
   "gen/bcm/aesni-x86-win.asm",
   "gen/bcm/aesni-x86_64-win.asm",
diff --git a/gen/sources.json b/gen/sources.json
index 2fddbe4..f01332a 100644
--- a/gen/sources.json
+++ b/gen/sources.json
@@ -88,10 +88,10 @@
       "crypto/fipsmodule/tls/kdf.cc.inc"
     ],
     "asm": [
-      "gen/bcm/aes-gcm-avx10-x86_64-apple.S",
-      "gen/bcm/aes-gcm-avx10-x86_64-linux.S",
       "gen/bcm/aes-gcm-avx2-x86_64-apple.S",
       "gen/bcm/aes-gcm-avx2-x86_64-linux.S",
+      "gen/bcm/aes-gcm-avx512-x86_64-apple.S",
+      "gen/bcm/aes-gcm-avx512-x86_64-linux.S",
       "gen/bcm/aesni-gcm-x86_64-apple.S",
       "gen/bcm/aesni-gcm-x86_64-linux.S",
       "gen/bcm/aesni-x86-apple.S",
@@ -189,8 +189,8 @@
       "third_party/fiat/asm/fiat_p256_adx_sqr.S"
     ],
     "nasm": [
-      "gen/bcm/aes-gcm-avx10-x86_64-win.asm",
       "gen/bcm/aes-gcm-avx2-x86_64-win.asm",
+      "gen/bcm/aes-gcm-avx512-x86_64-win.asm",
       "gen/bcm/aesni-gcm-x86_64-win.asm",
       "gen/bcm/aesni-x86-win.asm",
       "gen/bcm/aesni-x86_64-win.asm",
diff --git a/gen/sources.mk b/gen/sources.mk
index d7fbe4a..54cbb1f 100644
--- a/gen/sources.mk
+++ b/gen/sources.mk
@@ -102,10 +102,10 @@
   crypto/fipsmodule/tls/kdf.cc.inc
 
 boringssl_bcm_sources_asm := \
-  gen/bcm/aes-gcm-avx10-x86_64-apple.S \
-  gen/bcm/aes-gcm-avx10-x86_64-linux.S \
   gen/bcm/aes-gcm-avx2-x86_64-apple.S \
   gen/bcm/aes-gcm-avx2-x86_64-linux.S \
+  gen/bcm/aes-gcm-avx512-x86_64-apple.S \
+  gen/bcm/aes-gcm-avx512-x86_64-linux.S \
   gen/bcm/aesni-gcm-x86_64-apple.S \
   gen/bcm/aesni-gcm-x86_64-linux.S \
   gen/bcm/aesni-x86-apple.S \
@@ -203,8 +203,8 @@
   third_party/fiat/asm/fiat_p256_adx_sqr.S
 
 boringssl_bcm_sources_nasm := \
-  gen/bcm/aes-gcm-avx10-x86_64-win.asm \
   gen/bcm/aes-gcm-avx2-x86_64-win.asm \
+  gen/bcm/aes-gcm-avx512-x86_64-win.asm \
   gen/bcm/aesni-gcm-x86_64-win.asm \
   gen/bcm/aesni-x86-win.asm \
   gen/bcm/aesni-x86_64-win.asm \