Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
crypto: x86 - Remove CONFIG_AS_VPCLMULQDQ
The current minimum required version of binutils is 2.30, which supports the
VPCLMULQDQ instruction mnemonics.

Remove the check for assembler support of VPCLMULQDQ instructions and all
related macros for conditional compilation.

No functional change intended.

Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Acked-by: Herbert Xu <herbert@gondor.apana.org.au>
Link: https://lore.kernel.org/20250819085855.333380-3-ubizjak@gmail.com
commit e084e9f815
parent 4593311290
committed by Borislav Petkov (AMD)
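
The diff below follows directly from the reasoning in the commit message: once the toolchain baseline guarantees VPCLMULQDQ support, the compile-time guard is redundant and only the existing runtime CPU-feature checks are needed. As a standalone illustration (not code from the patch; the names cpu_has_vpclmulqdq and gcm_update_* are made up), the before/after shape looks roughly like this in C:

#include <stdio.h>

/* Hypothetical stand-in for boot_cpu_has(X86_FEATURE_VPCLMULQDQ). */
static int cpu_has_vpclmulqdq(void)
{
        return 1;
}

static void gcm_update_vaes(void)    { puts("VPCLMULQDQ/VAES path"); }
static void gcm_update_generic(void) { puts("generic path"); }

/* Old shape: guarded at build time (assembler support) and again at run time. */
static void gcm_update_old(void)
{
#ifdef CONFIG_AS_VPCLMULQDQ
        if (cpu_has_vpclmulqdq()) {
                gcm_update_vaes();
                return;
        }
#endif
        gcm_update_generic();
}

/* New shape: always built, selected purely by the runtime CPU check. */
static void gcm_update_new(void)
{
        if (cpu_has_vpclmulqdq())
                gcm_update_vaes();
        else
                gcm_update_generic();
}

int main(void)
{
        gcm_update_old();
        gcm_update_new();
        return 0;
}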

arch/x86/Kconfig.assembler
@@ -6,11 +6,6 @@ config AS_AVX512
         help
           Supported by binutils >= 2.25 and LLVM integrated assembler
 
-config AS_VPCLMULQDQ
-        def_bool $(as-instr,vpclmulqdq \$0x10$(comma)%ymm0$(comma)%ymm1$(comma)%ymm2)
-        help
-          Supported by binutils >= 2.30 and LLVM integrated assembler
-
 config AS_WRUSS
         def_bool $(as-instr64,wrussq %rax$(comma)(%rbx))
         help

arch/x86/crypto/Makefile
@@ -46,10 +46,8 @@ obj-$(CONFIG_CRYPTO_AES_NI_INTEL) += aesni-intel.o
 aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o
 aesni-intel-$(CONFIG_64BIT) += aes-ctr-avx-x86_64.o \
                                aes-gcm-aesni-x86_64.o \
-                               aes-xts-avx-x86_64.o
-ifeq ($(CONFIG_AS_VPCLMULQDQ),y)
-aesni-intel-$(CONFIG_64BIT) += aes-gcm-avx10-x86_64.o
-endif
+                               aes-xts-avx-x86_64.o \
+                               aes-gcm-avx10-x86_64.o
 
 obj-$(CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL) += ghash-clmulni-intel.o
 ghash-clmulni-intel-y := ghash-clmulni-intel_asm.o ghash-clmulni-intel_glue.o

arch/x86/crypto/aes-ctr-avx-x86_64.S
@@ -552,7 +552,6 @@ SYM_TYPED_FUNC_START(aes_xctr_crypt_aesni_avx)
         _aes_ctr_crypt  1
 SYM_FUNC_END(aes_xctr_crypt_aesni_avx)
 
-#if defined(CONFIG_AS_VPCLMULQDQ)
 .set    VL, 32
 .set    USE_AVX512, 0
 SYM_TYPED_FUNC_START(aes_ctr64_crypt_vaes_avx2)

@@ -570,4 +569,3 @@ SYM_FUNC_END(aes_ctr64_crypt_vaes_avx512)
 SYM_TYPED_FUNC_START(aes_xctr_crypt_vaes_avx512)
         _aes_ctr_crypt  1
 SYM_FUNC_END(aes_xctr_crypt_vaes_avx512)
-#endif // CONFIG_AS_VPCLMULQDQ

arch/x86/crypto/aes-xts-avx-x86_64.S
@@ -886,7 +886,6 @@ SYM_TYPED_FUNC_START(aes_xts_decrypt_aesni_avx)
         _aes_xts_crypt  0
 SYM_FUNC_END(aes_xts_decrypt_aesni_avx)
 
-#if defined(CONFIG_AS_VPCLMULQDQ)
 .set    VL, 32
 .set    USE_AVX512, 0
 SYM_TYPED_FUNC_START(aes_xts_encrypt_vaes_avx2)

@@ -904,4 +903,3 @@ SYM_FUNC_END(aes_xts_encrypt_vaes_avx512)
 SYM_TYPED_FUNC_START(aes_xts_decrypt_vaes_avx512)
         _aes_xts_crypt  0
 SYM_FUNC_END(aes_xts_decrypt_vaes_avx512)
-#endif /* CONFIG_AS_VPCLMULQDQ */

arch/x86/crypto/aesni-intel_glue.c
@@ -828,10 +828,8 @@ static struct skcipher_alg skcipher_algs_##suffix[] = {{ \
 }}
 
 DEFINE_AVX_SKCIPHER_ALGS(aesni_avx, "aesni-avx", 500);
-#if defined(CONFIG_AS_VPCLMULQDQ)
 DEFINE_AVX_SKCIPHER_ALGS(vaes_avx2, "vaes-avx2", 600);
 DEFINE_AVX_SKCIPHER_ALGS(vaes_avx512, "vaes-avx512", 800);
-#endif
 
 /* The common part of the x86_64 AES-GCM key struct */
 struct aes_gcm_key {

@@ -912,17 +910,8 @@ struct aes_gcm_key_avx10 {
 #define FLAG_RFC4106    BIT(0)
 #define FLAG_ENC        BIT(1)
 #define FLAG_AVX        BIT(2)
-#if defined(CONFIG_AS_VPCLMULQDQ)
-# define FLAG_AVX10_256 BIT(3)
-# define FLAG_AVX10_512 BIT(4)
-#else
-/*
- * This should cause all calls to the AVX10 assembly functions to be
- * optimized out, avoiding the need to ifdef each call individually.
- */
-# define FLAG_AVX10_256 0
-# define FLAG_AVX10_512 0
-#endif
+#define FLAG_AVX10_256  BIT(3)
+#define FLAG_AVX10_512  BIT(4)
 
 static inline struct aes_gcm_key *
 aes_gcm_key_get(struct crypto_aead *tfm, int flags)
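
The #else branch removed above documented a small but useful trick: defining FLAG_AVX10_256/FLAG_AVX10_512 as 0 makes every test of those bits constant-false, so the compiler drops the AVX10 call sites and the unassembled symbols are never referenced, with no per-call #ifdef. A minimal standalone C sketch of that dead-code-elimination pattern follows (hypothetical names, not the kernel's definitions); with the flag set to a real bit the branch survives, with 0 it is compiled out.

#include <stdio.h>

#define BIT(n)          (1u << (n))

/* Flip between BIT(3) (fast path reachable) and 0 (fast path compiled out). */
#define FLAG_FAST       0

static void fast_path(void)    { puts("fast path"); }
static void generic_path(void) { puts("generic path"); }

static void do_update(unsigned int flags)
{
        /*
         * With FLAG_FAST defined as 0, (flags & 0) is always false, so the
         * compiler can discard this branch entirely; in the kernel case that
         * is what lets calls into optional assembly routines disappear
         * without wrapping each call site in #ifdef.
         */
        if (flags & FLAG_FAST)
                fast_path();
        else
                generic_path();
}

int main(void)
{
        do_update(FLAG_FAST);
        return 0;
}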

@@ -1519,7 +1508,6 @@ DEFINE_GCM_ALGS(aesni_avx, FLAG_AVX,
                 "generic-gcm-aesni-avx", "rfc4106-gcm-aesni-avx",
                 AES_GCM_KEY_AESNI_SIZE, 500);
 
-#if defined(CONFIG_AS_VPCLMULQDQ)
 /* aes_gcm_algs_vaes_avx10_256 */
 DEFINE_GCM_ALGS(vaes_avx10_256, FLAG_AVX10_256,
                 "generic-gcm-vaes-avx10_256", "rfc4106-gcm-vaes-avx10_256",

@@ -1529,7 +1517,6 @@ DEFINE_GCM_ALGS(vaes_avx10_256, FLAG_AVX10_256,
 DEFINE_GCM_ALGS(vaes_avx10_512, FLAG_AVX10_512,
                 "generic-gcm-vaes-avx10_512", "rfc4106-gcm-vaes-avx10_512",
                 AES_GCM_KEY_AVX10_SIZE, 800);
-#endif /* CONFIG_AS_VPCLMULQDQ */
 
 static int __init register_avx_algs(void)
 {

@@ -1551,7 +1538,6 @@ static int __init register_avx_algs(void)
          * Similarly, the assembler support was added at about the same time.
          * For simplicity, just always check for VAES and VPCLMULQDQ together.
          */
-#if defined(CONFIG_AS_VPCLMULQDQ)
         if (!boot_cpu_has(X86_FEATURE_AVX2) ||
             !boot_cpu_has(X86_FEATURE_VAES) ||
             !boot_cpu_has(X86_FEATURE_VPCLMULQDQ) ||
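
As the comment in this hunk notes, the decision now rests entirely on runtime CPU-feature checks: the VAES/VPCLMULQDQ algorithms are always compiled, and registration is simply skipped on CPUs that lack the features. A rough standalone sketch of that gate is below; cpu_has() and register_vaes_algs() are invented for illustration, while the real code uses boot_cpu_has() and the crypto registration helpers.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for boot_cpu_has(X86_FEATURE_*). */
static bool cpu_has(const char *feature)
{
        printf("checking %s\n", feature);
        return true;
}

static int register_vaes_algs(void)
{
        /* Skip registration, not compilation, when the CPU lacks support. */
        if (!cpu_has("AVX2") || !cpu_has("VAES") || !cpu_has("VPCLMULQDQ"))
                return 0;

        puts("registering VAES/VPCLMULQDQ skciphers and AEADs");
        return 0;
}

int main(void)
{
        return register_vaes_algs();
}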

@@ -1592,7 +1578,7 @@ static int __init register_avx_algs(void)
                                       ARRAY_SIZE(aes_gcm_algs_vaes_avx10_512));
         if (err)
                 return err;
-#endif /* CONFIG_AS_VPCLMULQDQ */
+
         return 0;
 }

@@ -1607,12 +1593,10 @@ static void unregister_avx_algs(void)
 {
         unregister_skciphers(skcipher_algs_aesni_avx);
         unregister_aeads(aes_gcm_algs_aesni_avx);
-#if defined(CONFIG_AS_VPCLMULQDQ)
         unregister_skciphers(skcipher_algs_vaes_avx2);
         unregister_skciphers(skcipher_algs_vaes_avx512);
         unregister_aeads(aes_gcm_algs_vaes_avx10_256);
         unregister_aeads(aes_gcm_algs_vaes_avx10_512);
-#endif
 }
 #else /* CONFIG_X86_64 */
 static struct aead_alg aes_gcm_algs_aesni[0];