Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

lib/crypto: sha256: Consolidate into single module

Consolidate the CPU-based SHA-256 code into a single module, following
what I did with SHA-512:

- Each arch now provides a header file lib/crypto/$(SRCARCH)/sha256.h,
replacing lib/crypto/$(SRCARCH)/sha256.c. The header defines
sha256_blocks() and optionally sha256_mod_init_arch(). It is included
by lib/crypto/sha256.c, and thus the code gets built into the single
libsha256 module, with proper inlining and dead code elimination.

- sha256_blocks_generic() is moved from lib/crypto/sha256-generic.c into
lib/crypto/sha256.c. It's now a static function marked with
__maybe_unused, so the compiler automatically eliminates it in any
cases where it's not used.

- Whether arch-optimized SHA-256 is buildable is now controlled
centrally by lib/crypto/Kconfig instead of by
lib/crypto/$(SRCARCH)/Kconfig. The conditions for enabling it remain
the same as before, and it remains enabled by default.

- Any additional arch-specific translation units for the optimized
SHA-256 code (such as assembly files) are now compiled by
lib/crypto/Makefile instead of lib/crypto/$(SRCARCH)/Makefile.

Acked-by: Ard Biesheuvel <ardb@kernel.org>
Link: https://lore.kernel.org/r/20250630160645.3198-13-ebiggers@kernel.org
Signed-off-by: Eric Biggers <ebiggers@kernel.org>

+231 -465
-6
arch/mips/cavium-octeon/Kconfig
··· 23 23 legally range is from zero to 54 cache blocks (i.e. CVMSEG LM is 24 24 between zero and 6192 bytes). 25 25 26 - config CRYPTO_SHA256_OCTEON 27 - tristate 28 - default CRYPTO_LIB_SHA256 29 - select CRYPTO_ARCH_HAVE_LIB_SHA256 30 - select CRYPTO_LIB_SHA256_GENERIC 31 - 32 26 endif # CPU_CAVIUM_OCTEON 33 27 34 28 if CAVIUM_OCTEON_SOC
-1
arch/mips/cavium-octeon/crypto/Makefile
··· 7 7 8 8 obj-$(CONFIG_CRYPTO_MD5_OCTEON) += octeon-md5.o 9 9 obj-$(CONFIG_CRYPTO_SHA1_OCTEON) += octeon-sha1.o 10 - obj-$(CONFIG_CRYPTO_SHA256_OCTEON) += octeon-sha256.o
+3 -11
arch/mips/cavium-octeon/crypto/octeon-sha256.c lib/crypto/mips/sha256.h
··· 1 - // SPDX-License-Identifier: GPL-2.0-or-later 1 + /* SPDX-License-Identifier: GPL-2.0-or-later */ 2 2 /* 3 3 * SHA-256 Secure Hash Algorithm. 4 4 * ··· 14 14 15 15 #include <asm/octeon/crypto.h> 16 16 #include <asm/octeon/octeon.h> 17 - #include <crypto/internal/sha2.h> 18 - #include <linux/kernel.h> 19 - #include <linux/module.h> 20 17 21 18 /* 22 19 * We pass everything as 64-bit. OCTEON can handle misaligned data. 23 20 */ 24 21 25 - void sha256_blocks_arch(struct sha256_block_state *state, 26 - const u8 *data, size_t nblocks) 22 + static void sha256_blocks(struct sha256_block_state *state, 23 + const u8 *data, size_t nblocks) 27 24 { 28 25 struct octeon_cop2_state cop2_state; 29 26 u64 *state64 = (u64 *)state; ··· 56 59 state64[3] = read_octeon_64bit_hash_dword(3); 57 60 octeon_crypto_disable(&cop2_state, flags); 58 61 } 59 - EXPORT_SYMBOL_GPL(sha256_blocks_arch); 60 - 61 - MODULE_LICENSE("GPL"); 62 - MODULE_DESCRIPTION("SHA-256 Secure Hash Algorithm (OCTEON)"); 63 - MODULE_AUTHOR("Aaro Koskinen <aaro.koskinen@iki.fi>");
-52
include/crypto/internal/sha2.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0-only */ 2 - 3 - #ifndef _CRYPTO_INTERNAL_SHA2_H 4 - #define _CRYPTO_INTERNAL_SHA2_H 5 - 6 - #include <crypto/sha2.h> 7 - #include <linux/compiler_attributes.h> 8 - #include <linux/string.h> 9 - #include <linux/types.h> 10 - #include <linux/unaligned.h> 11 - 12 - void sha256_blocks_generic(struct sha256_block_state *state, 13 - const u8 *data, size_t nblocks); 14 - void sha256_blocks_arch(struct sha256_block_state *state, 15 - const u8 *data, size_t nblocks); 16 - 17 - static __always_inline void sha256_choose_blocks( 18 - u32 state[SHA256_STATE_WORDS], const u8 *data, size_t nblocks, 19 - bool force_generic, bool force_simd) 20 - { 21 - if (!IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_SHA256) || force_generic) 22 - sha256_blocks_generic((struct sha256_block_state *)state, data, nblocks); 23 - else 24 - sha256_blocks_arch((struct sha256_block_state *)state, data, nblocks); 25 - } 26 - 27 - static __always_inline void sha256_finup( 28 - struct crypto_sha256_state *sctx, u8 buf[SHA256_BLOCK_SIZE], 29 - size_t len, u8 out[SHA256_DIGEST_SIZE], size_t digest_size, 30 - bool force_generic, bool force_simd) 31 - { 32 - const size_t bit_offset = SHA256_BLOCK_SIZE - 8; 33 - __be64 *bits = (__be64 *)&buf[bit_offset]; 34 - int i; 35 - 36 - buf[len++] = 0x80; 37 - if (len > bit_offset) { 38 - memset(&buf[len], 0, SHA256_BLOCK_SIZE - len); 39 - sha256_choose_blocks(sctx->state, buf, 1, force_generic, 40 - force_simd); 41 - len = 0; 42 - } 43 - 44 - memset(&buf[len], 0, bit_offset - len); 45 - *bits = cpu_to_be64(sctx->count << 3); 46 - sha256_choose_blocks(sctx->state, buf, 1, force_generic, force_simd); 47 - 48 - for (i = 0; i < digest_size; i += 4) 49 - put_unaligned_be32(sctx->state[i / 4], out + i); 50 - } 51 - 52 - #endif /* _CRYPTO_INTERNAL_SHA2_H */
+10 -16
lib/crypto/Kconfig
··· 144 144 by either the generic implementation or an arch-specific one, if one 145 145 is available and enabled. 146 146 147 - config CRYPTO_ARCH_HAVE_LIB_SHA256 147 + config CRYPTO_LIB_SHA256_ARCH 148 148 bool 149 - help 150 - Declares whether the architecture provides an arch-specific 151 - accelerated implementation of the SHA-256 library interface. 152 - 153 - config CRYPTO_LIB_SHA256_GENERIC 154 - tristate 155 - default CRYPTO_LIB_SHA256 if !CRYPTO_ARCH_HAVE_LIB_SHA256 156 - help 157 - This symbol can be selected by arch implementations of the SHA-256 158 - library interface that require the generic code as a fallback, e.g., 159 - for SIMD implementations. If no arch specific implementation is 160 - enabled, this implementation serves the users of CRYPTO_LIB_SHA256. 149 + depends on CRYPTO_LIB_SHA256 && !UML 150 + default y if ARM && !CPU_V7M 151 + default y if ARM64 152 + default y if MIPS && CPU_CAVIUM_OCTEON 153 + default y if PPC && SPE 154 + default y if RISCV && 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO 155 + default y if S390 156 + default y if SPARC64 157 + default y if X86_64 161 158 162 159 config CRYPTO_LIB_SHA512 163 160 tristate ··· 195 198 endif 196 199 if S390 197 200 source "lib/crypto/s390/Kconfig" 198 - endif 199 - if SPARC 200 - source "lib/crypto/sparc/Kconfig" 201 201 endif 202 202 if X86 203 203 source "lib/crypto/x86/Kconfig"
+34 -5
lib/crypto/Makefile
··· 66 66 obj-$(CONFIG_CRYPTO_LIB_SHA1) += libsha1.o 67 67 libsha1-y := sha1.o 68 68 69 - obj-$(CONFIG_CRYPTO_LIB_SHA256) += libsha256.o 70 - libsha256-y := sha256.o 69 + ################################################################################ 71 70 72 - obj-$(CONFIG_CRYPTO_LIB_SHA256_GENERIC) += libsha256-generic.o 73 - libsha256-generic-y := sha256-generic.o 71 + obj-$(CONFIG_CRYPTO_LIB_SHA256) += libsha256.o 72 + libsha256-y := sha256.o 73 + ifeq ($(CONFIG_CRYPTO_LIB_SHA256_ARCH),y) 74 + CFLAGS_sha256.o += -I$(src)/$(SRCARCH) 75 + 76 + ifeq ($(CONFIG_ARM),y) 77 + libsha256-y += arm/sha256-ce.o arm/sha256-core.o 78 + $(obj)/arm/sha256-core.S: $(src)/arm/sha256-armv4.pl 79 + $(call cmd,perlasm) 80 + clean-files += arm/sha256-core.S 81 + AFLAGS_arm/sha256-core.o += $(aflags-thumb2-y) 82 + endif 83 + 84 + ifeq ($(CONFIG_ARM64),y) 85 + libsha256-y += arm64/sha256-core.o 86 + $(obj)/arm64/sha256-core.S: $(src)/arm64/sha2-armv8.pl 87 + $(call cmd,perlasm_with_args) 88 + clean-files += arm64/sha256-core.S 89 + libsha256-$(CONFIG_KERNEL_MODE_NEON) += arm64/sha256-ce.o 90 + endif 91 + 92 + libsha256-$(CONFIG_PPC) += powerpc/sha256-spe-asm.o 93 + libsha256-$(CONFIG_RISCV) += riscv/sha256-riscv64-zvknha_or_zvknhb-zvkb.o 94 + libsha256-$(CONFIG_SPARC) += sparc/sha256_asm.o 95 + libsha256-$(CONFIG_X86) += x86/sha256-ssse3-asm.o \ 96 + x86/sha256-avx-asm.o \ 97 + x86/sha256-avx2-asm.o \ 98 + x86/sha256-ni-asm.o 99 + endif # CONFIG_CRYPTO_LIB_SHA256_ARCH 100 + 101 + ################################################################################ 74 102 75 103 obj-$(CONFIG_CRYPTO_LIB_SHA512) += libsha512.o 76 104 libsha512-y := sha512.o ··· 128 100 x86/sha512-avx2-asm.o 129 101 endif # CONFIG_CRYPTO_LIB_SHA512_ARCH 130 102 103 + ################################################################################ 104 + 131 105 obj-$(CONFIG_MPILIB) += mpi/ 132 106 133 107 obj-$(CONFIG_CRYPTO_SELFTESTS_FULL) += simd.o ··· 143 113 obj-$(CONFIG_PPC) += powerpc/ 144 114 
obj-$(CONFIG_RISCV) += riscv/ 145 115 obj-$(CONFIG_S390) += s390/ 146 - obj-$(CONFIG_SPARC) += sparc/ 147 116 obj-$(CONFIG_X86) += x86/
-6
lib/crypto/arm/Kconfig
··· 22 22 tristate 23 23 default CRYPTO_LIB_POLY1305 24 24 select CRYPTO_ARCH_HAVE_LIB_POLY1305 25 - 26 - config CRYPTO_SHA256_ARM 27 - tristate 28 - depends on !CPU_V7M 29 - default CRYPTO_LIB_SHA256 30 - select CRYPTO_ARCH_HAVE_LIB_SHA256
+1 -7
lib/crypto/arm/Makefile
··· 10 10 obj-$(CONFIG_CRYPTO_POLY1305_ARM) += poly1305-arm.o 11 11 poly1305-arm-y := poly1305-core.o poly1305-glue.o 12 12 13 - obj-$(CONFIG_CRYPTO_SHA256_ARM) += sha256-arm.o 14 - sha256-arm-y := sha256.o sha256-core.o 15 - sha256-arm-$(CONFIG_KERNEL_MODE_NEON) += sha256-ce.o 16 - 17 13 quiet_cmd_perl = PERL $@ 18 14 cmd_perl = $(PERL) $(<) > $(@) 19 15 20 16 $(obj)/%-core.S: $(src)/%-armv4.pl 21 17 $(call cmd,perl) 22 18 23 - clean-files += poly1305-core.S sha256-core.S 19 + clean-files += poly1305-core.S 24 20 25 21 aflags-thumb2-$(CONFIG_THUMB2_KERNEL) := -U__thumb2__ -D__thumb2__=1 26 22 ··· 24 28 poly1305-aflags-$(CONFIG_CPU_V7) := -U__LINUX_ARM_ARCH__ -D__LINUX_ARM_ARCH__=5 25 29 poly1305-aflags-$(CONFIG_KERNEL_MODE_NEON) := -U__LINUX_ARM_ARCH__ -D__LINUX_ARM_ARCH__=7 26 30 AFLAGS_poly1305-core.o += $(poly1305-aflags-y) $(aflags-thumb2-y) 27 - 28 - AFLAGS_sha256-core.o += $(aflags-thumb2-y)
+8 -19
lib/crypto/arm/sha256.c lib/crypto/arm/sha256.h
··· 1 - // SPDX-License-Identifier: GPL-2.0-or-later 1 + /* SPDX-License-Identifier: GPL-2.0-or-later */ 2 2 /* 3 3 * SHA-256 optimized for ARM 4 4 * 5 5 * Copyright 2025 Google LLC 6 6 */ 7 7 #include <asm/neon.h> 8 - #include <crypto/internal/sha2.h> 9 8 #include <crypto/internal/simd.h> 10 - #include <linux/kernel.h> 11 - #include <linux/module.h> 12 9 13 10 asmlinkage void sha256_block_data_order(struct sha256_block_state *state, 14 11 const u8 *data, size_t nblocks); ··· 17 20 static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon); 18 21 static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_ce); 19 22 20 - void sha256_blocks_arch(struct sha256_block_state *state, 21 - const u8 *data, size_t nblocks) 23 + static void sha256_blocks(struct sha256_block_state *state, 24 + const u8 *data, size_t nblocks) 22 25 { 23 26 if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && 24 27 static_branch_likely(&have_neon) && crypto_simd_usable()) { ··· 32 35 sha256_block_data_order(state, data, nblocks); 33 36 } 34 37 } 35 - EXPORT_SYMBOL_GPL(sha256_blocks_arch); 36 38 37 - static int __init sha256_arm_mod_init(void) 39 + #ifdef CONFIG_KERNEL_MODE_NEON 40 + #define sha256_mod_init_arch sha256_mod_init_arch 41 + static inline void sha256_mod_init_arch(void) 38 42 { 39 - if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && (elf_hwcap & HWCAP_NEON)) { 43 + if (elf_hwcap & HWCAP_NEON) { 40 44 static_branch_enable(&have_neon); 41 45 if (elf_hwcap2 & HWCAP2_SHA2) 42 46 static_branch_enable(&have_ce); 43 47 } 44 - return 0; 45 48 } 46 - subsys_initcall(sha256_arm_mod_init); 47 - 48 - static void __exit sha256_arm_mod_exit(void) 49 - { 50 - } 51 - module_exit(sha256_arm_mod_exit); 52 - 53 - MODULE_LICENSE("GPL"); 54 - MODULE_DESCRIPTION("SHA-256 optimized for ARM"); 49 + #endif /* CONFIG_KERNEL_MODE_NEON */
-5
lib/crypto/arm64/Kconfig
··· 12 12 depends on KERNEL_MODE_NEON 13 13 default CRYPTO_LIB_POLY1305 14 14 select CRYPTO_ARCH_HAVE_LIB_POLY1305 15 - 16 - config CRYPTO_SHA256_ARM64 17 - tristate 18 - default CRYPTO_LIB_SHA256 19 - select CRYPTO_ARCH_HAVE_LIB_SHA256
+1 -8
lib/crypto/arm64/Makefile
··· 8 8 AFLAGS_poly1305-core.o += -Dpoly1305_init=poly1305_block_init_arch 9 9 AFLAGS_poly1305-core.o += -Dpoly1305_emit=poly1305_emit_arch 10 10 11 - obj-$(CONFIG_CRYPTO_SHA256_ARM64) += sha256-arm64.o 12 - sha256-arm64-y := sha256.o sha256-core.o 13 - sha256-arm64-$(CONFIG_KERNEL_MODE_NEON) += sha256-ce.o 14 - 15 11 quiet_cmd_perlasm = PERLASM $@ 16 12 cmd_perlasm = $(PERL) $(<) void $(@) 17 13 18 14 $(obj)/%-core.S: $(src)/%-armv8.pl 19 15 $(call cmd,perlasm) 20 16 21 - $(obj)/sha256-core.S: $(src)/sha2-armv8.pl 22 - $(call cmd,perlasm) 23 - 24 - clean-files += poly1305-core.S sha256-core.S 17 + clean-files += poly1305-core.S
+9 -20
lib/crypto/arm64/sha256.c lib/crypto/arm64/sha256.h
··· 1 - // SPDX-License-Identifier: GPL-2.0-or-later 1 + /* SPDX-License-Identifier: GPL-2.0-or-later */ 2 2 /* 3 3 * SHA-256 optimized for ARM64 4 4 * 5 5 * Copyright 2025 Google LLC 6 6 */ 7 7 #include <asm/neon.h> 8 - #include <crypto/internal/sha2.h> 9 8 #include <crypto/internal/simd.h> 10 - #include <linux/kernel.h> 11 - #include <linux/module.h> 9 + #include <linux/cpufeature.h> 12 10 13 11 asmlinkage void sha256_block_data_order(struct sha256_block_state *state, 14 12 const u8 *data, size_t nblocks); ··· 18 20 static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon); 19 21 static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_ce); 20 22 21 - void sha256_blocks_arch(struct sha256_block_state *state, 22 - const u8 *data, size_t nblocks) 23 + static void sha256_blocks(struct sha256_block_state *state, 24 + const u8 *data, size_t nblocks) 23 25 { 24 26 if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && 25 27 static_branch_likely(&have_neon) && crypto_simd_usable()) { ··· 43 45 sha256_block_data_order(state, data, nblocks); 44 46 } 45 47 } 46 - EXPORT_SYMBOL_GPL(sha256_blocks_arch); 47 48 48 - static int __init sha256_arm64_mod_init(void) 49 + #ifdef CONFIG_KERNEL_MODE_NEON 50 + #define sha256_mod_init_arch sha256_mod_init_arch 51 + static inline void sha256_mod_init_arch(void) 49 52 { 50 - if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && 51 - cpu_have_named_feature(ASIMD)) { 53 + if (cpu_have_named_feature(ASIMD)) { 52 54 static_branch_enable(&have_neon); 53 55 if (cpu_have_named_feature(SHA2)) 54 56 static_branch_enable(&have_ce); 55 57 } 56 - return 0; 57 58 } 58 - subsys_initcall(sha256_arm64_mod_init); 59 - 60 - static void __exit sha256_arm64_mod_exit(void) 61 - { 62 - } 63 - module_exit(sha256_arm64_mod_exit); 64 - 65 - MODULE_LICENSE("GPL"); 66 - MODULE_DESCRIPTION("SHA-256 optimized for ARM64"); 59 + #endif /* CONFIG_KERNEL_MODE_NEON */
-6
lib/crypto/powerpc/Kconfig
··· 14 14 default CRYPTO_LIB_POLY1305 15 15 select CRYPTO_ARCH_HAVE_LIB_POLY1305 16 16 select CRYPTO_LIB_POLY1305_GENERIC 17 - 18 - config CRYPTO_SHA256_PPC_SPE 19 - tristate 20 - depends on SPE 21 - default CRYPTO_LIB_SHA256 22 - select CRYPTO_ARCH_HAVE_LIB_SHA256
-3
lib/crypto/powerpc/Makefile
··· 5 5 6 6 obj-$(CONFIG_CRYPTO_POLY1305_P10) += poly1305-p10-crypto.o 7 7 poly1305-p10-crypto-y := poly1305-p10-glue.o poly1305-p10le_64.o 8 - 9 - obj-$(CONFIG_CRYPTO_SHA256_PPC_SPE) += sha256-ppc-spe.o 10 - sha256-ppc-spe-y := sha256.o sha256-spe-asm.o
+3 -10
lib/crypto/powerpc/sha256.c lib/crypto/powerpc/sha256.h
··· 1 - // SPDX-License-Identifier: GPL-2.0-or-later 1 + /* SPDX-License-Identifier: GPL-2.0-or-later */ 2 2 /* 3 3 * SHA-256 Secure Hash Algorithm, SPE optimized 4 4 * ··· 9 9 */ 10 10 11 11 #include <asm/switch_to.h> 12 - #include <crypto/internal/sha2.h> 13 - #include <linux/kernel.h> 14 - #include <linux/module.h> 15 12 #include <linux/preempt.h> 16 13 17 14 /* ··· 40 43 preempt_enable(); 41 44 } 42 45 43 - void sha256_blocks_arch(struct sha256_block_state *state, 44 - const u8 *data, size_t nblocks) 46 + static void sha256_blocks(struct sha256_block_state *state, 47 + const u8 *data, size_t nblocks) 45 48 { 46 49 do { 47 50 /* cut input data into smaller blocks */ ··· 56 59 nblocks -= unit; 57 60 } while (nblocks); 58 61 } 59 - EXPORT_SYMBOL_GPL(sha256_blocks_arch); 60 - 61 - MODULE_LICENSE("GPL"); 62 - MODULE_DESCRIPTION("SHA-256 Secure Hash Algorithm, SPE optimized");
-7
lib/crypto/riscv/Kconfig
··· 6 6 default CRYPTO_LIB_CHACHA 7 7 select CRYPTO_ARCH_HAVE_LIB_CHACHA 8 8 select CRYPTO_LIB_CHACHA_GENERIC 9 - 10 - config CRYPTO_SHA256_RISCV64 11 - tristate 12 - depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO 13 - default CRYPTO_LIB_SHA256 14 - select CRYPTO_ARCH_HAVE_LIB_SHA256 15 - select CRYPTO_LIB_SHA256_GENERIC
-3
lib/crypto/riscv/Makefile
··· 2 2 3 3 obj-$(CONFIG_CRYPTO_CHACHA_RISCV64) += chacha-riscv64.o 4 4 chacha-riscv64-y := chacha-riscv64-glue.o chacha-riscv64-zvkb.o 5 - 6 - obj-$(CONFIG_CRYPTO_SHA256_RISCV64) += sha256-riscv64.o 7 - sha256-riscv64-y := sha256.o sha256-riscv64-zvknha_or_zvknhb-zvkb.o
+5 -19
lib/crypto/riscv/sha256.c lib/crypto/riscv/sha256.h
··· 1 - // SPDX-License-Identifier: GPL-2.0-or-later 1 + /* SPDX-License-Identifier: GPL-2.0-or-later */ 2 2 /* 3 3 * SHA-256 (RISC-V accelerated) 4 4 * ··· 10 10 */ 11 11 12 12 #include <asm/vector.h> 13 - #include <crypto/internal/sha2.h> 14 13 #include <crypto/internal/simd.h> 15 - #include <linux/kernel.h> 16 - #include <linux/module.h> 17 14 18 15 asmlinkage void 19 16 sha256_transform_zvknha_or_zvknhb_zvkb(struct sha256_block_state *state, ··· 18 21 19 22 static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_extensions); 20 23 21 - void sha256_blocks_arch(struct sha256_block_state *state, 22 - const u8 *data, size_t nblocks) 24 + static void sha256_blocks(struct sha256_block_state *state, 25 + const u8 *data, size_t nblocks) 23 26 { 24 27 if (static_branch_likely(&have_extensions) && crypto_simd_usable()) { 25 28 kernel_vector_begin(); ··· 29 32 sha256_blocks_generic(state, data, nblocks); 30 33 } 31 34 } 32 - EXPORT_SYMBOL_GPL(sha256_blocks_arch); 33 35 34 - static int __init riscv64_sha256_mod_init(void) 36 + #define sha256_mod_init_arch sha256_mod_init_arch 37 + static inline void sha256_mod_init_arch(void) 35 38 { 36 39 /* Both zvknha and zvknhb provide the SHA-256 instructions. */ 37 40 if ((riscv_isa_extension_available(NULL, ZVKNHA) || ··· 39 42 riscv_isa_extension_available(NULL, ZVKB) && 40 43 riscv_vector_vlen() >= 128) 41 44 static_branch_enable(&have_extensions); 42 - return 0; 43 45 } 44 - subsys_initcall(riscv64_sha256_mod_init); 45 - 46 - static void __exit riscv64_sha256_mod_exit(void) 47 - { 48 - } 49 - module_exit(riscv64_sha256_mod_exit); 50 - 51 - MODULE_DESCRIPTION("SHA-256 (RISC-V accelerated)"); 52 - MODULE_AUTHOR("Heiko Stuebner <heiko.stuebner@vrull.eu>"); 53 - MODULE_LICENSE("GPL");
-6
lib/crypto/s390/Kconfig
··· 5 5 default CRYPTO_LIB_CHACHA 6 6 select CRYPTO_LIB_CHACHA_GENERIC 7 7 select CRYPTO_ARCH_HAVE_LIB_CHACHA 8 - 9 - config CRYPTO_SHA256_S390 10 - tristate 11 - default CRYPTO_LIB_SHA256 12 - select CRYPTO_ARCH_HAVE_LIB_SHA256 13 - select CRYPTO_LIB_SHA256_GENERIC
-3
lib/crypto/s390/Makefile
··· 2 2 3 3 obj-$(CONFIG_CRYPTO_CHACHA_S390) += chacha_s390.o 4 4 chacha_s390-y := chacha-glue.o chacha-s390.o 5 - 6 - obj-$(CONFIG_CRYPTO_SHA256_S390) += sha256-s390.o 7 - sha256-s390-y := sha256.o
+5 -18
lib/crypto/s390/sha256.c lib/crypto/s390/sha256.h
··· 1 - // SPDX-License-Identifier: GPL-2.0-or-later 1 + /* SPDX-License-Identifier: GPL-2.0-or-later */ 2 2 /* 3 3 * SHA-256 optimized using the CP Assist for Cryptographic Functions (CPACF) 4 4 * 5 5 * Copyright 2025 Google LLC 6 6 */ 7 7 #include <asm/cpacf.h> 8 - #include <crypto/internal/sha2.h> 9 8 #include <linux/cpufeature.h> 10 - #include <linux/kernel.h> 11 - #include <linux/module.h> 12 9 13 10 static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_cpacf_sha256); 14 11 15 - void sha256_blocks_arch(struct sha256_block_state *state, 16 - const u8 *data, size_t nblocks) 12 + static void sha256_blocks(struct sha256_block_state *state, 13 + const u8 *data, size_t nblocks) 17 14 { 18 15 if (static_branch_likely(&have_cpacf_sha256)) 19 16 cpacf_kimd(CPACF_KIMD_SHA_256, state, data, ··· 18 21 else 19 22 sha256_blocks_generic(state, data, nblocks); 20 23 } 21 - EXPORT_SYMBOL_GPL(sha256_blocks_arch); 22 24 23 - static int __init sha256_s390_mod_init(void) 25 + #define sha256_mod_init_arch sha256_mod_init_arch 26 + static inline void sha256_mod_init_arch(void) 24 27 { 25 28 if (cpu_have_feature(S390_CPU_FEATURE_MSA) && 26 29 cpacf_query_func(CPACF_KIMD, CPACF_KIMD_SHA_256)) 27 30 static_branch_enable(&have_cpacf_sha256); 28 - return 0; 29 31 } 30 - subsys_initcall(sha256_s390_mod_init); 31 - 32 - static void __exit sha256_s390_mod_exit(void) 33 - { 34 - } 35 - module_exit(sha256_s390_mod_exit); 36 - 37 - MODULE_LICENSE("GPL"); 38 - MODULE_DESCRIPTION("SHA-256 using the CP Assist for Cryptographic Functions (CPACF)");
-150
lib/crypto/sha256-generic.c
··· 1 - // SPDX-License-Identifier: GPL-2.0-or-later 2 - /* 3 - * SHA-256, as specified in 4 - * http://csrc.nist.gov/groups/STM/cavp/documents/shs/sha256-384-512.pdf 5 - * 6 - * SHA-256 code by Jean-Luc Cooke <jlcooke@certainkey.com>. 7 - * 8 - * Copyright (c) Jean-Luc Cooke <jlcooke@certainkey.com> 9 - * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk> 10 - * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> 11 - * Copyright (c) 2014 Red Hat Inc. 12 - */ 13 - 14 - #include <crypto/internal/sha2.h> 15 - #include <linux/export.h> 16 - #include <linux/kernel.h> 17 - #include <linux/module.h> 18 - #include <linux/string.h> 19 - #include <linux/unaligned.h> 20 - 21 - static const u32 SHA256_K[] = { 22 - 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 23 - 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5, 24 - 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 25 - 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, 26 - 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 27 - 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da, 28 - 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 29 - 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967, 30 - 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 31 - 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, 32 - 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 33 - 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070, 34 - 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 35 - 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3, 36 - 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 37 - 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2, 38 - }; 39 - 40 - static inline u32 Ch(u32 x, u32 y, u32 z) 41 - { 42 - return z ^ (x & (y ^ z)); 43 - } 44 - 45 - static inline u32 Maj(u32 x, u32 y, u32 z) 46 - { 47 - return (x & y) | (z & (x | y)); 48 - } 49 - 50 - #define e0(x) (ror32(x, 2) ^ ror32(x, 13) ^ ror32(x, 22)) 51 - #define e1(x) (ror32(x, 6) ^ ror32(x, 11) ^ ror32(x, 25)) 52 - #define s0(x) (ror32(x, 7) ^ ror32(x, 18) ^ (x >> 3)) 53 - #define s1(x) (ror32(x, 17) 
^ ror32(x, 19) ^ (x >> 10)) 54 - 55 - static inline void LOAD_OP(int I, u32 *W, const u8 *input) 56 - { 57 - W[I] = get_unaligned_be32((__u32 *)input + I); 58 - } 59 - 60 - static inline void BLEND_OP(int I, u32 *W) 61 - { 62 - W[I] = s1(W[I-2]) + W[I-7] + s0(W[I-15]) + W[I-16]; 63 - } 64 - 65 - #define SHA256_ROUND(i, a, b, c, d, e, f, g, h) do { \ 66 - u32 t1, t2; \ 67 - t1 = h + e1(e) + Ch(e, f, g) + SHA256_K[i] + W[i]; \ 68 - t2 = e0(a) + Maj(a, b, c); \ 69 - d += t1; \ 70 - h = t1 + t2; \ 71 - } while (0) 72 - 73 - static void sha256_block_generic(struct sha256_block_state *state, 74 - const u8 *input, u32 W[64]) 75 - { 76 - u32 a, b, c, d, e, f, g, h; 77 - int i; 78 - 79 - /* load the input */ 80 - for (i = 0; i < 16; i += 8) { 81 - LOAD_OP(i + 0, W, input); 82 - LOAD_OP(i + 1, W, input); 83 - LOAD_OP(i + 2, W, input); 84 - LOAD_OP(i + 3, W, input); 85 - LOAD_OP(i + 4, W, input); 86 - LOAD_OP(i + 5, W, input); 87 - LOAD_OP(i + 6, W, input); 88 - LOAD_OP(i + 7, W, input); 89 - } 90 - 91 - /* now blend */ 92 - for (i = 16; i < 64; i += 8) { 93 - BLEND_OP(i + 0, W); 94 - BLEND_OP(i + 1, W); 95 - BLEND_OP(i + 2, W); 96 - BLEND_OP(i + 3, W); 97 - BLEND_OP(i + 4, W); 98 - BLEND_OP(i + 5, W); 99 - BLEND_OP(i + 6, W); 100 - BLEND_OP(i + 7, W); 101 - } 102 - 103 - /* load the state into our registers */ 104 - a = state->h[0]; 105 - b = state->h[1]; 106 - c = state->h[2]; 107 - d = state->h[3]; 108 - e = state->h[4]; 109 - f = state->h[5]; 110 - g = state->h[6]; 111 - h = state->h[7]; 112 - 113 - /* now iterate */ 114 - for (i = 0; i < 64; i += 8) { 115 - SHA256_ROUND(i + 0, a, b, c, d, e, f, g, h); 116 - SHA256_ROUND(i + 1, h, a, b, c, d, e, f, g); 117 - SHA256_ROUND(i + 2, g, h, a, b, c, d, e, f); 118 - SHA256_ROUND(i + 3, f, g, h, a, b, c, d, e); 119 - SHA256_ROUND(i + 4, e, f, g, h, a, b, c, d); 120 - SHA256_ROUND(i + 5, d, e, f, g, h, a, b, c); 121 - SHA256_ROUND(i + 6, c, d, e, f, g, h, a, b); 122 - SHA256_ROUND(i + 7, b, c, d, e, f, g, h, a); 123 - } 124 - 125 - 
state->h[0] += a; 126 - state->h[1] += b; 127 - state->h[2] += c; 128 - state->h[3] += d; 129 - state->h[4] += e; 130 - state->h[5] += f; 131 - state->h[6] += g; 132 - state->h[7] += h; 133 - } 134 - 135 - void sha256_blocks_generic(struct sha256_block_state *state, 136 - const u8 *data, size_t nblocks) 137 - { 138 - u32 W[64]; 139 - 140 - do { 141 - sha256_block_generic(state, data, W); 142 - data += SHA256_BLOCK_SIZE; 143 - } while (--nblocks); 144 - 145 - memzero_explicit(W, sizeof(W)); 146 - } 147 - EXPORT_SYMBOL_GPL(sha256_blocks_generic); 148 - 149 - MODULE_DESCRIPTION("SHA-256 Algorithm (generic implementation)"); 150 - MODULE_LICENSE("GPL");
+137 -19
lib/crypto/sha256.c
··· 6 6 * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk> 7 7 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> 8 8 * Copyright (c) 2014 Red Hat Inc. 9 + * Copyright 2025 Google LLC 9 10 */ 10 11 11 12 #include <crypto/hmac.h> 12 13 #include <crypto/internal/blockhash.h> 13 - #include <crypto/internal/sha2.h> 14 + #include <crypto/sha2.h> 14 15 #include <linux/export.h> 15 16 #include <linux/kernel.h> 16 17 #include <linux/module.h> 17 18 #include <linux/string.h> 19 + #include <linux/unaligned.h> 18 20 #include <linux/wordpart.h> 19 21 20 22 static const struct sha256_block_state sha224_iv = { ··· 33 31 }, 34 32 }; 35 33 36 - /* 37 - * If __DISABLE_EXPORTS is defined, then this file is being compiled for a 38 - * pre-boot environment. In that case, ignore the kconfig options, pull the 39 - * generic code into the same translation unit, and use that only. 40 - */ 41 - #ifdef __DISABLE_EXPORTS 42 - #include "sha256-generic.c" 34 + static const u32 sha256_K[64] = { 35 + 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 36 + 0x923f82a4, 0xab1c5ed5, 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 37 + 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, 0xe49b69c1, 0xefbe4786, 38 + 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da, 39 + 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 40 + 0x06ca6351, 0x14292967, 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 41 + 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, 0xa2bfe8a1, 0xa81a664b, 42 + 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070, 43 + 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 44 + 0x5b9cca4f, 0x682e6ff3, 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 45 + 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2, 46 + }; 47 + 48 + #define Ch(x, y, z) ((z) ^ ((x) & ((y) ^ (z)))) 49 + #define Maj(x, y, z) (((x) & (y)) | ((z) & ((x) | (y)))) 50 + #define e0(x) (ror32((x), 2) ^ ror32((x), 13) ^ 
ror32((x), 22)) 51 + #define e1(x) (ror32((x), 6) ^ ror32((x), 11) ^ ror32((x), 25)) 52 + #define s0(x) (ror32((x), 7) ^ ror32((x), 18) ^ ((x) >> 3)) 53 + #define s1(x) (ror32((x), 17) ^ ror32((x), 19) ^ ((x) >> 10)) 54 + 55 + static inline void LOAD_OP(int I, u32 *W, const u8 *input) 56 + { 57 + W[I] = get_unaligned_be32((__u32 *)input + I); 58 + } 59 + 60 + static inline void BLEND_OP(int I, u32 *W) 61 + { 62 + W[I] = s1(W[I - 2]) + W[I - 7] + s0(W[I - 15]) + W[I - 16]; 63 + } 64 + 65 + #define SHA256_ROUND(i, a, b, c, d, e, f, g, h) \ 66 + do { \ 67 + u32 t1, t2; \ 68 + t1 = h + e1(e) + Ch(e, f, g) + sha256_K[i] + W[i]; \ 69 + t2 = e0(a) + Maj(a, b, c); \ 70 + d += t1; \ 71 + h = t1 + t2; \ 72 + } while (0) 73 + 74 + static void sha256_block_generic(struct sha256_block_state *state, 75 + const u8 *input, u32 W[64]) 76 + { 77 + u32 a, b, c, d, e, f, g, h; 78 + int i; 79 + 80 + /* load the input */ 81 + for (i = 0; i < 16; i += 8) { 82 + LOAD_OP(i + 0, W, input); 83 + LOAD_OP(i + 1, W, input); 84 + LOAD_OP(i + 2, W, input); 85 + LOAD_OP(i + 3, W, input); 86 + LOAD_OP(i + 4, W, input); 87 + LOAD_OP(i + 5, W, input); 88 + LOAD_OP(i + 6, W, input); 89 + LOAD_OP(i + 7, W, input); 90 + } 91 + 92 + /* now blend */ 93 + for (i = 16; i < 64; i += 8) { 94 + BLEND_OP(i + 0, W); 95 + BLEND_OP(i + 1, W); 96 + BLEND_OP(i + 2, W); 97 + BLEND_OP(i + 3, W); 98 + BLEND_OP(i + 4, W); 99 + BLEND_OP(i + 5, W); 100 + BLEND_OP(i + 6, W); 101 + BLEND_OP(i + 7, W); 102 + } 103 + 104 + /* load the state into our registers */ 105 + a = state->h[0]; 106 + b = state->h[1]; 107 + c = state->h[2]; 108 + d = state->h[3]; 109 + e = state->h[4]; 110 + f = state->h[5]; 111 + g = state->h[6]; 112 + h = state->h[7]; 113 + 114 + /* now iterate */ 115 + for (i = 0; i < 64; i += 8) { 116 + SHA256_ROUND(i + 0, a, b, c, d, e, f, g, h); 117 + SHA256_ROUND(i + 1, h, a, b, c, d, e, f, g); 118 + SHA256_ROUND(i + 2, g, h, a, b, c, d, e, f); 119 + SHA256_ROUND(i + 3, f, g, h, a, b, c, d, e); 120 + 
SHA256_ROUND(i + 4, e, f, g, h, a, b, c, d); 121 + SHA256_ROUND(i + 5, d, e, f, g, h, a, b, c); 122 + SHA256_ROUND(i + 6, c, d, e, f, g, h, a, b); 123 + SHA256_ROUND(i + 7, b, c, d, e, f, g, h, a); 124 + } 125 + 126 + state->h[0] += a; 127 + state->h[1] += b; 128 + state->h[2] += c; 129 + state->h[3] += d; 130 + state->h[4] += e; 131 + state->h[5] += f; 132 + state->h[6] += g; 133 + state->h[7] += h; 134 + } 135 + 136 + static void __maybe_unused 137 + sha256_blocks_generic(struct sha256_block_state *state, 138 + const u8 *data, size_t nblocks) 139 + { 140 + u32 W[64]; 141 + 142 + do { 143 + sha256_block_generic(state, data, W); 144 + data += SHA256_BLOCK_SIZE; 145 + } while (--nblocks); 146 + 147 + memzero_explicit(W, sizeof(W)); 148 + } 149 + 150 + #if defined(CONFIG_CRYPTO_LIB_SHA256_ARCH) && !defined(__DISABLE_EXPORTS) 151 + #include "sha256.h" /* $(SRCARCH)/sha256.h */ 152 + #else 153 + #define sha256_blocks sha256_blocks_generic 43 154 #endif 44 - 45 - static inline bool sha256_purgatory(void) 46 - { 47 - return __is_defined(__DISABLE_EXPORTS); 48 - } 49 - 50 - static inline void sha256_blocks(struct sha256_block_state *state, 51 - const u8 *data, size_t nblocks) 52 - { 53 - sha256_choose_blocks(state->h, data, nblocks, sha256_purgatory(), false); 54 - } 55 155 56 156 static void __sha256_init(struct __sha256_ctx *ctx, 57 157 const struct sha256_block_state *iv, ··· 376 272 } 377 273 EXPORT_SYMBOL_GPL(hmac_sha256_usingrawkey); 378 274 #endif /* !__DISABLE_EXPORTS */ 275 + 276 + #ifdef sha256_mod_init_arch 277 + static int __init sha256_mod_init(void) 278 + { 279 + sha256_mod_init_arch(); 280 + return 0; 281 + } 282 + subsys_initcall(sha256_mod_init); 283 + 284 + static void __exit sha256_mod_exit(void) 285 + { 286 + } 287 + module_exit(sha256_mod_exit); 288 + #endif 379 289 380 290 MODULE_DESCRIPTION("SHA-224, SHA-256, HMAC-SHA224, and HMAC-SHA256 library functions"); 381 291 MODULE_LICENSE("GPL");
-8
lib/crypto/sparc/Kconfig
··· 1 - # SPDX-License-Identifier: GPL-2.0-only 2 - 3 - config CRYPTO_SHA256_SPARC64 4 - tristate 5 - depends on SPARC64 6 - default CRYPTO_LIB_SHA256 7 - select CRYPTO_ARCH_HAVE_LIB_SHA256 8 - select CRYPTO_LIB_SHA256_GENERIC
-4
lib/crypto/sparc/Makefile
··· 1 - # SPDX-License-Identifier: GPL-2.0-only 2 - 3 - obj-$(CONFIG_CRYPTO_SHA256_SPARC64) += sha256-sparc64.o 4 - sha256-sparc64-y := sha256.o sha256_asm.o
+7 -22
lib/crypto/sparc/sha256.c lib/crypto/sparc/sha256.h
··· 1 - // SPDX-License-Identifier: GPL-2.0-only 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 2 /* 3 3 * SHA-256 accelerated using the sparc64 sha256 opcodes 4 4 * ··· 8 8 * SHA224 Support Copyright 2007 Intel Corporation <jonathan.lynch@intel.com> 9 9 */ 10 10 11 - #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 12 - 13 11 #include <asm/elf.h> 14 12 #include <asm/opcodes.h> 15 13 #include <asm/pstate.h> 16 - #include <crypto/internal/sha2.h> 17 - #include <linux/kernel.h> 18 - #include <linux/module.h> 19 14 20 15 static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_sha256_opcodes); 21 16 22 17 asmlinkage void sha256_sparc64_transform(struct sha256_block_state *state, 23 18 const u8 *data, size_t nblocks); 24 19 25 - void sha256_blocks_arch(struct sha256_block_state *state, 26 - const u8 *data, size_t nblocks) 20 + static void sha256_blocks(struct sha256_block_state *state, 21 + const u8 *data, size_t nblocks) 27 22 { 28 23 if (static_branch_likely(&have_sha256_opcodes)) 29 24 sha256_sparc64_transform(state, data, nblocks); 30 25 else 31 26 sha256_blocks_generic(state, data, nblocks); 32 27 } 33 - EXPORT_SYMBOL_GPL(sha256_blocks_arch); 34 28 35 - static int __init sha256_sparc64_mod_init(void) 29 + #define sha256_mod_init_arch sha256_mod_init_arch 30 + static inline void sha256_mod_init_arch(void) 36 31 { 37 32 unsigned long cfr; 38 33 39 34 if (!(sparc64_elf_hwcap & HWCAP_SPARC_CRYPTO)) 40 - return 0; 35 + return; 41 36 42 37 __asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr)); 43 38 if (!(cfr & CFR_SHA256)) 44 - return 0; 39 + return; 45 40 46 41 static_branch_enable(&have_sha256_opcodes); 47 42 pr_info("Using sparc64 sha256 opcode optimized SHA-256/SHA-224 implementation\n"); 48 - return 0; 49 43 } 50 - subsys_initcall(sha256_sparc64_mod_init); 51 - 52 - static void __exit sha256_sparc64_mod_exit(void) 53 - { 54 - } 55 - module_exit(sha256_sparc64_mod_exit); 56 - 57 - MODULE_LICENSE("GPL"); 58 - MODULE_DESCRIPTION("SHA-256 accelerated using the sparc64 sha256 
opcodes");
-7
lib/crypto/x86/Kconfig
··· 24 24 depends on 64BIT 25 25 default CRYPTO_LIB_POLY1305 26 26 select CRYPTO_ARCH_HAVE_LIB_POLY1305 27 - 28 - config CRYPTO_SHA256_X86_64 29 - tristate 30 - depends on 64BIT 31 - default CRYPTO_LIB_SHA256 32 - select CRYPTO_ARCH_HAVE_LIB_SHA256 33 - select CRYPTO_LIB_SHA256_GENERIC
-3
lib/crypto/x86/Makefile
··· 10 10 poly1305-x86_64-y := poly1305-x86_64-cryptogams.o poly1305_glue.o 11 11 targets += poly1305-x86_64-cryptogams.S 12 12 13 - obj-$(CONFIG_CRYPTO_SHA256_X86_64) += sha256-x86_64.o 14 - sha256-x86_64-y := sha256.o sha256-ssse3-asm.o sha256-avx-asm.o sha256-avx2-asm.o sha256-ni-asm.o 15 - 16 13 quiet_cmd_perlasm = PERLASM $@ 17 14 cmd_perlasm = $(PERL) $< > $@ 18 15
+8 -21
lib/crypto/x86/sha256.c lib/crypto/x86/sha256.h
··· 1 - // SPDX-License-Identifier: GPL-2.0-or-later 1 + /* SPDX-License-Identifier: GPL-2.0-or-later */ 2 2 /* 3 3 * SHA-256 optimized for x86_64 4 4 * 5 5 * Copyright 2025 Google LLC 6 6 */ 7 7 #include <asm/fpu/api.h> 8 - #include <crypto/internal/sha2.h> 9 8 #include <crypto/internal/simd.h> 10 - #include <linux/kernel.h> 11 - #include <linux/module.h> 12 9 #include <linux/static_call.h> 13 10 14 11 asmlinkage void sha256_transform_ssse3(struct sha256_block_state *state, ··· 21 24 22 25 DEFINE_STATIC_CALL(sha256_blocks_x86, sha256_transform_ssse3); 23 26 24 - void sha256_blocks_arch(struct sha256_block_state *state, 25 - const u8 *data, size_t nblocks) 27 + static void sha256_blocks(struct sha256_block_state *state, 28 + const u8 *data, size_t nblocks) 26 29 { 27 30 if (static_branch_likely(&have_sha256_x86) && crypto_simd_usable()) { 28 31 kernel_fpu_begin(); ··· 32 35 sha256_blocks_generic(state, data, nblocks); 33 36 } 34 37 } 35 - EXPORT_SYMBOL_GPL(sha256_blocks_arch); 36 38 37 - static int __init sha256_x86_mod_init(void) 39 + #define sha256_mod_init_arch sha256_mod_init_arch 40 + static inline void sha256_mod_init_arch(void) 38 41 { 39 42 if (boot_cpu_has(X86_FEATURE_SHA_NI)) { 40 43 static_call_update(sha256_blocks_x86, sha256_ni_transform); 41 - } else if (cpu_has_xfeatures(XFEATURE_MASK_SSE | 42 - XFEATURE_MASK_YMM, NULL) && 44 + } else if (cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, 45 + NULL) && 43 46 boot_cpu_has(X86_FEATURE_AVX)) { 44 47 if (boot_cpu_has(X86_FEATURE_AVX2) && 45 48 boot_cpu_has(X86_FEATURE_BMI2)) ··· 49 52 static_call_update(sha256_blocks_x86, 50 53 sha256_transform_avx); 51 54 } else if (!boot_cpu_has(X86_FEATURE_SSSE3)) { 52 - return 0; 55 + return; 53 56 } 54 57 static_branch_enable(&have_sha256_x86); 55 - return 0; 56 58 } 57 - subsys_initcall(sha256_x86_mod_init); 58 - 59 - static void __exit sha256_x86_mod_exit(void) 60 - { 61 - } 62 - module_exit(sha256_x86_mod_exit); 63 - 64 - MODULE_LICENSE("GPL"); 65 - 
MODULE_DESCRIPTION("SHA-256 optimized for x86_64");