Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2026-04-11 09:32:45 -04:00).
crypto: arm64/sha256 - remove obsolete chunking logic
Since kernel-mode NEON sections are now preemptible on arm64, there is no longer any need to limit the length of them. Reviewed-by: Ard Biesheuvel <ardb@kernel.org> Signed-off-by: Eric Biggers <ebiggers@google.com> Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
This commit is contained in:
@@ -86,23 +86,8 @@ static struct shash_alg algs[] = { {
|
||||
static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
|
||||
unsigned int len)
|
||||
{
|
||||
do {
|
||||
unsigned int chunk = len;
|
||||
|
||||
/*
|
||||
* Don't hog the CPU for the entire time it takes to process all
|
||||
* input when running on a preemptible kernel, but process the
|
||||
* data block by block instead.
|
||||
*/
|
||||
if (IS_ENABLED(CONFIG_PREEMPTION))
|
||||
chunk = SHA256_BLOCK_SIZE;
|
||||
|
||||
chunk -= sha256_base_do_update_blocks(desc, data, chunk,
|
||||
sha256_neon_transform);
|
||||
data += chunk;
|
||||
len -= chunk;
|
||||
} while (len >= SHA256_BLOCK_SIZE);
|
||||
return len;
|
||||
return sha256_base_do_update_blocks(desc, data, len,
|
||||
sha256_neon_transform);
|
||||
}
|
||||
|
||||
static int sha256_finup_neon(struct shash_desc *desc, const u8 *data,
|
||||
|
||||
Reference in New Issue
Block a user