crypto: aegis128/neon - move final tag check to SIMD domain
Instead of calculating the tag and returning it to the caller on decryption,
use a SIMD compare and min across vector to perform the comparison. This is
slightly more efficient, and removes the need on the caller's part to wipe
the tag from memory if the decryption failed.

While at it, switch to unsigned int when passing cryptlen and assoclen - we
don't support input sizes where it matters anyway.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Reviewed-by: Ondrej Mosnacek <omosnacek@gmail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
commit 97b70180b7
parent ad00d41b47
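To illustrate the idea in isolation: a minimal userspace sketch (not the kernel code) of checking a 16-byte tag entirely in the SIMD domain, assuming an AArch64 host with <arm_neon.h>; the tag_matches() helper is invented here for illustration.

#include <arm_neon.h>
#include <stdint.h>
#include <stdio.h>

/* Compare two 16-byte tags without moving the computed tag out of a NEON register. */
static int tag_matches(const uint8_t computed[16], const uint8_t expected[16])
{
	/* 0xff in every lane that matches, 0x00 in every lane that differs */
	uint8x16_t eq = vceqq_u8(vld1q_u8(computed), vld1q_u8(expected));

	/* the minimum across all lanes is 0xff only if every lane matched */
	return vminvq_u8(eq) == 0xff;
}

int main(void)
{
	uint8_t a[16] = { 1, 2, 3 }, b[16] = { 1, 2, 3 }, c[16] = { 1, 2, 4 };

	printf("%d %d\n", tag_matches(a, b), tag_matches(a, c));	/* prints "1 0" */
	return 0;
}

Since only a scalar verdict leaves the SIMD unit, a failed decryption leaves no copy of the computed tag in memory for the caller to wipe.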
crypto/aegis128-core.c:

@@ -67,9 +67,11 @@ void crypto_aegis128_encrypt_chunk_simd(struct aegis_state *state, u8 *dst,
 					const u8 *src, unsigned int size);
 void crypto_aegis128_decrypt_chunk_simd(struct aegis_state *state, u8 *dst,
 					const u8 *src, unsigned int size);
-void crypto_aegis128_final_simd(struct aegis_state *state,
-				union aegis_block *tag_xor,
-				u64 assoclen, u64 cryptlen);
+int crypto_aegis128_final_simd(struct aegis_state *state,
+			       union aegis_block *tag_xor,
+			       unsigned int assoclen,
+			       unsigned int cryptlen,
+			       unsigned int authsize);
 
 static void crypto_aegis128_update(struct aegis_state *state)
 {
@@ -411,7 +413,7 @@ static int crypto_aegis128_encrypt(struct aead_request *req)
 		crypto_aegis128_process_crypt(&state, &walk,
 					      crypto_aegis128_encrypt_chunk_simd);
 		crypto_aegis128_final_simd(&state, &tag, req->assoclen,
-					   cryptlen);
+					   cryptlen, 0);
 	} else {
 		crypto_aegis128_init(&state, &ctx->key, req->iv);
 		crypto_aegis128_process_ad(&state, req->src, req->assoclen);
@@ -445,8 +447,15 @@ static int crypto_aegis128_decrypt(struct aead_request *req)
 		crypto_aegis128_process_ad(&state, req->src, req->assoclen);
 		crypto_aegis128_process_crypt(&state, &walk,
 					      crypto_aegis128_decrypt_chunk_simd);
-		crypto_aegis128_final_simd(&state, &tag, req->assoclen,
-					   cryptlen);
+		if (unlikely(crypto_aegis128_final_simd(&state, &tag,
+							req->assoclen,
+							cryptlen, authsize))) {
+			skcipher_walk_aead_decrypt(&walk, req, false);
+			crypto_aegis128_process_crypt(NULL, req, &walk,
+						      crypto_aegis128_wipe_chunk);
+			return -EBADMSG;
+		}
+		return 0;
 	} else {
 		crypto_aegis128_init(&state, &ctx->key, req->iv);
 		crypto_aegis128_process_ad(&state, req->src, req->assoclen);
crypto/aegis128-neon-inner.c:

@@ -199,6 +199,17 @@ static uint8x16_t vqtbx1q_u8(uint8x16_t v, uint8x16_t a, uint8x16_t b)
 	return vcombine_u8(vtbx2_u8(vget_low_u8(v), __a.pair, vget_low_u8(b)),
 			   vtbx2_u8(vget_high_u8(v), __a.pair, vget_high_u8(b)));
 }
+
+static int8_t vminvq_s8(int8x16_t v)
+{
+	int8x8_t s = vpmin_s8(vget_low_s8(v), vget_high_s8(v));
+
+	s = vpmin_s8(s, s);
+	s = vpmin_s8(s, s);
+	s = vpmin_s8(s, s);
+
+	return vget_lane_s8(s, 0);
+}
 #endif
 
 static const uint8_t permute[] __aligned(64) = {
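vminvq_s8() is an AArch64-only intrinsic, so the hunk above supplies a fallback for the 32-bit ARM build: repeated pairwise minima (vpmin_s8) fold the 16 lanes down to a single value. A scalar model of that reduction, for illustration only (the min_across_lanes() helper is made up):

#include <stdint.h>
#include <stdio.h>

/* Each pass keeps the smaller of every adjacent pair, halving the number
 * of live lanes: 16 -> 8 -> 4 -> 2 -> 1, like the chain of vpmin_s8() calls. */
static int8_t min_across_lanes(const int8_t v[16])
{
	int8_t s[16];
	int n, i;

	for (i = 0; i < 16; i++)
		s[i] = v[i];

	for (n = 16; n > 1; n /= 2)
		for (i = 0; i < n / 2; i++)
			s[i] = s[2 * i] < s[2 * i + 1] ? s[2 * i] : s[2 * i + 1];

	return s[0];
}

int main(void)
{
	int8_t v[16] = { 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };

	printf("%d\n", min_across_lanes(v));	/* prints "-1" */
	return 0;
}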
crypto/aegis128-neon-inner.c (continued):

@@ -302,8 +313,10 @@ void crypto_aegis128_decrypt_chunk_neon(void *state, void *dst, const void *src,
 	aegis128_save_state_neon(st, state);
 }
 
-void crypto_aegis128_final_neon(void *state, void *tag_xor, uint64_t assoclen,
-				uint64_t cryptlen)
+int crypto_aegis128_final_neon(void *state, void *tag_xor,
+			       unsigned int assoclen,
+			       unsigned int cryptlen,
+			       unsigned int authsize)
 {
 	struct aegis128_state st = aegis128_load_state_neon(state);
 	uint8x16_t v;
@@ -311,13 +324,21 @@ void crypto_aegis128_final_neon(void *state, void *tag_xor, uint64_t assoclen,
 
 	preload_sbox();
 
-	v = st.v[3] ^ (uint8x16_t)vcombine_u64(vmov_n_u64(8 * assoclen),
-					       vmov_n_u64(8 * cryptlen));
+	v = st.v[3] ^ (uint8x16_t)vcombine_u64(vmov_n_u64(8ULL * assoclen),
+					       vmov_n_u64(8ULL * cryptlen));
 
 	for (i = 0; i < 7; i++)
 		st = aegis128_update_neon(st, v);
 
-	v = vld1q_u8(tag_xor);
-	v ^= st.v[0] ^ st.v[1] ^ st.v[2] ^ st.v[3] ^ st.v[4];
+	v = st.v[0] ^ st.v[1] ^ st.v[2] ^ st.v[3] ^ st.v[4];
+
+	if (authsize > 0) {
+		v = vqtbl1q_u8(~vceqq_u8(v, vld1q_u8(tag_xor)),
+			       vld1q_u8(permute + authsize));
+
+		return vminvq_s8((int8x16_t)v);
+	}
+
 	vst1q_u8(tag_xor, v);
+	return 0;
 }
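What the authsize handling above computes, modeled in scalar C (illustration only; the check_tag() helper is invented): the inverted compare yields 0x00 for matching bytes and 0xff for mismatching ones, the table lookup through permute + authsize keeps only the first authsize of those bytes (an out-of-range tbl index produces 0), and the signed minimum across the result is therefore negative exactly when one of the checked bytes differed.

#include <stdint.h>
#include <stdio.h>

/* Returns 0 when the first 'authsize' bytes agree and a negative value
 * otherwise, mirroring what the permute + vminvq_s8 sequence computes. */
static int check_tag(const uint8_t computed[16], const uint8_t expected[16],
		     unsigned int authsize)
{
	int8_t min = 0;
	unsigned int i;

	for (i = 0; i < authsize; i++) {
		/* ~vceqq: 0x00 if the bytes are equal, 0xff (-1 as int8_t) if not */
		int8_t d = (computed[i] == expected[i]) ? 0 : -1;

		if (d < min)
			min = d;
	}
	return min;
}

int main(void)
{
	uint8_t a[16] = { 0xaa, 0xbb, 0xcc }, b[16] = { 0xaa, 0xbb, 0xdd };

	printf("%d %d\n", check_tag(a, b, 2), check_tag(a, b, 3));	/* prints "0 -1" */
	return 0;
}

For authsize == 0 (the encryption path) the comparison is skipped and the finalized tag is still stored to tag_xor as before.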
crypto/aegis128-neon.c:

@@ -14,8 +14,10 @@ void crypto_aegis128_encrypt_chunk_neon(void *state, void *dst, const void *src,
 					unsigned int size);
 void crypto_aegis128_decrypt_chunk_neon(void *state, void *dst, const void *src,
 					unsigned int size);
-void crypto_aegis128_final_neon(void *state, void *tag_xor, uint64_t assoclen,
-				uint64_t cryptlen);
+int crypto_aegis128_final_neon(void *state, void *tag_xor,
+			       unsigned int assoclen,
+			       unsigned int cryptlen,
+			       unsigned int authsize);
 
 int aegis128_have_aes_insn __ro_after_init;
 
@@ -60,11 +62,18 @@ void crypto_aegis128_decrypt_chunk_simd(union aegis_block *state, u8 *dst,
 	kernel_neon_end();
 }
 
-void crypto_aegis128_final_simd(union aegis_block *state,
-				union aegis_block *tag_xor,
-				u64 assoclen, u64 cryptlen)
+int crypto_aegis128_final_simd(union aegis_block *state,
+			       union aegis_block *tag_xor,
+			       unsigned int assoclen,
+			       unsigned int cryptlen,
+			       unsigned int authsize)
 {
+	int ret;
+
 	kernel_neon_begin();
-	crypto_aegis128_final_neon(state, tag_xor, assoclen, cryptlen);
+	ret = crypto_aegis128_final_neon(state, tag_xor, assoclen, cryptlen,
+					 authsize);
 	kernel_neon_end();
+
+	return ret;
 }