Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/cryptodev-2.6.git

Stephen Rothwell 2025-01-14 12:16:43 +11:00
commit 5f4ae54f45
89 changed files with 1025 additions and 4807 deletions

View File

@ -13,6 +13,7 @@ properties:
compatible:
items:
- enum:
- qcom,qcs8300-inline-crypto-engine
- qcom,sa8775p-inline-crypto-engine
- qcom,sc7180-inline-crypto-engine
- qcom,sc7280-inline-crypto-engine

View File

@ -17,6 +17,10 @@ properties:
- qcom,prng-ee # 8996 and later using EE
- items:
- enum:
- qcom,ipq5332-trng
- qcom,ipq5424-trng
- qcom,ipq9574-trng
- qcom,qcs8300-trng
- qcom,sa8255p-trng
- qcom,sa8775p-trng
- qcom,sc7280-trng

View File

@ -44,6 +44,7 @@ properties:
- items:
- enum:
- qcom,qcs8300-qce
- qcom,sa8775p-qce
- qcom,sc7280-qce
- qcom,sm6350-qce

View File

@ -272,7 +272,7 @@ The available attributes are:
echo async_irq > /sys/bus/dsa/drivers/crypto/sync_mode
Async mode without interrupts (caller must poll) can be enabled by
writing 'async' to it::
writing 'async' to it (please see Caveat)::
echo async > /sys/bus/dsa/drivers/crypto/sync_mode
@ -283,6 +283,13 @@ The available attributes are:
The default mode is 'sync'.
Caveat: since the only mechanism that iaa_crypto currently implements
for async polling without interrupts is via the 'sync' mode as
described earlier, writing 'async' to
'/sys/bus/dsa/drivers/crypto/sync_mode' will internally enable the
'sync' mode. This is to ensure correct iaa_crypto behavior until true
async polling without interrupts is enabled in iaa_crypto.
.. _iaa_default_config:
IAA Default Configuration

View File

@ -20134,7 +20134,7 @@ F: net/rfkill/
RHASHTABLE
M: Thomas Graf <tgraf@suug.ch>
M: Herbert Xu <herbert@gondor.apana.org.au>
L: netdev@vger.kernel.org
L: linux-crypto@vger.kernel.org
S: Maintained
F: include/linux/rhashtable-types.h
F: include/linux/rhashtable.h

View File

@ -652,7 +652,6 @@ CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m

View File

@ -1032,7 +1032,6 @@ CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_DEFLATE=m
CONFIG_CRYPTO_LZO=m

View File

@ -579,7 +579,6 @@ CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_HCTR2=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
@ -589,7 +588,6 @@ CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_SM3_GENERIC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_LZO=m

View File

@ -536,7 +536,6 @@ CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_HCTR2=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
@ -546,7 +545,6 @@ CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_SM3_GENERIC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_LZO=m

View File

@ -556,7 +556,6 @@ CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_HCTR2=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
@ -566,7 +565,6 @@ CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_SM3_GENERIC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_LZO=m

View File

@ -528,7 +528,6 @@ CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_HCTR2=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
@ -538,7 +537,6 @@ CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_SM3_GENERIC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_LZO=m

View File

@ -538,7 +538,6 @@ CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_HCTR2=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
@ -548,7 +547,6 @@ CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_SM3_GENERIC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_LZO=m

View File

@ -555,7 +555,6 @@ CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_HCTR2=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
@ -565,7 +564,6 @@ CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_SM3_GENERIC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_LZO=m

View File

@ -642,7 +642,6 @@ CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_HCTR2=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
@ -652,7 +651,6 @@ CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_SM3_GENERIC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_LZO=m

View File

@ -528,7 +528,6 @@ CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_HCTR2=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
@ -538,7 +537,6 @@ CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_SM3_GENERIC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_LZO=m

View File

@ -529,7 +529,6 @@ CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_HCTR2=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
@ -539,7 +538,6 @@ CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_SM3_GENERIC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_LZO=m

View File

@ -545,7 +545,6 @@ CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_HCTR2=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
@ -555,7 +554,6 @@ CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_SM3_GENERIC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_LZO=m

View File

@ -526,7 +526,6 @@ CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_HCTR2=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
@ -536,7 +535,6 @@ CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_SM3_GENERIC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_LZO=m

View File

@ -526,7 +526,6 @@ CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_HCTR2=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
@ -536,7 +535,6 @@ CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_SM3_GENERIC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_LZO=m

View File

@ -222,7 +222,6 @@ CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m

View File

@ -177,10 +177,8 @@ CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m
CONFIG_CRYPTO_CRCT10DIF=m
CONFIG_CRYPTO_MD4=m

View File

@ -172,10 +172,8 @@ CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m
CONFIG_CRYPTO_CRCT10DIF=m
CONFIG_CRYPTO_MD4=m

View File

@ -172,10 +172,8 @@ CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m
CONFIG_CRYPTO_CRCT10DIF=m
CONFIG_CRYPTO_MD4=m

View File

@ -305,7 +305,6 @@ CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_SHA512=m

View File

@ -176,7 +176,6 @@ CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_LZO=m

View File

@ -770,7 +770,6 @@ CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_HCTR2=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_AEGIS128=m
@ -782,7 +781,6 @@ CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_SM3_GENERIC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_CRC32=m

View File

@ -756,7 +756,6 @@ CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_HCTR2=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_AEGIS128=m
@ -768,7 +767,6 @@ CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_SM3_GENERIC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_CRC32=m

View File

@ -240,7 +240,6 @@ static struct aead_alg crypto_aegis128_aesni_alg = {
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct aegis_ctx) +
__alignof__(struct aegis_ctx),
.cra_alignmask = 0,
.cra_priority = 400,
.cra_name = "__aegis128",

View File

@ -384,8 +384,8 @@
vpshufd $0xd3, H_CUR_XMM, %xmm0
vpsrad $31, %xmm0, %xmm0
vpaddq H_CUR_XMM, H_CUR_XMM, H_CUR_XMM
vpand .Lgfpoly_and_internal_carrybit(%rip), %xmm0, %xmm0
vpxor %xmm0, H_CUR_XMM, H_CUR_XMM
// H_CUR_XMM ^= xmm0 & gfpoly_and_internal_carrybit
vpternlogd $0x78, .Lgfpoly_and_internal_carrybit(%rip), %xmm0, H_CUR_XMM
// Load the gfpoly constant.
vbroadcasti32x4 .Lgfpoly(%rip), GFPOLY
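For reference, the vpternlogd immediate is an 8-entry truth table indexed by the three source bits, so 0x78 encodes a ^ (b & c) (the carry fold performed just above) and 0x96 encodes the three-way XOR used elsewhere in this series. A minimal userspace C model of that encoding (illustration only, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Bitwise model of VPTERNLOG: for each bit position, the 3-bit index
 * (a<<2 | b<<1 | c) selects one bit of the 8-bit immediate. */
static uint32_t ternlog(uint8_t imm, uint32_t a, uint32_t b, uint32_t c)
{
	uint32_t r = 0;

	for (int bit = 0; bit < 32; bit++) {
		unsigned int idx = (((a >> bit) & 1) << 2) |
				   (((b >> bit) & 1) << 1) |
				   ((c >> bit) & 1);
		r |= (uint32_t)((imm >> idx) & 1) << bit;
	}
	return r;
}

int main(void)
{
	uint32_t a = 0x12345678, b = 0x0abcdef0, c = 0x0f0f0f0f;

	/* 0x78: a ^ (b & c) -- folds the carry bit into H_CUR above */
	printf("0x78 ok: %d\n", ternlog(0x78, a, b, c) == (a ^ (b & c)));
	/* 0x96: a ^ b ^ c -- the three-way XOR pattern */
	printf("0x96 ok: %d\n", ternlog(0x96, a, b, c) == (a ^ b ^ c));
	return 0;
}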
@ -562,6 +562,32 @@
vpxord RNDKEY0, V3, V3
.endm
// Do the last AES round for four vectors of counter blocks V0-V3, XOR source
// data with the resulting keystream, and write the result to DST and
// GHASHDATA[0-3]. (Implementation differs slightly, but has the same effect.)
.macro _aesenclast_and_xor_4x
// XOR the source data with the last round key, saving the result in
// GHASHDATA[0-3]. This reduces latency by taking advantage of the
// property vaesenclast(key, a) ^ b == vaesenclast(key ^ b, a).
vpxord 0*VL(SRC), RNDKEYLAST, GHASHDATA0
vpxord 1*VL(SRC), RNDKEYLAST, GHASHDATA1
vpxord 2*VL(SRC), RNDKEYLAST, GHASHDATA2
vpxord 3*VL(SRC), RNDKEYLAST, GHASHDATA3
// Do the last AES round. This handles the XOR with the source data
// too, as per the optimization described above.
vaesenclast GHASHDATA0, V0, GHASHDATA0
vaesenclast GHASHDATA1, V1, GHASHDATA1
vaesenclast GHASHDATA2, V2, GHASHDATA2
vaesenclast GHASHDATA3, V3, GHASHDATA3
// Store the en/decrypted data to DST.
vmovdqu8 GHASHDATA0, 0*VL(DST)
vmovdqu8 GHASHDATA1, 1*VL(DST)
vmovdqu8 GHASHDATA2, 2*VL(DST)
vmovdqu8 GHASHDATA3, 3*VL(DST)
.endm
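The key-folding identity this macro relies on can be checked directly with AES-NI intrinsics; a minimal userspace sketch (build with something like gcc -maes and run on an AES-NI capable CPU; the constants are arbitrary):

#include <immintrin.h>
#include <stdio.h>
#include <string.h>

/* AESENCLAST is ShiftRows+SubBytes followed by an XOR with the round key,
 * so an extra XOR with b can be folded into the key ahead of time:
 * vaesenclast(key, a) ^ b == vaesenclast(key ^ b, a). */
int main(void)
{
	__m128i a   = _mm_set_epi32(0x00112233, 0x44556677, 0x0899aabb, 0x4cddeeff);
	__m128i key = _mm_set_epi32(0x0f1e2d3c, 0x4b5a6978, 0x0796a5b4, 0x43d2e1f0);
	__m128i b   = _mm_set_epi32(0x1eadbeef, 0x01234567, 0x09abcdef, 0x76543210);

	__m128i lhs = _mm_xor_si128(_mm_aesenclast_si128(a, key), b);
	__m128i rhs = _mm_aesenclast_si128(a, _mm_xor_si128(key, b));

	printf("%s\n", memcmp(&lhs, &rhs, 16) == 0 ? "identity holds" : "mismatch");
	return 0;
}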
// void aes_gcm_{enc,dec}_update_##suffix(const struct aes_gcm_key_avx10 *key,
// const u32 le_ctr[4], u8 ghash_acc[16],
// const u8 *src, u8 *dst, int datalen);
@ -640,7 +666,7 @@
// LE_CTR contains the next set of little-endian counter blocks.
.set LE_CTR, V12
// RNDKEY0, RNDKEYLAST, and RNDKEY_M[9-5] contain cached AES round keys,
// RNDKEY0, RNDKEYLAST, and RNDKEY_M[9-1] contain cached AES round keys,
// copied to all 128-bit lanes. RNDKEY0 is the zero-th round key,
// RNDKEYLAST the last, and RNDKEY_M\i the one \i-th from the last.
.set RNDKEY0, V13
@ -650,15 +676,10 @@
.set RNDKEY_M7, V17
.set RNDKEY_M6, V18
.set RNDKEY_M5, V19
// RNDKEYLAST[0-3] temporarily store the last AES round key XOR'd with
// the corresponding block of source data. This is useful because
// vaesenclast(key, a) ^ b == vaesenclast(key ^ b, a), and key ^ b can
// be computed in parallel with the AES rounds.
.set RNDKEYLAST0, V20
.set RNDKEYLAST1, V21
.set RNDKEYLAST2, V22
.set RNDKEYLAST3, V23
.set RNDKEY_M4, V20
.set RNDKEY_M3, V21
.set RNDKEY_M2, V22
.set RNDKEY_M1, V23
// GHASHTMP[0-2] are temporary variables used by _ghash_step_4x. These
// cannot coincide with anything used for AES encryption, since for
@ -713,7 +734,7 @@
// Pre-subtracting 4*VL from DATALEN saves an instruction from the main
// loop and also ensures that at least one write always occurs to
// DATALEN, zero-extending it and allowing DATALEN64 to be used later.
sub $4*VL, DATALEN
add $-4*VL, DATALEN // shorter than 'sub 4*VL' when VL=32
jl .Lcrypt_loop_4x_done\@
// Load powers of the hash key.
@ -748,26 +769,15 @@
add $16, %rax
cmp %rax, RNDKEYLAST_PTR
jne 1b
vpxord 0*VL(SRC), RNDKEYLAST, RNDKEYLAST0
vpxord 1*VL(SRC), RNDKEYLAST, RNDKEYLAST1
vpxord 2*VL(SRC), RNDKEYLAST, RNDKEYLAST2
vpxord 3*VL(SRC), RNDKEYLAST, RNDKEYLAST3
vaesenclast RNDKEYLAST0, V0, GHASHDATA0
vaesenclast RNDKEYLAST1, V1, GHASHDATA1
vaesenclast RNDKEYLAST2, V2, GHASHDATA2
vaesenclast RNDKEYLAST3, V3, GHASHDATA3
vmovdqu8 GHASHDATA0, 0*VL(DST)
vmovdqu8 GHASHDATA1, 1*VL(DST)
vmovdqu8 GHASHDATA2, 2*VL(DST)
vmovdqu8 GHASHDATA3, 3*VL(DST)
add $4*VL, SRC
add $4*VL, DST
sub $4*VL, DATALEN
_aesenclast_and_xor_4x
sub $-4*VL, SRC // shorter than 'add 4*VL' when VL=32
sub $-4*VL, DST
add $-4*VL, DATALEN
jl .Lghash_last_ciphertext_4x\@
.endif
// Cache as many additional AES round keys as possible.
.irp i, 9,8,7,6,5
.irp i, 9,8,7,6,5,4,3,2,1
vbroadcasti32x4 -\i*16(RNDKEYLAST_PTR), RNDKEY_M\i
.endr
@ -799,50 +809,17 @@
_vaesenc_4x RNDKEY
128:
// XOR the source data with the last round key, saving the result in
// RNDKEYLAST[0-3]. This reduces latency by taking advantage of the
// property vaesenclast(key, a) ^ b == vaesenclast(key ^ b, a).
.if \enc
vpxord 0*VL(SRC), RNDKEYLAST, RNDKEYLAST0
vpxord 1*VL(SRC), RNDKEYLAST, RNDKEYLAST1
vpxord 2*VL(SRC), RNDKEYLAST, RNDKEYLAST2
vpxord 3*VL(SRC), RNDKEYLAST, RNDKEYLAST3
.else
vpxord GHASHDATA0, RNDKEYLAST, RNDKEYLAST0
vpxord GHASHDATA1, RNDKEYLAST, RNDKEYLAST1
vpxord GHASHDATA2, RNDKEYLAST, RNDKEYLAST2
vpxord GHASHDATA3, RNDKEYLAST, RNDKEYLAST3
.endif
// Finish the AES encryption of the counter blocks in V0-V3, interleaved
// with the GHASH update of the ciphertext blocks in GHASHDATA[0-3].
.irp i, 9,8,7,6,5
.irp i, 9,8,7,6,5,4,3,2,1
_ghash_step_4x (9 - \i)
_vaesenc_4x RNDKEY_M\i
_ghash_step_4x (9 - \i)
.endr
.irp i, 4,3,2,1
vbroadcasti32x4 -\i*16(RNDKEYLAST_PTR), RNDKEY
_vaesenc_4x RNDKEY
_ghash_step_4x (9 - \i)
.endr
_ghash_step_4x 9
// Do the last AES round. This handles the XOR with the source data
// too, as per the optimization described above.
vaesenclast RNDKEYLAST0, V0, GHASHDATA0
vaesenclast RNDKEYLAST1, V1, GHASHDATA1
vaesenclast RNDKEYLAST2, V2, GHASHDATA2
vaesenclast RNDKEYLAST3, V3, GHASHDATA3
// Store the en/decrypted data to DST.
vmovdqu8 GHASHDATA0, 0*VL(DST)
vmovdqu8 GHASHDATA1, 1*VL(DST)
vmovdqu8 GHASHDATA2, 2*VL(DST)
vmovdqu8 GHASHDATA3, 3*VL(DST)
add $4*VL, SRC
add $4*VL, DST
sub $4*VL, DATALEN
_aesenclast_and_xor_4x
sub $-4*VL, SRC // shorter than 'add 4*VL' when VL=32
sub $-4*VL, DST
add $-4*VL, DATALEN
jge .Lcrypt_loop_4x\@
.if \enc
@ -856,7 +833,7 @@
.Lcrypt_loop_4x_done\@:
// Undo the extra subtraction by 4*VL and check whether data remains.
add $4*VL, DATALEN
sub $-4*VL, DATALEN // shorter than 'add 4*VL' when VL=32
jz .Ldone\@
// The data length isn't a multiple of 4*VL. Process the remaining data

View File

@ -80,22 +80,6 @@
.byte 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80
.text
// Function parameters
.set KEY, %rdi // Initially points to crypto_aes_ctx, then is
// advanced to point to 7th-from-last round key
.set SRC, %rsi // Pointer to next source data
.set DST, %rdx // Pointer to next destination data
.set LEN, %ecx // Remaining length in bytes
.set LEN8, %cl
.set LEN64, %rcx
.set TWEAK, %r8 // Pointer to next tweak
// %rax holds the AES key length in bytes.
.set KEYLEN, %eax
.set KEYLEN64, %rax
// %r9-r11 are available as temporaries.
.macro _define_Vi i
.if VL == 16
.set V\i, %xmm\i
@ -112,41 +96,31 @@
// Define register aliases V0-V15, or V0-V31 if all 32 SIMD registers
// are available, that map to the xmm, ymm, or zmm registers according
// to the selected Vector Length (VL).
_define_Vi 0
_define_Vi 1
_define_Vi 2
_define_Vi 3
_define_Vi 4
_define_Vi 5
_define_Vi 6
_define_Vi 7
_define_Vi 8
_define_Vi 9
_define_Vi 10
_define_Vi 11
_define_Vi 12
_define_Vi 13
_define_Vi 14
_define_Vi 15
.irp i, 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
_define_Vi \i
.endr
.if USE_AVX10
_define_Vi 16
_define_Vi 17
_define_Vi 18
_define_Vi 19
_define_Vi 20
_define_Vi 21
_define_Vi 22
_define_Vi 23
_define_Vi 24
_define_Vi 25
_define_Vi 26
_define_Vi 27
_define_Vi 28
_define_Vi 29
_define_Vi 30
_define_Vi 31
.irp i, 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
_define_Vi \i
.endr
.endif
// Function parameters
.set KEY, %rdi // Initially points to crypto_aes_ctx, then is
// advanced to point to 7th-from-last round key
.set SRC, %rsi // Pointer to next source data
.set DST, %rdx // Pointer to next destination data
.set LEN, %ecx // Remaining length in bytes
.set LEN8, %cl
.set LEN64, %rcx
.set TWEAK, %r8 // Pointer to next tweak
// %rax holds the AES key length in bytes.
.set KEYLEN, %eax
.set KEYLEN64, %rax
// %r9-r11 are available as temporaries.
// V0-V3 hold the data blocks during the main loop, or temporary values
// otherwise. V4-V5 hold temporary values.
@ -214,6 +188,7 @@
.endm
// Move a vector between memory and a register.
// The register operand must be in the first 16 vector registers.
.macro _vmovdqu src, dst
.if VL < 64
vmovdqu \src, \dst
@ -234,11 +209,12 @@
.endm
// XOR two vectors together.
// Any register operands must be in the first 16 vector registers.
.macro _vpxor src1, src2, dst
.if USE_AVX10
vpxord \src1, \src2, \dst
.else
.if VL < 64
vpxor \src1, \src2, \dst
.else
vpxord \src1, \src2, \dst
.endif
.endm
@ -259,8 +235,12 @@
vpshufd $0x13, \src, \tmp
vpaddq \src, \src, \dst
vpsrad $31, \tmp, \tmp
.if USE_AVX10
vpternlogd $0x78, GF_POLY_XMM, \tmp, \dst
.else
vpand GF_POLY_XMM, \tmp, \tmp
vpxor \tmp, \dst, \dst
.endif
.endm
// Given the XTS tweak(s) in the vector \src, compute the next vector of
@ -369,9 +349,14 @@
// Do one step in computing the next set of tweaks using the VPCLMULQDQ method
// (the same method _next_tweakvec uses for VL > 16). This means multiplying
// each tweak by x^(4*VL/16) independently. Since 4*VL/16 is a multiple of 8
// when VL > 16 (which it is here), the needed shift amounts are byte-aligned,
// which allows the use of vpsrldq and vpslldq to do 128-bit wide shifts.
// each tweak by x^(4*VL/16) independently.
//
// Since 4*VL/16 is a multiple of 8 when VL > 16 (which it is here), the needed
// shift amounts are byte-aligned, which allows the use of vpsrldq and vpslldq
// to do 128-bit wide shifts. The 128-bit left shift (vpslldq) saves
// instructions directly. The 128-bit right shift (vpsrldq) performs better
// than a 64-bit right shift on Intel CPUs in the context where it is used here,
// because it runs on a different execution port from the AES instructions.
.macro _tweak_step_pclmul i
.if \i == 0
vpsrldq $(128 - 4*VL/16) / 8, TWEAK0, NEXT_TWEAK0
@ -406,7 +391,7 @@
// \i that include at least 0 through 19, then 1000 which signals the last step.
//
// This is used to interleave the computation of the next set of tweaks with the
// AES en/decryptions, which increases performance in some cases.
// AES en/decryptions, which increases performance in some cases. Clobbers V5.
.macro _tweak_step i
.if VL == 16
_tweak_step_mulx \i
@ -443,9 +428,10 @@
// the last round needs different instructions.
//
// An alternative approach would be to roll up all the round loops. We
// don't do that because it isn't compatible with caching the round keys
// in registers which we do when possible (see below), and also because
// it seems unwise to rely *too* heavily on the CPU's branch predictor.
// don't do that because (a) it isn't compatible with caching the round
// keys in registers which we do when possible (see below), (b) we
// interleave the AES rounds with the XTS tweak computation, and (c) it
// seems unwise to rely *too* heavily on the CPU's branch predictor.
lea OFFS-16(KEY, KEYLEN64, 4), KEY
// If all 32 SIMD registers are available, cache all the round keys.
@ -472,90 +458,94 @@
.endif
.endm
// Do a single round of AES encryption (if \enc==1) or decryption (if \enc==0)
// on the block(s) in \data using the round key(s) in \key. The register length
// determines the number of AES blocks en/decrypted.
.macro _vaes enc, last, key, data
// Do a single non-last round of AES encryption (if \enc==1) or decryption (if
// \enc==0) on the block(s) in \data using the round key(s) in \key. The
// register length determines the number of AES blocks en/decrypted.
.macro _vaes enc, key, data
.if \enc
.if \last
vaesenclast \key, \data, \data
.else
vaesenc \key, \data, \data
.endif
.else
.if \last
vaesdeclast \key, \data, \data
.else
vaesdec \key, \data, \data
.endif
.endm
// Same as _vaes, but does the last round.
.macro _vaeslast enc, key, data
.if \enc
vaesenclast \key, \data, \data
.else
vaesdeclast \key, \data, \data
.endif
.endm
// Do a single round of AES en/decryption on the block(s) in \data, using the
// same key for all block(s). The round key is loaded from the appropriate
// register or memory location for round \i. May clobber V4.
.macro _vaes_1x enc, last, i, xmm_suffix, data
// Do a single non-last round of AES en/decryption on the block(s) in \data,
// using the same key for all block(s). The round key is loaded from the
// appropriate register or memory location for round \i. May clobber \tmp.
.macro _vaes_1x enc, i, xmm_suffix, data, tmp
.if USE_AVX10
_vaes \enc, \last, KEY\i\xmm_suffix, \data
_vaes \enc, KEY\i\xmm_suffix, \data
.else
.ifnb \xmm_suffix
_vaes \enc, \last, (\i-7)*16(KEY), \data
_vaes \enc, (\i-7)*16(KEY), \data
.else
_vbroadcast128 (\i-7)*16(KEY), V4
_vaes \enc, \last, V4, \data
_vbroadcast128 (\i-7)*16(KEY), \tmp
_vaes \enc, \tmp, \data
.endif
.endif
.endm
// Do a single round of AES en/decryption on the blocks in registers V0-V3,
// using the same key for all blocks. The round key is loaded from the
// Do a single non-last round of AES en/decryption on the blocks in registers
// V0-V3, using the same key for all blocks. The round key is loaded from the
// appropriate register or memory location for round \i. In addition, does two
// steps of the computation of the next set of tweaks. May clobber V4.
.macro _vaes_4x enc, last, i
// steps of the computation of the next set of tweaks. May clobber V4 and V5.
.macro _vaes_4x enc, i
.if USE_AVX10
_tweak_step (2*(\i-5))
_vaes \enc, \last, KEY\i, V0
_vaes \enc, \last, KEY\i, V1
_vaes \enc, KEY\i, V0
_vaes \enc, KEY\i, V1
_tweak_step (2*(\i-5) + 1)
_vaes \enc, \last, KEY\i, V2
_vaes \enc, \last, KEY\i, V3
_vaes \enc, KEY\i, V2
_vaes \enc, KEY\i, V3
.else
_vbroadcast128 (\i-7)*16(KEY), V4
_tweak_step (2*(\i-5))
_vaes \enc, \last, V4, V0
_vaes \enc, \last, V4, V1
_vaes \enc, V4, V0
_vaes \enc, V4, V1
_tweak_step (2*(\i-5) + 1)
_vaes \enc, \last, V4, V2
_vaes \enc, \last, V4, V3
_vaes \enc, V4, V2
_vaes \enc, V4, V3
.endif
.endm
// Do tweaked AES en/decryption (i.e., XOR with \tweak, then AES en/decrypt,
// then XOR with \tweak again) of the block(s) in \data. To process a single
// block, use xmm registers and set \xmm_suffix=_XMM. To process a vector of
// length VL, use V* registers and leave \xmm_suffix empty. May clobber V4.
.macro _aes_crypt enc, xmm_suffix, tweak, data
// length VL, use V* registers and leave \xmm_suffix empty. Clobbers \tmp.
.macro _aes_crypt enc, xmm_suffix, tweak, data, tmp
_xor3 KEY0\xmm_suffix, \tweak, \data
cmp $24, KEYLEN
jl .Laes128\@
je .Laes192\@
_vaes_1x \enc, 0, 1, \xmm_suffix, \data
_vaes_1x \enc, 0, 2, \xmm_suffix, \data
_vaes_1x \enc, 1, \xmm_suffix, \data, tmp=\tmp
_vaes_1x \enc, 2, \xmm_suffix, \data, tmp=\tmp
.Laes192\@:
_vaes_1x \enc, 0, 3, \xmm_suffix, \data
_vaes_1x \enc, 0, 4, \xmm_suffix, \data
_vaes_1x \enc, 3, \xmm_suffix, \data, tmp=\tmp
_vaes_1x \enc, 4, \xmm_suffix, \data, tmp=\tmp
.Laes128\@:
_vaes_1x \enc, 0, 5, \xmm_suffix, \data
_vaes_1x \enc, 0, 6, \xmm_suffix, \data
_vaes_1x \enc, 0, 7, \xmm_suffix, \data
_vaes_1x \enc, 0, 8, \xmm_suffix, \data
_vaes_1x \enc, 0, 9, \xmm_suffix, \data
_vaes_1x \enc, 0, 10, \xmm_suffix, \data
_vaes_1x \enc, 0, 11, \xmm_suffix, \data
_vaes_1x \enc, 0, 12, \xmm_suffix, \data
_vaes_1x \enc, 0, 13, \xmm_suffix, \data
_vaes_1x \enc, 1, 14, \xmm_suffix, \data
_vpxor \tweak, \data, \data
.irp i, 5,6,7,8,9,10,11,12,13
_vaes_1x \enc, \i, \xmm_suffix, \data, tmp=\tmp
.endr
.if USE_AVX10
vpxord KEY14\xmm_suffix, \tweak, \tmp
.else
.ifnb \xmm_suffix
vpxor 7*16(KEY), \tweak, \tmp
.else
_vbroadcast128 7*16(KEY), \tmp
vpxor \tweak, \tmp, \tmp
.endif
.endif
_vaeslast \enc, \tmp, \data
.endm
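A minimal sketch of what _aes_crypt computes for one block, written with AES-NI intrinsics and assuming an already-expanded AES-128 key schedule rk[11] plus a tweak that has already been encrypted with the tweak key (the macro folds the tweak into round key 0 and into the last round key instead of doing separate XORs, but the result is the same):

#include <immintrin.h>

/* XTS per-block step: XOR the tweak in, run the AES rounds, XOR the
 * tweak back out.  rk[0..10] is a hypothetical expanded AES-128
 * encryption key schedule; 'tweak' is the encrypted IV advanced to
 * this block's position. */
static __m128i xts_encrypt_block(__m128i pt, __m128i tweak, const __m128i rk[11])
{
	__m128i x = _mm_xor_si128(pt, tweak);

	x = _mm_xor_si128(x, rk[0]);             /* round-0 whitening   */
	for (int i = 1; i < 10; i++)
		x = _mm_aesenc_si128(x, rk[i]);  /* rounds 1..9         */
	x = _mm_aesenclast_si128(x, rk[10]);     /* last round          */

	return _mm_xor_si128(x, tweak);          /* XOR the tweak again */
}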
.macro _aes_xts_crypt enc
@ -581,7 +571,7 @@
// Compute the first set of tweaks TWEAK[0-3].
_compute_first_set_of_tweaks
sub $4*VL, LEN
add $-4*VL, LEN // shorter than 'sub 4*VL' when VL=32
jl .Lhandle_remainder\@
.Lmain_loop\@:
@ -589,10 +579,10 @@
// XOR each source block with its tweak and the zero-th round key.
.if USE_AVX10
vmovdqu8 0*VL(SRC), V0
vmovdqu8 1*VL(SRC), V1
vmovdqu8 2*VL(SRC), V2
vmovdqu8 3*VL(SRC), V3
_vmovdqu 0*VL(SRC), V0
_vmovdqu 1*VL(SRC), V1
_vmovdqu 2*VL(SRC), V2
_vmovdqu 3*VL(SRC), V3
vpternlogd $0x96, TWEAK0, KEY0, V0
vpternlogd $0x96, TWEAK1, KEY0, V1
vpternlogd $0x96, TWEAK2, KEY0, V2
@ -612,28 +602,43 @@
je .Laes192\@
// Do all the AES rounds on the data blocks, interleaved with
// the computation of the next set of tweaks.
_vaes_4x \enc, 0, 1
_vaes_4x \enc, 0, 2
_vaes_4x \enc, 1
_vaes_4x \enc, 2
.Laes192\@:
_vaes_4x \enc, 0, 3
_vaes_4x \enc, 0, 4
_vaes_4x \enc, 3
_vaes_4x \enc, 4
.Laes128\@:
_vaes_4x \enc, 0, 5
_vaes_4x \enc, 0, 6
_vaes_4x \enc, 0, 7
_vaes_4x \enc, 0, 8
_vaes_4x \enc, 0, 9
_vaes_4x \enc, 0, 10
_vaes_4x \enc, 0, 11
_vaes_4x \enc, 0, 12
_vaes_4x \enc, 0, 13
_vaes_4x \enc, 1, 14
// XOR in the tweaks again.
_vpxor TWEAK0, V0, V0
_vpxor TWEAK1, V1, V1
_vpxor TWEAK2, V2, V2
_vpxor TWEAK3, V3, V3
.irp i, 5,6,7,8,9,10,11,12,13
_vaes_4x \enc, \i
.endr
// Do the last AES round, then XOR the results with the tweaks again.
// Reduce latency by doing the XOR before the vaesenclast, utilizing the
// property vaesenclast(key, a) ^ b == vaesenclast(key ^ b, a)
// (and likewise for vaesdeclast).
.if USE_AVX10
_tweak_step 18
_tweak_step 19
vpxord TWEAK0, KEY14, V4
vpxord TWEAK1, KEY14, V5
_vaeslast \enc, V4, V0
_vaeslast \enc, V5, V1
vpxord TWEAK2, KEY14, V4
vpxord TWEAK3, KEY14, V5
_vaeslast \enc, V4, V2
_vaeslast \enc, V5, V3
.else
_vbroadcast128 7*16(KEY), V4
_tweak_step 18 // uses V5
_tweak_step 19 // uses V5
vpxor TWEAK0, V4, V5
_vaeslast \enc, V5, V0
vpxor TWEAK1, V4, V5
_vaeslast \enc, V5, V1
vpxor TWEAK2, V4, V5
vpxor TWEAK3, V4, V4
_vaeslast \enc, V5, V2
_vaeslast \enc, V4, V3
.endif
// Store the destination blocks.
_vmovdqu V0, 0*VL(DST)
@ -644,9 +649,9 @@
// Finish computing the next set of tweaks.
_tweak_step 1000
add $4*VL, SRC
add $4*VL, DST
sub $4*VL, LEN
sub $-4*VL, SRC // shorter than 'add 4*VL' when VL=32
sub $-4*VL, DST
add $-4*VL, LEN
jge .Lmain_loop\@
// Check for the uncommon case where the data length isn't a multiple of
@ -670,7 +675,7 @@
jl .Lvec_at_a_time_done\@
.Lvec_at_a_time\@:
_vmovdqu (SRC), V0
_aes_crypt \enc, , TWEAK0, V0
_aes_crypt \enc, , TWEAK0, V0, tmp=V1
_vmovdqu V0, (DST)
_next_tweakvec TWEAK0, V0, V1, TWEAK0
add $VL, SRC
@ -687,7 +692,7 @@
jl .Lblock_at_a_time_done\@
.Lblock_at_a_time\@:
vmovdqu (SRC), %xmm0
_aes_crypt \enc, _XMM, TWEAK0_XMM, %xmm0
_aes_crypt \enc, _XMM, TWEAK0_XMM, %xmm0, tmp=%xmm1
vmovdqu %xmm0, (DST)
_next_tweak TWEAK0_XMM, %xmm0, TWEAK0_XMM
add $16, SRC
@ -715,7 +720,7 @@
// Do it now by advancing the tweak and decrypting the last full block.
_next_tweak TWEAK0_XMM, %xmm0, TWEAK1_XMM
vmovdqu (SRC), %xmm0
_aes_crypt \enc, _XMM, TWEAK1_XMM, %xmm0
_aes_crypt \enc, _XMM, TWEAK1_XMM, %xmm0, tmp=%xmm1
.endif
.if USE_AVX10
@ -758,46 +763,48 @@
vpblendvb %xmm3, %xmm0, %xmm1, %xmm0
.endif
// En/decrypt again and store the last full block.
_aes_crypt \enc, _XMM, TWEAK0_XMM, %xmm0
_aes_crypt \enc, _XMM, TWEAK0_XMM, %xmm0, tmp=%xmm1
vmovdqu %xmm0, (DST)
jmp .Ldone\@
.endm
// void aes_xts_encrypt_iv(const struct crypto_aes_ctx *tweak_key,
// u8 iv[AES_BLOCK_SIZE]);
//
// Encrypt |iv| using the AES key |tweak_key| to get the first tweak. Assumes
// that the CPU supports AES-NI and AVX, but not necessarily VAES or AVX10.
SYM_TYPED_FUNC_START(aes_xts_encrypt_iv)
vmovdqu (%rsi), %xmm0
vpxor (%rdi), %xmm0, %xmm0
movl 480(%rdi), %eax // AES key length
lea -16(%rdi, %rax, 4), %rdi
cmp $24, %eax
.set TWEAK_KEY, %rdi
.set IV, %rsi
.set KEYLEN, %eax
.set KEYLEN64, %rax
vmovdqu (IV), %xmm0
vpxor (TWEAK_KEY), %xmm0, %xmm0
movl 480(TWEAK_KEY), KEYLEN
lea -16(TWEAK_KEY, KEYLEN64, 4), TWEAK_KEY
cmp $24, KEYLEN
jl .Lencrypt_iv_aes128
je .Lencrypt_iv_aes192
vaesenc -6*16(%rdi), %xmm0, %xmm0
vaesenc -5*16(%rdi), %xmm0, %xmm0
vaesenc -6*16(TWEAK_KEY), %xmm0, %xmm0
vaesenc -5*16(TWEAK_KEY), %xmm0, %xmm0
.Lencrypt_iv_aes192:
vaesenc -4*16(%rdi), %xmm0, %xmm0
vaesenc -3*16(%rdi), %xmm0, %xmm0
vaesenc -4*16(TWEAK_KEY), %xmm0, %xmm0
vaesenc -3*16(TWEAK_KEY), %xmm0, %xmm0
.Lencrypt_iv_aes128:
vaesenc -2*16(%rdi), %xmm0, %xmm0
vaesenc -1*16(%rdi), %xmm0, %xmm0
vaesenc 0*16(%rdi), %xmm0, %xmm0
vaesenc 1*16(%rdi), %xmm0, %xmm0
vaesenc 2*16(%rdi), %xmm0, %xmm0
vaesenc 3*16(%rdi), %xmm0, %xmm0
vaesenc 4*16(%rdi), %xmm0, %xmm0
vaesenc 5*16(%rdi), %xmm0, %xmm0
vaesenc 6*16(%rdi), %xmm0, %xmm0
vaesenclast 7*16(%rdi), %xmm0, %xmm0
vmovdqu %xmm0, (%rsi)
.irp i, -2,-1,0,1,2,3,4,5,6
vaesenc \i*16(TWEAK_KEY), %xmm0, %xmm0
.endr
vaesenclast 7*16(TWEAK_KEY), %xmm0, %xmm0
vmovdqu %xmm0, (IV)
RET
SYM_FUNC_END(aes_xts_encrypt_iv)
// Below are the actual AES-XTS encryption and decryption functions,
// instantiated from the above macro. They all have the following prototype:
//
// void (*xts_asm_func)(const struct crypto_aes_ctx *key,
// const u8 *src, u8 *dst, unsigned int len,
// void (*xts_crypt_func)(const struct crypto_aes_ctx *key,
// const u8 *src, u8 *dst, int len,
// u8 tweak[AES_BLOCK_SIZE]);
//
// |key| is the data key. |tweak| contains the next tweak; the encryption of

View File

@ -505,7 +505,7 @@ static int xts_setkey_aesni(struct crypto_skcipher *tfm, const u8 *key,
typedef void (*xts_encrypt_iv_func)(const struct crypto_aes_ctx *tweak_key,
u8 iv[AES_BLOCK_SIZE]);
typedef void (*xts_crypt_func)(const struct crypto_aes_ctx *key,
const u8 *src, u8 *dst, unsigned int len,
const u8 *src, u8 *dst, int len,
u8 tweak[AES_BLOCK_SIZE]);
/* This handles cases where the source and/or destination span pages. */
@ -624,14 +624,14 @@ static void aesni_xts_encrypt_iv(const struct crypto_aes_ctx *tweak_key,
}
static void aesni_xts_encrypt(const struct crypto_aes_ctx *key,
const u8 *src, u8 *dst, unsigned int len,
const u8 *src, u8 *dst, int len,
u8 tweak[AES_BLOCK_SIZE])
{
aesni_xts_enc(key, dst, src, len, tweak);
}
static void aesni_xts_decrypt(const struct crypto_aes_ctx *key,
const u8 *src, u8 *dst, unsigned int len,
const u8 *src, u8 *dst, int len,
u8 tweak[AES_BLOCK_SIZE])
{
aesni_xts_dec(key, dst, src, len, tweak);
@ -790,10 +790,10 @@ asmlinkage void aes_xts_encrypt_iv(const struct crypto_aes_ctx *tweak_key,
\
asmlinkage void \
aes_xts_encrypt_##suffix(const struct crypto_aes_ctx *key, const u8 *src, \
u8 *dst, unsigned int len, u8 tweak[AES_BLOCK_SIZE]); \
u8 *dst, int len, u8 tweak[AES_BLOCK_SIZE]); \
asmlinkage void \
aes_xts_decrypt_##suffix(const struct crypto_aes_ctx *key, const u8 *src, \
u8 *dst, unsigned int len, u8 tweak[AES_BLOCK_SIZE]); \
u8 *dst, int len, u8 tweak[AES_BLOCK_SIZE]); \
\
static int xts_encrypt_##suffix(struct skcipher_request *req) \
{ \

View File

@ -94,7 +94,6 @@ static struct crypto_alg bf_cipher_alg = {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = BF_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct bf_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_u = {
.cipher = {

View File

@ -1313,7 +1313,6 @@ static struct crypto_alg camellia_cipher_alg = {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = CAMELLIA_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct camellia_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_u = {
.cipher = {

View File

@ -291,7 +291,6 @@ static struct crypto_alg des3_ede_cipher = {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct des3_ede_x86_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_u = {
.cipher = {

View File

@ -68,7 +68,6 @@ static struct crypto_alg alg = {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = TF_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct twofish_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_u = {
.cipher = {

View File

@ -684,14 +684,6 @@ config CRYPTO_HCTR2
See https://eprint.iacr.org/2021/1441
config CRYPTO_KEYWRAP
tristate "KW (AES Key Wrap)"
select CRYPTO_SKCIPHER
select CRYPTO_MANAGER
help
KW (AES Key Wrap) authenticated encryption mode (NIST SP800-38F
and RFC3394) without padding.
config CRYPTO_LRW
tristate "LRW (Liskov Rivest Wagner)"
select CRYPTO_LIB_GF128MUL
@ -1029,16 +1021,6 @@ config CRYPTO_STREEBOG
https://tc26.ru/upload/iblock/fed/feddbb4d26b685903faa2ba11aea43f6.pdf
https://tools.ietf.org/html/rfc6986
config CRYPTO_VMAC
tristate "VMAC"
select CRYPTO_HASH
select CRYPTO_MANAGER
help
VMAC is a message authentication algorithm designed for
very high speed on 64-bit architectures.
See https://fastcrypto.org/vmac for further information.
config CRYPTO_WP512
tristate "Whirlpool"
select CRYPTO_HASH

View File

@ -69,7 +69,6 @@ obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o
obj-$(CONFIG_CRYPTO_USER) += crypto_user.o
obj-$(CONFIG_CRYPTO_CMAC) += cmac.o
obj-$(CONFIG_CRYPTO_HMAC) += hmac.o
obj-$(CONFIG_CRYPTO_VMAC) += vmac.o
obj-$(CONFIG_CRYPTO_XCBC) += xcbc.o
obj-$(CONFIG_CRYPTO_NULL2) += crypto_null.o
obj-$(CONFIG_CRYPTO_MD4) += md4.o
@ -95,7 +94,6 @@ obj-$(CONFIG_CRYPTO_XTS) += xts.o
obj-$(CONFIG_CRYPTO_CTR) += ctr.o
obj-$(CONFIG_CRYPTO_XCTR) += xctr.o
obj-$(CONFIG_CRYPTO_HCTR2) += hctr2.o
obj-$(CONFIG_CRYPTO_KEYWRAP) += keywrap.o
obj-$(CONFIG_CRYPTO_ADIANTUM) += adiantum.o
obj-$(CONFIG_CRYPTO_NHPOLY1305) += nhpoly1305.o
obj-$(CONFIG_CRYPTO_GCM) += gcm.o

View File

@ -516,7 +516,6 @@ static struct aead_alg crypto_aegis128_alg_generic = {
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct aegis_ctx),
.base.cra_alignmask = 0,
.base.cra_priority = 100,
.base.cra_name = "aegis128",
.base.cra_driver_name = "aegis128-generic",
@ -535,7 +534,6 @@ static struct aead_alg crypto_aegis128_alg_simd = {
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct aegis_ctx),
.base.cra_alignmask = 0,
.base.cra_priority = 200,
.base.cra_name = "aegis128",
.base.cra_driver_name = "aegis128-simd",

View File

@ -27,6 +27,93 @@
#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e
struct crypto_hash_walk {
char *data;
unsigned int offset;
unsigned int flags;
struct page *pg;
unsigned int entrylen;
unsigned int total;
struct scatterlist *sg;
};
static int hash_walk_next(struct crypto_hash_walk *walk)
{
unsigned int offset = walk->offset;
unsigned int nbytes = min(walk->entrylen,
((unsigned int)(PAGE_SIZE)) - offset);
walk->data = kmap_local_page(walk->pg);
walk->data += offset;
walk->entrylen -= nbytes;
return nbytes;
}
static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
struct scatterlist *sg;
sg = walk->sg;
walk->offset = sg->offset;
walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
walk->offset = offset_in_page(walk->offset);
walk->entrylen = sg->length;
if (walk->entrylen > walk->total)
walk->entrylen = walk->total;
walk->total -= walk->entrylen;
return hash_walk_next(walk);
}
static int crypto_hash_walk_first(struct ahash_request *req,
struct crypto_hash_walk *walk)
{
walk->total = req->nbytes;
if (!walk->total) {
walk->entrylen = 0;
return 0;
}
walk->sg = req->src;
walk->flags = req->base.flags;
return hash_walk_new_entry(walk);
}
static int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
walk->data -= walk->offset;
kunmap_local(walk->data);
crypto_yield(walk->flags);
if (err)
return err;
if (walk->entrylen) {
walk->offset = 0;
walk->pg++;
return hash_walk_next(walk);
}
if (!walk->total)
return 0;
walk->sg = sg_next(walk->sg);
return hash_walk_new_entry(walk);
}
static inline int crypto_hash_walk_last(struct crypto_hash_walk *walk)
{
return !(walk->entrylen | walk->total);
}
/*
* For an ahash tfm that is using an shash algorithm (instead of an ahash
* algorithm), this returns the underlying shash tfm.
@ -137,77 +224,6 @@ static int crypto_init_ahash_using_shash(struct crypto_tfm *tfm)
return 0;
}
static int hash_walk_next(struct crypto_hash_walk *walk)
{
unsigned int offset = walk->offset;
unsigned int nbytes = min(walk->entrylen,
((unsigned int)(PAGE_SIZE)) - offset);
walk->data = kmap_local_page(walk->pg);
walk->data += offset;
walk->entrylen -= nbytes;
return nbytes;
}
static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
struct scatterlist *sg;
sg = walk->sg;
walk->offset = sg->offset;
walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
walk->offset = offset_in_page(walk->offset);
walk->entrylen = sg->length;
if (walk->entrylen > walk->total)
walk->entrylen = walk->total;
walk->total -= walk->entrylen;
return hash_walk_next(walk);
}
int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
walk->data -= walk->offset;
kunmap_local(walk->data);
crypto_yield(walk->flags);
if (err)
return err;
if (walk->entrylen) {
walk->offset = 0;
walk->pg++;
return hash_walk_next(walk);
}
if (!walk->total)
return 0;
walk->sg = sg_next(walk->sg);
return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);
int crypto_hash_walk_first(struct ahash_request *req,
struct crypto_hash_walk *walk)
{
walk->total = req->nbytes;
if (!walk->total) {
walk->entrylen = 0;
return 0;
}
walk->sg = req->src;
walk->flags = req->base.flags;
return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);
static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{

View File

@ -407,6 +407,7 @@ EXPORT_SYMBOL_GPL(crypto_remove_final);
int crypto_register_alg(struct crypto_alg *alg)
{
struct crypto_larval *larval;
bool test_started = false;
LIST_HEAD(algs_to_put);
int err;
@ -418,17 +419,19 @@ int crypto_register_alg(struct crypto_alg *alg)
down_write(&crypto_alg_sem);
larval = __crypto_register_alg(alg, &algs_to_put);
if (!IS_ERR_OR_NULL(larval)) {
bool test_started = crypto_boot_test_finished();
test_started = crypto_boot_test_finished();
larval->test_started = test_started;
if (test_started)
crypto_schedule_test(larval);
}
up_write(&crypto_alg_sem);
if (IS_ERR(larval))
return PTR_ERR(larval);
if (test_started)
crypto_schedule_test(larval);
else
crypto_remove_final(&algs_to_put);
return 0;
}
EXPORT_SYMBOL_GPL(crypto_register_alg);
@ -642,10 +645,8 @@ int crypto_register_instance(struct crypto_template *tmpl,
larval = __crypto_register_alg(&inst->alg, &algs_to_put);
if (IS_ERR(larval))
goto unlock;
else if (larval) {
else if (larval)
larval->test_started = true;
crypto_schedule_test(larval);
}
hlist_add_head(&inst->list, &tmpl->instances);
inst->tmpl = tmpl;
@ -655,7 +656,12 @@ unlock:
if (IS_ERR(larval))
return PTR_ERR(larval);
if (larval)
crypto_schedule_test(larval);
else
crypto_remove_final(&algs_to_put);
return 0;
}
EXPORT_SYMBOL_GPL(crypto_register_instance);
@ -1016,6 +1022,8 @@ static void __init crypto_start_tests(void)
if (IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS))
return;
set_crypto_boot_test_finished();
for (;;) {
struct crypto_larval *larval = NULL;
struct crypto_alg *q;
@ -1038,7 +1046,6 @@ static void __init crypto_start_tests(void)
l->test_started = true;
larval = l;
crypto_schedule_test(larval);
break;
}
@ -1046,9 +1053,9 @@ static void __init crypto_start_tests(void)
if (!larval)
break;
}
set_crypto_boot_test_finished();
crypto_schedule_test(larval);
}
}
static int __init crypto_algapi_init(void)

View File

@ -33,7 +33,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <asm/byteorder.h>
#include <linux/unaligned.h>
#include <linux/types.h>
#define ANUBIS_MIN_KEY_SIZE 16
@ -463,7 +463,6 @@ static int anubis_setkey(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct anubis_ctx *ctx = crypto_tfm_ctx(tfm);
const __be32 *key = (const __be32 *)in_key;
int N, R, i, r;
u32 kappa[ANUBIS_MAX_N];
u32 inter[ANUBIS_MAX_N];
@ -482,7 +481,7 @@ static int anubis_setkey(struct crypto_tfm *tfm, const u8 *in_key,
/* map cipher key to initial key state (mu): */
for (i = 0; i < N; i++)
kappa[i] = be32_to_cpu(key[i]);
kappa[i] = get_unaligned_be32(&in_key[4 * i]);
/*
* generate R + 1 round keys:
@ -570,10 +569,8 @@ static int anubis_setkey(struct crypto_tfm *tfm, const u8 *in_key,
}
static void anubis_crypt(u32 roundKey[ANUBIS_MAX_ROUNDS + 1][4],
u8 *ciphertext, const u8 *plaintext, const int R)
u8 *dst, const u8 *src, const int R)
{
const __be32 *src = (const __be32 *)plaintext;
__be32 *dst = (__be32 *)ciphertext;
int i, r;
u32 state[4];
u32 inter[4];
@ -583,7 +580,7 @@ static void anubis_crypt(u32 roundKey[ANUBIS_MAX_ROUNDS + 1][4],
* and add initial round key (sigma[K^0]):
*/
for (i = 0; i < 4; i++)
state[i] = be32_to_cpu(src[i]) ^ roundKey[0][i];
state[i] = get_unaligned_be32(&src[4 * i]) ^ roundKey[0][i];
/*
* R - 1 full rounds:
@ -654,7 +651,7 @@ static void anubis_crypt(u32 roundKey[ANUBIS_MAX_ROUNDS + 1][4],
*/
for (i = 0; i < 4; i++)
dst[i] = cpu_to_be32(inter[i]);
put_unaligned_be32(inter[i], &dst[4 * i]);
}
static void anubis_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
@ -675,7 +672,6 @@ static struct crypto_alg anubis_alg = {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = ANUBIS_BLOCK_SIZE,
.cra_ctxsize = sizeof (struct anubis_ctx),
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_u = { .cipher = {
.cia_min_keysize = ANUBIS_MIN_KEY_SIZE,
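The conversions in this cipher (and in aria, khazad, and seed further down) swap casts through __be32/__be64 pointers plus be32_to_cpu()/cpu_to_be64() for the get_unaligned_*/put_unaligned_* helpers from <linux/unaligned.h>, which is what lets cra_alignmask be dropped: the accessors make no alignment assumption about the key or data buffers. A rough userspace model of the 32-bit pair (illustration only, not the kernel implementation):

#include <stdint.h>

/* Userspace model of get_unaligned_be32()/put_unaligned_be32(): access
 * four bytes at any address and interpret them as big-endian, with no
 * alignment requirement on p. */
static uint32_t model_get_unaligned_be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

static void model_put_unaligned_be32(uint32_t v, uint8_t *p)
{
	p[0] = v >> 24;
	p[1] = v >> 16;
	p[2] = v >> 8;
	p[3] = v;
}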

View File

@ -15,6 +15,7 @@
*/
#include <crypto/aria.h>
#include <linux/unaligned.h>
static const u32 key_rc[20] = {
0x517cc1b7, 0x27220a94, 0xfe13abe8, 0xfa9a6ee0,
@ -27,7 +28,6 @@ static const u32 key_rc[20] = {
static void aria_set_encrypt_key(struct aria_ctx *ctx, const u8 *in_key,
unsigned int key_len)
{
const __be32 *key = (const __be32 *)in_key;
u32 w0[4], w1[4], w2[4], w3[4];
u32 reg0, reg1, reg2, reg3;
const u32 *ck;
@ -35,10 +35,10 @@ static void aria_set_encrypt_key(struct aria_ctx *ctx, const u8 *in_key,
ck = &key_rc[(key_len - 16) / 2];
w0[0] = be32_to_cpu(key[0]);
w0[1] = be32_to_cpu(key[1]);
w0[2] = be32_to_cpu(key[2]);
w0[3] = be32_to_cpu(key[3]);
w0[0] = get_unaligned_be32(&in_key[0]);
w0[1] = get_unaligned_be32(&in_key[4]);
w0[2] = get_unaligned_be32(&in_key[8]);
w0[3] = get_unaligned_be32(&in_key[12]);
reg0 = w0[0] ^ ck[0];
reg1 = w0[1] ^ ck[1];
@ -48,11 +48,11 @@ static void aria_set_encrypt_key(struct aria_ctx *ctx, const u8 *in_key,
aria_subst_diff_odd(&reg0, &reg1, &reg2, &reg3);
if (key_len > 16) {
w1[0] = be32_to_cpu(key[4]);
w1[1] = be32_to_cpu(key[5]);
w1[0] = get_unaligned_be32(&in_key[16]);
w1[1] = get_unaligned_be32(&in_key[20]);
if (key_len > 24) {
w1[2] = be32_to_cpu(key[6]);
w1[3] = be32_to_cpu(key[7]);
w1[2] = get_unaligned_be32(&in_key[24]);
w1[3] = get_unaligned_be32(&in_key[28]);
} else {
w1[2] = 0;
w1[3] = 0;
@ -195,17 +195,15 @@ EXPORT_SYMBOL_GPL(aria_set_key);
static void __aria_crypt(struct aria_ctx *ctx, u8 *out, const u8 *in,
u32 key[][ARIA_RD_KEY_WORDS])
{
const __be32 *src = (const __be32 *)in;
__be32 *dst = (__be32 *)out;
u32 reg0, reg1, reg2, reg3;
int rounds, rkidx = 0;
rounds = ctx->rounds;
reg0 = be32_to_cpu(src[0]);
reg1 = be32_to_cpu(src[1]);
reg2 = be32_to_cpu(src[2]);
reg3 = be32_to_cpu(src[3]);
reg0 = get_unaligned_be32(&in[0]);
reg1 = get_unaligned_be32(&in[4]);
reg2 = get_unaligned_be32(&in[8]);
reg3 = get_unaligned_be32(&in[12]);
aria_add_round_key(key[rkidx], &reg0, &reg1, &reg2, &reg3);
rkidx++;
@ -241,10 +239,10 @@ static void __aria_crypt(struct aria_ctx *ctx, u8 *out, const u8 *in,
(u8)(s1[get_u8(reg3, 2)]),
(u8)(s2[get_u8(reg3, 3)]));
dst[0] = cpu_to_be32(reg0);
dst[1] = cpu_to_be32(reg1);
dst[2] = cpu_to_be32(reg2);
dst[3] = cpu_to_be32(reg3);
put_unaligned_be32(reg0, &out[0]);
put_unaligned_be32(reg1, &out[4]);
put_unaligned_be32(reg2, &out[8]);
put_unaligned_be32(reg3, &out[12]);
}
void aria_encrypt(void *_ctx, u8 *out, const u8 *in)
@ -284,7 +282,6 @@ static struct crypto_alg aria_alg = {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = ARIA_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aria_ctx),
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_u = {
.cipher = {

View File

@ -12,6 +12,7 @@
#include <linux/kernel.h>
#include <linux/sysctl.h>
#include <linux/notifier.h>
#include <linux/string_choices.h>
#include <generated/utsrelease.h>
int fips_enabled;
@ -24,8 +25,7 @@ EXPORT_SYMBOL_GPL(fips_fail_notif_chain);
static int fips_enable(char *str)
{
fips_enabled = !!simple_strtol(str, NULL, 0);
printk(KERN_INFO "fips mode: %s\n",
fips_enabled ? "enabled" : "disabled");
pr_info("fips mode: %s\n", str_enabled_disabled(fips_enabled));
return 1;
}

View File

@ -1,320 +0,0 @@
/*
* Key Wrapping: RFC3394 / NIST SP800-38F
*
* Copyright (C) 2015, Stephan Mueller <smueller@chronox.de>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, and the entire permission notice in its entirety,
* including the disclaimer of warranties.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote
* products derived from this software without specific prior
* written permission.
*
* ALTERNATIVELY, this product may be distributed under the terms of
* the GNU General Public License, in which case the provisions of the GPL2
* are required INSTEAD OF the above restrictions. (This clause is
* necessary due to a potential bad interaction between the GPL and
* the restrictions contained in a BSD-style copyright.)
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF
* WHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
* USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
/*
* Note for using key wrapping:
*
* * The result of the encryption operation is the ciphertext starting
* with the 2nd semiblock. The first semiblock is provided as the IV.
* The IV used to start the encryption operation is the default IV.
*
* * The input for the decryption is the first semiblock handed in as an
* IV. The ciphertext is the data starting with the 2nd semiblock. The
* return code of the decryption operation will be EBADMSG in case an
* integrity error occurs.
*
* To obtain the full result of an encryption as expected by SP800-38F, the
* caller must allocate a buffer of plaintext + 8 bytes:
*
* unsigned int datalen = ptlen + crypto_skcipher_ivsize(tfm);
* u8 data[datalen];
* u8 *iv = data;
* u8 *pt = data + crypto_skcipher_ivsize(tfm);
* <ensure that pt contains the plaintext of size ptlen>
* sg_init_one(&sg, pt, ptlen);
* skcipher_request_set_crypt(req, &sg, &sg, ptlen, iv);
*
* ==> After encryption, data now contains full KW result as per SP800-38F.
*
* In case of decryption, ciphertext now already has the expected length
* and must be segmented appropriately:
*
* unsigned int datalen = CTLEN;
* u8 data[datalen];
* <ensure that data contains full ciphertext>
* u8 *iv = data;
* u8 *ct = data + crypto_skcipher_ivsize(tfm);
* unsigned int ctlen = datalen - crypto_skcipher_ivsize(tfm);
* sg_init_one(&sg, ct, ctlen);
* skcipher_request_set_crypt(req, &sg, &sg, ctlen, iv);
*
* ==> After decryption (which hopefully does not return EBADMSG), the ct
* pointer now points to the plaintext of size ctlen.
*
* Note 2: KWP is not implemented as this would defy in-place operation.
* If somebody wants to wrap non-aligned data, he should simply pad
* the input with zeros to fill it up to the 8 byte boundary.
*/
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
struct crypto_kw_block {
#define SEMIBSIZE 8
__be64 A;
__be64 R;
};
/*
* Fast forward the SGL to the "end" length minus SEMIBSIZE.
* The start in the SGL defined by the fast-forward is returned with
* the walk variable
*/
static void crypto_kw_scatterlist_ff(struct scatter_walk *walk,
struct scatterlist *sg,
unsigned int end)
{
unsigned int skip = 0;
/* The caller should only operate on full SEMIBLOCKs. */
BUG_ON(end < SEMIBSIZE);
skip = end - SEMIBSIZE;
while (sg) {
if (sg->length > skip) {
scatterwalk_start(walk, sg);
scatterwalk_advance(walk, skip);
break;
}
skip -= sg->length;
sg = sg_next(sg);
}
}
static int crypto_kw_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
struct crypto_kw_block block;
struct scatterlist *src, *dst;
u64 t = 6 * ((req->cryptlen) >> 3);
unsigned int i;
int ret = 0;
/*
* Require at least 2 semiblocks (note, the 3rd semiblock that is
* required by SP800-38F is the IV.
*/
if (req->cryptlen < (2 * SEMIBSIZE) || req->cryptlen % SEMIBSIZE)
return -EINVAL;
/* Place the IV into block A */
memcpy(&block.A, req->iv, SEMIBSIZE);
/*
* src scatterlist is read-only. dst scatterlist is r/w. During the
* first loop, src points to req->src and dst to req->dst. For any
* subsequent round, the code operates on req->dst only.
*/
src = req->src;
dst = req->dst;
for (i = 0; i < 6; i++) {
struct scatter_walk src_walk, dst_walk;
unsigned int nbytes = req->cryptlen;
while (nbytes) {
/* move pointer by nbytes in the SGL */
crypto_kw_scatterlist_ff(&src_walk, src, nbytes);
/* get the source block */
scatterwalk_copychunks(&block.R, &src_walk, SEMIBSIZE,
false);
/* perform KW operation: modify IV with counter */
block.A ^= cpu_to_be64(t);
t--;
/* perform KW operation: decrypt block */
crypto_cipher_decrypt_one(cipher, (u8 *)&block,
(u8 *)&block);
/* move pointer by nbytes in the SGL */
crypto_kw_scatterlist_ff(&dst_walk, dst, nbytes);
/* Copy block->R into place */
scatterwalk_copychunks(&block.R, &dst_walk, SEMIBSIZE,
true);
nbytes -= SEMIBSIZE;
}
/* we now start to operate on the dst SGL only */
src = req->dst;
dst = req->dst;
}
/* Perform authentication check */
if (block.A != cpu_to_be64(0xa6a6a6a6a6a6a6a6ULL))
ret = -EBADMSG;
memzero_explicit(&block, sizeof(struct crypto_kw_block));
return ret;
}
static int crypto_kw_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
struct crypto_kw_block block;
struct scatterlist *src, *dst;
u64 t = 1;
unsigned int i;
/*
* Require at least 2 semiblocks (note, the 3rd semiblock that is
* required by SP800-38F is the IV that occupies the first semiblock.
* This means that the dst memory must be one semiblock larger than src.
* Also ensure that the given data is aligned to semiblock.
*/
if (req->cryptlen < (2 * SEMIBSIZE) || req->cryptlen % SEMIBSIZE)
return -EINVAL;
/*
* Place the predefined IV into block A -- for encrypt, the caller
* does not need to provide an IV, but he needs to fetch the final IV.
*/
block.A = cpu_to_be64(0xa6a6a6a6a6a6a6a6ULL);
/*
* src scatterlist is read-only. dst scatterlist is r/w. During the
* first loop, src points to req->src and dst to req->dst. For any
* subsequent round, the code operates on req->dst only.
*/
src = req->src;
dst = req->dst;
for (i = 0; i < 6; i++) {
struct scatter_walk src_walk, dst_walk;
unsigned int nbytes = req->cryptlen;
scatterwalk_start(&src_walk, src);
scatterwalk_start(&dst_walk, dst);
while (nbytes) {
/* get the source block */
scatterwalk_copychunks(&block.R, &src_walk, SEMIBSIZE,
false);
/* perform KW operation: encrypt block */
crypto_cipher_encrypt_one(cipher, (u8 *)&block,
(u8 *)&block);
/* perform KW operation: modify IV with counter */
block.A ^= cpu_to_be64(t);
t++;
/* Copy block->R into place */
scatterwalk_copychunks(&block.R, &dst_walk, SEMIBSIZE,
true);
nbytes -= SEMIBSIZE;
}
/* we now start to operate on the dst SGL only */
src = req->dst;
dst = req->dst;
}
/* establish the IV for the caller to pick up */
memcpy(req->iv, &block.A, SEMIBSIZE);
memzero_explicit(&block, sizeof(struct crypto_kw_block));
return 0;
}
static int crypto_kw_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct skcipher_instance *inst;
struct crypto_alg *alg;
int err;
inst = skcipher_alloc_instance_simple(tmpl, tb);
if (IS_ERR(inst))
return PTR_ERR(inst);
alg = skcipher_ialg_simple(inst);
err = -EINVAL;
/* Section 5.1 requirement for KW */
if (alg->cra_blocksize != sizeof(struct crypto_kw_block))
goto out_free_inst;
inst->alg.base.cra_blocksize = SEMIBSIZE;
inst->alg.base.cra_alignmask = 0;
inst->alg.ivsize = SEMIBSIZE;
inst->alg.encrypt = crypto_kw_encrypt;
inst->alg.decrypt = crypto_kw_decrypt;
err = skcipher_register_instance(tmpl, inst);
if (err) {
out_free_inst:
inst->free(inst);
}
return err;
}
static struct crypto_template crypto_kw_tmpl = {
.name = "kw",
.create = crypto_kw_create,
.module = THIS_MODULE,
};
static int __init crypto_kw_init(void)
{
return crypto_register_template(&crypto_kw_tmpl);
}
static void __exit crypto_kw_exit(void)
{
crypto_unregister_template(&crypto_kw_tmpl);
}
subsys_initcall(crypto_kw_init);
module_exit(crypto_kw_exit);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
MODULE_DESCRIPTION("Key Wrapping (RFC3394 / NIST SP800-38F)");
MODULE_ALIAS_CRYPTO("kw");
MODULE_IMPORT_NS("CRYPTO_INTERNAL");

View File

@ -23,7 +23,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <asm/byteorder.h>
#include <linux/unaligned.h>
#include <linux/types.h>
#define KHAZAD_KEY_SIZE 16
@ -757,14 +757,12 @@ static int khazad_setkey(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct khazad_ctx *ctx = crypto_tfm_ctx(tfm);
const __be32 *key = (const __be32 *)in_key;
int r;
const u64 *S = T7;
u64 K2, K1;
/* key is supposed to be 32-bit aligned */
K2 = ((u64)be32_to_cpu(key[0]) << 32) | be32_to_cpu(key[1]);
K1 = ((u64)be32_to_cpu(key[2]) << 32) | be32_to_cpu(key[3]);
K2 = get_unaligned_be64(&in_key[0]);
K1 = get_unaligned_be64(&in_key[8]);
/* setup the encrypt key */
for (r = 0; r <= KHAZAD_ROUNDS; r++) {
@ -800,14 +798,12 @@ static int khazad_setkey(struct crypto_tfm *tfm, const u8 *in_key,
}
static void khazad_crypt(const u64 roundKey[KHAZAD_ROUNDS + 1],
u8 *ciphertext, const u8 *plaintext)
u8 *dst, const u8 *src)
{
const __be64 *src = (const __be64 *)plaintext;
__be64 *dst = (__be64 *)ciphertext;
int r;
u64 state;
state = be64_to_cpu(*src) ^ roundKey[0];
state = get_unaligned_be64(src) ^ roundKey[0];
for (r = 1; r < KHAZAD_ROUNDS; r++) {
state = T0[(int)(state >> 56) ] ^
@ -831,7 +827,7 @@ static void khazad_crypt(const u64 roundKey[KHAZAD_ROUNDS + 1],
(T7[(int)(state ) & 0xff] & 0x00000000000000ffULL) ^
roundKey[KHAZAD_ROUNDS];
*dst = cpu_to_be64(state);
put_unaligned_be64(state, dst);
}
static void khazad_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
@ -852,7 +848,6 @@ static struct crypto_alg khazad_alg = {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = KHAZAD_BLOCK_SIZE,
.cra_ctxsize = sizeof (struct khazad_ctx),
.cra_alignmask = 7,
.cra_module = THIS_MODULE,
.cra_u = { .cipher = {
.cia_min_keysize = KHAZAD_KEY_SIZE,

View File

@ -13,7 +13,7 @@
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <asm/byteorder.h>
#include <linux/unaligned.h>
#define SEED_NUM_KCONSTANTS 16
#define SEED_KEY_SIZE 16
@ -329,13 +329,12 @@ static int seed_set_key(struct crypto_tfm *tfm, const u8 *in_key,
{
struct seed_ctx *ctx = crypto_tfm_ctx(tfm);
u32 *keyout = ctx->keysched;
const __be32 *key = (const __be32 *)in_key;
u32 i, t0, t1, x1, x2, x3, x4;
x1 = be32_to_cpu(key[0]);
x2 = be32_to_cpu(key[1]);
x3 = be32_to_cpu(key[2]);
x4 = be32_to_cpu(key[3]);
x1 = get_unaligned_be32(&in_key[0]);
x2 = get_unaligned_be32(&in_key[4]);
x3 = get_unaligned_be32(&in_key[8]);
x4 = get_unaligned_be32(&in_key[12]);
for (i = 0; i < SEED_NUM_KCONSTANTS; i++) {
t0 = x1 + x3 - KC[i];
@ -364,15 +363,13 @@ static int seed_set_key(struct crypto_tfm *tfm, const u8 *in_key,
static void seed_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
const struct seed_ctx *ctx = crypto_tfm_ctx(tfm);
const __be32 *src = (const __be32 *)in;
__be32 *dst = (__be32 *)out;
u32 x1, x2, x3, x4, t0, t1;
const u32 *ks = ctx->keysched;
x1 = be32_to_cpu(src[0]);
x2 = be32_to_cpu(src[1]);
x3 = be32_to_cpu(src[2]);
x4 = be32_to_cpu(src[3]);
x1 = get_unaligned_be32(&in[0]);
x2 = get_unaligned_be32(&in[4]);
x3 = get_unaligned_be32(&in[8]);
x4 = get_unaligned_be32(&in[12]);
OP(x1, x2, x3, x4, 0);
OP(x3, x4, x1, x2, 2);
@ -391,10 +388,10 @@ static void seed_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
OP(x1, x2, x3, x4, 28);
OP(x3, x4, x1, x2, 30);
dst[0] = cpu_to_be32(x3);
dst[1] = cpu_to_be32(x4);
dst[2] = cpu_to_be32(x1);
dst[3] = cpu_to_be32(x2);
put_unaligned_be32(x3, &out[0]);
put_unaligned_be32(x4, &out[4]);
put_unaligned_be32(x1, &out[8]);
put_unaligned_be32(x2, &out[12]);
}
/* decrypt a block of text */
@ -402,15 +399,13 @@ static void seed_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
static void seed_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
const struct seed_ctx *ctx = crypto_tfm_ctx(tfm);
const __be32 *src = (const __be32 *)in;
__be32 *dst = (__be32 *)out;
u32 x1, x2, x3, x4, t0, t1;
const u32 *ks = ctx->keysched;
x1 = be32_to_cpu(src[0]);
x2 = be32_to_cpu(src[1]);
x3 = be32_to_cpu(src[2]);
x4 = be32_to_cpu(src[3]);
x1 = get_unaligned_be32(&in[0]);
x2 = get_unaligned_be32(&in[4]);
x3 = get_unaligned_be32(&in[8]);
x4 = get_unaligned_be32(&in[12]);
OP(x1, x2, x3, x4, 30);
OP(x3, x4, x1, x2, 28);
@ -429,10 +424,10 @@ static void seed_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
OP(x1, x2, x3, x4, 2);
OP(x3, x4, x1, x2, 0);
dst[0] = cpu_to_be32(x3);
dst[1] = cpu_to_be32(x4);
dst[2] = cpu_to_be32(x1);
dst[3] = cpu_to_be32(x2);
put_unaligned_be32(x3, &out[0]);
put_unaligned_be32(x4, &out[4]);
put_unaligned_be32(x1, &out[8]);
put_unaligned_be32(x2, &out[12]);
}
@ -443,7 +438,6 @@ static struct crypto_alg seed_alg = {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = SEED_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct seed_ctx),
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_u = {
.cipher = {

View File

@ -15,8 +15,6 @@
#include "internal.h"
#define CRYPTO_ALG_TYPE_SIG_MASK 0x0000000e
static void crypto_sig_exit_tfm(struct crypto_tfm *tfm)
{
struct crypto_sig *sig = __crypto_sig_tfm(tfm);
@ -73,7 +71,7 @@ static const struct crypto_type crypto_sig_type = {
.report = crypto_sig_report,
#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_SIG_MASK,
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_SIG,
.tfmsize = offsetof(struct crypto_sig, base),
};

View File

@ -17,7 +17,6 @@
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
@ -29,19 +28,10 @@
#define CRYPTO_ALG_TYPE_SKCIPHER_MASK 0x0000000e
enum {
SKCIPHER_WALK_PHYS = 1 << 0,
SKCIPHER_WALK_SLOW = 1 << 1,
SKCIPHER_WALK_COPY = 1 << 2,
SKCIPHER_WALK_DIFF = 1 << 3,
SKCIPHER_WALK_SLEEP = 1 << 4,
};
struct skcipher_walk_buffer {
struct list_head entry;
struct scatter_walk dst;
unsigned int len;
u8 *data;
u8 buffer[];
SKCIPHER_WALK_SLOW = 1 << 0,
SKCIPHER_WALK_COPY = 1 << 1,
SKCIPHER_WALK_DIFF = 1 << 2,
SKCIPHER_WALK_SLEEP = 1 << 3,
};
static const struct crypto_type crypto_skcipher_type;
@ -95,8 +85,7 @@ static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
addr = skcipher_get_spot(addr, bsize);
scatterwalk_copychunks(addr, &walk->out, bsize,
(walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
scatterwalk_copychunks(addr, &walk->out, bsize, 1);
return 0;
}
@ -113,8 +102,7 @@ int skcipher_walk_done(struct skcipher_walk *walk, int err)
nbytes = walk->total - n;
}
if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
SKCIPHER_WALK_SLOW |
if (likely(!(walk->flags & (SKCIPHER_WALK_SLOW |
SKCIPHER_WALK_COPY |
SKCIPHER_WALK_DIFF)))) {
unmap_src:
@ -162,9 +150,6 @@ finish:
if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
goto out;
if (walk->flags & SKCIPHER_WALK_PHYS)
goto out;
if (walk->iv != walk->oiv)
memcpy(walk->oiv, walk->iv, walk->ivsize);
if (walk->buffer != walk->page)
@ -177,97 +162,33 @@ out:
}
EXPORT_SYMBOL_GPL(skcipher_walk_done);
void skcipher_walk_complete(struct skcipher_walk *walk, int err)
{
struct skcipher_walk_buffer *p, *tmp;
list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
u8 *data;
if (err)
goto done;
data = p->data;
if (!data) {
data = PTR_ALIGN(&p->buffer[0], walk->alignmask + 1);
data = skcipher_get_spot(data, walk->stride);
}
scatterwalk_copychunks(data, &p->dst, p->len, 1);
if (offset_in_page(p->data) + p->len + walk->stride >
PAGE_SIZE)
free_page((unsigned long)p->data);
done:
list_del(&p->entry);
kfree(p);
}
if (!err && walk->iv != walk->oiv)
memcpy(walk->oiv, walk->iv, walk->ivsize);
if (walk->buffer != walk->page)
kfree(walk->buffer);
if (walk->page)
free_page((unsigned long)walk->page);
}
EXPORT_SYMBOL_GPL(skcipher_walk_complete);
static void skcipher_queue_write(struct skcipher_walk *walk,
struct skcipher_walk_buffer *p)
{
p->dst = walk->out;
list_add_tail(&p->entry, &walk->buffers);
}
static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
{
bool phys = walk->flags & SKCIPHER_WALK_PHYS;
unsigned alignmask = walk->alignmask;
struct skcipher_walk_buffer *p;
unsigned a;
unsigned n;
u8 *buffer;
void *v;
if (!phys) {
if (!walk->buffer)
walk->buffer = walk->page;
buffer = walk->buffer;
if (buffer)
goto ok;
}
/* Start with the minimum alignment of kmalloc. */
a = crypto_tfm_ctx_alignment() - 1;
n = bsize;
if (phys) {
/* Calculate the minimum alignment of p->buffer. */
a &= (sizeof(*p) ^ (sizeof(*p) - 1)) >> 1;
n += sizeof(*p);
}
/* Minimum size to align p->buffer by alignmask. */
/* Minimum size to align buffer by alignmask. */
n += alignmask & ~a;
/* Minimum size to ensure p->buffer does not straddle a page. */
/* Minimum size to ensure buffer does not straddle a page. */
n += (bsize - 1) & ~(alignmask | a);
v = kzalloc(n, skcipher_walk_gfp(walk));
if (!v)
buffer = kzalloc(n, skcipher_walk_gfp(walk));
if (!buffer)
return skcipher_walk_done(walk, -ENOMEM);
if (phys) {
p = v;
p->len = bsize;
skcipher_queue_write(walk, p);
buffer = p->buffer;
} else {
walk->buffer = v;
buffer = v;
}
walk->buffer = buffer;
ok:
walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1);
walk->dst.virt.addr = skcipher_get_spot(walk->dst.virt.addr, bsize);
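Editorial note, a worked example of the sizing above (assuming an 8-byte kmalloc minimum alignment, so a = 7): with bsize = 16 and alignmask = 15, n = 16 + (15 & ~7) + ((16 - 1) & ~(15 | 7)) = 16 + 8 + 0 = 24. The middle term leaves room to round the kzalloc()ed pointer up to the next 16-byte boundary, and the last term is zero because a 16-byte block that is itself 16-byte aligned can never straddle a page, which is exactly what skcipher_get_spot() checks for.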
@ -283,7 +204,6 @@ ok:
static int skcipher_next_copy(struct skcipher_walk *walk)
{
struct skcipher_walk_buffer *p;
u8 *tmp = walk->page;
skcipher_map_src(walk);
@ -292,24 +212,6 @@ static int skcipher_next_copy(struct skcipher_walk *walk)
walk->src.virt.addr = tmp;
walk->dst.virt.addr = tmp;
if (!(walk->flags & SKCIPHER_WALK_PHYS))
return 0;
p = kmalloc(sizeof(*p), skcipher_walk_gfp(walk));
if (!p)
return -ENOMEM;
p->data = walk->page;
p->len = walk->nbytes;
skcipher_queue_write(walk, p);
if (offset_in_page(walk->page) + walk->nbytes + walk->stride >
PAGE_SIZE)
walk->page = NULL;
else
walk->page += walk->nbytes;
return 0;
}
@ -317,16 +219,10 @@ static int skcipher_next_fast(struct skcipher_walk *walk)
{
unsigned long diff;
walk->src.phys.page = scatterwalk_page(&walk->in);
walk->src.phys.offset = offset_in_page(walk->in.offset);
walk->dst.phys.page = scatterwalk_page(&walk->out);
walk->dst.phys.offset = offset_in_page(walk->out.offset);
if (walk->flags & SKCIPHER_WALK_PHYS)
return 0;
diff = walk->src.phys.offset - walk->dst.phys.offset;
diff |= walk->src.virt.page - walk->dst.virt.page;
diff = offset_in_page(walk->in.offset) -
offset_in_page(walk->out.offset);
diff |= (u8 *)scatterwalk_page(&walk->in) -
(u8 *)scatterwalk_page(&walk->out);
skcipher_map_src(walk);
walk->dst.virt.addr = walk->src.virt.addr;
@ -343,7 +239,6 @@ static int skcipher_walk_next(struct skcipher_walk *walk)
{
unsigned int bsize;
unsigned int n;
int err;
walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
SKCIPHER_WALK_DIFF);
@ -358,8 +253,7 @@ static int skcipher_walk_next(struct skcipher_walk *walk)
return skcipher_walk_done(walk, -EINVAL);
slow_path:
err = skcipher_next_slow(walk, bsize);
goto set_phys_lowmem;
return skcipher_next_slow(walk, bsize);
}
if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
@ -374,22 +268,12 @@ slow_path:
walk->nbytes = min_t(unsigned, n,
PAGE_SIZE - offset_in_page(walk->page));
walk->flags |= SKCIPHER_WALK_COPY;
err = skcipher_next_copy(walk);
goto set_phys_lowmem;
return skcipher_next_copy(walk);
}
walk->nbytes = n;
return skcipher_next_fast(walk);
set_phys_lowmem:
if (!err && (walk->flags & SKCIPHER_WALK_PHYS)) {
walk->src.phys.page = virt_to_page(walk->src.virt.addr);
walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
walk->src.phys.offset &= PAGE_SIZE - 1;
walk->dst.phys.offset &= PAGE_SIZE - 1;
}
return err;
}
static int skcipher_copy_iv(struct skcipher_walk *walk)
@ -407,14 +291,10 @@ static int skcipher_copy_iv(struct skcipher_walk *walk)
/* Minimum size to align buffer by alignmask. */
size = alignmask & ~a;
if (walk->flags & SKCIPHER_WALK_PHYS)
size += ivsize;
else {
size += aligned_bs + ivsize;
/* Minimum size to ensure buffer does not straddle a page. */
size += (bs - 1) & ~(alignmask | a);
}
walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
if (!walk->buffer)
@ -484,8 +364,6 @@ int skcipher_walk_virt(struct skcipher_walk *walk,
might_sleep_if(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
walk->flags &= ~SKCIPHER_WALK_PHYS;
err = skcipher_walk_skcipher(walk, req);
walk->flags &= atomic ? ~SKCIPHER_WALK_SLEEP : ~0;
@ -494,17 +372,6 @@ int skcipher_walk_virt(struct skcipher_walk *walk,
}
EXPORT_SYMBOL_GPL(skcipher_walk_virt);
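Editorial note: the flag removal does not change the caller-side contract of skcipher_walk_virt()/skcipher_walk_done(). An illustrative sketch of the usual driver loop they serve, modelled on the generic ECB template (example_cipher_block() is a hypothetical per-block routine, not part of this diff):

#include <crypto/internal/skcipher.h>

static void example_cipher_block(u8 *dst, const u8 *src);

static int example_walk_crypt(struct skcipher_request *req, unsigned int bsize)
{
        struct skcipher_walk walk;
        unsigned int nbytes, i;
        int err;

        err = skcipher_walk_virt(&walk, req, false);

        while ((nbytes = walk.nbytes) != 0) {
                unsigned int n = nbytes - (nbytes % bsize);
                const u8 *src = walk.src.virt.addr;
                u8 *dst = walk.dst.virt.addr;

                for (i = 0; i < n; i += bsize)
                        example_cipher_block(dst + i, src + i);

                /* Report how many bytes of this chunk were left unprocessed. */
                err = skcipher_walk_done(&walk, nbytes - n);
        }
        return err;
}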
int skcipher_walk_async(struct skcipher_walk *walk,
struct skcipher_request *req)
{
walk->flags |= SKCIPHER_WALK_PHYS;
INIT_LIST_HEAD(&walk->buffers);
return skcipher_walk_skcipher(walk, req);
}
EXPORT_SYMBOL_GPL(skcipher_walk_async);
static int skcipher_walk_aead_common(struct skcipher_walk *walk,
struct aead_request *req, bool atomic)
{
@ -518,8 +385,6 @@ static int skcipher_walk_aead_common(struct skcipher_walk *walk,
if (unlikely(!walk->total))
return 0;
walk->flags &= ~SKCIPHER_WALK_PHYS;
scatterwalk_start(&walk->in, req->src);
scatterwalk_start(&walk->out, req->dst);

View File

@ -1738,10 +1738,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
ret = min(ret, tcrypt_test("hmac(rmd160)"));
break;
case 109:
ret = min(ret, tcrypt_test("vmac64(aes)"));
break;
case 111:
ret = min(ret, tcrypt_test("hmac(sha3-224)"));
break;

View File

@ -18,7 +18,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <asm/byteorder.h>
#include <linux/unaligned.h>
#include <linux/types.h>
#define TEA_KEY_SIZE 16
@ -43,12 +43,11 @@ static int tea_setkey(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct tea_ctx *ctx = crypto_tfm_ctx(tfm);
const __le32 *key = (const __le32 *)in_key;
ctx->KEY[0] = le32_to_cpu(key[0]);
ctx->KEY[1] = le32_to_cpu(key[1]);
ctx->KEY[2] = le32_to_cpu(key[2]);
ctx->KEY[3] = le32_to_cpu(key[3]);
ctx->KEY[0] = get_unaligned_le32(&in_key[0]);
ctx->KEY[1] = get_unaligned_le32(&in_key[4]);
ctx->KEY[2] = get_unaligned_le32(&in_key[8]);
ctx->KEY[3] = get_unaligned_le32(&in_key[12]);
return 0;
@ -59,11 +58,9 @@ static void tea_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
u32 y, z, n, sum = 0;
u32 k0, k1, k2, k3;
struct tea_ctx *ctx = crypto_tfm_ctx(tfm);
const __le32 *in = (const __le32 *)src;
__le32 *out = (__le32 *)dst;
y = le32_to_cpu(in[0]);
z = le32_to_cpu(in[1]);
y = get_unaligned_le32(&src[0]);
z = get_unaligned_le32(&src[4]);
k0 = ctx->KEY[0];
k1 = ctx->KEY[1];
@ -78,8 +75,8 @@ static void tea_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
z += ((y << 4) + k2) ^ (y + sum) ^ ((y >> 5) + k3);
}
out[0] = cpu_to_le32(y);
out[1] = cpu_to_le32(z);
put_unaligned_le32(y, &dst[0]);
put_unaligned_le32(z, &dst[4]);
}
static void tea_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
@ -87,11 +84,9 @@ static void tea_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
u32 y, z, n, sum;
u32 k0, k1, k2, k3;
struct tea_ctx *ctx = crypto_tfm_ctx(tfm);
const __le32 *in = (const __le32 *)src;
__le32 *out = (__le32 *)dst;
y = le32_to_cpu(in[0]);
z = le32_to_cpu(in[1]);
y = get_unaligned_le32(&src[0]);
z = get_unaligned_le32(&src[4]);
k0 = ctx->KEY[0];
k1 = ctx->KEY[1];
@ -108,20 +103,19 @@ static void tea_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
sum -= TEA_DELTA;
}
out[0] = cpu_to_le32(y);
out[1] = cpu_to_le32(z);
put_unaligned_le32(y, &dst[0]);
put_unaligned_le32(z, &dst[4]);
}
static int xtea_setkey(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct xtea_ctx *ctx = crypto_tfm_ctx(tfm);
const __le32 *key = (const __le32 *)in_key;
ctx->KEY[0] = le32_to_cpu(key[0]);
ctx->KEY[1] = le32_to_cpu(key[1]);
ctx->KEY[2] = le32_to_cpu(key[2]);
ctx->KEY[3] = le32_to_cpu(key[3]);
ctx->KEY[0] = get_unaligned_le32(&in_key[0]);
ctx->KEY[1] = get_unaligned_le32(&in_key[4]);
ctx->KEY[2] = get_unaligned_le32(&in_key[8]);
ctx->KEY[3] = get_unaligned_le32(&in_key[12]);
return 0;
@ -132,11 +126,9 @@ static void xtea_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
u32 y, z, sum = 0;
u32 limit = XTEA_DELTA * XTEA_ROUNDS;
struct xtea_ctx *ctx = crypto_tfm_ctx(tfm);
const __le32 *in = (const __le32 *)src;
__le32 *out = (__le32 *)dst;
y = le32_to_cpu(in[0]);
z = le32_to_cpu(in[1]);
y = get_unaligned_le32(&src[0]);
z = get_unaligned_le32(&src[4]);
while (sum != limit) {
y += ((z << 4 ^ z >> 5) + z) ^ (sum + ctx->KEY[sum&3]);
@ -144,19 +136,17 @@ static void xtea_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
z += ((y << 4 ^ y >> 5) + y) ^ (sum + ctx->KEY[sum>>11 &3]);
}
out[0] = cpu_to_le32(y);
out[1] = cpu_to_le32(z);
put_unaligned_le32(y, &dst[0]);
put_unaligned_le32(z, &dst[4]);
}
static void xtea_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
u32 y, z, sum;
struct tea_ctx *ctx = crypto_tfm_ctx(tfm);
const __le32 *in = (const __le32 *)src;
__le32 *out = (__le32 *)dst;
y = le32_to_cpu(in[0]);
z = le32_to_cpu(in[1]);
y = get_unaligned_le32(&src[0]);
z = get_unaligned_le32(&src[4]);
sum = XTEA_DELTA * XTEA_ROUNDS;
@ -166,8 +156,8 @@ static void xtea_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
y -= ((z << 4 ^ z >> 5) + z) ^ (sum + ctx->KEY[sum & 3]);
}
out[0] = cpu_to_le32(y);
out[1] = cpu_to_le32(z);
put_unaligned_le32(y, &dst[0]);
put_unaligned_le32(z, &dst[4]);
}
@ -176,11 +166,9 @@ static void xeta_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
u32 y, z, sum = 0;
u32 limit = XTEA_DELTA * XTEA_ROUNDS;
struct xtea_ctx *ctx = crypto_tfm_ctx(tfm);
const __le32 *in = (const __le32 *)src;
__le32 *out = (__le32 *)dst;
y = le32_to_cpu(in[0]);
z = le32_to_cpu(in[1]);
y = get_unaligned_le32(&src[0]);
z = get_unaligned_le32(&src[4]);
while (sum != limit) {
y += (z << 4 ^ z >> 5) + (z ^ sum) + ctx->KEY[sum&3];
@ -188,19 +176,17 @@ static void xeta_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
z += (y << 4 ^ y >> 5) + (y ^ sum) + ctx->KEY[sum>>11 &3];
}
out[0] = cpu_to_le32(y);
out[1] = cpu_to_le32(z);
put_unaligned_le32(y, &dst[0]);
put_unaligned_le32(z, &dst[4]);
}
static void xeta_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
u32 y, z, sum;
struct tea_ctx *ctx = crypto_tfm_ctx(tfm);
const __le32 *in = (const __le32 *)src;
__le32 *out = (__le32 *)dst;
y = le32_to_cpu(in[0]);
z = le32_to_cpu(in[1]);
y = get_unaligned_le32(&src[0]);
z = get_unaligned_le32(&src[4]);
sum = XTEA_DELTA * XTEA_ROUNDS;
@ -210,8 +196,8 @@ static void xeta_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
y -= (z << 4 ^ z >> 5) + (z ^ sum) + ctx->KEY[sum & 3];
}
out[0] = cpu_to_le32(y);
out[1] = cpu_to_le32(z);
put_unaligned_le32(y, &dst[0]);
put_unaligned_le32(z, &dst[4]);
}
static struct crypto_alg tea_algs[3] = { {
@ -220,7 +206,6 @@ static struct crypto_alg tea_algs[3] = { {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = TEA_BLOCK_SIZE,
.cra_ctxsize = sizeof (struct tea_ctx),
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_u = { .cipher = {
.cia_min_keysize = TEA_KEY_SIZE,
@ -234,7 +219,6 @@ static struct crypto_alg tea_algs[3] = { {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = XTEA_BLOCK_SIZE,
.cra_ctxsize = sizeof (struct xtea_ctx),
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_u = { .cipher = {
.cia_min_keysize = XTEA_KEY_SIZE,
@ -248,7 +232,6 @@ static struct crypto_alg tea_algs[3] = { {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = XTEA_BLOCK_SIZE,
.cra_ctxsize = sizeof (struct xtea_ctx),
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_u = { .cipher = {
.cia_min_keysize = XTEA_KEY_SIZE,

View File

@ -2885,18 +2885,11 @@ static int test_skcipher_vec_cfg(int enc, const struct cipher_testvec *vec,
if (ivsize) {
if (WARN_ON(ivsize > MAX_IVLEN))
return -EINVAL;
if (vec->generates_iv && !enc)
memcpy(iv, vec->iv_out, ivsize);
else if (vec->iv)
if (vec->iv)
memcpy(iv, vec->iv, ivsize);
else
memset(iv, 0, ivsize);
} else {
if (vec->generates_iv) {
pr_err("alg: skcipher: %s has ivsize=0 but test vector %s generates IV!\n",
driver, vec_name);
return -EINVAL;
}
iv = NULL;
}
@ -3133,10 +3126,6 @@ static int test_skcipher_vs_generic_impl(const char *generic_driver,
if (noextratests)
return 0;
/* Keywrap isn't supported here yet as it handles its IV differently. */
if (strncmp(algname, "kw(", 3) == 0)
return 0;
init_rnd_state(&rng);
if (!generic_driver) { /* Use default naming convention? */
@ -5408,13 +5397,6 @@ static const struct alg_test_desc alg_test_descs[] = {
.alg = "jitterentropy_rng",
.fips_allowed = 1,
.test = alg_test_null,
}, {
.alg = "kw(aes)",
.test = alg_test_skcipher,
.fips_allowed = 1,
.suite = {
.cipher = __VECS(aes_kw_tv_template)
}
}, {
.alg = "lrw(aes)",
.generic_driver = "lrw(ecb(aes-generic))",
@ -5749,12 +5731,6 @@ static const struct alg_test_desc alg_test_descs[] = {
.suite = {
.hash = __VECS(streebog512_tv_template)
}
}, {
.alg = "vmac64(aes)",
.test = alg_test_hash,
.suite = {
.hash = __VECS(vmac64_aes_tv_template)
}
}, {
.alg = "wp256",
.test = alg_test_hash,

View File

@ -59,8 +59,6 @@ struct hash_testvec {
* @wk: Does the test need CRYPTO_TFM_REQ_FORBID_WEAK_KEYS?
* ( e.g. test needs to fail due to a weak key )
* @fips_skip: Skip the test vector in FIPS mode
* @generates_iv: Encryption should ignore the given IV, and output @iv_out.
* Decryption takes @iv_out. Needed for AES Keywrap ("kw(aes)").
* @setkey_error: Expected error from setkey()
* @crypt_error: Expected error from encrypt() and decrypt()
*/
@ -74,7 +72,6 @@ struct cipher_testvec {
unsigned short klen;
unsigned int len;
bool fips_skip;
bool generates_iv;
int setkey_error;
int crypt_error;
};
@ -8561,159 +8558,6 @@ static const struct hash_testvec aes_xcbc128_tv_template[] = {
}
};
static const char vmac64_string1[144] = {
'\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
'\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
'\x01', '\x01', '\x01', '\x01', '\x02', '\x03', '\x02', '\x02',
'\x02', '\x04', '\x01', '\x07', '\x04', '\x01', '\x04', '\x03',
};
static const char vmac64_string2[144] = {
'\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
'\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
'a', 'b', 'c',
};
static const char vmac64_string3[144] = {
'\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
'\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
'a', 'b', 'c', 'a', 'b', 'c', 'a', 'b',
'c', 'a', 'b', 'c', 'a', 'b', 'c', 'a',
'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c',
'a', 'b', 'c', 'a', 'b', 'c', 'a', 'b',
'c', 'a', 'b', 'c', 'a', 'b', 'c', 'a',
'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c',
};
static const char vmac64_string4[33] = {
'\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
'\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
'b', 'c', 'e', 'f', 'i', 'j', 'l', 'm',
'o', 'p', 'r', 's', 't', 'u', 'w', 'x',
'z',
};
static const char vmac64_string5[143] = {
'\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
'\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
'r', 'm', 'b', 't', 'c', 'o', 'l', 'k',
']', '%', '9', '2', '7', '!', 'A',
};
static const char vmac64_string6[145] = {
'\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
'\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
'p', 't', '*', '7', 'l', 'i', '!', '#',
'w', '0', 'z', '/', '4', 'A', 'n',
};
static const struct hash_testvec vmac64_aes_tv_template[] = {
{ /* draft-krovetz-vmac-01 test vector 1 */
.key = "abcdefghijklmnop",
.ksize = 16,
.plaintext = "\0\0\0\0\0\0\0\0bcdefghi",
.psize = 16,
.digest = "\x25\x76\xbe\x1c\x56\xd8\xb8\x1b",
}, { /* draft-krovetz-vmac-01 test vector 2 */
.key = "abcdefghijklmnop",
.ksize = 16,
.plaintext = "\0\0\0\0\0\0\0\0bcdefghiabc",
.psize = 19,
.digest = "\x2d\x37\x6c\xf5\xb1\x81\x3c\xe5",
}, { /* draft-krovetz-vmac-01 test vector 3 */
.key = "abcdefghijklmnop",
.ksize = 16,
.plaintext = "\0\0\0\0\0\0\0\0bcdefghi"
"abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc",
.psize = 64,
.digest = "\xe8\x42\x1f\x61\xd5\x73\xd2\x98",
}, { /* draft-krovetz-vmac-01 test vector 4 */
.key = "abcdefghijklmnop",
.ksize = 16,
.plaintext = "\0\0\0\0\0\0\0\0bcdefghi"
"abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc"
"abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc"
"abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc"
"abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc"
"abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc"
"abcabcabcabcabcabcabcabcabcabcabcabcabcabcabc",
.psize = 316,
.digest = "\x44\x92\xdf\x6c\x5c\xac\x1b\xbe",
}, {
.key = "\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
.ksize = 16,
.plaintext = "\x00\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00",
.psize = 16,
.digest = "\x54\x7b\xa4\x77\x35\x80\x58\x07",
}, {
.key = "\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
.ksize = 16,
.plaintext = vmac64_string1,
.psize = sizeof(vmac64_string1),
.digest = "\xa1\x8c\x68\xae\xd3\x3c\xf5\xce",
}, {
.key = "\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
.ksize = 16,
.plaintext = vmac64_string2,
.psize = sizeof(vmac64_string2),
.digest = "\x2d\x14\xbd\x81\x73\xb0\x27\xc9",
}, {
.key = "\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
.ksize = 16,
.plaintext = vmac64_string3,
.psize = sizeof(vmac64_string3),
.digest = "\x19\x0b\x47\x98\x8c\x95\x1a\x8d",
}, {
.key = "abcdefghijklmnop",
.ksize = 16,
.plaintext = "\x00\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00",
.psize = 16,
.digest = "\x84\x8f\x55\x9e\x26\xa1\x89\x3b",
}, {
.key = "abcdefghijklmnop",
.ksize = 16,
.plaintext = vmac64_string1,
.psize = sizeof(vmac64_string1),
.digest = "\xc2\x74\x8d\xf6\xb0\xab\x5e\xab",
}, {
.key = "abcdefghijklmnop",
.ksize = 16,
.plaintext = vmac64_string2,
.psize = sizeof(vmac64_string2),
.digest = "\xdf\x09\x7b\x3d\x42\x68\x15\x11",
}, {
.key = "abcdefghijklmnop",
.ksize = 16,
.plaintext = vmac64_string3,
.psize = sizeof(vmac64_string3),
.digest = "\xd4\xfa\x8f\xed\xe1\x8f\x32\x8b",
}, {
.key = "a09b5cd!f#07K\x00\x00\x00",
.ksize = 16,
.plaintext = vmac64_string4,
.psize = sizeof(vmac64_string4),
.digest = "\x5f\xa1\x4e\x42\xea\x0f\xa5\xab",
}, {
.key = "a09b5cd!f#07K\x00\x00\x00",
.ksize = 16,
.plaintext = vmac64_string5,
.psize = sizeof(vmac64_string5),
.digest = "\x60\x67\xe8\x1d\xbc\x98\x31\x25",
}, {
.key = "a09b5cd!f#07K\x00\x00\x00",
.ksize = 16,
.plaintext = vmac64_string6,
.psize = sizeof(vmac64_string6),
.digest = "\x41\xeb\x65\x95\x47\x9b\xae\xc4",
},
};
/*
* SHA384 HMAC test vectors from RFC4231
*/
@ -24348,42 +24192,6 @@ static const struct aead_testvec aegis128_tv_template[] = {
},
};
/*
* All key wrapping test vectors taken from
* http://csrc.nist.gov/groups/STM/cavp/documents/mac/kwtestvectors.zip
*
* Note: as documented in keywrap.c, the ivout for encryption is the first
* semiblock of the ciphertext from the test vector. For decryption, iv is
* the first semiblock of the ciphertext.
*/
static const struct cipher_testvec aes_kw_tv_template[] = {
{
.key = "\x75\x75\xda\x3a\x93\x60\x7c\xc2"
"\xbf\xd8\xce\xc7\xaa\xdf\xd9\xa6",
.klen = 16,
.ptext = "\x42\x13\x6d\x3c\x38\x4a\x3e\xea"
"\xc9\x5a\x06\x6f\xd2\x8f\xed\x3f",
.ctext = "\xf6\x85\x94\x81\x6f\x64\xca\xa3"
"\xf5\x6f\xab\xea\x25\x48\xf5\xfb",
.len = 16,
.iv_out = "\x03\x1f\x6b\xd7\xe6\x1e\x64\x3d",
.generates_iv = true,
}, {
.key = "\x80\xaa\x99\x73\x27\xa4\x80\x6b"
"\x6a\x7a\x41\xa5\x2b\x86\xc3\x71"
"\x03\x86\xf9\x32\x78\x6e\xf7\x96"
"\x76\xfa\xfb\x90\xb8\x26\x3c\x5f",
.klen = 32,
.ptext = "\x0a\x25\x6b\xa7\x5c\xfa\x03\xaa"
"\xa0\x2b\xa9\x42\x03\xf1\x5b\xaa",
.ctext = "\xd3\x3d\x3d\x97\x7b\xf0\xa9\x15"
"\x59\xf9\x9c\x8a\xcd\x29\x3d\x43",
.len = 16,
.iv_out = "\x42\x3c\x96\x0d\x8a\x2a\xc4\xc1",
.generates_iv = true,
},
};
/*
* ANSI X9.31 Continuous Pseudo-Random Number Generator (AES mode)
* test vectors, taken from Appendix B.2.9 and B.2.10:

View File

@ -1,696 +0,0 @@
/*
* VMAC: Message Authentication Code using Universal Hashing
*
* Reference: https://tools.ietf.org/html/draft-krovetz-vmac-01
*
* Copyright (c) 2009, Intel Corporation.
* Copyright (c) 2018, Google Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59 Temple
* Place - Suite 330, Boston, MA 02111-1307 USA.
*/
/*
* Derived from:
* VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai.
* This implementation is hereby placed in the public domain.
* The authors offer no warranty. Use at your own risk.
* Last modified: 17 APR 08, 1700 PDT
*/
#include <linux/unaligned.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <asm/byteorder.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/hash.h>
/*
* User definable settings.
*/
#define VMAC_TAG_LEN 64
#define VMAC_KEY_SIZE 128 /* Must be 128, 192 or 256 */
#define VMAC_KEY_LEN (VMAC_KEY_SIZE/8)
#define VMAC_NHBYTES 128 /* Must be 2^i for some i with 3 < i < 13; standard = 128 */
#define VMAC_NONCEBYTES 16
/* per-transform (per-key) context */
struct vmac_tfm_ctx {
struct crypto_cipher *cipher;
u64 nhkey[(VMAC_NHBYTES/8)+2*(VMAC_TAG_LEN/64-1)];
u64 polykey[2*VMAC_TAG_LEN/64];
u64 l3key[2*VMAC_TAG_LEN/64];
};
/* per-request context */
struct vmac_desc_ctx {
union {
u8 partial[VMAC_NHBYTES]; /* partial block */
__le64 partial_words[VMAC_NHBYTES / 8];
};
unsigned int partial_size; /* size of the partial block */
bool first_block_processed;
u64 polytmp[2*VMAC_TAG_LEN/64]; /* running total of L2-hash */
union {
u8 bytes[VMAC_NONCEBYTES];
__be64 pads[VMAC_NONCEBYTES / 8];
} nonce;
unsigned int nonce_size; /* nonce bytes filled so far */
};
/*
* Constants and masks
*/
#define UINT64_C(x) x##ULL
static const u64 p64 = UINT64_C(0xfffffffffffffeff); /* 2^64 - 257 prime */
static const u64 m62 = UINT64_C(0x3fffffffffffffff); /* 62-bit mask */
static const u64 m63 = UINT64_C(0x7fffffffffffffff); /* 63-bit mask */
static const u64 m64 = UINT64_C(0xffffffffffffffff); /* 64-bit mask */
static const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */
#define pe64_to_cpup le64_to_cpup /* Prefer little endian */
#ifdef __LITTLE_ENDIAN
#define INDEX_HIGH 1
#define INDEX_LOW 0
#else
#define INDEX_HIGH 0
#define INDEX_LOW 1
#endif
/*
* The following routines are used in this implementation. They are
* written via macros to simulate zero-overhead call-by-reference.
*
* MUL64: 64x64->128-bit multiplication
* PMUL64: assumes top bits cleared on inputs
* ADD128: 128x128->128-bit addition
*/
#define ADD128(rh, rl, ih, il) \
do { \
u64 _il = (il); \
(rl) += (_il); \
if ((rl) < (_il)) \
(rh)++; \
(rh) += (ih); \
} while (0)
#define MUL32(i1, i2) ((u64)(u32)(i1)*(u32)(i2))
#define PMUL64(rh, rl, i1, i2) /* Assumes m doesn't overflow */ \
do { \
u64 _i1 = (i1), _i2 = (i2); \
u64 m = MUL32(_i1, _i2>>32) + MUL32(_i1>>32, _i2); \
rh = MUL32(_i1>>32, _i2>>32); \
rl = MUL32(_i1, _i2); \
ADD128(rh, rl, (m >> 32), (m << 32)); \
} while (0)
#define MUL64(rh, rl, i1, i2) \
do { \
u64 _i1 = (i1), _i2 = (i2); \
u64 m1 = MUL32(_i1, _i2>>32); \
u64 m2 = MUL32(_i1>>32, _i2); \
rh = MUL32(_i1>>32, _i2>>32); \
rl = MUL32(_i1, _i2); \
ADD128(rh, rl, (m1 >> 32), (m1 << 32)); \
ADD128(rh, rl, (m2 >> 32), (m2 << 32)); \
} while (0)
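Editorial aside, a quick sanity check of MUL64 with i1 = 2^32 + 1 and i2 = 2^32 + 2: m1 = 1*1 = 1, m2 = 1*2 = 2, and the initial rh:rl is 1:2; folding in m1 << 32 and m2 << 32 gives rh = 1 and rl = 3*2^32 + 2, which matches the full product (2^32 + 1)(2^32 + 2) = 2^64 + 3*2^32 + 2.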
/*
* For highest performance the L1 NH and L2 polynomial hashes should be
* carefully implemented to take advantage of one's target architecture.
* Here these two hash functions are defined multiple times: once for
* 64-bit architectures, once for 32-bit SSE2 architectures, and once
* for all remaining (32-bit) architectures.
* For each, nh_16 *must* be defined (works on multiples of 16 bytes).
* Optionally, nh_vmac_nhbytes can be defined (for multiples of
* VMAC_NHBYTES), and nh_16_2 and nh_vmac_nhbytes_2 (versions that do two
* NH computations at once).
*/
#ifdef CONFIG_64BIT
#define nh_16(mp, kp, nw, rh, rl) \
do { \
int i; u64 th, tl; \
rh = rl = 0; \
for (i = 0; i < nw; i += 2) { \
MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \
ADD128(rh, rl, th, tl); \
} \
} while (0)
#define nh_16_2(mp, kp, nw, rh, rl, rh1, rl1) \
do { \
int i; u64 th, tl; \
rh1 = rl1 = rh = rl = 0; \
for (i = 0; i < nw; i += 2) { \
MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \
ADD128(rh, rl, th, tl); \
MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2], \
pe64_to_cpup((mp)+i+1)+(kp)[i+3]); \
ADD128(rh1, rl1, th, tl); \
} \
} while (0)
#if (VMAC_NHBYTES >= 64) /* These versions do 64-bytes of message at a time */
#define nh_vmac_nhbytes(mp, kp, nw, rh, rl) \
do { \
int i; u64 th, tl; \
rh = rl = 0; \
for (i = 0; i < nw; i += 8) { \
MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \
ADD128(rh, rl, th, tl); \
MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2], \
pe64_to_cpup((mp)+i+3)+(kp)[i+3]); \
ADD128(rh, rl, th, tl); \
MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4], \
pe64_to_cpup((mp)+i+5)+(kp)[i+5]); \
ADD128(rh, rl, th, tl); \
MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6], \
pe64_to_cpup((mp)+i+7)+(kp)[i+7]); \
ADD128(rh, rl, th, tl); \
} \
} while (0)
#define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh1, rl1) \
do { \
int i; u64 th, tl; \
rh1 = rl1 = rh = rl = 0; \
for (i = 0; i < nw; i += 8) { \
MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \
ADD128(rh, rl, th, tl); \
MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2], \
pe64_to_cpup((mp)+i+1)+(kp)[i+3]); \
ADD128(rh1, rl1, th, tl); \
MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2], \
pe64_to_cpup((mp)+i+3)+(kp)[i+3]); \
ADD128(rh, rl, th, tl); \
MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+4], \
pe64_to_cpup((mp)+i+3)+(kp)[i+5]); \
ADD128(rh1, rl1, th, tl); \
MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4], \
pe64_to_cpup((mp)+i+5)+(kp)[i+5]); \
ADD128(rh, rl, th, tl); \
MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+6], \
pe64_to_cpup((mp)+i+5)+(kp)[i+7]); \
ADD128(rh1, rl1, th, tl); \
MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6], \
pe64_to_cpup((mp)+i+7)+(kp)[i+7]); \
ADD128(rh, rl, th, tl); \
MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+8], \
pe64_to_cpup((mp)+i+7)+(kp)[i+9]); \
ADD128(rh1, rl1, th, tl); \
} \
} while (0)
#endif
#define poly_step(ah, al, kh, kl, mh, ml) \
do { \
u64 t1h, t1l, t2h, t2l, t3h, t3l, z = 0; \
/* compute ab*cd, put bd into result registers */ \
PMUL64(t3h, t3l, al, kh); \
PMUL64(t2h, t2l, ah, kl); \
PMUL64(t1h, t1l, ah, 2*kh); \
PMUL64(ah, al, al, kl); \
/* add 2 * ac to result */ \
ADD128(ah, al, t1h, t1l); \
/* add together ad + bc */ \
ADD128(t2h, t2l, t3h, t3l); \
/* now (ah,al), (t2l,2*t2h) need summing */ \
/* first add the high registers, carrying into t2h */ \
ADD128(t2h, ah, z, t2l); \
/* double t2h and add top bit of ah */ \
t2h = 2 * t2h + (ah >> 63); \
ah &= m63; \
/* now add the low registers */ \
ADD128(ah, al, mh, ml); \
ADD128(ah, al, z, t2h); \
} while (0)
#else /* ! CONFIG_64BIT */
#ifndef nh_16
#define nh_16(mp, kp, nw, rh, rl) \
do { \
u64 t1, t2, m1, m2, t; \
int i; \
rh = rl = t = 0; \
for (i = 0; i < nw; i += 2) { \
t1 = pe64_to_cpup(mp+i) + kp[i]; \
t2 = pe64_to_cpup(mp+i+1) + kp[i+1]; \
m2 = MUL32(t1 >> 32, t2); \
m1 = MUL32(t1, t2 >> 32); \
ADD128(rh, rl, MUL32(t1 >> 32, t2 >> 32), \
MUL32(t1, t2)); \
rh += (u64)(u32)(m1 >> 32) \
+ (u32)(m2 >> 32); \
t += (u64)(u32)m1 + (u32)m2; \
} \
ADD128(rh, rl, (t >> 32), (t << 32)); \
} while (0)
#endif
static void poly_step_func(u64 *ahi, u64 *alo,
const u64 *kh, const u64 *kl,
const u64 *mh, const u64 *ml)
{
#define a0 (*(((u32 *)alo)+INDEX_LOW))
#define a1 (*(((u32 *)alo)+INDEX_HIGH))
#define a2 (*(((u32 *)ahi)+INDEX_LOW))
#define a3 (*(((u32 *)ahi)+INDEX_HIGH))
#define k0 (*(((u32 *)kl)+INDEX_LOW))
#define k1 (*(((u32 *)kl)+INDEX_HIGH))
#define k2 (*(((u32 *)kh)+INDEX_LOW))
#define k3 (*(((u32 *)kh)+INDEX_HIGH))
u64 p, q, t;
u32 t2;
p = MUL32(a3, k3);
p += p;
p += *(u64 *)mh;
p += MUL32(a0, k2);
p += MUL32(a1, k1);
p += MUL32(a2, k0);
t = (u32)(p);
p >>= 32;
p += MUL32(a0, k3);
p += MUL32(a1, k2);
p += MUL32(a2, k1);
p += MUL32(a3, k0);
t |= ((u64)((u32)p & 0x7fffffff)) << 32;
p >>= 31;
p += (u64)(((u32 *)ml)[INDEX_LOW]);
p += MUL32(a0, k0);
q = MUL32(a1, k3);
q += MUL32(a2, k2);
q += MUL32(a3, k1);
q += q;
p += q;
t2 = (u32)(p);
p >>= 32;
p += (u64)(((u32 *)ml)[INDEX_HIGH]);
p += MUL32(a0, k1);
p += MUL32(a1, k0);
q = MUL32(a2, k3);
q += MUL32(a3, k2);
q += q;
p += q;
*(u64 *)(alo) = (p << 32) | t2;
p >>= 32;
*(u64 *)(ahi) = p + t;
#undef a0
#undef a1
#undef a2
#undef a3
#undef k0
#undef k1
#undef k2
#undef k3
}
#define poly_step(ah, al, kh, kl, mh, ml) \
poly_step_func(&(ah), &(al), &(kh), &(kl), &(mh), &(ml))
#endif /* end of specialized NH and poly definitions */
/* At least nh_16 is defined. Defined others as needed here */
#ifndef nh_16_2
#define nh_16_2(mp, kp, nw, rh, rl, rh2, rl2) \
do { \
nh_16(mp, kp, nw, rh, rl); \
nh_16(mp, ((kp)+2), nw, rh2, rl2); \
} while (0)
#endif
#ifndef nh_vmac_nhbytes
#define nh_vmac_nhbytes(mp, kp, nw, rh, rl) \
nh_16(mp, kp, nw, rh, rl)
#endif
#ifndef nh_vmac_nhbytes_2
#define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh2, rl2) \
do { \
nh_vmac_nhbytes(mp, kp, nw, rh, rl); \
nh_vmac_nhbytes(mp, ((kp)+2), nw, rh2, rl2); \
} while (0)
#endif
static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len)
{
u64 rh, rl, t, z = 0;
/* fully reduce (p1,p2)+(len,0) mod p127 */
t = p1 >> 63;
p1 &= m63;
ADD128(p1, p2, len, t);
/* At this point, (p1,p2) is at most 2^127+(len<<64) */
t = (p1 > m63) + ((p1 == m63) && (p2 == m64));
ADD128(p1, p2, z, t);
p1 &= m63;
/* compute (p1,p2)/(2^64-2^32) and (p1,p2)%(2^64-2^32) */
t = p1 + (p2 >> 32);
t += (t >> 32);
t += (u32)t > 0xfffffffeu;
p1 += (t >> 32);
p2 += (p1 << 32);
/* compute (p1+k1)%p64 and (p2+k2)%p64 */
p1 += k1;
p1 += (0 - (p1 < k1)) & 257;
p2 += k2;
p2 += (0 - (p2 < k2)) & 257;
/* compute (p1+k1)*(p2+k2)%p64 */
MUL64(rh, rl, p1, p2);
t = rh >> 56;
ADD128(t, rl, z, rh);
rh <<= 8;
ADD128(t, rl, z, rh);
t += t << 8;
rl += t;
rl += (0 - (rl < t)) & 257;
rl += (0 - (rl > p64-1)) & 257;
return rl;
}
/* L1 and L2-hash one or more VMAC_NHBYTES-byte blocks */
static void vhash_blocks(const struct vmac_tfm_ctx *tctx,
struct vmac_desc_ctx *dctx,
const __le64 *mptr, unsigned int blocks)
{
const u64 *kptr = tctx->nhkey;
const u64 pkh = tctx->polykey[0];
const u64 pkl = tctx->polykey[1];
u64 ch = dctx->polytmp[0];
u64 cl = dctx->polytmp[1];
u64 rh, rl;
if (!dctx->first_block_processed) {
dctx->first_block_processed = true;
nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
rh &= m62;
ADD128(ch, cl, rh, rl);
mptr += (VMAC_NHBYTES/sizeof(u64));
blocks--;
}
while (blocks--) {
nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
rh &= m62;
poly_step(ch, cl, pkh, pkl, rh, rl);
mptr += (VMAC_NHBYTES/sizeof(u64));
}
dctx->polytmp[0] = ch;
dctx->polytmp[1] = cl;
}
static int vmac_setkey(struct crypto_shash *tfm,
const u8 *key, unsigned int keylen)
{
struct vmac_tfm_ctx *tctx = crypto_shash_ctx(tfm);
__be64 out[2];
u8 in[16] = { 0 };
unsigned int i;
int err;
if (keylen != VMAC_KEY_LEN)
return -EINVAL;
err = crypto_cipher_setkey(tctx->cipher, key, keylen);
if (err)
return err;
/* Fill nh key */
in[0] = 0x80;
for (i = 0; i < ARRAY_SIZE(tctx->nhkey); i += 2) {
crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
tctx->nhkey[i] = be64_to_cpu(out[0]);
tctx->nhkey[i+1] = be64_to_cpu(out[1]);
in[15]++;
}
/* Fill poly key */
in[0] = 0xC0;
in[15] = 0;
for (i = 0; i < ARRAY_SIZE(tctx->polykey); i += 2) {
crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
tctx->polykey[i] = be64_to_cpu(out[0]) & mpoly;
tctx->polykey[i+1] = be64_to_cpu(out[1]) & mpoly;
in[15]++;
}
/* Fill ip key */
in[0] = 0xE0;
in[15] = 0;
for (i = 0; i < ARRAY_SIZE(tctx->l3key); i += 2) {
do {
crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
tctx->l3key[i] = be64_to_cpu(out[0]);
tctx->l3key[i+1] = be64_to_cpu(out[1]);
in[15]++;
} while (tctx->l3key[i] >= p64 || tctx->l3key[i+1] >= p64);
}
return 0;
}
static int vmac_init(struct shash_desc *desc)
{
const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
dctx->partial_size = 0;
dctx->first_block_processed = false;
memcpy(dctx->polytmp, tctx->polykey, sizeof(dctx->polytmp));
dctx->nonce_size = 0;
return 0;
}
static int vmac_update(struct shash_desc *desc, const u8 *p, unsigned int len)
{
const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
unsigned int n;
/* Nonce is passed as first VMAC_NONCEBYTES bytes of data */
if (dctx->nonce_size < VMAC_NONCEBYTES) {
n = min(len, VMAC_NONCEBYTES - dctx->nonce_size);
memcpy(&dctx->nonce.bytes[dctx->nonce_size], p, n);
dctx->nonce_size += n;
p += n;
len -= n;
}
if (dctx->partial_size) {
n = min(len, VMAC_NHBYTES - dctx->partial_size);
memcpy(&dctx->partial[dctx->partial_size], p, n);
dctx->partial_size += n;
p += n;
len -= n;
if (dctx->partial_size == VMAC_NHBYTES) {
vhash_blocks(tctx, dctx, dctx->partial_words, 1);
dctx->partial_size = 0;
}
}
if (len >= VMAC_NHBYTES) {
n = round_down(len, VMAC_NHBYTES);
/* TODO: 'p' may be misaligned here */
vhash_blocks(tctx, dctx, (const __le64 *)p, n / VMAC_NHBYTES);
p += n;
len -= n;
}
if (len) {
memcpy(dctx->partial, p, len);
dctx->partial_size = len;
}
return 0;
}
static u64 vhash_final(const struct vmac_tfm_ctx *tctx,
struct vmac_desc_ctx *dctx)
{
unsigned int partial = dctx->partial_size;
u64 ch = dctx->polytmp[0];
u64 cl = dctx->polytmp[1];
/* L1 and L2-hash the final block if needed */
if (partial) {
/* Zero-pad to next 128-bit boundary */
unsigned int n = round_up(partial, 16);
u64 rh, rl;
memset(&dctx->partial[partial], 0, n - partial);
nh_16(dctx->partial_words, tctx->nhkey, n / 8, rh, rl);
rh &= m62;
if (dctx->first_block_processed)
poly_step(ch, cl, tctx->polykey[0], tctx->polykey[1],
rh, rl);
else
ADD128(ch, cl, rh, rl);
}
/* L3-hash the 128-bit output of L2-hash */
return l3hash(ch, cl, tctx->l3key[0], tctx->l3key[1], partial * 8);
}
static int vmac_final(struct shash_desc *desc, u8 *out)
{
const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
int index;
u64 hash, pad;
if (dctx->nonce_size != VMAC_NONCEBYTES)
return -EINVAL;
/*
* The VMAC specification requires a nonce at least 1 bit shorter than
* the block cipher's block length, so we actually only accept a 127-bit
* nonce. We define the unused bit to be the first one and require that
* it be 0, so the needed prepending of a 0 bit is implicit.
*/
if (dctx->nonce.bytes[0] & 0x80)
return -EINVAL;
/* Finish calculating the VHASH of the message */
hash = vhash_final(tctx, dctx);
/* Generate pseudorandom pad by encrypting the nonce */
BUILD_BUG_ON(VMAC_NONCEBYTES != 2 * (VMAC_TAG_LEN / 8));
index = dctx->nonce.bytes[VMAC_NONCEBYTES - 1] & 1;
dctx->nonce.bytes[VMAC_NONCEBYTES - 1] &= ~1;
crypto_cipher_encrypt_one(tctx->cipher, dctx->nonce.bytes,
dctx->nonce.bytes);
pad = be64_to_cpu(dctx->nonce.pads[index]);
/* The VMAC is the sum of VHASH and the pseudorandom pad */
put_unaligned_be64(hash + pad, out);
return 0;
}
static int vmac_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
struct crypto_cipher_spawn *spawn = crypto_instance_ctx(inst);
struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
struct crypto_cipher *cipher;
cipher = crypto_spawn_cipher(spawn);
if (IS_ERR(cipher))
return PTR_ERR(cipher);
tctx->cipher = cipher;
return 0;
}
static void vmac_exit_tfm(struct crypto_tfm *tfm)
{
struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
crypto_free_cipher(tctx->cipher);
}
static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct shash_instance *inst;
struct crypto_cipher_spawn *spawn;
struct crypto_alg *alg;
u32 mask;
int err;
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask);
if (err)
return err;
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
if (!inst)
return -ENOMEM;
spawn = shash_instance_ctx(inst);
err = crypto_grab_cipher(spawn, shash_crypto_instance(inst),
crypto_attr_alg_name(tb[1]), 0, mask);
if (err)
goto err_free_inst;
alg = crypto_spawn_cipher_alg(spawn);
err = -EINVAL;
if (alg->cra_blocksize != VMAC_NONCEBYTES)
goto err_free_inst;
err = crypto_inst_setname(shash_crypto_instance(inst), tmpl->name, alg);
if (err)
goto err_free_inst;
inst->alg.base.cra_priority = alg->cra_priority;
inst->alg.base.cra_blocksize = alg->cra_blocksize;
inst->alg.base.cra_ctxsize = sizeof(struct vmac_tfm_ctx);
inst->alg.base.cra_init = vmac_init_tfm;
inst->alg.base.cra_exit = vmac_exit_tfm;
inst->alg.descsize = sizeof(struct vmac_desc_ctx);
inst->alg.digestsize = VMAC_TAG_LEN / 8;
inst->alg.init = vmac_init;
inst->alg.update = vmac_update;
inst->alg.final = vmac_final;
inst->alg.setkey = vmac_setkey;
inst->free = shash_free_singlespawn_instance;
err = shash_register_instance(tmpl, inst);
if (err) {
err_free_inst:
shash_free_singlespawn_instance(inst);
}
return err;
}
static struct crypto_template vmac64_tmpl = {
.name = "vmac64",
.create = vmac_create,
.module = THIS_MODULE,
};
static int __init vmac_module_init(void)
{
return crypto_register_template(&vmac64_tmpl);
}
static void __exit vmac_module_exit(void)
{
crypto_unregister_template(&vmac64_tmpl);
}
subsys_initcall(vmac_module_init);
module_exit(vmac_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("VMAC hash algorithm");
MODULE_ALIAS_CRYPTO("vmac64");
MODULE_IMPORT_NS("CRYPTO_INTERNAL");

View File

@ -200,23 +200,6 @@ config S390_PRNG
It is available as of z9.
config CRYPTO_DEV_NIAGARA2
tristate "Niagara2 Stream Processing Unit driver"
select CRYPTO_LIB_DES
select CRYPTO_SKCIPHER
select CRYPTO_HASH
select CRYPTO_MD5
select CRYPTO_SHA1
select CRYPTO_SHA256
depends on SPARC64
help
Each core of a Niagara2 processor contains a Stream
Processing Unit, which itself contains several cryptographic
sub-units. One set provides the Modular Arithmetic Unit,
used for SSL offload. The other set provides the Cipher
Group, which can perform encryption, decryption, hashing,
checksumming, and raw copies.
config CRYPTO_DEV_SL3516
tristate "Storlink SL3516 crypto offloader"
depends on ARCH_GEMINI || COMPILE_TEST

View File

@ -21,8 +21,6 @@ obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o
obj-$(CONFIG_CRYPTO_DEV_MARVELL) += marvell/
obj-$(CONFIG_CRYPTO_DEV_MXS_DCP) += mxs-dcp.o
obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o
n2_crypto-y := n2_core.o n2_asm.o
obj-$(CONFIG_CRYPTO_DEV_NX) += nx/
obj-$(CONFIG_CRYPTO_DEV_OMAP) += omap-crypto.o
obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes-driver.o

View File

@ -2,6 +2,7 @@
/*
* Copyright (C) 2015 Pengutronix, Steffen Trumtrar <kernel@pengutronix.de>
* Copyright (C) 2021 Pengutronix, Ahmad Fatoum <kernel@pengutronix.de>
* Copyright 2024 NXP
*/
#define pr_fmt(fmt) "caam blob_gen: " fmt
@ -104,7 +105,7 @@ int caam_process_blob(struct caam_blob_priv *priv,
}
ctrlpriv = dev_get_drvdata(jrdev->parent);
moo = FIELD_GET(CSTA_MOO, rd_reg32(&ctrlpriv->ctrl->perfmon.status));
moo = FIELD_GET(CSTA_MOO, rd_reg32(&ctrlpriv->jr[0]->perfmon.status));
if (moo != CSTA_MOO_SECURE && moo != CSTA_MOO_TRUSTED)
dev_warn(jrdev,
"using insecure test key, enable HAB to use unique device key!\n");

View File

@ -7,6 +7,8 @@
* Author: Mario Limonciello <mario.limonciello@amd.com>
*/
#include <linux/mutex.h>
#include "dbc.h"
#define DBC_DEFAULT_TIMEOUT (10 * MSEC_PER_SEC)
@ -137,64 +139,49 @@ static long dbc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return -ENODEV;
dbc_dev = psp_master->dbc_data;
mutex_lock(&dbc_dev->ioctl_mutex);
guard(mutex)(&dbc_dev->ioctl_mutex);
switch (cmd) {
case DBCIOCNONCE:
if (copy_from_user(dbc_dev->payload, argp, sizeof(struct dbc_user_nonce))) {
ret = -EFAULT;
goto unlock;
}
if (copy_from_user(dbc_dev->payload, argp, sizeof(struct dbc_user_nonce)))
return -EFAULT;
ret = send_dbc_nonce(dbc_dev);
if (ret)
goto unlock;
return ret;
if (copy_to_user(argp, dbc_dev->payload, sizeof(struct dbc_user_nonce))) {
ret = -EFAULT;
goto unlock;
}
if (copy_to_user(argp, dbc_dev->payload, sizeof(struct dbc_user_nonce)))
return -EFAULT;
break;
case DBCIOCUID:
if (copy_from_user(dbc_dev->payload, argp, sizeof(struct dbc_user_setuid))) {
ret = -EFAULT;
goto unlock;
}
if (copy_from_user(dbc_dev->payload, argp, sizeof(struct dbc_user_setuid)))
return -EFAULT;
*dbc_dev->payload_size = dbc_dev->header_size + sizeof(struct dbc_user_setuid);
ret = send_dbc_cmd(dbc_dev, PSP_DYNAMIC_BOOST_SET_UID);
if (ret)
goto unlock;
return ret;
if (copy_to_user(argp, dbc_dev->payload, sizeof(struct dbc_user_setuid))) {
ret = -EFAULT;
goto unlock;
}
if (copy_to_user(argp, dbc_dev->payload, sizeof(struct dbc_user_setuid)))
return -EFAULT;
break;
case DBCIOCPARAM:
if (copy_from_user(dbc_dev->payload, argp, sizeof(struct dbc_user_param))) {
ret = -EFAULT;
goto unlock;
}
if (copy_from_user(dbc_dev->payload, argp, sizeof(struct dbc_user_param)))
return -EFAULT;
*dbc_dev->payload_size = dbc_dev->header_size + sizeof(struct dbc_user_param);
ret = send_dbc_parameter(dbc_dev);
if (ret)
goto unlock;
return ret;
if (copy_to_user(argp, dbc_dev->payload, sizeof(struct dbc_user_param))) {
ret = -EFAULT;
goto unlock;
}
if (copy_to_user(argp, dbc_dev->payload, sizeof(struct dbc_user_param)))
return -EFAULT;
break;
default:
ret = -EINVAL;
return -EINVAL;
}
unlock:
mutex_unlock(&dbc_dev->ioctl_mutex);
return ret;
return 0;
}
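Editorial note: the rewrite above relies on the scoped-lock guard from <linux/cleanup.h>, which releases the mutex on every return path and removes the need for the old unlock label. A minimal sketch of the pattern on a hypothetical device structure:

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/mutex.h>

struct example_dev {
        struct mutex lock;
        int value;
};

static int example_dev_set_value(struct example_dev *dev, int value)
{
        guard(mutex)(&dev->lock);       /* dropped automatically on return */

        if (value < 0)
                return -EINVAL;         /* no explicit mutex_unlock() needed */

        dev->value = value;
        return 0;
}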
static const struct file_operations dbc_fops = {

View File

@ -1396,6 +1396,17 @@ static enum acc_err_result hpre_get_err_result(struct hisi_qm *qm)
return ACC_ERR_RECOVERED;
}
static bool hpre_dev_is_abnormal(struct hisi_qm *qm)
{
u32 err_status;
err_status = hpre_get_hw_err_status(qm);
if (err_status & qm->err_info.dev_shutdown_mask)
return true;
return false;
}
static void hpre_err_info_init(struct hisi_qm *qm)
{
struct hisi_qm_err_info *err_info = &qm->err_info;
@ -1428,6 +1439,7 @@ static const struct hisi_qm_err_ini hpre_err_ini = {
.show_last_dfx_regs = hpre_show_last_dfx_regs,
.err_info_init = hpre_err_info_init,
.get_err_result = hpre_get_err_result,
.dev_is_abnormal = hpre_dev_is_abnormal,
};
static int hpre_pf_probe_init(struct hpre *hpre)

View File

@ -119,6 +119,7 @@
#define QM_SQC_VFT_BASE_MASK_V2 GENMASK(15, 0)
#define QM_SQC_VFT_NUM_SHIFT_V2 45
#define QM_SQC_VFT_NUM_MASK_V2 GENMASK(9, 0)
#define QM_MAX_QC_TYPE 2
#define QM_ABNORMAL_INT_SOURCE 0x100000
#define QM_ABNORMAL_INT_MASK 0x100004
@ -234,8 +235,6 @@
#define QM_QOS_MAX_CIR_U 6
#define QM_AUTOSUSPEND_DELAY 3000
#define QM_DEV_ALG_MAX_LEN 256
/* abnormal status value for stopping queue */
#define QM_STOP_QUEUE_FAIL 1
#define QM_DUMP_SQC_FAIL 3
@ -333,6 +332,7 @@ static const struct hisi_qm_cap_info qm_cap_info_comm[] = {
{QM_SUPPORT_STOP_FUNC, 0x3100, 0, BIT(10), 0x0, 0x0, 0x1},
{QM_SUPPORT_MB_COMMAND, 0x3100, 0, BIT(11), 0x0, 0x0, 0x1},
{QM_SUPPORT_SVA_PREFETCH, 0x3100, 0, BIT(14), 0x0, 0x0, 0x1},
{QM_SUPPORT_DAE, 0x3100, 0, BIT(15), 0x0, 0x0, 0x0},
};
static const struct hisi_qm_cap_info qm_cap_info_pf[] = {
@ -501,15 +501,20 @@ static u32 qm_get_dev_err_status(struct hisi_qm *qm)
/* Check if the error causes the master ooo block */
static bool qm_check_dev_error(struct hisi_qm *qm)
{
u32 val, dev_val;
struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
u32 err_status;
if (qm->fun_type == QM_HW_VF)
if (pf_qm->fun_type == QM_HW_VF)
return false;
val = qm_get_hw_error_status(qm) & qm->err_info.qm_shutdown_mask;
dev_val = qm_get_dev_err_status(qm) & qm->err_info.dev_shutdown_mask;
err_status = qm_get_hw_error_status(pf_qm);
if (err_status & pf_qm->err_info.qm_shutdown_mask)
return true;
return val || dev_val;
if (pf_qm->err_ini->dev_is_abnormal)
return pf_qm->err_ini->dev_is_abnormal(pf_qm);
return false;
}
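Editorial note: the VF/PF handling above leans on pci_physfn(), which returns the physical function's pci_dev for a VF and the device itself when it already is a PF, so a single lookup covers both cases. A small sketch with a stand-in private structure (struct example_qm is hypothetical):

#include <linux/pci.h>

struct example_qm {
        struct pci_dev *pdev;
        /* ... driver state ... */
};

static struct example_qm *example_pf_priv(const struct example_qm *qm)
{
        /* pci_get_drvdata() on the PF's pci_dev returns the PF's private data. */
        return pci_get_drvdata(pci_physfn(qm->pdev));
}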
static int qm_wait_reset_finish(struct hisi_qm *qm)
@ -654,7 +659,6 @@ EXPORT_SYMBOL_GPL(hisi_qm_mb);
/* op 0: set xqc information to hardware, 1: get xqc information from hardware. */
int qm_set_and_get_xqc(struct hisi_qm *qm, u8 cmd, void *xqc, u32 qp_id, bool op)
{
struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
struct qm_mailbox mailbox;
dma_addr_t xqc_dma;
void *tmp_xqc;
@ -688,7 +692,7 @@ int qm_set_and_get_xqc(struct hisi_qm *qm, u8 cmd, void *xqc, u32 qp_id, bool op
}
/* Setting xqc will fail if master OOO is blocked. */
if (qm_check_dev_error(pf_qm)) {
if (qm_check_dev_error(qm)) {
dev_err(&qm->pdev->dev, "failed to send mailbox since qm is stop!\n");
return -EIO;
}
@ -855,10 +859,10 @@ int hisi_qm_set_algs(struct hisi_qm *qm, u64 alg_msk, const struct qm_dev_alg *d
strcat(algs, dev_algs[i].alg);
ptr = strrchr(algs, '\n');
if (ptr) {
if (ptr)
*ptr = '\0';
qm->uacce->algs = algs;
}
return 0;
}
@ -1052,11 +1056,10 @@ static void qm_disable_qp(struct hisi_qm *qm, u32 qp_id)
static void qm_reset_function(struct hisi_qm *qm)
{
struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
struct device *dev = &qm->pdev->dev;
int ret;
if (qm_check_dev_error(pf_qm))
if (qm_check_dev_error(qm))
return;
ret = qm_reset_prepare_ready(qm);
@ -2156,12 +2159,11 @@ static int qm_wait_qp_empty(struct hisi_qm *qm, u32 *state, u32 qp_id)
static int qm_drain_qp(struct hisi_qp *qp)
{
struct hisi_qm *qm = qp->qm;
struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
u32 state = 0;
int ret;
/* No need to judge if master OOO is blocked. */
if (qm_check_dev_error(pf_qm))
if (qm_check_dev_error(qm))
return 0;
/* HW V3 supports drain qp by device */
@ -2475,7 +2477,7 @@ static long hisi_qm_uacce_ioctl(struct uacce_queue *q, unsigned int cmd,
sizeof(struct hisi_qp_ctx)))
return -EFAULT;
if (qp_ctx.qc_type != 0 && qp_ctx.qc_type != 1)
if (qp_ctx.qc_type > QM_MAX_QC_TYPE)
return -EINVAL;
qm_set_sqctype(q, qp_ctx.qc_type);
@ -4137,6 +4139,12 @@ static int qm_controller_reset_prepare(struct hisi_qm *qm)
struct pci_dev *pdev = qm->pdev;
int ret;
if (qm->err_ini->set_priv_status) {
ret = qm->err_ini->set_priv_status(qm);
if (ret)
return ret;
}
ret = qm_reset_prepare_ready(qm);
if (ret) {
pci_err(pdev, "Controller reset not ready!\n");
@ -4527,7 +4535,7 @@ void hisi_qm_reset_prepare(struct pci_dev *pdev)
* Check whether there is an ECC mbit error, If it occurs, need to
* wait for soft reset to fix it.
*/
while (qm_check_dev_error(pf_qm)) {
while (qm_check_dev_error(qm)) {
msleep(++delay);
if (delay > QM_RESET_WAIT_TIMEOUT)
return;
@ -5247,6 +5255,14 @@ static int qm_clear_device(struct hisi_qm *qm)
return ret;
}
if (qm->err_ini->set_priv_status) {
ret = qm->err_ini->set_priv_status(qm);
if (ret) {
writel(0x0, qm->io_base + ACC_MASTER_GLOBAL_CTRL);
return ret;
}
}
return qm_reset_device(qm);
}
@ -5598,6 +5614,12 @@ static int qm_prepare_for_suspend(struct hisi_qm *qm)
if (ret)
return ret;
if (qm->err_ini->set_priv_status) {
ret = qm->err_ini->set_priv_status(qm);
if (ret)
return ret;
}
ret = qm_set_pf_mse(qm, false);
if (ret)
pci_err(pdev, "failed to disable MSE before suspending!\n");

View File

@ -37,6 +37,7 @@ struct sec_aead_req {
u8 *a_ivin;
dma_addr_t a_ivin_dma;
struct aead_request *aead_req;
bool fallback;
};
/* SEC request of Crypto */
@ -90,9 +91,7 @@ struct sec_auth_ctx {
dma_addr_t a_key_dma;
u8 *a_key;
u8 a_key_len;
u8 mac_len;
u8 a_alg;
bool fallback;
struct crypto_shash *hash_tfm;
struct crypto_aead *fallback_aead_tfm;
};

View File

@ -948,15 +948,14 @@ static int sec_aead_mac_init(struct sec_aead_req *req)
struct aead_request *aead_req = req->aead_req;
struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
size_t authsize = crypto_aead_authsize(tfm);
u8 *mac_out = req->out_mac;
struct scatterlist *sgl = aead_req->src;
u8 *mac_out = req->out_mac;
size_t copy_size;
off_t skip_size;
/* Copy input mac */
skip_size = aead_req->assoclen + aead_req->cryptlen - authsize;
copy_size = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac_out,
authsize, skip_size);
copy_size = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac_out, authsize, skip_size);
if (unlikely(copy_size != authsize))
return -EINVAL;
@ -1120,10 +1119,7 @@ static int sec_aead_setauthsize(struct crypto_aead *aead, unsigned int authsize)
struct sec_ctx *ctx = crypto_tfm_ctx(tfm);
struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
if (unlikely(a_ctx->fallback_aead_tfm))
return crypto_aead_setauthsize(a_ctx->fallback_aead_tfm, authsize);
return 0;
}
static int sec_aead_fallback_setkey(struct sec_auth_ctx *a_ctx,
@ -1139,7 +1135,6 @@ static int sec_aead_fallback_setkey(struct sec_auth_ctx *a_ctx,
static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
const u32 keylen, const enum sec_hash_alg a_alg,
const enum sec_calg c_alg,
const enum sec_mac_len mac_len,
const enum sec_cmode c_mode)
{
struct sec_ctx *ctx = crypto_aead_ctx(tfm);
@ -1151,7 +1146,6 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
ctx->a_ctx.a_alg = a_alg;
ctx->c_ctx.c_alg = c_alg;
ctx->a_ctx.mac_len = mac_len;
c_ctx->c_mode = c_mode;
if (c_mode == SEC_CMODE_CCM || c_mode == SEC_CMODE_GCM) {
@ -1162,13 +1156,7 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
}
memcpy(c_ctx->c_key, key, keylen);
if (unlikely(a_ctx->fallback_aead_tfm)) {
ret = sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
if (ret)
return ret;
}
return 0;
return sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
}
ret = crypto_authenc_extractkeys(&keys, key, keylen);
@ -1187,10 +1175,15 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
goto bad_key;
}
if ((ctx->a_ctx.mac_len & SEC_SQE_LEN_RATE_MASK) ||
(ctx->a_ctx.a_key_len & SEC_SQE_LEN_RATE_MASK)) {
if (ctx->a_ctx.a_key_len & SEC_SQE_LEN_RATE_MASK) {
ret = -EINVAL;
dev_err(dev, "MAC or AUTH key length error!\n");
dev_err(dev, "AUTH key length error!\n");
goto bad_key;
}
ret = sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
if (ret) {
dev_err(dev, "set sec fallback key err!\n");
goto bad_key;
}
@ -1202,27 +1195,19 @@ bad_key:
}
#define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, maclen, cmode) \
static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key, \
u32 keylen) \
#define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, cmode) \
static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key, u32 keylen) \
{ \
return sec_aead_setkey(tfm, key, keylen, aalg, calg, maclen, cmode);\
return sec_aead_setkey(tfm, key, keylen, aalg, calg, cmode); \
}
GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1,
SEC_CALG_AES, SEC_HMAC_SHA1_MAC, SEC_CMODE_CBC)
GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256,
SEC_CALG_AES, SEC_HMAC_SHA256_MAC, SEC_CMODE_CBC)
GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512,
SEC_CALG_AES, SEC_HMAC_SHA512_MAC, SEC_CMODE_CBC)
GEN_SEC_AEAD_SETKEY_FUNC(aes_ccm, 0, SEC_CALG_AES,
SEC_HMAC_CCM_MAC, SEC_CMODE_CCM)
GEN_SEC_AEAD_SETKEY_FUNC(aes_gcm, 0, SEC_CALG_AES,
SEC_HMAC_GCM_MAC, SEC_CMODE_GCM)
GEN_SEC_AEAD_SETKEY_FUNC(sm4_ccm, 0, SEC_CALG_SM4,
SEC_HMAC_CCM_MAC, SEC_CMODE_CCM)
GEN_SEC_AEAD_SETKEY_FUNC(sm4_gcm, 0, SEC_CALG_SM4,
SEC_HMAC_GCM_MAC, SEC_CMODE_GCM)
GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1, SEC_CALG_AES, SEC_CMODE_CBC)
GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256, SEC_CALG_AES, SEC_CMODE_CBC)
GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512, SEC_CALG_AES, SEC_CMODE_CBC)
GEN_SEC_AEAD_SETKEY_FUNC(aes_ccm, 0, SEC_CALG_AES, SEC_CMODE_CCM)
GEN_SEC_AEAD_SETKEY_FUNC(aes_gcm, 0, SEC_CALG_AES, SEC_CMODE_GCM)
GEN_SEC_AEAD_SETKEY_FUNC(sm4_ccm, 0, SEC_CALG_SM4, SEC_CMODE_CCM)
GEN_SEC_AEAD_SETKEY_FUNC(sm4_gcm, 0, SEC_CALG_SM4, SEC_CMODE_GCM)
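For reference, a hedged sketch of what one GEN_SEC_AEAD_SETKEY_FUNC() invocation above expands to once the mac_len argument is gone; everything here is taken from the macro and its arguments as shown, not from a separate build.
/* Sketch: expansion of GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1, SEC_CALG_AES, SEC_CMODE_CBC) */
static int sec_setkey_aes_cbc_sha1(struct crypto_aead *tfm, const u8 *key, u32 keylen)
{
	return sec_aead_setkey(tfm, key, keylen, SEC_A_HMAC_SHA1, SEC_CALG_AES, SEC_CMODE_CBC);
}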
static int sec_aead_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
{
@ -1470,9 +1455,10 @@ static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
static void set_aead_auth_iv(struct sec_ctx *ctx, struct sec_req *req)
{
struct aead_request *aead_req = req->aead_req.aead_req;
struct sec_cipher_req *c_req = &req->c_req;
struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
size_t authsize = crypto_aead_authsize(tfm);
struct sec_aead_req *a_req = &req->aead_req;
size_t authsize = ctx->a_ctx.mac_len;
struct sec_cipher_req *c_req = &req->c_req;
u32 data_size = aead_req->cryptlen;
u8 flage = 0;
u8 cm, cl;
@ -1513,10 +1499,8 @@ static void set_aead_auth_iv(struct sec_ctx *ctx, struct sec_req *req)
static void sec_aead_set_iv(struct sec_ctx *ctx, struct sec_req *req)
{
struct aead_request *aead_req = req->aead_req.aead_req;
struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
size_t authsize = crypto_aead_authsize(tfm);
struct sec_cipher_req *c_req = &req->c_req;
struct sec_aead_req *a_req = &req->aead_req;
struct sec_cipher_req *c_req = &req->c_req;
memcpy(c_req->c_ivin, aead_req->iv, ctx->c_ctx.ivsize);
@ -1524,15 +1508,11 @@ static void sec_aead_set_iv(struct sec_ctx *ctx, struct sec_req *req)
/*
* CCM 16Byte Cipher_IV: {1B_Flage,13B_IV,2B_counter},
* the counter must set to 0x01
* CCM 16Byte Auth_IV: {1B_AFlage,13B_IV,2B_Ptext_length}
*/
ctx->a_ctx.mac_len = authsize;
/* CCM 16Byte Auth_IV: {1B_AFlage,13B_IV,2B_Ptext_length} */
set_aead_auth_iv(ctx, req);
}
} else if (ctx->c_ctx.c_mode == SEC_CMODE_GCM) {
/* GCM 12Byte Cipher_IV == Auth_IV */
if (ctx->c_ctx.c_mode == SEC_CMODE_GCM) {
ctx->a_ctx.mac_len = authsize;
memcpy(a_req->a_ivin, c_req->c_ivin, SEC_AIV_SIZE);
}
}
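A minimal sketch of the 16-byte CCM cipher IV layout described in the comment above (1-byte flag, 13-byte nonce, 2-byte counter preset to 0x01); the helper name and the exact flag value are illustrative assumptions, not driver code.
/* Illustrative only; needs <linux/string.h>. Builds {1B flag, 13B nonce, 2B counter = 0x0001}. */
static void sketch_ccm_cipher_iv(u8 iv[16], const u8 nonce[13])
{
	iv[0] = 0x01;			/* assumed flag byte for a 2-byte length field */
	memcpy(&iv[1], nonce, 13);	/* 13-byte IV/nonce */
	iv[14] = 0x00;			/* 2-byte big-endian counter ... */
	iv[15] = 0x01;			/* ... preset to 0x01 as the comment requires */
}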
@ -1542,9 +1522,11 @@ static void sec_auth_bd_fill_xcm(struct sec_auth_ctx *ctx, int dir,
{
struct sec_aead_req *a_req = &req->aead_req;
struct aead_request *aq = a_req->aead_req;
struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
size_t authsize = crypto_aead_authsize(tfm);
/* C_ICV_Len is MAC size, 0x4 ~ 0x10 */
sec_sqe->type2.icvw_kmode |= cpu_to_le16((u16)ctx->mac_len);
sec_sqe->type2.icvw_kmode |= cpu_to_le16((u16)authsize);
/* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */
sec_sqe->type2.a_key_addr = sec_sqe->type2.c_key_addr;
@ -1568,9 +1550,11 @@ static void sec_auth_bd_fill_xcm_v3(struct sec_auth_ctx *ctx, int dir,
{
struct sec_aead_req *a_req = &req->aead_req;
struct aead_request *aq = a_req->aead_req;
struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
size_t authsize = crypto_aead_authsize(tfm);
/* C_ICV_Len is MAC size, 0x4 ~ 0x10 */
sqe3->c_icv_key |= cpu_to_le16((u16)ctx->mac_len << SEC_MAC_OFFSET_V3);
sqe3->c_icv_key |= cpu_to_le16((u16)authsize << SEC_MAC_OFFSET_V3);
/* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */
sqe3->a_key_addr = sqe3->c_key_addr;
@ -1594,11 +1578,12 @@ static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir,
struct sec_aead_req *a_req = &req->aead_req;
struct sec_cipher_req *c_req = &req->c_req;
struct aead_request *aq = a_req->aead_req;
struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
size_t authsize = crypto_aead_authsize(tfm);
sec_sqe->type2.a_key_addr = cpu_to_le64(ctx->a_key_dma);
sec_sqe->type2.mac_key_alg =
cpu_to_le32(ctx->mac_len / SEC_SQE_LEN_RATE);
sec_sqe->type2.mac_key_alg = cpu_to_le32(authsize / SEC_SQE_LEN_RATE);
sec_sqe->type2.mac_key_alg |=
cpu_to_le32((u32)((ctx->a_key_len) /
@ -1648,11 +1633,13 @@ static void sec_auth_bd_fill_ex_v3(struct sec_auth_ctx *ctx, int dir,
struct sec_aead_req *a_req = &req->aead_req;
struct sec_cipher_req *c_req = &req->c_req;
struct aead_request *aq = a_req->aead_req;
struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
size_t authsize = crypto_aead_authsize(tfm);
sqe3->a_key_addr = cpu_to_le64(ctx->a_key_dma);
sqe3->auth_mac_key |=
cpu_to_le32((u32)(ctx->mac_len /
cpu_to_le32((u32)(authsize /
SEC_SQE_LEN_RATE) << SEC_MAC_OFFSET_V3);
sqe3->auth_mac_key |=
@ -1703,9 +1690,9 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
{
struct aead_request *a_req = req->aead_req.aead_req;
struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
size_t authsize = crypto_aead_authsize(tfm);
struct sec_aead_req *aead_req = &req->aead_req;
struct sec_cipher_req *c_req = &req->c_req;
size_t authsize = crypto_aead_authsize(tfm);
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
struct aead_request *backlog_aead_req;
struct sec_req *backlog_req;
@ -1718,10 +1705,8 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
if (!err && c_req->encrypt) {
struct scatterlist *sgl = a_req->dst;
sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl),
aead_req->out_mac,
authsize, a_req->cryptlen +
a_req->assoclen);
sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl), aead_req->out_mac,
authsize, a_req->cryptlen + a_req->assoclen);
if (unlikely(sz != authsize)) {
dev_err(c->dev, "copy out mac err!\n");
err = -EINVAL;
@ -1929,8 +1914,10 @@ static void sec_aead_exit(struct crypto_aead *tfm)
static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name)
{
struct aead_alg *alg = crypto_aead_alg(tfm);
struct sec_ctx *ctx = crypto_aead_ctx(tfm);
struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
const char *aead_name = alg->base.cra_name;
int ret;
ret = sec_aead_init(tfm);
@ -1939,11 +1926,20 @@ static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name)
return ret;
}
auth_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
if (IS_ERR(auth_ctx->hash_tfm)) {
a_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
if (IS_ERR(a_ctx->hash_tfm)) {
dev_err(ctx->dev, "aead alloc shash error!\n");
sec_aead_exit(tfm);
return PTR_ERR(auth_ctx->hash_tfm);
return PTR_ERR(a_ctx->hash_tfm);
}
a_ctx->fallback_aead_tfm = crypto_alloc_aead(aead_name, 0,
CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);
if (IS_ERR(a_ctx->fallback_aead_tfm)) {
dev_err(ctx->dev, "aead driver alloc fallback tfm error!\n");
crypto_free_shash(ctx->a_ctx.hash_tfm);
sec_aead_exit(tfm);
return PTR_ERR(a_ctx->fallback_aead_tfm);
}
return 0;
@ -1953,6 +1949,7 @@ static void sec_aead_ctx_exit(struct crypto_aead *tfm)
{
struct sec_ctx *ctx = crypto_aead_ctx(tfm);
crypto_free_aead(ctx->a_ctx.fallback_aead_tfm);
crypto_free_shash(ctx->a_ctx.hash_tfm);
sec_aead_exit(tfm);
}
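A minimal sketch of how a fallback AEAD transform such as the one allocated in sec_aead_ctx_init() above is typically driven for one encryption; the request-setup calls are standard crypto API, while the wrapper function itself and the GFP flag are assumptions.
/* Illustrative only; needs <crypto/aead.h>. Assumes the fallback tfm is synchronous. */
static int sketch_fallback_encrypt(struct crypto_aead *fb_tfm, struct aead_request *orig)
{
	struct aead_request *subreq;
	int ret;

	subreq = aead_request_alloc(fb_tfm, GFP_KERNEL);
	if (!subreq)
		return -ENOMEM;

	aead_request_set_callback(subreq, orig->base.flags, NULL, NULL);
	aead_request_set_crypt(subreq, orig->src, orig->dst, orig->cryptlen, orig->iv);
	aead_request_set_ad(subreq, orig->assoclen);

	ret = crypto_aead_encrypt(subreq);
	aead_request_free(subreq);
	return ret;
}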
@ -1979,7 +1976,6 @@ static int sec_aead_xcm_ctx_init(struct crypto_aead *tfm)
sec_aead_exit(tfm);
return PTR_ERR(a_ctx->fallback_aead_tfm);
}
a_ctx->fallback = false;
return 0;
}
@ -2233,21 +2229,20 @@ static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
struct aead_request *req = sreq->aead_req.aead_req;
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
size_t authsize = crypto_aead_authsize(tfm);
size_t sz = crypto_aead_authsize(tfm);
u8 c_mode = ctx->c_ctx.c_mode;
struct device *dev = ctx->dev;
int ret;
if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN ||
req->assoclen > SEC_MAX_AAD_LEN)) {
dev_err(dev, "aead input spec error!\n");
/* Hardware does not handle cases where authsize is less than 4 bytes */
if (unlikely(sz < MIN_MAC_LEN)) {
sreq->aead_req.fallback = true;
return -EINVAL;
}
if (unlikely((c_mode == SEC_CMODE_GCM && authsize < DES_BLOCK_SIZE) ||
(c_mode == SEC_CMODE_CCM && (authsize < MIN_MAC_LEN ||
authsize & MAC_LEN_MASK)))) {
dev_err(dev, "aead input mac length error!\n");
if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN ||
req->assoclen > SEC_MAX_AAD_LEN)) {
dev_err(dev, "aead input spec error!\n");
return -EINVAL;
}
@ -2266,7 +2261,7 @@ static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq)
if (sreq->c_req.encrypt)
sreq->c_req.c_len = req->cryptlen;
else
sreq->c_req.c_len = req->cryptlen - authsize;
sreq->c_req.c_len = req->cryptlen - sz;
if (c_mode == SEC_CMODE_CBC) {
if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
dev_err(dev, "aead crypto length error!\n");
@ -2293,7 +2288,7 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
if (ctx->sec->qm.ver == QM_HW_V2) {
if (unlikely(!req->cryptlen || (!sreq->c_req.encrypt &&
req->cryptlen <= authsize))) {
ctx->a_ctx.fallback = true;
sreq->aead_req.fallback = true;
return -EINVAL;
}
}
@ -2321,16 +2316,9 @@ static int sec_aead_soft_crypto(struct sec_ctx *ctx,
bool encrypt)
{
struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
struct device *dev = ctx->dev;
struct aead_request *subreq;
int ret;
/* Kunpeng920 aead mode not support input 0 size */
if (!a_ctx->fallback_aead_tfm) {
dev_err(dev, "aead fallback tfm is NULL!\n");
return -EINVAL;
}
subreq = aead_request_alloc(a_ctx->fallback_aead_tfm, GFP_KERNEL);
if (!subreq)
return -ENOMEM;
@ -2362,10 +2350,11 @@ static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
req->aead_req.aead_req = a_req;
req->c_req.encrypt = encrypt;
req->ctx = ctx;
req->aead_req.fallback = false;
ret = sec_aead_param_check(ctx, req);
if (unlikely(ret)) {
if (ctx->a_ctx.fallback)
if (req->aead_req.fallback)
return sec_aead_soft_crypto(ctx, a_req, encrypt);
return -EINVAL;
}

View File

@ -23,17 +23,6 @@ enum sec_hash_alg {
SEC_A_HMAC_SHA512 = 0x15,
};
enum sec_mac_len {
SEC_HMAC_CCM_MAC = 16,
SEC_HMAC_GCM_MAC = 16,
SEC_SM3_MAC = 32,
SEC_HMAC_SM3_MAC = 32,
SEC_HMAC_MD5_MAC = 16,
SEC_HMAC_SHA1_MAC = 20,
SEC_HMAC_SHA256_MAC = 32,
SEC_HMAC_SHA512_MAC = 64,
};
enum sec_cmode {
SEC_CMODE_ECB = 0x0,
SEC_CMODE_CBC = 0x1,

View File

@ -1097,6 +1097,17 @@ static enum acc_err_result sec_get_err_result(struct hisi_qm *qm)
return ACC_ERR_RECOVERED;
}
static bool sec_dev_is_abnormal(struct hisi_qm *qm)
{
u32 err_status;
err_status = sec_get_hw_err_status(qm);
if (err_status & qm->err_info.dev_shutdown_mask)
return true;
return false;
}
static void sec_err_info_init(struct hisi_qm *qm)
{
struct hisi_qm_err_info *err_info = &qm->err_info;
@ -1129,6 +1140,7 @@ static const struct hisi_qm_err_ini sec_err_ini = {
.show_last_dfx_regs = sec_show_last_dfx_regs,
.err_info_init = sec_err_info_init,
.get_err_result = sec_get_err_result,
.dev_is_abnormal = sec_dev_is_abnormal,
};
static int sec_pf_probe_init(struct sec_dev *sec)

View File

@ -1,2 +1,2 @@
obj-$(CONFIG_CRYPTO_DEV_HISI_ZIP) += hisi_zip.o
hisi_zip-objs = zip_main.o zip_crypto.o
hisi_zip-objs = zip_main.o zip_crypto.o dae_main.o

View File

@ -0,0 +1,262 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2024 HiSilicon Limited. */
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/uacce.h>
#include "zip.h"
/* memory */
#define DAE_MEM_START_OFFSET 0x331040
#define DAE_MEM_DONE_OFFSET 0x331044
#define DAE_MEM_START_MASK 0x1
#define DAE_MEM_DONE_MASK 0x1
#define DAE_REG_RD_INTVRL_US 10
#define DAE_REG_RD_TMOUT_US USEC_PER_SEC
#define DAE_ALG_NAME "hashagg"
/* error */
#define DAE_AXI_CFG_OFFSET 0x331000
#define DAE_AXI_SHUTDOWN_MASK (BIT(0) | BIT(5))
#define DAE_ERR_SOURCE_OFFSET 0x331C84
#define DAE_ERR_STATUS_OFFSET 0x331C88
#define DAE_ERR_CE_OFFSET 0x331CA0
#define DAE_ERR_CE_MASK BIT(3)
#define DAE_ERR_NFE_OFFSET 0x331CA4
#define DAE_ERR_NFE_MASK 0x17
#define DAE_ERR_FE_OFFSET 0x331CA8
#define DAE_ERR_FE_MASK 0
#define DAE_ECC_MBIT_MASK BIT(2)
#define DAE_ECC_INFO_OFFSET 0x33400C
#define DAE_ERR_SHUTDOWN_OFFSET 0x331CAC
#define DAE_ERR_SHUTDOWN_MASK 0x17
#define DAE_ERR_ENABLE_OFFSET 0x331C80
#define DAE_ERR_ENABLE_MASK (DAE_ERR_FE_MASK | DAE_ERR_NFE_MASK | DAE_ERR_CE_MASK)
#define DAE_AM_CTRL_GLOBAL_OFFSET 0x330000
#define DAE_AM_RETURN_OFFSET 0x330150
#define DAE_AM_RETURN_MASK 0x3
#define DAE_AXI_CFG_OFFSET 0x331000
#define DAE_AXI_SHUTDOWN_EN_MASK (BIT(0) | BIT(5))
struct hisi_dae_hw_error {
u32 int_msk;
const char *msg;
};
static const struct hisi_dae_hw_error dae_hw_error[] = {
{ .int_msk = BIT(0), .msg = "dae_axi_bus_err" },
{ .int_msk = BIT(1), .msg = "dae_axi_poison_err" },
{ .int_msk = BIT(2), .msg = "dae_ecc_2bit_err" },
{ .int_msk = BIT(3), .msg = "dae_ecc_1bit_err" },
{ .int_msk = BIT(4), .msg = "dae_fsm_hbeat_err" },
};
static inline bool dae_is_support(struct hisi_qm *qm)
{
if (test_bit(QM_SUPPORT_DAE, &qm->caps))
return true;
return false;
}
int hisi_dae_set_user_domain(struct hisi_qm *qm)
{
u32 val;
int ret;
if (!dae_is_support(qm))
return 0;
val = readl(qm->io_base + DAE_MEM_START_OFFSET);
val |= DAE_MEM_START_MASK;
writel(val, qm->io_base + DAE_MEM_START_OFFSET);
ret = readl_relaxed_poll_timeout(qm->io_base + DAE_MEM_DONE_OFFSET, val,
val & DAE_MEM_DONE_MASK,
DAE_REG_RD_INTVRL_US, DAE_REG_RD_TMOUT_US);
if (ret)
pci_err(qm->pdev, "failed to init dae memory!\n");
return ret;
}
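hisi_dae_set_user_domain() above follows a common "kick, then poll for done" handshake: write a start bit, then wait for the hardware to raise a done bit before a timeout expires. A minimal sketch of that pattern with hypothetical register offsets.
/* Illustrative only; needs <linux/iopoll.h>. The two offsets are hypothetical. */
#define MY_START_OFFSET		0x40
#define MY_DONE_OFFSET		0x44

static int sketch_kick_and_wait(void __iomem *base)
{
	u32 val;

	writel(readl(base + MY_START_OFFSET) | BIT(0), base + MY_START_OFFSET);

	/* Poll the done bit every 10us, giving up after one second. */
	return readl_relaxed_poll_timeout(base + MY_DONE_OFFSET, val,
					  val & BIT(0), 10, USEC_PER_SEC);
}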
int hisi_dae_set_alg(struct hisi_qm *qm)
{
size_t len;
if (!dae_is_support(qm))
return 0;
if (!qm->uacce)
return 0;
len = strlen(qm->uacce->algs);
/* A line break may be required */
if (len + strlen(DAE_ALG_NAME) + 1 >= QM_DEV_ALG_MAX_LEN) {
pci_err(qm->pdev, "algorithm name is too long!\n");
return -EINVAL;
}
if (len)
strcat((char *)qm->uacce->algs, "\n");
strcat((char *)qm->uacce->algs, DAE_ALG_NAME);
return 0;
}
static void hisi_dae_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
{
u32 axi_val, err_val;
axi_val = readl(qm->io_base + DAE_AXI_CFG_OFFSET);
if (enable) {
axi_val |= DAE_AXI_SHUTDOWN_MASK;
err_val = DAE_ERR_SHUTDOWN_MASK;
} else {
axi_val &= ~DAE_AXI_SHUTDOWN_MASK;
err_val = 0;
}
writel(axi_val, qm->io_base + DAE_AXI_CFG_OFFSET);
writel(err_val, qm->io_base + DAE_ERR_SHUTDOWN_OFFSET);
}
void hisi_dae_hw_error_enable(struct hisi_qm *qm)
{
if (!dae_is_support(qm))
return;
/* clear dae hw error source if having */
writel(DAE_ERR_ENABLE_MASK, qm->io_base + DAE_ERR_SOURCE_OFFSET);
/* configure error type */
writel(DAE_ERR_CE_MASK, qm->io_base + DAE_ERR_CE_OFFSET);
writel(DAE_ERR_NFE_MASK, qm->io_base + DAE_ERR_NFE_OFFSET);
writel(DAE_ERR_FE_MASK, qm->io_base + DAE_ERR_FE_OFFSET);
hisi_dae_master_ooo_ctrl(qm, true);
/* enable dae hw error interrupts */
writel(DAE_ERR_ENABLE_MASK, qm->io_base + DAE_ERR_ENABLE_OFFSET);
}
void hisi_dae_hw_error_disable(struct hisi_qm *qm)
{
if (!dae_is_support(qm))
return;
writel(0, qm->io_base + DAE_ERR_ENABLE_OFFSET);
hisi_dae_master_ooo_ctrl(qm, false);
}
static u32 hisi_dae_get_hw_err_status(struct hisi_qm *qm)
{
return readl(qm->io_base + DAE_ERR_STATUS_OFFSET);
}
static void hisi_dae_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
if (!dae_is_support(qm))
return;
writel(err_sts, qm->io_base + DAE_ERR_SOURCE_OFFSET);
}
static void hisi_dae_disable_error_report(struct hisi_qm *qm, u32 err_type)
{
writel(DAE_ERR_NFE_MASK & (~err_type), qm->io_base + DAE_ERR_NFE_OFFSET);
}
static void hisi_dae_log_hw_error(struct hisi_qm *qm, u32 err_type)
{
const struct hisi_dae_hw_error *err = dae_hw_error;
struct device *dev = &qm->pdev->dev;
u32 ecc_info;
size_t i;
for (i = 0; i < ARRAY_SIZE(dae_hw_error); i++) {
err = &dae_hw_error[i];
if (!(err->int_msk & err_type))
continue;
dev_err(dev, "%s [error status=0x%x] found\n",
err->msg, err->int_msk);
if (err->int_msk & DAE_ECC_MBIT_MASK) {
ecc_info = readl(qm->io_base + DAE_ECC_INFO_OFFSET);
dev_err(dev, "dae multi ecc sram info 0x%x\n", ecc_info);
}
}
}
enum acc_err_result hisi_dae_get_err_result(struct hisi_qm *qm)
{
u32 err_status;
if (!dae_is_support(qm))
return ACC_ERR_NONE;
err_status = hisi_dae_get_hw_err_status(qm);
if (!err_status)
return ACC_ERR_NONE;
hisi_dae_log_hw_error(qm, err_status);
if (err_status & DAE_ERR_NFE_MASK) {
/* Disable the same error reporting until device is recovered. */
hisi_dae_disable_error_report(qm, err_status);
return ACC_ERR_NEED_RESET;
}
hisi_dae_clear_hw_err_status(qm, err_status);
return ACC_ERR_RECOVERED;
}
bool hisi_dae_dev_is_abnormal(struct hisi_qm *qm)
{
u32 err_status;
if (!dae_is_support(qm))
return false;
err_status = hisi_dae_get_hw_err_status(qm);
if (err_status & DAE_ERR_NFE_MASK)
return true;
return false;
}
int hisi_dae_close_axi_master_ooo(struct hisi_qm *qm)
{
u32 val;
int ret;
if (!dae_is_support(qm))
return 0;
val = readl(qm->io_base + DAE_AM_CTRL_GLOBAL_OFFSET);
val |= BIT(0);
writel(val, qm->io_base + DAE_AM_CTRL_GLOBAL_OFFSET);
ret = readl_relaxed_poll_timeout(qm->io_base + DAE_AM_RETURN_OFFSET,
val, (val == DAE_AM_RETURN_MASK),
DAE_REG_RD_INTVRL_US, DAE_REG_RD_TMOUT_US);
if (ret)
dev_err(&qm->pdev->dev, "failed to close dae axi ooo!\n");
return ret;
}
void hisi_dae_open_axi_master_ooo(struct hisi_qm *qm)
{
u32 val;
if (!dae_is_support(qm))
return;
val = readl(qm->io_base + DAE_AXI_CFG_OFFSET);
writel(val & ~DAE_AXI_SHUTDOWN_EN_MASK, qm->io_base + DAE_AXI_CFG_OFFSET);
writel(val | DAE_AXI_SHUTDOWN_EN_MASK, qm->io_base + DAE_AXI_CFG_OFFSET);
}

View File

@ -103,4 +103,12 @@ int zip_create_qps(struct hisi_qp **qps, int qp_num, int node);
int hisi_zip_register_to_crypto(struct hisi_qm *qm);
void hisi_zip_unregister_from_crypto(struct hisi_qm *qm);
bool hisi_zip_alg_support(struct hisi_qm *qm, u32 alg);
int hisi_dae_set_user_domain(struct hisi_qm *qm);
int hisi_dae_set_alg(struct hisi_qm *qm);
void hisi_dae_hw_error_disable(struct hisi_qm *qm);
void hisi_dae_hw_error_enable(struct hisi_qm *qm);
void hisi_dae_open_axi_master_ooo(struct hisi_qm *qm);
int hisi_dae_close_axi_master_ooo(struct hisi_qm *qm);
bool hisi_dae_dev_is_abnormal(struct hisi_qm *qm);
enum acc_err_result hisi_dae_get_err_result(struct hisi_qm *qm);
#endif

View File

@ -582,7 +582,7 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
hisi_zip_enable_clock_gate(qm);
return 0;
return hisi_dae_set_user_domain(qm);
}
static void hisi_zip_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
@ -631,6 +631,8 @@ static void hisi_zip_hw_error_enable(struct hisi_qm *qm)
/* enable ZIP hw error interrupts */
writel(0, qm->io_base + HZIP_CORE_INT_MASK_REG);
hisi_dae_hw_error_enable(qm);
}
static void hisi_zip_hw_error_disable(struct hisi_qm *qm)
@ -643,6 +645,8 @@ static void hisi_zip_hw_error_disable(struct hisi_qm *qm)
writel(ce | nfe | HZIP_CORE_INT_RAS_FE_ENB_MASK, qm->io_base + HZIP_CORE_INT_MASK_REG);
hisi_zip_master_ooo_ctrl(qm, false);
hisi_dae_hw_error_disable(qm);
}
static inline struct hisi_qm *file_to_qm(struct ctrl_debug_file *file)
@ -1129,6 +1133,8 @@ static void hisi_zip_open_axi_master_ooo(struct hisi_qm *qm)
writel(val | HZIP_AXI_SHUTDOWN_ENABLE,
qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
hisi_dae_open_axi_master_ooo(qm);
}
static void hisi_zip_close_axi_master_ooo(struct hisi_qm *qm)
@ -1147,8 +1153,11 @@ static void hisi_zip_close_axi_master_ooo(struct hisi_qm *qm)
static enum acc_err_result hisi_zip_get_err_result(struct hisi_qm *qm)
{
enum acc_err_result zip_result = ACC_ERR_NONE;
enum acc_err_result dae_result;
u32 err_status;
/* Get device hardware new error status */
err_status = hisi_zip_get_hw_err_status(qm);
if (err_status) {
if (err_status & qm->err_info.ecc_2bits_mask)
@ -1159,11 +1168,32 @@ static enum acc_err_result hisi_zip_get_err_result(struct hisi_qm *qm)
/* Disable the same error reporting until device is recovered. */
hisi_zip_disable_error_report(qm, err_status);
return ACC_ERR_NEED_RESET;
}
} else {
hisi_zip_clear_hw_err_status(qm, err_status);
}
}
return ACC_ERR_RECOVERED;
dae_result = hisi_dae_get_err_result(qm);
return (zip_result == ACC_ERR_NEED_RESET ||
dae_result == ACC_ERR_NEED_RESET) ?
ACC_ERR_NEED_RESET : ACC_ERR_RECOVERED;
}
static bool hisi_zip_dev_is_abnormal(struct hisi_qm *qm)
{
u32 err_status;
err_status = hisi_zip_get_hw_err_status(qm);
if (err_status & qm->err_info.dev_shutdown_mask)
return true;
return hisi_dae_dev_is_abnormal(qm);
}
static int hisi_zip_set_priv_status(struct hisi_qm *qm)
{
return hisi_dae_close_axi_master_ooo(qm);
}
static void hisi_zip_err_info_init(struct hisi_qm *qm)
@ -1200,6 +1230,8 @@ static const struct hisi_qm_err_ini hisi_zip_err_ini = {
.show_last_dfx_regs = hisi_zip_show_last_dfx_regs,
.err_info_init = hisi_zip_err_info_init,
.get_err_result = hisi_zip_get_err_result,
.set_priv_status = hisi_zip_set_priv_status,
.dev_is_abnormal = hisi_zip_dev_is_abnormal,
};
static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
@ -1301,17 +1333,24 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
ret = zip_pre_store_cap_reg(qm);
if (ret) {
pci_err(qm->pdev, "Failed to pre-store capability registers!\n");
hisi_qm_uninit(qm);
return ret;
goto err_qm_uninit;
}
alg_msk = qm->cap_tables.dev_cap_table[ZIP_ALG_BITMAP].cap_val;
ret = hisi_qm_set_algs(qm, alg_msk, zip_dev_algs, ARRAY_SIZE(zip_dev_algs));
if (ret) {
pci_err(qm->pdev, "Failed to set zip algs!\n");
hisi_qm_uninit(qm);
goto err_qm_uninit;
}
ret = hisi_dae_set_alg(qm);
if (ret)
goto err_qm_uninit;
return 0;
err_qm_uninit:
hisi_qm_uninit(qm);
return ret;
}

View File

@ -173,7 +173,7 @@ static int set_iaa_sync_mode(const char *name)
async_mode = false;
use_irq = false;
} else if (sysfs_streq(name, "async")) {
async_mode = true;
async_mode = false;
use_irq = false;
} else if (sysfs_streq(name, "async_irq")) {
async_mode = true;

View File

@ -471,6 +471,7 @@ static int init_ixp_crypto(struct device *dev)
return -ENODEV;
}
npe_id = npe_spec.args[0];
of_node_put(npe_spec.np);
ret = of_parse_phandle_with_fixed_args(np, "queue-rx", 1, 0,
&queue_spec);
@ -479,6 +480,7 @@ static int init_ixp_crypto(struct device *dev)
return -ENODEV;
}
recv_qid = queue_spec.args[0];
of_node_put(queue_spec.np);
ret = of_parse_phandle_with_fixed_args(np, "queue-txready", 1, 0,
&queue_spec);
@ -487,6 +489,7 @@ static int init_ixp_crypto(struct device *dev)
return -ENODEV;
}
send_qid = queue_spec.args[0];
of_node_put(queue_spec.np);
} else {
/*
* Hardcoded engine when using platform data, this goes away

View File

@ -1,96 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* n2_asm.S: Hypervisor calls for NCS support.
*
* Copyright (C) 2009 David S. Miller <davem@davemloft.net>
*/
#include <linux/linkage.h>
#include <asm/hypervisor.h>
#include "n2_core.h"
/* o0: queue type
* o1: RA of queue
* o2: num entries in queue
* o3: address of queue handle return
*/
ENTRY(sun4v_ncs_qconf)
mov HV_FAST_NCS_QCONF, %o5
ta HV_FAST_TRAP
stx %o1, [%o3]
retl
nop
ENDPROC(sun4v_ncs_qconf)
/* %o0: queue handle
* %o1: address of queue type return
* %o2: address of queue base address return
* %o3: address of queue num entries return
*/
ENTRY(sun4v_ncs_qinfo)
mov %o1, %g1
mov %o2, %g2
mov %o3, %g3
mov HV_FAST_NCS_QINFO, %o5
ta HV_FAST_TRAP
stx %o1, [%g1]
stx %o2, [%g2]
stx %o3, [%g3]
retl
nop
ENDPROC(sun4v_ncs_qinfo)
/* %o0: queue handle
* %o1: address of head offset return
*/
ENTRY(sun4v_ncs_gethead)
mov %o1, %o2
mov HV_FAST_NCS_GETHEAD, %o5
ta HV_FAST_TRAP
stx %o1, [%o2]
retl
nop
ENDPROC(sun4v_ncs_gethead)
/* %o0: queue handle
* %o1: address of tail offset return
*/
ENTRY(sun4v_ncs_gettail)
mov %o1, %o2
mov HV_FAST_NCS_GETTAIL, %o5
ta HV_FAST_TRAP
stx %o1, [%o2]
retl
nop
ENDPROC(sun4v_ncs_gettail)
/* %o0: queue handle
* %o1: new tail offset
*/
ENTRY(sun4v_ncs_settail)
mov HV_FAST_NCS_SETTAIL, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_ncs_settail)
/* %o0: queue handle
* %o1: address of devino return
*/
ENTRY(sun4v_ncs_qhandle_to_devino)
mov %o1, %o2
mov HV_FAST_NCS_QHANDLE_TO_DEVINO, %o5
ta HV_FAST_TRAP
stx %o1, [%o2]
retl
nop
ENDPROC(sun4v_ncs_qhandle_to_devino)
/* %o0: queue handle
* %o1: new head offset
*/
ENTRY(sun4v_ncs_sethead_marker)
mov HV_FAST_NCS_SETHEAD_MARKER, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_ncs_sethead_marker)

File diff suppressed because it is too large

View File

@ -1,232 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _N2_CORE_H
#define _N2_CORE_H
#ifndef __ASSEMBLY__
struct ino_blob {
u64 intr;
u64 ino;
};
struct spu_mdesc_info {
u64 cfg_handle;
struct ino_blob *ino_table;
int num_intrs;
};
struct n2_crypto {
struct spu_mdesc_info cwq_info;
struct list_head cwq_list;
};
struct n2_mau {
struct spu_mdesc_info mau_info;
struct list_head mau_list;
};
#define CWQ_ENTRY_SIZE 64
#define CWQ_NUM_ENTRIES 64
#define MAU_ENTRY_SIZE 64
#define MAU_NUM_ENTRIES 64
struct cwq_initial_entry {
u64 control;
u64 src_addr;
u64 auth_key_addr;
u64 auth_iv_addr;
u64 final_auth_state_addr;
u64 enc_key_addr;
u64 enc_iv_addr;
u64 dest_addr;
};
struct cwq_ext_entry {
u64 len;
u64 src_addr;
u64 resv1;
u64 resv2;
u64 resv3;
u64 resv4;
u64 resv5;
u64 resv6;
};
struct cwq_final_entry {
u64 control;
u64 src_addr;
u64 resv1;
u64 resv2;
u64 resv3;
u64 resv4;
u64 resv5;
u64 resv6;
};
#define CONTROL_LEN 0x000000000000ffffULL
#define CONTROL_LEN_SHIFT 0
#define CONTROL_HMAC_KEY_LEN 0x0000000000ff0000ULL
#define CONTROL_HMAC_KEY_LEN_SHIFT 16
#define CONTROL_ENC_TYPE 0x00000000ff000000ULL
#define CONTROL_ENC_TYPE_SHIFT 24
#define ENC_TYPE_ALG_RC4_STREAM 0x00ULL
#define ENC_TYPE_ALG_RC4_NOSTREAM 0x04ULL
#define ENC_TYPE_ALG_DES 0x08ULL
#define ENC_TYPE_ALG_3DES 0x0cULL
#define ENC_TYPE_ALG_AES128 0x10ULL
#define ENC_TYPE_ALG_AES192 0x14ULL
#define ENC_TYPE_ALG_AES256 0x18ULL
#define ENC_TYPE_ALG_RESERVED 0x1cULL
#define ENC_TYPE_ALG_MASK 0x1cULL
#define ENC_TYPE_CHAINING_ECB 0x00ULL
#define ENC_TYPE_CHAINING_CBC 0x01ULL
#define ENC_TYPE_CHAINING_CFB 0x02ULL
#define ENC_TYPE_CHAINING_COUNTER 0x03ULL
#define ENC_TYPE_CHAINING_MASK 0x03ULL
#define CONTROL_AUTH_TYPE 0x0000001f00000000ULL
#define CONTROL_AUTH_TYPE_SHIFT 32
#define AUTH_TYPE_RESERVED 0x00ULL
#define AUTH_TYPE_MD5 0x01ULL
#define AUTH_TYPE_SHA1 0x02ULL
#define AUTH_TYPE_SHA256 0x03ULL
#define AUTH_TYPE_CRC32 0x04ULL
#define AUTH_TYPE_HMAC_MD5 0x05ULL
#define AUTH_TYPE_HMAC_SHA1 0x06ULL
#define AUTH_TYPE_HMAC_SHA256 0x07ULL
#define AUTH_TYPE_TCP_CHECKSUM 0x08ULL
#define AUTH_TYPE_SSL_HMAC_MD5 0x09ULL
#define AUTH_TYPE_SSL_HMAC_SHA1 0x0aULL
#define AUTH_TYPE_SSL_HMAC_SHA256 0x0bULL
#define CONTROL_STRAND 0x000000e000000000ULL
#define CONTROL_STRAND_SHIFT 37
#define CONTROL_HASH_LEN 0x0000ff0000000000ULL
#define CONTROL_HASH_LEN_SHIFT 40
#define CONTROL_INTERRUPT 0x0001000000000000ULL
#define CONTROL_STORE_FINAL_AUTH_STATE 0x0002000000000000ULL
#define CONTROL_RESERVED 0x001c000000000000ULL
#define CONTROL_HV_DONE 0x0004000000000000ULL
#define CONTROL_HV_PROTOCOL_ERROR 0x0008000000000000ULL
#define CONTROL_HV_HARDWARE_ERROR 0x0010000000000000ULL
#define CONTROL_END_OF_BLOCK 0x0020000000000000ULL
#define CONTROL_START_OF_BLOCK 0x0040000000000000ULL
#define CONTROL_ENCRYPT 0x0080000000000000ULL
#define CONTROL_OPCODE 0xff00000000000000ULL
#define CONTROL_OPCODE_SHIFT 56
#define OPCODE_INPLACE_BIT 0x80ULL
#define OPCODE_SSL_KEYBLOCK 0x10ULL
#define OPCODE_COPY 0x20ULL
#define OPCODE_ENCRYPT 0x40ULL
#define OPCODE_AUTH_MAC 0x41ULL
#endif /* !(__ASSEMBLY__) */
/* NCS v2.0 hypervisor interfaces */
#define HV_NCS_QTYPE_MAU 0x01
#define HV_NCS_QTYPE_CWQ 0x02
/* ncs_qconf()
* TRAP: HV_FAST_TRAP
* FUNCTION: HV_FAST_NCS_QCONF
* ARG0: Queue type (HV_NCS_QTYPE_{MAU,CWQ})
* ARG1: Real address of queue, or handle for unconfigure
* ARG2: Number of entries in queue, zero for unconfigure
* RET0: status
* RET1: queue handle
*
* Configure a queue in the stream processing unit.
*
* The real address given as the base must be 64-byte
* aligned.
*
* The queue size can range from a minimum of 2 to a maximum
* of 64. The queue size must be a power of two.
*
* To unconfigure a queue, specify a length of zero and place
* the queue handle into ARG1.
*
* On configure success the hypervisor will set the FIRST, HEAD,
* and TAIL registers to the address of the first entry in the
* queue. The LAST register will be set to point to the last
* entry in the queue.
*/
#define HV_FAST_NCS_QCONF 0x111
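For reference, a hedged sketch of how the (now removed) ncs_qconf() wrapper was typically called to configure a 64-entry CWQ; the allocation choice and the HV_EOK status check are assumptions about the caller, not part of this header.
/* Illustrative only; needs <linux/slab.h> plus this header. A 4KB kzalloc is 64-byte aligned. */
static int sketch_cwq_config(unsigned long *handle)
{
	void *q = kzalloc(CWQ_NUM_ENTRIES * CWQ_ENTRY_SIZE, GFP_KERNEL);
	unsigned long hv_ret;

	if (!q)
		return -ENOMEM;

	hv_ret = sun4v_ncs_qconf(HV_NCS_QTYPE_CWQ, __pa(q), CWQ_NUM_ENTRIES, handle);
	if (hv_ret != HV_EOK) {		/* assumed HV_EOK status from asm/hypervisor.h */
		kfree(q);
		return -EIO;
	}
	return 0;
}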
/* ncs_qinfo()
* TRAP: HV_FAST_TRAP
* FUNCTION: HV_FAST_NCS_QINFO
* ARG0: Queue handle
* RET0: status
* RET1: Queue type (HV_NCS_QTYPE_{MAU,CWQ})
* RET2: Queue base address
* RET3: Number of entries
*/
#define HV_FAST_NCS_QINFO 0x112
/* ncs_gethead()
* TRAP: HV_FAST_TRAP
* FUNCTION: HV_FAST_NCS_GETHEAD
* ARG0: Queue handle
* RET0: status
* RET1: queue head offset
*/
#define HV_FAST_NCS_GETHEAD 0x113
/* ncs_gettail()
* TRAP: HV_FAST_TRAP
* FUNCTION: HV_FAST_NCS_GETTAIL
* ARG0: Queue handle
* RET0: status
* RET1: queue tail offset
*/
#define HV_FAST_NCS_GETTAIL 0x114
/* ncs_settail()
* TRAP: HV_FAST_TRAP
* FUNCTION: HV_FAST_NCS_SETTAIL
* ARG0: Queue handle
* ARG1: New tail offset
* RET0: status
*/
#define HV_FAST_NCS_SETTAIL 0x115
/* ncs_qhandle_to_devino()
* TRAP: HV_FAST_TRAP
* FUNCTION: HV_FAST_NCS_QHANDLE_TO_DEVINO
* ARG0: Queue handle
* RET0: status
* RET1: devino
*/
#define HV_FAST_NCS_QHANDLE_TO_DEVINO 0x116
/* ncs_sethead_marker()
* TRAP: HV_FAST_TRAP
* FUNCTION: HV_FAST_NCS_SETHEAD_MARKER
* ARG0: Queue handle
* ARG1: New head offset
* RET0: status
*/
#define HV_FAST_NCS_SETHEAD_MARKER 0x117
#ifndef __ASSEMBLY__
extern unsigned long sun4v_ncs_qconf(unsigned long queue_type,
unsigned long queue_ra,
unsigned long num_entries,
unsigned long *qhandle);
extern unsigned long sun4v_ncs_qinfo(unsigned long qhandle,
unsigned long *queue_type,
unsigned long *queue_ra,
unsigned long *num_entries);
extern unsigned long sun4v_ncs_gethead(unsigned long qhandle,
unsigned long *head);
extern unsigned long sun4v_ncs_gettail(unsigned long qhandle,
unsigned long *tail);
extern unsigned long sun4v_ncs_settail(unsigned long qhandle,
unsigned long tail);
extern unsigned long sun4v_ncs_qhandle_to_devino(unsigned long qhandle,
unsigned long *devino);
extern unsigned long sun4v_ncs_sethead_marker(unsigned long qhandle,
unsigned long head);
#endif /* !(__ASSEMBLY__) */
#endif /* _N2_CORE_H */

View File

@ -786,7 +786,7 @@ static int qce_aead_register_one(const struct qce_aead_def *def, struct qce_devi
alg->init = qce_aead_init;
alg->exit = qce_aead_exit;
alg->base.cra_priority = 300;
alg->base.cra_priority = 275;
alg->base.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY |

View File

@ -3,14 +3,15 @@
* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
*/
#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
@ -37,9 +38,10 @@ static const struct qce_algo_ops *qce_ops[] = {
#endif
};
static void qce_unregister_algs(struct qce_device *qce)
static void qce_unregister_algs(void *data)
{
const struct qce_algo_ops *ops;
struct qce_device *qce = data;
int i;
for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
@ -48,19 +50,22 @@ static void qce_unregister_algs(struct qce_device *qce)
}
}
static int qce_register_algs(struct qce_device *qce)
static int devm_qce_register_algs(struct qce_device *qce)
{
const struct qce_algo_ops *ops;
int i, ret = -ENODEV;
int i, j, ret = -ENODEV;
for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
ops = qce_ops[i];
ret = ops->register_algs(qce);
if (ret)
break;
if (ret) {
for (j = i - 1; j >= 0; j--)
ops->unregister_algs(qce);
return ret;
}
}
return ret;
return devm_add_action_or_reset(qce->dev, qce_unregister_algs, qce);
}
static int qce_handle_request(struct crypto_async_request *async_req)
@ -84,55 +89,49 @@ static int qce_handle_queue(struct qce_device *qce,
struct crypto_async_request *req)
{
struct crypto_async_request *async_req, *backlog;
unsigned long flags;
int ret = 0, err;
spin_lock_irqsave(&qce->lock, flags);
scoped_guard(mutex, &qce->lock) {
if (req)
ret = crypto_enqueue_request(&qce->queue, req);
/* busy, do not dequeue request */
if (qce->req) {
spin_unlock_irqrestore(&qce->lock, flags);
if (qce->req)
return ret;
}
backlog = crypto_get_backlog(&qce->queue);
async_req = crypto_dequeue_request(&qce->queue);
if (async_req)
qce->req = async_req;
spin_unlock_irqrestore(&qce->lock, flags);
}
if (!async_req)
return ret;
if (backlog) {
spin_lock_bh(&qce->lock);
scoped_guard(mutex, &qce->lock)
crypto_request_complete(backlog, -EINPROGRESS);
spin_unlock_bh(&qce->lock);
}
err = qce_handle_request(async_req);
if (err) {
qce->result = err;
tasklet_schedule(&qce->done_tasklet);
schedule_work(&qce->done_work);
}
return ret;
}
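The rewritten queue handler above leans on scoped_guard() from <linux/cleanup.h>: the mutex is held for exactly the enclosed block and released on every exit path, including early returns. A minimal sketch of the same pattern on a hypothetical device structure.
/* Illustrative only; needs <linux/cleanup.h> and <linux/mutex.h>. struct my_dev is hypothetical. */
static int sketch_bump(struct my_dev *mydev)
{
	scoped_guard(mutex, &mydev->lock) {
		if (mydev->stopped)
			return -EBUSY;	/* mutex dropped automatically here */
		mydev->count++;
	}

	return 0;			/* ... and here, at the end of the guarded block */
}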
static void qce_tasklet_req_done(unsigned long data)
static void qce_req_done_work(struct work_struct *work)
{
struct qce_device *qce = (struct qce_device *)data;
struct qce_device *qce = container_of(work, struct qce_device,
done_work);
struct crypto_async_request *req;
unsigned long flags;
spin_lock_irqsave(&qce->lock, flags);
scoped_guard(mutex, &qce->lock) {
req = qce->req;
qce->req = NULL;
spin_unlock_irqrestore(&qce->lock, flags);
}
if (req)
crypto_request_complete(req, qce->result);
@ -149,7 +148,7 @@ static int qce_async_request_enqueue(struct qce_device *qce,
static void qce_async_request_done(struct qce_device *qce, int ret)
{
qce->result = ret;
tasklet_schedule(&qce->done_tasklet);
schedule_work(&qce->done_work);
}
static int qce_check_version(struct qce_device *qce)
@ -209,15 +208,15 @@ static int qce_crypto_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
qce->core = devm_clk_get_optional(qce->dev, "core");
qce->core = devm_clk_get_optional_enabled(qce->dev, "core");
if (IS_ERR(qce->core))
return PTR_ERR(qce->core);
qce->iface = devm_clk_get_optional(qce->dev, "iface");
qce->iface = devm_clk_get_optional_enabled(qce->dev, "iface");
if (IS_ERR(qce->iface))
return PTR_ERR(qce->iface);
qce->bus = devm_clk_get_optional(qce->dev, "bus");
qce->bus = devm_clk_get_optional_enabled(qce->dev, "bus");
if (IS_ERR(qce->bus))
return PTR_ERR(qce->bus);
@ -229,64 +228,25 @@ static int qce_crypto_probe(struct platform_device *pdev)
if (ret)
return ret;
ret = clk_prepare_enable(qce->core);
ret = devm_qce_dma_request(qce->dev, &qce->dma);
if (ret)
goto err_mem_path_disable;
ret = clk_prepare_enable(qce->iface);
if (ret)
goto err_clks_core;
ret = clk_prepare_enable(qce->bus);
if (ret)
goto err_clks_iface;
ret = qce_dma_request(qce->dev, &qce->dma);
if (ret)
goto err_clks;
return ret;
ret = qce_check_version(qce);
if (ret)
goto err_clks;
return ret;
spin_lock_init(&qce->lock);
tasklet_init(&qce->done_tasklet, qce_tasklet_req_done,
(unsigned long)qce);
ret = devm_mutex_init(qce->dev, &qce->lock);
if (ret)
return ret;
INIT_WORK(&qce->done_work, qce_req_done_work);
crypto_init_queue(&qce->queue, QCE_QUEUE_LENGTH);
qce->async_req_enqueue = qce_async_request_enqueue;
qce->async_req_done = qce_async_request_done;
ret = qce_register_algs(qce);
if (ret)
goto err_dma;
return 0;
err_dma:
qce_dma_release(&qce->dma);
err_clks:
clk_disable_unprepare(qce->bus);
err_clks_iface:
clk_disable_unprepare(qce->iface);
err_clks_core:
clk_disable_unprepare(qce->core);
err_mem_path_disable:
icc_set_bw(qce->mem_path, 0, 0);
return ret;
}
static void qce_crypto_remove(struct platform_device *pdev)
{
struct qce_device *qce = platform_get_drvdata(pdev);
tasklet_kill(&qce->done_tasklet);
qce_unregister_algs(qce);
qce_dma_release(&qce->dma);
clk_disable_unprepare(qce->bus);
clk_disable_unprepare(qce->iface);
clk_disable_unprepare(qce->core);
return devm_qce_register_algs(qce);
}
static const struct of_device_id qce_crypto_of_match[] = {
@ -299,7 +259,6 @@ MODULE_DEVICE_TABLE(of, qce_crypto_of_match);
static struct platform_driver qce_crypto_driver = {
.probe = qce_crypto_probe,
.remove = qce_crypto_remove,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = qce_crypto_of_match,

View File

@ -6,13 +6,16 @@
#ifndef _CORE_H_
#define _CORE_H_
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include "dma.h"
/**
* struct qce_device - crypto engine device structure
* @queue: crypto request queue
* @lock: the lock protects queue and req
* @done_tasklet: done tasklet object
* @done_work: workqueue context
* @req: current active request
* @result: result of current transform
* @base: virtual IO base
@ -28,8 +31,8 @@
*/
struct qce_device {
struct crypto_queue queue;
spinlock_t lock;
struct tasklet_struct done_tasklet;
struct mutex lock;
struct work_struct done_work;
struct crypto_async_request *req;
int result;
void __iomem *base;

View File

@ -3,12 +3,22 @@
* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
*/
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <crypto/scatterwalk.h>
#include "dma.h"
int qce_dma_request(struct device *dev, struct qce_dma_data *dma)
static void qce_dma_release(void *data)
{
struct qce_dma_data *dma = data;
dma_release_channel(dma->txchan);
dma_release_channel(dma->rxchan);
kfree(dma->result_buf);
}
int devm_qce_dma_request(struct device *dev, struct qce_dma_data *dma)
{
int ret;
@ -31,7 +41,8 @@ int qce_dma_request(struct device *dev, struct qce_dma_data *dma)
dma->ignore_buf = dma->result_buf + QCE_RESULT_BUF_SZ;
return 0;
return devm_add_action_or_reset(dev, qce_dma_release, dma);
error_nomem:
dma_release_channel(dma->rxchan);
error_rx:
@ -39,13 +50,6 @@ error_rx:
return ret;
}
void qce_dma_release(struct qce_dma_data *dma)
{
dma_release_channel(dma->txchan);
dma_release_channel(dma->rxchan);
kfree(dma->result_buf);
}
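The conversion above hands cleanup to devres: devm_qce_dma_request() registers qce_dma_release() with devm_add_action_or_reset(), so the release runs automatically on driver detach, or immediately if the action cannot be registered. Below is a minimal sketch of that pattern for a hypothetical buffer; in practice devm_kzalloc() would cover this trivial case, but the action pattern pays off when teardown is more than a kfree(), as with the DMA channels here.
/* Illustrative only; needs <linux/device.h> and <linux/slab.h>. */
static void sketch_buf_release(void *data)
{
	kfree(data);
}

static int sketch_buf_alloc(struct device *dev, void **out)
{
	void *buf = kmalloc(4096, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	*out = buf;
	/* Frees buf on detach, or right away if adding the action fails. */
	return devm_add_action_or_reset(dev, sketch_buf_release, buf);
}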
struct scatterlist *
qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl,
unsigned int max_len)

View File

@ -34,8 +34,7 @@ struct qce_dma_data {
void *ignore_buf;
};
int qce_dma_request(struct device *dev, struct qce_dma_data *dma);
void qce_dma_release(struct qce_dma_data *dma);
int devm_qce_dma_request(struct device *dev, struct qce_dma_data *dma);
int qce_dma_prep_sgs(struct qce_dma_data *dma, struct scatterlist *sg_in,
int in_ents, struct scatterlist *sg_out, int out_ents,
dma_async_tx_callback cb, void *cb_param);

View File

@ -482,7 +482,7 @@ static int qce_ahash_register_one(const struct qce_ahash_def *def,
base = &alg->halg.base;
base->cra_blocksize = def->blocksize;
base->cra_priority = 300;
base->cra_priority = 175;
base->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
base->cra_ctxsize = sizeof(struct qce_sha_ctx);
base->cra_alignmask = 0;

View File

@ -461,7 +461,7 @@ static int qce_skcipher_register_one(const struct qce_skcipher_def *def,
alg->encrypt = qce_skcipher_encrypt;
alg->decrypt = qce_skcipher_decrypt;
alg->base.cra_priority = 300;
alg->base.cra_priority = 275;
alg->base.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY;

View File

@ -1750,10 +1750,13 @@ static int tegra_cmac_digest(struct ahash_request *req)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
int ret;
ret = tegra_cmac_init(req);
if (ret)
return ret;
tegra_cmac_init(req);
rctx->task |= SHA_UPDATE | SHA_FINAL;
return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}

View File

@ -615,13 +615,16 @@ static int tegra_sha_digest(struct ahash_request *req)
struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
int ret;
if (ctx->fallback)
return tegra_sha_fallback_digest(req);
tegra_sha_init(req);
rctx->task |= SHA_UPDATE | SHA_FINAL;
ret = tegra_sha_init(req);
if (ret)
return ret;
rctx->task |= SHA_UPDATE | SHA_FINAL;
return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}

View File

@ -158,12 +158,10 @@
64...71 72...79 80...87 88...95 96..103 104.111 112.119 120.127
*/
/* A slow generic version of gf_mul, implemented for lle and bbe
/* A slow generic version of gf_mul, implemented for lle
* It multiplies a and b and puts the result in a */
void gf128mul_lle(be128 *a, const be128 *b);
void gf128mul_bbe(be128 *a, const be128 *b);
/*
* The following functions multiply a field element by x in
* the polynomial field representation. They use 64-bit word operations
@ -224,9 +222,7 @@ struct gf128mul_4k {
};
struct gf128mul_4k *gf128mul_init_4k_lle(const be128 *g);
struct gf128mul_4k *gf128mul_init_4k_bbe(const be128 *g);
void gf128mul_4k_lle(be128 *a, const struct gf128mul_4k *t);
void gf128mul_4k_bbe(be128 *a, const struct gf128mul_4k *t);
void gf128mul_x8_ble(le128 *r, const le128 *x);
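A minimal sketch of how the remaining lle interfaces declared above are used: multiply in place with the slow generic routine, or precompute a 4k table when many multiplications by the same element are needed. The operands are arbitrary; only the call pattern matters here.
/* Illustrative only; needs <crypto/gf128mul.h>. Each call computes r = r * g in GF(2^128), lle convention. */
static void sketch_gf128_lle(be128 *r, const be128 *g)
{
	struct gf128mul_4k *t;

	gf128mul_lle(r, g);		/* slow generic path */

	t = gf128mul_init_4k_lle(g);	/* table-driven path for repeated use of g */
	if (t) {
		gf128mul_4k_lle(r, t);	/* r = r * g via the 4k table */
		gf128mul_free_4k(t);
	}
}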
static inline void gf128mul_free_4k(struct gf128mul_4k *t)
{

View File

@ -12,20 +12,6 @@
#include <crypto/hash.h>
struct ahash_request;
struct scatterlist;
struct crypto_hash_walk {
char *data;
unsigned int offset;
unsigned int flags;
struct page *pg;
unsigned int entrylen;
unsigned int total;
struct scatterlist *sg;
};
struct ahash_instance {
void (*free)(struct ahash_instance *inst);
@ -57,15 +43,6 @@ struct crypto_shash_spawn {
struct crypto_spawn base;
};
int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err);
int crypto_hash_walk_first(struct ahash_request *req,
struct crypto_hash_walk *walk);
static inline int crypto_hash_walk_last(struct crypto_hash_walk *walk)
{
return !(walk->entrylen | walk->total);
}
int crypto_register_ahash(struct ahash_alg *alg);
void crypto_unregister_ahash(struct ahash_alg *alg);
int crypto_register_ahashes(struct ahash_alg *algs, int count);

View File

@ -11,7 +11,6 @@
#include <crypto/algapi.h>
#include <crypto/internal/cipher.h>
#include <crypto/skcipher.h>
#include <linux/list.h>
#include <linux/types.h>
/*
@ -58,12 +57,6 @@ struct crypto_lskcipher_spawn {
struct skcipher_walk {
union {
struct {
struct page *page;
unsigned long offset;
} phys;
struct {
u8 *page;
void *addr;
} virt;
} src, dst;
@ -74,8 +67,6 @@ struct skcipher_walk {
struct scatter_walk out;
unsigned int total;
struct list_head buffers;
u8 *page;
u8 *buffer;
u8 *oiv;
@ -209,13 +200,10 @@ int skcipher_walk_done(struct skcipher_walk *walk, int err);
int skcipher_walk_virt(struct skcipher_walk *walk,
struct skcipher_request *req,
bool atomic);
int skcipher_walk_async(struct skcipher_walk *walk,
struct skcipher_request *req);
int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
struct aead_request *req, bool atomic);
int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
struct aead_request *req, bool atomic);
void skcipher_walk_complete(struct skcipher_walk *walk, int err);
static inline void skcipher_walk_abort(struct skcipher_walk *walk)
{

View File

@ -97,6 +97,8 @@
/* page number for queue file region */
#define QM_DOORBELL_PAGE_NR 1
#define QM_DEV_ALG_MAX_LEN 256
/* uacce mode of the driver */
#define UACCE_MODE_NOUACCE 0 /* don't use uacce */
#define UACCE_MODE_SVA 1 /* use uacce sva mode */
@ -156,6 +158,7 @@ enum qm_cap_bits {
QM_SUPPORT_MB_COMMAND,
QM_SUPPORT_SVA_PREFETCH,
QM_SUPPORT_RPM,
QM_SUPPORT_DAE,
};
struct qm_dev_alg {
@ -266,6 +269,8 @@ struct hisi_qm_err_ini {
void (*show_last_dfx_regs)(struct hisi_qm *qm);
void (*err_info_init)(struct hisi_qm *qm);
enum acc_err_result (*get_err_result)(struct hisi_qm *qm);
bool (*dev_is_abnormal)(struct hisi_qm *qm);
int (*set_priv_status)(struct hisi_qm *qm);
};
struct hisi_qm_cap_info {

View File

@ -970,7 +970,7 @@ static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr,
pinst = kobj2pinst(kobj);
pentry = attr2pentry(attr);
if (pentry->show)
if (pentry->store)
ret = pentry->store(pinst, attr, buf, count);
return ret;

View File

@ -697,7 +697,7 @@ static int __init libaesgcm_init(void)
u8 tagbuf[AES_BLOCK_SIZE];
int plen = aesgcm_tv[i].plen;
struct aesgcm_ctx ctx;
u8 buf[sizeof(ptext12)];
static u8 buf[sizeof(ptext12)];
if (aesgcm_expandkey(&ctx, aesgcm_tv[i].key, aesgcm_tv[i].klen,
aesgcm_tv[i].clen - plen)) {

View File

@ -225,44 +225,6 @@ void gf128mul_lle(be128 *r, const be128 *b)
}
EXPORT_SYMBOL(gf128mul_lle);
void gf128mul_bbe(be128 *r, const be128 *b)
{
be128 p[8];
int i;
p[0] = *r;
for (i = 0; i < 7; ++i)
gf128mul_x_bbe(&p[i + 1], &p[i]);
memset(r, 0, sizeof(*r));
for (i = 0;;) {
u8 ch = ((u8 *)b)[i];
if (ch & 0x80)
be128_xor(r, r, &p[7]);
if (ch & 0x40)
be128_xor(r, r, &p[6]);
if (ch & 0x20)
be128_xor(r, r, &p[5]);
if (ch & 0x10)
be128_xor(r, r, &p[4]);
if (ch & 0x08)
be128_xor(r, r, &p[3]);
if (ch & 0x04)
be128_xor(r, r, &p[2]);
if (ch & 0x02)
be128_xor(r, r, &p[1]);
if (ch & 0x01)
be128_xor(r, r, &p[0]);
if (++i >= 16)
break;
gf128mul_x8_bbe(r);
}
}
EXPORT_SYMBOL(gf128mul_bbe);
/* This version uses 64k bytes of table space.
A 16 byte buffer has to be multiplied by a 16 byte key
value in GF(2^128). If we consider a GF(2^128) value in
@ -380,28 +342,6 @@ out:
}
EXPORT_SYMBOL(gf128mul_init_4k_lle);
struct gf128mul_4k *gf128mul_init_4k_bbe(const be128 *g)
{
struct gf128mul_4k *t;
int j, k;
t = kzalloc(sizeof(*t), GFP_KERNEL);
if (!t)
goto out;
t->t[1] = *g;
for (j = 1; j <= 64; j <<= 1)
gf128mul_x_bbe(&t->t[j + j], &t->t[j]);
for (j = 2; j < 256; j += j)
for (k = 1; k < j; ++k)
be128_xor(&t->t[j + k], &t->t[j], &t->t[k]);
out:
return t;
}
EXPORT_SYMBOL(gf128mul_init_4k_bbe);
void gf128mul_4k_lle(be128 *a, const struct gf128mul_4k *t)
{
u8 *ap = (u8 *)a;
@ -417,20 +357,5 @@ void gf128mul_4k_lle(be128 *a, const struct gf128mul_4k *t)
}
EXPORT_SYMBOL(gf128mul_4k_lle);
void gf128mul_4k_bbe(be128 *a, const struct gf128mul_4k *t)
{
u8 *ap = (u8 *)a;
be128 r[1];
int i = 0;
*r = t->t[ap[0]];
while (++i < 16) {
gf128mul_x8_bbe(r);
be128_xor(r, r, &t->t[ap[i]]);
}
*a = *r;
}
EXPORT_SYMBOL(gf128mul_4k_bbe);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Functions for multiplying elements of GF(2^128)");

View File

@ -584,10 +584,6 @@ static struct bucket_table *rhashtable_insert_one(
*/
rht_assign_locked(bkt, obj);
atomic_inc(&ht->nelems);
if (rht_grow_above_75(ht, tbl))
schedule_work(&ht->run_work);
return NULL;
}
@ -624,6 +620,12 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
data = ERR_CAST(new_tbl);
rht_unlock(tbl, bkt, flags);
if (PTR_ERR(data) == -ENOENT && !new_tbl) {
atomic_inc(&ht->nelems);
if (rht_grow_above_75(ht, tbl))
schedule_work(&ht->run_work);
}
}
} while (!IS_ERR_OR_NULL(new_tbl));