mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
 "Here is the crypto update for 4.3:

  API:
   - the AEAD interface transition is now complete.
   - add top-level skcipher interface.

  Drivers:
   - x86-64 acceleration for chacha20/poly1305.
   - add sunxi-ss Allwinner Security System crypto accelerator.
   - add RSA algorithm to qat driver.
   - add SRIOV support to qat driver.
   - add LS1021A support to caam.
   - add i.MX6 support to caam"

* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (163 commits)
  crypto: algif_aead - fix for multiple operations on AF_ALG sockets
  crypto: qat - enable legacy VFs
  MPI: Fix mpi_read_buffer
  crypto: qat - silence a static checker warning
  crypto: vmx - Fixing opcode issue
  crypto: caam - Use the preferred style for memory allocations
  crypto: caam - Propagate the real error code in caam_probe
  crypto: caam - Fix the error handling in caam_probe
  crypto: caam - fix writing to JQCR_MS when using service interface
  crypto: hash - Add AHASH_REQUEST_ON_STACK
  crypto: testmgr - Use new skcipher interface
  crypto: skcipher - Add top-level skcipher interface
  crypto: cmac - allow usage in FIPS mode
  crypto: sahara - Use dmam_alloc_coherent
  crypto: caam - Add support for LS1021A
  crypto: qat - Don't move data inside output buffer
  crypto: vmx - Fixing GHASH Key issue on little endian
  crypto: vmx - Fixing AES-CTR counter bug
  crypto: null - Add missing Kconfig tristate for NULL2
  crypto: nx - Add forward declaration for struct crypto_aead
  ...
commit d4c90396ed
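One of the changes pulled in above ("crypto: skcipher - Add top-level skcipher interface") gives callers a symmetric-key cipher API that supersedes direct ablkcipher/blkcipher usage. Below is a minimal, illustrative sketch of how a caller might drive it, assuming the function names from include/crypto/skcipher.h as introduced by that commit; error handling is abbreviated and asynchronous completion (-EINPROGRESS/-EBUSY) is not handled, so treat it as a sketch rather than reference code.

/* Hypothetical kernel snippet, not part of this merge.  'buf' must be a
 * multiple of the cipher block size and 'keylen' a valid AES key size. */
#include <crypto/skcipher.h>
#include <linux/scatterlist.h>
#include <linux/err.h>
#include <linux/slab.h>

static int demo_cbc_aes_encrypt(u8 *buf, unsigned int len,
				const u8 *key, unsigned int keylen, u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, buf, len);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);

	/* A real caller must handle -EINPROGRESS/-EBUSY for async tfms. */
	err = crypto_skcipher_encrypt(req);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}

For synchronous on-stack use, SKCIPHER_REQUEST_ON_STACK (and the analogous AHASH_REQUEST_ON_STACK added by "crypto: hash - Add AHASH_REQUEST_ON_STACK" in the same series) can replace the dynamic request allocation.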
@@ -585,7 +585,7 @@ kernel crypto API            |       IPSEC Layer
 +-----------+                              |
 |           |              (1)
 |    aead   | <----------------------------------- esp_output
-|  (seqniv) | ---+
+|  (seqiv)  | ---+
 +-----------+    |
                  | (2)
 +-----------+    |
@@ -1101,7 +1101,7 @@ kernel crypto API            |       Caller
 </para>

 <para>
-[1] http://www.chronox.de/libkcapi.html
+[1] <ulink url="http://www.chronox.de/libkcapi.html">http://www.chronox.de/libkcapi.html</ulink>
 </para>

 </sect1>
@@ -1661,7 +1661,7 @@ read(opfd, out, outlen);
 </para>

 <para>
-[1] http://www.chronox.de/libkcapi.html
+[1] <ulink url="http://www.chronox.de/libkcapi.html">http://www.chronox.de/libkcapi.html</ulink>
 </para>

 </sect1>
@@ -1687,7 +1687,7 @@ read(opfd, out, outlen);
 !Pinclude/linux/crypto.h Block Cipher Algorithm Definitions
 !Finclude/linux/crypto.h crypto_alg
 !Finclude/linux/crypto.h ablkcipher_alg
-!Finclude/linux/crypto.h aead_alg
+!Finclude/crypto/aead.h aead_alg
 !Finclude/linux/crypto.h blkcipher_alg
 !Finclude/linux/crypto.h cipher_alg
 !Finclude/crypto/rng.h rng_alg
@@ -106,6 +106,18 @@ PROPERTIES
          to the interrupt parent to which the child domain
          is being mapped.

+   - clocks
+      Usage: required if SEC 4.0 requires explicit enablement of clocks
+      Value type: <prop_encoded-array>
+      Definition: A list of phandle and clock specifier pairs describing
+          the clocks required for enabling and disabling SEC 4.0.
+
+   - clock-names
+      Usage: required if SEC 4.0 requires explicit enablement of clocks
+      Value type: <string>
+      Definition: A list of clock name strings in the same order as the
+          clocks property.
+
    Note: All other standard properties (see the ePAPR) are allowed
    but are optional.

@@ -120,6 +132,11 @@ EXAMPLE
 		ranges = <0 0x300000 0x10000>;
 		interrupt-parent = <&mpic>;
 		interrupts = <92 2>;
+		clocks = <&clks IMX6QDL_CLK_CAAM_MEM>,
+			 <&clks IMX6QDL_CLK_CAAM_ACLK>,
+			 <&clks IMX6QDL_CLK_CAAM_IPG>,
+			 <&clks IMX6QDL_CLK_EIM_SLOW>;
+		clock-names = "mem", "aclk", "ipg", "emi_slow";
 	};

=====================================================================
23 Documentation/devicetree/bindings/crypto/sun4i-ss.txt Normal file
@@ -0,0 +1,23 @@
* Allwinner Security System found on A20 SoC

Required properties:
- compatible : Should be "allwinner,sun4i-a10-crypto".
- reg: Should contain the Security System register location and length.
- interrupts: Should contain the IRQ line for the Security System.
- clocks : List of clock specifiers, corresponding to ahb and ss.
- clock-names : Name of the functional clock, should be
	* "ahb" : AHB gating clock
	* "mod" : SS controller clock

Optional properties:
 - resets : phandle + reset specifier pair
 - reset-names : must contain "ahb"

Example:
	crypto: crypto-engine@01c15000 {
		compatible = "allwinner,sun4i-a10-crypto";
		reg = <0x01c15000 0x1000>;
		interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
		clocks = <&ahb_gates 5>, <&ss_clk>;
		clock-names = "ahb", "mod";
	};
24 MAINTAINERS
@@ -556,6 +556,12 @@ S:	Maintained
 F:	Documentation/i2c/busses/i2c-ali1563
 F:	drivers/i2c/busses/i2c-ali1563.c

+ALLWINNER SECURITY SYSTEM
+M:	Corentin Labbe <clabbe.montjoie@gmail.com>
+L:	linux-crypto@vger.kernel.org
+S:	Maintained
+F:	drivers/crypto/sunxi-ss/
+
 ALPHA PORT
 M:	Richard Henderson <rth@twiddle.net>
 M:	Ivan Kokshaysky <ink@jurassic.park.msu.ru>
@@ -5078,9 +5084,21 @@ T:	git git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux.git
 S:	Maintained
 F:	arch/ia64/

+IBM Power VMX Cryptographic instructions
+M:	Leonidas S. Barbosa <leosilva@linux.vnet.ibm.com>
+M:	Paulo Flabiano Smorigo <pfsmorigo@linux.vnet.ibm.com>
+L:	linux-crypto@vger.kernel.org
+S:	Supported
+F:	drivers/crypto/vmx/Makefile
+F:	drivers/crypto/vmx/Kconfig
+F:	drivers/crypto/vmx/vmx.c
+F:	drivers/crypto/vmx/aes*
+F:	drivers/crypto/vmx/ghash*
+F:	drivers/crypto/vmx/ppc-xlate.pl
+
 IBM Power in-Nest Crypto Acceleration
 M:	Marcelo Henrique Cerri <mhcerri@linux.vnet.ibm.com>
 M:	Fionnuala Gunter <fin@linux.vnet.ibm.com>
 M:	Leonidas S. Barbosa <leosilva@linux.vnet.ibm.com>
 M:	Paulo Flabiano Smorigo <pfsmorigo@linux.vnet.ibm.com>
 L:	linux-crypto@vger.kernel.org
 S:	Supported
 F:	drivers/crypto/nx/Makefile
@@ -5092,7 +5110,7 @@ F:	drivers/crypto/nx/nx_csbcpb.h
 F:	drivers/crypto/nx/nx_debugfs.h

 IBM Power 842 compression accelerator
-M:	Dan Streetman <ddstreet@us.ibm.com>
+M:	Dan Streetman <ddstreet@ieee.org>
 S:	Supported
 F:	drivers/crypto/nx/Makefile
 F:	drivers/crypto/nx/Kconfig
@@ -836,10 +836,31 @@ aips-bus@02100000 { /* AIPS2 */
 			reg = <0x02100000 0x100000>;
 			ranges;

-			caam@02100000 {
-				reg = <0x02100000 0x40000>;
-				interrupts = <0 105 IRQ_TYPE_LEVEL_HIGH>,
-					     <0 106 IRQ_TYPE_LEVEL_HIGH>;
+			crypto: caam@2100000 {
+				compatible = "fsl,sec-v4.0";
+				fsl,sec-era = <4>;
+				#address-cells = <1>;
+				#size-cells = <1>;
+				reg = <0x2100000 0x10000>;
+				ranges = <0 0x2100000 0x10000>;
+				interrupt-parent = <&intc>;
+				clocks = <&clks IMX6QDL_CLK_CAAM_MEM>,
+					 <&clks IMX6QDL_CLK_CAAM_ACLK>,
+					 <&clks IMX6QDL_CLK_CAAM_IPG>,
+					 <&clks IMX6QDL_CLK_EIM_SLOW>;
+				clock-names = "mem", "aclk", "ipg", "emi_slow";
+
+				sec_jr0: jr0@1000 {
+					compatible = "fsl,sec-v4.0-job-ring";
+					reg = <0x1000 0x1000>;
+					interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
+				};
+
+				sec_jr1: jr1@2000 {
+					compatible = "fsl,sec-v4.0-job-ring";
+					reg = <0x2000 0x1000>;
+					interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
+				};
 			};

 			aipstz@0217c000 { /* AIPSTZ2 */
@@ -738,6 +738,33 @@ aips2: aips-bus@02100000 {
 			reg = <0x02100000 0x100000>;
 			ranges;

+			crypto: caam@2100000 {
+				compatible = "fsl,sec-v4.0";
+				fsl,sec-era = <4>;
+				#address-cells = <1>;
+				#size-cells = <1>;
+				reg = <0x2100000 0x10000>;
+				ranges = <0 0x2100000 0x10000>;
+				interrupt-parent = <&intc>;
+				clocks = <&clks IMX6SX_CLK_CAAM_MEM>,
+					 <&clks IMX6SX_CLK_CAAM_ACLK>,
+					 <&clks IMX6SX_CLK_CAAM_IPG>,
+					 <&clks IMX6SX_CLK_EIM_SLOW>;
+				clock-names = "mem", "aclk", "ipg", "emi_slow";
+
+				sec_jr0: jr0@1000 {
+					compatible = "fsl,sec-v4.0-job-ring";
+					reg = <0x1000 0x1000>;
+					interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
+				};
+
+				sec_jr1: jr1@2000 {
+					compatible = "fsl,sec-v4.0-job-ring";
+					reg = <0x2000 0x1000>;
+					interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
+				};
+			};
+
 			usbotg1: usb@02184000 {
 				compatible = "fsl,imx6sx-usb", "fsl,imx27-usb";
 				reg = <0x02184000 0x200>;
@@ -678,6 +678,14 @@ ohci0: usb@01c14400 {
 			status = "disabled";
 		};

+		crypto: crypto-engine@01c15000 {
+			compatible = "allwinner,sun4i-a10-crypto";
+			reg = <0x01c15000 0x1000>;
+			interrupts = <86>;
+			clocks = <&ahb_gates 5>, <&ss_clk>;
+			clock-names = "ahb", "mod";
+		};
+
 		spi2: spi@01c17000 {
 			compatible = "allwinner,sun4i-a10-spi";
 			reg = <0x01c17000 0x1000>;
@@ -367,6 +367,14 @@ mmc3_clk: clk@01c20094 {
 					     "mmc3_sample";
 		};

+		ss_clk: clk@01c2009c {
+			#clock-cells = <0>;
+			compatible = "allwinner,sun4i-a10-mod0-clk";
+			reg = <0x01c2009c 0x4>;
+			clocks = <&osc24M>, <&pll6 0>;
+			clock-output-names = "ss";
+		};
+
 		spi0_clk: clk@01c200a0 {
 			#clock-cells = <0>;
 			compatible = "allwinner,sun4i-a10-mod0-clk";
@@ -894,6 +902,16 @@ gmac: ethernet@01c30000 {
 			#size-cells = <0>;
 		};

+		crypto: crypto-engine@01c15000 {
+			compatible = "allwinner,sun4i-a10-crypto";
+			reg = <0x01c15000 0x1000>;
+			interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&ahb1_gates 5>, <&ss_clk>;
+			clock-names = "ahb", "mod";
+			resets = <&ahb1_rst 5>;
+			reset-names = "ahb";
+		};
+
 		timer@01c60000 {
 			compatible = "allwinner,sun6i-a31-hstimer",
 				     "allwinner,sun7i-a20-hstimer";
@@ -754,6 +754,14 @@ ohci0: usb@01c14400 {
 			status = "disabled";
 		};

+		crypto: crypto-engine@01c15000 {
+			compatible = "allwinner,sun4i-a10-crypto";
+			reg = <0x01c15000 0x1000>;
+			interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&ahb_gates 5>, <&ss_clk>;
+			clock-names = "ahb", "mod";
+		};
+
 		spi2: spi@01c17000 {
 			compatible = "allwinner,sun4i-a10-spi";
 			reg = <0x01c17000 0x1000>;
@@ -354,8 +354,7 @@ CONFIG_PROVE_LOCKING=y
 # CONFIG_FTRACE is not set
 # CONFIG_ARM_UNWIND is not set
 CONFIG_SECURITYFS=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
-# CONFIG_CRYPTO_HW is not set
+CONFIG_CRYPTO_DEV_FSL_CAAM=y
 CONFIG_CRC_CCITT=m
 CONFIG_CRC_T10DIF=y
 CONFIG_CRC7=m
2 arch/arm/crypto/.gitignore vendored
@@ -1 +1,3 @@
 aesbs-core.S
+sha256-core.S
+sha512-core.S
@ -124,7 +124,7 @@ static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[])
|
||||
|
||||
ce_aes_ccm_auth_data(mac, (u8 *)&ltag, ltag.len, &macp, ctx->key_enc,
|
||||
num_rounds(ctx));
|
||||
scatterwalk_start(&walk, req->assoc);
|
||||
scatterwalk_start(&walk, req->src);
|
||||
|
||||
do {
|
||||
u32 n = scatterwalk_clamp(&walk, len);
|
||||
@ -151,6 +151,10 @@ static int ccm_encrypt(struct aead_request *req)
|
||||
struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
|
||||
struct blkcipher_desc desc = { .info = req->iv };
|
||||
struct blkcipher_walk walk;
|
||||
struct scatterlist srcbuf[2];
|
||||
struct scatterlist dstbuf[2];
|
||||
struct scatterlist *src;
|
||||
struct scatterlist *dst;
|
||||
u8 __aligned(8) mac[AES_BLOCK_SIZE];
|
||||
u8 buf[AES_BLOCK_SIZE];
|
||||
u32 len = req->cryptlen;
|
||||
@ -168,7 +172,12 @@ static int ccm_encrypt(struct aead_request *req)
|
||||
/* preserve the original iv for the final round */
|
||||
memcpy(buf, req->iv, AES_BLOCK_SIZE);
|
||||
|
||||
blkcipher_walk_init(&walk, req->dst, req->src, len);
|
||||
src = scatterwalk_ffwd(srcbuf, req->src, req->assoclen);
|
||||
dst = src;
|
||||
if (req->src != req->dst)
|
||||
dst = scatterwalk_ffwd(dstbuf, req->dst, req->assoclen);
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, len);
|
||||
err = blkcipher_aead_walk_virt_block(&desc, &walk, aead,
|
||||
AES_BLOCK_SIZE);
|
||||
|
||||
@ -194,7 +203,7 @@ static int ccm_encrypt(struct aead_request *req)
|
||||
return err;
|
||||
|
||||
/* copy authtag to end of dst */
|
||||
scatterwalk_map_and_copy(mac, req->dst, req->cryptlen,
|
||||
scatterwalk_map_and_copy(mac, dst, req->cryptlen,
|
||||
crypto_aead_authsize(aead), 1);
|
||||
|
||||
return 0;
|
||||
@ -207,6 +216,10 @@ static int ccm_decrypt(struct aead_request *req)
|
||||
unsigned int authsize = crypto_aead_authsize(aead);
|
||||
struct blkcipher_desc desc = { .info = req->iv };
|
||||
struct blkcipher_walk walk;
|
||||
struct scatterlist srcbuf[2];
|
||||
struct scatterlist dstbuf[2];
|
||||
struct scatterlist *src;
|
||||
struct scatterlist *dst;
|
||||
u8 __aligned(8) mac[AES_BLOCK_SIZE];
|
||||
u8 buf[AES_BLOCK_SIZE];
|
||||
u32 len = req->cryptlen - authsize;
|
||||
@ -224,7 +237,12 @@ static int ccm_decrypt(struct aead_request *req)
|
||||
/* preserve the original iv for the final round */
|
||||
memcpy(buf, req->iv, AES_BLOCK_SIZE);
|
||||
|
||||
blkcipher_walk_init(&walk, req->dst, req->src, len);
|
||||
src = scatterwalk_ffwd(srcbuf, req->src, req->assoclen);
|
||||
dst = src;
|
||||
if (req->src != req->dst)
|
||||
dst = scatterwalk_ffwd(dstbuf, req->dst, req->assoclen);
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, len);
|
||||
err = blkcipher_aead_walk_virt_block(&desc, &walk, aead,
|
||||
AES_BLOCK_SIZE);
|
||||
|
||||
@ -250,44 +268,42 @@ static int ccm_decrypt(struct aead_request *req)
|
||||
return err;
|
||||
|
||||
/* compare calculated auth tag with the stored one */
|
||||
scatterwalk_map_and_copy(buf, req->src, req->cryptlen - authsize,
|
||||
scatterwalk_map_and_copy(buf, src, req->cryptlen - authsize,
|
||||
authsize, 0);
|
||||
|
||||
if (memcmp(mac, buf, authsize))
|
||||
if (crypto_memneq(mac, buf, authsize))
|
||||
return -EBADMSG;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct crypto_alg ccm_aes_alg = {
|
||||
.cra_name = "ccm(aes)",
|
||||
.cra_driver_name = "ccm-aes-ce",
|
||||
.cra_priority = 300,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_AEAD,
|
||||
.cra_blocksize = 1,
|
||||
.cra_ctxsize = sizeof(struct crypto_aes_ctx),
|
||||
.cra_alignmask = 7,
|
||||
.cra_type = &crypto_aead_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_aead = {
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
.maxauthsize = AES_BLOCK_SIZE,
|
||||
.setkey = ccm_setkey,
|
||||
.setauthsize = ccm_setauthsize,
|
||||
.encrypt = ccm_encrypt,
|
||||
.decrypt = ccm_decrypt,
|
||||
}
|
||||
static struct aead_alg ccm_aes_alg = {
|
||||
.base = {
|
||||
.cra_name = "ccm(aes)",
|
||||
.cra_driver_name = "ccm-aes-ce",
|
||||
.cra_priority = 300,
|
||||
.cra_blocksize = 1,
|
||||
.cra_ctxsize = sizeof(struct crypto_aes_ctx),
|
||||
.cra_alignmask = 7,
|
||||
.cra_module = THIS_MODULE,
|
||||
},
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
.maxauthsize = AES_BLOCK_SIZE,
|
||||
.setkey = ccm_setkey,
|
||||
.setauthsize = ccm_setauthsize,
|
||||
.encrypt = ccm_encrypt,
|
||||
.decrypt = ccm_decrypt,
|
||||
};
|
||||
|
||||
static int __init aes_mod_init(void)
|
||||
{
|
||||
if (!(elf_hwcap & HWCAP_AES))
|
||||
return -ENODEV;
|
||||
return crypto_register_alg(&ccm_aes_alg);
|
||||
return crypto_register_aead(&ccm_aes_alg);
|
||||
}
|
||||
|
||||
static void __exit aes_mod_exit(void)
|
||||
{
|
||||
crypto_unregister_alg(&ccm_aes_alg);
|
||||
crypto_unregister_aead(&ccm_aes_alg);
|
||||
}
|
||||
|
||||
module_init(aes_mod_init);
|
||||
|
@@ -29,6 +29,7 @@ static inline void save_early_sprs(struct thread_struct *prev) {}

 extern void enable_kernel_fp(void);
 extern void enable_kernel_altivec(void);
+extern void enable_kernel_vsx(void);
 extern int emulate_altivec(struct pt_regs *);
 extern void __giveup_vsx(struct task_struct *);
 extern void giveup_vsx(struct task_struct *);
@@ -204,8 +204,6 @@ EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
 #endif /* CONFIG_ALTIVEC */

 #ifdef CONFIG_VSX
-#if 0
-/* not currently used, but some crazy RAID module might want to later */
 void enable_kernel_vsx(void)
 {
 	WARN_ON(preemptible());
@@ -220,7 +218,6 @@ void enable_kernel_vsx(void)
 #endif /* CONFIG_SMP */
 }
 EXPORT_SYMBOL(enable_kernel_vsx);
-#endif

 void giveup_vsx(struct task_struct *tsk)
 {
@@ -20,6 +20,7 @@ obj-$(CONFIG_CRYPTO_BLOWFISH_X86_64) += blowfish-x86_64.o
 obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o
 obj-$(CONFIG_CRYPTO_TWOFISH_X86_64_3WAY) += twofish-x86_64-3way.o
 obj-$(CONFIG_CRYPTO_SALSA20_X86_64) += salsa20-x86_64.o
+obj-$(CONFIG_CRYPTO_CHACHA20_X86_64) += chacha20-x86_64.o
 obj-$(CONFIG_CRYPTO_SERPENT_SSE2_X86_64) += serpent-sse2-x86_64.o
 obj-$(CONFIG_CRYPTO_AES_NI_INTEL) += aesni-intel.o
 obj-$(CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL) += ghash-clmulni-intel.o
@@ -30,6 +31,7 @@ obj-$(CONFIG_CRYPTO_CRC32_PCLMUL) += crc32-pclmul.o
 obj-$(CONFIG_CRYPTO_SHA256_SSSE3) += sha256-ssse3.o
 obj-$(CONFIG_CRYPTO_SHA512_SSSE3) += sha512-ssse3.o
 obj-$(CONFIG_CRYPTO_CRCT10DIF_PCLMUL) += crct10dif-pclmul.o
+obj-$(CONFIG_CRYPTO_POLY1305_X86_64) += poly1305-x86_64.o

 # These modules require assembler to support AVX.
 ifeq ($(avx_supported),yes)
@@ -60,6 +62,7 @@ blowfish-x86_64-y := blowfish-x86_64-asm_64.o blowfish_glue.o
 twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_glue.o
 twofish-x86_64-3way-y := twofish-x86_64-asm_64-3way.o twofish_glue_3way.o
 salsa20-x86_64-y := salsa20-x86_64-asm_64.o salsa20_glue.o
+chacha20-x86_64-y := chacha20-ssse3-x86_64.o chacha20_glue.o
 serpent-sse2-x86_64-y := serpent-sse2-x86_64-asm_64.o serpent_sse2_glue.o

 ifeq ($(avx_supported),yes)
@@ -75,6 +78,7 @@ endif

 ifeq ($(avx2_supported),yes)
 camellia-aesni-avx2-y := camellia-aesni-avx2-asm_64.o camellia_aesni_avx2_glue.o
+chacha20-x86_64-y += chacha20-avx2-x86_64.o
 serpent-avx2-y := serpent-avx2-asm_64.o serpent_avx2_glue.o
 endif

@@ -82,8 +86,10 @@ aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o fpu.o
 aesni-intel-$(CONFIG_64BIT) += aesni-intel_avx-x86_64.o aes_ctrby8_avx-x86_64.o
 ghash-clmulni-intel-y := ghash-clmulni-intel_asm.o ghash-clmulni-intel_glue.o
 sha1-ssse3-y := sha1_ssse3_asm.o sha1_ssse3_glue.o
+poly1305-x86_64-y := poly1305-sse2-x86_64.o poly1305_glue.o
 ifeq ($(avx2_supported),yes)
 sha1-ssse3-y += sha1_avx2_x86_64_asm.o
+poly1305-x86_64-y += poly1305-avx2-x86_64.o
 endif
 crc32c-intel-y := crc32c-intel_glue.o
 crc32c-intel-$(CONFIG_64BIT) += crc32c-pcl-intel-asm_64.o
@ -803,10 +803,7 @@ static int rfc4106_init(struct crypto_aead *aead)
|
||||
return PTR_ERR(cryptd_tfm);
|
||||
|
||||
*ctx = cryptd_tfm;
|
||||
crypto_aead_set_reqsize(
|
||||
aead,
|
||||
sizeof(struct aead_request) +
|
||||
crypto_aead_reqsize(&cryptd_tfm->base));
|
||||
crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -955,8 +952,8 @@ static int helper_rfc4106_encrypt(struct aead_request *req)
|
||||
|
||||
/* Assuming we are supporting rfc4106 64-bit extended */
|
||||
/* sequence numbers We need to have the AAD length equal */
|
||||
/* to 8 or 12 bytes */
|
||||
if (unlikely(req->assoclen != 8 && req->assoclen != 12))
|
||||
/* to 16 or 20 bytes */
|
||||
if (unlikely(req->assoclen != 16 && req->assoclen != 20))
|
||||
return -EINVAL;
|
||||
|
||||
/* IV below built */
|
||||
@ -992,9 +989,9 @@ static int helper_rfc4106_encrypt(struct aead_request *req)
|
||||
}
|
||||
|
||||
kernel_fpu_begin();
|
||||
aesni_gcm_enc_tfm(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
|
||||
ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst
|
||||
+ ((unsigned long)req->cryptlen), auth_tag_len);
|
||||
aesni_gcm_enc_tfm(aes_ctx, dst, src, req->cryptlen, iv,
|
||||
ctx->hash_subkey, assoc, req->assoclen - 8,
|
||||
dst + req->cryptlen, auth_tag_len);
|
||||
kernel_fpu_end();
|
||||
|
||||
/* The authTag (aka the Integrity Check Value) needs to be written
|
||||
@ -1033,12 +1030,12 @@ static int helper_rfc4106_decrypt(struct aead_request *req)
|
||||
struct scatter_walk dst_sg_walk;
|
||||
unsigned int i;
|
||||
|
||||
if (unlikely(req->assoclen != 8 && req->assoclen != 12))
|
||||
if (unlikely(req->assoclen != 16 && req->assoclen != 20))
|
||||
return -EINVAL;
|
||||
|
||||
/* Assuming we are supporting rfc4106 64-bit extended */
|
||||
/* sequence numbers We need to have the AAD length */
|
||||
/* equal to 8 or 12 bytes */
|
||||
/* equal to 16 or 20 bytes */
|
||||
|
||||
tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
|
||||
/* IV below built */
|
||||
@ -1075,8 +1072,8 @@ static int helper_rfc4106_decrypt(struct aead_request *req)
|
||||
|
||||
kernel_fpu_begin();
|
||||
aesni_gcm_dec_tfm(aes_ctx, dst, src, tempCipherLen, iv,
|
||||
ctx->hash_subkey, assoc, (unsigned long)req->assoclen,
|
||||
authTag, auth_tag_len);
|
||||
ctx->hash_subkey, assoc, req->assoclen - 8,
|
||||
authTag, auth_tag_len);
|
||||
kernel_fpu_end();
|
||||
|
||||
/* Compare generated tag with passed in tag. */
|
||||
@ -1105,19 +1102,12 @@ static int rfc4106_encrypt(struct aead_request *req)
|
||||
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
|
||||
struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
|
||||
struct cryptd_aead *cryptd_tfm = *ctx;
|
||||
struct aead_request *subreq = aead_request_ctx(req);
|
||||
|
||||
aead_request_set_tfm(subreq, irq_fpu_usable() ?
|
||||
cryptd_aead_child(cryptd_tfm) :
|
||||
&cryptd_tfm->base);
|
||||
aead_request_set_tfm(req, irq_fpu_usable() ?
|
||||
cryptd_aead_child(cryptd_tfm) :
|
||||
&cryptd_tfm->base);
|
||||
|
||||
aead_request_set_callback(subreq, req->base.flags,
|
||||
req->base.complete, req->base.data);
|
||||
aead_request_set_crypt(subreq, req->src, req->dst,
|
||||
req->cryptlen, req->iv);
|
||||
aead_request_set_ad(subreq, req->assoclen);
|
||||
|
||||
return crypto_aead_encrypt(subreq);
|
||||
return crypto_aead_encrypt(req);
|
||||
}
|
||||
|
||||
static int rfc4106_decrypt(struct aead_request *req)
|
||||
@ -1125,19 +1115,12 @@ static int rfc4106_decrypt(struct aead_request *req)
|
||||
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
|
||||
struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
|
||||
struct cryptd_aead *cryptd_tfm = *ctx;
|
||||
struct aead_request *subreq = aead_request_ctx(req);
|
||||
|
||||
aead_request_set_tfm(subreq, irq_fpu_usable() ?
|
||||
cryptd_aead_child(cryptd_tfm) :
|
||||
&cryptd_tfm->base);
|
||||
aead_request_set_tfm(req, irq_fpu_usable() ?
|
||||
cryptd_aead_child(cryptd_tfm) :
|
||||
&cryptd_tfm->base);
|
||||
|
||||
aead_request_set_callback(subreq, req->base.flags,
|
||||
req->base.complete, req->base.data);
|
||||
aead_request_set_crypt(subreq, req->src, req->dst,
|
||||
req->cryptlen, req->iv);
|
||||
aead_request_set_ad(subreq, req->assoclen);
|
||||
|
||||
return crypto_aead_decrypt(subreq);
|
||||
return crypto_aead_decrypt(req);
|
||||
}
|
||||
#endif
|
||||
|
||||
|
443 arch/x86/crypto/chacha20-avx2-x86_64.S Normal file
@@ -0,0 +1,443 @@
|
||||
/*
|
||||
* ChaCha20 256-bit cipher algorithm, RFC7539, x64 AVX2 functions
|
||||
*
|
||||
* Copyright (C) 2015 Martin Willi
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*/
|
||||
|
||||
#include <linux/linkage.h>
|
||||
|
||||
.data
|
||||
.align 32
|
||||
|
||||
ROT8: .octa 0x0e0d0c0f0a09080b0605040702010003
|
||||
.octa 0x0e0d0c0f0a09080b0605040702010003
|
||||
ROT16: .octa 0x0d0c0f0e09080b0a0504070601000302
|
||||
.octa 0x0d0c0f0e09080b0a0504070601000302
|
||||
CTRINC: .octa 0x00000003000000020000000100000000
|
||||
.octa 0x00000007000000060000000500000004
|
||||
|
||||
.text
|
||||
|
||||
ENTRY(chacha20_8block_xor_avx2)
|
||||
# %rdi: Input state matrix, s
|
||||
# %rsi: 8 data blocks output, o
|
||||
# %rdx: 8 data blocks input, i
|
||||
|
||||
# This function encrypts eight consecutive ChaCha20 blocks by loading
|
||||
# the state matrix in AVX registers eight times. As we need some
|
||||
# scratch registers, we save the first four registers on the stack. The
|
||||
# algorithm performs each operation on the corresponding word of each
|
||||
# state matrix, hence requires no word shuffling. For final XORing step
|
||||
# we transpose the matrix by interleaving 32-, 64- and then 128-bit
|
||||
# words, which allows us to do XOR in AVX registers. 8/16-bit word
|
||||
# rotation is done with the slightly better performing byte shuffling,
|
||||
# 7/12-bit word rotation uses traditional shift+OR.
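The comment above describes what each AVX2 lane computes in parallel. As a plain-C reference (not part of this file; the helper names are ours), the RFC 7539 quarter-round and the column/diagonal double-round that the .Ldoubleround8 loop applies to eight independent states at once look like this:

#include <stdint.h>

/* Rotate a 32-bit word left by n bits. */
static uint32_t rotl32(uint32_t v, int n)
{
	return (v << n) | (v >> (32 - n));
}

/* One ChaCha20 quarter-round on four words of the 16-word state.
 * The 16- and 8-bit rotations are the ones the assembly does with
 * byte shuffles (vpshufb); the 12- and 7-bit ones use shift+OR. */
static void quarterround(uint32_t x[16], int a, int b, int c, int d)
{
	x[a] += x[b]; x[d] = rotl32(x[d] ^ x[a], 16);
	x[c] += x[d]; x[b] = rotl32(x[b] ^ x[c], 12);
	x[a] += x[b]; x[d] = rotl32(x[d] ^ x[a], 8);
	x[c] += x[d]; x[b] = rotl32(x[b] ^ x[c], 7);
}

/* One "double round": a column round followed by a diagonal round.
 * The vectorized loop runs this ten times, once per .Ldoubleround8
 * iteration, on eight states held one-per-lane in the YMM registers. */
static void doubleround(uint32_t x[16])
{
	quarterround(x, 0, 4,  8, 12);
	quarterround(x, 1, 5,  9, 13);
	quarterround(x, 2, 6, 10, 14);
	quarterround(x, 3, 7, 11, 15);
	quarterround(x, 0, 5, 10, 15);
	quarterround(x, 1, 6, 11, 12);
	quarterround(x, 2, 7,  8, 13);
	quarterround(x, 3, 4,  9, 14);
}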
|
||||
|
||||
vzeroupper
|
||||
# 4 * 32 byte stack, 32-byte aligned
|
||||
mov %rsp, %r8
|
||||
and $~31, %rsp
|
||||
sub $0x80, %rsp
|
||||
|
||||
# x0..15[0-7] = s[0..15]
|
||||
vpbroadcastd 0x00(%rdi),%ymm0
|
||||
vpbroadcastd 0x04(%rdi),%ymm1
|
||||
vpbroadcastd 0x08(%rdi),%ymm2
|
||||
vpbroadcastd 0x0c(%rdi),%ymm3
|
||||
vpbroadcastd 0x10(%rdi),%ymm4
|
||||
vpbroadcastd 0x14(%rdi),%ymm5
|
||||
vpbroadcastd 0x18(%rdi),%ymm6
|
||||
vpbroadcastd 0x1c(%rdi),%ymm7
|
||||
vpbroadcastd 0x20(%rdi),%ymm8
|
||||
vpbroadcastd 0x24(%rdi),%ymm9
|
||||
vpbroadcastd 0x28(%rdi),%ymm10
|
||||
vpbroadcastd 0x2c(%rdi),%ymm11
|
||||
vpbroadcastd 0x30(%rdi),%ymm12
|
||||
vpbroadcastd 0x34(%rdi),%ymm13
|
||||
vpbroadcastd 0x38(%rdi),%ymm14
|
||||
vpbroadcastd 0x3c(%rdi),%ymm15
|
||||
# x0..3 on stack
|
||||
vmovdqa %ymm0,0x00(%rsp)
|
||||
vmovdqa %ymm1,0x20(%rsp)
|
||||
vmovdqa %ymm2,0x40(%rsp)
|
||||
vmovdqa %ymm3,0x60(%rsp)
|
||||
|
||||
vmovdqa CTRINC(%rip),%ymm1
|
||||
vmovdqa ROT8(%rip),%ymm2
|
||||
vmovdqa ROT16(%rip),%ymm3
|
||||
|
||||
# x12 += counter values 0-3
|
||||
vpaddd %ymm1,%ymm12,%ymm12
|
||||
|
||||
mov $10,%ecx
|
||||
|
||||
.Ldoubleround8:
|
||||
# x0 += x4, x12 = rotl32(x12 ^ x0, 16)
|
||||
vpaddd 0x00(%rsp),%ymm4,%ymm0
|
||||
vmovdqa %ymm0,0x00(%rsp)
|
||||
vpxor %ymm0,%ymm12,%ymm12
|
||||
vpshufb %ymm3,%ymm12,%ymm12
|
||||
# x1 += x5, x13 = rotl32(x13 ^ x1, 16)
|
||||
vpaddd 0x20(%rsp),%ymm5,%ymm0
|
||||
vmovdqa %ymm0,0x20(%rsp)
|
||||
vpxor %ymm0,%ymm13,%ymm13
|
||||
vpshufb %ymm3,%ymm13,%ymm13
|
||||
# x2 += x6, x14 = rotl32(x14 ^ x2, 16)
|
||||
vpaddd 0x40(%rsp),%ymm6,%ymm0
|
||||
vmovdqa %ymm0,0x40(%rsp)
|
||||
vpxor %ymm0,%ymm14,%ymm14
|
||||
vpshufb %ymm3,%ymm14,%ymm14
|
||||
# x3 += x7, x15 = rotl32(x15 ^ x3, 16)
|
||||
vpaddd 0x60(%rsp),%ymm7,%ymm0
|
||||
vmovdqa %ymm0,0x60(%rsp)
|
||||
vpxor %ymm0,%ymm15,%ymm15
|
||||
vpshufb %ymm3,%ymm15,%ymm15
|
||||
|
||||
# x8 += x12, x4 = rotl32(x4 ^ x8, 12)
|
||||
vpaddd %ymm12,%ymm8,%ymm8
|
||||
vpxor %ymm8,%ymm4,%ymm4
|
||||
vpslld $12,%ymm4,%ymm0
|
||||
vpsrld $20,%ymm4,%ymm4
|
||||
vpor %ymm0,%ymm4,%ymm4
|
||||
# x9 += x13, x5 = rotl32(x5 ^ x9, 12)
|
||||
vpaddd %ymm13,%ymm9,%ymm9
|
||||
vpxor %ymm9,%ymm5,%ymm5
|
||||
vpslld $12,%ymm5,%ymm0
|
||||
vpsrld $20,%ymm5,%ymm5
|
||||
vpor %ymm0,%ymm5,%ymm5
|
||||
# x10 += x14, x6 = rotl32(x6 ^ x10, 12)
|
||||
vpaddd %ymm14,%ymm10,%ymm10
|
||||
vpxor %ymm10,%ymm6,%ymm6
|
||||
vpslld $12,%ymm6,%ymm0
|
||||
vpsrld $20,%ymm6,%ymm6
|
||||
vpor %ymm0,%ymm6,%ymm6
|
||||
# x11 += x15, x7 = rotl32(x7 ^ x11, 12)
|
||||
vpaddd %ymm15,%ymm11,%ymm11
|
||||
vpxor %ymm11,%ymm7,%ymm7
|
||||
vpslld $12,%ymm7,%ymm0
|
||||
vpsrld $20,%ymm7,%ymm7
|
||||
vpor %ymm0,%ymm7,%ymm7
|
||||
|
||||
# x0 += x4, x12 = rotl32(x12 ^ x0, 8)
|
||||
vpaddd 0x00(%rsp),%ymm4,%ymm0
|
||||
vmovdqa %ymm0,0x00(%rsp)
|
||||
vpxor %ymm0,%ymm12,%ymm12
|
||||
vpshufb %ymm2,%ymm12,%ymm12
|
||||
# x1 += x5, x13 = rotl32(x13 ^ x1, 8)
|
||||
vpaddd 0x20(%rsp),%ymm5,%ymm0
|
||||
vmovdqa %ymm0,0x20(%rsp)
|
||||
vpxor %ymm0,%ymm13,%ymm13
|
||||
vpshufb %ymm2,%ymm13,%ymm13
|
||||
# x2 += x6, x14 = rotl32(x14 ^ x2, 8)
|
||||
vpaddd 0x40(%rsp),%ymm6,%ymm0
|
||||
vmovdqa %ymm0,0x40(%rsp)
|
||||
vpxor %ymm0,%ymm14,%ymm14
|
||||
vpshufb %ymm2,%ymm14,%ymm14
|
||||
# x3 += x7, x15 = rotl32(x15 ^ x3, 8)
|
||||
vpaddd 0x60(%rsp),%ymm7,%ymm0
|
||||
vmovdqa %ymm0,0x60(%rsp)
|
||||
vpxor %ymm0,%ymm15,%ymm15
|
||||
vpshufb %ymm2,%ymm15,%ymm15
|
||||
|
||||
# x8 += x12, x4 = rotl32(x4 ^ x8, 7)
|
||||
vpaddd %ymm12,%ymm8,%ymm8
|
||||
vpxor %ymm8,%ymm4,%ymm4
|
||||
vpslld $7,%ymm4,%ymm0
|
||||
vpsrld $25,%ymm4,%ymm4
|
||||
vpor %ymm0,%ymm4,%ymm4
|
||||
# x9 += x13, x5 = rotl32(x5 ^ x9, 7)
|
||||
vpaddd %ymm13,%ymm9,%ymm9
|
||||
vpxor %ymm9,%ymm5,%ymm5
|
||||
vpslld $7,%ymm5,%ymm0
|
||||
vpsrld $25,%ymm5,%ymm5
|
||||
vpor %ymm0,%ymm5,%ymm5
|
||||
# x10 += x14, x6 = rotl32(x6 ^ x10, 7)
|
||||
vpaddd %ymm14,%ymm10,%ymm10
|
||||
vpxor %ymm10,%ymm6,%ymm6
|
||||
vpslld $7,%ymm6,%ymm0
|
||||
vpsrld $25,%ymm6,%ymm6
|
||||
vpor %ymm0,%ymm6,%ymm6
|
||||
# x11 += x15, x7 = rotl32(x7 ^ x11, 7)
|
||||
vpaddd %ymm15,%ymm11,%ymm11
|
||||
vpxor %ymm11,%ymm7,%ymm7
|
||||
vpslld $7,%ymm7,%ymm0
|
||||
vpsrld $25,%ymm7,%ymm7
|
||||
vpor %ymm0,%ymm7,%ymm7
|
||||
|
||||
# x0 += x5, x15 = rotl32(x15 ^ x0, 16)
|
||||
vpaddd 0x00(%rsp),%ymm5,%ymm0
|
||||
vmovdqa %ymm0,0x00(%rsp)
|
||||
vpxor %ymm0,%ymm15,%ymm15
|
||||
vpshufb %ymm3,%ymm15,%ymm15
|
||||
# x1 += x6, x12 = rotl32(x12 ^ x1, 16)%ymm0
|
||||
vpaddd 0x20(%rsp),%ymm6,%ymm0
|
||||
vmovdqa %ymm0,0x20(%rsp)
|
||||
vpxor %ymm0,%ymm12,%ymm12
|
||||
vpshufb %ymm3,%ymm12,%ymm12
|
||||
# x2 += x7, x13 = rotl32(x13 ^ x2, 16)
|
||||
vpaddd 0x40(%rsp),%ymm7,%ymm0
|
||||
vmovdqa %ymm0,0x40(%rsp)
|
||||
vpxor %ymm0,%ymm13,%ymm13
|
||||
vpshufb %ymm3,%ymm13,%ymm13
|
||||
# x3 += x4, x14 = rotl32(x14 ^ x3, 16)
|
||||
vpaddd 0x60(%rsp),%ymm4,%ymm0
|
||||
vmovdqa %ymm0,0x60(%rsp)
|
||||
vpxor %ymm0,%ymm14,%ymm14
|
||||
vpshufb %ymm3,%ymm14,%ymm14
|
||||
|
||||
# x10 += x15, x5 = rotl32(x5 ^ x10, 12)
|
||||
vpaddd %ymm15,%ymm10,%ymm10
|
||||
vpxor %ymm10,%ymm5,%ymm5
|
||||
vpslld $12,%ymm5,%ymm0
|
||||
vpsrld $20,%ymm5,%ymm5
|
||||
vpor %ymm0,%ymm5,%ymm5
|
||||
# x11 += x12, x6 = rotl32(x6 ^ x11, 12)
|
||||
vpaddd %ymm12,%ymm11,%ymm11
|
||||
vpxor %ymm11,%ymm6,%ymm6
|
||||
vpslld $12,%ymm6,%ymm0
|
||||
vpsrld $20,%ymm6,%ymm6
|
||||
vpor %ymm0,%ymm6,%ymm6
|
||||
# x8 += x13, x7 = rotl32(x7 ^ x8, 12)
|
||||
vpaddd %ymm13,%ymm8,%ymm8
|
||||
vpxor %ymm8,%ymm7,%ymm7
|
||||
vpslld $12,%ymm7,%ymm0
|
||||
vpsrld $20,%ymm7,%ymm7
|
||||
vpor %ymm0,%ymm7,%ymm7
|
||||
# x9 += x14, x4 = rotl32(x4 ^ x9, 12)
|
||||
vpaddd %ymm14,%ymm9,%ymm9
|
||||
vpxor %ymm9,%ymm4,%ymm4
|
||||
vpslld $12,%ymm4,%ymm0
|
||||
vpsrld $20,%ymm4,%ymm4
|
||||
vpor %ymm0,%ymm4,%ymm4
|
||||
|
||||
# x0 += x5, x15 = rotl32(x15 ^ x0, 8)
|
||||
vpaddd 0x00(%rsp),%ymm5,%ymm0
|
||||
vmovdqa %ymm0,0x00(%rsp)
|
||||
vpxor %ymm0,%ymm15,%ymm15
|
||||
vpshufb %ymm2,%ymm15,%ymm15
|
||||
# x1 += x6, x12 = rotl32(x12 ^ x1, 8)
|
||||
vpaddd 0x20(%rsp),%ymm6,%ymm0
|
||||
vmovdqa %ymm0,0x20(%rsp)
|
||||
vpxor %ymm0,%ymm12,%ymm12
|
||||
vpshufb %ymm2,%ymm12,%ymm12
|
||||
# x2 += x7, x13 = rotl32(x13 ^ x2, 8)
|
||||
vpaddd 0x40(%rsp),%ymm7,%ymm0
|
||||
vmovdqa %ymm0,0x40(%rsp)
|
||||
vpxor %ymm0,%ymm13,%ymm13
|
||||
vpshufb %ymm2,%ymm13,%ymm13
|
||||
# x3 += x4, x14 = rotl32(x14 ^ x3, 8)
|
||||
vpaddd 0x60(%rsp),%ymm4,%ymm0
|
||||
vmovdqa %ymm0,0x60(%rsp)
|
||||
vpxor %ymm0,%ymm14,%ymm14
|
||||
vpshufb %ymm2,%ymm14,%ymm14
|
||||
|
||||
# x10 += x15, x5 = rotl32(x5 ^ x10, 7)
|
||||
vpaddd %ymm15,%ymm10,%ymm10
|
||||
vpxor %ymm10,%ymm5,%ymm5
|
||||
vpslld $7,%ymm5,%ymm0
|
||||
vpsrld $25,%ymm5,%ymm5
|
||||
vpor %ymm0,%ymm5,%ymm5
|
||||
# x11 += x12, x6 = rotl32(x6 ^ x11, 7)
|
||||
vpaddd %ymm12,%ymm11,%ymm11
|
||||
vpxor %ymm11,%ymm6,%ymm6
|
||||
vpslld $7,%ymm6,%ymm0
|
||||
vpsrld $25,%ymm6,%ymm6
|
||||
vpor %ymm0,%ymm6,%ymm6
|
||||
# x8 += x13, x7 = rotl32(x7 ^ x8, 7)
|
||||
vpaddd %ymm13,%ymm8,%ymm8
|
||||
vpxor %ymm8,%ymm7,%ymm7
|
||||
vpslld $7,%ymm7,%ymm0
|
||||
vpsrld $25,%ymm7,%ymm7
|
||||
vpor %ymm0,%ymm7,%ymm7
|
||||
# x9 += x14, x4 = rotl32(x4 ^ x9, 7)
|
||||
vpaddd %ymm14,%ymm9,%ymm9
|
||||
vpxor %ymm9,%ymm4,%ymm4
|
||||
vpslld $7,%ymm4,%ymm0
|
||||
vpsrld $25,%ymm4,%ymm4
|
||||
vpor %ymm0,%ymm4,%ymm4
|
||||
|
||||
dec %ecx
|
||||
jnz .Ldoubleround8
|
||||
|
||||
# x0..15[0-3] += s[0..15]
|
||||
vpbroadcastd 0x00(%rdi),%ymm0
|
||||
vpaddd 0x00(%rsp),%ymm0,%ymm0
|
||||
vmovdqa %ymm0,0x00(%rsp)
|
||||
vpbroadcastd 0x04(%rdi),%ymm0
|
||||
vpaddd 0x20(%rsp),%ymm0,%ymm0
|
||||
vmovdqa %ymm0,0x20(%rsp)
|
||||
vpbroadcastd 0x08(%rdi),%ymm0
|
||||
vpaddd 0x40(%rsp),%ymm0,%ymm0
|
||||
vmovdqa %ymm0,0x40(%rsp)
|
||||
vpbroadcastd 0x0c(%rdi),%ymm0
|
||||
vpaddd 0x60(%rsp),%ymm0,%ymm0
|
||||
vmovdqa %ymm0,0x60(%rsp)
|
||||
vpbroadcastd 0x10(%rdi),%ymm0
|
||||
vpaddd %ymm0,%ymm4,%ymm4
|
||||
vpbroadcastd 0x14(%rdi),%ymm0
|
||||
vpaddd %ymm0,%ymm5,%ymm5
|
||||
vpbroadcastd 0x18(%rdi),%ymm0
|
||||
vpaddd %ymm0,%ymm6,%ymm6
|
||||
vpbroadcastd 0x1c(%rdi),%ymm0
|
||||
vpaddd %ymm0,%ymm7,%ymm7
|
||||
vpbroadcastd 0x20(%rdi),%ymm0
|
||||
vpaddd %ymm0,%ymm8,%ymm8
|
||||
vpbroadcastd 0x24(%rdi),%ymm0
|
||||
vpaddd %ymm0,%ymm9,%ymm9
|
||||
vpbroadcastd 0x28(%rdi),%ymm0
|
||||
vpaddd %ymm0,%ymm10,%ymm10
|
||||
vpbroadcastd 0x2c(%rdi),%ymm0
|
||||
vpaddd %ymm0,%ymm11,%ymm11
|
||||
vpbroadcastd 0x30(%rdi),%ymm0
|
||||
vpaddd %ymm0,%ymm12,%ymm12
|
||||
vpbroadcastd 0x34(%rdi),%ymm0
|
||||
vpaddd %ymm0,%ymm13,%ymm13
|
||||
vpbroadcastd 0x38(%rdi),%ymm0
|
||||
vpaddd %ymm0,%ymm14,%ymm14
|
||||
vpbroadcastd 0x3c(%rdi),%ymm0
|
||||
vpaddd %ymm0,%ymm15,%ymm15
|
||||
|
||||
# x12 += counter values 0-3
|
||||
vpaddd %ymm1,%ymm12,%ymm12
|
||||
|
||||
# interleave 32-bit words in state n, n+1
|
||||
vmovdqa 0x00(%rsp),%ymm0
|
||||
vmovdqa 0x20(%rsp),%ymm1
|
||||
vpunpckldq %ymm1,%ymm0,%ymm2
|
||||
vpunpckhdq %ymm1,%ymm0,%ymm1
|
||||
vmovdqa %ymm2,0x00(%rsp)
|
||||
vmovdqa %ymm1,0x20(%rsp)
|
||||
vmovdqa 0x40(%rsp),%ymm0
|
||||
vmovdqa 0x60(%rsp),%ymm1
|
||||
vpunpckldq %ymm1,%ymm0,%ymm2
|
||||
vpunpckhdq %ymm1,%ymm0,%ymm1
|
||||
vmovdqa %ymm2,0x40(%rsp)
|
||||
vmovdqa %ymm1,0x60(%rsp)
|
||||
vmovdqa %ymm4,%ymm0
|
||||
vpunpckldq %ymm5,%ymm0,%ymm4
|
||||
vpunpckhdq %ymm5,%ymm0,%ymm5
|
||||
vmovdqa %ymm6,%ymm0
|
||||
vpunpckldq %ymm7,%ymm0,%ymm6
|
||||
vpunpckhdq %ymm7,%ymm0,%ymm7
|
||||
vmovdqa %ymm8,%ymm0
|
||||
vpunpckldq %ymm9,%ymm0,%ymm8
|
||||
vpunpckhdq %ymm9,%ymm0,%ymm9
|
||||
vmovdqa %ymm10,%ymm0
|
||||
vpunpckldq %ymm11,%ymm0,%ymm10
|
||||
vpunpckhdq %ymm11,%ymm0,%ymm11
|
||||
vmovdqa %ymm12,%ymm0
|
||||
vpunpckldq %ymm13,%ymm0,%ymm12
|
||||
vpunpckhdq %ymm13,%ymm0,%ymm13
|
||||
vmovdqa %ymm14,%ymm0
|
||||
vpunpckldq %ymm15,%ymm0,%ymm14
|
||||
vpunpckhdq %ymm15,%ymm0,%ymm15
|
||||
|
||||
# interleave 64-bit words in state n, n+2
|
||||
vmovdqa 0x00(%rsp),%ymm0
|
||||
vmovdqa 0x40(%rsp),%ymm2
|
||||
vpunpcklqdq %ymm2,%ymm0,%ymm1
|
||||
vpunpckhqdq %ymm2,%ymm0,%ymm2
|
||||
vmovdqa %ymm1,0x00(%rsp)
|
||||
vmovdqa %ymm2,0x40(%rsp)
|
||||
vmovdqa 0x20(%rsp),%ymm0
|
||||
vmovdqa 0x60(%rsp),%ymm2
|
||||
vpunpcklqdq %ymm2,%ymm0,%ymm1
|
||||
vpunpckhqdq %ymm2,%ymm0,%ymm2
|
||||
vmovdqa %ymm1,0x20(%rsp)
|
||||
vmovdqa %ymm2,0x60(%rsp)
|
||||
vmovdqa %ymm4,%ymm0
|
||||
vpunpcklqdq %ymm6,%ymm0,%ymm4
|
||||
vpunpckhqdq %ymm6,%ymm0,%ymm6
|
||||
vmovdqa %ymm5,%ymm0
|
||||
vpunpcklqdq %ymm7,%ymm0,%ymm5
|
||||
vpunpckhqdq %ymm7,%ymm0,%ymm7
|
||||
vmovdqa %ymm8,%ymm0
|
||||
vpunpcklqdq %ymm10,%ymm0,%ymm8
|
||||
vpunpckhqdq %ymm10,%ymm0,%ymm10
|
||||
vmovdqa %ymm9,%ymm0
|
||||
vpunpcklqdq %ymm11,%ymm0,%ymm9
|
||||
vpunpckhqdq %ymm11,%ymm0,%ymm11
|
||||
vmovdqa %ymm12,%ymm0
|
||||
vpunpcklqdq %ymm14,%ymm0,%ymm12
|
||||
vpunpckhqdq %ymm14,%ymm0,%ymm14
|
||||
vmovdqa %ymm13,%ymm0
|
||||
vpunpcklqdq %ymm15,%ymm0,%ymm13
|
||||
vpunpckhqdq %ymm15,%ymm0,%ymm15
|
||||
|
||||
# interleave 128-bit words in state n, n+4
|
||||
vmovdqa 0x00(%rsp),%ymm0
|
||||
vperm2i128 $0x20,%ymm4,%ymm0,%ymm1
|
||||
vperm2i128 $0x31,%ymm4,%ymm0,%ymm4
|
||||
vmovdqa %ymm1,0x00(%rsp)
|
||||
vmovdqa 0x20(%rsp),%ymm0
|
||||
vperm2i128 $0x20,%ymm5,%ymm0,%ymm1
|
||||
vperm2i128 $0x31,%ymm5,%ymm0,%ymm5
|
||||
vmovdqa %ymm1,0x20(%rsp)
|
||||
vmovdqa 0x40(%rsp),%ymm0
|
||||
vperm2i128 $0x20,%ymm6,%ymm0,%ymm1
|
||||
vperm2i128 $0x31,%ymm6,%ymm0,%ymm6
|
||||
vmovdqa %ymm1,0x40(%rsp)
|
||||
vmovdqa 0x60(%rsp),%ymm0
|
||||
vperm2i128 $0x20,%ymm7,%ymm0,%ymm1
|
||||
vperm2i128 $0x31,%ymm7,%ymm0,%ymm7
|
||||
vmovdqa %ymm1,0x60(%rsp)
|
||||
vperm2i128 $0x20,%ymm12,%ymm8,%ymm0
|
||||
vperm2i128 $0x31,%ymm12,%ymm8,%ymm12
|
||||
vmovdqa %ymm0,%ymm8
|
||||
vperm2i128 $0x20,%ymm13,%ymm9,%ymm0
|
||||
vperm2i128 $0x31,%ymm13,%ymm9,%ymm13
|
||||
vmovdqa %ymm0,%ymm9
|
||||
vperm2i128 $0x20,%ymm14,%ymm10,%ymm0
|
||||
vperm2i128 $0x31,%ymm14,%ymm10,%ymm14
|
||||
vmovdqa %ymm0,%ymm10
|
||||
vperm2i128 $0x20,%ymm15,%ymm11,%ymm0
|
||||
vperm2i128 $0x31,%ymm15,%ymm11,%ymm15
|
||||
vmovdqa %ymm0,%ymm11
|
||||
|
||||
# xor with corresponding input, write to output
|
||||
vmovdqa 0x00(%rsp),%ymm0
|
||||
vpxor 0x0000(%rdx),%ymm0,%ymm0
|
||||
vmovdqu %ymm0,0x0000(%rsi)
|
||||
vmovdqa 0x20(%rsp),%ymm0
|
||||
vpxor 0x0080(%rdx),%ymm0,%ymm0
|
||||
vmovdqu %ymm0,0x0080(%rsi)
|
||||
vmovdqa 0x40(%rsp),%ymm0
|
||||
vpxor 0x0040(%rdx),%ymm0,%ymm0
|
||||
vmovdqu %ymm0,0x0040(%rsi)
|
||||
vmovdqa 0x60(%rsp),%ymm0
|
||||
vpxor 0x00c0(%rdx),%ymm0,%ymm0
|
||||
vmovdqu %ymm0,0x00c0(%rsi)
|
||||
vpxor 0x0100(%rdx),%ymm4,%ymm4
|
||||
vmovdqu %ymm4,0x0100(%rsi)
|
||||
vpxor 0x0180(%rdx),%ymm5,%ymm5
|
||||
vmovdqu %ymm5,0x00180(%rsi)
|
||||
vpxor 0x0140(%rdx),%ymm6,%ymm6
|
||||
vmovdqu %ymm6,0x0140(%rsi)
|
||||
vpxor 0x01c0(%rdx),%ymm7,%ymm7
|
||||
vmovdqu %ymm7,0x01c0(%rsi)
|
||||
vpxor 0x0020(%rdx),%ymm8,%ymm8
|
||||
vmovdqu %ymm8,0x0020(%rsi)
|
||||
vpxor 0x00a0(%rdx),%ymm9,%ymm9
|
||||
vmovdqu %ymm9,0x00a0(%rsi)
|
||||
vpxor 0x0060(%rdx),%ymm10,%ymm10
|
||||
vmovdqu %ymm10,0x0060(%rsi)
|
||||
vpxor 0x00e0(%rdx),%ymm11,%ymm11
|
||||
vmovdqu %ymm11,0x00e0(%rsi)
|
||||
vpxor 0x0120(%rdx),%ymm12,%ymm12
|
||||
vmovdqu %ymm12,0x0120(%rsi)
|
||||
vpxor 0x01a0(%rdx),%ymm13,%ymm13
|
||||
vmovdqu %ymm13,0x01a0(%rsi)
|
||||
vpxor 0x0160(%rdx),%ymm14,%ymm14
|
||||
vmovdqu %ymm14,0x0160(%rsi)
|
||||
vpxor 0x01e0(%rdx),%ymm15,%ymm15
|
||||
vmovdqu %ymm15,0x01e0(%rsi)
|
||||
|
||||
vzeroupper
|
||||
mov %r8,%rsp
|
||||
ret
|
||||
ENDPROC(chacha20_8block_xor_avx2)
|
625 arch/x86/crypto/chacha20-ssse3-x86_64.S Normal file
@@ -0,0 +1,625 @@
|
||||
/*
|
||||
* ChaCha20 256-bit cipher algorithm, RFC7539, x64 SSSE3 functions
|
||||
*
|
||||
* Copyright (C) 2015 Martin Willi
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*/
|
||||
|
||||
#include <linux/linkage.h>
|
||||
|
||||
.data
|
||||
.align 16
|
||||
|
||||
ROT8: .octa 0x0e0d0c0f0a09080b0605040702010003
|
||||
ROT16: .octa 0x0d0c0f0e09080b0a0504070601000302
|
||||
CTRINC: .octa 0x00000003000000020000000100000000
|
||||
|
||||
.text
|
||||
|
||||
ENTRY(chacha20_block_xor_ssse3)
|
||||
# %rdi: Input state matrix, s
|
||||
# %rsi: 1 data block output, o
|
||||
# %rdx: 1 data block input, i
|
||||
|
||||
# This function encrypts one ChaCha20 block by loading the state matrix
|
||||
# in four SSE registers. It performs matrix operation on four words in
|
||||
# parallel, but requireds shuffling to rearrange the words after each
|
||||
# round. 8/16-bit word rotation is done with the slightly better
|
||||
# performing SSSE3 byte shuffling, 7/12-bit word rotation uses
|
||||
# traditional shift+OR.
|
||||
|
||||
# x0..3 = s0..3
|
||||
movdqa 0x00(%rdi),%xmm0
|
||||
movdqa 0x10(%rdi),%xmm1
|
||||
movdqa 0x20(%rdi),%xmm2
|
||||
movdqa 0x30(%rdi),%xmm3
|
||||
movdqa %xmm0,%xmm8
|
||||
movdqa %xmm1,%xmm9
|
||||
movdqa %xmm2,%xmm10
|
||||
movdqa %xmm3,%xmm11
|
||||
|
||||
movdqa ROT8(%rip),%xmm4
|
||||
movdqa ROT16(%rip),%xmm5
|
||||
|
||||
mov $10,%ecx
|
||||
|
||||
.Ldoubleround:
|
||||
|
||||
# x0 += x1, x3 = rotl32(x3 ^ x0, 16)
|
||||
paddd %xmm1,%xmm0
|
||||
pxor %xmm0,%xmm3
|
||||
pshufb %xmm5,%xmm3
|
||||
|
||||
# x2 += x3, x1 = rotl32(x1 ^ x2, 12)
|
||||
paddd %xmm3,%xmm2
|
||||
pxor %xmm2,%xmm1
|
||||
movdqa %xmm1,%xmm6
|
||||
pslld $12,%xmm6
|
||||
psrld $20,%xmm1
|
||||
por %xmm6,%xmm1
|
||||
|
||||
# x0 += x1, x3 = rotl32(x3 ^ x0, 8)
|
||||
paddd %xmm1,%xmm0
|
||||
pxor %xmm0,%xmm3
|
||||
pshufb %xmm4,%xmm3
|
||||
|
||||
# x2 += x3, x1 = rotl32(x1 ^ x2, 7)
|
||||
paddd %xmm3,%xmm2
|
||||
pxor %xmm2,%xmm1
|
||||
movdqa %xmm1,%xmm7
|
||||
pslld $7,%xmm7
|
||||
psrld $25,%xmm1
|
||||
por %xmm7,%xmm1
|
||||
|
||||
# x1 = shuffle32(x1, MASK(0, 3, 2, 1))
|
||||
pshufd $0x39,%xmm1,%xmm1
|
||||
# x2 = shuffle32(x2, MASK(1, 0, 3, 2))
|
||||
pshufd $0x4e,%xmm2,%xmm2
|
||||
# x3 = shuffle32(x3, MASK(2, 1, 0, 3))
|
||||
pshufd $0x93,%xmm3,%xmm3
|
||||
|
||||
# x0 += x1, x3 = rotl32(x3 ^ x0, 16)
|
||||
paddd %xmm1,%xmm0
|
||||
pxor %xmm0,%xmm3
|
||||
pshufb %xmm5,%xmm3
|
||||
|
||||
# x2 += x3, x1 = rotl32(x1 ^ x2, 12)
|
||||
paddd %xmm3,%xmm2
|
||||
pxor %xmm2,%xmm1
|
||||
movdqa %xmm1,%xmm6
|
||||
pslld $12,%xmm6
|
||||
psrld $20,%xmm1
|
||||
por %xmm6,%xmm1
|
||||
|
||||
# x0 += x1, x3 = rotl32(x3 ^ x0, 8)
|
||||
paddd %xmm1,%xmm0
|
||||
pxor %xmm0,%xmm3
|
||||
pshufb %xmm4,%xmm3
|
||||
|
||||
# x2 += x3, x1 = rotl32(x1 ^ x2, 7)
|
||||
paddd %xmm3,%xmm2
|
||||
pxor %xmm2,%xmm1
|
||||
movdqa %xmm1,%xmm7
|
||||
pslld $7,%xmm7
|
||||
psrld $25,%xmm1
|
||||
por %xmm7,%xmm1
|
||||
|
||||
# x1 = shuffle32(x1, MASK(2, 1, 0, 3))
|
||||
pshufd $0x93,%xmm1,%xmm1
|
||||
# x2 = shuffle32(x2, MASK(1, 0, 3, 2))
|
||||
pshufd $0x4e,%xmm2,%xmm2
|
||||
# x3 = shuffle32(x3, MASK(0, 3, 2, 1))
|
||||
pshufd $0x39,%xmm3,%xmm3
|
||||
|
||||
dec %ecx
|
||||
jnz .Ldoubleround
|
||||
|
||||
# o0 = i0 ^ (x0 + s0)
|
||||
movdqu 0x00(%rdx),%xmm4
|
||||
paddd %xmm8,%xmm0
|
||||
pxor %xmm4,%xmm0
|
||||
movdqu %xmm0,0x00(%rsi)
|
||||
# o1 = i1 ^ (x1 + s1)
|
||||
movdqu 0x10(%rdx),%xmm5
|
||||
paddd %xmm9,%xmm1
|
||||
pxor %xmm5,%xmm1
|
||||
movdqu %xmm1,0x10(%rsi)
|
||||
# o2 = i2 ^ (x2 + s2)
|
||||
movdqu 0x20(%rdx),%xmm6
|
||||
paddd %xmm10,%xmm2
|
||||
pxor %xmm6,%xmm2
|
||||
movdqu %xmm2,0x20(%rsi)
|
||||
# o3 = i3 ^ (x3 + s3)
|
||||
movdqu 0x30(%rdx),%xmm7
|
||||
paddd %xmm11,%xmm3
|
||||
pxor %xmm7,%xmm3
|
||||
movdqu %xmm3,0x30(%rsi)
|
||||
|
||||
ret
|
||||
ENDPROC(chacha20_block_xor_ssse3)
|
||||
|
||||
ENTRY(chacha20_4block_xor_ssse3)
|
||||
# %rdi: Input state matrix, s
|
||||
# %rsi: 4 data blocks output, o
|
||||
# %rdx: 4 data blocks input, i
|
||||
|
||||
# This function encrypts four consecutive ChaCha20 blocks by loading
|
||||
# the state matrix in SSE registers four times. As we need some scratch
|
||||
# registers, we save the first four registers on the stack. The
|
||||
# algorithm performs each operation on the corresponding word of each
|
||||
# state matrix, hence requires no word shuffling. For final XORing step
|
||||
# we transpose the matrix by interleaving 32- and then 64-bit words,
|
||||
# which allows us to do XOR in SSE registers. 8/16-bit word rotation is
|
||||
# done with the slightly better performing SSSE3 byte shuffling,
|
||||
# 7/12-bit word rotation uses traditional shift+OR.
|
||||
|
||||
sub $0x40,%rsp
|
||||
|
||||
# x0..15[0-3] = s0..3[0..3]
|
||||
movq 0x00(%rdi),%xmm1
|
||||
pshufd $0x00,%xmm1,%xmm0
|
||||
pshufd $0x55,%xmm1,%xmm1
|
||||
movq 0x08(%rdi),%xmm3
|
||||
pshufd $0x00,%xmm3,%xmm2
|
||||
pshufd $0x55,%xmm3,%xmm3
|
||||
movq 0x10(%rdi),%xmm5
|
||||
pshufd $0x00,%xmm5,%xmm4
|
||||
pshufd $0x55,%xmm5,%xmm5
|
||||
movq 0x18(%rdi),%xmm7
|
||||
pshufd $0x00,%xmm7,%xmm6
|
||||
pshufd $0x55,%xmm7,%xmm7
|
||||
movq 0x20(%rdi),%xmm9
|
||||
pshufd $0x00,%xmm9,%xmm8
|
||||
pshufd $0x55,%xmm9,%xmm9
|
||||
movq 0x28(%rdi),%xmm11
|
||||
pshufd $0x00,%xmm11,%xmm10
|
||||
pshufd $0x55,%xmm11,%xmm11
|
||||
movq 0x30(%rdi),%xmm13
|
||||
pshufd $0x00,%xmm13,%xmm12
|
||||
pshufd $0x55,%xmm13,%xmm13
|
||||
movq 0x38(%rdi),%xmm15
|
||||
pshufd $0x00,%xmm15,%xmm14
|
||||
pshufd $0x55,%xmm15,%xmm15
|
||||
# x0..3 on stack
|
||||
movdqa %xmm0,0x00(%rsp)
|
||||
movdqa %xmm1,0x10(%rsp)
|
||||
movdqa %xmm2,0x20(%rsp)
|
||||
movdqa %xmm3,0x30(%rsp)
|
||||
|
||||
movdqa CTRINC(%rip),%xmm1
|
||||
movdqa ROT8(%rip),%xmm2
|
||||
movdqa ROT16(%rip),%xmm3
|
||||
|
||||
# x12 += counter values 0-3
|
||||
paddd %xmm1,%xmm12
|
||||
|
||||
mov $10,%ecx
|
||||
|
||||
.Ldoubleround4:
|
||||
# x0 += x4, x12 = rotl32(x12 ^ x0, 16)
|
||||
movdqa 0x00(%rsp),%xmm0
|
||||
paddd %xmm4,%xmm0
|
||||
movdqa %xmm0,0x00(%rsp)
|
||||
pxor %xmm0,%xmm12
|
||||
pshufb %xmm3,%xmm12
|
||||
# x1 += x5, x13 = rotl32(x13 ^ x1, 16)
|
||||
movdqa 0x10(%rsp),%xmm0
|
||||
paddd %xmm5,%xmm0
|
||||
movdqa %xmm0,0x10(%rsp)
|
||||
pxor %xmm0,%xmm13
|
||||
pshufb %xmm3,%xmm13
|
||||
# x2 += x6, x14 = rotl32(x14 ^ x2, 16)
|
||||
movdqa 0x20(%rsp),%xmm0
|
||||
paddd %xmm6,%xmm0
|
||||
movdqa %xmm0,0x20(%rsp)
|
||||
pxor %xmm0,%xmm14
|
||||
pshufb %xmm3,%xmm14
|
||||
# x3 += x7, x15 = rotl32(x15 ^ x3, 16)
|
||||
movdqa 0x30(%rsp),%xmm0
|
||||
paddd %xmm7,%xmm0
|
||||
movdqa %xmm0,0x30(%rsp)
|
||||
pxor %xmm0,%xmm15
|
||||
pshufb %xmm3,%xmm15
|
||||
|
||||
# x8 += x12, x4 = rotl32(x4 ^ x8, 12)
|
||||
paddd %xmm12,%xmm8
|
||||
pxor %xmm8,%xmm4
|
||||
movdqa %xmm4,%xmm0
|
||||
pslld $12,%xmm0
|
||||
psrld $20,%xmm4
|
||||
por %xmm0,%xmm4
|
||||
# x9 += x13, x5 = rotl32(x5 ^ x9, 12)
|
||||
paddd %xmm13,%xmm9
|
||||
pxor %xmm9,%xmm5
|
||||
movdqa %xmm5,%xmm0
|
||||
pslld $12,%xmm0
|
||||
psrld $20,%xmm5
|
||||
por %xmm0,%xmm5
|
||||
# x10 += x14, x6 = rotl32(x6 ^ x10, 12)
|
||||
paddd %xmm14,%xmm10
|
||||
pxor %xmm10,%xmm6
|
||||
movdqa %xmm6,%xmm0
|
||||
pslld $12,%xmm0
|
||||
psrld $20,%xmm6
|
||||
por %xmm0,%xmm6
|
||||
# x11 += x15, x7 = rotl32(x7 ^ x11, 12)
|
||||
paddd %xmm15,%xmm11
|
||||
pxor %xmm11,%xmm7
|
||||
movdqa %xmm7,%xmm0
|
||||
pslld $12,%xmm0
|
||||
psrld $20,%xmm7
|
||||
por %xmm0,%xmm7
|
||||
|
||||
# x0 += x4, x12 = rotl32(x12 ^ x0, 8)
|
||||
movdqa 0x00(%rsp),%xmm0
|
||||
paddd %xmm4,%xmm0
|
||||
movdqa %xmm0,0x00(%rsp)
|
||||
pxor %xmm0,%xmm12
|
||||
pshufb %xmm2,%xmm12
|
||||
# x1 += x5, x13 = rotl32(x13 ^ x1, 8)
|
||||
movdqa 0x10(%rsp),%xmm0
|
||||
paddd %xmm5,%xmm0
|
||||
movdqa %xmm0,0x10(%rsp)
|
||||
pxor %xmm0,%xmm13
|
||||
pshufb %xmm2,%xmm13
|
||||
# x2 += x6, x14 = rotl32(x14 ^ x2, 8)
|
||||
movdqa 0x20(%rsp),%xmm0
|
||||
paddd %xmm6,%xmm0
|
||||
movdqa %xmm0,0x20(%rsp)
|
||||
pxor %xmm0,%xmm14
|
||||
pshufb %xmm2,%xmm14
|
||||
# x3 += x7, x15 = rotl32(x15 ^ x3, 8)
|
||||
movdqa 0x30(%rsp),%xmm0
|
||||
paddd %xmm7,%xmm0
|
||||
movdqa %xmm0,0x30(%rsp)
|
||||
pxor %xmm0,%xmm15
|
||||
pshufb %xmm2,%xmm15
|
||||
|
||||
# x8 += x12, x4 = rotl32(x4 ^ x8, 7)
|
||||
paddd %xmm12,%xmm8
|
||||
pxor %xmm8,%xmm4
|
||||
movdqa %xmm4,%xmm0
|
||||
pslld $7,%xmm0
|
||||
psrld $25,%xmm4
|
||||
por %xmm0,%xmm4
|
||||
# x9 += x13, x5 = rotl32(x5 ^ x9, 7)
|
||||
paddd %xmm13,%xmm9
|
||||
pxor %xmm9,%xmm5
|
||||
movdqa %xmm5,%xmm0
|
||||
pslld $7,%xmm0
|
||||
psrld $25,%xmm5
|
||||
por %xmm0,%xmm5
|
||||
# x10 += x14, x6 = rotl32(x6 ^ x10, 7)
|
||||
paddd %xmm14,%xmm10
|
||||
pxor %xmm10,%xmm6
|
||||
movdqa %xmm6,%xmm0
|
||||
pslld $7,%xmm0
|
||||
psrld $25,%xmm6
|
||||
por %xmm0,%xmm6
|
||||
# x11 += x15, x7 = rotl32(x7 ^ x11, 7)
|
||||
paddd %xmm15,%xmm11
|
||||
pxor %xmm11,%xmm7
|
||||
movdqa %xmm7,%xmm0
|
||||
pslld $7,%xmm0
|
||||
psrld $25,%xmm7
|
||||
por %xmm0,%xmm7
|
||||
|
||||
# x0 += x5, x15 = rotl32(x15 ^ x0, 16)
|
||||
movdqa 0x00(%rsp),%xmm0
|
||||
paddd %xmm5,%xmm0
|
||||
movdqa %xmm0,0x00(%rsp)
|
||||
pxor %xmm0,%xmm15
|
||||
pshufb %xmm3,%xmm15
|
||||
# x1 += x6, x12 = rotl32(x12 ^ x1, 16)
|
||||
movdqa 0x10(%rsp),%xmm0
|
||||
paddd %xmm6,%xmm0
|
||||
movdqa %xmm0,0x10(%rsp)
|
||||
pxor %xmm0,%xmm12
|
||||
pshufb %xmm3,%xmm12
|
||||
# x2 += x7, x13 = rotl32(x13 ^ x2, 16)
|
||||
movdqa 0x20(%rsp),%xmm0
|
||||
paddd %xmm7,%xmm0
|
||||
movdqa %xmm0,0x20(%rsp)
|
||||
pxor %xmm0,%xmm13
|
||||
pshufb %xmm3,%xmm13
|
||||
# x3 += x4, x14 = rotl32(x14 ^ x3, 16)
|
||||
movdqa 0x30(%rsp),%xmm0
|
||||
paddd %xmm4,%xmm0
|
||||
movdqa %xmm0,0x30(%rsp)
|
||||
pxor %xmm0,%xmm14
|
||||
pshufb %xmm3,%xmm14
|
||||
|
||||
# x10 += x15, x5 = rotl32(x5 ^ x10, 12)
|
||||
paddd %xmm15,%xmm10
|
||||
pxor %xmm10,%xmm5
|
||||
movdqa %xmm5,%xmm0
|
||||
pslld $12,%xmm0
|
||||
psrld $20,%xmm5
|
||||
por %xmm0,%xmm5
|
||||
# x11 += x12, x6 = rotl32(x6 ^ x11, 12)
|
||||
paddd %xmm12,%xmm11
|
||||
pxor %xmm11,%xmm6
|
||||
movdqa %xmm6,%xmm0
|
||||
pslld $12,%xmm0
|
||||
psrld $20,%xmm6
|
||||
por %xmm0,%xmm6
|
||||
# x8 += x13, x7 = rotl32(x7 ^ x8, 12)
|
||||
paddd %xmm13,%xmm8
|
||||
pxor %xmm8,%xmm7
|
||||
movdqa %xmm7,%xmm0
|
||||
pslld $12,%xmm0
|
||||
psrld $20,%xmm7
|
||||
por %xmm0,%xmm7
|
||||
# x9 += x14, x4 = rotl32(x4 ^ x9, 12)
|
||||
paddd %xmm14,%xmm9
|
||||
pxor %xmm9,%xmm4
|
||||
movdqa %xmm4,%xmm0
|
||||
pslld $12,%xmm0
|
||||
psrld $20,%xmm4
|
||||
por %xmm0,%xmm4
|
||||
|
||||
# x0 += x5, x15 = rotl32(x15 ^ x0, 8)
|
||||
movdqa 0x00(%rsp),%xmm0
|
||||
paddd %xmm5,%xmm0
|
||||
movdqa %xmm0,0x00(%rsp)
|
||||
pxor %xmm0,%xmm15
|
||||
pshufb %xmm2,%xmm15
|
||||
# x1 += x6, x12 = rotl32(x12 ^ x1, 8)
|
||||
movdqa 0x10(%rsp),%xmm0
|
||||
paddd %xmm6,%xmm0
|
||||
movdqa %xmm0,0x10(%rsp)
|
||||
pxor %xmm0,%xmm12
|
||||
pshufb %xmm2,%xmm12
|
||||
# x2 += x7, x13 = rotl32(x13 ^ x2, 8)
|
||||
movdqa 0x20(%rsp),%xmm0
|
||||
paddd %xmm7,%xmm0
|
||||
movdqa %xmm0,0x20(%rsp)
|
||||
pxor %xmm0,%xmm13
|
||||
pshufb %xmm2,%xmm13
|
||||
# x3 += x4, x14 = rotl32(x14 ^ x3, 8)
|
||||
movdqa 0x30(%rsp),%xmm0
|
||||
paddd %xmm4,%xmm0
|
||||
movdqa %xmm0,0x30(%rsp)
|
||||
pxor %xmm0,%xmm14
|
||||
pshufb %xmm2,%xmm14
|
||||
|
||||
# x10 += x15, x5 = rotl32(x5 ^ x10, 7)
|
||||
paddd %xmm15,%xmm10
|
||||
pxor %xmm10,%xmm5
|
||||
movdqa %xmm5,%xmm0
|
||||
pslld $7,%xmm0
|
||||
psrld $25,%xmm5
|
||||
por %xmm0,%xmm5
|
||||
# x11 += x12, x6 = rotl32(x6 ^ x11, 7)
|
||||
paddd %xmm12,%xmm11
|
||||
pxor %xmm11,%xmm6
|
||||
movdqa %xmm6,%xmm0
|
||||
pslld $7,%xmm0
|
||||
psrld $25,%xmm6
|
||||
por %xmm0,%xmm6
|
||||
# x8 += x13, x7 = rotl32(x7 ^ x8, 7)
|
||||
paddd %xmm13,%xmm8
|
||||
pxor %xmm8,%xmm7
|
||||
movdqa %xmm7,%xmm0
|
||||
pslld $7,%xmm0
|
||||
psrld $25,%xmm7
|
||||
por %xmm0,%xmm7
|
||||
# x9 += x14, x4 = rotl32(x4 ^ x9, 7)
|
||||
paddd %xmm14,%xmm9
|
||||
pxor %xmm9,%xmm4
|
||||
movdqa %xmm4,%xmm0
|
||||
pslld $7,%xmm0
|
||||
psrld $25,%xmm4
|
||||
por %xmm0,%xmm4
|
||||
|
||||
dec %ecx
|
||||
jnz .Ldoubleround4
|
||||
|
||||
# x0[0-3] += s0[0]
|
||||
# x1[0-3] += s0[1]
|
||||
movq 0x00(%rdi),%xmm3
|
||||
pshufd $0x00,%xmm3,%xmm2
|
||||
pshufd $0x55,%xmm3,%xmm3
|
||||
paddd 0x00(%rsp),%xmm2
|
||||
movdqa %xmm2,0x00(%rsp)
|
||||
paddd 0x10(%rsp),%xmm3
|
||||
movdqa %xmm3,0x10(%rsp)
|
||||
# x2[0-3] += s0[2]
|
||||
# x3[0-3] += s0[3]
|
||||
movq 0x08(%rdi),%xmm3
|
||||
pshufd $0x00,%xmm3,%xmm2
|
||||
pshufd $0x55,%xmm3,%xmm3
|
||||
paddd 0x20(%rsp),%xmm2
|
||||
movdqa %xmm2,0x20(%rsp)
|
||||
paddd 0x30(%rsp),%xmm3
|
||||
movdqa %xmm3,0x30(%rsp)
|
||||
|
||||
# x4[0-3] += s1[0]
|
||||
# x5[0-3] += s1[1]
|
||||
movq 0x10(%rdi),%xmm3
|
||||
pshufd $0x00,%xmm3,%xmm2
|
||||
pshufd $0x55,%xmm3,%xmm3
|
||||
paddd %xmm2,%xmm4
|
||||
paddd %xmm3,%xmm5
|
||||
# x6[0-3] += s1[2]
|
||||
# x7[0-3] += s1[3]
|
||||
movq 0x18(%rdi),%xmm3
|
||||
pshufd $0x00,%xmm3,%xmm2
|
||||
pshufd $0x55,%xmm3,%xmm3
|
||||
paddd %xmm2,%xmm6
|
||||
paddd %xmm3,%xmm7
|
||||
|
||||
# x8[0-3] += s2[0]
|
||||
# x9[0-3] += s2[1]
|
||||
movq 0x20(%rdi),%xmm3
|
||||
pshufd $0x00,%xmm3,%xmm2
|
||||
pshufd $0x55,%xmm3,%xmm3
|
||||
paddd %xmm2,%xmm8
|
||||
paddd %xmm3,%xmm9
|
||||
# x10[0-3] += s2[2]
|
||||
# x11[0-3] += s2[3]
|
||||
movq 0x28(%rdi),%xmm3
|
||||
pshufd $0x00,%xmm3,%xmm2
|
||||
pshufd $0x55,%xmm3,%xmm3
|
||||
paddd %xmm2,%xmm10
|
||||
paddd %xmm3,%xmm11
|
||||
|
||||
# x12[0-3] += s3[0]
|
||||
# x13[0-3] += s3[1]
|
||||
movq 0x30(%rdi),%xmm3
|
||||
pshufd $0x00,%xmm3,%xmm2
|
||||
pshufd $0x55,%xmm3,%xmm3
|
||||
paddd %xmm2,%xmm12
|
||||
paddd %xmm3,%xmm13
|
||||
# x14[0-3] += s3[2]
|
||||
# x15[0-3] += s3[3]
|
||||
movq 0x38(%rdi),%xmm3
|
||||
pshufd $0x00,%xmm3,%xmm2
|
||||
pshufd $0x55,%xmm3,%xmm3
|
||||
paddd %xmm2,%xmm14
|
||||
paddd %xmm3,%xmm15
|
||||
|
||||
# x12 += counter values 0-3
|
||||
paddd %xmm1,%xmm12
|
||||
|
||||
# interleave 32-bit words in state n, n+1
|
||||
movdqa 0x00(%rsp),%xmm0
|
||||
movdqa 0x10(%rsp),%xmm1
|
||||
movdqa %xmm0,%xmm2
|
||||
punpckldq %xmm1,%xmm2
|
||||
punpckhdq %xmm1,%xmm0
|
||||
movdqa %xmm2,0x00(%rsp)
|
||||
movdqa %xmm0,0x10(%rsp)
|
||||
movdqa 0x20(%rsp),%xmm0
|
||||
movdqa 0x30(%rsp),%xmm1
|
||||
movdqa %xmm0,%xmm2
|
||||
punpckldq %xmm1,%xmm2
|
||||
punpckhdq %xmm1,%xmm0
|
||||
movdqa %xmm2,0x20(%rsp)
|
||||
movdqa %xmm0,0x30(%rsp)
|
||||
movdqa %xmm4,%xmm0
|
||||
punpckldq %xmm5,%xmm4
|
||||
punpckhdq %xmm5,%xmm0
|
||||
movdqa %xmm0,%xmm5
|
||||
movdqa %xmm6,%xmm0
|
||||
punpckldq %xmm7,%xmm6
|
||||
punpckhdq %xmm7,%xmm0
|
||||
movdqa %xmm0,%xmm7
|
||||
movdqa %xmm8,%xmm0
|
||||
punpckldq %xmm9,%xmm8
|
||||
punpckhdq %xmm9,%xmm0
|
||||
movdqa %xmm0,%xmm9
|
||||
movdqa %xmm10,%xmm0
|
||||
punpckldq %xmm11,%xmm10
|
||||
punpckhdq %xmm11,%xmm0
|
||||
movdqa %xmm0,%xmm11
|
||||
movdqa %xmm12,%xmm0
|
||||
punpckldq %xmm13,%xmm12
|
||||
punpckhdq %xmm13,%xmm0
|
||||
movdqa %xmm0,%xmm13
|
||||
movdqa %xmm14,%xmm0
|
||||
punpckldq %xmm15,%xmm14
|
||||
punpckhdq %xmm15,%xmm0
|
||||
movdqa %xmm0,%xmm15
|
||||
|
||||
# interleave 64-bit words in state n, n+2
|
||||
movdqa 0x00(%rsp),%xmm0
|
||||
movdqa 0x20(%rsp),%xmm1
|
||||
movdqa %xmm0,%xmm2
|
||||
punpcklqdq %xmm1,%xmm2
|
||||
punpckhqdq %xmm1,%xmm0
|
||||
movdqa %xmm2,0x00(%rsp)
|
||||
movdqa %xmm0,0x20(%rsp)
|
||||
movdqa 0x10(%rsp),%xmm0
|
||||
movdqa 0x30(%rsp),%xmm1
|
||||
movdqa %xmm0,%xmm2
|
||||
punpcklqdq %xmm1,%xmm2
|
||||
punpckhqdq %xmm1,%xmm0
|
||||
movdqa %xmm2,0x10(%rsp)
|
||||
movdqa %xmm0,0x30(%rsp)
|
||||
movdqa %xmm4,%xmm0
|
||||
punpcklqdq %xmm6,%xmm4
|
||||
punpckhqdq %xmm6,%xmm0
|
||||
movdqa %xmm0,%xmm6
|
||||
movdqa %xmm5,%xmm0
|
||||
punpcklqdq %xmm7,%xmm5
|
||||
punpckhqdq %xmm7,%xmm0
|
||||
movdqa %xmm0,%xmm7
|
||||
movdqa %xmm8,%xmm0
|
||||
punpcklqdq %xmm10,%xmm8
|
||||
punpckhqdq %xmm10,%xmm0
|
||||
movdqa %xmm0,%xmm10
|
||||
movdqa %xmm9,%xmm0
|
||||
punpcklqdq %xmm11,%xmm9
|
||||
punpckhqdq %xmm11,%xmm0
|
||||
movdqa %xmm0,%xmm11
|
||||
movdqa %xmm12,%xmm0
|
||||
punpcklqdq %xmm14,%xmm12
|
||||
punpckhqdq %xmm14,%xmm0
|
||||
movdqa %xmm0,%xmm14
|
||||
movdqa %xmm13,%xmm0
|
||||
punpcklqdq %xmm15,%xmm13
|
||||
punpckhqdq %xmm15,%xmm0
|
||||
movdqa %xmm0,%xmm15
|
||||
|
||||
# xor with corresponding input, write to output
|
||||
movdqa 0x00(%rsp),%xmm0
|
||||
movdqu 0x00(%rdx),%xmm1
|
||||
pxor %xmm1,%xmm0
|
||||
movdqu %xmm0,0x00(%rsi)
|
||||
movdqa 0x10(%rsp),%xmm0
|
||||
movdqu 0x80(%rdx),%xmm1
|
||||
pxor %xmm1,%xmm0
|
||||
movdqu %xmm0,0x80(%rsi)
|
||||
movdqa 0x20(%rsp),%xmm0
|
||||
movdqu 0x40(%rdx),%xmm1
|
||||
pxor %xmm1,%xmm0
|
||||
movdqu %xmm0,0x40(%rsi)
|
||||
movdqa 0x30(%rsp),%xmm0
|
||||
movdqu 0xc0(%rdx),%xmm1
|
||||
pxor %xmm1,%xmm0
|
||||
movdqu %xmm0,0xc0(%rsi)
|
||||
movdqu 0x10(%rdx),%xmm1
|
||||
pxor %xmm1,%xmm4
|
||||
movdqu %xmm4,0x10(%rsi)
|
||||
movdqu 0x90(%rdx),%xmm1
|
||||
pxor %xmm1,%xmm5
|
||||
movdqu %xmm5,0x90(%rsi)
|
||||
movdqu 0x50(%rdx),%xmm1
|
||||
pxor %xmm1,%xmm6
|
||||
movdqu %xmm6,0x50(%rsi)
|
||||
movdqu 0xd0(%rdx),%xmm1
|
||||
pxor %xmm1,%xmm7
|
||||
movdqu %xmm7,0xd0(%rsi)
|
||||
movdqu 0x20(%rdx),%xmm1
|
||||
pxor %xmm1,%xmm8
|
||||
movdqu %xmm8,0x20(%rsi)
|
||||
movdqu 0xa0(%rdx),%xmm1
|
||||
pxor %xmm1,%xmm9
|
||||
movdqu %xmm9,0xa0(%rsi)
|
||||
movdqu 0x60(%rdx),%xmm1
|
||||
pxor %xmm1,%xmm10
|
||||
movdqu %xmm10,0x60(%rsi)
|
||||
movdqu 0xe0(%rdx),%xmm1
|
||||
pxor %xmm1,%xmm11
|
||||
movdqu %xmm11,0xe0(%rsi)
|
||||
movdqu 0x30(%rdx),%xmm1
|
||||
pxor %xmm1,%xmm12
|
||||
movdqu %xmm12,0x30(%rsi)
|
||||
movdqu 0xb0(%rdx),%xmm1
|
||||
pxor %xmm1,%xmm13
|
||||
movdqu %xmm13,0xb0(%rsi)
|
||||
movdqu 0x70(%rdx),%xmm1
|
||||
pxor %xmm1,%xmm14
|
||||
movdqu %xmm14,0x70(%rsi)
|
||||
movdqu 0xf0(%rdx),%xmm1
|
||||
pxor %xmm1,%xmm15
|
||||
movdqu %xmm15,0xf0(%rsi)
|
||||
|
||||
add $0x40,%rsp
|
||||
ret
|
||||
ENDPROC(chacha20_4block_xor_ssse3)
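
The two interleave passes above ("interleave 32-bit words in state n, n+1", then "interleave 64-bit words in state n, n+2") implement a 4x4 transpose of 32-bit words: registers that each held one state word across the four blocks end up holding four consecutive words of a single block, ready to be XORed against the input stream. A scalar model of that data movement (illustrative only; the helper names are not from the kernel, unpack_lo/hi_32 stand for punpckldq/punpckhdq and unpack_lo/hi_64 for punpcklqdq/punpckhqdq):

#include <assert.h>
#include <stdint.h>
#include <string.h>

typedef struct { uint32_t w[4]; } vec4;

static vec4 unpack_lo_32(vec4 a, vec4 b)
{
	return (vec4){ { a.w[0], b.w[0], a.w[1], b.w[1] } };
}

static vec4 unpack_hi_32(vec4 a, vec4 b)
{
	return (vec4){ { a.w[2], b.w[2], a.w[3], b.w[3] } };
}

static vec4 unpack_lo_64(vec4 a, vec4 b)
{
	return (vec4){ { a.w[0], a.w[1], b.w[0], b.w[1] } };
}

static vec4 unpack_hi_64(vec4 a, vec4 b)
{
	return (vec4){ { a.w[2], a.w[3], b.w[2], b.w[3] } };
}

int main(void)
{
	vec4 x[4] = { { { 0, 1, 2, 3 } }, { { 4, 5, 6, 7 } },
		      { { 8, 9, 10, 11 } }, { { 12, 13, 14, 15 } } };

	/* pass 1: interleave 32-bit words in rows n, n+1 */
	vec4 a0 = unpack_lo_32(x[0], x[1]), a1 = unpack_hi_32(x[0], x[1]);
	vec4 a2 = unpack_lo_32(x[2], x[3]), a3 = unpack_hi_32(x[2], x[3]);

	/* pass 2: interleave 64-bit words in rows n, n+2 */
	vec4 c0 = unpack_lo_64(a0, a2), c1 = unpack_hi_64(a0, a2);
	vec4 c2 = unpack_lo_64(a1, a3), c3 = unpack_hi_64(a1, a3);

	/* c0..c3 now hold the columns of the original 4x4 block */
	uint32_t cols[4][4] = { { 0, 4, 8, 12 }, { 1, 5, 9, 13 },
				{ 2, 6, 10, 14 }, { 3, 7, 11, 15 } };
	assert(!memcmp(&c0, cols[0], 16) && !memcmp(&c1, cols[1], 16));
	assert(!memcmp(&c2, cols[2], 16) && !memcmp(&c3, cols[3], 16));
	return 0;
}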
|
150
arch/x86/crypto/chacha20_glue.c
Normal file
@ -0,0 +1,150 @@
|
||||
/*
|
||||
* ChaCha20 256-bit cipher algorithm, RFC7539, SIMD glue code
|
||||
*
|
||||
* Copyright (C) 2015 Martin Willi
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*/
|
||||
|
||||
#include <crypto/algapi.h>
|
||||
#include <crypto/chacha20.h>
|
||||
#include <linux/crypto.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <asm/fpu/api.h>
|
||||
#include <asm/simd.h>
|
||||
|
||||
#define CHACHA20_STATE_ALIGN 16
|
||||
|
||||
asmlinkage void chacha20_block_xor_ssse3(u32 *state, u8 *dst, const u8 *src);
|
||||
asmlinkage void chacha20_4block_xor_ssse3(u32 *state, u8 *dst, const u8 *src);
|
||||
#ifdef CONFIG_AS_AVX2
|
||||
asmlinkage void chacha20_8block_xor_avx2(u32 *state, u8 *dst, const u8 *src);
|
||||
static bool chacha20_use_avx2;
|
||||
#endif
|
||||
|
||||
static void chacha20_dosimd(u32 *state, u8 *dst, const u8 *src,
|
||||
unsigned int bytes)
|
||||
{
|
||||
u8 buf[CHACHA20_BLOCK_SIZE];
|
||||
|
||||
#ifdef CONFIG_AS_AVX2
|
||||
if (chacha20_use_avx2) {
|
||||
while (bytes >= CHACHA20_BLOCK_SIZE * 8) {
|
||||
chacha20_8block_xor_avx2(state, dst, src);
|
||||
bytes -= CHACHA20_BLOCK_SIZE * 8;
|
||||
src += CHACHA20_BLOCK_SIZE * 8;
|
||||
dst += CHACHA20_BLOCK_SIZE * 8;
|
||||
state[12] += 8;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
while (bytes >= CHACHA20_BLOCK_SIZE * 4) {
|
||||
chacha20_4block_xor_ssse3(state, dst, src);
|
||||
bytes -= CHACHA20_BLOCK_SIZE * 4;
|
||||
src += CHACHA20_BLOCK_SIZE * 4;
|
||||
dst += CHACHA20_BLOCK_SIZE * 4;
|
||||
state[12] += 4;
|
||||
}
|
||||
while (bytes >= CHACHA20_BLOCK_SIZE) {
|
||||
chacha20_block_xor_ssse3(state, dst, src);
|
||||
bytes -= CHACHA20_BLOCK_SIZE;
|
||||
src += CHACHA20_BLOCK_SIZE;
|
||||
dst += CHACHA20_BLOCK_SIZE;
|
||||
state[12]++;
|
||||
}
|
||||
if (bytes) {
|
||||
memcpy(buf, src, bytes);
|
||||
chacha20_block_xor_ssse3(state, buf, buf);
|
||||
memcpy(dst, buf, bytes);
|
||||
}
|
||||
}
|
||||
|
||||
static int chacha20_simd(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
{
|
||||
u32 *state, state_buf[16 + (CHACHA20_STATE_ALIGN / sizeof(u32)) - 1];
|
||||
struct blkcipher_walk walk;
|
||||
int err;
|
||||
|
||||
if (!may_use_simd())
|
||||
return crypto_chacha20_crypt(desc, dst, src, nbytes);
|
||||
|
||||
state = (u32 *)roundup((uintptr_t)state_buf, CHACHA20_STATE_ALIGN);
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
err = blkcipher_walk_virt_block(desc, &walk, CHACHA20_BLOCK_SIZE);
|
||||
|
||||
crypto_chacha20_init(state, crypto_blkcipher_ctx(desc->tfm), walk.iv);
|
||||
|
||||
kernel_fpu_begin();
|
||||
|
||||
while (walk.nbytes >= CHACHA20_BLOCK_SIZE) {
|
||||
chacha20_dosimd(state, walk.dst.virt.addr, walk.src.virt.addr,
|
||||
rounddown(walk.nbytes, CHACHA20_BLOCK_SIZE));
|
||||
err = blkcipher_walk_done(desc, &walk,
|
||||
walk.nbytes % CHACHA20_BLOCK_SIZE);
|
||||
}
|
||||
|
||||
if (walk.nbytes) {
|
||||
chacha20_dosimd(state, walk.dst.virt.addr, walk.src.virt.addr,
|
||||
walk.nbytes);
|
||||
err = blkcipher_walk_done(desc, &walk, 0);
|
||||
}
|
||||
|
||||
kernel_fpu_end();
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static struct crypto_alg alg = {
|
||||
.cra_name = "chacha20",
|
||||
.cra_driver_name = "chacha20-simd",
|
||||
.cra_priority = 300,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
|
||||
.cra_blocksize = 1,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_ctxsize = sizeof(struct chacha20_ctx),
|
||||
.cra_alignmask = sizeof(u32) - 1,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = CHACHA20_KEY_SIZE,
|
||||
.max_keysize = CHACHA20_KEY_SIZE,
|
||||
.ivsize = CHACHA20_IV_SIZE,
|
||||
.geniv = "seqiv",
|
||||
.setkey = crypto_chacha20_setkey,
|
||||
.encrypt = chacha20_simd,
|
||||
.decrypt = chacha20_simd,
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
static int __init chacha20_simd_mod_init(void)
|
||||
{
|
||||
if (!cpu_has_ssse3)
|
||||
return -ENODEV;
|
||||
|
||||
#ifdef CONFIG_AS_AVX2
|
||||
chacha20_use_avx2 = cpu_has_avx && cpu_has_avx2 &&
|
||||
cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, NULL);
|
||||
#endif
|
||||
return crypto_register_alg(&alg);
|
||||
}
|
||||
|
||||
static void __exit chacha20_simd_mod_fini(void)
|
||||
{
|
||||
crypto_unregister_alg(&alg);
|
||||
}
|
||||
|
||||
module_init(chacha20_simd_mod_init);
|
||||
module_exit(chacha20_simd_mod_fini);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_AUTHOR("Martin Willi <martin@strongswan.org>");
|
||||
MODULE_DESCRIPTION("chacha20 cipher algorithm, SIMD accelerated");
|
||||
MODULE_ALIAS_CRYPTO("chacha20");
|
||||
MODULE_ALIAS_CRYPTO("chacha20-simd");
|
386
arch/x86/crypto/poly1305-avx2-x86_64.S
Normal file
@ -0,0 +1,386 @@
|
||||
/*
|
||||
* Poly1305 authenticator algorithm, RFC7539, x64 AVX2 functions
|
||||
*
|
||||
* Copyright (C) 2015 Martin Willi
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*/
|
||||
|
||||
#include <linux/linkage.h>
|
||||
|
||||
.data
|
||||
.align 32
|
||||
|
||||
ANMASK: .octa 0x0000000003ffffff0000000003ffffff
|
||||
.octa 0x0000000003ffffff0000000003ffffff
|
||||
ORMASK: .octa 0x00000000010000000000000001000000
|
||||
.octa 0x00000000010000000000000001000000
|
||||
|
||||
.text
|
||||
|
||||
#define h0 0x00(%rdi)
|
||||
#define h1 0x04(%rdi)
|
||||
#define h2 0x08(%rdi)
|
||||
#define h3 0x0c(%rdi)
|
||||
#define h4 0x10(%rdi)
|
||||
#define r0 0x00(%rdx)
|
||||
#define r1 0x04(%rdx)
|
||||
#define r2 0x08(%rdx)
|
||||
#define r3 0x0c(%rdx)
|
||||
#define r4 0x10(%rdx)
|
||||
#define u0 0x00(%r8)
|
||||
#define u1 0x04(%r8)
|
||||
#define u2 0x08(%r8)
|
||||
#define u3 0x0c(%r8)
|
||||
#define u4 0x10(%r8)
|
||||
#define w0 0x14(%r8)
|
||||
#define w1 0x18(%r8)
|
||||
#define w2 0x1c(%r8)
|
||||
#define w3 0x20(%r8)
|
||||
#define w4 0x24(%r8)
|
||||
#define y0 0x28(%r8)
|
||||
#define y1 0x2c(%r8)
|
||||
#define y2 0x30(%r8)
|
||||
#define y3 0x34(%r8)
|
||||
#define y4 0x38(%r8)
|
||||
#define m %rsi
|
||||
#define hc0 %ymm0
|
||||
#define hc1 %ymm1
|
||||
#define hc2 %ymm2
|
||||
#define hc3 %ymm3
|
||||
#define hc4 %ymm4
|
||||
#define hc0x %xmm0
|
||||
#define hc1x %xmm1
|
||||
#define hc2x %xmm2
|
||||
#define hc3x %xmm3
|
||||
#define hc4x %xmm4
|
||||
#define t1 %ymm5
|
||||
#define t2 %ymm6
|
||||
#define t1x %xmm5
|
||||
#define t2x %xmm6
|
||||
#define ruwy0 %ymm7
|
||||
#define ruwy1 %ymm8
|
||||
#define ruwy2 %ymm9
|
||||
#define ruwy3 %ymm10
|
||||
#define ruwy4 %ymm11
|
||||
#define ruwy0x %xmm7
|
||||
#define ruwy1x %xmm8
|
||||
#define ruwy2x %xmm9
|
||||
#define ruwy3x %xmm10
|
||||
#define ruwy4x %xmm11
|
||||
#define svxz1 %ymm12
|
||||
#define svxz2 %ymm13
|
||||
#define svxz3 %ymm14
|
||||
#define svxz4 %ymm15
|
||||
#define d0 %r9
|
||||
#define d1 %r10
|
||||
#define d2 %r11
|
||||
#define d3 %r12
|
||||
#define d4 %r13
|
||||
|
||||
ENTRY(poly1305_4block_avx2)
|
||||
# %rdi: Accumulator h[5]
|
||||
# %rsi: 64 byte input block m
|
||||
# %rdx: Poly1305 key r[5]
|
||||
# %rcx: Quadblock count
|
||||
# %r8: Poly1305 derived key r^2 u[5], r^3 w[5], r^4 y[5]
|
||||
|
||||
# This four-block variant uses loop unrolled block processing. It
|
||||
# requires 4 Poly1305 keys: r, r^2, r^3 and r^4:
|
||||
# h = (h + m) * r => h = (h + m1) * r^4 + m2 * r^3 + m3 * r^2 + m4 * r
|
||||
|
||||
vzeroupper
|
||||
push %rbx
|
||||
push %r12
|
||||
push %r13
|
||||
|
||||
# combine r0,u0,w0,y0
|
||||
vmovd y0,ruwy0x
|
||||
vmovd w0,t1x
|
||||
vpunpcklqdq t1,ruwy0,ruwy0
|
||||
vmovd u0,t1x
|
||||
vmovd r0,t2x
|
||||
vpunpcklqdq t2,t1,t1
|
||||
vperm2i128 $0x20,t1,ruwy0,ruwy0
|
||||
|
||||
# combine r1,u1,w1,y1 and s1=r1*5,v1=u1*5,x1=w1*5,z1=y1*5
|
||||
vmovd y1,ruwy1x
|
||||
vmovd w1,t1x
|
||||
vpunpcklqdq t1,ruwy1,ruwy1
|
||||
vmovd u1,t1x
|
||||
vmovd r1,t2x
|
||||
vpunpcklqdq t2,t1,t1
|
||||
vperm2i128 $0x20,t1,ruwy1,ruwy1
|
||||
vpslld $2,ruwy1,svxz1
|
||||
vpaddd ruwy1,svxz1,svxz1
|
||||
|
||||
# combine r2,u2,w2,y2 and s2=r2*5,v2=u2*5,x2=w2*5,z2=y2*5
|
||||
vmovd y2,ruwy2x
|
||||
vmovd w2,t1x
|
||||
vpunpcklqdq t1,ruwy2,ruwy2
|
||||
vmovd u2,t1x
|
||||
vmovd r2,t2x
|
||||
vpunpcklqdq t2,t1,t1
|
||||
vperm2i128 $0x20,t1,ruwy2,ruwy2
|
||||
vpslld $2,ruwy2,svxz2
|
||||
vpaddd ruwy2,svxz2,svxz2
|
||||
|
||||
# combine r3,u3,w3,y3 and s3=r3*5,v3=u3*5,x3=w3*5,z3=y3*5
|
||||
vmovd y3,ruwy3x
|
||||
vmovd w3,t1x
|
||||
vpunpcklqdq t1,ruwy3,ruwy3
|
||||
vmovd u3,t1x
|
||||
vmovd r3,t2x
|
||||
vpunpcklqdq t2,t1,t1
|
||||
vperm2i128 $0x20,t1,ruwy3,ruwy3
|
||||
vpslld $2,ruwy3,svxz3
|
||||
vpaddd ruwy3,svxz3,svxz3
|
||||
|
||||
# combine r4,u4,w4,y4 and s4=r4*5,v4=u4*5,x4=w4*5,z4=y4*5
|
||||
vmovd y4,ruwy4x
|
||||
vmovd w4,t1x
|
||||
vpunpcklqdq t1,ruwy4,ruwy4
|
||||
vmovd u4,t1x
|
||||
vmovd r4,t2x
|
||||
vpunpcklqdq t2,t1,t1
|
||||
vperm2i128 $0x20,t1,ruwy4,ruwy4
|
||||
vpslld $2,ruwy4,svxz4
|
||||
vpaddd ruwy4,svxz4,svxz4
|
||||
|
||||
.Ldoblock4:
|
||||
# hc0 = [m[48-51] & 0x3ffffff, m[32-35] & 0x3ffffff,
|
||||
# m[16-19] & 0x3ffffff, m[ 0- 3] & 0x3ffffff + h0]
|
||||
vmovd 0x00(m),hc0x
|
||||
vmovd 0x10(m),t1x
|
||||
vpunpcklqdq t1,hc0,hc0
|
||||
vmovd 0x20(m),t1x
|
||||
vmovd 0x30(m),t2x
|
||||
vpunpcklqdq t2,t1,t1
|
||||
vperm2i128 $0x20,t1,hc0,hc0
|
||||
vpand ANMASK(%rip),hc0,hc0
|
||||
vmovd h0,t1x
|
||||
vpaddd t1,hc0,hc0
|
||||
# hc1 = [(m[51-54] >> 2) & 0x3ffffff, (m[35-38] >> 2) & 0x3ffffff,
|
||||
# (m[19-22] >> 2) & 0x3ffffff, (m[ 3- 6] >> 2) & 0x3ffffff + h1]
|
||||
vmovd 0x03(m),hc1x
|
||||
vmovd 0x13(m),t1x
|
||||
vpunpcklqdq t1,hc1,hc1
|
||||
vmovd 0x23(m),t1x
|
||||
vmovd 0x33(m),t2x
|
||||
vpunpcklqdq t2,t1,t1
|
||||
vperm2i128 $0x20,t1,hc1,hc1
|
||||
vpsrld $2,hc1,hc1
|
||||
vpand ANMASK(%rip),hc1,hc1
|
||||
vmovd h1,t1x
|
||||
vpaddd t1,hc1,hc1
|
||||
# hc2 = [(m[54-57] >> 4) & 0x3ffffff, (m[38-41] >> 4) & 0x3ffffff,
|
||||
# (m[22-25] >> 4) & 0x3ffffff, (m[ 6- 9] >> 4) & 0x3ffffff + h2]
|
||||
vmovd 0x06(m),hc2x
|
||||
vmovd 0x16(m),t1x
|
||||
vpunpcklqdq t1,hc2,hc2
|
||||
vmovd 0x26(m),t1x
|
||||
vmovd 0x36(m),t2x
|
||||
vpunpcklqdq t2,t1,t1
|
||||
vperm2i128 $0x20,t1,hc2,hc2
|
||||
vpsrld $4,hc2,hc2
|
||||
vpand ANMASK(%rip),hc2,hc2
|
||||
vmovd h2,t1x
|
||||
vpaddd t1,hc2,hc2
|
||||
# hc3 = [(m[57-60] >> 6) & 0x3ffffff, (m[41-44] >> 6) & 0x3ffffff,
|
||||
# (m[25-28] >> 6) & 0x3ffffff, (m[ 9-12] >> 6) & 0x3ffffff + h3]
|
||||
vmovd 0x09(m),hc3x
|
||||
vmovd 0x19(m),t1x
|
||||
vpunpcklqdq t1,hc3,hc3
|
||||
vmovd 0x29(m),t1x
|
||||
vmovd 0x39(m),t2x
|
||||
vpunpcklqdq t2,t1,t1
|
||||
vperm2i128 $0x20,t1,hc3,hc3
|
||||
vpsrld $6,hc3,hc3
|
||||
vpand ANMASK(%rip),hc3,hc3
|
||||
vmovd h3,t1x
|
||||
vpaddd t1,hc3,hc3
|
||||
# hc4 = [(m[60-63] >> 8) | (1<<24), (m[44-47] >> 8) | (1<<24),
|
||||
# (m[28-31] >> 8) | (1<<24), (m[12-15] >> 8) | (1<<24) + h4]
|
||||
vmovd 0x0c(m),hc4x
|
||||
vmovd 0x1c(m),t1x
|
||||
vpunpcklqdq t1,hc4,hc4
|
||||
vmovd 0x2c(m),t1x
|
||||
vmovd 0x3c(m),t2x
|
||||
vpunpcklqdq t2,t1,t1
|
||||
vperm2i128 $0x20,t1,hc4,hc4
|
||||
vpsrld $8,hc4,hc4
|
||||
vpor ORMASK(%rip),hc4,hc4
|
||||
vmovd h4,t1x
|
||||
vpaddd t1,hc4,hc4
|
||||
|
||||
# t1 = [ hc0[3] * r0, hc0[2] * u0, hc0[1] * w0, hc0[0] * y0 ]
|
||||
vpmuludq hc0,ruwy0,t1
|
||||
# t1 += [ hc1[3] * s4, hc1[2] * v4, hc1[1] * x4, hc1[0] * z4 ]
|
||||
vpmuludq hc1,svxz4,t2
|
||||
vpaddq t2,t1,t1
|
||||
# t1 += [ hc2[3] * s3, hc2[2] * v3, hc2[1] * x3, hc2[0] * z3 ]
|
||||
vpmuludq hc2,svxz3,t2
|
||||
vpaddq t2,t1,t1
|
||||
# t1 += [ hc3[3] * s2, hc3[2] * v2, hc3[1] * x2, hc3[0] * z2 ]
|
||||
vpmuludq hc3,svxz2,t2
|
||||
vpaddq t2,t1,t1
|
||||
# t1 += [ hc4[3] * s1, hc4[2] * v1, hc4[1] * x1, hc4[0] * z1 ]
|
||||
vpmuludq hc4,svxz1,t2
|
||||
vpaddq t2,t1,t1
|
||||
# d0 = t1[0] + t1[1] + t1[2] + t1[3]
|
||||
vpermq $0xee,t1,t2
|
||||
vpaddq t2,t1,t1
|
||||
vpsrldq $8,t1,t2
|
||||
vpaddq t2,t1,t1
|
||||
vmovq t1x,d0
|
||||
|
||||
# t1 = [ hc0[3] * r1, hc0[2] * u1, hc0[1] * w1, hc0[0] * y1 ]
|
||||
vpmuludq hc0,ruwy1,t1
|
||||
# t1 += [ hc1[3] * r0, hc1[2] * u0, hc1[1] * w0, hc1[0] * y0 ]
|
||||
vpmuludq hc1,ruwy0,t2
|
||||
vpaddq t2,t1,t1
|
||||
# t1 += [ hc2[3] * s4, hc2[2] * v4, hc2[1] * x4, hc2[0] * z4 ]
|
||||
vpmuludq hc2,svxz4,t2
|
||||
vpaddq t2,t1,t1
|
||||
# t1 += [ hc3[3] * s3, hc3[2] * v3, hc3[1] * x3, hc3[0] * z3 ]
|
||||
vpmuludq hc3,svxz3,t2
|
||||
vpaddq t2,t1,t1
|
||||
# t1 += [ hc4[3] * s2, hc4[2] * v2, hc4[1] * x2, hc4[0] * z2 ]
|
||||
vpmuludq hc4,svxz2,t2
|
||||
vpaddq t2,t1,t1
|
||||
# d1 = t1[0] + t1[1] + t1[2] + t1[3]
|
||||
vpermq $0xee,t1,t2
|
||||
vpaddq t2,t1,t1
|
||||
vpsrldq $8,t1,t2
|
||||
vpaddq t2,t1,t1
|
||||
vmovq t1x,d1
|
||||
|
||||
# t1 = [ hc0[3] * r2, hc0[2] * u2, hc0[1] * w2, hc0[0] * y2 ]
|
||||
vpmuludq hc0,ruwy2,t1
|
||||
# t1 += [ hc1[3] * r1, hc1[2] * u1, hc1[1] * w1, hc1[0] * y1 ]
|
||||
vpmuludq hc1,ruwy1,t2
|
||||
vpaddq t2,t1,t1
|
||||
# t1 += [ hc2[3] * r0, hc2[2] * u0, hc2[1] * w0, hc2[0] * y0 ]
|
||||
vpmuludq hc2,ruwy0,t2
|
||||
vpaddq t2,t1,t1
|
||||
# t1 += [ hc3[3] * s4, hc3[2] * v4, hc3[1] * x4, hc3[0] * z4 ]
|
||||
vpmuludq hc3,svxz4,t2
|
||||
vpaddq t2,t1,t1
|
||||
# t1 += [ hc4[3] * s3, hc4[2] * v3, hc4[1] * x3, hc4[0] * z3 ]
|
||||
vpmuludq hc4,svxz3,t2
|
||||
vpaddq t2,t1,t1
|
||||
# d2 = t1[0] + t1[1] + t1[2] + t1[3]
|
||||
vpermq $0xee,t1,t2
|
||||
vpaddq t2,t1,t1
|
||||
vpsrldq $8,t1,t2
|
||||
vpaddq t2,t1,t1
|
||||
vmovq t1x,d2
|
||||
|
||||
# t1 = [ hc0[3] * r3, hc0[2] * u3, hc0[1] * w3, hc0[0] * y3 ]
|
||||
vpmuludq hc0,ruwy3,t1
|
||||
# t1 += [ hc1[3] * r2, hc1[2] * u2, hc1[1] * w2, hc1[0] * y2 ]
|
||||
vpmuludq hc1,ruwy2,t2
|
||||
vpaddq t2,t1,t1
|
||||
# t1 += [ hc2[3] * r1, hc2[2] * u1, hc2[1] * w1, hc2[0] * y1 ]
|
||||
vpmuludq hc2,ruwy1,t2
|
||||
vpaddq t2,t1,t1
|
||||
# t1 += [ hc3[3] * r0, hc3[2] * u0, hc3[1] * w0, hc3[0] * y0 ]
|
||||
vpmuludq hc3,ruwy0,t2
|
||||
vpaddq t2,t1,t1
|
||||
# t1 += [ hc4[3] * s4, hc4[2] * v4, hc4[1] * x4, hc4[0] * z4 ]
|
||||
vpmuludq hc4,svxz4,t2
|
||||
vpaddq t2,t1,t1
|
||||
# d3 = t1[0] + t1[1] + t1[2] + t1[3]
|
||||
vpermq $0xee,t1,t2
|
||||
vpaddq t2,t1,t1
|
||||
vpsrldq $8,t1,t2
|
||||
vpaddq t2,t1,t1
|
||||
vmovq t1x,d3
|
||||
|
||||
# t1 = [ hc0[3] * r4, hc0[2] * u4, hc0[1] * w4, hc0[0] * y4 ]
|
||||
vpmuludq hc0,ruwy4,t1
|
||||
# t1 += [ hc1[3] * r3, hc1[2] * u3, hc1[1] * w3, hc1[0] * y3 ]
|
||||
vpmuludq hc1,ruwy3,t2
|
||||
vpaddq t2,t1,t1
|
||||
# t1 += [ hc2[3] * r2, hc2[2] * u2, hc2[1] * w2, hc2[0] * y2 ]
|
||||
vpmuludq hc2,ruwy2,t2
|
||||
vpaddq t2,t1,t1
|
||||
# t1 += [ hc3[3] * r1, hc3[2] * u1, hc3[1] * w1, hc3[0] * y1 ]
|
||||
vpmuludq hc3,ruwy1,t2
|
||||
vpaddq t2,t1,t1
|
||||
# t1 += [ hc4[3] * r0, hc4[2] * u0, hc4[1] * w0, hc4[0] * y0 ]
|
||||
vpmuludq hc4,ruwy0,t2
|
||||
vpaddq t2,t1,t1
|
||||
# d4 = t1[0] + t1[1] + t1[2] + t1[3]
|
||||
vpermq $0xee,t1,t2
|
||||
vpaddq t2,t1,t1
|
||||
vpsrldq $8,t1,t2
|
||||
vpaddq t2,t1,t1
|
||||
vmovq t1x,d4
|
||||
|
||||
# d1 += d0 >> 26
|
||||
mov d0,%rax
|
||||
shr $26,%rax
|
||||
add %rax,d1
|
||||
# h0 = d0 & 0x3ffffff
|
||||
mov d0,%rbx
|
||||
and $0x3ffffff,%ebx
|
||||
|
||||
# d2 += d1 >> 26
|
||||
mov d1,%rax
|
||||
shr $26,%rax
|
||||
add %rax,d2
|
||||
# h1 = d1 & 0x3ffffff
|
||||
mov d1,%rax
|
||||
and $0x3ffffff,%eax
|
||||
mov %eax,h1
|
||||
|
||||
# d3 += d2 >> 26
|
||||
mov d2,%rax
|
||||
shr $26,%rax
|
||||
add %rax,d3
|
||||
# h2 = d2 & 0x3ffffff
|
||||
mov d2,%rax
|
||||
and $0x3ffffff,%eax
|
||||
mov %eax,h2
|
||||
|
||||
# d4 += d3 >> 26
|
||||
mov d3,%rax
|
||||
shr $26,%rax
|
||||
add %rax,d4
|
||||
# h3 = d3 & 0x3ffffff
|
||||
mov d3,%rax
|
||||
and $0x3ffffff,%eax
|
||||
mov %eax,h3
|
||||
|
||||
# h0 += (d4 >> 26) * 5
|
||||
mov d4,%rax
|
||||
shr $26,%rax
|
||||
lea (%eax,%eax,4),%eax
|
||||
add %eax,%ebx
|
||||
# h4 = d4 & 0x3ffffff
|
||||
mov d4,%rax
|
||||
and $0x3ffffff,%eax
|
||||
mov %eax,h4
|
||||
|
||||
# h1 += h0 >> 26
|
||||
mov %ebx,%eax
|
||||
shr $26,%eax
|
||||
add %eax,h1
|
||||
# h0 = h0 & 0x3ffffff
|
||||
andl $0x3ffffff,%ebx
|
||||
mov %ebx,h0
|
||||
|
||||
add $0x40,m
|
||||
dec %rcx
|
||||
jnz .Ldoblock4
|
||||
|
||||
vzeroupper
|
||||
pop %r13
|
||||
pop %r12
|
||||
pop %rbx
|
||||
ret
|
||||
ENDPROC(poly1305_4block_avx2)
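
The comment above, "h = (h + m1) * r^4 + m2 * r^3 + m3 * r^2 + m4 * r", is the unrolled form of four sequential Poly1305 steps h = (h + m) * r. A toy check of that identity, with a 61-bit Mersenne prime standing in for 2^130 - 5 so products fit in __uint128_t (illustrative only, no limb arithmetic):

#include <assert.h>
#include <stdint.h>

#define P ((1ULL << 61) - 1)

static uint64_t mulmod(uint64_t a, uint64_t b)
{
	return (uint64_t)(((__uint128_t)a * b) % P);
}

static uint64_t addmod(uint64_t a, uint64_t b)
{
	return (a + b) % P;
}

int main(void)
{
	uint64_t h = 12345, r = 0x1234567890abcdefULL % P;
	uint64_t m[4] = { 111, 222, 333, 444 };
	uint64_t r2 = mulmod(r, r), r3 = mulmod(r2, r), r4 = mulmod(r3, r);

	/* Sequential single-block processing: h = (h + m_i) * r. */
	uint64_t seq = h;
	for (int i = 0; i < 4; i++)
		seq = mulmod(addmod(seq, m[i]), r);

	/* Unrolled four-block form using the precomputed key powers. */
	uint64_t unrolled = addmod(mulmod(addmod(h, m[0]), r4),
			    addmod(mulmod(m[1], r3),
			    addmod(mulmod(m[2], r2), mulmod(m[3], r))));

	assert(seq == unrolled);
	return 0;
}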
|
582
arch/x86/crypto/poly1305-sse2-x86_64.S
Normal file
@ -0,0 +1,582 @@
|
||||
/*
|
||||
* Poly1305 authenticator algorithm, RFC7539, x64 SSE2 functions
|
||||
*
|
||||
* Copyright (C) 2015 Martin Willi
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*/
|
||||
|
||||
#include <linux/linkage.h>
|
||||
|
||||
.data
|
||||
.align 16
|
||||
|
||||
ANMASK: .octa 0x0000000003ffffff0000000003ffffff
|
||||
ORMASK: .octa 0x00000000010000000000000001000000
|
||||
|
||||
.text
|
||||
|
||||
#define h0 0x00(%rdi)
|
||||
#define h1 0x04(%rdi)
|
||||
#define h2 0x08(%rdi)
|
||||
#define h3 0x0c(%rdi)
|
||||
#define h4 0x10(%rdi)
|
||||
#define r0 0x00(%rdx)
|
||||
#define r1 0x04(%rdx)
|
||||
#define r2 0x08(%rdx)
|
||||
#define r3 0x0c(%rdx)
|
||||
#define r4 0x10(%rdx)
|
||||
#define s1 0x00(%rsp)
|
||||
#define s2 0x04(%rsp)
|
||||
#define s3 0x08(%rsp)
|
||||
#define s4 0x0c(%rsp)
|
||||
#define m %rsi
|
||||
#define h01 %xmm0
|
||||
#define h23 %xmm1
|
||||
#define h44 %xmm2
|
||||
#define t1 %xmm3
|
||||
#define t2 %xmm4
|
||||
#define t3 %xmm5
|
||||
#define t4 %xmm6
|
||||
#define mask %xmm7
|
||||
#define d0 %r8
|
||||
#define d1 %r9
|
||||
#define d2 %r10
|
||||
#define d3 %r11
|
||||
#define d4 %r12
|
||||
|
||||
ENTRY(poly1305_block_sse2)
|
||||
# %rdi: Accumulator h[5]
|
||||
# %rsi: 16 byte input block m
|
||||
# %rdx: Poly1305 key r[5]
|
||||
# %rcx: Block count
|
||||
|
||||
# This single block variant tries to improve performance by doing two
|
||||
# multiplications in parallel using SSE instructions. There is quite
|
||||
# some quadword packing involved, hence the speedup is marginal.
|
||||
|
||||
push %rbx
|
||||
push %r12
|
||||
sub $0x10,%rsp
|
||||
|
||||
# s1..s4 = r1..r4 * 5
|
||||
mov r1,%eax
|
||||
lea (%eax,%eax,4),%eax
|
||||
mov %eax,s1
|
||||
mov r2,%eax
|
||||
lea (%eax,%eax,4),%eax
|
||||
mov %eax,s2
|
||||
mov r3,%eax
|
||||
lea (%eax,%eax,4),%eax
|
||||
mov %eax,s3
|
||||
mov r4,%eax
|
||||
lea (%eax,%eax,4),%eax
|
||||
mov %eax,s4
|
||||
|
||||
movdqa ANMASK(%rip),mask
|
||||
|
||||
.Ldoblock:
|
||||
# h01 = [0, h1, 0, h0]
|
||||
# h23 = [0, h3, 0, h2]
|
||||
# h44 = [0, h4, 0, h4]
|
||||
movd h0,h01
|
||||
movd h1,t1
|
||||
movd h2,h23
|
||||
movd h3,t2
|
||||
movd h4,h44
|
||||
punpcklqdq t1,h01
|
||||
punpcklqdq t2,h23
|
||||
punpcklqdq h44,h44
|
||||
|
||||
# h01 += [ (m[3-6] >> 2) & 0x3ffffff, m[0-3] & 0x3ffffff ]
|
||||
movd 0x00(m),t1
|
||||
movd 0x03(m),t2
|
||||
psrld $2,t2
|
||||
punpcklqdq t2,t1
|
||||
pand mask,t1
|
||||
paddd t1,h01
|
||||
# h23 += [ (m[9-12] >> 6) & 0x3ffffff, (m[6-9] >> 4) & 0x3ffffff ]
|
||||
movd 0x06(m),t1
|
||||
movd 0x09(m),t2
|
||||
psrld $4,t1
|
||||
psrld $6,t2
|
||||
punpcklqdq t2,t1
|
||||
pand mask,t1
|
||||
paddd t1,h23
|
||||
# h44 += [ (m[12-15] >> 8) | (1 << 24), (m[12-15] >> 8) | (1 << 24) ]
|
||||
mov 0x0c(m),%eax
|
||||
shr $8,%eax
|
||||
or $0x01000000,%eax
|
||||
movd %eax,t1
|
||||
pshufd $0xc4,t1,t1
|
||||
paddd t1,h44
|
||||
|
||||
# t1[0] = h0 * r0 + h2 * s3
|
||||
# t1[1] = h1 * s4 + h3 * s2
|
||||
movd r0,t1
|
||||
movd s4,t2
|
||||
punpcklqdq t2,t1
|
||||
pmuludq h01,t1
|
||||
movd s3,t2
|
||||
movd s2,t3
|
||||
punpcklqdq t3,t2
|
||||
pmuludq h23,t2
|
||||
paddq t2,t1
|
||||
# t2[0] = h0 * r1 + h2 * s4
|
||||
# t2[1] = h1 * r0 + h3 * s3
|
||||
movd r1,t2
|
||||
movd r0,t3
|
||||
punpcklqdq t3,t2
|
||||
pmuludq h01,t2
|
||||
movd s4,t3
|
||||
movd s3,t4
|
||||
punpcklqdq t4,t3
|
||||
pmuludq h23,t3
|
||||
paddq t3,t2
|
||||
# t3[0] = h4 * s1
|
||||
# t3[1] = h4 * s2
|
||||
movd s1,t3
|
||||
movd s2,t4
|
||||
punpcklqdq t4,t3
|
||||
pmuludq h44,t3
|
||||
# d0 = t1[0] + t1[1] + t3[0]
|
||||
# d1 = t2[0] + t2[1] + t3[1]
|
||||
movdqa t1,t4
|
||||
punpcklqdq t2,t4
|
||||
punpckhqdq t2,t1
|
||||
paddq t4,t1
|
||||
paddq t3,t1
|
||||
movq t1,d0
|
||||
psrldq $8,t1
|
||||
movq t1,d1
|
||||
|
||||
# t1[0] = h0 * r2 + h2 * r0
|
||||
# t1[1] = h1 * r1 + h3 * s4
|
||||
movd r2,t1
|
||||
movd r1,t2
|
||||
punpcklqdq t2,t1
|
||||
pmuludq h01,t1
|
||||
movd r0,t2
|
||||
movd s4,t3
|
||||
punpcklqdq t3,t2
|
||||
pmuludq h23,t2
|
||||
paddq t2,t1
|
||||
# t2[0] = h0 * r3 + h2 * r1
|
||||
# t2[1] = h1 * r2 + h3 * r0
|
||||
movd r3,t2
|
||||
movd r2,t3
|
||||
punpcklqdq t3,t2
|
||||
pmuludq h01,t2
|
||||
movd r1,t3
|
||||
movd r0,t4
|
||||
punpcklqdq t4,t3
|
||||
pmuludq h23,t3
|
||||
paddq t3,t2
|
||||
# t3[0] = h4 * s3
|
||||
# t3[1] = h4 * s4
|
||||
movd s3,t3
|
||||
movd s4,t4
|
||||
punpcklqdq t4,t3
|
||||
pmuludq h44,t3
|
||||
# d2 = t1[0] + t1[1] + t3[0]
|
||||
# d3 = t2[0] + t2[1] + t3[1]
|
||||
movdqa t1,t4
|
||||
punpcklqdq t2,t4
|
||||
punpckhqdq t2,t1
|
||||
paddq t4,t1
|
||||
paddq t3,t1
|
||||
movq t1,d2
|
||||
psrldq $8,t1
|
||||
movq t1,d3
|
||||
|
||||
# t1[0] = h0 * r4 + h2 * r2
|
||||
# t1[1] = h1 * r3 + h3 * r1
|
||||
movd r4,t1
|
||||
movd r3,t2
|
||||
punpcklqdq t2,t1
|
||||
pmuludq h01,t1
|
||||
movd r2,t2
|
||||
movd r1,t3
|
||||
punpcklqdq t3,t2
|
||||
pmuludq h23,t2
|
||||
paddq t2,t1
|
||||
# t3[0] = h4 * r0
|
||||
movd r0,t3
|
||||
pmuludq h44,t3
|
||||
# d4 = t1[0] + t1[1] + t3[0]
|
||||
movdqa t1,t4
|
||||
psrldq $8,t4
|
||||
paddq t4,t1
|
||||
paddq t3,t1
|
||||
movq t1,d4
|
||||
|
||||
# d1 += d0 >> 26
|
||||
mov d0,%rax
|
||||
shr $26,%rax
|
||||
add %rax,d1
|
||||
# h0 = d0 & 0x3ffffff
|
||||
mov d0,%rbx
|
||||
and $0x3ffffff,%ebx
|
||||
|
||||
# d2 += d1 >> 26
|
||||
mov d1,%rax
|
||||
shr $26,%rax
|
||||
add %rax,d2
|
||||
# h1 = d1 & 0x3ffffff
|
||||
mov d1,%rax
|
||||
and $0x3ffffff,%eax
|
||||
mov %eax,h1
|
||||
|
||||
# d3 += d2 >> 26
|
||||
mov d2,%rax
|
||||
shr $26,%rax
|
||||
add %rax,d3
|
||||
# h2 = d2 & 0x3ffffff
|
||||
mov d2,%rax
|
||||
and $0x3ffffff,%eax
|
||||
mov %eax,h2
|
||||
|
||||
# d4 += d3 >> 26
|
||||
mov d3,%rax
|
||||
shr $26,%rax
|
||||
add %rax,d4
|
||||
# h3 = d3 & 0x3ffffff
|
||||
mov d3,%rax
|
||||
and $0x3ffffff,%eax
|
||||
mov %eax,h3
|
||||
|
||||
# h0 += (d4 >> 26) * 5
|
||||
mov d4,%rax
|
||||
shr $26,%rax
|
||||
lea (%eax,%eax,4),%eax
|
||||
add %eax,%ebx
|
||||
# h4 = d4 & 0x3ffffff
|
||||
mov d4,%rax
|
||||
and $0x3ffffff,%eax
|
||||
mov %eax,h4
|
||||
|
||||
# h1 += h0 >> 26
|
||||
mov %ebx,%eax
|
||||
shr $26,%eax
|
||||
add %eax,h1
|
||||
# h0 = h0 & 0x3ffffff
|
||||
andl $0x3ffffff,%ebx
|
||||
mov %ebx,h0
|
||||
|
||||
add $0x10,m
|
||||
dec %rcx
|
||||
jnz .Ldoblock
|
||||
|
||||
add $0x10,%rsp
|
||||
pop %r12
|
||||
pop %rbx
|
||||
ret
|
||||
ENDPROC(poly1305_block_sse2)
|
||||
|
||||
|
||||
#define u0 0x00(%r8)
|
||||
#define u1 0x04(%r8)
|
||||
#define u2 0x08(%r8)
|
||||
#define u3 0x0c(%r8)
|
||||
#define u4 0x10(%r8)
|
||||
#define hc0 %xmm0
|
||||
#define hc1 %xmm1
|
||||
#define hc2 %xmm2
|
||||
#define hc3 %xmm5
|
||||
#define hc4 %xmm6
|
||||
#define ru0 %xmm7
|
||||
#define ru1 %xmm8
|
||||
#define ru2 %xmm9
|
||||
#define ru3 %xmm10
|
||||
#define ru4 %xmm11
|
||||
#define sv1 %xmm12
|
||||
#define sv2 %xmm13
|
||||
#define sv3 %xmm14
|
||||
#define sv4 %xmm15
|
||||
#undef d0
|
||||
#define d0 %r13
|
||||
|
||||
ENTRY(poly1305_2block_sse2)
|
||||
# %rdi: Accumulator h[5]
|
||||
# %rsi: 16 byte input block m
|
||||
# %rdx: Poly1305 key r[5]
|
||||
# %rcx: Doubleblock count
|
||||
# %r8: Poly1305 derived key r^2 u[5]
|
||||
|
||||
# This two-block variant further improves performance by using loop
|
||||
# unrolled block processing. This is more straightforward and does
|
||||
# less byte shuffling, but requires a second Poly1305 key r^2:
|
||||
# h = (h + m) * r => h = (h + m1) * r^2 + m2 * r
|
||||
|
||||
push %rbx
|
||||
push %r12
|
||||
push %r13
|
||||
|
||||
# combine r0,u0
|
||||
movd u0,ru0
|
||||
movd r0,t1
|
||||
punpcklqdq t1,ru0
|
||||
|
||||
# combine r1,u1 and s1=r1*5,v1=u1*5
|
||||
movd u1,ru1
|
||||
movd r1,t1
|
||||
punpcklqdq t1,ru1
|
||||
movdqa ru1,sv1
|
||||
pslld $2,sv1
|
||||
paddd ru1,sv1
|
||||
|
||||
# combine r2,u2 and s2=r2*5,v2=u2*5
|
||||
movd u2,ru2
|
||||
movd r2,t1
|
||||
punpcklqdq t1,ru2
|
||||
movdqa ru2,sv2
|
||||
pslld $2,sv2
|
||||
paddd ru2,sv2
|
||||
|
||||
# combine r3,u3 and s3=r3*5,v3=u3*5
|
||||
movd u3,ru3
|
||||
movd r3,t1
|
||||
punpcklqdq t1,ru3
|
||||
movdqa ru3,sv3
|
||||
pslld $2,sv3
|
||||
paddd ru3,sv3
|
||||
|
||||
# combine r4,u4 and s4=r4*5,v4=u4*5
|
||||
movd u4,ru4
|
||||
movd r4,t1
|
||||
punpcklqdq t1,ru4
|
||||
movdqa ru4,sv4
|
||||
pslld $2,sv4
|
||||
paddd ru4,sv4
|
||||
|
||||
.Ldoblock2:
|
||||
# hc0 = [ m[16-19] & 0x3ffffff, h0 + m[0-3] & 0x3ffffff ]
|
||||
movd 0x00(m),hc0
|
||||
movd 0x10(m),t1
|
||||
punpcklqdq t1,hc0
|
||||
pand ANMASK(%rip),hc0
|
||||
movd h0,t1
|
||||
paddd t1,hc0
|
||||
# hc1 = [ (m[19-22] >> 2) & 0x3ffffff, h1 + (m[3-6] >> 2) & 0x3ffffff ]
|
||||
movd 0x03(m),hc1
|
||||
movd 0x13(m),t1
|
||||
punpcklqdq t1,hc1
|
||||
psrld $2,hc1
|
||||
pand ANMASK(%rip),hc1
|
||||
movd h1,t1
|
||||
paddd t1,hc1
|
||||
# hc2 = [ (m[22-25] >> 4) & 0x3ffffff, h2 + (m[6-9] >> 4) & 0x3ffffff ]
|
||||
movd 0x06(m),hc2
|
||||
movd 0x16(m),t1
|
||||
punpcklqdq t1,hc2
|
||||
psrld $4,hc2
|
||||
pand ANMASK(%rip),hc2
|
||||
movd h2,t1
|
||||
paddd t1,hc2
|
||||
# hc3 = [ (m[25-28] >> 6) & 0x3ffffff, h3 + (m[9-12] >> 6) & 0x3ffffff ]
|
||||
movd 0x09(m),hc3
|
||||
movd 0x19(m),t1
|
||||
punpcklqdq t1,hc3
|
||||
psrld $6,hc3
|
||||
pand ANMASK(%rip),hc3
|
||||
movd h3,t1
|
||||
paddd t1,hc3
|
||||
# hc4 = [ (m[28-31] >> 8) | (1<<24), h4 + (m[12-15] >> 8) | (1<<24) ]
|
||||
movd 0x0c(m),hc4
|
||||
movd 0x1c(m),t1
|
||||
punpcklqdq t1,hc4
|
||||
psrld $8,hc4
|
||||
por ORMASK(%rip),hc4
|
||||
movd h4,t1
|
||||
paddd t1,hc4
|
||||
|
||||
# t1 = [ hc0[1] * r0, hc0[0] * u0 ]
|
||||
movdqa ru0,t1
|
||||
pmuludq hc0,t1
|
||||
# t1 += [ hc1[1] * s4, hc1[0] * v4 ]
|
||||
movdqa sv4,t2
|
||||
pmuludq hc1,t2
|
||||
paddq t2,t1
|
||||
# t1 += [ hc2[1] * s3, hc2[0] * v3 ]
|
||||
movdqa sv3,t2
|
||||
pmuludq hc2,t2
|
||||
paddq t2,t1
|
||||
# t1 += [ hc3[1] * s2, hc3[0] * v2 ]
|
||||
movdqa sv2,t2
|
||||
pmuludq hc3,t2
|
||||
paddq t2,t1
|
||||
# t1 += [ hc4[1] * s1, hc4[0] * v1 ]
|
||||
movdqa sv1,t2
|
||||
pmuludq hc4,t2
|
||||
paddq t2,t1
|
||||
# d0 = t1[0] + t1[1]
|
||||
movdqa t1,t2
|
||||
psrldq $8,t2
|
||||
paddq t2,t1
|
||||
movq t1,d0
|
||||
|
||||
# t1 = [ hc0[1] * r1, hc0[0] * u1 ]
|
||||
movdqa ru1,t1
|
||||
pmuludq hc0,t1
|
||||
# t1 += [ hc1[1] * r0, hc1[0] * u0 ]
|
||||
movdqa ru0,t2
|
||||
pmuludq hc1,t2
|
||||
paddq t2,t1
|
||||
# t1 += [ hc2[1] * s4, hc2[0] * v4 ]
|
||||
movdqa sv4,t2
|
||||
pmuludq hc2,t2
|
||||
paddq t2,t1
|
||||
# t1 += [ hc3[1] * s3, hc3[0] * v3 ]
|
||||
movdqa sv3,t2
|
||||
pmuludq hc3,t2
|
||||
paddq t2,t1
|
||||
# t1 += [ hc4[1] * s2, hc4[0] * v2 ]
|
||||
movdqa sv2,t2
|
||||
pmuludq hc4,t2
|
||||
paddq t2,t1
|
||||
# d1 = t1[0] + t1[1]
|
||||
movdqa t1,t2
|
||||
psrldq $8,t2
|
||||
paddq t2,t1
|
||||
movq t1,d1
|
||||
|
||||
# t1 = [ hc0[1] * r2, hc0[0] * u2 ]
|
||||
movdqa ru2,t1
|
||||
pmuludq hc0,t1
|
||||
# t1 += [ hc1[1] * r1, hc1[0] * u1 ]
|
||||
movdqa ru1,t2
|
||||
pmuludq hc1,t2
|
||||
paddq t2,t1
|
||||
# t1 += [ hc2[1] * r0, hc2[0] * u0 ]
|
||||
movdqa ru0,t2
|
||||
pmuludq hc2,t2
|
||||
paddq t2,t1
|
||||
# t1 += [ hc3[1] * s4, hc3[0] * v4 ]
|
||||
movdqa sv4,t2
|
||||
pmuludq hc3,t2
|
||||
paddq t2,t1
|
||||
# t1 += [ hc4[1] * s3, hc4[0] * v3 ]
|
||||
movdqa sv3,t2
|
||||
pmuludq hc4,t2
|
||||
paddq t2,t1
|
||||
# d2 = t1[0] + t1[1]
|
||||
movdqa t1,t2
|
||||
psrldq $8,t2
|
||||
paddq t2,t1
|
||||
movq t1,d2
|
||||
|
||||
# t1 = [ hc0[1] * r3, hc0[0] * u3 ]
|
||||
movdqa ru3,t1
|
||||
pmuludq hc0,t1
|
||||
# t1 += [ hc1[1] * r2, hc1[0] * u2 ]
|
||||
movdqa ru2,t2
|
||||
pmuludq hc1,t2
|
||||
paddq t2,t1
|
||||
# t1 += [ hc2[1] * r1, hc2[0] * u1 ]
|
||||
movdqa ru1,t2
|
||||
pmuludq hc2,t2
|
||||
paddq t2,t1
|
||||
# t1 += [ hc3[1] * r0, hc3[0] * u0 ]
|
||||
movdqa ru0,t2
|
||||
pmuludq hc3,t2
|
||||
paddq t2,t1
|
||||
# t1 += [ hc4[1] * s4, hc4[0] * v4 ]
|
||||
movdqa sv4,t2
|
||||
pmuludq hc4,t2
|
||||
paddq t2,t1
|
||||
# d3 = t1[0] + t1[1]
|
||||
movdqa t1,t2
|
||||
psrldq $8,t2
|
||||
paddq t2,t1
|
||||
movq t1,d3
|
||||
|
||||
# t1 = [ hc0[1] * r4, hc0[0] * u4 ]
|
||||
movdqa ru4,t1
|
||||
pmuludq hc0,t1
|
||||
# t1 += [ hc1[1] * r3, hc1[0] * u3 ]
|
||||
movdqa ru3,t2
|
||||
pmuludq hc1,t2
|
||||
paddq t2,t1
|
||||
# t1 += [ hc2[1] * r2, hc2[0] * u2 ]
|
||||
movdqa ru2,t2
|
||||
pmuludq hc2,t2
|
||||
paddq t2,t1
|
||||
# t1 += [ hc3[1] * r1, hc3[0] * u1 ]
|
||||
movdqa ru1,t2
|
||||
pmuludq hc3,t2
|
||||
paddq t2,t1
|
||||
# t1 += [ hc4[1] * r0, hc4[0] * u0 ]
|
||||
movdqa ru0,t2
|
||||
pmuludq hc4,t2
|
||||
paddq t2,t1
|
||||
# d4 = t1[0] + t1[1]
|
||||
movdqa t1,t2
|
||||
psrldq $8,t2
|
||||
paddq t2,t1
|
||||
movq t1,d4
|
||||
|
||||
# d1 += d0 >> 26
|
||||
mov d0,%rax
|
||||
shr $26,%rax
|
||||
add %rax,d1
|
||||
# h0 = d0 & 0x3ffffff
|
||||
mov d0,%rbx
|
||||
and $0x3ffffff,%ebx
|
||||
|
||||
# d2 += d1 >> 26
|
||||
mov d1,%rax
|
||||
shr $26,%rax
|
||||
add %rax,d2
|
||||
# h1 = d1 & 0x3ffffff
|
||||
mov d1,%rax
|
||||
and $0x3ffffff,%eax
|
||||
mov %eax,h1
|
||||
|
||||
# d3 += d2 >> 26
|
||||
mov d2,%rax
|
||||
shr $26,%rax
|
||||
add %rax,d3
|
||||
# h2 = d2 & 0x3ffffff
|
||||
mov d2,%rax
|
||||
and $0x3ffffff,%eax
|
||||
mov %eax,h2
|
||||
|
||||
# d4 += d3 >> 26
|
||||
mov d3,%rax
|
||||
shr $26,%rax
|
||||
add %rax,d4
|
||||
# h3 = d3 & 0x3ffffff
|
||||
mov d3,%rax
|
||||
and $0x3ffffff,%eax
|
||||
mov %eax,h3
|
||||
|
||||
# h0 += (d4 >> 26) * 5
|
||||
mov d4,%rax
|
||||
shr $26,%rax
|
||||
lea (%eax,%eax,4),%eax
|
||||
add %eax,%ebx
|
||||
# h4 = d4 & 0x3ffffff
|
||||
mov d4,%rax
|
||||
and $0x3ffffff,%eax
|
||||
mov %eax,h4
|
||||
|
||||
# h1 += h0 >> 26
|
||||
mov %ebx,%eax
|
||||
shr $26,%eax
|
||||
add %eax,h1
|
||||
# h0 = h0 & 0x3ffffff
|
||||
andl $0x3ffffff,%ebx
|
||||
mov %ebx,h0
|
||||
|
||||
add $0x20,m
|
||||
dec %rcx
|
||||
jnz .Ldoblock2
|
||||
|
||||
pop %r13
|
||||
pop %r12
|
||||
pop %rbx
|
||||
ret
|
||||
ENDPROC(poly1305_2block_sse2)
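
Both routines above precompute s1..s4 = r1..r4 * 5 (the "lea (%eax,%eax,4)" idiom) and fold the top carry back with "h0 += (d4 >> 26) * 5". That works because 2^130 == 5 (mod 2^130 - 5), so any limb product that lands at or above 2^130 wraps around multiplied by 5. A minimal two-limb model of the same idea with p = 2^52 - 5 (illustrative only; the real code uses five 26-bit limbs):

#include <assert.h>
#include <stdint.h>

#define MASK26 ((1ULL << 26) - 1)
#define P52    ((1ULL << 52) - 5)

/* Multiply two field elements held as two 26-bit limbs.  Since 2^52 == 5
 * (mod p), the a1*b1 product is folded in as a1*(5*b1), which is why 5*r is
 * precomputed; the final carry reuses the same wrap-around. */
static uint64_t mul_limbs(uint64_t a, uint64_t b)
{
	uint64_t a0 = a & MASK26, a1 = a >> 26;
	uint64_t b0 = b & MASK26, b1 = b >> 26;
	uint64_t s1 = b1 * 5;			/* "lea (x,x,4)" idiom  */
	uint64_t d0 = a0 * b0 + a1 * s1;	/* coefficient of 2^0   */
	uint64_t d1 = a0 * b1 + a1 * b0;	/* coefficient of 2^26  */

	d1 += d0 >> 26;				/* carry d0 -> d1       */
	d0 &= MASK26;
	d0 += (d1 >> 26) * 5;			/* 2^52 wraps back as 5 */
	d1 &= MASK26;
	d1 += d0 >> 26;				/* final small carry    */
	d0 &= MASK26;
	return (d1 << 26) | d0;
}

int main(void)
{
	uint64_t a = 0xf123456789ULL % P52, b = 0x3abcdef0123ULL % P52;
	uint64_t ref = (uint64_t)(((__uint128_t)a * b) % P52);
	uint64_t got = mul_limbs(a, b);

	/* got may still be one conditional subtraction away from canonical,
	 * exactly like the running Poly1305 accumulator. */
	assert(got == ref || got == ref + P52);
	return 0;
}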
|
207
arch/x86/crypto/poly1305_glue.c
Normal file
@ -0,0 +1,207 @@
|
||||
/*
|
||||
* Poly1305 authenticator algorithm, RFC7539, SIMD glue code
|
||||
*
|
||||
* Copyright (C) 2015 Martin Willi
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*/
|
||||
|
||||
#include <crypto/algapi.h>
|
||||
#include <crypto/internal/hash.h>
|
||||
#include <crypto/poly1305.h>
|
||||
#include <linux/crypto.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <asm/fpu/api.h>
|
||||
#include <asm/simd.h>
|
||||
|
||||
struct poly1305_simd_desc_ctx {
|
||||
struct poly1305_desc_ctx base;
|
||||
/* derived key u set? */
|
||||
bool uset;
|
||||
#ifdef CONFIG_AS_AVX2
|
||||
/* derived keys r^3, r^4 set? */
|
||||
bool wset;
|
||||
#endif
|
||||
/* derived Poly1305 key r^2 */
|
||||
u32 u[5];
|
||||
/* ... silently appended r^3 and r^4 when using AVX2 */
|
||||
};
|
||||
|
||||
asmlinkage void poly1305_block_sse2(u32 *h, const u8 *src,
|
||||
const u32 *r, unsigned int blocks);
|
||||
asmlinkage void poly1305_2block_sse2(u32 *h, const u8 *src, const u32 *r,
|
||||
unsigned int blocks, const u32 *u);
|
||||
#ifdef CONFIG_AS_AVX2
|
||||
asmlinkage void poly1305_4block_avx2(u32 *h, const u8 *src, const u32 *r,
|
||||
unsigned int blocks, const u32 *u);
|
||||
static bool poly1305_use_avx2;
|
||||
#endif
|
||||
|
||||
static int poly1305_simd_init(struct shash_desc *desc)
|
||||
{
|
||||
struct poly1305_simd_desc_ctx *sctx = shash_desc_ctx(desc);
|
||||
|
||||
sctx->uset = false;
|
||||
#ifdef CONFIG_AS_AVX2
|
||||
sctx->wset = false;
|
||||
#endif
|
||||
|
||||
return crypto_poly1305_init(desc);
|
||||
}
|
||||
|
||||
static void poly1305_simd_mult(u32 *a, const u32 *b)
|
||||
{
|
||||
u8 m[POLY1305_BLOCK_SIZE];
|
||||
|
||||
memset(m, 0, sizeof(m));
|
||||
/* The poly1305 block function adds a hi-bit to the accumulator which
|
||||
* we don't need for key multiplication; compensate for it. */
|
||||
a[4] -= 1 << 24;
|
||||
poly1305_block_sse2(a, m, b, 1);
|
||||
}
|
||||
|
||||
static unsigned int poly1305_simd_blocks(struct poly1305_desc_ctx *dctx,
|
||||
const u8 *src, unsigned int srclen)
|
||||
{
|
||||
struct poly1305_simd_desc_ctx *sctx;
|
||||
unsigned int blocks, datalen;
|
||||
|
||||
BUILD_BUG_ON(offsetof(struct poly1305_simd_desc_ctx, base));
|
||||
sctx = container_of(dctx, struct poly1305_simd_desc_ctx, base);
|
||||
|
||||
if (unlikely(!dctx->sset)) {
|
||||
datalen = crypto_poly1305_setdesckey(dctx, src, srclen);
|
||||
src += srclen - datalen;
|
||||
srclen = datalen;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_AS_AVX2
|
||||
if (poly1305_use_avx2 && srclen >= POLY1305_BLOCK_SIZE * 4) {
|
||||
if (unlikely(!sctx->wset)) {
|
||||
if (!sctx->uset) {
|
||||
memcpy(sctx->u, dctx->r, sizeof(sctx->u));
|
||||
poly1305_simd_mult(sctx->u, dctx->r);
|
||||
sctx->uset = true;
|
||||
}
|
||||
memcpy(sctx->u + 5, sctx->u, sizeof(sctx->u));
|
||||
poly1305_simd_mult(sctx->u + 5, dctx->r);
|
||||
memcpy(sctx->u + 10, sctx->u + 5, sizeof(sctx->u));
|
||||
poly1305_simd_mult(sctx->u + 10, dctx->r);
|
||||
sctx->wset = true;
|
||||
}
|
||||
blocks = srclen / (POLY1305_BLOCK_SIZE * 4);
|
||||
poly1305_4block_avx2(dctx->h, src, dctx->r, blocks, sctx->u);
|
||||
src += POLY1305_BLOCK_SIZE * 4 * blocks;
|
||||
srclen -= POLY1305_BLOCK_SIZE * 4 * blocks;
|
||||
}
|
||||
#endif
|
||||
if (likely(srclen >= POLY1305_BLOCK_SIZE * 2)) {
|
||||
if (unlikely(!sctx->uset)) {
|
||||
memcpy(sctx->u, dctx->r, sizeof(sctx->u));
|
||||
poly1305_simd_mult(sctx->u, dctx->r);
|
||||
sctx->uset = true;
|
||||
}
|
||||
blocks = srclen / (POLY1305_BLOCK_SIZE * 2);
|
||||
poly1305_2block_sse2(dctx->h, src, dctx->r, blocks, sctx->u);
|
||||
src += POLY1305_BLOCK_SIZE * 2 * blocks;
|
||||
srclen -= POLY1305_BLOCK_SIZE * 2 * blocks;
|
||||
}
|
||||
if (srclen >= POLY1305_BLOCK_SIZE) {
|
||||
poly1305_block_sse2(dctx->h, src, dctx->r, 1);
|
||||
srclen -= POLY1305_BLOCK_SIZE;
|
||||
}
|
||||
return srclen;
|
||||
}
|
||||
|
||||
static int poly1305_simd_update(struct shash_desc *desc,
|
||||
const u8 *src, unsigned int srclen)
|
||||
{
|
||||
struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
|
||||
unsigned int bytes;
|
||||
|
||||
/* kernel_fpu_begin/end is costly, use fallback for small updates */
|
||||
if (srclen <= 288 || !may_use_simd())
|
||||
return crypto_poly1305_update(desc, src, srclen);
|
||||
|
||||
kernel_fpu_begin();
|
||||
|
||||
if (unlikely(dctx->buflen)) {
|
||||
bytes = min(srclen, POLY1305_BLOCK_SIZE - dctx->buflen);
|
||||
memcpy(dctx->buf + dctx->buflen, src, bytes);
|
||||
src += bytes;
|
||||
srclen -= bytes;
|
||||
dctx->buflen += bytes;
|
||||
|
||||
if (dctx->buflen == POLY1305_BLOCK_SIZE) {
|
||||
poly1305_simd_blocks(dctx, dctx->buf,
|
||||
POLY1305_BLOCK_SIZE);
|
||||
dctx->buflen = 0;
|
||||
}
|
||||
}
|
||||
|
||||
if (likely(srclen >= POLY1305_BLOCK_SIZE)) {
|
||||
bytes = poly1305_simd_blocks(dctx, src, srclen);
|
||||
src += srclen - bytes;
|
||||
srclen = bytes;
|
||||
}
|
||||
|
||||
kernel_fpu_end();
|
||||
|
||||
if (unlikely(srclen)) {
|
||||
dctx->buflen = srclen;
|
||||
memcpy(dctx->buf, src, srclen);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct shash_alg alg = {
|
||||
.digestsize = POLY1305_DIGEST_SIZE,
|
||||
.init = poly1305_simd_init,
|
||||
.update = poly1305_simd_update,
|
||||
.final = crypto_poly1305_final,
|
||||
.setkey = crypto_poly1305_setkey,
|
||||
.descsize = sizeof(struct poly1305_simd_desc_ctx),
|
||||
.base = {
|
||||
.cra_name = "poly1305",
|
||||
.cra_driver_name = "poly1305-simd",
|
||||
.cra_priority = 300,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
|
||||
.cra_alignmask = sizeof(u32) - 1,
|
||||
.cra_blocksize = POLY1305_BLOCK_SIZE,
|
||||
.cra_module = THIS_MODULE,
|
||||
},
|
||||
};
|
||||
|
||||
static int __init poly1305_simd_mod_init(void)
|
||||
{
|
||||
if (!cpu_has_xmm2)
|
||||
return -ENODEV;
|
||||
|
||||
#ifdef CONFIG_AS_AVX2
|
||||
poly1305_use_avx2 = cpu_has_avx && cpu_has_avx2 &&
|
||||
cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, NULL);
|
||||
alg.descsize = sizeof(struct poly1305_simd_desc_ctx);
|
||||
if (poly1305_use_avx2)
|
||||
alg.descsize += 10 * sizeof(u32);
|
||||
#endif
|
||||
return crypto_register_shash(&alg);
|
||||
}
|
||||
|
||||
static void __exit poly1305_simd_mod_exit(void)
|
||||
{
|
||||
crypto_unregister_shash(&alg);
|
||||
}
|
||||
|
||||
module_init(poly1305_simd_mod_init);
|
||||
module_exit(poly1305_simd_mod_exit);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_AUTHOR("Martin Willi <martin@strongswan.org>");
|
||||
MODULE_DESCRIPTION("Poly1305 authenticator");
|
||||
MODULE_ALIAS_CRYPTO("poly1305");
|
||||
MODULE_ALIAS_CRYPTO("poly1305-simd");
|
@ -48,6 +48,8 @@ config CRYPTO_AEAD
config CRYPTO_AEAD2
	tristate
	select CRYPTO_ALGAPI2
	select CRYPTO_NULL2
	select CRYPTO_RNG2

config CRYPTO_BLKCIPHER
	tristate
@ -150,12 +152,16 @@ config CRYPTO_GF128MUL

config CRYPTO_NULL
	tristate "Null algorithms"
	select CRYPTO_ALGAPI
	select CRYPTO_BLKCIPHER
	select CRYPTO_HASH
	select CRYPTO_NULL2
	help
	  These are 'Null' algorithms, used by IPsec, which do nothing.

config CRYPTO_NULL2
	tristate
	select CRYPTO_ALGAPI2
	select CRYPTO_BLKCIPHER2
	select CRYPTO_HASH2

config CRYPTO_PCRYPT
	tristate "Parallel crypto engine"
	depends on SMP
@ -200,6 +206,7 @@ config CRYPTO_AUTHENC
	select CRYPTO_BLKCIPHER
	select CRYPTO_MANAGER
	select CRYPTO_HASH
	select CRYPTO_NULL
	help
	  Authenc: Combined mode wrapper for IPsec.
	  This is required for IPSec.
@ -470,6 +477,18 @@ config CRYPTO_POLY1305
	  It is used for the ChaCha20-Poly1305 AEAD, specified in RFC7539 for use
	  in IETF protocols. This is the portable C implementation of Poly1305.

config CRYPTO_POLY1305_X86_64
	tristate "Poly1305 authenticator algorithm (x86_64/SSE2/AVX2)"
	depends on X86 && 64BIT
	select CRYPTO_POLY1305
	help
	  Poly1305 authenticator algorithm, RFC7539.

	  Poly1305 is an authenticator algorithm designed by Daniel J. Bernstein.
	  It is used for the ChaCha20-Poly1305 AEAD, specified in RFC7539 for use
	  in IETF protocols. This is the x86_64 assembler implementation using SIMD
	  instructions.

config CRYPTO_MD4
	tristate "MD4 digest algorithm"
	select CRYPTO_HASH
@ -1213,6 +1232,21 @@ config CRYPTO_CHACHA20
	  See also:
	  <http://cr.yp.to/chacha/chacha-20080128.pdf>

config CRYPTO_CHACHA20_X86_64
	tristate "ChaCha20 cipher algorithm (x86_64/SSSE3/AVX2)"
	depends on X86 && 64BIT
	select CRYPTO_BLKCIPHER
	select CRYPTO_CHACHA20
	help
	  ChaCha20 cipher algorithm, RFC7539.

	  ChaCha20 is a 256-bit high-speed stream cipher designed by Daniel J.
	  Bernstein and further specified in RFC7539 for use in IETF protocols.
	  This is the x86_64 assembler implementation using SIMD instructions.

	  See also:
	  <http://cr.yp.to/chacha/chacha-20080128.pdf>

config CRYPTO_SEED
	tristate "SEED cipher algorithm"
	select CRYPTO_ALGAPI
@ -17,6 +17,7 @@ obj-$(CONFIG_CRYPTO_AEAD2) += aead.o

crypto_blkcipher-y := ablkcipher.o
crypto_blkcipher-y += blkcipher.o
crypto_blkcipher-y += skcipher.o
obj-$(CONFIG_CRYPTO_BLKCIPHER2) += crypto_blkcipher.o
obj-$(CONFIG_CRYPTO_BLKCIPHER2) += chainiv.o
obj-$(CONFIG_CRYPTO_BLKCIPHER2) += eseqiv.o
@ -46,7 +47,7 @@ obj-$(CONFIG_CRYPTO_CMAC) += cmac.o
obj-$(CONFIG_CRYPTO_HMAC) += hmac.o
obj-$(CONFIG_CRYPTO_VMAC) += vmac.o
obj-$(CONFIG_CRYPTO_XCBC) += xcbc.o
obj-$(CONFIG_CRYPTO_NULL) += crypto_null.o
obj-$(CONFIG_CRYPTO_NULL2) += crypto_null.o
obj-$(CONFIG_CRYPTO_MD4) += md4.o
obj-$(CONFIG_CRYPTO_MD5) += md5.o
obj-$(CONFIG_CRYPTO_RMD128) += rmd128.o
639
crypto/aead.c
@ -3,7 +3,7 @@
|
||||
*
|
||||
* This file provides API support for AEAD algorithms.
|
||||
*
|
||||
* Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
|
||||
* Copyright (c) 2007-2015 Herbert Xu <herbert@gondor.apana.org.au>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the Free
|
||||
@ -13,13 +13,14 @@
|
||||
*/
|
||||
|
||||
#include <crypto/internal/geniv.h>
|
||||
#include <crypto/internal/rng.h>
|
||||
#include <crypto/null.h>
|
||||
#include <crypto/scatterwalk.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/rtnetlink.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/seq_file.h>
|
||||
#include <linux/cryptouser.h>
|
||||
@ -27,17 +28,6 @@
|
||||
|
||||
#include "internal.h"
|
||||
|
||||
struct compat_request_ctx {
|
||||
struct scatterlist src[2];
|
||||
struct scatterlist dst[2];
|
||||
struct scatterlist ivbuf[2];
|
||||
struct scatterlist *ivsg;
|
||||
struct aead_givcrypt_request subreq;
|
||||
};
|
||||
|
||||
static int aead_null_givencrypt(struct aead_givcrypt_request *req);
|
||||
static int aead_null_givdecrypt(struct aead_givcrypt_request *req);
|
||||
|
||||
static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
@ -53,7 +43,7 @@ static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key,
|
||||
|
||||
alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
|
||||
memcpy(alignbuffer, key, keylen);
|
||||
ret = tfm->setkey(tfm, alignbuffer, keylen);
|
||||
ret = crypto_aead_alg(tfm)->setkey(tfm, alignbuffer, keylen);
|
||||
memset(alignbuffer, 0, keylen);
|
||||
kfree(buffer);
|
||||
return ret;
|
||||
@ -64,12 +54,10 @@ int crypto_aead_setkey(struct crypto_aead *tfm,
|
||||
{
|
||||
unsigned long alignmask = crypto_aead_alignmask(tfm);
|
||||
|
||||
tfm = tfm->child;
|
||||
|
||||
if ((unsigned long)key & alignmask)
|
||||
return setkey_unaligned(tfm, key, keylen);
|
||||
|
||||
return tfm->setkey(tfm, key, keylen);
|
||||
return crypto_aead_alg(tfm)->setkey(tfm, key, keylen);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(crypto_aead_setkey);
|
||||
|
||||
@ -80,100 +68,17 @@ int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
|
||||
if (authsize > crypto_aead_maxauthsize(tfm))
|
||||
return -EINVAL;
|
||||
|
||||
if (tfm->setauthsize) {
|
||||
err = tfm->setauthsize(tfm->child, authsize);
|
||||
if (crypto_aead_alg(tfm)->setauthsize) {
|
||||
err = crypto_aead_alg(tfm)->setauthsize(tfm, authsize);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
|
||||
tfm->child->authsize = authsize;
|
||||
tfm->authsize = authsize;
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(crypto_aead_setauthsize);
|
||||
|
||||
struct aead_old_request {
|
||||
struct scatterlist srcbuf[2];
|
||||
struct scatterlist dstbuf[2];
|
||||
struct aead_request subreq;
|
||||
};
|
||||
|
||||
unsigned int crypto_aead_reqsize(struct crypto_aead *tfm)
|
||||
{
|
||||
return tfm->reqsize + sizeof(struct aead_old_request);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(crypto_aead_reqsize);
|
||||
|
||||
static int old_crypt(struct aead_request *req,
|
||||
int (*crypt)(struct aead_request *req))
|
||||
{
|
||||
struct aead_old_request *nreq = aead_request_ctx(req);
|
||||
struct crypto_aead *aead = crypto_aead_reqtfm(req);
|
||||
struct scatterlist *src, *dst;
|
||||
|
||||
if (req->old)
|
||||
return crypt(req);
|
||||
|
||||
src = scatterwalk_ffwd(nreq->srcbuf, req->src, req->assoclen);
|
||||
dst = req->src == req->dst ?
|
||||
src : scatterwalk_ffwd(nreq->dstbuf, req->dst, req->assoclen);
|
||||
|
||||
aead_request_set_tfm(&nreq->subreq, aead);
|
||||
aead_request_set_callback(&nreq->subreq, aead_request_flags(req),
|
||||
req->base.complete, req->base.data);
|
||||
aead_request_set_crypt(&nreq->subreq, src, dst, req->cryptlen,
|
||||
req->iv);
|
||||
aead_request_set_assoc(&nreq->subreq, req->src, req->assoclen);
|
||||
|
||||
return crypt(&nreq->subreq);
|
||||
}
|
||||
|
||||
static int old_encrypt(struct aead_request *req)
|
||||
{
|
||||
struct crypto_aead *aead = crypto_aead_reqtfm(req);
|
||||
struct old_aead_alg *alg = crypto_old_aead_alg(aead);
|
||||
|
||||
return old_crypt(req, alg->encrypt);
|
||||
}
|
||||
|
||||
static int old_decrypt(struct aead_request *req)
|
||||
{
|
||||
struct crypto_aead *aead = crypto_aead_reqtfm(req);
|
||||
struct old_aead_alg *alg = crypto_old_aead_alg(aead);
|
||||
|
||||
return old_crypt(req, alg->decrypt);
|
||||
}
|
||||
|
||||
static int no_givcrypt(struct aead_givcrypt_request *req)
|
||||
{
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
||||
static int crypto_old_aead_init_tfm(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct old_aead_alg *alg = &tfm->__crt_alg->cra_aead;
|
||||
struct crypto_aead *crt = __crypto_aead_cast(tfm);
|
||||
|
||||
if (max(alg->maxauthsize, alg->ivsize) > PAGE_SIZE / 8)
|
||||
return -EINVAL;
|
||||
|
||||
crt->setkey = alg->setkey;
|
||||
crt->setauthsize = alg->setauthsize;
|
||||
crt->encrypt = old_encrypt;
|
||||
crt->decrypt = old_decrypt;
|
||||
if (alg->ivsize) {
|
||||
crt->givencrypt = alg->givencrypt ?: no_givcrypt;
|
||||
crt->givdecrypt = alg->givdecrypt ?: no_givcrypt;
|
||||
} else {
|
||||
crt->givencrypt = aead_null_givencrypt;
|
||||
crt->givdecrypt = aead_null_givdecrypt;
|
||||
}
|
||||
crt->child = __crypto_aead_cast(tfm);
|
||||
crt->authsize = alg->maxauthsize;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void crypto_aead_exit_tfm(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct crypto_aead *aead = __crypto_aead_cast(tfm);
|
||||
@ -187,14 +92,6 @@ static int crypto_aead_init_tfm(struct crypto_tfm *tfm)
|
||||
struct crypto_aead *aead = __crypto_aead_cast(tfm);
|
||||
struct aead_alg *alg = crypto_aead_alg(aead);
|
||||
|
||||
if (crypto_old_aead_alg(aead)->encrypt)
|
||||
return crypto_old_aead_init_tfm(tfm);
|
||||
|
||||
aead->setkey = alg->setkey;
|
||||
aead->setauthsize = alg->setauthsize;
|
||||
aead->encrypt = alg->encrypt;
|
||||
aead->decrypt = alg->decrypt;
|
||||
aead->child = __crypto_aead_cast(tfm);
|
||||
aead->authsize = alg->maxauthsize;
|
||||
|
||||
if (alg->exit)
|
||||
@ -206,64 +103,6 @@ static int crypto_aead_init_tfm(struct crypto_tfm *tfm)
|
||||
return 0;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_NET
|
||||
static int crypto_old_aead_report(struct sk_buff *skb, struct crypto_alg *alg)
|
||||
{
|
||||
struct crypto_report_aead raead;
|
||||
struct old_aead_alg *aead = &alg->cra_aead;
|
||||
|
||||
strncpy(raead.type, "aead", sizeof(raead.type));
|
||||
strncpy(raead.geniv, aead->geniv ?: "<built-in>", sizeof(raead.geniv));
|
||||
|
||||
raead.blocksize = alg->cra_blocksize;
|
||||
raead.maxauthsize = aead->maxauthsize;
|
||||
raead.ivsize = aead->ivsize;
|
||||
|
||||
if (nla_put(skb, CRYPTOCFGA_REPORT_AEAD,
|
||||
sizeof(struct crypto_report_aead), &raead))
|
||||
goto nla_put_failure;
|
||||
return 0;
|
||||
|
||||
nla_put_failure:
|
||||
return -EMSGSIZE;
|
||||
}
|
||||
#else
|
||||
static int crypto_old_aead_report(struct sk_buff *skb, struct crypto_alg *alg)
|
||||
{
|
||||
return -ENOSYS;
|
||||
}
|
||||
#endif
|
||||
|
||||
static void crypto_old_aead_show(struct seq_file *m, struct crypto_alg *alg)
|
||||
__attribute__ ((unused));
|
||||
static void crypto_old_aead_show(struct seq_file *m, struct crypto_alg *alg)
|
||||
{
|
||||
struct old_aead_alg *aead = &alg->cra_aead;
|
||||
|
||||
seq_printf(m, "type : aead\n");
|
||||
seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
|
||||
"yes" : "no");
|
||||
seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
|
||||
seq_printf(m, "ivsize : %u\n", aead->ivsize);
|
||||
seq_printf(m, "maxauthsize : %u\n", aead->maxauthsize);
|
||||
seq_printf(m, "geniv : %s\n", aead->geniv ?: "<built-in>");
|
||||
}
|
||||
|
||||
const struct crypto_type crypto_aead_type = {
|
||||
.extsize = crypto_alg_extsize,
|
||||
.init_tfm = crypto_aead_init_tfm,
|
||||
#ifdef CONFIG_PROC_FS
|
||||
.show = crypto_old_aead_show,
|
||||
#endif
|
||||
.report = crypto_old_aead_report,
|
||||
.lookup = crypto_lookup_aead,
|
||||
.maskclear = ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV),
|
||||
.maskset = CRYPTO_ALG_TYPE_MASK,
|
||||
.type = CRYPTO_ALG_TYPE_AEAD,
|
||||
.tfmsize = offsetof(struct crypto_aead, base),
|
||||
};
|
||||
EXPORT_SYMBOL_GPL(crypto_aead_type);
|
||||
|
||||
#ifdef CONFIG_NET
|
||||
static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg)
|
||||
{
|
||||
@ -307,9 +146,22 @@ static void crypto_aead_show(struct seq_file *m, struct crypto_alg *alg)
|
||||
seq_printf(m, "geniv : <none>\n");
|
||||
}
|
||||
|
||||
static const struct crypto_type crypto_new_aead_type = {
|
||||
static void crypto_aead_free_instance(struct crypto_instance *inst)
|
||||
{
|
||||
struct aead_instance *aead = aead_instance(inst);
|
||||
|
||||
if (!aead->free) {
|
||||
inst->tmpl->free(inst);
|
||||
return;
|
||||
}
|
||||
|
||||
aead->free(aead);
|
||||
}
|
||||
|
||||
static const struct crypto_type crypto_aead_type = {
|
||||
.extsize = crypto_alg_extsize,
|
||||
.init_tfm = crypto_aead_init_tfm,
|
||||
.free = crypto_aead_free_instance,
|
||||
#ifdef CONFIG_PROC_FS
|
||||
.show = crypto_aead_show,
|
||||
#endif
|
||||
@ -320,81 +172,6 @@ static const struct crypto_type crypto_new_aead_type = {
|
||||
.tfmsize = offsetof(struct crypto_aead, base),
|
||||
};
|
||||
|
||||
static int aead_null_givencrypt(struct aead_givcrypt_request *req)
|
||||
{
|
||||
return crypto_aead_encrypt(&req->areq);
|
||||
}
|
||||
|
||||
static int aead_null_givdecrypt(struct aead_givcrypt_request *req)
|
||||
{
|
||||
return crypto_aead_decrypt(&req->areq);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_NET
|
||||
static int crypto_nivaead_report(struct sk_buff *skb, struct crypto_alg *alg)
|
||||
{
|
||||
struct crypto_report_aead raead;
|
||||
struct old_aead_alg *aead = &alg->cra_aead;
|
||||
|
||||
strncpy(raead.type, "nivaead", sizeof(raead.type));
|
||||
strncpy(raead.geniv, aead->geniv, sizeof(raead.geniv));
|
||||
|
||||
raead.blocksize = alg->cra_blocksize;
|
||||
raead.maxauthsize = aead->maxauthsize;
|
||||
raead.ivsize = aead->ivsize;
|
||||
|
||||
if (nla_put(skb, CRYPTOCFGA_REPORT_AEAD,
|
||||
sizeof(struct crypto_report_aead), &raead))
|
||||
goto nla_put_failure;
|
||||
return 0;
|
||||
|
||||
nla_put_failure:
|
||||
return -EMSGSIZE;
|
||||
}
|
||||
#else
|
||||
static int crypto_nivaead_report(struct sk_buff *skb, struct crypto_alg *alg)
|
||||
{
|
||||
return -ENOSYS;
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
static void crypto_nivaead_show(struct seq_file *m, struct crypto_alg *alg)
|
||||
__attribute__ ((unused));
|
||||
static void crypto_nivaead_show(struct seq_file *m, struct crypto_alg *alg)
|
||||
{
|
||||
struct old_aead_alg *aead = &alg->cra_aead;
|
||||
|
||||
seq_printf(m, "type : nivaead\n");
|
||||
seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
|
||||
"yes" : "no");
|
||||
seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
|
||||
seq_printf(m, "ivsize : %u\n", aead->ivsize);
|
||||
seq_printf(m, "maxauthsize : %u\n", aead->maxauthsize);
|
||||
	seq_printf(m, "geniv        : %s\n", aead->geniv);
}

const struct crypto_type crypto_nivaead_type = {
	.extsize = crypto_alg_extsize,
	.init_tfm = crypto_aead_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_nivaead_show,
#endif
	.report = crypto_nivaead_report,
	.maskclear = ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV),
	.maskset = CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV,
	.type = CRYPTO_ALG_TYPE_AEAD,
	.tfmsize = offsetof(struct crypto_aead, base),
};
EXPORT_SYMBOL_GPL(crypto_nivaead_type);

static int crypto_grab_nivaead(struct crypto_aead_spawn *spawn,
			       const char *name, u32 type, u32 mask)
{
	spawn->base.frontend = &crypto_nivaead_type;
	return crypto_grab_spawn(&spawn->base, name, type, mask);
}

static int aead_geniv_setkey(struct crypto_aead *tfm,
			     const u8 *key, unsigned int keylen)
{
@@ -411,169 +188,6 @@ static int aead_geniv_setauthsize(struct crypto_aead *tfm,
	return crypto_aead_setauthsize(ctx->child, authsize);
}

static void compat_encrypt_complete2(struct aead_request *req, int err)
{
	struct compat_request_ctx *rctx = aead_request_ctx(req);
	struct aead_givcrypt_request *subreq = &rctx->subreq;
	struct crypto_aead *geniv;

	if (err == -EINPROGRESS)
		return;

	if (err)
		goto out;

	geniv = crypto_aead_reqtfm(req);
	scatterwalk_map_and_copy(subreq->giv, rctx->ivsg, 0,
				 crypto_aead_ivsize(geniv), 1);

out:
	kzfree(subreq->giv);
}

static void compat_encrypt_complete(struct crypto_async_request *base, int err)
{
	struct aead_request *req = base->data;

	compat_encrypt_complete2(req, err);
	aead_request_complete(req, err);
}

static int compat_encrypt(struct aead_request *req)
{
	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
	struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
	struct compat_request_ctx *rctx = aead_request_ctx(req);
	struct aead_givcrypt_request *subreq = &rctx->subreq;
	unsigned int ivsize = crypto_aead_ivsize(geniv);
	struct scatterlist *src, *dst;
	crypto_completion_t compl;
	void *data;
	u8 *info;
	__be64 seq;
	int err;

	if (req->cryptlen < ivsize)
		return -EINVAL;

	compl = req->base.complete;
	data = req->base.data;

	rctx->ivsg = scatterwalk_ffwd(rctx->ivbuf, req->dst, req->assoclen);
	info = PageHighMem(sg_page(rctx->ivsg)) ? NULL : sg_virt(rctx->ivsg);

	if (!info) {
		info = kmalloc(ivsize, req->base.flags &
				       CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL:
								  GFP_ATOMIC);
		if (!info)
			return -ENOMEM;

		compl = compat_encrypt_complete;
		data = req;
	}

	memcpy(&seq, req->iv + ivsize - sizeof(seq), sizeof(seq));

	src = scatterwalk_ffwd(rctx->src, req->src, req->assoclen + ivsize);
	dst = req->src == req->dst ?
	      src : scatterwalk_ffwd(rctx->dst, rctx->ivsg, ivsize);

	aead_givcrypt_set_tfm(subreq, ctx->child);
	aead_givcrypt_set_callback(subreq, req->base.flags,
				   req->base.complete, req->base.data);
	aead_givcrypt_set_crypt(subreq, src, dst,
				req->cryptlen - ivsize, req->iv);
	aead_givcrypt_set_assoc(subreq, req->src, req->assoclen);
	aead_givcrypt_set_giv(subreq, info, be64_to_cpu(seq));

	err = crypto_aead_givencrypt(subreq);
	if (unlikely(PageHighMem(sg_page(rctx->ivsg))))
		compat_encrypt_complete2(req, err);
	return err;
}

static int compat_decrypt(struct aead_request *req)
{
	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
	struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
	struct compat_request_ctx *rctx = aead_request_ctx(req);
	struct aead_request *subreq = &rctx->subreq.areq;
	unsigned int ivsize = crypto_aead_ivsize(geniv);
	struct scatterlist *src, *dst;
	crypto_completion_t compl;
	void *data;

	if (req->cryptlen < ivsize)
		return -EINVAL;

	aead_request_set_tfm(subreq, ctx->child);

	compl = req->base.complete;
	data = req->base.data;

	src = scatterwalk_ffwd(rctx->src, req->src, req->assoclen + ivsize);
	dst = req->src == req->dst ?
	      src : scatterwalk_ffwd(rctx->dst, req->dst,
				     req->assoclen + ivsize);

	aead_request_set_callback(subreq, req->base.flags, compl, data);
	aead_request_set_crypt(subreq, src, dst,
			       req->cryptlen - ivsize, req->iv);
	aead_request_set_assoc(subreq, req->src, req->assoclen);

	scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0);

	return crypto_aead_decrypt(subreq);
}

static int compat_encrypt_first(struct aead_request *req)
{
	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
	struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
	int err = 0;

	spin_lock_bh(&ctx->lock);
	if (geniv->encrypt != compat_encrypt_first)
		goto unlock;

	geniv->encrypt = compat_encrypt;

unlock:
	spin_unlock_bh(&ctx->lock);

	if (err)
		return err;

	return compat_encrypt(req);
}

static int aead_geniv_init_compat(struct crypto_tfm *tfm)
{
	struct crypto_aead *geniv = __crypto_aead_cast(tfm);
	struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
	int err;

	spin_lock_init(&ctx->lock);

	crypto_aead_set_reqsize(geniv, sizeof(struct compat_request_ctx));

	err = aead_geniv_init(tfm);

	ctx->child = geniv->child;
	geniv->child = geniv;

	return err;
}

static void aead_geniv_exit_compat(struct crypto_tfm *tfm)
{
	struct crypto_aead *geniv = __crypto_aead_cast(tfm);
	struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);

	crypto_free_aead(ctx->child);
}

struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
				       struct rtattr **tb, u32 type, u32 mask)
{
@@ -590,8 +204,7 @@ struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
	if (IS_ERR(algt))
		return ERR_CAST(algt);

	if ((algt->type ^ (CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_GENIV)) &
	    algt->mask)
	if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
		return ERR_PTR(-EINVAL);

	name = crypto_attr_alg_name(tb[1]);
@@ -608,9 +221,7 @@ struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
	mask |= crypto_requires_sync(algt->type, algt->mask);

	crypto_set_aead_spawn(spawn, aead_crypto_instance(inst));
	err = (algt->mask & CRYPTO_ALG_GENIV) ?
	      crypto_grab_nivaead(spawn, name, type, mask) :
	      crypto_grab_aead(spawn, name, type, mask);
	err = crypto_grab_aead(spawn, name, type, mask);
	if (err)
		goto err_free_inst;

@@ -623,43 +234,6 @@ struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
	if (ivsize < sizeof(u64))
		goto err_drop_alg;

	/*
	 * This is only true if we're constructing an algorithm with its
	 * default IV generator.  For the default generator we elide the
	 * template name and double-check the IV generator.
	 */
	if (algt->mask & CRYPTO_ALG_GENIV) {
		if (!alg->base.cra_aead.encrypt)
			goto err_drop_alg;
		if (strcmp(tmpl->name, alg->base.cra_aead.geniv))
			goto err_drop_alg;

		memcpy(inst->alg.base.cra_name, alg->base.cra_name,
		       CRYPTO_MAX_ALG_NAME);
		memcpy(inst->alg.base.cra_driver_name,
		       alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME);

		inst->alg.base.cra_flags = CRYPTO_ALG_TYPE_AEAD |
					   CRYPTO_ALG_GENIV;
		inst->alg.base.cra_flags |= alg->base.cra_flags &
					    CRYPTO_ALG_ASYNC;
		inst->alg.base.cra_priority = alg->base.cra_priority;
		inst->alg.base.cra_blocksize = alg->base.cra_blocksize;
		inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
		inst->alg.base.cra_type = &crypto_aead_type;

		inst->alg.base.cra_aead.ivsize = ivsize;
		inst->alg.base.cra_aead.maxauthsize = maxauthsize;

		inst->alg.base.cra_aead.setkey = alg->base.cra_aead.setkey;
		inst->alg.base.cra_aead.setauthsize =
			alg->base.cra_aead.setauthsize;
		inst->alg.base.cra_aead.encrypt = alg->base.cra_aead.encrypt;
		inst->alg.base.cra_aead.decrypt = alg->base.cra_aead.decrypt;

		goto out;
	}

	err = -ENAMETOOLONG;
	if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
		     "%s(%s)", tmpl->name, alg->base.cra_name) >=
@@ -682,12 +256,6 @@ struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
	inst->alg.ivsize = ivsize;
	inst->alg.maxauthsize = maxauthsize;

	inst->alg.encrypt = compat_encrypt_first;
	inst->alg.decrypt = compat_decrypt;

	inst->alg.base.cra_init = aead_geniv_init_compat;
	inst->alg.base.cra_exit = aead_geniv_exit_compat;

out:
	return inst;

@@ -707,147 +275,58 @@ void aead_geniv_free(struct aead_instance *inst)
}
EXPORT_SYMBOL_GPL(aead_geniv_free);

int aead_geniv_init(struct crypto_tfm *tfm)
int aead_init_geniv(struct crypto_aead *aead)
{
	struct crypto_instance *inst = (void *)tfm->__crt_alg;
	struct aead_geniv_ctx *ctx = crypto_aead_ctx(aead);
	struct aead_instance *inst = aead_alg_instance(aead);
	struct crypto_aead *child;
	struct crypto_aead *aead;

	aead = __crypto_aead_cast(tfm);

	child = crypto_spawn_aead(crypto_instance_ctx(inst));
	if (IS_ERR(child))
		return PTR_ERR(child);

	aead->child = child;
	aead->reqsize += crypto_aead_reqsize(child);

	return 0;
}
EXPORT_SYMBOL_GPL(aead_geniv_init);

void aead_geniv_exit(struct crypto_tfm *tfm)
{
	crypto_free_aead(__crypto_aead_cast(tfm)->child);
}
EXPORT_SYMBOL_GPL(aead_geniv_exit);

static int crypto_nivaead_default(struct crypto_alg *alg, u32 type, u32 mask)
{
	struct rtattr *tb[3];
	struct {
		struct rtattr attr;
		struct crypto_attr_type data;
	} ptype;
	struct {
		struct rtattr attr;
		struct crypto_attr_alg data;
	} palg;
	struct crypto_template *tmpl;
	struct crypto_instance *inst;
	struct crypto_alg *larval;
	const char *geniv;
	int err;

	larval = crypto_larval_lookup(alg->cra_driver_name,
				      CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_GENIV,
				      CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
	err = PTR_ERR(larval);
	if (IS_ERR(larval))
	spin_lock_init(&ctx->lock);

	err = crypto_get_default_rng();
	if (err)
		goto out;

	err = -EAGAIN;
	if (!crypto_is_larval(larval))
		goto drop_larval;
	err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
				   crypto_aead_ivsize(aead));
	crypto_put_default_rng();
	if (err)
		goto out;

	ptype.attr.rta_len = sizeof(ptype);
	ptype.attr.rta_type = CRYPTOA_TYPE;
	ptype.data.type = type | CRYPTO_ALG_GENIV;
	/* GENIV tells the template that we're making a default geniv. */
	ptype.data.mask = mask | CRYPTO_ALG_GENIV;
	tb[0] = &ptype.attr;
	ctx->null = crypto_get_default_null_skcipher();
	err = PTR_ERR(ctx->null);
	if (IS_ERR(ctx->null))
		goto out;

	palg.attr.rta_len = sizeof(palg);
	palg.attr.rta_type = CRYPTOA_ALG;
	/* Must use the exact name to locate ourselves. */
	memcpy(palg.data.name, alg->cra_driver_name, CRYPTO_MAX_ALG_NAME);
	tb[1] = &palg.attr;
	child = crypto_spawn_aead(aead_instance_ctx(inst));
	err = PTR_ERR(child);
	if (IS_ERR(child))
		goto drop_null;

	tb[2] = NULL;
	ctx->child = child;
	crypto_aead_set_reqsize(aead, crypto_aead_reqsize(child) +
				      sizeof(struct aead_request));

	geniv = alg->cra_aead.geniv;
	err = 0;

	tmpl = crypto_lookup_template(geniv);
	err = -ENOENT;
	if (!tmpl)
		goto kill_larval;

	if (tmpl->create) {
		err = tmpl->create(tmpl, tb);
		if (err)
			goto put_tmpl;
		goto ok;
	}

	inst = tmpl->alloc(tb);
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto put_tmpl;

	err = crypto_register_instance(tmpl, inst);
	if (err) {
		tmpl->free(inst);
		goto put_tmpl;
	}

ok:
	/* Redo the lookup to use the instance we just registered. */
	err = -EAGAIN;

put_tmpl:
	crypto_tmpl_put(tmpl);
kill_larval:
	crypto_larval_kill(larval);
drop_larval:
	crypto_mod_put(larval);
out:
	crypto_mod_put(alg);
	return err;
}

struct crypto_alg *crypto_lookup_aead(const char *name, u32 type, u32 mask)
drop_null:
	crypto_put_default_null_skcipher();
	goto out;
}
EXPORT_SYMBOL_GPL(aead_init_geniv);

void aead_exit_geniv(struct crypto_aead *tfm)
{
	struct crypto_alg *alg;
	struct aead_geniv_ctx *ctx = crypto_aead_ctx(tfm);

	alg = crypto_alg_mod_lookup(name, type, mask);
	if (IS_ERR(alg))
		return alg;

	if (alg->cra_type == &crypto_aead_type)
		return alg;

	if (!alg->cra_aead.ivsize)
		return alg;

	crypto_mod_put(alg);
	alg = crypto_alg_mod_lookup(name, type | CRYPTO_ALG_TESTED,
				    mask & ~CRYPTO_ALG_TESTED);
	if (IS_ERR(alg))
		return alg;

	if (alg->cra_type == &crypto_aead_type) {
		if (~alg->cra_flags & (type ^ ~mask) & CRYPTO_ALG_TESTED) {
			crypto_mod_put(alg);
			alg = ERR_PTR(-ENOENT);
		}
		return alg;
	}

	BUG_ON(!alg->cra_aead.ivsize);

	return ERR_PTR(crypto_nivaead_default(alg, type, mask));
	crypto_free_aead(ctx->child);
	crypto_put_default_null_skcipher();
}
EXPORT_SYMBOL_GPL(crypto_lookup_aead);
EXPORT_SYMBOL_GPL(aead_exit_geniv);

int crypto_grab_aead(struct crypto_aead_spawn *spawn, const char *name,
		     u32 type, u32 mask)
@@ -870,7 +349,7 @@ static int aead_prepare_alg(struct aead_alg *alg)
	if (max(alg->maxauthsize, alg->ivsize) > PAGE_SIZE / 8)
		return -EINVAL;

	base->cra_type = &crypto_new_aead_type;
	base->cra_type = &crypto_aead_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_AEAD;

@@ -67,12 +67,22 @@ static int crypto_check_alg(struct crypto_alg *alg)
	return crypto_set_driver_name(alg);
}

static void crypto_free_instance(struct crypto_instance *inst)
{
	if (!inst->alg.cra_type->free) {
		inst->tmpl->free(inst);
		return;
	}

	inst->alg.cra_type->free(inst);
}

static void crypto_destroy_instance(struct crypto_alg *alg)
{
	struct crypto_instance *inst = (void *)alg;
	struct crypto_template *tmpl = inst->tmpl;

	tmpl->free(inst);
	crypto_free_instance(inst);
	crypto_tmpl_put(tmpl);
}

@@ -481,7 +491,7 @@ void crypto_unregister_template(struct crypto_template *tmpl)

	hlist_for_each_entry_safe(inst, n, list, list) {
		BUG_ON(atomic_read(&inst->alg.cra_refcnt) != 1);
		tmpl->free(inst);
		crypto_free_instance(inst);
	}
	crypto_remove_final(&users);
}
@@ -892,7 +902,7 @@ int crypto_enqueue_request(struct crypto_queue *queue,
}
EXPORT_SYMBOL_GPL(crypto_enqueue_request);

void *__crypto_dequeue_request(struct crypto_queue *queue, unsigned int offset)
struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue)
{
	struct list_head *request;

@@ -907,14 +917,7 @@ void *__crypto_dequeue_request(struct crypto_queue *queue, unsigned int offset)
	request = queue->list.next;
	list_del(request);

	return (char *)list_entry(request, struct crypto_async_request, list) -
	       offset;
}
EXPORT_SYMBOL_GPL(__crypto_dequeue_request);

struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue)
{
	return __crypto_dequeue_request(queue, 0);
	return list_entry(request, struct crypto_async_request, list);
}
EXPORT_SYMBOL_GPL(crypto_dequeue_request);

@@ -248,13 +248,11 @@ static int cryptomgr_schedule_test(struct crypto_alg *alg)
	type = alg->cra_flags;

	/* This piece of crap needs to disappear into per-type test hooks. */
	if ((!((type ^ CRYPTO_ALG_TYPE_BLKCIPHER) &
	       CRYPTO_ALG_TYPE_BLKCIPHER_MASK) && !(type & CRYPTO_ALG_GENIV) &&
	     ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	      CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
					  alg->cra_ablkcipher.ivsize)) ||
	    (!((type ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK) &&
	     alg->cra_type == &crypto_nivaead_type && alg->cra_aead.ivsize))
	if (!((type ^ CRYPTO_ALG_TYPE_BLKCIPHER) &
	      CRYPTO_ALG_TYPE_BLKCIPHER_MASK) && !(type & CRYPTO_ALG_GENIV) &&
	    ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	     CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
					 alg->cra_ablkcipher.ivsize))
		type |= CRYPTO_ALG_TESTED;

	param->type = type;
@@ -90,6 +90,7 @@ static void aead_put_sgl(struct sock *sk)
		put_page(sg_page(sg + i));
		sg_assign_page(sg + i, NULL);
	}
	sg_init_table(sg, ALG_MAX_PAGES);
	sgl->cur = 0;
	ctx->used = 0;
	ctx->more = 0;
@@ -514,8 +515,7 @@ static struct proto_ops algif_aead_ops = {

static void *aead_bind(const char *name, u32 type, u32 mask)
{
	return crypto_alloc_aead(name, type | CRYPTO_ALG_AEAD_NEW,
				 mask | CRYPTO_ALG_AEAD_NEW);
	return crypto_alloc_aead(name, type, mask);
}

static void aead_release(void *private)
592	crypto/authenc.c
@@ -1,7 +1,7 @@
/*
 * Authenc: Simple AEAD wrapper for IPsec
 *
 * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2007-2015 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
@@ -14,6 +14,7 @@
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/authenc.h>
#include <crypto/null.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
@@ -23,26 +24,21 @@
#include <linux/slab.h>
#include <linux/spinlock.h>

typedef u8 *(*authenc_ahash_t)(struct aead_request *req, unsigned int flags);

struct authenc_instance_ctx {
	struct crypto_ahash_spawn auth;
	struct crypto_skcipher_spawn enc;
	unsigned int reqoff;
};

struct crypto_authenc_ctx {
	unsigned int reqoff;
	struct crypto_ahash *auth;
	struct crypto_ablkcipher *enc;
	struct crypto_blkcipher *null;
};

struct authenc_request_ctx {
	unsigned int cryptlen;
	struct scatterlist *sg;
	struct scatterlist asg[2];
	struct scatterlist cipher[2];
	crypto_completion_t complete;
	crypto_completion_t update_complete;
	struct scatterlist src[2];
	struct scatterlist dst[2];
	char tail[];
};
@@ -119,189 +115,35 @@ static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
	goto out;
}

static void authenc_geniv_ahash_update_done(struct crypto_async_request *areq,
					    int err)
{
	struct aead_request *req = areq->data;
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
	struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);

	if (err)
		goto out;

	ahash_request_set_crypt(ahreq, areq_ctx->sg, ahreq->result,
				areq_ctx->cryptlen);
	ahash_request_set_callback(ahreq, aead_request_flags(req) &
					  CRYPTO_TFM_REQ_MAY_SLEEP,
				   areq_ctx->complete, req);

	err = crypto_ahash_finup(ahreq);
	if (err)
		goto out;

	scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg,
				 areq_ctx->cryptlen,
				 crypto_aead_authsize(authenc), 1);

out:
	authenc_request_complete(req, err);
}

static void authenc_geniv_ahash_done(struct crypto_async_request *areq, int err)
{
	struct aead_request *req = areq->data;
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
	struct aead_instance *inst = aead_alg_instance(authenc);
	struct authenc_instance_ctx *ictx = aead_instance_ctx(inst);
	struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff);

	if (err)
		goto out;

	scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg,
				 areq_ctx->cryptlen,
	scatterwalk_map_and_copy(ahreq->result, req->dst,
				 req->assoclen + req->cryptlen,
				 crypto_aead_authsize(authenc), 1);

out:
	aead_request_complete(req, err);
}

static void authenc_verify_ahash_update_done(struct crypto_async_request *areq,
					     int err)
{
	u8 *ihash;
	unsigned int authsize;
	struct ablkcipher_request *abreq;
	struct aead_request *req = areq->data;
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
	struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
	unsigned int cryptlen = req->cryptlen;

	if (err)
		goto out;

	ahash_request_set_crypt(ahreq, areq_ctx->sg, ahreq->result,
				areq_ctx->cryptlen);
	ahash_request_set_callback(ahreq, aead_request_flags(req) &
					  CRYPTO_TFM_REQ_MAY_SLEEP,
				   areq_ctx->complete, req);

	err = crypto_ahash_finup(ahreq);
	if (err)
		goto out;

	authsize = crypto_aead_authsize(authenc);
	cryptlen -= authsize;
	ihash = ahreq->result + authsize;
	scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
				 authsize, 0);

	err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
	if (err)
		goto out;

	abreq = aead_request_ctx(req);
	ablkcipher_request_set_tfm(abreq, ctx->enc);
	ablkcipher_request_set_callback(abreq, aead_request_flags(req),
					req->base.complete, req->base.data);
	ablkcipher_request_set_crypt(abreq, req->src, req->dst,
				     cryptlen, req->iv);

	err = crypto_ablkcipher_decrypt(abreq);

out:
	authenc_request_complete(req, err);
}

static void authenc_verify_ahash_done(struct crypto_async_request *areq,
				      int err)
{
	u8 *ihash;
	unsigned int authsize;
	struct ablkcipher_request *abreq;
	struct aead_request *req = areq->data;
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
	struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
	unsigned int cryptlen = req->cryptlen;

	if (err)
		goto out;

	authsize = crypto_aead_authsize(authenc);
	cryptlen -= authsize;
	ihash = ahreq->result + authsize;
	scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
				 authsize, 0);

	err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
	if (err)
		goto out;

	abreq = aead_request_ctx(req);
	ablkcipher_request_set_tfm(abreq, ctx->enc);
	ablkcipher_request_set_callback(abreq, aead_request_flags(req),
					req->base.complete, req->base.data);
	ablkcipher_request_set_crypt(abreq, req->src, req->dst,
				     cryptlen, req->iv);

	err = crypto_ablkcipher_decrypt(abreq);

out:
	authenc_request_complete(req, err);
}

static u8 *crypto_authenc_ahash_fb(struct aead_request *req, unsigned int flags)
static int crypto_authenc_genicv(struct aead_request *req, unsigned int flags)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct aead_instance *inst = aead_alg_instance(authenc);
	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
	struct authenc_instance_ctx *ictx = aead_instance_ctx(inst);
	struct crypto_ahash *auth = ctx->auth;
	struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
	u8 *hash = areq_ctx->tail;
	int err;

	hash = (u8 *)ALIGN((unsigned long)hash + crypto_ahash_alignmask(auth),
			   crypto_ahash_alignmask(auth) + 1);

	ahash_request_set_tfm(ahreq, auth);

	err = crypto_ahash_init(ahreq);
	if (err)
		return ERR_PTR(err);

	ahash_request_set_crypt(ahreq, req->assoc, hash, req->assoclen);
	ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
				   areq_ctx->update_complete, req);

	err = crypto_ahash_update(ahreq);
	if (err)
		return ERR_PTR(err);

	ahash_request_set_crypt(ahreq, areq_ctx->sg, hash,
				areq_ctx->cryptlen);
	ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
				   areq_ctx->complete, req);

	err = crypto_ahash_finup(ahreq);
	if (err)
		return ERR_PTR(err);

	return hash;
}

static u8 *crypto_authenc_ahash(struct aead_request *req, unsigned int flags)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
	struct crypto_ahash *auth = ctx->auth;
	struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff);
	u8 *hash = areq_ctx->tail;
	int err;

@@ -309,66 +151,18 @@ static u8 *crypto_authenc_ahash(struct aead_request *req, unsigned int flags)
			   crypto_ahash_alignmask(auth) + 1);

	ahash_request_set_tfm(ahreq, auth);
	ahash_request_set_crypt(ahreq, areq_ctx->sg, hash,
				areq_ctx->cryptlen);
	ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
				   areq_ctx->complete, req);
	ahash_request_set_crypt(ahreq, req->dst, hash,
				req->assoclen + req->cryptlen);
	ahash_request_set_callback(ahreq, flags,
				   authenc_geniv_ahash_done, req);

	err = crypto_ahash_digest(ahreq);
	if (err)
		return ERR_PTR(err);
		return err;

	return hash;
}

static int crypto_authenc_genicv(struct aead_request *req, u8 *iv,
				 unsigned int flags)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
	struct scatterlist *dst = req->dst;
	struct scatterlist *assoc = req->assoc;
	struct scatterlist *cipher = areq_ctx->cipher;
	struct scatterlist *asg = areq_ctx->asg;
	unsigned int ivsize = crypto_aead_ivsize(authenc);
	unsigned int cryptlen = req->cryptlen;
	authenc_ahash_t authenc_ahash_fn = crypto_authenc_ahash_fb;
	struct page *dstp;
	u8 *vdst;
	u8 *hash;

	dstp = sg_page(dst);
	vdst = PageHighMem(dstp) ? NULL : page_address(dstp) + dst->offset;

	if (ivsize) {
		sg_init_table(cipher, 2);
		sg_set_buf(cipher, iv, ivsize);
		scatterwalk_crypto_chain(cipher, dst, vdst == iv + ivsize, 2);
		dst = cipher;
		cryptlen += ivsize;
	}

	if (req->assoclen && sg_is_last(assoc)) {
		authenc_ahash_fn = crypto_authenc_ahash;
		sg_init_table(asg, 2);
		sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset);
		scatterwalk_crypto_chain(asg, dst, 0, 2);
		dst = asg;
		cryptlen += req->assoclen;
	}

	areq_ctx->cryptlen = cryptlen;
	areq_ctx->sg = dst;

	areq_ctx->complete = authenc_geniv_ahash_done;
	areq_ctx->update_complete = authenc_geniv_ahash_update_done;

	hash = authenc_ahash_fn(req, flags);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	scatterwalk_map_and_copy(hash, dst, cryptlen,
	scatterwalk_map_and_copy(hash, req->dst, req->assoclen + req->cryptlen,
				 crypto_aead_authsize(authenc), 1);

	return 0;
}

@@ -377,180 +171,155 @@ static void crypto_authenc_encrypt_done(struct crypto_async_request *req,
{
	struct aead_request *areq = req->data;

	if (!err) {
		struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
		struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
		struct authenc_request_ctx *areq_ctx = aead_request_ctx(areq);
		struct ablkcipher_request *abreq = (void *)(areq_ctx->tail
							    + ctx->reqoff);
		u8 *iv = (u8 *)abreq - crypto_ablkcipher_ivsize(ctx->enc);
	if (err)
		goto out;

		err = crypto_authenc_genicv(areq, iv, 0);
	}
	err = crypto_authenc_genicv(areq, 0);

out:
	authenc_request_complete(areq, err);
}

static int crypto_authenc_copy_assoc(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
	struct blkcipher_desc desc = {
		.tfm = ctx->null,
	};

	return crypto_blkcipher_encrypt(&desc, req->dst, req->src,
					req->assoclen);
}

static int crypto_authenc_encrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct aead_instance *inst = aead_alg_instance(authenc);
	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
	struct authenc_instance_ctx *ictx = aead_instance_ctx(inst);
	struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
	struct crypto_ablkcipher *enc = ctx->enc;
	struct scatterlist *dst = req->dst;
	unsigned int cryptlen = req->cryptlen;
	struct ablkcipher_request *abreq = (void *)(areq_ctx->tail
						    + ctx->reqoff);
	u8 *iv = (u8 *)abreq - crypto_ablkcipher_ivsize(enc);
	struct ablkcipher_request *abreq = (void *)(areq_ctx->tail +
						    ictx->reqoff);
	struct scatterlist *src, *dst;
	int err;

	sg_init_table(areq_ctx->src, 2);
	src = scatterwalk_ffwd(areq_ctx->src, req->src, req->assoclen);
	dst = src;

	if (req->src != req->dst) {
		err = crypto_authenc_copy_assoc(req);
		if (err)
			return err;

		sg_init_table(areq_ctx->dst, 2);
		dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen);
	}

	ablkcipher_request_set_tfm(abreq, enc);
	ablkcipher_request_set_callback(abreq, aead_request_flags(req),
					crypto_authenc_encrypt_done, req);
	ablkcipher_request_set_crypt(abreq, req->src, dst, cryptlen, req->iv);

	memcpy(iv, req->iv, crypto_aead_ivsize(authenc));
	ablkcipher_request_set_crypt(abreq, src, dst, cryptlen, req->iv);

	err = crypto_ablkcipher_encrypt(abreq);
	if (err)
		return err;

	return crypto_authenc_genicv(req, iv, CRYPTO_TFM_REQ_MAY_SLEEP);
	return crypto_authenc_genicv(req, aead_request_flags(req));
}

static void crypto_authenc_givencrypt_done(struct crypto_async_request *req,
					   int err)
static int crypto_authenc_decrypt_tail(struct aead_request *req,
				       unsigned int flags)
{
	struct aead_request *areq = req->data;

	if (!err) {
		struct skcipher_givcrypt_request *greq = aead_request_ctx(areq);

		err = crypto_authenc_genicv(areq, greq->giv, 0);
	}

	authenc_request_complete(areq, err);
}

static int crypto_authenc_givencrypt(struct aead_givcrypt_request *req)
{
	struct crypto_aead *authenc = aead_givcrypt_reqtfm(req);
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct aead_instance *inst = aead_alg_instance(authenc);
	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
	struct aead_request *areq = &req->areq;
	struct skcipher_givcrypt_request *greq = aead_request_ctx(areq);
	u8 *iv = req->giv;
	int err;
	struct authenc_instance_ctx *ictx = aead_instance_ctx(inst);
	struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff);
	struct ablkcipher_request *abreq = (void *)(areq_ctx->tail +
						    ictx->reqoff);
	unsigned int authsize = crypto_aead_authsize(authenc);
	u8 *ihash = ahreq->result + authsize;
	struct scatterlist *src, *dst;

	skcipher_givcrypt_set_tfm(greq, ctx->enc);
	skcipher_givcrypt_set_callback(greq, aead_request_flags(areq),
				       crypto_authenc_givencrypt_done, areq);
	skcipher_givcrypt_set_crypt(greq, areq->src, areq->dst, areq->cryptlen,
				    areq->iv);
	skcipher_givcrypt_set_giv(greq, iv, req->seq);
	scatterwalk_map_and_copy(ihash, req->src, ahreq->nbytes, authsize, 0);

	if (crypto_memneq(ihash, ahreq->result, authsize))
		return -EBADMSG;

	sg_init_table(areq_ctx->src, 2);
	src = scatterwalk_ffwd(areq_ctx->src, req->src, req->assoclen);
	dst = src;

	if (req->src != req->dst) {
		sg_init_table(areq_ctx->dst, 2);
		dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen);
	}

	ablkcipher_request_set_tfm(abreq, ctx->enc);
	ablkcipher_request_set_callback(abreq, aead_request_flags(req),
					req->base.complete, req->base.data);
	ablkcipher_request_set_crypt(abreq, src, dst,
				     req->cryptlen - authsize, req->iv);

	return crypto_ablkcipher_decrypt(abreq);
}

static void authenc_verify_ahash_done(struct crypto_async_request *areq,
				      int err)
{
	struct aead_request *req = areq->data;

	err = crypto_skcipher_givencrypt(greq);
	if (err)
		return err;
		goto out;

	return crypto_authenc_genicv(areq, iv, CRYPTO_TFM_REQ_MAY_SLEEP);
}
	err = crypto_authenc_decrypt_tail(req, 0);

static int crypto_authenc_verify(struct aead_request *req,
				 authenc_ahash_t authenc_ahash_fn)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
	u8 *ohash;
	u8 *ihash;
	unsigned int authsize;

	areq_ctx->complete = authenc_verify_ahash_done;
	areq_ctx->update_complete = authenc_verify_ahash_update_done;

	ohash = authenc_ahash_fn(req, CRYPTO_TFM_REQ_MAY_SLEEP);
	if (IS_ERR(ohash))
		return PTR_ERR(ohash);

	authsize = crypto_aead_authsize(authenc);
	ihash = ohash + authsize;
	scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
				 authsize, 0);
	return crypto_memneq(ihash, ohash, authsize) ? -EBADMSG : 0;
}

static int crypto_authenc_iverify(struct aead_request *req, u8 *iv,
				  unsigned int cryptlen)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
	struct scatterlist *src = req->src;
	struct scatterlist *assoc = req->assoc;
	struct scatterlist *cipher = areq_ctx->cipher;
	struct scatterlist *asg = areq_ctx->asg;
	unsigned int ivsize = crypto_aead_ivsize(authenc);
	authenc_ahash_t authenc_ahash_fn = crypto_authenc_ahash_fb;
	struct page *srcp;
	u8 *vsrc;

	srcp = sg_page(src);
	vsrc = PageHighMem(srcp) ? NULL : page_address(srcp) + src->offset;

	if (ivsize) {
		sg_init_table(cipher, 2);
		sg_set_buf(cipher, iv, ivsize);
		scatterwalk_crypto_chain(cipher, src, vsrc == iv + ivsize, 2);
		src = cipher;
		cryptlen += ivsize;
	}

	if (req->assoclen && sg_is_last(assoc)) {
		authenc_ahash_fn = crypto_authenc_ahash;
		sg_init_table(asg, 2);
		sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset);
		scatterwalk_crypto_chain(asg, src, 0, 2);
		src = asg;
		cryptlen += req->assoclen;
	}

	areq_ctx->cryptlen = cryptlen;
	areq_ctx->sg = src;

	return crypto_authenc_verify(req, authenc_ahash_fn);
out:
	authenc_request_complete(req, err);
}

static int crypto_authenc_decrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
	struct ablkcipher_request *abreq = aead_request_ctx(req);
	unsigned int cryptlen = req->cryptlen;
	unsigned int authsize = crypto_aead_authsize(authenc);
	u8 *iv = req->iv;
	struct aead_instance *inst = aead_alg_instance(authenc);
	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
	struct authenc_instance_ctx *ictx = aead_instance_ctx(inst);
	struct crypto_ahash *auth = ctx->auth;
	struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff);
	u8 *hash = areq_ctx->tail;
	int err;

	if (cryptlen < authsize)
		return -EINVAL;
	cryptlen -= authsize;
	hash = (u8 *)ALIGN((unsigned long)hash + crypto_ahash_alignmask(auth),
			   crypto_ahash_alignmask(auth) + 1);

	err = crypto_authenc_iverify(req, iv, cryptlen);
	ahash_request_set_tfm(ahreq, auth);
	ahash_request_set_crypt(ahreq, req->src, hash,
				req->assoclen + req->cryptlen - authsize);
	ahash_request_set_callback(ahreq, aead_request_flags(req),
				   authenc_verify_ahash_done, req);

	err = crypto_ahash_digest(ahreq);
	if (err)
		return err;

	ablkcipher_request_set_tfm(abreq, ctx->enc);
	ablkcipher_request_set_callback(abreq, aead_request_flags(req),
					req->base.complete, req->base.data);
	ablkcipher_request_set_crypt(abreq, req->src, req->dst, cryptlen, iv);

	return crypto_ablkcipher_decrypt(abreq);
	return crypto_authenc_decrypt_tail(req, aead_request_flags(req));
}

static int crypto_authenc_init_tfm(struct crypto_tfm *tfm)
static int crypto_authenc_init_tfm(struct crypto_aead *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct authenc_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_authenc_ctx *ctx = crypto_tfm_ctx(tfm);
	struct aead_instance *inst = aead_alg_instance(tfm);
	struct authenc_instance_ctx *ictx = aead_instance_ctx(inst);
	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_ahash *auth;
	struct crypto_ablkcipher *enc;
	struct crypto_blkcipher *null;
	int err;

	auth = crypto_spawn_ahash(&ictx->auth);
@@ -562,42 +331,57 @@ static int crypto_authenc_init_tfm(struct crypto_tfm *tfm)
	if (IS_ERR(enc))
		goto err_free_ahash;

	null = crypto_get_default_null_skcipher();
	err = PTR_ERR(null);
	if (IS_ERR(null))
		goto err_free_skcipher;

	ctx->auth = auth;
	ctx->enc = enc;
	ctx->null = null;

	ctx->reqoff = ALIGN(2 * crypto_ahash_digestsize(auth) +
			    crypto_ahash_alignmask(auth),
			    crypto_ahash_alignmask(auth) + 1) +
		      crypto_ablkcipher_ivsize(enc);

	crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
	crypto_aead_set_reqsize(
		tfm,
		sizeof(struct authenc_request_ctx) +
		ctx->reqoff +
		ictx->reqoff +
		max_t(unsigned int,
		      crypto_ahash_reqsize(auth) +
		      sizeof(struct ahash_request),
		      sizeof(struct skcipher_givcrypt_request) +
		      crypto_ablkcipher_reqsize(enc)));
		      crypto_ahash_reqsize(auth) +
		      sizeof(struct ahash_request),
		      sizeof(struct ablkcipher_request) +
		      crypto_ablkcipher_reqsize(enc)));

	return 0;

err_free_skcipher:
	crypto_free_ablkcipher(enc);
err_free_ahash:
	crypto_free_ahash(auth);
	return err;
}

static void crypto_authenc_exit_tfm(struct crypto_tfm *tfm)
static void crypto_authenc_exit_tfm(struct crypto_aead *tfm)
{
	struct crypto_authenc_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_free_ahash(ctx->auth);
	crypto_free_ablkcipher(ctx->enc);
	crypto_put_default_null_skcipher();
}

static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb)
static void crypto_authenc_free(struct aead_instance *inst)
{
	struct authenc_instance_ctx *ctx = aead_instance_ctx(inst);

	crypto_drop_skcipher(&ctx->enc);
	crypto_drop_ahash(&ctx->auth);
	kfree(inst);
}

static int crypto_authenc_create(struct crypto_template *tmpl,
				 struct rtattr **tb)
{
	struct crypto_attr_type *algt;
	struct crypto_instance *inst;
	struct aead_instance *inst;
	struct hash_alg_common *auth;
	struct crypto_alg *auth_base;
	struct crypto_alg *enc;
@@ -607,15 +391,15 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb)

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return ERR_CAST(algt);
		return PTR_ERR(algt);

	if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
		return ERR_PTR(-EINVAL);
		return -EINVAL;

	auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH,
			      CRYPTO_ALG_TYPE_AHASH_MASK);
	if (IS_ERR(auth))
		return ERR_CAST(auth);
		return PTR_ERR(auth);

	auth_base = &auth->base;

@@ -629,13 +413,14 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb)
	if (!inst)
		goto out_put_auth;

	ctx = crypto_instance_ctx(inst);
	ctx = aead_instance_ctx(inst);

	err = crypto_init_ahash_spawn(&ctx->auth, auth, inst);
	err = crypto_init_ahash_spawn(&ctx->auth, auth,
				      aead_crypto_instance(inst));
	if (err)
		goto err_free_inst;

	crypto_set_skcipher_spawn(&ctx->enc, inst);
	crypto_set_skcipher_spawn(&ctx->enc, aead_crypto_instance(inst));
	err = crypto_grab_skcipher(&ctx->enc, enc_name, 0,
				   crypto_requires_sync(algt->type,
							algt->mask));
@@ -644,41 +429,47 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb)

	enc = crypto_skcipher_spawn_alg(&ctx->enc);

	ctx->reqoff = ALIGN(2 * auth->digestsize + auth_base->cra_alignmask,
			    auth_base->cra_alignmask + 1);

	err = -ENAMETOOLONG;
	if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
	if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
		     "authenc(%s,%s)", auth_base->cra_name, enc->cra_name) >=
	    CRYPTO_MAX_ALG_NAME)
		goto err_drop_enc;

	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
	if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "authenc(%s,%s)", auth_base->cra_driver_name,
		     enc->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		goto err_drop_enc;

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
	inst->alg.cra_flags |= enc->cra_flags & CRYPTO_ALG_ASYNC;
	inst->alg.cra_priority = enc->cra_priority *
				 10 + auth_base->cra_priority;
	inst->alg.cra_blocksize = enc->cra_blocksize;
	inst->alg.cra_alignmask = auth_base->cra_alignmask | enc->cra_alignmask;
	inst->alg.cra_type = &crypto_aead_type;
	inst->alg.base.cra_flags = enc->cra_flags & CRYPTO_ALG_ASYNC;
	inst->alg.base.cra_priority = enc->cra_priority * 10 +
				      auth_base->cra_priority;
	inst->alg.base.cra_blocksize = enc->cra_blocksize;
	inst->alg.base.cra_alignmask = auth_base->cra_alignmask |
				       enc->cra_alignmask;
	inst->alg.base.cra_ctxsize = sizeof(struct crypto_authenc_ctx);

	inst->alg.cra_aead.ivsize = enc->cra_ablkcipher.ivsize;
	inst->alg.cra_aead.maxauthsize = auth->digestsize;
	inst->alg.ivsize = enc->cra_ablkcipher.ivsize;
	inst->alg.maxauthsize = auth->digestsize;

	inst->alg.cra_ctxsize = sizeof(struct crypto_authenc_ctx);
	inst->alg.init = crypto_authenc_init_tfm;
	inst->alg.exit = crypto_authenc_exit_tfm;

	inst->alg.cra_init = crypto_authenc_init_tfm;
	inst->alg.cra_exit = crypto_authenc_exit_tfm;
	inst->alg.setkey = crypto_authenc_setkey;
	inst->alg.encrypt = crypto_authenc_encrypt;
	inst->alg.decrypt = crypto_authenc_decrypt;

	inst->alg.cra_aead.setkey = crypto_authenc_setkey;
	inst->alg.cra_aead.encrypt = crypto_authenc_encrypt;
	inst->alg.cra_aead.decrypt = crypto_authenc_decrypt;
	inst->alg.cra_aead.givencrypt = crypto_authenc_givencrypt;
	inst->free = crypto_authenc_free;

	err = aead_register_instance(tmpl, inst);
	if (err)
		goto err_drop_enc;

out:
	crypto_mod_put(auth_base);
	return inst;
	return err;

err_drop_enc:
	crypto_drop_skcipher(&ctx->enc);
@@ -687,23 +478,12 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb)
err_free_inst:
	kfree(inst);
out_put_auth:
	inst = ERR_PTR(err);
	goto out;
}

static void crypto_authenc_free(struct crypto_instance *inst)
{
	struct authenc_instance_ctx *ctx = crypto_instance_ctx(inst);

	crypto_drop_skcipher(&ctx->enc);
	crypto_drop_ahash(&ctx->auth);
	kfree(inst);
}

static struct crypto_template crypto_authenc_tmpl = {
	.name = "authenc",
	.alloc = crypto_authenc_alloc,
	.free = crypto_authenc_free,
	.create = crypto_authenc_create,
	.module = THIS_MODULE,
};

@@ -4,6 +4,7 @@
 *
 * Copyright (C) 2010 secunet Security Networks AG
 * Copyright (C) 2010 Steffen Klassert <steffen.klassert@secunet.com>
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
@@ -16,6 +17,7 @@
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/authenc.h>
#include <crypto/null.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
@@ -34,19 +36,12 @@ struct crypto_authenc_esn_ctx {
	unsigned int reqoff;
	struct crypto_ahash *auth;
	struct crypto_ablkcipher *enc;
	struct crypto_blkcipher *null;
};

struct authenc_esn_request_ctx {
	unsigned int cryptlen;
	unsigned int headlen;
	unsigned int trailen;
	struct scatterlist *sg;
	struct scatterlist hsg[2];
	struct scatterlist tsg[1];
	struct scatterlist cipher[2];
	crypto_completion_t complete;
	crypto_completion_t update_complete;
	crypto_completion_t update_complete2;
	struct scatterlist src[2];
	struct scatterlist dst[2];
	char tail[];
};

@@ -56,6 +51,15 @@ static void authenc_esn_request_complete(struct aead_request *req, int err)
		aead_request_complete(req, err);
}

static int crypto_authenc_esn_setauthsize(struct crypto_aead *authenc_esn,
					  unsigned int authsize)
{
	if (authsize > 0 && authsize < 4)
		return -EINVAL;

	return 0;
}

static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 *key,
				     unsigned int keylen)
{
@@ -93,349 +97,73 @@ static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 *
	goto out;
}

static void authenc_esn_geniv_ahash_update_done(struct crypto_async_request *areq,
						int err)
static int crypto_authenc_esn_genicv_tail(struct aead_request *req,
					  unsigned int flags)
{
	struct aead_request *req = areq->data;
	struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
	struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
	struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
	struct crypto_ahash *auth = ctx->auth;
	u8 *hash = PTR_ALIGN((u8 *)areq_ctx->tail,
			     crypto_ahash_alignmask(auth) + 1);
	unsigned int authsize = crypto_aead_authsize(authenc_esn);
	unsigned int assoclen = req->assoclen;
	unsigned int cryptlen = req->cryptlen;
	struct scatterlist *dst = req->dst;
	u32 tmp[2];

	if (err)
		goto out;
	/* Move high-order bits of sequence number back. */
	scatterwalk_map_and_copy(tmp, dst, 4, 4, 0);
	scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 0);
	scatterwalk_map_and_copy(tmp, dst, 0, 8, 1);

	ahash_request_set_crypt(ahreq, areq_ctx->sg, ahreq->result,
				areq_ctx->cryptlen);
	ahash_request_set_callback(ahreq, aead_request_flags(req) &
					  CRYPTO_TFM_REQ_MAY_SLEEP,
				   areq_ctx->update_complete2, req);

	err = crypto_ahash_update(ahreq);
	if (err)
		goto out;

	ahash_request_set_crypt(ahreq, areq_ctx->tsg, ahreq->result,
				areq_ctx->trailen);
	ahash_request_set_callback(ahreq, aead_request_flags(req) &
					  CRYPTO_TFM_REQ_MAY_SLEEP,
				   areq_ctx->complete, req);

	err = crypto_ahash_finup(ahreq);
	if (err)
		goto out;

	scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg,
				 areq_ctx->cryptlen,
				 crypto_aead_authsize(authenc_esn), 1);

out:
	authenc_esn_request_complete(req, err);
	scatterwalk_map_and_copy(hash, dst, assoclen + cryptlen, authsize, 1);
	return 0;
}

static void authenc_esn_geniv_ahash_update_done2(struct crypto_async_request *areq,
						 int err)
{
	struct aead_request *req = areq->data;
	struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
	struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
	struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);

	if (err)
		goto out;

	ahash_request_set_crypt(ahreq, areq_ctx->tsg, ahreq->result,
				areq_ctx->trailen);
	ahash_request_set_callback(ahreq, aead_request_flags(req) &
					  CRYPTO_TFM_REQ_MAY_SLEEP,
				   areq_ctx->complete, req);

	err = crypto_ahash_finup(ahreq);
	if (err)
		goto out;

	scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg,
				 areq_ctx->cryptlen,
				 crypto_aead_authsize(authenc_esn), 1);

out:
	authenc_esn_request_complete(req, err);
}


static void authenc_esn_geniv_ahash_done(struct crypto_async_request *areq,
					 int err)
{
	struct aead_request *req = areq->data;
	struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
	struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
	struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);

	if (err)
		goto out;

	scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg,
				 areq_ctx->cryptlen,
				 crypto_aead_authsize(authenc_esn), 1);

out:
	err = err ?: crypto_authenc_esn_genicv_tail(req, 0);
	aead_request_complete(req, err);
}


static void authenc_esn_verify_ahash_update_done(struct crypto_async_request *areq,
						 int err)
{
	u8 *ihash;
	unsigned int authsize;
	struct ablkcipher_request *abreq;
	struct aead_request *req = areq->data;
	struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
	struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
	struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
	unsigned int cryptlen = req->cryptlen;

	if (err)
		goto out;

	ahash_request_set_crypt(ahreq, areq_ctx->sg, ahreq->result,
				areq_ctx->cryptlen);

	ahash_request_set_callback(ahreq,
				   aead_request_flags(req) &
				   CRYPTO_TFM_REQ_MAY_SLEEP,
				   areq_ctx->update_complete2, req);

	err = crypto_ahash_update(ahreq);
	if (err)
		goto out;

	ahash_request_set_crypt(ahreq, areq_ctx->tsg, ahreq->result,
				areq_ctx->trailen);
	ahash_request_set_callback(ahreq, aead_request_flags(req) &
					  CRYPTO_TFM_REQ_MAY_SLEEP,
				   areq_ctx->complete, req);

	err = crypto_ahash_finup(ahreq);
	if (err)
		goto out;

	authsize = crypto_aead_authsize(authenc_esn);
	cryptlen -= authsize;
	ihash = ahreq->result + authsize;
	scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
				 authsize, 0);

	err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
	if (err)
		goto out;

	abreq = aead_request_ctx(req);
	ablkcipher_request_set_tfm(abreq, ctx->enc);
	ablkcipher_request_set_callback(abreq, aead_request_flags(req),
					req->base.complete, req->base.data);
	ablkcipher_request_set_crypt(abreq, req->src, req->dst,
				     cryptlen, req->iv);

	err = crypto_ablkcipher_decrypt(abreq);

out:
	authenc_esn_request_complete(req, err);
}

static void authenc_esn_verify_ahash_update_done2(struct crypto_async_request *areq,
						  int err)
{
	u8 *ihash;
	unsigned int authsize;
	struct ablkcipher_request *abreq;
	struct aead_request *req = areq->data;
	struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
	struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
	struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
	unsigned int cryptlen = req->cryptlen;

	if (err)
		goto out;

	ahash_request_set_crypt(ahreq, areq_ctx->tsg, ahreq->result,
				areq_ctx->trailen);
	ahash_request_set_callback(ahreq, aead_request_flags(req) &
					  CRYPTO_TFM_REQ_MAY_SLEEP,
				   areq_ctx->complete, req);

	err = crypto_ahash_finup(ahreq);
	if (err)
		goto out;

	authsize = crypto_aead_authsize(authenc_esn);
	cryptlen -= authsize;
	ihash = ahreq->result + authsize;
	scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
				 authsize, 0);

	err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
	if (err)
		goto out;

	abreq = aead_request_ctx(req);
	ablkcipher_request_set_tfm(abreq, ctx->enc);
	ablkcipher_request_set_callback(abreq, aead_request_flags(req),
					req->base.complete, req->base.data);
	ablkcipher_request_set_crypt(abreq, req->src, req->dst,
				     cryptlen, req->iv);

	err = crypto_ablkcipher_decrypt(abreq);

out:
	authenc_esn_request_complete(req, err);
}


static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq,
					  int err)
{
	u8 *ihash;
	unsigned int authsize;
	struct ablkcipher_request *abreq;
	struct aead_request *req = areq->data;
	struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
	struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
	struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
	unsigned int cryptlen = req->cryptlen;

	if (err)
		goto out;

	authsize = crypto_aead_authsize(authenc_esn);
	cryptlen -= authsize;
	ihash = ahreq->result + authsize;
	scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
				 authsize, 0);

	err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
	if (err)
		goto out;

	abreq = aead_request_ctx(req);
	ablkcipher_request_set_tfm(abreq, ctx->enc);
	ablkcipher_request_set_callback(abreq, aead_request_flags(req),
					req->base.complete, req->base.data);
	ablkcipher_request_set_crypt(abreq, req->src, req->dst,
				     cryptlen, req->iv);

	err = crypto_ablkcipher_decrypt(abreq);

out:
	authenc_esn_request_complete(req, err);
}

static u8 *crypto_authenc_esn_ahash(struct aead_request *req,
                                    unsigned int flags)
{
        struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
        struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
        struct crypto_ahash *auth = ctx->auth;
        struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
        struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
        u8 *hash = areq_ctx->tail;
        int err;

        hash = (u8 *)ALIGN((unsigned long)hash + crypto_ahash_alignmask(auth),
                           crypto_ahash_alignmask(auth) + 1);

        ahash_request_set_tfm(ahreq, auth);

        err = crypto_ahash_init(ahreq);
        if (err)
                return ERR_PTR(err);

        ahash_request_set_crypt(ahreq, areq_ctx->hsg, hash, areq_ctx->headlen);
        ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
                                   areq_ctx->update_complete, req);

        err = crypto_ahash_update(ahreq);
        if (err)
                return ERR_PTR(err);

        ahash_request_set_crypt(ahreq, areq_ctx->sg, hash, areq_ctx->cryptlen);
        ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
                                   areq_ctx->update_complete2, req);

        err = crypto_ahash_update(ahreq);
        if (err)
                return ERR_PTR(err);

        ahash_request_set_crypt(ahreq, areq_ctx->tsg, hash,
                                areq_ctx->trailen);
        ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
                                   areq_ctx->complete, req);

        err = crypto_ahash_finup(ahreq);
        if (err)
                return ERR_PTR(err);

        return hash;
}

static int crypto_authenc_esn_genicv(struct aead_request *req, u8 *iv,
|
||||
static int crypto_authenc_esn_genicv(struct aead_request *req,
|
||||
unsigned int flags)
|
||||
{
|
||||
struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
|
||||
struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
|
||||
struct scatterlist *dst = req->dst;
|
||||
struct scatterlist *assoc = req->assoc;
|
||||
struct scatterlist *cipher = areq_ctx->cipher;
|
||||
struct scatterlist *hsg = areq_ctx->hsg;
|
||||
struct scatterlist *tsg = areq_ctx->tsg;
|
||||
unsigned int ivsize = crypto_aead_ivsize(authenc_esn);
|
||||
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
|
||||
struct crypto_ahash *auth = ctx->auth;
|
||||
u8 *hash = PTR_ALIGN((u8 *)areq_ctx->tail,
|
||||
crypto_ahash_alignmask(auth) + 1);
|
||||
struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
|
||||
unsigned int authsize = crypto_aead_authsize(authenc_esn);
|
||||
unsigned int assoclen = req->assoclen;
|
||||
unsigned int cryptlen = req->cryptlen;
|
||||
struct page *dstp;
|
||||
u8 *vdst;
|
||||
u8 *hash;
|
||||
struct scatterlist *dst = req->dst;
|
||||
u32 tmp[2];
|
||||
|
||||
dstp = sg_page(dst);
|
||||
vdst = PageHighMem(dstp) ? NULL : page_address(dstp) + dst->offset;
|
||||
if (!authsize)
|
||||
return 0;
|
||||
|
||||
if (ivsize) {
|
||||
sg_init_table(cipher, 2);
|
||||
sg_set_buf(cipher, iv, ivsize);
|
||||
scatterwalk_crypto_chain(cipher, dst, vdst == iv + ivsize, 2);
|
||||
dst = cipher;
|
||||
cryptlen += ivsize;
|
||||
}
|
||||
/* Move high-order bits of sequence number to the end. */
|
||||
scatterwalk_map_and_copy(tmp, dst, 0, 8, 0);
|
||||
scatterwalk_map_and_copy(tmp, dst, 4, 4, 1);
|
||||
scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 1);
|
||||
|
||||
if (assoc->length < 12)
|
||||
return -EINVAL;
|
||||
sg_init_table(areq_ctx->dst, 2);
|
||||
dst = scatterwalk_ffwd(areq_ctx->dst, dst, 4);
|
||||
|
||||
sg_init_table(hsg, 2);
|
||||
sg_set_page(hsg, sg_page(assoc), 4, assoc->offset);
|
||||
sg_set_page(hsg + 1, sg_page(assoc), 4, assoc->offset + 8);
|
||||
ahash_request_set_tfm(ahreq, auth);
|
||||
ahash_request_set_crypt(ahreq, dst, hash, assoclen + cryptlen);
|
||||
ahash_request_set_callback(ahreq, flags,
|
||||
authenc_esn_geniv_ahash_done, req);
|
||||
|
||||
sg_init_table(tsg, 1);
|
||||
sg_set_page(tsg, sg_page(assoc), 4, assoc->offset + 4);
|
||||
|
||||
areq_ctx->cryptlen = cryptlen;
|
||||
areq_ctx->headlen = 8;
|
||||
areq_ctx->trailen = 4;
|
||||
areq_ctx->sg = dst;
|
||||
|
||||
areq_ctx->complete = authenc_esn_geniv_ahash_done;
|
||||
areq_ctx->update_complete = authenc_esn_geniv_ahash_update_done;
|
||||
areq_ctx->update_complete2 = authenc_esn_geniv_ahash_update_done2;
|
||||
|
||||
hash = crypto_authenc_esn_ahash(req, flags);
|
||||
if (IS_ERR(hash))
|
||||
return PTR_ERR(hash);
|
||||
|
||||
scatterwalk_map_and_copy(hash, dst, cryptlen,
|
||||
crypto_aead_authsize(authenc_esn), 1);
|
||||
return 0;
|
||||
return crypto_ahash_digest(ahreq) ?:
|
||||
crypto_authenc_esn_genicv_tail(req, aead_request_flags(req));
|
||||
}
|
||||
|
||||
|
||||
@ -444,185 +172,167 @@ static void crypto_authenc_esn_encrypt_done(struct crypto_async_request *req,
|
||||
{
|
||||
struct aead_request *areq = req->data;
|
||||
|
||||
if (!err) {
|
||||
struct crypto_aead *authenc_esn = crypto_aead_reqtfm(areq);
|
||||
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
|
||||
struct ablkcipher_request *abreq = aead_request_ctx(areq);
|
||||
u8 *iv = (u8 *)(abreq + 1) +
|
||||
crypto_ablkcipher_reqsize(ctx->enc);
|
||||
|
||||
err = crypto_authenc_esn_genicv(areq, iv, 0);
|
||||
}
|
||||
if (!err)
|
||||
err = crypto_authenc_esn_genicv(areq, 0);
|
||||
|
||||
authenc_esn_request_complete(areq, err);
|
||||
}
|
||||
|
||||
static int crypto_authenc_esn_copy(struct aead_request *req, unsigned int len)
{
        struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
        struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
        struct blkcipher_desc desc = {
                .tfm = ctx->null,
        };

        return crypto_blkcipher_encrypt(&desc, req->dst, req->src, len);
}

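The copy helper above leans on the crypto layer's shared null skcipher: "encrypting" with cipher_null is simply a scatterlist-to-scatterlist copy, which the new authencesn code uses whenever req->src and req->dst differ. A minimal sketch of that pattern, assuming the same acquire/use/release flow as the patch (the example_* names and context struct are illustrative, not part of the patch):

#include <crypto/null.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Hypothetical context used only for this illustration. */
struct example_null_ctx {
        struct crypto_blkcipher *null;
};

static int example_null_init(struct example_null_ctx *ctx)
{
        struct crypto_blkcipher *null = crypto_get_default_null_skcipher();

        if (IS_ERR(null))
                return PTR_ERR(null);
        ctx->null = null;       /* shared "ecb(cipher_null)" transform */
        return 0;
}

/* Copy @len bytes from @src to @dst; cipher_null "encryption" is a memcpy. */
static int example_null_copy(struct example_null_ctx *ctx,
                             struct scatterlist *dst, struct scatterlist *src,
                             unsigned int len)
{
        struct blkcipher_desc desc = { .tfm = ctx->null };

        return crypto_blkcipher_encrypt(&desc, dst, src, len);
}

The matching teardown calls crypto_put_default_null_skcipher(), as the converted init/exit hooks further down in this file do.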
static int crypto_authenc_esn_encrypt(struct aead_request *req)
|
||||
{
|
||||
struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
|
||||
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
|
||||
struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
|
||||
struct crypto_ablkcipher *enc = ctx->enc;
|
||||
struct scatterlist *dst = req->dst;
|
||||
unsigned int cryptlen = req->cryptlen;
|
||||
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
|
||||
struct ablkcipher_request *abreq = (void *)(areq_ctx->tail
|
||||
+ ctx->reqoff);
|
||||
u8 *iv = (u8 *)abreq - crypto_ablkcipher_ivsize(enc);
|
||||
struct crypto_ablkcipher *enc = ctx->enc;
|
||||
unsigned int assoclen = req->assoclen;
|
||||
unsigned int cryptlen = req->cryptlen;
|
||||
struct scatterlist *src, *dst;
|
||||
int err;
|
||||
|
||||
sg_init_table(areq_ctx->src, 2);
|
||||
src = scatterwalk_ffwd(areq_ctx->src, req->src, assoclen);
|
||||
dst = src;
|
||||
|
||||
if (req->src != req->dst) {
|
||||
err = crypto_authenc_esn_copy(req, assoclen);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
sg_init_table(areq_ctx->dst, 2);
|
||||
dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, assoclen);
|
||||
}
|
||||
|
||||
ablkcipher_request_set_tfm(abreq, enc);
|
||||
ablkcipher_request_set_callback(abreq, aead_request_flags(req),
|
||||
crypto_authenc_esn_encrypt_done, req);
|
||||
ablkcipher_request_set_crypt(abreq, req->src, dst, cryptlen, req->iv);
|
||||
|
||||
memcpy(iv, req->iv, crypto_aead_ivsize(authenc_esn));
|
||||
ablkcipher_request_set_crypt(abreq, src, dst, cryptlen, req->iv);
|
||||
|
||||
err = crypto_ablkcipher_encrypt(abreq);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
return crypto_authenc_esn_genicv(req, iv, CRYPTO_TFM_REQ_MAY_SLEEP);
|
||||
return crypto_authenc_esn_genicv(req, aead_request_flags(req));
|
||||
}
|
||||
|
||||
static void crypto_authenc_esn_givencrypt_done(struct crypto_async_request *req,
|
||||
int err)
|
||||
static int crypto_authenc_esn_decrypt_tail(struct aead_request *req,
|
||||
unsigned int flags)
|
||||
{
|
||||
struct aead_request *areq = req->data;
|
||||
|
||||
if (!err) {
|
||||
struct skcipher_givcrypt_request *greq = aead_request_ctx(areq);
|
||||
|
||||
err = crypto_authenc_esn_genicv(areq, greq->giv, 0);
|
||||
}
|
||||
|
||||
authenc_esn_request_complete(areq, err);
|
||||
}
|
||||
|
||||
static int crypto_authenc_esn_givencrypt(struct aead_givcrypt_request *req)
|
||||
{
|
||||
struct crypto_aead *authenc_esn = aead_givcrypt_reqtfm(req);
|
||||
struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
|
||||
unsigned int authsize = crypto_aead_authsize(authenc_esn);
|
||||
struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
|
||||
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
|
||||
struct aead_request *areq = &req->areq;
|
||||
struct skcipher_givcrypt_request *greq = aead_request_ctx(areq);
|
||||
u8 *iv = req->giv;
|
||||
int err;
|
||||
struct ablkcipher_request *abreq = (void *)(areq_ctx->tail
|
||||
+ ctx->reqoff);
|
||||
struct crypto_ahash *auth = ctx->auth;
|
||||
u8 *ohash = PTR_ALIGN((u8 *)areq_ctx->tail,
|
||||
crypto_ahash_alignmask(auth) + 1);
|
||||
unsigned int cryptlen = req->cryptlen - authsize;
|
||||
unsigned int assoclen = req->assoclen;
|
||||
struct scatterlist *dst = req->dst;
|
||||
u8 *ihash = ohash + crypto_ahash_digestsize(auth);
|
||||
u32 tmp[2];
|
||||
|
||||
skcipher_givcrypt_set_tfm(greq, ctx->enc);
|
||||
skcipher_givcrypt_set_callback(greq, aead_request_flags(areq),
|
||||
crypto_authenc_esn_givencrypt_done, areq);
|
||||
skcipher_givcrypt_set_crypt(greq, areq->src, areq->dst, areq->cryptlen,
|
||||
areq->iv);
|
||||
skcipher_givcrypt_set_giv(greq, iv, req->seq);
|
||||
/* Move high-order bits of sequence number back. */
|
||||
scatterwalk_map_and_copy(tmp, dst, 4, 4, 0);
|
||||
scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 0);
|
||||
scatterwalk_map_and_copy(tmp, dst, 0, 8, 1);
|
||||
|
||||
err = crypto_skcipher_givencrypt(greq);
|
||||
if (err)
|
||||
return err;
|
||||
if (crypto_memneq(ihash, ohash, authsize))
|
||||
return -EBADMSG;
|
||||
|
||||
return crypto_authenc_esn_genicv(areq, iv, CRYPTO_TFM_REQ_MAY_SLEEP);
|
||||
sg_init_table(areq_ctx->dst, 2);
|
||||
dst = scatterwalk_ffwd(areq_ctx->dst, dst, assoclen);
|
||||
|
||||
ablkcipher_request_set_tfm(abreq, ctx->enc);
|
||||
ablkcipher_request_set_callback(abreq, flags,
|
||||
req->base.complete, req->base.data);
|
||||
ablkcipher_request_set_crypt(abreq, dst, dst, cryptlen, req->iv);
|
||||
|
||||
return crypto_ablkcipher_decrypt(abreq);
|
||||
}
|
||||
|
||||
static int crypto_authenc_esn_verify(struct aead_request *req)
|
||||
static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq,
|
||||
int err)
|
||||
{
|
||||
struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
|
||||
struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
|
||||
u8 *ohash;
|
||||
u8 *ihash;
|
||||
unsigned int authsize;
|
||||
struct aead_request *req = areq->data;
|
||||
|
||||
areq_ctx->complete = authenc_esn_verify_ahash_done;
|
||||
areq_ctx->update_complete = authenc_esn_verify_ahash_update_done;
|
||||
|
||||
ohash = crypto_authenc_esn_ahash(req, CRYPTO_TFM_REQ_MAY_SLEEP);
|
||||
if (IS_ERR(ohash))
|
||||
return PTR_ERR(ohash);
|
||||
|
||||
authsize = crypto_aead_authsize(authenc_esn);
|
||||
ihash = ohash + authsize;
|
||||
scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
|
||||
authsize, 0);
|
||||
return crypto_memneq(ihash, ohash, authsize) ? -EBADMSG : 0;
|
||||
}
|
||||
|
||||
static int crypto_authenc_esn_iverify(struct aead_request *req, u8 *iv,
|
||||
unsigned int cryptlen)
|
||||
{
|
||||
struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
|
||||
struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
|
||||
struct scatterlist *src = req->src;
|
||||
struct scatterlist *assoc = req->assoc;
|
||||
struct scatterlist *cipher = areq_ctx->cipher;
|
||||
struct scatterlist *hsg = areq_ctx->hsg;
|
||||
struct scatterlist *tsg = areq_ctx->tsg;
|
||||
unsigned int ivsize = crypto_aead_ivsize(authenc_esn);
|
||||
struct page *srcp;
|
||||
u8 *vsrc;
|
||||
|
||||
srcp = sg_page(src);
|
||||
vsrc = PageHighMem(srcp) ? NULL : page_address(srcp) + src->offset;
|
||||
|
||||
if (ivsize) {
|
||||
sg_init_table(cipher, 2);
|
||||
sg_set_buf(cipher, iv, ivsize);
|
||||
scatterwalk_crypto_chain(cipher, src, vsrc == iv + ivsize, 2);
|
||||
src = cipher;
|
||||
cryptlen += ivsize;
|
||||
}
|
||||
|
||||
if (assoc->length < 12)
|
||||
return -EINVAL;
|
||||
|
||||
sg_init_table(hsg, 2);
|
||||
sg_set_page(hsg, sg_page(assoc), 4, assoc->offset);
|
||||
sg_set_page(hsg + 1, sg_page(assoc), 4, assoc->offset + 8);
|
||||
|
||||
sg_init_table(tsg, 1);
|
||||
sg_set_page(tsg, sg_page(assoc), 4, assoc->offset + 4);
|
||||
|
||||
areq_ctx->cryptlen = cryptlen;
|
||||
areq_ctx->headlen = 8;
|
||||
areq_ctx->trailen = 4;
|
||||
areq_ctx->sg = src;
|
||||
|
||||
areq_ctx->complete = authenc_esn_verify_ahash_done;
|
||||
areq_ctx->update_complete = authenc_esn_verify_ahash_update_done;
|
||||
areq_ctx->update_complete2 = authenc_esn_verify_ahash_update_done2;
|
||||
|
||||
return crypto_authenc_esn_verify(req);
|
||||
err = err ?: crypto_authenc_esn_decrypt_tail(req, 0);
|
||||
aead_request_complete(req, err);
|
||||
}
|
||||
|
||||
static int crypto_authenc_esn_decrypt(struct aead_request *req)
|
||||
{
|
||||
struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
|
||||
struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
|
||||
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
|
||||
struct ablkcipher_request *abreq = aead_request_ctx(req);
|
||||
unsigned int cryptlen = req->cryptlen;
|
||||
struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
|
||||
unsigned int authsize = crypto_aead_authsize(authenc_esn);
|
||||
u8 *iv = req->iv;
|
||||
struct crypto_ahash *auth = ctx->auth;
|
||||
u8 *ohash = PTR_ALIGN((u8 *)areq_ctx->tail,
|
||||
crypto_ahash_alignmask(auth) + 1);
|
||||
unsigned int assoclen = req->assoclen;
|
||||
unsigned int cryptlen = req->cryptlen;
|
||||
u8 *ihash = ohash + crypto_ahash_digestsize(auth);
|
||||
struct scatterlist *dst = req->dst;
|
||||
u32 tmp[2];
|
||||
int err;
|
||||
|
||||
if (cryptlen < authsize)
|
||||
return -EINVAL;
|
||||
cryptlen -= authsize;
|
||||
|
||||
err = crypto_authenc_esn_iverify(req, iv, cryptlen);
|
||||
if (req->src != dst) {
|
||||
err = crypto_authenc_esn_copy(req, assoclen + cryptlen);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
|
||||
scatterwalk_map_and_copy(ihash, req->src, assoclen + cryptlen,
|
||||
authsize, 0);
|
||||
|
||||
if (!authsize)
|
||||
goto tail;
|
||||
|
||||
/* Move high-order bits of sequence number to the end. */
|
||||
scatterwalk_map_and_copy(tmp, dst, 0, 8, 0);
|
||||
scatterwalk_map_and_copy(tmp, dst, 4, 4, 1);
|
||||
scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 1);
|
||||
|
||||
sg_init_table(areq_ctx->dst, 2);
|
||||
dst = scatterwalk_ffwd(areq_ctx->dst, dst, 4);
|
||||
|
||||
ahash_request_set_tfm(ahreq, auth);
|
||||
ahash_request_set_crypt(ahreq, dst, ohash, assoclen + cryptlen);
|
||||
ahash_request_set_callback(ahreq, aead_request_flags(req),
|
||||
authenc_esn_verify_ahash_done, req);
|
||||
|
||||
err = crypto_ahash_digest(ahreq);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
ablkcipher_request_set_tfm(abreq, ctx->enc);
|
||||
ablkcipher_request_set_callback(abreq, aead_request_flags(req),
|
||||
req->base.complete, req->base.data);
|
||||
ablkcipher_request_set_crypt(abreq, req->src, req->dst, cryptlen, iv);
|
||||
|
||||
return crypto_ablkcipher_decrypt(abreq);
|
||||
tail:
|
||||
return crypto_authenc_esn_decrypt_tail(req, aead_request_flags(req));
|
||||
}
|
||||
|
||||
static int crypto_authenc_esn_init_tfm(struct crypto_tfm *tfm)
|
||||
static int crypto_authenc_esn_init_tfm(struct crypto_aead *tfm)
|
||||
{
|
||||
struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
|
||||
struct authenc_esn_instance_ctx *ictx = crypto_instance_ctx(inst);
|
||||
struct crypto_authenc_esn_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
struct aead_instance *inst = aead_alg_instance(tfm);
|
||||
struct authenc_esn_instance_ctx *ictx = aead_instance_ctx(inst);
|
||||
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(tfm);
|
||||
struct crypto_ahash *auth;
|
||||
struct crypto_ablkcipher *enc;
|
||||
struct crypto_blkcipher *null;
|
||||
int err;
|
||||
|
||||
auth = crypto_spawn_ahash(&ictx->auth);
|
||||
@ -634,15 +344,20 @@ static int crypto_authenc_esn_init_tfm(struct crypto_tfm *tfm)
|
||||
if (IS_ERR(enc))
|
||||
goto err_free_ahash;
|
||||
|
||||
null = crypto_get_default_null_skcipher();
|
||||
err = PTR_ERR(null);
|
||||
if (IS_ERR(null))
|
||||
goto err_free_skcipher;
|
||||
|
||||
ctx->auth = auth;
|
||||
ctx->enc = enc;
|
||||
ctx->null = null;
|
||||
|
||||
ctx->reqoff = ALIGN(2 * crypto_ahash_digestsize(auth) +
|
||||
crypto_ahash_alignmask(auth),
|
||||
crypto_ahash_alignmask(auth) + 1) +
|
||||
crypto_ablkcipher_ivsize(enc);
|
||||
ctx->reqoff = ALIGN(2 * crypto_ahash_digestsize(auth),
|
||||
crypto_ahash_alignmask(auth) + 1);
|
||||
|
||||
crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
|
||||
crypto_aead_set_reqsize(
|
||||
tfm,
|
||||
sizeof(struct authenc_esn_request_ctx) +
|
||||
ctx->reqoff +
|
||||
max_t(unsigned int,
|
||||
@ -653,23 +368,36 @@ static int crypto_authenc_esn_init_tfm(struct crypto_tfm *tfm)
|
||||
|
||||
return 0;
|
||||
|
||||
err_free_skcipher:
|
||||
crypto_free_ablkcipher(enc);
|
||||
err_free_ahash:
|
||||
crypto_free_ahash(auth);
|
||||
return err;
|
||||
}
|
||||
|
||||
static void crypto_authenc_esn_exit_tfm(struct crypto_tfm *tfm)
|
||||
static void crypto_authenc_esn_exit_tfm(struct crypto_aead *tfm)
|
||||
{
|
||||
struct crypto_authenc_esn_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(tfm);
|
||||
|
||||
crypto_free_ahash(ctx->auth);
|
||||
crypto_free_ablkcipher(ctx->enc);
|
||||
crypto_put_default_null_skcipher();
|
||||
}
|
||||
|
||||
static struct crypto_instance *crypto_authenc_esn_alloc(struct rtattr **tb)
|
||||
static void crypto_authenc_esn_free(struct aead_instance *inst)
|
||||
{
|
||||
struct authenc_esn_instance_ctx *ctx = aead_instance_ctx(inst);
|
||||
|
||||
crypto_drop_skcipher(&ctx->enc);
|
||||
crypto_drop_ahash(&ctx->auth);
|
||||
kfree(inst);
|
||||
}
|
||||
|
||||
static int crypto_authenc_esn_create(struct crypto_template *tmpl,
|
||||
struct rtattr **tb)
|
||||
{
|
||||
struct crypto_attr_type *algt;
|
||||
struct crypto_instance *inst;
|
||||
struct aead_instance *inst;
|
||||
struct hash_alg_common *auth;
|
||||
struct crypto_alg *auth_base;
|
||||
struct crypto_alg *enc;
|
||||
@ -679,15 +407,15 @@ static struct crypto_instance *crypto_authenc_esn_alloc(struct rtattr **tb)
|
||||
|
||||
algt = crypto_get_attr_type(tb);
|
||||
if (IS_ERR(algt))
|
||||
return ERR_CAST(algt);
|
||||
return PTR_ERR(algt);
|
||||
|
||||
if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
|
||||
return ERR_PTR(-EINVAL);
|
||||
return -EINVAL;
|
||||
|
||||
auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH,
|
||||
CRYPTO_ALG_TYPE_AHASH_MASK);
|
||||
if (IS_ERR(auth))
|
||||
return ERR_CAST(auth);
|
||||
return PTR_ERR(auth);
|
||||
|
||||
auth_base = &auth->base;
|
||||
|
||||
@ -701,13 +429,14 @@ static struct crypto_instance *crypto_authenc_esn_alloc(struct rtattr **tb)
|
||||
if (!inst)
|
||||
goto out_put_auth;
|
||||
|
||||
ctx = crypto_instance_ctx(inst);
|
||||
ctx = aead_instance_ctx(inst);
|
||||
|
||||
err = crypto_init_ahash_spawn(&ctx->auth, auth, inst);
|
||||
err = crypto_init_ahash_spawn(&ctx->auth, auth,
|
||||
aead_crypto_instance(inst));
|
||||
if (err)
|
||||
goto err_free_inst;
|
||||
|
||||
crypto_set_skcipher_spawn(&ctx->enc, inst);
|
||||
crypto_set_skcipher_spawn(&ctx->enc, aead_crypto_instance(inst));
|
||||
err = crypto_grab_skcipher(&ctx->enc, enc_name, 0,
|
||||
crypto_requires_sync(algt->type,
|
||||
algt->mask));
|
||||
@ -717,40 +446,44 @@ static struct crypto_instance *crypto_authenc_esn_alloc(struct rtattr **tb)
|
||||
enc = crypto_skcipher_spawn_alg(&ctx->enc);
|
||||
|
||||
err = -ENAMETOOLONG;
|
||||
if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
|
||||
"authencesn(%s,%s)", auth_base->cra_name, enc->cra_name) >=
|
||||
CRYPTO_MAX_ALG_NAME)
|
||||
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
|
||||
"authencesn(%s,%s)", auth_base->cra_name,
|
||||
enc->cra_name) >= CRYPTO_MAX_ALG_NAME)
|
||||
goto err_drop_enc;
|
||||
|
||||
if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
|
||||
if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
|
||||
"authencesn(%s,%s)", auth_base->cra_driver_name,
|
||||
enc->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
|
||||
goto err_drop_enc;
|
||||
|
||||
inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
|
||||
inst->alg.cra_flags |= enc->cra_flags & CRYPTO_ALG_ASYNC;
|
||||
inst->alg.cra_priority = enc->cra_priority *
|
||||
10 + auth_base->cra_priority;
|
||||
inst->alg.cra_blocksize = enc->cra_blocksize;
|
||||
inst->alg.cra_alignmask = auth_base->cra_alignmask | enc->cra_alignmask;
|
||||
inst->alg.cra_type = &crypto_aead_type;
|
||||
inst->alg.base.cra_flags = enc->cra_flags & CRYPTO_ALG_ASYNC;
|
||||
inst->alg.base.cra_priority = enc->cra_priority * 10 +
|
||||
auth_base->cra_priority;
|
||||
inst->alg.base.cra_blocksize = enc->cra_blocksize;
|
||||
inst->alg.base.cra_alignmask = auth_base->cra_alignmask |
|
||||
enc->cra_alignmask;
|
||||
inst->alg.base.cra_ctxsize = sizeof(struct crypto_authenc_esn_ctx);
|
||||
|
||||
inst->alg.cra_aead.ivsize = enc->cra_ablkcipher.ivsize;
|
||||
inst->alg.cra_aead.maxauthsize = auth->digestsize;
|
||||
inst->alg.ivsize = enc->cra_ablkcipher.ivsize;
|
||||
inst->alg.maxauthsize = auth->digestsize;
|
||||
|
||||
inst->alg.cra_ctxsize = sizeof(struct crypto_authenc_esn_ctx);
|
||||
inst->alg.init = crypto_authenc_esn_init_tfm;
|
||||
inst->alg.exit = crypto_authenc_esn_exit_tfm;
|
||||
|
||||
inst->alg.cra_init = crypto_authenc_esn_init_tfm;
|
||||
inst->alg.cra_exit = crypto_authenc_esn_exit_tfm;
|
||||
inst->alg.setkey = crypto_authenc_esn_setkey;
|
||||
inst->alg.setauthsize = crypto_authenc_esn_setauthsize;
|
||||
inst->alg.encrypt = crypto_authenc_esn_encrypt;
|
||||
inst->alg.decrypt = crypto_authenc_esn_decrypt;
|
||||
|
||||
inst->alg.cra_aead.setkey = crypto_authenc_esn_setkey;
|
||||
inst->alg.cra_aead.encrypt = crypto_authenc_esn_encrypt;
|
||||
inst->alg.cra_aead.decrypt = crypto_authenc_esn_decrypt;
|
||||
inst->alg.cra_aead.givencrypt = crypto_authenc_esn_givencrypt;
|
||||
inst->free = crypto_authenc_esn_free,
|
||||
|
||||
err = aead_register_instance(tmpl, inst);
|
||||
if (err)
|
||||
goto err_drop_enc;
|
||||
|
||||
out:
|
||||
crypto_mod_put(auth_base);
|
||||
return inst;
|
||||
return err;
|
||||
|
||||
err_drop_enc:
|
||||
crypto_drop_skcipher(&ctx->enc);
|
||||
@ -759,23 +492,12 @@ static struct crypto_instance *crypto_authenc_esn_alloc(struct rtattr **tb)
|
||||
err_free_inst:
|
||||
kfree(inst);
|
||||
out_put_auth:
|
||||
inst = ERR_PTR(err);
|
||||
goto out;
|
||||
}
|
||||
|
||||
static void crypto_authenc_esn_free(struct crypto_instance *inst)
|
||||
{
|
||||
struct authenc_esn_instance_ctx *ctx = crypto_instance_ctx(inst);
|
||||
|
||||
crypto_drop_skcipher(&ctx->enc);
|
||||
crypto_drop_ahash(&ctx->auth);
|
||||
kfree(inst);
|
||||
}
|
||||
|
||||
static struct crypto_template crypto_authenc_esn_tmpl = {
|
||||
.name = "authencesn",
|
||||
.alloc = crypto_authenc_esn_alloc,
|
||||
.free = crypto_authenc_esn_free,
|
||||
.create = crypto_authenc_esn_create,
|
||||
.module = THIS_MODULE,
|
||||
};
|
||||
|
||||
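With that, authencesn is fully moved off the old template .alloc/.free hooks onto the new-style .create hook plus aead_register_instance(), with per-instance cleanup supplied through inst->free, matching the other AEAD templates converted in this series. A skeletal sketch of that registration flow under the same assumptions (names are placeholders, spawn setup and name/size handling trimmed):

#include <crypto/internal/aead.h>
#include <linux/module.h>
#include <linux/slab.h>

static void example_aead_free(struct aead_instance *inst)
{
        /* drop any spawns held in aead_instance_ctx(inst) here */
        kfree(inst);
}

/* Skeleton only; a real template also grabs spawns and fills inst->alg. */
static int example_aead_create(struct crypto_template *tmpl,
                               struct rtattr **tb)
{
        struct aead_instance *inst;
        int err;

        inst = kzalloc(sizeof(*inst) /* + instance ctx size */, GFP_KERNEL);
        if (!inst)
                return -ENOMEM;

        /* ... set up spawns, inst->alg.base.cra_* and inst->alg.* ... */
        inst->free = example_aead_free;

        err = aead_register_instance(tmpl, inst);
        if (err)
                example_aead_free(inst);
        return err;
}

static struct crypto_template example_tmpl = {
        .name   = "example",
        .create = example_aead_create,
        .module = THIS_MODULE,
};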
|
 378	crypto/ccm.c
@ -36,14 +36,20 @@ struct crypto_rfc4309_ctx {
        u8 nonce[3];
};

struct crypto_rfc4309_req_ctx {
        struct scatterlist src[3];
        struct scatterlist dst[3];
        struct aead_request subreq;
};

struct crypto_ccm_req_priv_ctx {
        u8 odata[16];
        u8 idata[16];
        u8 auth_tag[16];
        u32 ilen;
        u32 flags;
        struct scatterlist src[2];
        struct scatterlist dst[2];
        struct scatterlist src[3];
        struct scatterlist dst[3];
        struct ablkcipher_request abreq;
};

@ -265,7 +271,7 @@ static int crypto_ccm_auth(struct aead_request *req, struct scatterlist *plain,
|
||||
/* format associated data and compute into mac */
|
||||
if (assoclen) {
|
||||
pctx->ilen = format_adata(idata, assoclen);
|
||||
get_data_to_compute(cipher, pctx, req->assoc, req->assoclen);
|
||||
get_data_to_compute(cipher, pctx, req->src, req->assoclen);
|
||||
} else {
|
||||
pctx->ilen = 0;
|
||||
}
|
||||
@ -286,7 +292,8 @@ static void crypto_ccm_encrypt_done(struct crypto_async_request *areq, int err)
|
||||
u8 *odata = pctx->odata;
|
||||
|
||||
if (!err)
|
||||
scatterwalk_map_and_copy(odata, req->dst, req->cryptlen,
|
||||
scatterwalk_map_and_copy(odata, req->dst,
|
||||
req->assoclen + req->cryptlen,
|
||||
crypto_aead_authsize(aead), 1);
|
||||
aead_request_complete(req, err);
|
||||
}
|
||||
@ -300,6 +307,41 @@ static inline int crypto_ccm_check_iv(const u8 *iv)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int crypto_ccm_init_crypt(struct aead_request *req, u8 *tag)
{
        struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
        struct scatterlist *sg;
        u8 *iv = req->iv;
        int err;

        err = crypto_ccm_check_iv(iv);
        if (err)
                return err;

        pctx->flags = aead_request_flags(req);

        /* Note: rfc 3610 and NIST 800-38C require counter of
         * zero to encrypt auth tag.
         */
        memset(iv + 15 - iv[0], 0, iv[0] + 1);

        sg_init_table(pctx->src, 3);
        sg_set_buf(pctx->src, tag, 16);
        sg = scatterwalk_ffwd(pctx->src + 1, req->src, req->assoclen);
        if (sg != pctx->src + 1)
                sg_chain(pctx->src, 2, sg);

        if (req->src != req->dst) {
                sg_init_table(pctx->dst, 3);
                sg_set_buf(pctx->dst, tag, 16);
                sg = scatterwalk_ffwd(pctx->dst + 1, req->dst, req->assoclen);
                if (sg != pctx->dst + 1)
                        sg_chain(pctx->dst, 2, sg);
        }

        return 0;
}

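The function above shows the scatterlist pattern this series uses for the new AEAD layout: the associated data now sits at the front of req->src/req->dst, so the code fast-forwards past it with scatterwalk_ffwd() and splices a private buffer (here the 16-byte tag block) in front of the payload with sg_chain(). A stripped-down sketch of the same idea, with hypothetical names, assuming a caller-provided 16-byte buffer:

#include <crypto/scatterwalk.h>
#include <linux/scatterlist.h>

/*
 * Illustrative only: build a scatterlist that presents "buf (16 bytes)
 * followed by the payload of @orig, skipping @skip bytes of AD".
 * @table needs room for 3 entries, as in the patch above.
 */
static struct scatterlist *example_prepend_buf(struct scatterlist *table,
                                               struct scatterlist *orig,
                                               void *buf, unsigned int skip)
{
        struct scatterlist *sg;

        sg_init_table(table, 3);
        sg_set_buf(table, buf, 16);

        /* Point entry 1 at the payload, skipping the associated data. */
        sg = scatterwalk_ffwd(table + 1, orig, skip);
        if (sg != table + 1)
                /* ffwd returned the original list (nothing copied into
                 * table + 1), so chain to it explicitly. */
                sg_chain(table, 2, sg);

        return table;
}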
static int crypto_ccm_encrypt(struct aead_request *req)
|
||||
{
|
||||
struct crypto_aead *aead = crypto_aead_reqtfm(req);
|
||||
@ -312,32 +354,17 @@ static int crypto_ccm_encrypt(struct aead_request *req)
|
||||
u8 *iv = req->iv;
|
||||
int err;
|
||||
|
||||
err = crypto_ccm_check_iv(iv);
|
||||
err = crypto_ccm_init_crypt(req, odata);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
pctx->flags = aead_request_flags(req);
|
||||
|
||||
err = crypto_ccm_auth(req, req->src, cryptlen);
|
||||
err = crypto_ccm_auth(req, sg_next(pctx->src), cryptlen);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
/* Note: rfc 3610 and NIST 800-38C require counter of
|
||||
* zero to encrypt auth tag.
|
||||
*/
|
||||
memset(iv + 15 - iv[0], 0, iv[0] + 1);
|
||||
|
||||
sg_init_table(pctx->src, 2);
|
||||
sg_set_buf(pctx->src, odata, 16);
|
||||
scatterwalk_sg_chain(pctx->src, 2, req->src);
|
||||
|
||||
dst = pctx->src;
|
||||
if (req->src != req->dst) {
|
||||
sg_init_table(pctx->dst, 2);
|
||||
sg_set_buf(pctx->dst, odata, 16);
|
||||
scatterwalk_sg_chain(pctx->dst, 2, req->dst);
|
||||
if (req->src != req->dst)
|
||||
dst = pctx->dst;
|
||||
}
|
||||
|
||||
ablkcipher_request_set_tfm(abreq, ctx->ctr);
|
||||
ablkcipher_request_set_callback(abreq, pctx->flags,
|
||||
@ -348,7 +375,7 @@ static int crypto_ccm_encrypt(struct aead_request *req)
|
||||
return err;
|
||||
|
||||
/* copy authtag to end of dst */
|
||||
scatterwalk_map_and_copy(odata, req->dst, cryptlen,
|
||||
scatterwalk_map_and_copy(odata, sg_next(dst), cryptlen,
|
||||
crypto_aead_authsize(aead), 1);
|
||||
return err;
|
||||
}
|
||||
@ -361,9 +388,14 @@ static void crypto_ccm_decrypt_done(struct crypto_async_request *areq,
|
||||
struct crypto_aead *aead = crypto_aead_reqtfm(req);
|
||||
unsigned int authsize = crypto_aead_authsize(aead);
|
||||
unsigned int cryptlen = req->cryptlen - authsize;
|
||||
struct scatterlist *dst;
|
||||
|
||||
pctx->flags = 0;
|
||||
|
||||
dst = sg_next(req->src == req->dst ? pctx->src : pctx->dst);
|
||||
|
||||
if (!err) {
|
||||
err = crypto_ccm_auth(req, req->dst, cryptlen);
|
||||
err = crypto_ccm_auth(req, dst, cryptlen);
|
||||
if (!err && crypto_memneq(pctx->auth_tag, pctx->odata, authsize))
|
||||
err = -EBADMSG;
|
||||
}
|
||||
@ -384,31 +416,18 @@ static int crypto_ccm_decrypt(struct aead_request *req)
|
||||
u8 *iv = req->iv;
|
||||
int err;
|
||||
|
||||
if (cryptlen < authsize)
|
||||
return -EINVAL;
|
||||
cryptlen -= authsize;
|
||||
|
||||
err = crypto_ccm_check_iv(iv);
|
||||
err = crypto_ccm_init_crypt(req, authtag);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
pctx->flags = aead_request_flags(req);
|
||||
|
||||
scatterwalk_map_and_copy(authtag, req->src, cryptlen, authsize, 0);
|
||||
|
||||
memset(iv + 15 - iv[0], 0, iv[0] + 1);
|
||||
|
||||
sg_init_table(pctx->src, 2);
|
||||
sg_set_buf(pctx->src, authtag, 16);
|
||||
scatterwalk_sg_chain(pctx->src, 2, req->src);
|
||||
scatterwalk_map_and_copy(authtag, sg_next(pctx->src), cryptlen,
|
||||
authsize, 0);
|
||||
|
||||
dst = pctx->src;
|
||||
if (req->src != req->dst) {
|
||||
sg_init_table(pctx->dst, 2);
|
||||
sg_set_buf(pctx->dst, authtag, 16);
|
||||
scatterwalk_sg_chain(pctx->dst, 2, req->dst);
|
||||
if (req->src != req->dst)
|
||||
dst = pctx->dst;
|
||||
}
|
||||
|
||||
ablkcipher_request_set_tfm(abreq, ctx->ctr);
|
||||
ablkcipher_request_set_callback(abreq, pctx->flags,
|
||||
@ -418,7 +437,7 @@ static int crypto_ccm_decrypt(struct aead_request *req)
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
err = crypto_ccm_auth(req, req->dst, cryptlen);
|
||||
err = crypto_ccm_auth(req, sg_next(dst), cryptlen);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
@ -429,11 +448,11 @@ static int crypto_ccm_decrypt(struct aead_request *req)
|
||||
return err;
|
||||
}
|
||||
|
||||
static int crypto_ccm_init_tfm(struct crypto_tfm *tfm)
|
||||
static int crypto_ccm_init_tfm(struct crypto_aead *tfm)
|
||||
{
|
||||
struct crypto_instance *inst = (void *)tfm->__crt_alg;
|
||||
struct ccm_instance_ctx *ictx = crypto_instance_ctx(inst);
|
||||
struct crypto_ccm_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
struct aead_instance *inst = aead_alg_instance(tfm);
|
||||
struct ccm_instance_ctx *ictx = aead_instance_ctx(inst);
|
||||
struct crypto_ccm_ctx *ctx = crypto_aead_ctx(tfm);
|
||||
struct crypto_cipher *cipher;
|
||||
struct crypto_ablkcipher *ctr;
|
||||
unsigned long align;
|
||||
@ -451,9 +470,10 @@ static int crypto_ccm_init_tfm(struct crypto_tfm *tfm)
|
||||
ctx->cipher = cipher;
|
||||
ctx->ctr = ctr;
|
||||
|
||||
align = crypto_tfm_alg_alignmask(tfm);
|
||||
align = crypto_aead_alignmask(tfm);
|
||||
align &= ~(crypto_tfm_ctx_alignment() - 1);
|
||||
crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
|
||||
crypto_aead_set_reqsize(
|
||||
tfm,
|
||||
align + sizeof(struct crypto_ccm_req_priv_ctx) +
|
||||
crypto_ablkcipher_reqsize(ctr));
|
||||
|
||||
@ -464,21 +484,31 @@ static int crypto_ccm_init_tfm(struct crypto_tfm *tfm)
|
||||
return err;
|
||||
}
|
||||
|
||||
static void crypto_ccm_exit_tfm(struct crypto_tfm *tfm)
|
||||
static void crypto_ccm_exit_tfm(struct crypto_aead *tfm)
|
||||
{
|
||||
struct crypto_ccm_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
struct crypto_ccm_ctx *ctx = crypto_aead_ctx(tfm);
|
||||
|
||||
crypto_free_cipher(ctx->cipher);
|
||||
crypto_free_ablkcipher(ctx->ctr);
|
||||
}
|
||||
|
||||
static struct crypto_instance *crypto_ccm_alloc_common(struct rtattr **tb,
|
||||
const char *full_name,
|
||||
const char *ctr_name,
|
||||
const char *cipher_name)
|
||||
static void crypto_ccm_free(struct aead_instance *inst)
|
||||
{
|
||||
struct ccm_instance_ctx *ctx = aead_instance_ctx(inst);
|
||||
|
||||
crypto_drop_spawn(&ctx->cipher);
|
||||
crypto_drop_skcipher(&ctx->ctr);
|
||||
kfree(inst);
|
||||
}
|
||||
|
||||
static int crypto_ccm_create_common(struct crypto_template *tmpl,
|
||||
struct rtattr **tb,
|
||||
const char *full_name,
|
||||
const char *ctr_name,
|
||||
const char *cipher_name)
|
||||
{
|
||||
struct crypto_attr_type *algt;
|
||||
struct crypto_instance *inst;
|
||||
struct aead_instance *inst;
|
||||
struct crypto_alg *ctr;
|
||||
struct crypto_alg *cipher;
|
||||
struct ccm_instance_ctx *ictx;
|
||||
@ -486,15 +516,15 @@ static struct crypto_instance *crypto_ccm_alloc_common(struct rtattr **tb,
|
||||
|
||||
algt = crypto_get_attr_type(tb);
|
||||
if (IS_ERR(algt))
|
||||
return ERR_CAST(algt);
|
||||
return PTR_ERR(algt);
|
||||
|
||||
if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
|
||||
return ERR_PTR(-EINVAL);
|
||||
return -EINVAL;
|
||||
|
||||
cipher = crypto_alg_mod_lookup(cipher_name, CRYPTO_ALG_TYPE_CIPHER,
|
||||
CRYPTO_ALG_TYPE_MASK);
|
||||
if (IS_ERR(cipher))
|
||||
return ERR_CAST(cipher);
|
||||
return PTR_ERR(cipher);
|
||||
|
||||
err = -EINVAL;
|
||||
if (cipher->cra_blocksize != 16)
|
||||
@ -505,14 +535,15 @@ static struct crypto_instance *crypto_ccm_alloc_common(struct rtattr **tb,
|
||||
if (!inst)
|
||||
goto out_put_cipher;
|
||||
|
||||
ictx = crypto_instance_ctx(inst);
|
||||
ictx = aead_instance_ctx(inst);
|
||||
|
||||
err = crypto_init_spawn(&ictx->cipher, cipher, inst,
|
||||
err = crypto_init_spawn(&ictx->cipher, cipher,
|
||||
aead_crypto_instance(inst),
|
||||
CRYPTO_ALG_TYPE_MASK);
|
||||
if (err)
|
||||
goto err_free_inst;
|
||||
|
||||
crypto_set_skcipher_spawn(&ictx->ctr, inst);
|
||||
crypto_set_skcipher_spawn(&ictx->ctr, aead_crypto_instance(inst));
|
||||
err = crypto_grab_skcipher(&ictx->ctr, ctr_name, 0,
|
||||
crypto_requires_sync(algt->type,
|
||||
algt->mask));
|
||||
@ -531,33 +562,39 @@ static struct crypto_instance *crypto_ccm_alloc_common(struct rtattr **tb,
|
||||
goto err_drop_ctr;
|
||||
|
||||
err = -ENAMETOOLONG;
|
||||
if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
|
||||
if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
|
||||
"ccm_base(%s,%s)", ctr->cra_driver_name,
|
||||
cipher->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
|
||||
goto err_drop_ctr;
|
||||
|
||||
memcpy(inst->alg.cra_name, full_name, CRYPTO_MAX_ALG_NAME);
|
||||
memcpy(inst->alg.base.cra_name, full_name, CRYPTO_MAX_ALG_NAME);
|
||||
|
||||
inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
|
||||
inst->alg.cra_flags |= ctr->cra_flags & CRYPTO_ALG_ASYNC;
|
||||
inst->alg.cra_priority = cipher->cra_priority + ctr->cra_priority;
|
||||
inst->alg.cra_blocksize = 1;
|
||||
inst->alg.cra_alignmask = cipher->cra_alignmask | ctr->cra_alignmask |
|
||||
(__alignof__(u32) - 1);
|
||||
inst->alg.cra_type = &crypto_aead_type;
|
||||
inst->alg.cra_aead.ivsize = 16;
|
||||
inst->alg.cra_aead.maxauthsize = 16;
|
||||
inst->alg.cra_ctxsize = sizeof(struct crypto_ccm_ctx);
|
||||
inst->alg.cra_init = crypto_ccm_init_tfm;
|
||||
inst->alg.cra_exit = crypto_ccm_exit_tfm;
|
||||
inst->alg.cra_aead.setkey = crypto_ccm_setkey;
|
||||
inst->alg.cra_aead.setauthsize = crypto_ccm_setauthsize;
|
||||
inst->alg.cra_aead.encrypt = crypto_ccm_encrypt;
|
||||
inst->alg.cra_aead.decrypt = crypto_ccm_decrypt;
|
||||
inst->alg.base.cra_flags = ctr->cra_flags & CRYPTO_ALG_ASYNC;
|
||||
inst->alg.base.cra_priority = (cipher->cra_priority +
|
||||
ctr->cra_priority) / 2;
|
||||
inst->alg.base.cra_blocksize = 1;
|
||||
inst->alg.base.cra_alignmask = cipher->cra_alignmask |
|
||||
ctr->cra_alignmask |
|
||||
(__alignof__(u32) - 1);
|
||||
inst->alg.ivsize = 16;
|
||||
inst->alg.maxauthsize = 16;
|
||||
inst->alg.base.cra_ctxsize = sizeof(struct crypto_ccm_ctx);
|
||||
inst->alg.init = crypto_ccm_init_tfm;
|
||||
inst->alg.exit = crypto_ccm_exit_tfm;
|
||||
inst->alg.setkey = crypto_ccm_setkey;
|
||||
inst->alg.setauthsize = crypto_ccm_setauthsize;
|
||||
inst->alg.encrypt = crypto_ccm_encrypt;
|
||||
inst->alg.decrypt = crypto_ccm_decrypt;
|
||||
|
||||
out:
|
||||
inst->free = crypto_ccm_free;
|
||||
|
||||
err = aead_register_instance(tmpl, inst);
|
||||
if (err)
|
||||
goto err_drop_ctr;
|
||||
|
||||
out_put_cipher:
|
||||
crypto_mod_put(cipher);
|
||||
return inst;
|
||||
return err;
|
||||
|
||||
err_drop_ctr:
|
||||
crypto_drop_skcipher(&ictx->ctr);
|
||||
@ -565,12 +602,10 @@ static struct crypto_instance *crypto_ccm_alloc_common(struct rtattr **tb,
|
||||
crypto_drop_spawn(&ictx->cipher);
|
||||
err_free_inst:
|
||||
kfree(inst);
|
||||
out_put_cipher:
|
||||
inst = ERR_PTR(err);
|
||||
goto out;
|
||||
goto out_put_cipher;
|
||||
}
|
||||
|
||||
static struct crypto_instance *crypto_ccm_alloc(struct rtattr **tb)
|
||||
static int crypto_ccm_create(struct crypto_template *tmpl, struct rtattr **tb)
|
||||
{
|
||||
const char *cipher_name;
|
||||
char ctr_name[CRYPTO_MAX_ALG_NAME];
|
||||
@ -578,36 +613,28 @@ static struct crypto_instance *crypto_ccm_alloc(struct rtattr **tb)
|
||||
|
||||
cipher_name = crypto_attr_alg_name(tb[1]);
|
||||
if (IS_ERR(cipher_name))
|
||||
return ERR_CAST(cipher_name);
|
||||
return PTR_ERR(cipher_name);
|
||||
|
||||
if (snprintf(ctr_name, CRYPTO_MAX_ALG_NAME, "ctr(%s)",
|
||||
cipher_name) >= CRYPTO_MAX_ALG_NAME)
|
||||
return ERR_PTR(-ENAMETOOLONG);
|
||||
return -ENAMETOOLONG;
|
||||
|
||||
if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "ccm(%s)", cipher_name) >=
|
||||
CRYPTO_MAX_ALG_NAME)
|
||||
return ERR_PTR(-ENAMETOOLONG);
|
||||
return -ENAMETOOLONG;
|
||||
|
||||
return crypto_ccm_alloc_common(tb, full_name, ctr_name, cipher_name);
|
||||
}
|
||||
|
||||
static void crypto_ccm_free(struct crypto_instance *inst)
|
||||
{
|
||||
struct ccm_instance_ctx *ctx = crypto_instance_ctx(inst);
|
||||
|
||||
crypto_drop_spawn(&ctx->cipher);
|
||||
crypto_drop_skcipher(&ctx->ctr);
|
||||
kfree(inst);
|
||||
return crypto_ccm_create_common(tmpl, tb, full_name, ctr_name,
|
||||
cipher_name);
|
||||
}
|
||||
|
||||
static struct crypto_template crypto_ccm_tmpl = {
|
||||
.name = "ccm",
|
||||
.alloc = crypto_ccm_alloc,
|
||||
.free = crypto_ccm_free,
|
||||
.create = crypto_ccm_create,
|
||||
.module = THIS_MODULE,
|
||||
};
|
||||
|
||||
static struct crypto_instance *crypto_ccm_base_alloc(struct rtattr **tb)
|
||||
static int crypto_ccm_base_create(struct crypto_template *tmpl,
|
||||
struct rtattr **tb)
|
||||
{
|
||||
const char *ctr_name;
|
||||
const char *cipher_name;
|
||||
@ -615,23 +642,23 @@ static struct crypto_instance *crypto_ccm_base_alloc(struct rtattr **tb)
|
||||
|
||||
ctr_name = crypto_attr_alg_name(tb[1]);
|
||||
if (IS_ERR(ctr_name))
|
||||
return ERR_CAST(ctr_name);
|
||||
return PTR_ERR(ctr_name);
|
||||
|
||||
cipher_name = crypto_attr_alg_name(tb[2]);
|
||||
if (IS_ERR(cipher_name))
|
||||
return ERR_CAST(cipher_name);
|
||||
return PTR_ERR(cipher_name);
|
||||
|
||||
if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "ccm_base(%s,%s)",
|
||||
ctr_name, cipher_name) >= CRYPTO_MAX_ALG_NAME)
|
||||
return ERR_PTR(-ENAMETOOLONG);
|
||||
return -ENAMETOOLONG;
|
||||
|
||||
return crypto_ccm_alloc_common(tb, full_name, ctr_name, cipher_name);
|
||||
return crypto_ccm_create_common(tmpl, tb, full_name, ctr_name,
|
||||
cipher_name);
|
||||
}
|
||||
|
||||
static struct crypto_template crypto_ccm_base_tmpl = {
|
||||
.name = "ccm_base",
|
||||
.alloc = crypto_ccm_base_alloc,
|
||||
.free = crypto_ccm_free,
|
||||
.create = crypto_ccm_base_create,
|
||||
.module = THIS_MODULE,
|
||||
};
|
||||
|
||||
@ -677,10 +704,12 @@ static int crypto_rfc4309_setauthsize(struct crypto_aead *parent,
|
||||
|
||||
static struct aead_request *crypto_rfc4309_crypt(struct aead_request *req)
|
||||
{
|
||||
struct aead_request *subreq = aead_request_ctx(req);
|
||||
struct crypto_rfc4309_req_ctx *rctx = aead_request_ctx(req);
|
||||
struct aead_request *subreq = &rctx->subreq;
|
||||
struct crypto_aead *aead = crypto_aead_reqtfm(req);
|
||||
struct crypto_rfc4309_ctx *ctx = crypto_aead_ctx(aead);
|
||||
struct crypto_aead *child = ctx->child;
|
||||
struct scatterlist *sg;
|
||||
u8 *iv = PTR_ALIGN((u8 *)(subreq + 1) + crypto_aead_reqsize(child),
|
||||
crypto_aead_alignmask(child) + 1);
|
||||
|
||||
@ -690,17 +719,38 @@ static struct aead_request *crypto_rfc4309_crypt(struct aead_request *req)
|
||||
memcpy(iv + 1, ctx->nonce, 3);
|
||||
memcpy(iv + 4, req->iv, 8);
|
||||
|
||||
scatterwalk_map_and_copy(iv + 16, req->src, 0, req->assoclen - 8, 0);
|
||||
|
||||
sg_init_table(rctx->src, 3);
|
||||
sg_set_buf(rctx->src, iv + 16, req->assoclen - 8);
|
||||
sg = scatterwalk_ffwd(rctx->src + 1, req->src, req->assoclen);
|
||||
if (sg != rctx->src + 1)
|
||||
sg_chain(rctx->src, 2, sg);
|
||||
|
||||
if (req->src != req->dst) {
|
||||
sg_init_table(rctx->dst, 3);
|
||||
sg_set_buf(rctx->dst, iv + 16, req->assoclen - 8);
|
||||
sg = scatterwalk_ffwd(rctx->dst + 1, req->dst, req->assoclen);
|
||||
if (sg != rctx->dst + 1)
|
||||
sg_chain(rctx->dst, 2, sg);
|
||||
}
|
||||
|
||||
aead_request_set_tfm(subreq, child);
|
||||
aead_request_set_callback(subreq, req->base.flags, req->base.complete,
|
||||
req->base.data);
|
||||
aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, iv);
|
||||
aead_request_set_assoc(subreq, req->assoc, req->assoclen);
|
||||
aead_request_set_crypt(subreq, rctx->src,
|
||||
req->src == req->dst ? rctx->src : rctx->dst,
|
||||
req->cryptlen, iv);
|
||||
aead_request_set_ad(subreq, req->assoclen - 8);
|
||||
|
||||
return subreq;
|
||||
}
|
||||
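crypto_rfc4309_crypt() above illustrates the calling convention everything is converted to here: the associated data travels at the head of req->src (and req->dst), its length is declared with aead_request_set_ad(), and only the payload length is passed to aead_request_set_crypt(). A hedged sketch of what a caller of such an AEAD looks like, assuming a synchronous transform and one flat in-place buffer (names are illustrative):

#include <crypto/aead.h>
#include <linux/scatterlist.h>

/*
 * Illustrative caller-side setup for the new AEAD layout:
 * buf = [ assoclen bytes of AD ][ cryptlen bytes of plaintext ],
 * encrypted in place, auth tag appended by the cipher on success.
 */
static int example_aead_encrypt(struct crypto_aead *tfm, u8 *buf,
                                unsigned int assoclen, unsigned int cryptlen,
                                u8 *iv)
{
        struct scatterlist sg;
        struct aead_request *req;
        int err;

        req = aead_request_alloc(tfm, GFP_KERNEL);
        if (!req)
                return -ENOMEM;

        /* One entry covering AD + plaintext + room for the auth tag. */
        sg_init_one(&sg, buf,
                    assoclen + cryptlen + crypto_aead_authsize(tfm));

        aead_request_set_callback(req, 0, NULL, NULL);
        aead_request_set_crypt(req, &sg, &sg, cryptlen, iv);
        aead_request_set_ad(req, assoclen);

        /* Assumes a sync tfm; a real caller must handle -EINPROGRESS. */
        err = crypto_aead_encrypt(req);
        aead_request_free(req);
        return err;
}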
|
||||
static int crypto_rfc4309_encrypt(struct aead_request *req)
|
||||
{
|
||||
if (req->assoclen != 16 && req->assoclen != 20)
|
||||
return -EINVAL;
|
||||
|
||||
req = crypto_rfc4309_crypt(req);
|
||||
|
||||
return crypto_aead_encrypt(req);
|
||||
@ -708,16 +758,19 @@ static int crypto_rfc4309_encrypt(struct aead_request *req)
|
||||
|
||||
static int crypto_rfc4309_decrypt(struct aead_request *req)
|
||||
{
|
||||
if (req->assoclen != 16 && req->assoclen != 20)
|
||||
return -EINVAL;
|
||||
|
||||
req = crypto_rfc4309_crypt(req);
|
||||
|
||||
return crypto_aead_decrypt(req);
|
||||
}
|
||||
|
||||
static int crypto_rfc4309_init_tfm(struct crypto_tfm *tfm)
|
||||
static int crypto_rfc4309_init_tfm(struct crypto_aead *tfm)
|
||||
{
|
||||
struct crypto_instance *inst = (void *)tfm->__crt_alg;
|
||||
struct crypto_aead_spawn *spawn = crypto_instance_ctx(inst);
|
||||
struct crypto_rfc4309_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
struct aead_instance *inst = aead_alg_instance(tfm);
|
||||
struct crypto_aead_spawn *spawn = aead_instance_ctx(inst);
|
||||
struct crypto_rfc4309_ctx *ctx = crypto_aead_ctx(tfm);
|
||||
struct crypto_aead *aead;
|
||||
unsigned long align;
|
||||
|
||||
@ -729,115 +782,118 @@ static int crypto_rfc4309_init_tfm(struct crypto_tfm *tfm)
|
||||
|
||||
align = crypto_aead_alignmask(aead);
|
||||
align &= ~(crypto_tfm_ctx_alignment() - 1);
|
||||
crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
|
||||
sizeof(struct aead_request) +
|
||||
crypto_aead_set_reqsize(
|
||||
tfm,
|
||||
sizeof(struct crypto_rfc4309_req_ctx) +
|
||||
ALIGN(crypto_aead_reqsize(aead), crypto_tfm_ctx_alignment()) +
|
||||
align + 16);
|
||||
align + 32);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void crypto_rfc4309_exit_tfm(struct crypto_tfm *tfm)
|
||||
static void crypto_rfc4309_exit_tfm(struct crypto_aead *tfm)
|
||||
{
|
||||
struct crypto_rfc4309_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
struct crypto_rfc4309_ctx *ctx = crypto_aead_ctx(tfm);
|
||||
|
||||
crypto_free_aead(ctx->child);
|
||||
}
|
||||
|
||||
static struct crypto_instance *crypto_rfc4309_alloc(struct rtattr **tb)
|
||||
static void crypto_rfc4309_free(struct aead_instance *inst)
|
||||
{
|
||||
crypto_drop_aead(aead_instance_ctx(inst));
|
||||
kfree(inst);
|
||||
}
|
||||
|
||||
static int crypto_rfc4309_create(struct crypto_template *tmpl,
|
||||
struct rtattr **tb)
|
||||
{
|
||||
struct crypto_attr_type *algt;
|
||||
struct crypto_instance *inst;
|
||||
struct aead_instance *inst;
|
||||
struct crypto_aead_spawn *spawn;
|
||||
struct crypto_alg *alg;
|
||||
struct aead_alg *alg;
|
||||
const char *ccm_name;
|
||||
int err;
|
||||
|
||||
algt = crypto_get_attr_type(tb);
|
||||
if (IS_ERR(algt))
|
||||
return ERR_CAST(algt);
|
||||
return PTR_ERR(algt);
|
||||
|
||||
if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
|
||||
return ERR_PTR(-EINVAL);
|
||||
return -EINVAL;
|
||||
|
||||
ccm_name = crypto_attr_alg_name(tb[1]);
|
||||
if (IS_ERR(ccm_name))
|
||||
return ERR_CAST(ccm_name);
|
||||
return PTR_ERR(ccm_name);
|
||||
|
||||
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
|
||||
if (!inst)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
return -ENOMEM;
|
||||
|
||||
spawn = crypto_instance_ctx(inst);
|
||||
crypto_set_aead_spawn(spawn, inst);
|
||||
spawn = aead_instance_ctx(inst);
|
||||
crypto_set_aead_spawn(spawn, aead_crypto_instance(inst));
|
||||
err = crypto_grab_aead(spawn, ccm_name, 0,
|
||||
crypto_requires_sync(algt->type, algt->mask));
|
||||
if (err)
|
||||
goto out_free_inst;
|
||||
|
||||
alg = crypto_aead_spawn_alg(spawn);
|
||||
alg = crypto_spawn_aead_alg(spawn);
|
||||
|
||||
err = -EINVAL;
|
||||
|
||||
/* We only support 16-byte blocks. */
|
||||
if (alg->cra_aead.ivsize != 16)
|
||||
if (crypto_aead_alg_ivsize(alg) != 16)
|
||||
goto out_drop_alg;
|
||||
|
||||
/* Not a stream cipher? */
|
||||
if (alg->cra_blocksize != 1)
|
||||
if (alg->base.cra_blocksize != 1)
|
||||
goto out_drop_alg;
|
||||
|
||||
err = -ENAMETOOLONG;
|
||||
if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
|
||||
"rfc4309(%s)", alg->cra_name) >= CRYPTO_MAX_ALG_NAME ||
|
||||
snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
|
||||
"rfc4309(%s)", alg->cra_driver_name) >=
|
||||
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
|
||||
"rfc4309(%s)", alg->base.cra_name) >=
|
||||
CRYPTO_MAX_ALG_NAME ||
|
||||
snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
|
||||
"rfc4309(%s)", alg->base.cra_driver_name) >=
|
||||
CRYPTO_MAX_ALG_NAME)
|
||||
goto out_drop_alg;
|
||||
|
||||
inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
|
||||
inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC;
|
||||
inst->alg.cra_priority = alg->cra_priority;
|
||||
inst->alg.cra_blocksize = 1;
|
||||
inst->alg.cra_alignmask = alg->cra_alignmask;
|
||||
inst->alg.cra_type = &crypto_nivaead_type;
|
||||
inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
|
||||
inst->alg.base.cra_priority = alg->base.cra_priority;
|
||||
inst->alg.base.cra_blocksize = 1;
|
||||
inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
|
||||
|
||||
inst->alg.cra_aead.ivsize = 8;
|
||||
inst->alg.cra_aead.maxauthsize = 16;
|
||||
inst->alg.ivsize = 8;
|
||||
inst->alg.maxauthsize = 16;
|
||||
|
||||
inst->alg.cra_ctxsize = sizeof(struct crypto_rfc4309_ctx);
|
||||
inst->alg.base.cra_ctxsize = sizeof(struct crypto_rfc4309_ctx);
|
||||
|
||||
inst->alg.cra_init = crypto_rfc4309_init_tfm;
|
||||
inst->alg.cra_exit = crypto_rfc4309_exit_tfm;
|
||||
inst->alg.init = crypto_rfc4309_init_tfm;
|
||||
inst->alg.exit = crypto_rfc4309_exit_tfm;
|
||||
|
||||
inst->alg.cra_aead.setkey = crypto_rfc4309_setkey;
|
||||
inst->alg.cra_aead.setauthsize = crypto_rfc4309_setauthsize;
|
||||
inst->alg.cra_aead.encrypt = crypto_rfc4309_encrypt;
|
||||
inst->alg.cra_aead.decrypt = crypto_rfc4309_decrypt;
|
||||
inst->alg.setkey = crypto_rfc4309_setkey;
|
||||
inst->alg.setauthsize = crypto_rfc4309_setauthsize;
|
||||
inst->alg.encrypt = crypto_rfc4309_encrypt;
|
||||
inst->alg.decrypt = crypto_rfc4309_decrypt;
|
||||
|
||||
inst->alg.cra_aead.geniv = "seqiv";
|
||||
inst->free = crypto_rfc4309_free;
|
||||
|
||||
err = aead_register_instance(tmpl, inst);
|
||||
if (err)
|
||||
goto out_drop_alg;
|
||||
|
||||
out:
|
||||
return inst;
|
||||
return err;
|
||||
|
||||
out_drop_alg:
|
||||
crypto_drop_aead(spawn);
|
||||
out_free_inst:
|
||||
kfree(inst);
|
||||
inst = ERR_PTR(err);
|
||||
goto out;
|
||||
}
|
||||
|
||||
static void crypto_rfc4309_free(struct crypto_instance *inst)
|
||||
{
|
||||
crypto_drop_spawn(crypto_instance_ctx(inst));
|
||||
kfree(inst);
|
||||
}
|
||||
|
||||
static struct crypto_template crypto_rfc4309_tmpl = {
|
||||
.name = "rfc4309",
|
||||
.alloc = crypto_rfc4309_alloc,
|
||||
.free = crypto_rfc4309_free,
|
||||
.create = crypto_rfc4309_create,
|
||||
.module = THIS_MODULE,
|
||||
};
|
||||
|
||||
|
@ -13,14 +13,7 @@
|
||||
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/module.h>

#define CHACHA20_NONCE_SIZE 16
#define CHACHA20_KEY_SIZE 32
#define CHACHA20_BLOCK_SIZE 64

struct chacha20_ctx {
        u32 key[8];
};
#include <crypto/chacha20.h>

static inline u32 rotl32(u32 v, u8 n)
|
||||
{
|
||||
@ -108,7 +101,7 @@ static void chacha20_docrypt(u32 *state, u8 *dst, const u8 *src,
|
||||
}
|
||||
}
|
||||
|
||||
static void chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv)
|
||||
void crypto_chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv)
|
||||
{
|
||||
static const char constant[16] = "expand 32-byte k";
|
||||
|
||||
@ -129,8 +122,9 @@ static void chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv)
|
||||
state[14] = le32_to_cpuvp(iv + 8);
|
||||
state[15] = le32_to_cpuvp(iv + 12);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(crypto_chacha20_init);
|
||||
|
||||
static int chacha20_setkey(struct crypto_tfm *tfm, const u8 *key,
|
||||
int crypto_chacha20_setkey(struct crypto_tfm *tfm, const u8 *key,
|
||||
unsigned int keysize)
|
||||
{
|
||||
struct chacha20_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
@ -144,8 +138,9 @@ static int chacha20_setkey(struct crypto_tfm *tfm, const u8 *key,
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(crypto_chacha20_setkey);
|
||||
|
||||
static int chacha20_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
int crypto_chacha20_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
{
|
||||
struct blkcipher_walk walk;
|
||||
@ -155,7 +150,7 @@ static int chacha20_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
err = blkcipher_walk_virt_block(desc, &walk, CHACHA20_BLOCK_SIZE);
|
||||
|
||||
chacha20_init(state, crypto_blkcipher_ctx(desc->tfm), walk.iv);
|
||||
crypto_chacha20_init(state, crypto_blkcipher_ctx(desc->tfm), walk.iv);
|
||||
|
||||
while (walk.nbytes >= CHACHA20_BLOCK_SIZE) {
|
||||
chacha20_docrypt(state, walk.dst.virt.addr, walk.src.virt.addr,
|
||||
@ -172,6 +167,7 @@ static int chacha20_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(crypto_chacha20_crypt);
|
||||
|
||||
static struct crypto_alg alg = {
        .cra_name               = "chacha20",
@ -187,11 +183,11 @@ static struct crypto_alg alg = {
                .blkcipher = {
                        .min_keysize    = CHACHA20_KEY_SIZE,
                        .max_keysize    = CHACHA20_KEY_SIZE,
                        .ivsize         = CHACHA20_NONCE_SIZE,
                        .ivsize         = CHACHA20_IV_SIZE,
                        .geniv          = "seqiv",
                        .setkey         = chacha20_setkey,
                        .encrypt        = chacha20_crypt,
                        .decrypt        = chacha20_crypt,
                        .setkey         = crypto_chacha20_setkey,
                        .encrypt        = crypto_chacha20_crypt,
                        .decrypt        = crypto_chacha20_crypt,
                },
        },
};

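The chacha20_generic changes above rename the core routines to crypto_chacha20_init/setkey/crypt, move the constants and context into <crypto/chacha20.h>, and export the helpers so other implementations (for example arch-specific SIMD drivers) can reuse the generic key setup and fallback path. A rough sketch of how another blkcipher implementation could lean on the exported helpers (the example_* names and the capability test are placeholders, not from this patch):

#include <crypto/chacha20.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>

/* Placeholder for an arch-specific capability test. */
static bool example_have_simd(void)
{
        return false;
}

/*
 * Driver-specific encrypt/decrypt hook: fall back to the exported
 * generic helper whenever the fast path is unavailable.  The matching
 * .setkey hook can simply point at crypto_chacha20_setkey.
 */
static int example_chacha20_crypt(struct blkcipher_desc *desc,
                                  struct scatterlist *dst,
                                  struct scatterlist *src,
                                  unsigned int nbytes)
{
        if (!example_have_simd())
                return crypto_chacha20_crypt(desc, dst, src, nbytes);

        /* ... an accelerated block walk would go here ... */
        return crypto_chacha20_crypt(desc, dst, src, nbytes);
}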
@ -13,6 +13,8 @@
|
||||
#include <crypto/internal/hash.h>
|
||||
#include <crypto/internal/skcipher.h>
|
||||
#include <crypto/scatterwalk.h>
|
||||
#include <crypto/chacha20.h>
|
||||
#include <crypto/poly1305.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/kernel.h>
|
||||
@ -20,11 +22,6 @@
|
||||
|
||||
#include "internal.h"
|
||||
|
||||
#define POLY1305_BLOCK_SIZE 16
|
||||
#define POLY1305_DIGEST_SIZE 16
|
||||
#define POLY1305_KEY_SIZE 32
|
||||
#define CHACHA20_KEY_SIZE 32
|
||||
#define CHACHA20_IV_SIZE 16
|
||||
#define CHACHAPOLY_IV_SIZE 12
|
||||
|
||||
struct chachapoly_instance_ctx {
|
||||
@ -60,12 +57,16 @@ struct chacha_req {
|
||||
};
|
||||
|
||||
struct chachapoly_req_ctx {
|
||||
struct scatterlist src[2];
|
||||
struct scatterlist dst[2];
|
||||
/* the key we generate for Poly1305 using Chacha20 */
|
||||
u8 key[POLY1305_KEY_SIZE];
|
||||
/* calculated Poly1305 tag */
|
||||
u8 tag[POLY1305_DIGEST_SIZE];
|
||||
/* length of data to en/decrypt, without ICV */
|
||||
unsigned int cryptlen;
|
||||
/* Actual AD, excluding IV */
|
||||
unsigned int assoclen;
|
||||
union {
|
||||
struct poly_req poly;
|
||||
struct chacha_req chacha;
|
||||
@ -98,7 +99,9 @@ static int poly_verify_tag(struct aead_request *req)
|
||||
struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
|
||||
u8 tag[sizeof(rctx->tag)];
|
||||
|
||||
scatterwalk_map_and_copy(tag, req->src, rctx->cryptlen, sizeof(tag), 0);
|
||||
scatterwalk_map_and_copy(tag, req->src,
|
||||
req->assoclen + rctx->cryptlen,
|
||||
sizeof(tag), 0);
|
||||
if (crypto_memneq(tag, rctx->tag, sizeof(tag)))
|
||||
return -EBADMSG;
|
||||
return 0;
|
||||
@ -108,7 +111,8 @@ static int poly_copy_tag(struct aead_request *req)
|
||||
{
|
||||
struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
|
||||
|
||||
scatterwalk_map_and_copy(rctx->tag, req->dst, rctx->cryptlen,
|
||||
scatterwalk_map_and_copy(rctx->tag, req->dst,
|
||||
req->assoclen + rctx->cryptlen,
|
||||
sizeof(rctx->tag), 1);
|
||||
return 0;
|
||||
}
|
||||
@ -123,14 +127,24 @@ static int chacha_decrypt(struct aead_request *req)
|
||||
struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
|
||||
struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
|
||||
struct chacha_req *creq = &rctx->u.chacha;
|
||||
struct scatterlist *src, *dst;
|
||||
int err;
|
||||
|
||||
chacha_iv(creq->iv, req, 1);
|
||||
|
||||
sg_init_table(rctx->src, 2);
|
||||
src = scatterwalk_ffwd(rctx->src, req->src, req->assoclen);
|
||||
dst = src;
|
||||
|
||||
if (req->src != req->dst) {
|
||||
sg_init_table(rctx->dst, 2);
|
||||
dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen);
|
||||
}
|
||||
|
||||
ablkcipher_request_set_callback(&creq->req, aead_request_flags(req),
|
||||
chacha_decrypt_done, req);
|
||||
ablkcipher_request_set_tfm(&creq->req, ctx->chacha);
|
||||
ablkcipher_request_set_crypt(&creq->req, req->src, req->dst,
|
||||
ablkcipher_request_set_crypt(&creq->req, src, dst,
|
||||
rctx->cryptlen, creq->iv);
|
||||
err = crypto_ablkcipher_decrypt(&creq->req);
|
||||
if (err)
|
||||
@ -156,14 +170,15 @@ static void poly_tail_done(struct crypto_async_request *areq, int err)
|
||||
|
||||
static int poly_tail(struct aead_request *req)
|
||||
{
|
||||
struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
|
||||
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
|
||||
struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm);
|
||||
struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
|
||||
struct poly_req *preq = &rctx->u.poly;
|
||||
__le64 len;
|
||||
int err;
|
||||
|
||||
sg_init_table(preq->src, 1);
|
||||
len = cpu_to_le64(req->assoclen);
|
||||
len = cpu_to_le64(rctx->assoclen);
|
||||
memcpy(&preq->tail.assoclen, &len, sizeof(len));
|
||||
len = cpu_to_le64(rctx->cryptlen);
|
||||
memcpy(&preq->tail.cryptlen, &len, sizeof(len));
|
||||
@ -228,6 +243,9 @@ static int poly_cipher(struct aead_request *req)
|
||||
if (rctx->cryptlen == req->cryptlen) /* encrypting */
|
||||
crypt = req->dst;
|
||||
|
||||
sg_init_table(rctx->src, 2);
|
||||
crypt = scatterwalk_ffwd(rctx->src, crypt, req->assoclen);
|
||||
|
||||
ahash_request_set_callback(&preq->req, aead_request_flags(req),
|
||||
poly_cipher_done, req);
|
||||
ahash_request_set_tfm(&preq->req, ctx->poly);
|
||||
@ -253,7 +271,7 @@ static int poly_adpad(struct aead_request *req)
|
||||
unsigned int padlen, bs = POLY1305_BLOCK_SIZE;
|
||||
int err;
|
||||
|
||||
padlen = (bs - (req->assoclen % bs)) % bs;
|
||||
padlen = (bs - (rctx->assoclen % bs)) % bs;
|
||||
memset(preq->pad, 0, sizeof(preq->pad));
|
||||
sg_init_table(preq->src, 1);
|
||||
sg_set_buf(preq->src, preq->pad, padlen);
|
||||
@ -285,7 +303,7 @@ static int poly_ad(struct aead_request *req)
|
||||
ahash_request_set_callback(&preq->req, aead_request_flags(req),
|
||||
poly_ad_done, req);
|
||||
ahash_request_set_tfm(&preq->req, ctx->poly);
|
||||
ahash_request_set_crypt(&preq->req, req->assoc, NULL, req->assoclen);
|
||||
ahash_request_set_crypt(&preq->req, req->src, NULL, rctx->assoclen);
|
||||
|
||||
err = crypto_ahash_update(&preq->req);
|
||||
if (err)
|
||||
@ -351,11 +369,20 @@ static void poly_genkey_done(struct crypto_async_request *areq, int err)
|
||||
|
||||
static int poly_genkey(struct aead_request *req)
|
||||
{
|
||||
struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
|
||||
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
|
||||
struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm);
|
||||
struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
|
||||
struct chacha_req *creq = &rctx->u.chacha;
|
||||
int err;
|
||||
|
||||
rctx->assoclen = req->assoclen;
|
||||
|
||||
if (crypto_aead_ivsize(tfm) == 8) {
|
||||
if (rctx->assoclen < 8)
|
||||
return -EINVAL;
|
||||
rctx->assoclen -= 8;
|
||||
}
|
||||
|
||||
sg_init_table(creq->src, 1);
|
||||
memset(rctx->key, 0, sizeof(rctx->key));
|
||||
sg_set_buf(creq->src, rctx->key, sizeof(rctx->key));
|
||||
@ -385,14 +412,24 @@ static int chacha_encrypt(struct aead_request *req)
|
||||
struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
|
||||
struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
|
||||
struct chacha_req *creq = &rctx->u.chacha;
|
||||
struct scatterlist *src, *dst;
|
||||
int err;
|
||||
|
||||
chacha_iv(creq->iv, req, 1);
|
||||
|
||||
sg_init_table(rctx->src, 2);
|
||||
src = scatterwalk_ffwd(rctx->src, req->src, req->assoclen);
|
||||
dst = src;
|
||||
|
||||
if (req->src != req->dst) {
|
||||
sg_init_table(rctx->dst, 2);
|
||||
dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen);
|
||||
}
|
||||
|
||||
ablkcipher_request_set_callback(&creq->req, aead_request_flags(req),
|
||||
chacha_encrypt_done, req);
|
||||
ablkcipher_request_set_tfm(&creq->req, ctx->chacha);
|
||||
ablkcipher_request_set_crypt(&creq->req, req->src, req->dst,
|
||||
ablkcipher_request_set_crypt(&creq->req, src, dst,
|
||||
req->cryptlen, creq->iv);
|
||||
err = crypto_ablkcipher_encrypt(&creq->req);
|
||||
if (err)
|
||||
@ -426,8 +463,6 @@ static int chachapoly_decrypt(struct aead_request *req)
|
||||
{
|
||||
struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
|
||||
|
||||
if (req->cryptlen < POLY1305_DIGEST_SIZE)
|
||||
return -EINVAL;
|
||||
rctx->cryptlen = req->cryptlen - POLY1305_DIGEST_SIZE;
|
||||
|
||||
/* decrypt call chain:
|
||||
@ -476,11 +511,11 @@ static int chachapoly_setauthsize(struct crypto_aead *tfm,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int chachapoly_init(struct crypto_tfm *tfm)
|
||||
static int chachapoly_init(struct crypto_aead *tfm)
|
||||
{
|
||||
struct crypto_instance *inst = (void *)tfm->__crt_alg;
|
||||
struct chachapoly_instance_ctx *ictx = crypto_instance_ctx(inst);
|
||||
struct chachapoly_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
struct aead_instance *inst = aead_alg_instance(tfm);
|
||||
struct chachapoly_instance_ctx *ictx = aead_instance_ctx(inst);
|
||||
struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm);
|
||||
struct crypto_ablkcipher *chacha;
|
||||
struct crypto_ahash *poly;
|
||||
unsigned long align;
|
||||
@ -499,77 +534,87 @@ static int chachapoly_init(struct crypto_tfm *tfm)
|
||||
ctx->poly = poly;
|
||||
ctx->saltlen = ictx->saltlen;
|
||||
|
||||
align = crypto_tfm_alg_alignmask(tfm);
|
||||
align = crypto_aead_alignmask(tfm);
|
||||
align &= ~(crypto_tfm_ctx_alignment() - 1);
|
||||
crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
|
||||
align + offsetof(struct chachapoly_req_ctx, u) +
|
||||
max(offsetof(struct chacha_req, req) +
|
||||
sizeof(struct ablkcipher_request) +
|
||||
crypto_ablkcipher_reqsize(chacha),
|
||||
offsetof(struct poly_req, req) +
|
||||
sizeof(struct ahash_request) +
|
||||
crypto_ahash_reqsize(poly)));
|
||||
crypto_aead_set_reqsize(
|
||||
tfm,
|
||||
align + offsetof(struct chachapoly_req_ctx, u) +
|
||||
max(offsetof(struct chacha_req, req) +
|
||||
sizeof(struct ablkcipher_request) +
|
||||
crypto_ablkcipher_reqsize(chacha),
|
||||
offsetof(struct poly_req, req) +
|
||||
sizeof(struct ahash_request) +
|
||||
crypto_ahash_reqsize(poly)));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void chachapoly_exit(struct crypto_tfm *tfm)
|
||||
static void chachapoly_exit(struct crypto_aead *tfm)
|
||||
{
|
||||
struct chachapoly_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm);
|
||||
|
||||
crypto_free_ahash(ctx->poly);
|
||||
crypto_free_ablkcipher(ctx->chacha);
|
||||
}
|
||||
|
||||
static struct crypto_instance *chachapoly_alloc(struct rtattr **tb,
|
||||
const char *name,
|
||||
unsigned int ivsize)
|
||||
static void chachapoly_free(struct aead_instance *inst)
|
||||
{
|
||||
struct chachapoly_instance_ctx *ctx = aead_instance_ctx(inst);
|
||||
|
||||
crypto_drop_skcipher(&ctx->chacha);
|
||||
crypto_drop_ahash(&ctx->poly);
|
||||
kfree(inst);
|
||||
}
|
||||
|
||||
static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
|
||||
const char *name, unsigned int ivsize)
|
||||
{
|
||||
struct crypto_attr_type *algt;
|
||||
struct crypto_instance *inst;
|
||||
struct aead_instance *inst;
|
||||
struct crypto_alg *chacha;
|
||||
struct crypto_alg *poly;
|
||||
struct ahash_alg *poly_ahash;
|
||||
struct hash_alg_common *poly_hash;
|
||||
struct chachapoly_instance_ctx *ctx;
|
||||
const char *chacha_name, *poly_name;
|
||||
int err;
|
||||
|
||||
if (ivsize > CHACHAPOLY_IV_SIZE)
|
||||
return ERR_PTR(-EINVAL);
|
||||
return -EINVAL;
|
||||
|
||||
algt = crypto_get_attr_type(tb);
|
||||
if (IS_ERR(algt))
|
||||
return ERR_CAST(algt);
|
||||
return PTR_ERR(algt);
|
||||
|
||||
if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
|
||||
return ERR_PTR(-EINVAL);
|
||||
return -EINVAL;
|
||||
|
||||
chacha_name = crypto_attr_alg_name(tb[1]);
|
||||
if (IS_ERR(chacha_name))
|
||||
return ERR_CAST(chacha_name);
|
||||
return PTR_ERR(chacha_name);
|
||||
poly_name = crypto_attr_alg_name(tb[2]);
|
||||
if (IS_ERR(poly_name))
|
||||
return ERR_CAST(poly_name);
|
||||
return PTR_ERR(poly_name);
|
||||
|
||||
poly = crypto_find_alg(poly_name, &crypto_ahash_type,
|
||||
CRYPTO_ALG_TYPE_HASH,
|
||||
CRYPTO_ALG_TYPE_AHASH_MASK);
|
||||
if (IS_ERR(poly))
|
||||
return ERR_CAST(poly);
|
||||
return PTR_ERR(poly);
|
||||
|
||||
err = -ENOMEM;
|
||||
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
|
||||
if (!inst)
|
||||
goto out_put_poly;
|
||||
|
||||
ctx = crypto_instance_ctx(inst);
|
||||
ctx = aead_instance_ctx(inst);
|
||||
ctx->saltlen = CHACHAPOLY_IV_SIZE - ivsize;
|
||||
poly_ahash = container_of(poly, struct ahash_alg, halg.base);
|
||||
err = crypto_init_ahash_spawn(&ctx->poly, &poly_ahash->halg, inst);
|
||||
poly_hash = __crypto_hash_alg_common(poly);
|
||||
err = crypto_init_ahash_spawn(&ctx->poly, poly_hash,
|
||||
aead_crypto_instance(inst));
|
||||
if (err)
|
||||
goto err_free_inst;
|
||||
|
||||
crypto_set_skcipher_spawn(&ctx->chacha, inst);
|
||||
crypto_set_skcipher_spawn(&ctx->chacha, aead_crypto_instance(inst));
|
||||
err = crypto_grab_skcipher(&ctx->chacha, chacha_name, 0,
|
||||
crypto_requires_sync(algt->type,
|
||||
algt->mask));
|
||||
@ -587,37 +632,42 @@ static struct crypto_instance *chachapoly_alloc(struct rtattr **tb,
|
||||
goto out_drop_chacha;
|
||||
|
||||
err = -ENAMETOOLONG;
|
||||
if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
|
||||
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
|
||||
"%s(%s,%s)", name, chacha_name,
|
||||
poly_name) >= CRYPTO_MAX_ALG_NAME)
|
||||
goto out_drop_chacha;
|
||||
if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
|
||||
if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
|
||||
"%s(%s,%s)", name, chacha->cra_driver_name,
|
||||
poly->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
|
||||
goto out_drop_chacha;
|
||||
|
||||
inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
|
||||
inst->alg.cra_flags |= (chacha->cra_flags |
|
||||
poly->cra_flags) & CRYPTO_ALG_ASYNC;
|
||||
inst->alg.cra_priority = (chacha->cra_priority +
|
||||
poly->cra_priority) / 2;
|
||||
inst->alg.cra_blocksize = 1;
|
||||
inst->alg.cra_alignmask = chacha->cra_alignmask | poly->cra_alignmask;
|
||||
inst->alg.cra_type = &crypto_nivaead_type;
|
||||
inst->alg.cra_aead.ivsize = ivsize;
|
||||
inst->alg.cra_aead.maxauthsize = POLY1305_DIGEST_SIZE;
|
||||
inst->alg.cra_ctxsize = sizeof(struct chachapoly_ctx) + ctx->saltlen;
|
||||
inst->alg.cra_init = chachapoly_init;
|
||||
inst->alg.cra_exit = chachapoly_exit;
|
||||
inst->alg.cra_aead.encrypt = chachapoly_encrypt;
|
||||
inst->alg.cra_aead.decrypt = chachapoly_decrypt;
|
||||
inst->alg.cra_aead.setkey = chachapoly_setkey;
|
||||
inst->alg.cra_aead.setauthsize = chachapoly_setauthsize;
|
||||
inst->alg.cra_aead.geniv = "seqiv";
|
||||
inst->alg.base.cra_flags = (chacha->cra_flags | poly->cra_flags) &
|
||||
CRYPTO_ALG_ASYNC;
|
||||
inst->alg.base.cra_priority = (chacha->cra_priority +
|
||||
poly->cra_priority) / 2;
|
||||
inst->alg.base.cra_blocksize = 1;
|
||||
inst->alg.base.cra_alignmask = chacha->cra_alignmask |
|
||||
poly->cra_alignmask;
|
||||
inst->alg.base.cra_ctxsize = sizeof(struct chachapoly_ctx) +
|
||||
ctx->saltlen;
|
||||
inst->alg.ivsize = ivsize;
|
||||
inst->alg.maxauthsize = POLY1305_DIGEST_SIZE;
|
||||
inst->alg.init = chachapoly_init;
|
||||
inst->alg.exit = chachapoly_exit;
|
||||
inst->alg.encrypt = chachapoly_encrypt;
|
||||
inst->alg.decrypt = chachapoly_decrypt;
|
||||
inst->alg.setkey = chachapoly_setkey;
|
||||
inst->alg.setauthsize = chachapoly_setauthsize;
|
||||
|
||||
out:
|
||||
inst->free = chachapoly_free;
|
||||
|
||||
err = aead_register_instance(tmpl, inst);
|
||||
if (err)
|
||||
goto out_drop_chacha;
|
||||
|
||||
out_put_poly:
|
||||
crypto_mod_put(poly);
|
||||
return inst;
|
||||
return err;
|
||||
|
||||
out_drop_chacha:
|
||||
crypto_drop_skcipher(&ctx->chacha);
|
||||
@ -625,41 +675,28 @@ static struct crypto_instance *chachapoly_alloc(struct rtattr **tb,
|
||||
crypto_drop_ahash(&ctx->poly);
|
||||
err_free_inst:
|
||||
kfree(inst);
|
||||
out_put_poly:
|
||||
inst = ERR_PTR(err);
|
||||
goto out;
|
||||
goto out_put_poly;
|
||||
}
|
||||
|
||||
static struct crypto_instance *rfc7539_alloc(struct rtattr **tb)
|
||||
static int rfc7539_create(struct crypto_template *tmpl, struct rtattr **tb)
|
||||
{
|
||||
return chachapoly_alloc(tb, "rfc7539", 12);
|
||||
return chachapoly_create(tmpl, tb, "rfc7539", 12);
|
||||
}
|
||||
|
||||
static struct crypto_instance *rfc7539esp_alloc(struct rtattr **tb)
|
||||
static int rfc7539esp_create(struct crypto_template *tmpl, struct rtattr **tb)
|
||||
{
|
||||
return chachapoly_alloc(tb, "rfc7539esp", 8);
|
||||
}
|
||||
|
||||
static void chachapoly_free(struct crypto_instance *inst)
|
||||
{
|
||||
struct chachapoly_instance_ctx *ctx = crypto_instance_ctx(inst);
|
||||
|
||||
crypto_drop_skcipher(&ctx->chacha);
|
||||
crypto_drop_ahash(&ctx->poly);
|
||||
kfree(inst);
|
||||
return chachapoly_create(tmpl, tb, "rfc7539esp", 8);
|
||||
}
|
||||
|
||||
static struct crypto_template rfc7539_tmpl = {
|
||||
.name = "rfc7539",
|
||||
.alloc = rfc7539_alloc,
|
||||
.free = chachapoly_free,
|
||||
.create = rfc7539_create,
|
||||
.module = THIS_MODULE,
|
||||
};
|
||||
|
||||
static struct crypto_template rfc7539esp_tmpl = {
|
||||
.name = "rfc7539esp",
|
||||
.alloc = rfc7539esp_alloc,
|
||||
.free = chachapoly_free,
|
||||
.create = rfc7539esp_create,
|
||||
.module = THIS_MODULE,
|
||||
};
|
||||
|
||||
@ -690,6 +727,5 @@ module_exit(chacha20poly1305_module_exit);
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_AUTHOR("Martin Willi <martin@strongswan.org>");
|
||||
MODULE_DESCRIPTION("ChaCha20-Poly1305 AEAD");
|
||||
MODULE_ALIAS_CRYPTO("chacha20poly1305");
|
||||
MODULE_ALIAS_CRYPTO("rfc7539");
|
||||
MODULE_ALIAS_CRYPTO("rfc7539esp");

@ -176,10 +176,9 @@ static inline void cryptd_check_internal(struct rtattr **tb, u32 *type,
algt = crypto_get_attr_type(tb);
if (IS_ERR(algt))
return;
if ((algt->type & CRYPTO_ALG_INTERNAL))
*type |= CRYPTO_ALG_INTERNAL;
if ((algt->mask & CRYPTO_ALG_INTERNAL))
*mask |= CRYPTO_ALG_INTERNAL;

*type |= algt->type & CRYPTO_ALG_INTERNAL;
*mask |= algt->mask & CRYPTO_ALG_INTERNAL;
}

static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
@ -688,16 +687,18 @@ static void cryptd_aead_crypt(struct aead_request *req,
int (*crypt)(struct aead_request *req))
{
struct cryptd_aead_request_ctx *rctx;
crypto_completion_t compl;

rctx = aead_request_ctx(req);
compl = rctx->complete;

if (unlikely(err == -EINPROGRESS))
goto out;
aead_request_set_tfm(req, child);
err = crypt( req );
req->base.complete = rctx->complete;
out:
local_bh_disable();
rctx->complete(&req->base, err);
compl(&req->base, err);
local_bh_enable();
}

@ -708,7 +709,7 @@ static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
struct aead_request *req;

req = container_of(areq, struct aead_request, base);
cryptd_aead_crypt(req, child, err, crypto_aead_crt(child)->encrypt);
cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->encrypt);
}

static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
@ -718,7 +719,7 @@ static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
struct aead_request *req;

req = container_of(areq, struct aead_request, base);
cryptd_aead_crypt(req, child, err, crypto_aead_crt(child)->decrypt);
cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->decrypt);
}

static int cryptd_aead_enqueue(struct aead_request *req,
@ -756,7 +757,9 @@ static int cryptd_aead_init_tfm(struct crypto_aead *tfm)
return PTR_ERR(cipher);

ctx->child = cipher;
crypto_aead_set_reqsize(tfm, sizeof(struct cryptd_aead_request_ctx));
crypto_aead_set_reqsize(
tfm, max((unsigned)sizeof(struct cryptd_aead_request_ctx),
crypto_aead_reqsize(cipher)));
return 0;
}

@ -775,7 +778,7 @@ static int cryptd_create_aead(struct crypto_template *tmpl,
struct aead_alg *alg;
const char *name;
u32 type = 0;
u32 mask = 0;
u32 mask = CRYPTO_ALG_ASYNC;
int err;

cryptd_check_internal(tb, &type, &mask);

@ -25,7 +25,6 @@
#include <net/netlink.h>
#include <linux/security.h>
#include <net/net_namespace.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/internal/rng.h>
#include <crypto/akcipher.h>
@ -385,34 +384,6 @@ static struct crypto_alg *crypto_user_skcipher_alg(const char *name, u32 type,
return ERR_PTR(err);
}

static struct crypto_alg *crypto_user_aead_alg(const char *name, u32 type,
u32 mask)
{
int err;
struct crypto_alg *alg;

type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
type |= CRYPTO_ALG_TYPE_AEAD;
mask &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
mask |= CRYPTO_ALG_TYPE_MASK;

for (;;) {
alg = crypto_lookup_aead(name, type, mask);
if (!IS_ERR(alg))
return alg;

err = PTR_ERR(alg);
if (err != -EAGAIN)
break;
if (signal_pending(current)) {
err = -EINTR;
break;
}
}

return ERR_PTR(err);
}

static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs)
{
@ -446,9 +417,6 @@ static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
name = p->cru_name;

switch (p->cru_type & p->cru_mask & CRYPTO_ALG_TYPE_MASK) {
case CRYPTO_ALG_TYPE_AEAD:
alg = crypto_user_aead_alg(name, p->cru_type, p->cru_mask);
break;
case CRYPTO_ALG_TYPE_GIVCIPHER:
case CRYPTO_ALG_TYPE_BLKCIPHER:
case CRYPTO_ALG_TYPE_ABLKCIPHER:
@ -19,8 +19,6 @@
|
||||
*/
|
||||
|
||||
#include <crypto/internal/geniv.h>
|
||||
#include <crypto/null.h>
|
||||
#include <crypto/rng.h>
|
||||
#include <crypto/scatterwalk.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/init.h>
|
||||
@ -33,13 +31,6 @@
|
||||
|
||||
#define MAX_IV_SIZE 16
|
||||
|
||||
struct echainiv_ctx {
|
||||
/* aead_geniv_ctx must be first the element */
|
||||
struct aead_geniv_ctx geniv;
|
||||
struct crypto_blkcipher *null;
|
||||
u8 salt[] __attribute__ ((aligned(__alignof__(u32))));
|
||||
};
|
||||
|
||||
static DEFINE_PER_CPU(u32 [MAX_IV_SIZE / sizeof(u32)], echainiv_iv);
|
||||
|
||||
/* We don't care if we get preempted and read/write IVs from the next CPU. */
|
||||
@ -103,7 +94,7 @@ static void echainiv_encrypt_complete(struct crypto_async_request *base,
|
||||
static int echainiv_encrypt(struct aead_request *req)
|
||||
{
|
||||
struct crypto_aead *geniv = crypto_aead_reqtfm(req);
|
||||
struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
|
||||
struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
|
||||
struct aead_request *subreq = aead_request_ctx(req);
|
||||
crypto_completion_t compl;
|
||||
void *data;
|
||||
@ -114,7 +105,7 @@ static int echainiv_encrypt(struct aead_request *req)
|
||||
if (req->cryptlen < ivsize)
|
||||
return -EINVAL;
|
||||
|
||||
aead_request_set_tfm(subreq, ctx->geniv.child);
|
||||
aead_request_set_tfm(subreq, ctx->child);
|
||||
|
||||
compl = echainiv_encrypt_complete;
|
||||
data = req;
|
||||
@ -145,8 +136,8 @@ static int echainiv_encrypt(struct aead_request *req)
|
||||
|
||||
aead_request_set_callback(subreq, req->base.flags, compl, data);
|
||||
aead_request_set_crypt(subreq, req->dst, req->dst,
|
||||
req->cryptlen - ivsize, info);
|
||||
aead_request_set_ad(subreq, req->assoclen + ivsize);
|
||||
req->cryptlen, info);
|
||||
aead_request_set_ad(subreq, req->assoclen);
|
||||
|
||||
crypto_xor(info, ctx->salt, ivsize);
|
||||
scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1);
|
||||
@ -160,16 +151,16 @@ static int echainiv_encrypt(struct aead_request *req)
|
||||
static int echainiv_decrypt(struct aead_request *req)
|
||||
{
|
||||
struct crypto_aead *geniv = crypto_aead_reqtfm(req);
|
||||
struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
|
||||
struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
|
||||
struct aead_request *subreq = aead_request_ctx(req);
|
||||
crypto_completion_t compl;
|
||||
void *data;
|
||||
unsigned int ivsize = crypto_aead_ivsize(geniv);
|
||||
|
||||
if (req->cryptlen < ivsize + crypto_aead_authsize(geniv))
|
||||
if (req->cryptlen < ivsize)
|
||||
return -EINVAL;
|
||||
|
||||
aead_request_set_tfm(subreq, ctx->geniv.child);
|
||||
aead_request_set_tfm(subreq, ctx->child);
|
||||
|
||||
compl = req->base.complete;
|
||||
data = req->base.data;
|
||||
@ -180,61 +171,10 @@ static int echainiv_decrypt(struct aead_request *req)
|
||||
aead_request_set_ad(subreq, req->assoclen + ivsize);
|
||||
|
||||
scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0);
|
||||
if (req->src != req->dst)
|
||||
scatterwalk_map_and_copy(req->iv, req->dst,
|
||||
req->assoclen, ivsize, 1);
|
||||
|
||||
return crypto_aead_decrypt(subreq);
|
||||
}
|
||||
|
||||
static int echainiv_init(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct crypto_aead *geniv = __crypto_aead_cast(tfm);
|
||||
struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
|
||||
int err;
|
||||
|
||||
spin_lock_init(&ctx->geniv.lock);
|
||||
|
||||
crypto_aead_set_reqsize(geniv, sizeof(struct aead_request));
|
||||
|
||||
err = crypto_get_default_rng();
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
|
||||
crypto_aead_ivsize(geniv));
|
||||
crypto_put_default_rng();
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
ctx->null = crypto_get_default_null_skcipher();
|
||||
err = PTR_ERR(ctx->null);
|
||||
if (IS_ERR(ctx->null))
|
||||
goto out;
|
||||
|
||||
err = aead_geniv_init(tfm);
|
||||
if (err)
|
||||
goto drop_null;
|
||||
|
||||
ctx->geniv.child = geniv->child;
|
||||
geniv->child = geniv;
|
||||
|
||||
out:
|
||||
return err;
|
||||
|
||||
drop_null:
|
||||
crypto_put_default_null_skcipher();
|
||||
goto out;
|
||||
}
|
||||
|
||||
static void echainiv_exit(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct echainiv_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
|
||||
crypto_free_aead(ctx->geniv.child);
|
||||
crypto_put_default_null_skcipher();
|
||||
}
|
||||
|
||||
static int echainiv_aead_create(struct crypto_template *tmpl,
|
||||
struct rtattr **tb)
|
||||
{
|
||||
@ -251,9 +191,6 @@ static int echainiv_aead_create(struct crypto_template *tmpl,
|
||||
spawn = aead_instance_ctx(inst);
|
||||
alg = crypto_spawn_aead_alg(spawn);
|
||||
|
||||
if (alg->base.cra_aead.encrypt)
|
||||
goto done;
|
||||
|
||||
err = -EINVAL;
|
||||
if (inst->alg.ivsize & (sizeof(u32) - 1) ||
|
||||
inst->alg.ivsize > MAX_IV_SIZE)
|
||||
@ -262,14 +199,15 @@ static int echainiv_aead_create(struct crypto_template *tmpl,
|
||||
inst->alg.encrypt = echainiv_encrypt;
|
||||
inst->alg.decrypt = echainiv_decrypt;
|
||||
|
||||
inst->alg.base.cra_init = echainiv_init;
|
||||
inst->alg.base.cra_exit = echainiv_exit;
|
||||
inst->alg.init = aead_init_geniv;
|
||||
inst->alg.exit = aead_exit_geniv;
|
||||
|
||||
inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
|
||||
inst->alg.base.cra_ctxsize = sizeof(struct echainiv_ctx);
|
||||
inst->alg.base.cra_ctxsize = sizeof(struct aead_geniv_ctx);
|
||||
inst->alg.base.cra_ctxsize += inst->alg.ivsize;
|
||||
|
||||
done:
|
||||
inst->free = aead_geniv_free;
|
||||
|
||||
err = aead_register_instance(tmpl, inst);
|
||||
if (err)
|
||||
goto free_inst;
|
||||
|
crypto/gcm.c | 102
@ -38,6 +38,12 @@ struct crypto_rfc4106_ctx {
|
||||
u8 nonce[4];
|
||||
};
|
||||
|
||||
struct crypto_rfc4106_req_ctx {
|
||||
struct scatterlist src[3];
|
||||
struct scatterlist dst[3];
|
||||
struct aead_request subreq;
|
||||
};
|
||||
|
||||
struct crypto_rfc4543_instance_ctx {
|
||||
struct crypto_aead_spawn aead;
|
||||
};
|
||||
@ -601,6 +607,15 @@ static void crypto_gcm_exit_tfm(struct crypto_aead *tfm)
|
||||
crypto_free_ablkcipher(ctx->ctr);
|
||||
}
|
||||
|
||||
static void crypto_gcm_free(struct aead_instance *inst)
|
||||
{
|
||||
struct gcm_instance_ctx *ctx = aead_instance_ctx(inst);
|
||||
|
||||
crypto_drop_skcipher(&ctx->ctr);
|
||||
crypto_drop_ahash(&ctx->ghash);
|
||||
kfree(inst);
|
||||
}
|
||||
|
||||
static int crypto_gcm_create_common(struct crypto_template *tmpl,
|
||||
struct rtattr **tb,
|
||||
const char *full_name,
|
||||
@ -689,6 +704,8 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
|
||||
inst->alg.encrypt = crypto_gcm_encrypt;
|
||||
inst->alg.decrypt = crypto_gcm_decrypt;
|
||||
|
||||
inst->free = crypto_gcm_free;
|
||||
|
||||
err = aead_register_instance(tmpl, inst);
|
||||
if (err)
|
||||
goto out_put_ctr;
|
||||
@ -728,19 +745,9 @@ static int crypto_gcm_create(struct crypto_template *tmpl, struct rtattr **tb)
|
||||
ctr_name, "ghash");
|
||||
}
|
||||
|
||||
static void crypto_gcm_free(struct crypto_instance *inst)
|
||||
{
|
||||
struct gcm_instance_ctx *ctx = crypto_instance_ctx(inst);
|
||||
|
||||
crypto_drop_skcipher(&ctx->ctr);
|
||||
crypto_drop_ahash(&ctx->ghash);
|
||||
kfree(aead_instance(inst));
|
||||
}
|
||||
|
||||
static struct crypto_template crypto_gcm_tmpl = {
|
||||
.name = "gcm",
|
||||
.create = crypto_gcm_create,
|
||||
.free = crypto_gcm_free,
|
||||
.module = THIS_MODULE,
|
||||
};
|
||||
|
||||
@ -770,7 +777,6 @@ static int crypto_gcm_base_create(struct crypto_template *tmpl,
|
||||
static struct crypto_template crypto_gcm_base_tmpl = {
|
||||
.name = "gcm_base",
|
||||
.create = crypto_gcm_base_create,
|
||||
.free = crypto_gcm_free,
|
||||
.module = THIS_MODULE,
|
||||
};
|
||||
|
||||
@ -816,27 +822,50 @@ static int crypto_rfc4106_setauthsize(struct crypto_aead *parent,
|
||||
|
||||
static struct aead_request *crypto_rfc4106_crypt(struct aead_request *req)
|
||||
{
|
||||
struct aead_request *subreq = aead_request_ctx(req);
|
||||
struct crypto_rfc4106_req_ctx *rctx = aead_request_ctx(req);
|
||||
struct crypto_aead *aead = crypto_aead_reqtfm(req);
|
||||
struct crypto_rfc4106_ctx *ctx = crypto_aead_ctx(aead);
|
||||
struct aead_request *subreq = &rctx->subreq;
|
||||
struct crypto_aead *child = ctx->child;
|
||||
struct scatterlist *sg;
|
||||
u8 *iv = PTR_ALIGN((u8 *)(subreq + 1) + crypto_aead_reqsize(child),
|
||||
crypto_aead_alignmask(child) + 1);
|
||||
|
||||
scatterwalk_map_and_copy(iv + 12, req->src, 0, req->assoclen - 8, 0);
|
||||
|
||||
memcpy(iv, ctx->nonce, 4);
|
||||
memcpy(iv + 4, req->iv, 8);
|
||||
|
||||
sg_init_table(rctx->src, 3);
|
||||
sg_set_buf(rctx->src, iv + 12, req->assoclen - 8);
|
||||
sg = scatterwalk_ffwd(rctx->src + 1, req->src, req->assoclen);
|
||||
if (sg != rctx->src + 1)
|
||||
sg_chain(rctx->src, 2, sg);
|
||||
|
||||
if (req->src != req->dst) {
|
||||
sg_init_table(rctx->dst, 3);
|
||||
sg_set_buf(rctx->dst, iv + 12, req->assoclen - 8);
|
||||
sg = scatterwalk_ffwd(rctx->dst + 1, req->dst, req->assoclen);
|
||||
if (sg != rctx->dst + 1)
|
||||
sg_chain(rctx->dst, 2, sg);
|
||||
}
|
||||
|
||||
aead_request_set_tfm(subreq, child);
|
||||
aead_request_set_callback(subreq, req->base.flags, req->base.complete,
|
||||
req->base.data);
|
||||
aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, iv);
|
||||
aead_request_set_ad(subreq, req->assoclen);
|
||||
aead_request_set_crypt(subreq, rctx->src,
|
||||
req->src == req->dst ? rctx->src : rctx->dst,
|
||||
req->cryptlen, iv);
|
||||
aead_request_set_ad(subreq, req->assoclen - 8);
|
||||
|
||||
return subreq;
|
||||
}
|
||||
|
||||
static int crypto_rfc4106_encrypt(struct aead_request *req)
|
||||
{
|
||||
if (req->assoclen != 16 && req->assoclen != 20)
|
||||
return -EINVAL;
|
||||
|
||||
req = crypto_rfc4106_crypt(req);
|
||||
|
||||
return crypto_aead_encrypt(req);
|
||||
@ -844,6 +873,9 @@ static int crypto_rfc4106_encrypt(struct aead_request *req)
|
||||
|
||||
static int crypto_rfc4106_decrypt(struct aead_request *req)
|
||||
{
|
||||
if (req->assoclen != 16 && req->assoclen != 20)
|
||||
return -EINVAL;
|
||||
|
||||
req = crypto_rfc4106_crypt(req);
|
||||
|
||||
return crypto_aead_decrypt(req);
|
||||
@ -867,9 +899,9 @@ static int crypto_rfc4106_init_tfm(struct crypto_aead *tfm)
|
||||
align &= ~(crypto_tfm_ctx_alignment() - 1);
|
||||
crypto_aead_set_reqsize(
|
||||
tfm,
|
||||
sizeof(struct aead_request) +
|
||||
sizeof(struct crypto_rfc4106_req_ctx) +
|
||||
ALIGN(crypto_aead_reqsize(aead), crypto_tfm_ctx_alignment()) +
|
||||
align + 12);
|
||||
align + 24);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -881,6 +913,12 @@ static void crypto_rfc4106_exit_tfm(struct crypto_aead *tfm)
|
||||
crypto_free_aead(ctx->child);
|
||||
}
|
||||
|
||||
static void crypto_rfc4106_free(struct aead_instance *inst)
|
||||
{
|
||||
crypto_drop_aead(aead_instance_ctx(inst));
|
||||
kfree(inst);
|
||||
}
|
||||
|
||||
static int crypto_rfc4106_create(struct crypto_template *tmpl,
|
||||
struct rtattr **tb)
|
||||
{
|
||||
@ -934,7 +972,7 @@ static int crypto_rfc4106_create(struct crypto_template *tmpl,
|
||||
CRYPTO_MAX_ALG_NAME)
|
||||
goto out_drop_alg;
|
||||
|
||||
inst->alg.base.cra_flags |= alg->base.cra_flags & CRYPTO_ALG_ASYNC;
|
||||
inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
|
||||
inst->alg.base.cra_priority = alg->base.cra_priority;
|
||||
inst->alg.base.cra_blocksize = 1;
|
||||
inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
|
||||
@ -952,6 +990,8 @@ static int crypto_rfc4106_create(struct crypto_template *tmpl,
|
||||
inst->alg.encrypt = crypto_rfc4106_encrypt;
|
||||
inst->alg.decrypt = crypto_rfc4106_decrypt;
|
||||
|
||||
inst->free = crypto_rfc4106_free;
|
||||
|
||||
err = aead_register_instance(tmpl, inst);
|
||||
if (err)
|
||||
goto out_drop_alg;
|
||||
@ -966,16 +1006,9 @@ static int crypto_rfc4106_create(struct crypto_template *tmpl,
|
||||
goto out;
|
||||
}
|
||||
|
||||
static void crypto_rfc4106_free(struct crypto_instance *inst)
|
||||
{
|
||||
crypto_drop_aead(crypto_instance_ctx(inst));
|
||||
kfree(aead_instance(inst));
|
||||
}
|
||||
|
||||
static struct crypto_template crypto_rfc4106_tmpl = {
|
||||
.name = "rfc4106",
|
||||
.create = crypto_rfc4106_create,
|
||||
.free = crypto_rfc4106_free,
|
||||
.module = THIS_MODULE,
|
||||
};
|
||||
|
||||
@ -1114,6 +1147,15 @@ static void crypto_rfc4543_exit_tfm(struct crypto_aead *tfm)
|
||||
crypto_put_default_null_skcipher();
|
||||
}
|
||||
|
||||
static void crypto_rfc4543_free(struct aead_instance *inst)
|
||||
{
|
||||
struct crypto_rfc4543_instance_ctx *ctx = aead_instance_ctx(inst);
|
||||
|
||||
crypto_drop_aead(&ctx->aead);
|
||||
|
||||
kfree(inst);
|
||||
}
|
||||
|
||||
static int crypto_rfc4543_create(struct crypto_template *tmpl,
|
||||
struct rtattr **tb)
|
||||
{
|
||||
@ -1187,6 +1229,8 @@ static int crypto_rfc4543_create(struct crypto_template *tmpl,
|
||||
inst->alg.encrypt = crypto_rfc4543_encrypt;
|
||||
inst->alg.decrypt = crypto_rfc4543_decrypt;
|
||||
|
||||
inst->free = crypto_rfc4543_free,
|
||||
|
||||
err = aead_register_instance(tmpl, inst);
|
||||
if (err)
|
||||
goto out_drop_alg;
|
||||
@ -1201,19 +1245,9 @@ static int crypto_rfc4543_create(struct crypto_template *tmpl,
|
||||
goto out;
|
||||
}
|
||||
|
||||
static void crypto_rfc4543_free(struct crypto_instance *inst)
|
||||
{
|
||||
struct crypto_rfc4543_instance_ctx *ctx = crypto_instance_ctx(inst);
|
||||
|
||||
crypto_drop_aead(&ctx->aead);
|
||||
|
||||
kfree(aead_instance(inst));
|
||||
}
|
||||
|
||||
static struct crypto_template crypto_rfc4543_tmpl = {
|
||||
.name = "rfc4543",
|
||||
.create = crypto_rfc4543_create,
|
||||
.free = crypto_rfc4543_free,
|
||||
.module = THIS_MODULE,
|
||||
};
|
||||
|
||||
|

@ -79,7 +79,7 @@ int jent_fips_enabled(void)

void jent_panic(char *s)
{
panic(s);
panic("%s", s);
}

void jent_memcpy(void *dest, const void *src, unsigned int n)

@ -274,11 +274,16 @@ static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb,
u32 type, u32 mask)
{
struct pcrypt_instance_ctx *ctx;
struct crypto_attr_type *algt;
struct aead_instance *inst;
struct aead_alg *alg;
const char *name;
int err;

algt = crypto_get_attr_type(tb);
if (IS_ERR(algt))
return PTR_ERR(algt);

name = crypto_attr_alg_name(tb[1]);
if (IS_ERR(name))
return PTR_ERR(name);
@ -299,6 +304,8 @@ static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb,
if (err)
goto out_drop_aead;

inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC;

inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);

@ -13,31 +13,11 @@
|
||||
|
||||
#include <crypto/algapi.h>
|
||||
#include <crypto/internal/hash.h>
|
||||
#include <crypto/poly1305.h>
|
||||
#include <linux/crypto.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
|
||||
#define POLY1305_BLOCK_SIZE 16
|
||||
#define POLY1305_KEY_SIZE 32
|
||||
#define POLY1305_DIGEST_SIZE 16
|
||||
|
||||
struct poly1305_desc_ctx {
|
||||
/* key */
|
||||
u32 r[5];
|
||||
/* finalize key */
|
||||
u32 s[4];
|
||||
/* accumulator */
|
||||
u32 h[5];
|
||||
/* partial buffer */
|
||||
u8 buf[POLY1305_BLOCK_SIZE];
|
||||
/* bytes used in partial buffer */
|
||||
unsigned int buflen;
|
||||
/* r key has been set */
|
||||
bool rset;
|
||||
/* s key has been set */
|
||||
bool sset;
|
||||
};
|
||||
|
||||
static inline u64 mlt(u64 a, u64 b)
|
||||
{
|
||||
return a * b;
|
||||
@ -58,7 +38,7 @@ static inline u32 le32_to_cpuvp(const void *p)
|
||||
return le32_to_cpup(p);
|
||||
}
|
||||
|
||||
static int poly1305_init(struct shash_desc *desc)
|
||||
int crypto_poly1305_init(struct shash_desc *desc)
|
||||
{
|
||||
struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
|
||||
|
||||
@ -69,8 +49,9 @@ static int poly1305_init(struct shash_desc *desc)
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(crypto_poly1305_init);
|
||||
|
||||
static int poly1305_setkey(struct crypto_shash *tfm,
|
||||
int crypto_poly1305_setkey(struct crypto_shash *tfm,
|
||||
const u8 *key, unsigned int keylen)
|
||||
{
|
||||
/* Poly1305 requires a unique key for each tag, which implies that
|
||||
@ -79,6 +60,7 @@ static int poly1305_setkey(struct crypto_shash *tfm,
|
||||
* the update() call. */
|
||||
return -ENOTSUPP;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(crypto_poly1305_setkey);
|
||||
|
||||
static void poly1305_setrkey(struct poly1305_desc_ctx *dctx, const u8 *key)
|
||||
{
|
||||
@ -98,16 +80,10 @@ static void poly1305_setskey(struct poly1305_desc_ctx *dctx, const u8 *key)
|
||||
dctx->s[3] = le32_to_cpuvp(key + 12);
|
||||
}
|
||||
|
||||
static unsigned int poly1305_blocks(struct poly1305_desc_ctx *dctx,
|
||||
const u8 *src, unsigned int srclen,
|
||||
u32 hibit)
|
||||
unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx,
|
||||
const u8 *src, unsigned int srclen)
|
||||
{
|
||||
u32 r0, r1, r2, r3, r4;
|
||||
u32 s1, s2, s3, s4;
|
||||
u32 h0, h1, h2, h3, h4;
|
||||
u64 d0, d1, d2, d3, d4;
|
||||
|
||||
if (unlikely(!dctx->sset)) {
|
||||
if (!dctx->sset) {
|
||||
if (!dctx->rset && srclen >= POLY1305_BLOCK_SIZE) {
|
||||
poly1305_setrkey(dctx, src);
|
||||
src += POLY1305_BLOCK_SIZE;
|
||||
@ -121,6 +97,25 @@ static unsigned int poly1305_blocks(struct poly1305_desc_ctx *dctx,
|
||||
dctx->sset = true;
|
||||
}
|
||||
}
|
||||
return srclen;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(crypto_poly1305_setdesckey);
|
||||
|
||||
static unsigned int poly1305_blocks(struct poly1305_desc_ctx *dctx,
|
||||
const u8 *src, unsigned int srclen,
|
||||
u32 hibit)
|
||||
{
|
||||
u32 r0, r1, r2, r3, r4;
|
||||
u32 s1, s2, s3, s4;
|
||||
u32 h0, h1, h2, h3, h4;
|
||||
u64 d0, d1, d2, d3, d4;
|
||||
unsigned int datalen;
|
||||
|
||||
if (unlikely(!dctx->sset)) {
|
||||
datalen = crypto_poly1305_setdesckey(dctx, src, srclen);
|
||||
src += srclen - datalen;
|
||||
srclen = datalen;
|
||||
}
|
||||
|
||||
r0 = dctx->r[0];
|
||||
r1 = dctx->r[1];
|
||||
@ -181,7 +176,7 @@ static unsigned int poly1305_blocks(struct poly1305_desc_ctx *dctx,
|
||||
return srclen;
|
||||
}
|
||||
|
||||
static int poly1305_update(struct shash_desc *desc,
|
||||
int crypto_poly1305_update(struct shash_desc *desc,
|
||||
const u8 *src, unsigned int srclen)
|
||||
{
|
||||
struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
|
||||
@ -214,8 +209,9 @@ static int poly1305_update(struct shash_desc *desc,
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(crypto_poly1305_update);
|
||||
|
||||
static int poly1305_final(struct shash_desc *desc, u8 *dst)
|
||||
int crypto_poly1305_final(struct shash_desc *desc, u8 *dst)
|
||||
{
|
||||
struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
|
||||
__le32 *mac = (__le32 *)dst;
|
||||
@ -282,13 +278,14 @@ static int poly1305_final(struct shash_desc *desc, u8 *dst)
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(crypto_poly1305_final);
|
||||
|
||||
static struct shash_alg poly1305_alg = {
|
||||
.digestsize = POLY1305_DIGEST_SIZE,
|
||||
.init = poly1305_init,
|
||||
.update = poly1305_update,
|
||||
.final = poly1305_final,
|
||||
.setkey = poly1305_setkey,
|
||||
.init = crypto_poly1305_init,
|
||||
.update = crypto_poly1305_update,
|
||||
.final = crypto_poly1305_final,
|
||||
.setkey = crypto_poly1305_setkey,
|
||||
.descsize = sizeof(struct poly1305_desc_ctx),
|
||||
.base = {
|
||||
.cra_name = "poly1305",
|
||||
|
crypto/rsa.c | 26
@ -267,12 +267,36 @@ static int rsa_verify(struct akcipher_request *req)
return ret;
}

static int rsa_check_key_length(unsigned int len)
{
switch (len) {
case 512:
case 1024:
case 1536:
case 2048:
case 3072:
case 4096:
return 0;
}

return -EINVAL;
}

static int rsa_setkey(struct crypto_akcipher *tfm, const void *key,
unsigned int keylen)
{
struct rsa_key *pkey = akcipher_tfm_ctx(tfm);
int ret;

return rsa_parse_key(pkey, key, keylen);
ret = rsa_parse_key(pkey, key, keylen);
if (ret)
return ret;

if (rsa_check_key_length(mpi_get_size(pkey->n) << 3)) {
rsa_free_key(pkey);
ret = -EINVAL;
}
return ret;
}

static void rsa_exit_tfm(struct crypto_akcipher *tfm)

@ -28,7 +28,7 @@ int rsa_get_n(void *context, size_t hdrlen, unsigned char tag,
return -ENOMEM;

/* In FIPS mode only allow key size 2K & 3K */
if (fips_enabled && (mpi_get_size(key->n) != 256 ||
if (fips_enabled && (mpi_get_size(key->n) != 256 &&
mpi_get_size(key->n) != 384)) {
pr_err("RSA: key size not allowed in FIPS mode\n");
mpi_free(key->n);
@ -62,7 +62,7 @@ int rsa_get_d(void *context, size_t hdrlen, unsigned char tag,
return -ENOMEM;

/* In FIPS mode only allow key size 2K & 3K */
if (fips_enabled && (mpi_get_size(key->d) != 256 ||
if (fips_enabled && (mpi_get_size(key->d) != 256 &&
mpi_get_size(key->d) != 384)) {
pr_err("RSA: key size not allowed in FIPS mode\n");
mpi_free(key->d);
crypto/seqiv.c | 445
@ -15,7 +15,6 @@
|
||||
|
||||
#include <crypto/internal/geniv.h>
|
||||
#include <crypto/internal/skcipher.h>
|
||||
#include <crypto/null.h>
|
||||
#include <crypto/rng.h>
|
||||
#include <crypto/scatterwalk.h>
|
||||
#include <linux/err.h>
|
||||
@ -26,23 +25,11 @@
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/string.h>
|
||||
|
||||
struct seqniv_request_ctx {
|
||||
struct scatterlist dst[2];
|
||||
struct aead_request subreq;
|
||||
};
|
||||
|
||||
struct seqiv_ctx {
|
||||
spinlock_t lock;
|
||||
u8 salt[] __attribute__ ((aligned(__alignof__(u32))));
|
||||
};
|
||||
|
||||
struct seqiv_aead_ctx {
|
||||
/* aead_geniv_ctx must be first the element */
|
||||
struct aead_geniv_ctx geniv;
|
||||
struct crypto_blkcipher *null;
|
||||
u8 salt[] __attribute__ ((aligned(__alignof__(u32))));
|
||||
};
|
||||
|
||||
static void seqiv_free(struct crypto_instance *inst);
|
||||
|
||||
static void seqiv_complete2(struct skcipher_givcrypt_request *req, int err)
|
||||
@ -71,32 +58,6 @@ static void seqiv_complete(struct crypto_async_request *base, int err)
|
||||
skcipher_givcrypt_complete(req, err);
|
||||
}
|
||||
|
||||
static void seqiv_aead_complete2(struct aead_givcrypt_request *req, int err)
|
||||
{
|
||||
struct aead_request *subreq = aead_givcrypt_reqctx(req);
|
||||
struct crypto_aead *geniv;
|
||||
|
||||
if (err == -EINPROGRESS)
|
||||
return;
|
||||
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
geniv = aead_givcrypt_reqtfm(req);
|
||||
memcpy(req->areq.iv, subreq->iv, crypto_aead_ivsize(geniv));
|
||||
|
||||
out:
|
||||
kfree(subreq->iv);
|
||||
}
|
||||
|
||||
static void seqiv_aead_complete(struct crypto_async_request *base, int err)
|
||||
{
|
||||
struct aead_givcrypt_request *req = base->data;
|
||||
|
||||
seqiv_aead_complete2(req, err);
|
||||
aead_givcrypt_complete(req, err);
|
||||
}
|
||||
|
||||
static void seqiv_aead_encrypt_complete2(struct aead_request *req, int err)
|
||||
{
|
||||
struct aead_request *subreq = aead_request_ctx(req);
|
||||
@ -124,50 +85,6 @@ static void seqiv_aead_encrypt_complete(struct crypto_async_request *base,
|
||||
aead_request_complete(req, err);
|
||||
}
|
||||
|
||||
static void seqniv_aead_encrypt_complete2(struct aead_request *req, int err)
|
||||
{
|
||||
unsigned int ivsize = 8;
|
||||
u8 data[20];
|
||||
|
||||
if (err == -EINPROGRESS)
|
||||
return;
|
||||
|
||||
/* Swap IV and ESP header back to correct order. */
|
||||
scatterwalk_map_and_copy(data, req->dst, 0, req->assoclen + ivsize, 0);
|
||||
scatterwalk_map_and_copy(data + ivsize, req->dst, 0, req->assoclen, 1);
|
||||
scatterwalk_map_and_copy(data, req->dst, req->assoclen, ivsize, 1);
|
||||
}
|
||||
|
||||
static void seqniv_aead_encrypt_complete(struct crypto_async_request *base,
|
||||
int err)
|
||||
{
|
||||
struct aead_request *req = base->data;
|
||||
|
||||
seqniv_aead_encrypt_complete2(req, err);
|
||||
aead_request_complete(req, err);
|
||||
}
|
||||
|
||||
static void seqniv_aead_decrypt_complete2(struct aead_request *req, int err)
|
||||
{
|
||||
u8 data[4];
|
||||
|
||||
if (err == -EINPROGRESS)
|
||||
return;
|
||||
|
||||
/* Move ESP header back to correct location. */
|
||||
scatterwalk_map_and_copy(data, req->dst, 16, req->assoclen - 8, 0);
|
||||
scatterwalk_map_and_copy(data, req->dst, 8, req->assoclen - 8, 1);
|
||||
}
|
||||
|
||||
static void seqniv_aead_decrypt_complete(struct crypto_async_request *base,
|
||||
int err)
|
||||
{
|
||||
struct aead_request *req = base->data;
|
||||
|
||||
seqniv_aead_decrypt_complete2(req, err);
|
||||
aead_request_complete(req, err);
|
||||
}
|
||||
|
||||
static void seqiv_geniv(struct seqiv_ctx *ctx, u8 *info, u64 seq,
|
||||
unsigned int ivsize)
|
||||
{
|
||||
@ -227,112 +144,10 @@ static int seqiv_givencrypt(struct skcipher_givcrypt_request *req)
|
||||
return err;
|
||||
}
|
||||
|
||||
static int seqiv_aead_givencrypt(struct aead_givcrypt_request *req)
|
||||
{
|
||||
struct crypto_aead *geniv = aead_givcrypt_reqtfm(req);
|
||||
struct seqiv_ctx *ctx = crypto_aead_ctx(geniv);
|
||||
struct aead_request *areq = &req->areq;
|
||||
struct aead_request *subreq = aead_givcrypt_reqctx(req);
|
||||
crypto_completion_t compl;
|
||||
void *data;
|
||||
u8 *info;
|
||||
unsigned int ivsize;
|
||||
int err;
|
||||
|
||||
aead_request_set_tfm(subreq, aead_geniv_base(geniv));
|
||||
|
||||
compl = areq->base.complete;
|
||||
data = areq->base.data;
|
||||
info = areq->iv;
|
||||
|
||||
ivsize = crypto_aead_ivsize(geniv);
|
||||
|
||||
if (unlikely(!IS_ALIGNED((unsigned long)info,
|
||||
crypto_aead_alignmask(geniv) + 1))) {
|
||||
info = kmalloc(ivsize, areq->base.flags &
|
||||
CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL:
|
||||
GFP_ATOMIC);
|
||||
if (!info)
|
||||
return -ENOMEM;
|
||||
|
||||
compl = seqiv_aead_complete;
|
||||
data = req;
|
||||
}
|
||||
|
||||
aead_request_set_callback(subreq, areq->base.flags, compl, data);
|
||||
aead_request_set_crypt(subreq, areq->src, areq->dst, areq->cryptlen,
|
||||
info);
|
||||
aead_request_set_assoc(subreq, areq->assoc, areq->assoclen);
|
||||
|
||||
seqiv_geniv(ctx, info, req->seq, ivsize);
|
||||
memcpy(req->giv, info, ivsize);
|
||||
|
||||
err = crypto_aead_encrypt(subreq);
|
||||
if (unlikely(info != areq->iv))
|
||||
seqiv_aead_complete2(req, err);
|
||||
return err;
|
||||
}
|
||||
|
||||
static int seqniv_aead_encrypt(struct aead_request *req)
|
||||
{
|
||||
struct crypto_aead *geniv = crypto_aead_reqtfm(req);
|
||||
struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv);
|
||||
struct seqniv_request_ctx *rctx = aead_request_ctx(req);
|
||||
struct aead_request *subreq = &rctx->subreq;
|
||||
struct scatterlist *dst;
|
||||
crypto_completion_t compl;
|
||||
void *data;
|
||||
unsigned int ivsize = 8;
|
||||
u8 buf[20] __attribute__ ((aligned(__alignof__(u32))));
|
||||
int err;
|
||||
|
||||
if (req->cryptlen < ivsize)
|
||||
return -EINVAL;
|
||||
|
||||
/* ESP AD is at most 12 bytes (ESN). */
|
||||
if (req->assoclen > 12)
|
||||
return -EINVAL;
|
||||
|
||||
aead_request_set_tfm(subreq, ctx->geniv.child);
|
||||
|
||||
compl = seqniv_aead_encrypt_complete;
|
||||
data = req;
|
||||
|
||||
if (req->src != req->dst) {
|
||||
struct blkcipher_desc desc = {
|
||||
.tfm = ctx->null,
|
||||
};
|
||||
|
||||
err = crypto_blkcipher_encrypt(&desc, req->dst, req->src,
|
||||
req->assoclen + req->cryptlen);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
|
||||
dst = scatterwalk_ffwd(rctx->dst, req->dst, ivsize);
|
||||
|
||||
aead_request_set_callback(subreq, req->base.flags, compl, data);
|
||||
aead_request_set_crypt(subreq, dst, dst,
|
||||
req->cryptlen - ivsize, req->iv);
|
||||
aead_request_set_ad(subreq, req->assoclen);
|
||||
|
||||
memcpy(buf, req->iv, ivsize);
|
||||
crypto_xor(buf, ctx->salt, ivsize);
|
||||
memcpy(req->iv, buf, ivsize);
|
||||
|
||||
/* Swap order of IV and ESP AD for ICV generation. */
|
||||
scatterwalk_map_and_copy(buf + ivsize, req->dst, 0, req->assoclen, 0);
|
||||
scatterwalk_map_and_copy(buf, req->dst, 0, req->assoclen + ivsize, 1);
|
||||
|
||||
err = crypto_aead_encrypt(subreq);
|
||||
seqniv_aead_encrypt_complete2(req, err);
|
||||
return err;
|
||||
}
|
||||
|
||||
static int seqiv_aead_encrypt(struct aead_request *req)
|
||||
{
|
||||
struct crypto_aead *geniv = crypto_aead_reqtfm(req);
|
||||
struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv);
|
||||
struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
|
||||
struct aead_request *subreq = aead_request_ctx(req);
|
||||
crypto_completion_t compl;
|
||||
void *data;
|
||||
@ -343,7 +158,7 @@ static int seqiv_aead_encrypt(struct aead_request *req)
|
||||
if (req->cryptlen < ivsize)
|
||||
return -EINVAL;
|
||||
|
||||
aead_request_set_tfm(subreq, ctx->geniv.child);
|
||||
aead_request_set_tfm(subreq, ctx->child);
|
||||
|
||||
compl = req->base.complete;
|
||||
data = req->base.data;
|
||||
@ -387,67 +202,10 @@ static int seqiv_aead_encrypt(struct aead_request *req)
|
||||
return err;
|
||||
}
|
||||
|
||||
static int seqniv_aead_decrypt(struct aead_request *req)
|
||||
{
|
||||
struct crypto_aead *geniv = crypto_aead_reqtfm(req);
|
||||
struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv);
|
||||
struct seqniv_request_ctx *rctx = aead_request_ctx(req);
|
||||
struct aead_request *subreq = &rctx->subreq;
|
||||
struct scatterlist *dst;
|
||||
crypto_completion_t compl;
|
||||
void *data;
|
||||
unsigned int ivsize = 8;
|
||||
u8 buf[20];
|
||||
int err;
|
||||
|
||||
if (req->cryptlen < ivsize + crypto_aead_authsize(geniv))
|
||||
return -EINVAL;
|
||||
|
||||
aead_request_set_tfm(subreq, ctx->geniv.child);
|
||||
|
||||
compl = req->base.complete;
|
||||
data = req->base.data;
|
||||
|
||||
if (req->assoclen > 12)
|
||||
return -EINVAL;
|
||||
else if (req->assoclen > 8) {
|
||||
compl = seqniv_aead_decrypt_complete;
|
||||
data = req;
|
||||
}
|
||||
|
||||
if (req->src != req->dst) {
|
||||
struct blkcipher_desc desc = {
|
||||
.tfm = ctx->null,
|
||||
};
|
||||
|
||||
err = crypto_blkcipher_encrypt(&desc, req->dst, req->src,
|
||||
req->assoclen + req->cryptlen);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
|
||||
/* Move ESP AD forward for ICV generation. */
|
||||
scatterwalk_map_and_copy(buf, req->dst, 0, req->assoclen + ivsize, 0);
|
||||
memcpy(req->iv, buf + req->assoclen, ivsize);
|
||||
scatterwalk_map_and_copy(buf, req->dst, ivsize, req->assoclen, 1);
|
||||
|
||||
dst = scatterwalk_ffwd(rctx->dst, req->dst, ivsize);
|
||||
|
||||
aead_request_set_callback(subreq, req->base.flags, compl, data);
|
||||
aead_request_set_crypt(subreq, dst, dst,
|
||||
req->cryptlen - ivsize, req->iv);
|
||||
aead_request_set_ad(subreq, req->assoclen);
|
||||
|
||||
err = crypto_aead_decrypt(subreq);
|
||||
if (req->assoclen > 8)
|
||||
seqniv_aead_decrypt_complete2(req, err);
|
||||
return err;
|
||||
}
|
||||
|
||||
static int seqiv_aead_decrypt(struct aead_request *req)
|
||||
{
|
||||
struct crypto_aead *geniv = crypto_aead_reqtfm(req);
|
||||
struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv);
|
||||
struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
|
||||
struct aead_request *subreq = aead_request_ctx(req);
|
||||
crypto_completion_t compl;
|
||||
void *data;
|
||||
@ -456,7 +214,7 @@ static int seqiv_aead_decrypt(struct aead_request *req)
|
||||
if (req->cryptlen < ivsize + crypto_aead_authsize(geniv))
|
||||
return -EINVAL;
|
||||
|
||||
aead_request_set_tfm(subreq, ctx->geniv.child);
|
||||
aead_request_set_tfm(subreq, ctx->child);
|
||||
|
||||
compl = req->base.complete;
|
||||
data = req->base.data;
|
||||
@ -467,9 +225,6 @@ static int seqiv_aead_decrypt(struct aead_request *req)
|
||||
aead_request_set_ad(subreq, req->assoclen + ivsize);
|
||||
|
||||
scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0);
|
||||
if (req->src != req->dst)
|
||||
scatterwalk_map_and_copy(req->iv, req->dst,
|
||||
req->assoclen, ivsize, 1);
|
||||
|
||||
return crypto_aead_decrypt(subreq);
|
||||
}
|
||||
@ -495,85 +250,6 @@ static int seqiv_init(struct crypto_tfm *tfm)
|
||||
return err ?: skcipher_geniv_init(tfm);
|
||||
}
|
||||
|
||||
static int seqiv_old_aead_init(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct crypto_aead *geniv = __crypto_aead_cast(tfm);
|
||||
struct seqiv_ctx *ctx = crypto_aead_ctx(geniv);
|
||||
int err;
|
||||
|
||||
spin_lock_init(&ctx->lock);
|
||||
|
||||
crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
|
||||
sizeof(struct aead_request));
|
||||
err = 0;
|
||||
if (!crypto_get_default_rng()) {
|
||||
geniv->givencrypt = seqiv_aead_givencrypt;
|
||||
err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
|
||||
crypto_aead_ivsize(geniv));
|
||||
crypto_put_default_rng();
|
||||
}
|
||||
|
||||
return err ?: aead_geniv_init(tfm);
|
||||
}
|
||||
|
||||
static int seqiv_aead_init_common(struct crypto_tfm *tfm, unsigned int reqsize)
|
||||
{
|
||||
struct crypto_aead *geniv = __crypto_aead_cast(tfm);
|
||||
struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv);
|
||||
int err;
|
||||
|
||||
spin_lock_init(&ctx->geniv.lock);
|
||||
|
||||
crypto_aead_set_reqsize(geniv, sizeof(struct aead_request));
|
||||
|
||||
err = crypto_get_default_rng();
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
|
||||
crypto_aead_ivsize(geniv));
|
||||
crypto_put_default_rng();
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
ctx->null = crypto_get_default_null_skcipher();
|
||||
err = PTR_ERR(ctx->null);
|
||||
if (IS_ERR(ctx->null))
|
||||
goto out;
|
||||
|
||||
err = aead_geniv_init(tfm);
|
||||
if (err)
|
||||
goto drop_null;
|
||||
|
||||
ctx->geniv.child = geniv->child;
|
||||
geniv->child = geniv;
|
||||
|
||||
out:
|
||||
return err;
|
||||
|
||||
drop_null:
|
||||
crypto_put_default_null_skcipher();
|
||||
goto out;
|
||||
}
|
||||
|
||||
static int seqiv_aead_init(struct crypto_tfm *tfm)
|
||||
{
|
||||
return seqiv_aead_init_common(tfm, sizeof(struct aead_request));
|
||||
}
|
||||
|
||||
static int seqniv_aead_init(struct crypto_tfm *tfm)
|
||||
{
|
||||
return seqiv_aead_init_common(tfm, sizeof(struct seqniv_request_ctx));
|
||||
}
|
||||
|
||||
static void seqiv_aead_exit(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct seqiv_aead_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
|
||||
crypto_free_aead(ctx->geniv.child);
|
||||
crypto_put_default_null_skcipher();
|
||||
}
|
||||
|
||||
static int seqiv_ablkcipher_create(struct crypto_template *tmpl,
|
||||
struct rtattr **tb)
|
||||
{
|
||||
@ -609,33 +285,6 @@ static int seqiv_ablkcipher_create(struct crypto_template *tmpl,
|
||||
goto out;
|
||||
}
|
||||
|
||||
static int seqiv_old_aead_create(struct crypto_template *tmpl,
|
||||
struct aead_instance *aead)
|
||||
{
|
||||
struct crypto_instance *inst = aead_crypto_instance(aead);
|
||||
int err = -EINVAL;
|
||||
|
||||
if (inst->alg.cra_aead.ivsize < sizeof(u64))
|
||||
goto free_inst;
|
||||
|
||||
inst->alg.cra_init = seqiv_old_aead_init;
|
||||
inst->alg.cra_exit = aead_geniv_exit;
|
||||
|
||||
inst->alg.cra_ctxsize = inst->alg.cra_aead.ivsize;
|
||||
inst->alg.cra_ctxsize += sizeof(struct seqiv_ctx);
|
||||
|
||||
err = crypto_register_instance(tmpl, inst);
|
||||
if (err)
|
||||
goto free_inst;
|
||||
|
||||
out:
|
||||
return err;
|
||||
|
||||
free_inst:
|
||||
aead_geniv_free(aead);
|
||||
goto out;
|
||||
}
|
||||
|
||||
static int seqiv_aead_create(struct crypto_template *tmpl, struct rtattr **tb)
|
||||
{
|
||||
struct aead_instance *inst;
|
||||
@ -650,15 +299,9 @@ static int seqiv_aead_create(struct crypto_template *tmpl, struct rtattr **tb)
|
||||
|
||||
inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
|
||||
|
||||
if (inst->alg.base.cra_aead.encrypt)
|
||||
return seqiv_old_aead_create(tmpl, inst);
|
||||
|
||||
spawn = aead_instance_ctx(inst);
|
||||
alg = crypto_spawn_aead_alg(spawn);
|
||||
|
||||
if (alg->base.cra_aead.encrypt)
|
||||
goto done;
|
||||
|
||||
err = -EINVAL;
|
||||
if (inst->alg.ivsize != sizeof(u64))
|
||||
goto free_inst;
|
||||
@ -666,13 +309,12 @@ static int seqiv_aead_create(struct crypto_template *tmpl, struct rtattr **tb)
|
||||
inst->alg.encrypt = seqiv_aead_encrypt;
|
||||
inst->alg.decrypt = seqiv_aead_decrypt;
|
||||
|
||||
inst->alg.base.cra_init = seqiv_aead_init;
|
||||
inst->alg.base.cra_exit = seqiv_aead_exit;
|
||||
inst->alg.init = aead_init_geniv;
|
||||
inst->alg.exit = aead_exit_geniv;
|
||||
|
||||
inst->alg.base.cra_ctxsize = sizeof(struct seqiv_aead_ctx);
|
||||
inst->alg.base.cra_ctxsize += inst->alg.base.cra_aead.ivsize;
|
||||
inst->alg.base.cra_ctxsize = sizeof(struct aead_geniv_ctx);
|
||||
inst->alg.base.cra_ctxsize += inst->alg.ivsize;
|
||||
|
||||
done:
|
||||
err = aead_register_instance(tmpl, inst);
|
||||
if (err)
|
||||
goto free_inst;
|
||||
@ -702,51 +344,6 @@ static int seqiv_create(struct crypto_template *tmpl, struct rtattr **tb)
|
||||
return err;
|
||||
}
|
||||
|
||||
static int seqniv_create(struct crypto_template *tmpl, struct rtattr **tb)
|
||||
{
|
||||
struct aead_instance *inst;
|
||||
struct crypto_aead_spawn *spawn;
|
||||
struct aead_alg *alg;
|
||||
int err;
|
||||
|
||||
inst = aead_geniv_alloc(tmpl, tb, 0, 0);
|
||||
err = PTR_ERR(inst);
|
||||
if (IS_ERR(inst))
|
||||
goto out;
|
||||
|
||||
spawn = aead_instance_ctx(inst);
|
||||
alg = crypto_spawn_aead_alg(spawn);
|
||||
|
||||
if (alg->base.cra_aead.encrypt)
|
||||
goto done;
|
||||
|
||||
err = -EINVAL;
|
||||
if (inst->alg.ivsize != sizeof(u64))
|
||||
goto free_inst;
|
||||
|
||||
inst->alg.encrypt = seqniv_aead_encrypt;
|
||||
inst->alg.decrypt = seqniv_aead_decrypt;
|
||||
|
||||
inst->alg.base.cra_init = seqniv_aead_init;
|
||||
inst->alg.base.cra_exit = seqiv_aead_exit;
|
||||
|
||||
inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
|
||||
inst->alg.base.cra_ctxsize = sizeof(struct seqiv_aead_ctx);
|
||||
inst->alg.base.cra_ctxsize += inst->alg.ivsize;
|
||||
|
||||
done:
|
||||
err = aead_register_instance(tmpl, inst);
|
||||
if (err)
|
||||
goto free_inst;
|
||||
|
||||
out:
|
||||
return err;
|
||||
|
||||
free_inst:
|
||||
aead_geniv_free(inst);
|
||||
goto out;
|
||||
}
|
||||
|
||||
static void seqiv_free(struct crypto_instance *inst)
|
||||
{
|
||||
if ((inst->alg.cra_flags ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK)
|
||||
@ -762,36 +359,13 @@ static struct crypto_template seqiv_tmpl = {
|
||||
.module = THIS_MODULE,
|
||||
};
|
||||
|
||||
static struct crypto_template seqniv_tmpl = {
|
||||
.name = "seqniv",
|
||||
.create = seqniv_create,
|
||||
.free = seqiv_free,
|
||||
.module = THIS_MODULE,
|
||||
};
|
||||
|
||||
static int __init seqiv_module_init(void)
|
||||
{
|
||||
int err;
|
||||
|
||||
err = crypto_register_template(&seqiv_tmpl);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
err = crypto_register_template(&seqniv_tmpl);
|
||||
if (err)
|
||||
goto out_undo_niv;
|
||||
|
||||
out:
|
||||
return err;
|
||||
|
||||
out_undo_niv:
|
||||
crypto_unregister_template(&seqiv_tmpl);
|
||||
goto out;
|
||||
return crypto_register_template(&seqiv_tmpl);
|
||||
}
|
||||
|
||||
static void __exit seqiv_module_exit(void)
|
||||
{
|
||||
crypto_unregister_template(&seqniv_tmpl);
|
||||
crypto_unregister_template(&seqiv_tmpl);
|
||||
}
|
||||
|
||||
@ -801,4 +375,3 @@ module_exit(seqiv_module_exit);
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_DESCRIPTION("Sequence Number IV Generator");
|
||||
MODULE_ALIAS_CRYPTO("seqiv");
|
||||
MODULE_ALIAS_CRYPTO("seqniv");
|
||||
|
crypto/skcipher.c | 245 (new file)
@ -0,0 +1,245 @@
|
||||
/*
|
||||
* Symmetric key cipher operations.
|
||||
*
|
||||
* Generic encrypt/decrypt wrapper for ciphers, handles operations across
|
||||
* multiple page boundaries by using temporary blocks. In user context,
|
||||
* the kernel is given a chance to schedule us once per page.
|
||||
*
|
||||
* Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the Free
|
||||
* Software Foundation; either version 2 of the License, or (at your option)
|
||||
* any later version.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <crypto/internal/skcipher.h>
|
||||
#include <linux/bug.h>
|
||||
#include <linux/module.h>
|
||||
|
||||
#include "internal.h"
|
||||
|
||||
static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
|
||||
{
|
||||
if (alg->cra_type == &crypto_blkcipher_type)
|
||||
return sizeof(struct crypto_blkcipher *);
|
||||
|
||||
BUG_ON(alg->cra_type != &crypto_ablkcipher_type &&
|
||||
alg->cra_type != &crypto_givcipher_type);
|
||||
|
||||
return sizeof(struct crypto_ablkcipher *);
|
||||
}
|
||||
|
||||
static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm,
|
||||
const u8 *key, unsigned int keylen)
|
||||
{
|
||||
struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
|
||||
struct crypto_blkcipher *blkcipher = *ctx;
|
||||
int err;
|
||||
|
||||
crypto_blkcipher_clear_flags(blkcipher, ~0);
|
||||
crypto_blkcipher_set_flags(blkcipher, crypto_skcipher_get_flags(tfm) &
|
||||
CRYPTO_TFM_REQ_MASK);
|
||||
err = crypto_blkcipher_setkey(blkcipher, key, keylen);
|
||||
crypto_skcipher_set_flags(tfm, crypto_blkcipher_get_flags(blkcipher) &
|
||||
CRYPTO_TFM_RES_MASK);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int skcipher_crypt_blkcipher(struct skcipher_request *req,
|
||||
int (*crypt)(struct blkcipher_desc *,
|
||||
struct scatterlist *,
|
||||
struct scatterlist *,
|
||||
unsigned int))
|
||||
{
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
|
||||
struct blkcipher_desc desc = {
|
||||
.tfm = *ctx,
|
||||
.info = req->iv,
|
||||
.flags = req->base.flags,
|
||||
};
|
||||
|
||||
|
||||
return crypt(&desc, req->dst, req->src, req->cryptlen);
|
||||
}
|
||||
|
||||
static int skcipher_encrypt_blkcipher(struct skcipher_request *req)
|
||||
{
|
||||
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
|
||||
struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
|
||||
struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
|
||||
|
||||
return skcipher_crypt_blkcipher(req, alg->encrypt);
|
||||
}
|
||||
|
||||
static int skcipher_decrypt_blkcipher(struct skcipher_request *req)
|
||||
{
|
||||
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
|
||||
struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
|
||||
struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
|
||||
|
||||
return skcipher_crypt_blkcipher(req, alg->decrypt);
|
||||
}
|
||||
|
||||
static void crypto_exit_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);
|
||||
|
||||
crypto_free_blkcipher(*ctx);
|
||||
}
|
||||
|
||||
int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct crypto_alg *calg = tfm->__crt_alg;
|
||||
struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
|
||||
struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);
|
||||
struct crypto_blkcipher *blkcipher;
|
||||
struct crypto_tfm *btfm;
|
||||
|
||||
if (!crypto_mod_get(calg))
|
||||
return -EAGAIN;
|
||||
|
||||
btfm = __crypto_alloc_tfm(calg, CRYPTO_ALG_TYPE_BLKCIPHER,
|
||||
CRYPTO_ALG_TYPE_MASK);
|
||||
if (IS_ERR(btfm)) {
|
||||
crypto_mod_put(calg);
|
||||
return PTR_ERR(btfm);
|
||||
}
|
||||
|
||||
blkcipher = __crypto_blkcipher_cast(btfm);
|
||||
*ctx = blkcipher;
|
||||
tfm->exit = crypto_exit_skcipher_ops_blkcipher;
|
||||
|
||||
skcipher->setkey = skcipher_setkey_blkcipher;
|
||||
skcipher->encrypt = skcipher_encrypt_blkcipher;
|
||||
skcipher->decrypt = skcipher_decrypt_blkcipher;
|
||||
|
||||
skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int skcipher_setkey_ablkcipher(struct crypto_skcipher *tfm,
|
||||
const u8 *key, unsigned int keylen)
|
||||
{
|
||||
struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
|
||||
struct crypto_ablkcipher *ablkcipher = *ctx;
|
||||
int err;
|
||||
|
||||
crypto_ablkcipher_clear_flags(ablkcipher, ~0);
|
||||
crypto_ablkcipher_set_flags(ablkcipher,
|
||||
crypto_skcipher_get_flags(tfm) &
|
||||
CRYPTO_TFM_REQ_MASK);
|
||||
err = crypto_ablkcipher_setkey(ablkcipher, key, keylen);
|
||||
crypto_skcipher_set_flags(tfm,
|
||||
crypto_ablkcipher_get_flags(ablkcipher) &
|
||||
CRYPTO_TFM_RES_MASK);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int skcipher_crypt_ablkcipher(struct skcipher_request *req,
|
||||
int (*crypt)(struct ablkcipher_request *))
|
||||
{
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
|
||||
struct ablkcipher_request *subreq = skcipher_request_ctx(req);
|
||||
|
||||
ablkcipher_request_set_tfm(subreq, *ctx);
|
||||
ablkcipher_request_set_callback(subreq, skcipher_request_flags(req),
|
||||
req->base.complete, req->base.data);
|
||||
ablkcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
|
||||
req->iv);
|
||||
|
||||
return crypt(subreq);
|
||||
}
|
||||
|
||||
static int skcipher_encrypt_ablkcipher(struct skcipher_request *req)
|
||||
{
|
||||
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
|
||||
struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
|
||||
struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
|
||||
|
||||
return skcipher_crypt_ablkcipher(req, alg->encrypt);
|
||||
}
|
||||
|
||||
static int skcipher_decrypt_ablkcipher(struct skcipher_request *req)
|
||||
{
|
||||
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
|
||||
struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
|
||||
struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
|
||||
|
||||
return skcipher_crypt_ablkcipher(req, alg->decrypt);
|
||||
}
|
||||
|
||||
static void crypto_exit_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);
|
||||
|
||||
crypto_free_ablkcipher(*ctx);
|
||||
}
|
||||
|
||||
int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct crypto_alg *calg = tfm->__crt_alg;
|
||||
struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
|
||||
struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);
|
||||
struct crypto_ablkcipher *ablkcipher;
|
||||
struct crypto_tfm *abtfm;
|
||||
|
||||
if (!crypto_mod_get(calg))
|
||||
return -EAGAIN;
|
||||
|
||||
abtfm = __crypto_alloc_tfm(calg, 0, 0);
|
||||
if (IS_ERR(abtfm)) {
|
||||
crypto_mod_put(calg);
|
||||
return PTR_ERR(abtfm);
|
||||
}
|
||||
|
||||
ablkcipher = __crypto_ablkcipher_cast(abtfm);
|
||||
*ctx = ablkcipher;
|
||||
tfm->exit = crypto_exit_skcipher_ops_ablkcipher;
|
||||
|
||||
skcipher->setkey = skcipher_setkey_ablkcipher;
|
||||
skcipher->encrypt = skcipher_encrypt_ablkcipher;
|
||||
skcipher->decrypt = skcipher_decrypt_ablkcipher;
|
||||
|
||||
skcipher->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
|
||||
skcipher->reqsize = crypto_ablkcipher_reqsize(ablkcipher) +
|
||||
sizeof(struct ablkcipher_request);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
|
||||
{
|
||||
if (tfm->__crt_alg->cra_type == &crypto_blkcipher_type)
|
||||
return crypto_init_skcipher_ops_blkcipher(tfm);
|
||||
|
||||
BUG_ON(tfm->__crt_alg->cra_type != &crypto_ablkcipher_type &&
|
||||
tfm->__crt_alg->cra_type != &crypto_givcipher_type);
|
||||
|
||||
return crypto_init_skcipher_ops_ablkcipher(tfm);
|
||||
}
|
||||
|
||||
static const struct crypto_type crypto_skcipher_type2 = {
|
||||
.extsize = crypto_skcipher_extsize,
|
||||
.init_tfm = crypto_skcipher_init_tfm,
|
||||
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
|
||||
.maskset = CRYPTO_ALG_TYPE_BLKCIPHER_MASK,
|
||||
.type = CRYPTO_ALG_TYPE_BLKCIPHER,
|
||||
.tfmsize = offsetof(struct crypto_skcipher, base),
|
||||
};
|
||||
|
||||
struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
|
||||
u32 type, u32 mask)
|
||||
{
|
||||
return crypto_alloc_tfm(alg_name, &crypto_skcipher_type2, type, mask);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_DESCRIPTION("Symmetric key cipher type");
|
@ -73,6 +73,22 @@ static char *check[] = {
|
||||
"lzo", "cts", "zlib", NULL
|
||||
};
|
||||
|
||||
struct tcrypt_result {
|
||||
struct completion completion;
|
||||
int err;
|
||||
};
|
||||
|
||||
static void tcrypt_complete(struct crypto_async_request *req, int err)
|
||||
{
|
||||
struct tcrypt_result *res = req->data;
|
||||
|
||||
if (err == -EINPROGRESS)
|
||||
return;
|
||||
|
||||
res->err = err;
|
||||
complete(&res->completion);
|
||||
}
|
||||
|
||||
static int test_cipher_jiffies(struct blkcipher_desc *desc, int enc,
|
||||
struct scatterlist *sg, int blen, int secs)
|
||||
{
|
||||
@ -143,6 +159,20 @@ static int test_cipher_cycles(struct blkcipher_desc *desc, int enc,
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline int do_one_aead_op(struct aead_request *req, int ret)
|
||||
{
|
||||
if (ret == -EINPROGRESS || ret == -EBUSY) {
|
||||
struct tcrypt_result *tr = req->base.data;
|
||||
|
||||
ret = wait_for_completion_interruptible(&tr->completion);
|
||||
if (!ret)
|
||||
ret = tr->err;
|
||||
reinit_completion(&tr->completion);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
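
/*
 * Note added for clarity (not part of the patch): with the asynchronous
 * crypto API of this era, crypto_aead_encrypt()/decrypt() may return
 * -EINPROGRESS (request queued; the tcrypt_complete() callback above reports
 * the final status) or -EBUSY (request put on the backlog because
 * CRYPTO_TFM_REQ_MAY_BACKLOG was set; the callback fires once with
 * -EINPROGRESS when processing starts and again with the final status).
 * do_one_aead_op() folds both cases back into a synchronous result for the
 * timing loops below.
 */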
|
||||
|
||||
static int test_aead_jiffies(struct aead_request *req, int enc,
|
||||
int blen, int secs)
|
||||
{
|
||||
@ -153,9 +183,9 @@ static int test_aead_jiffies(struct aead_request *req, int enc,
|
||||
for (start = jiffies, end = start + secs * HZ, bcount = 0;
|
||||
time_before(jiffies, end); bcount++) {
|
||||
if (enc)
|
||||
ret = crypto_aead_encrypt(req);
|
||||
ret = do_one_aead_op(req, crypto_aead_encrypt(req));
|
||||
else
|
||||
ret = crypto_aead_decrypt(req);
|
||||
ret = do_one_aead_op(req, crypto_aead_decrypt(req));
|
||||
|
||||
if (ret)
|
||||
return ret;
|
||||
@ -177,9 +207,9 @@ static int test_aead_cycles(struct aead_request *req, int enc, int blen)
|
||||
/* Warm-up run. */
|
||||
for (i = 0; i < 4; i++) {
|
||||
if (enc)
|
||||
ret = crypto_aead_encrypt(req);
|
||||
ret = do_one_aead_op(req, crypto_aead_encrypt(req));
|
||||
else
|
||||
ret = crypto_aead_decrypt(req);
|
||||
ret = do_one_aead_op(req, crypto_aead_decrypt(req));
|
||||
|
||||
if (ret)
|
||||
goto out;
|
||||
@ -191,9 +221,9 @@ static int test_aead_cycles(struct aead_request *req, int enc, int blen)
|
||||
|
||||
start = get_cycles();
|
||||
if (enc)
|
||||
ret = crypto_aead_encrypt(req);
|
||||
ret = do_one_aead_op(req, crypto_aead_encrypt(req));
|
||||
else
|
||||
ret = crypto_aead_decrypt(req);
|
||||
ret = do_one_aead_op(req, crypto_aead_decrypt(req));
|
||||
end = get_cycles();
|
||||
|
||||
if (ret)
|
||||
@ -286,6 +316,7 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
|
||||
char *axbuf[XBUFSIZE];
|
||||
unsigned int *b_size;
|
||||
unsigned int iv_len;
|
||||
struct tcrypt_result result;
|
||||
|
||||
iv = kzalloc(MAX_IVLEN, GFP_KERNEL);
|
||||
if (!iv)
|
||||
@ -321,6 +352,7 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
|
||||
goto out_notfm;
|
||||
}
|
||||
|
||||
init_completion(&result.completion);
|
||||
printk(KERN_INFO "\ntesting speed of %s (%s) %s\n", algo,
|
||||
get_driver_name(crypto_aead, tfm), e);
|
||||
|
||||
@ -331,6 +363,9 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
|
||||
goto out_noreq;
|
||||
}
|
||||
|
||||
aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
|
||||
tcrypt_complete, &result);
|
||||
|
||||
i = 0;
|
||||
do {
|
||||
b_size = aead_sizes;
|
||||
@ -749,22 +784,6 @@ static void test_hash_speed(const char *algo, unsigned int secs,
|
||||
crypto_free_hash(tfm);
|
||||
}
|
||||
|
||||
struct tcrypt_result {
|
||||
struct completion completion;
|
||||
int err;
|
||||
};
|
||||
|
||||
static void tcrypt_complete(struct crypto_async_request *req, int err)
|
||||
{
|
||||
struct tcrypt_result *res = req->data;
|
||||
|
||||
if (err == -EINPROGRESS)
|
||||
return;
|
||||
|
||||
res->err = err;
|
||||
complete(&res->completion);
|
||||
}
|
||||
|
||||
static inline int do_one_ahash_op(struct ahash_request *req, int ret)
|
||||
{
|
||||
if (ret == -EINPROGRESS || ret == -EBUSY) {
|
||||
@ -1759,14 +1778,27 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
|
||||
|
||||
case 211:
|
||||
test_aead_speed("rfc4106(gcm(aes))", ENCRYPT, sec,
|
||||
NULL, 0, 16, 16, aead_speed_template_20);
|
||||
test_aead_speed("gcm(aes)", ENCRYPT, sec,
|
||||
NULL, 0, 16, 8, aead_speed_template_20);
|
||||
break;
|
||||
|
||||
case 212:
|
||||
test_aead_speed("rfc4309(ccm(aes))", ENCRYPT, sec,
|
||||
NULL, 0, 16, 8, aead_speed_template_19);
|
||||
NULL, 0, 16, 16, aead_speed_template_19);
|
||||
break;
|
||||
|
||||
case 213:
|
||||
test_aead_speed("rfc7539esp(chacha20,poly1305)", ENCRYPT, sec,
|
||||
NULL, 0, 16, 8, aead_speed_template_36);
|
||||
break;
|
||||
|
||||
case 214:
|
||||
test_cipher_speed("chacha20", ENCRYPT, sec, NULL, 0,
|
||||
speed_template_32);
|
||||
break;
|
||||
|
||||
|
||||
case 300:
|
||||
if (alg) {
|
||||
test_hash_speed(alg, sec, generic_hash_speed_template);
|
||||
@ -1855,6 +1887,10 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
|
||||
test_hash_speed("crct10dif", sec, generic_hash_speed_template);
|
||||
if (mode > 300 && mode < 400) break;
|
||||
|
||||
case 321:
|
||||
test_hash_speed("poly1305", sec, poly1305_speed_template);
|
||||
if (mode > 300 && mode < 400) break;
|
||||
|
||||
case 399:
|
||||
break;
|
||||
|
||||
|
@ -61,12 +61,14 @@ static u8 speed_template_32_40_48[] = {32, 40, 48, 0};
|
||||
static u8 speed_template_32_48[] = {32, 48, 0};
|
||||
static u8 speed_template_32_48_64[] = {32, 48, 64, 0};
|
||||
static u8 speed_template_32_64[] = {32, 64, 0};
|
||||
static u8 speed_template_32[] = {32, 0};
|
||||
|
||||
/*
|
||||
* AEAD speed tests
|
||||
*/
|
||||
static u8 aead_speed_template_19[] = {19, 0};
|
||||
static u8 aead_speed_template_20[] = {20, 0};
|
||||
static u8 aead_speed_template_36[] = {36, 0};
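/*
 * Added note (an inference from how test_aead_speed() consumes its keysize
 * argument, not text from the patch): each aead_speed_template_* array is a
 * zero-terminated list of key sizes in bytes.  19 = 16-byte AES key + 3-byte
 * RFC4309 salt, 20 = 16-byte AES key + 4-byte RFC4106 nonce, 36 = 32-byte
 * ChaCha20 key + 4-byte RFC7539(ESP) nonce.
 */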
|
||||
|
||||
/*
|
||||
* Digest speed tests
|
||||
@ -127,4 +129,22 @@ static struct hash_speed hash_speed_template_16[] = {
|
||||
{ .blen = 0, .plen = 0, .klen = 0, }
|
||||
};
|
||||
|
||||
static struct hash_speed poly1305_speed_template[] = {
|
||||
{ .blen = 96, .plen = 16, },
|
||||
{ .blen = 96, .plen = 32, },
|
||||
{ .blen = 96, .plen = 96, },
|
||||
{ .blen = 288, .plen = 16, },
|
||||
{ .blen = 288, .plen = 32, },
|
||||
{ .blen = 288, .plen = 288, },
|
||||
{ .blen = 1056, .plen = 32, },
|
||||
{ .blen = 1056, .plen = 1056, },
|
||||
{ .blen = 2080, .plen = 32, },
|
||||
{ .blen = 2080, .plen = 2080, },
|
||||
{ .blen = 4128, .plen = 4128, },
|
||||
{ .blen = 8224, .plen = 8224, },
|
||||
|
||||
/* End marker */
|
||||
{ .blen = 0, .plen = 0, }
|
||||
};
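
/*
 * Added note (an inference from how test_hash_speed() walks these templates
 * elsewhere in tcrypt.c, not text from the patch): .blen is the total buffer
 * length hashed per iteration and .plen is the chunk fed to each update()
 * call, so { .blen = 288, .plen = 32 } hashes 288 bytes in nine 32-byte
 * updates.  A zeroed entry terminates the table.
 */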
|
||||
|
||||
#endif /* _CRYPTO_TCRYPT_H */
|
||||
|
@ -22,6 +22,7 @@
|
||||
|
||||
#include <crypto/aead.h>
|
||||
#include <crypto/hash.h>
|
||||
#include <crypto/skcipher.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/fips.h>
|
||||
#include <linux/module.h>
|
||||
@ -921,15 +922,15 @@ static int test_cipher(struct crypto_cipher *tfm, int enc,
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int __test_skcipher(struct crypto_ablkcipher *tfm, int enc,
|
||||
static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
|
||||
struct cipher_testvec *template, unsigned int tcount,
|
||||
const bool diff_dst, const int align_offset)
|
||||
{
|
||||
const char *algo =
|
||||
crypto_tfm_alg_driver_name(crypto_ablkcipher_tfm(tfm));
|
||||
crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm));
|
||||
unsigned int i, j, k, n, temp;
|
||||
char *q;
|
||||
struct ablkcipher_request *req;
|
||||
struct skcipher_request *req;
|
||||
struct scatterlist sg[8];
|
||||
struct scatterlist sgout[8];
|
||||
const char *e, *d;
|
||||
@ -958,15 +959,15 @@ static int __test_skcipher(struct crypto_ablkcipher *tfm, int enc,
|
||||
|
||||
init_completion(&result.completion);
|
||||
|
||||
req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
|
||||
req = skcipher_request_alloc(tfm, GFP_KERNEL);
|
||||
if (!req) {
|
||||
pr_err("alg: skcipher%s: Failed to allocate request for %s\n",
|
||||
d, algo);
|
||||
goto out;
|
||||
}
|
||||
|
||||
ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
|
||||
tcrypt_complete, &result);
|
||||
skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
|
||||
tcrypt_complete, &result);
|
||||
|
||||
j = 0;
|
||||
for (i = 0; i < tcount; i++) {
|
||||
@ -987,15 +988,16 @@ static int __test_skcipher(struct crypto_ablkcipher *tfm, int enc,
|
||||
data += align_offset;
|
||||
memcpy(data, template[i].input, template[i].ilen);
|
||||
|
||||
crypto_ablkcipher_clear_flags(tfm, ~0);
|
||||
crypto_skcipher_clear_flags(tfm, ~0);
|
||||
if (template[i].wk)
|
||||
crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY);
|
||||
crypto_skcipher_set_flags(tfm,
|
||||
CRYPTO_TFM_REQ_WEAK_KEY);
|
||||
|
||||
ret = crypto_ablkcipher_setkey(tfm, template[i].key,
|
||||
template[i].klen);
|
||||
ret = crypto_skcipher_setkey(tfm, template[i].key,
|
||||
template[i].klen);
|
||||
if (!ret == template[i].fail) {
|
||||
pr_err("alg: skcipher%s: setkey failed on test %d for %s: flags=%x\n",
|
||||
d, j, algo, crypto_ablkcipher_get_flags(tfm));
|
||||
d, j, algo, crypto_skcipher_get_flags(tfm));
|
||||
goto out;
|
||||
} else if (ret)
|
||||
continue;
|
||||
@ -1007,10 +1009,10 @@ static int __test_skcipher(struct crypto_ablkcipher *tfm, int enc,
|
||||
sg_init_one(&sgout[0], data, template[i].ilen);
|
||||
}
|
||||
|
||||
ablkcipher_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
|
||||
template[i].ilen, iv);
|
||||
ret = enc ? crypto_ablkcipher_encrypt(req) :
|
||||
crypto_ablkcipher_decrypt(req);
|
||||
skcipher_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
|
||||
template[i].ilen, iv);
|
||||
ret = enc ? crypto_skcipher_encrypt(req) :
|
||||
crypto_skcipher_decrypt(req);
|
||||
|
||||
switch (ret) {
|
||||
case 0:
|
||||
@ -1054,15 +1056,16 @@ static int __test_skcipher(struct crypto_ablkcipher *tfm, int enc,
|
||||
memset(iv, 0, MAX_IVLEN);
|
||||
|
||||
j++;
|
||||
crypto_ablkcipher_clear_flags(tfm, ~0);
|
||||
crypto_skcipher_clear_flags(tfm, ~0);
|
||||
if (template[i].wk)
|
||||
crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY);
|
||||
crypto_skcipher_set_flags(tfm,
|
||||
CRYPTO_TFM_REQ_WEAK_KEY);
|
||||
|
||||
ret = crypto_ablkcipher_setkey(tfm, template[i].key,
|
||||
template[i].klen);
|
||||
ret = crypto_skcipher_setkey(tfm, template[i].key,
|
||||
template[i].klen);
|
||||
if (!ret == template[i].fail) {
|
||||
pr_err("alg: skcipher%s: setkey failed on chunk test %d for %s: flags=%x\n",
|
||||
d, j, algo, crypto_ablkcipher_get_flags(tfm));
|
||||
d, j, algo, crypto_skcipher_get_flags(tfm));
|
||||
goto out;
|
||||
} else if (ret)
|
||||
continue;
|
||||
@ -1100,11 +1103,11 @@ static int __test_skcipher(struct crypto_ablkcipher *tfm, int enc,
|
||||
temp += template[i].tap[k];
|
||||
}
|
||||
|
||||
ablkcipher_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
|
||||
template[i].ilen, iv);
|
||||
skcipher_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
|
||||
template[i].ilen, iv);
|
||||
|
||||
ret = enc ? crypto_ablkcipher_encrypt(req) :
|
||||
crypto_ablkcipher_decrypt(req);
|
||||
ret = enc ? crypto_skcipher_encrypt(req) :
|
||||
crypto_skcipher_decrypt(req);
|
||||
|
||||
switch (ret) {
|
||||
case 0:
|
||||
@ -1157,7 +1160,7 @@ static int __test_skcipher(struct crypto_ablkcipher *tfm, int enc,
|
||||
ret = 0;
|
||||
|
||||
out:
|
||||
ablkcipher_request_free(req);
|
||||
skcipher_request_free(req);
|
||||
if (diff_dst)
|
||||
testmgr_free_buf(xoutbuf);
|
||||
out_nooutbuf:
|
||||
@ -1166,7 +1169,7 @@ static int __test_skcipher(struct crypto_ablkcipher *tfm, int enc,
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int test_skcipher(struct crypto_ablkcipher *tfm, int enc,
|
||||
static int test_skcipher(struct crypto_skcipher *tfm, int enc,
|
||||
struct cipher_testvec *template, unsigned int tcount)
|
||||
{
|
||||
unsigned int alignmask;
|
||||
@ -1578,10 +1581,10 @@ static int alg_test_cipher(const struct alg_test_desc *desc,
|
||||
static int alg_test_skcipher(const struct alg_test_desc *desc,
|
||||
const char *driver, u32 type, u32 mask)
|
||||
{
|
||||
struct crypto_ablkcipher *tfm;
|
||||
struct crypto_skcipher *tfm;
|
||||
int err = 0;
|
||||
|
||||
tfm = crypto_alloc_ablkcipher(driver, type | CRYPTO_ALG_INTERNAL, mask);
|
||||
tfm = crypto_alloc_skcipher(driver, type | CRYPTO_ALG_INTERNAL, mask);
|
||||
if (IS_ERR(tfm)) {
|
||||
printk(KERN_ERR "alg: skcipher: Failed to load transform for "
|
||||
"%s: %ld\n", driver, PTR_ERR(tfm));
|
||||
@ -1600,7 +1603,7 @@ static int alg_test_skcipher(const struct alg_test_desc *desc,
|
||||
desc->suite.cipher.dec.count);
|
||||
|
||||
out:
|
||||
crypto_free_ablkcipher(tfm);
|
||||
crypto_free_skcipher(tfm);
|
||||
return err;
|
||||
}
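
/*
 * Added summary (not part of the patch): the testmgr conversion above maps
 * the old ablkcipher calls onto the new top-level skcipher interface
 * one-for-one:
 *
 *	crypto_alloc_ablkcipher()      -> crypto_alloc_skcipher()
 *	crypto_ablkcipher_setkey()     -> crypto_skcipher_setkey()
 *	ablkcipher_request_alloc()     -> skcipher_request_alloc()
 *	ablkcipher_request_set_crypt() -> skcipher_request_set_crypt()
 *	crypto_ablkcipher_encrypt()    -> crypto_skcipher_encrypt()
 *	ablkcipher_request_free()      -> skcipher_request_free()
 *	crypto_free_ablkcipher()       -> crypto_free_skcipher()
 */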
|
||||
|
||||
@ -2476,6 +2479,7 @@ static const struct alg_test_desc alg_test_descs[] = {
|
||||
}
|
||||
}, {
|
||||
.alg = "cmac(aes)",
|
||||
.fips_allowed = 1,
|
||||
.test = alg_test_hash,
|
||||
.suite = {
|
||||
.hash = {
|
||||
@ -2485,6 +2489,7 @@ static const struct alg_test_desc alg_test_descs[] = {
|
||||
}
|
||||
}, {
|
||||
.alg = "cmac(des3_ede)",
|
||||
.fips_allowed = 1,
|
||||
.test = alg_test_hash,
|
||||
.suite = {
|
||||
.hash = {
|
||||
|
2704	crypto/testmgr.h (file diff suppressed because it is too large)
@ -381,6 +381,9 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
|
||||
clk[IMX6QDL_CLK_ASRC] = imx_clk_gate2_shared("asrc", "asrc_podf", base + 0x68, 6, &share_count_asrc);
|
||||
clk[IMX6QDL_CLK_ASRC_IPG] = imx_clk_gate2_shared("asrc_ipg", "ahb", base + 0x68, 6, &share_count_asrc);
|
||||
clk[IMX6QDL_CLK_ASRC_MEM] = imx_clk_gate2_shared("asrc_mem", "ahb", base + 0x68, 6, &share_count_asrc);
|
||||
clk[IMX6QDL_CLK_CAAM_MEM] = imx_clk_gate2("caam_mem", "ahb", base + 0x68, 8);
|
||||
clk[IMX6QDL_CLK_CAAM_ACLK] = imx_clk_gate2("caam_aclk", "ahb", base + 0x68, 10);
|
||||
clk[IMX6QDL_CLK_CAAM_IPG] = imx_clk_gate2("caam_ipg", "ipg", base + 0x68, 12);
|
||||
clk[IMX6QDL_CLK_CAN1_IPG] = imx_clk_gate2("can1_ipg", "ipg", base + 0x68, 14);
|
||||
clk[IMX6QDL_CLK_CAN1_SERIAL] = imx_clk_gate2("can1_serial", "can_root", base + 0x68, 16);
|
||||
clk[IMX6QDL_CLK_CAN2_IPG] = imx_clk_gate2("can2_ipg", "ipg", base + 0x68, 18);
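/*
 * Added cross-reference (not part of the patch): the three CAAM gate clocks
 * registered here are the ones the caam driver later in this series looks up
 * by name in its probe path ("ipg", "mem" and "aclk" via
 * caam_drv_identify_clk()), which is what lets the i.MX6 CAAM support enable
 * its own clocking.
 */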
|
||||
|
@ -480,4 +480,21 @@ config CRYPTO_DEV_IMGTEC_HASH
|
||||
hardware hash accelerator. Supporting MD5/SHA1/SHA224/SHA256
|
||||
hashing algorithms.
|
||||
|
||||
config CRYPTO_DEV_SUN4I_SS
|
||||
tristate "Support for Allwinner Security System cryptographic accelerator"
|
||||
depends on ARCH_SUNXI
|
||||
select CRYPTO_MD5
|
||||
select CRYPTO_SHA1
|
||||
select CRYPTO_AES
|
||||
select CRYPTO_DES
|
||||
select CRYPTO_BLKCIPHER
|
||||
help
|
||||
Some Allwinner SoC have a crypto accelerator named
|
||||
Security System. Select this if you want to use it.
|
||||
The Security System handle AES/DES/3DES ciphers in CBC mode
|
||||
and SHA1 and MD5 hash algorithms.
|
||||
|
||||
To compile this driver as a module, choose M here: the module
|
||||
will be called sun4i-ss.
|
||||
|
||||
endif # CRYPTO_HW
|
||||
|
@ -28,3 +28,4 @@ obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
|
||||
obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/
|
||||
obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/
|
||||
obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/
|
||||
obj-$(CONFIG_CRYPTO_DEV_SUN4I_SS) += sunxi-ss/
|
||||
|
@ -1113,7 +1113,7 @@ static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data)
|
||||
struct device *dev = (struct device *)data;
|
||||
struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
|
||||
|
||||
if (core_dev->dev->ce_base == 0)
|
||||
if (!core_dev->dev->ce_base)
|
||||
return 0;
|
||||
|
||||
writel(PPC4XX_INTERRUPT_CLR,
|
||||
|
@ -1,6 +1,6 @@
|
||||
config CRYPTO_DEV_FSL_CAAM
|
||||
tristate "Freescale CAAM-Multicore driver backend"
|
||||
depends on FSL_SOC
|
||||
depends on FSL_SOC || ARCH_MXC
|
||||
help
|
||||
Enables the driver module for Freescale's Cryptographic Accelerator
|
||||
and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
|
||||
@ -112,6 +112,14 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API
|
||||
To compile this as a module, choose M here: the module
|
||||
will be called caamrng.
|
||||
|
||||
config CRYPTO_DEV_FSL_CAAM_IMX
|
||||
def_bool SOC_IMX6 || SOC_IMX7D
|
||||
depends on CRYPTO_DEV_FSL_CAAM
|
||||
|
||||
config CRYPTO_DEV_FSL_CAAM_LE
|
||||
def_bool CRYPTO_DEV_FSL_CAAM_IMX || SOC_LS1021A
|
||||
depends on CRYPTO_DEV_FSL_CAAM
|
||||
|
||||
config CRYPTO_DEV_FSL_CAAM_DEBUG
|
||||
bool "Enable debug output in CAAM driver"
|
||||
depends on CRYPTO_DEV_FSL_CAAM
|
||||
|
(file diff suppressed because it is too large)
@ -127,7 +127,7 @@ struct caam_hash_state {
|
||||
int buflen_0;
|
||||
u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
|
||||
int buflen_1;
|
||||
u8 caam_ctx[MAX_CTX_LEN];
|
||||
u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
|
||||
int (*update)(struct ahash_request *req);
|
||||
int (*final)(struct ahash_request *req);
|
||||
int (*finup)(struct ahash_request *req);
|
||||
@ -807,7 +807,7 @@ static int ahash_update_ctx(struct ahash_request *req)
|
||||
* allocate space for base edesc and hw desc commands,
|
||||
* link tables
|
||||
*/
|
||||
edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
|
||||
edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN +
|
||||
sec4_sg_bytes, GFP_DMA | flags);
|
||||
if (!edesc) {
|
||||
dev_err(jrdev,
|
||||
@ -829,7 +829,7 @@ static int ahash_update_ctx(struct ahash_request *req)
|
||||
state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
|
||||
edesc->sec4_sg + 1,
|
||||
buf, state->buf_dma,
|
||||
*buflen, last_buflen);
|
||||
*next_buflen, *buflen);
|
||||
|
||||
if (src_nents) {
|
||||
src_map_to_sec4_sg(jrdev, req->src, src_nents,
|
||||
@ -919,8 +919,8 @@ static int ahash_final_ctx(struct ahash_request *req)
|
||||
sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);
|
||||
|
||||
/* allocate space for base edesc and hw desc commands, link tables */
|
||||
edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
|
||||
sec4_sg_bytes, GFP_DMA | flags);
|
||||
edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes,
|
||||
GFP_DMA | flags);
|
||||
if (!edesc) {
|
||||
dev_err(jrdev, "could not allocate extended descriptor\n");
|
||||
return -ENOMEM;
|
||||
@ -1006,8 +1006,8 @@ static int ahash_finup_ctx(struct ahash_request *req)
|
||||
sizeof(struct sec4_sg_entry);
|
||||
|
||||
/* allocate space for base edesc and hw desc commands, link tables */
|
||||
edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
|
||||
sec4_sg_bytes, GFP_DMA | flags);
|
||||
edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes,
|
||||
GFP_DMA | flags);
|
||||
if (!edesc) {
|
||||
dev_err(jrdev, "could not allocate extended descriptor\n");
|
||||
return -ENOMEM;
|
||||
@ -1092,8 +1092,8 @@ static int ahash_digest(struct ahash_request *req)
|
||||
sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);
|
||||
|
||||
/* allocate space for base edesc and hw desc commands, link tables */
|
||||
edesc = kmalloc(sizeof(struct ahash_edesc) + sec4_sg_bytes +
|
||||
DESC_JOB_IO_LEN, GFP_DMA | flags);
|
||||
edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes + DESC_JOB_IO_LEN,
|
||||
GFP_DMA | flags);
|
||||
if (!edesc) {
|
||||
dev_err(jrdev, "could not allocate extended descriptor\n");
|
||||
return -ENOMEM;
|
||||
@ -1166,8 +1166,7 @@ static int ahash_final_no_ctx(struct ahash_request *req)
|
||||
int sh_len;
|
||||
|
||||
/* allocate space for base edesc and hw desc commands, link tables */
|
||||
edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN,
|
||||
GFP_DMA | flags);
|
||||
edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN, GFP_DMA | flags);
|
||||
if (!edesc) {
|
||||
dev_err(jrdev, "could not allocate extended descriptor\n");
|
||||
return -ENOMEM;
|
||||
@ -1246,7 +1245,7 @@ static int ahash_update_no_ctx(struct ahash_request *req)
|
||||
* allocate space for base edesc and hw desc commands,
|
||||
* link tables
|
||||
*/
|
||||
edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
|
||||
edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN +
|
||||
sec4_sg_bytes, GFP_DMA | flags);
|
||||
if (!edesc) {
|
||||
dev_err(jrdev,
|
||||
@ -1354,8 +1353,8 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
|
||||
sizeof(struct sec4_sg_entry);
|
||||
|
||||
/* allocate space for base edesc and hw desc commands, link tables */
|
||||
edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
|
||||
sec4_sg_bytes, GFP_DMA | flags);
|
||||
edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes,
|
||||
GFP_DMA | flags);
|
||||
if (!edesc) {
|
||||
dev_err(jrdev, "could not allocate extended descriptor\n");
|
||||
return -ENOMEM;
|
||||
@ -1449,7 +1448,7 @@ static int ahash_update_first(struct ahash_request *req)
|
||||
* allocate space for base edesc and hw desc commands,
|
||||
* link tables
|
||||
*/
|
||||
edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
|
||||
edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN +
|
||||
sec4_sg_bytes, GFP_DMA | flags);
|
||||
if (!edesc) {
|
||||
dev_err(jrdev,
|
||||
@ -1843,7 +1842,7 @@ caam_hash_alloc(struct caam_hash_template *template,
|
||||
struct ahash_alg *halg;
|
||||
struct crypto_alg *alg;
|
||||
|
||||
t_alg = kzalloc(sizeof(struct caam_hash_alg), GFP_KERNEL);
|
||||
t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
|
||||
if (!t_alg) {
|
||||
pr_err("failed to allocate t_alg\n");
|
||||
return ERR_PTR(-ENOMEM);
|
||||
@ -1885,8 +1884,10 @@ static int __init caam_algapi_hash_init(void)
|
||||
struct device_node *dev_node;
|
||||
struct platform_device *pdev;
|
||||
struct device *ctrldev;
|
||||
void *priv;
|
||||
int i = 0, err = 0;
|
||||
struct caam_drv_private *priv;
|
||||
unsigned int md_limit = SHA512_DIGEST_SIZE;
|
||||
u32 cha_inst, cha_vid;
|
||||
|
||||
dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
|
||||
if (!dev_node) {
|
||||
@ -1912,19 +1913,40 @@ static int __init caam_algapi_hash_init(void)
|
||||
if (!priv)
|
||||
return -ENODEV;
|
||||
|
||||
/*
|
||||
* Register crypto algorithms the device supports. First, identify
|
||||
* presence and attributes of MD block.
|
||||
*/
|
||||
cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
|
||||
cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
|
||||
|
||||
/*
|
||||
* Skip registration of any hashing algorithms if MD block
|
||||
* is not present.
|
||||
*/
|
||||
if (!((cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT))
|
||||
return -ENODEV;
|
||||
|
||||
/* Limit digest size based on LP256 */
|
||||
if ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256)
|
||||
md_limit = SHA256_DIGEST_SIZE;
|
||||
|
||||
INIT_LIST_HEAD(&hash_list);
|
||||
|
||||
/* register crypto algorithms the device supports */
|
||||
for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
|
||||
/* TODO: check if h/w supports alg */
|
||||
struct caam_hash_alg *t_alg;
|
||||
struct caam_hash_template *alg = driver_hash + i;
|
||||
|
||||
/* If MD size is not supported by device, skip registration */
|
||||
if (alg->template_ahash.halg.digestsize > md_limit)
|
||||
continue;
|
||||
|
||||
/* register hmac version */
|
||||
t_alg = caam_hash_alloc(&driver_hash[i], true);
|
||||
t_alg = caam_hash_alloc(alg, true);
|
||||
if (IS_ERR(t_alg)) {
|
||||
err = PTR_ERR(t_alg);
|
||||
pr_warn("%s alg allocation failed\n",
|
||||
driver_hash[i].driver_name);
|
||||
pr_warn("%s alg allocation failed\n", alg->driver_name);
|
||||
continue;
|
||||
}
|
||||
|
||||
@ -1937,11 +1959,10 @@ static int __init caam_algapi_hash_init(void)
|
||||
list_add_tail(&t_alg->entry, &hash_list);
|
||||
|
||||
/* register unkeyed version */
|
||||
t_alg = caam_hash_alloc(&driver_hash[i], false);
|
||||
t_alg = caam_hash_alloc(alg, false);
|
||||
if (IS_ERR(t_alg)) {
|
||||
err = PTR_ERR(t_alg);
|
||||
pr_warn("%s alg allocation failed\n",
|
||||
driver_hash[i].driver_name);
|
||||
pr_warn("%s alg allocation failed\n", alg->driver_name);
|
||||
continue;
|
||||
}
|
||||
|
||||
|
@ -108,6 +108,10 @@ static void rng_done(struct device *jrdev, u32 *desc, u32 err, void *context)
|
||||
|
||||
atomic_set(&bd->empty, BUF_NOT_EMPTY);
|
||||
complete(&bd->filled);
|
||||
|
||||
/* Buffer refilled, invalidate cache */
|
||||
dma_sync_single_for_cpu(jrdev, bd->addr, RN_BUF_SIZE, DMA_FROM_DEVICE);
|
||||
|
||||
#ifdef DEBUG
|
||||
print_hex_dump(KERN_ERR, "rng refreshed buf@: ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, bd->buf, RN_BUF_SIZE, 1);
|
||||
@ -311,7 +315,7 @@ static int __init caam_rng_init(void)
|
||||
struct device_node *dev_node;
|
||||
struct platform_device *pdev;
|
||||
struct device *ctrldev;
|
||||
void *priv;
|
||||
struct caam_drv_private *priv;
|
||||
int err;
|
||||
|
||||
dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
|
||||
@ -338,20 +342,32 @@ static int __init caam_rng_init(void)
|
||||
if (!priv)
|
||||
return -ENODEV;
|
||||
|
||||
/* Check for an instantiated RNG before registration */
|
||||
if (!(rd_reg32(&priv->ctrl->perfmon.cha_num_ls) & CHA_ID_LS_RNG_MASK))
|
||||
return -ENODEV;
|
||||
|
||||
dev = caam_jr_alloc();
|
||||
if (IS_ERR(dev)) {
|
||||
pr_err("Job Ring Device allocation for transform failed\n");
|
||||
return PTR_ERR(dev);
|
||||
}
|
||||
rng_ctx = kmalloc(sizeof(struct caam_rng_ctx), GFP_DMA);
|
||||
if (!rng_ctx)
|
||||
return -ENOMEM;
|
||||
rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA);
|
||||
if (!rng_ctx) {
|
||||
err = -ENOMEM;
|
||||
goto free_caam_alloc;
|
||||
}
|
||||
err = caam_init_rng(rng_ctx, dev);
|
||||
if (err)
|
||||
return err;
|
||||
goto free_rng_ctx;
|
||||
|
||||
dev_info(dev, "registering rng-caam\n");
|
||||
return hwrng_register(&caam_rng);
|
||||
|
||||
free_rng_ctx:
|
||||
kfree(rng_ctx);
|
||||
free_caam_alloc:
|
||||
caam_jr_free(dev);
|
||||
return err;
|
||||
}
|
||||
|
||||
module_init(caam_rng_init);
|
||||
|
@ -23,6 +23,7 @@
|
||||
#include <linux/types.h>
|
||||
#include <linux/debugfs.h>
|
||||
#include <linux/circ_buf.h>
|
||||
#include <linux/clk.h>
|
||||
#include <net/xfrm.h>
|
||||
|
||||
#include <crypto/algapi.h>
|
||||
|
@ -15,6 +15,24 @@
|
||||
#include "desc_constr.h"
|
||||
#include "error.h"
|
||||
|
||||
/*
|
||||
* i.MX targets tend to have clock control subsystems that can
|
||||
* enable/disable clocking to our device.
|
||||
*/
|
||||
#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
|
||||
static inline struct clk *caam_drv_identify_clk(struct device *dev,
|
||||
char *clk_name)
|
||||
{
|
||||
return devm_clk_get(dev, clk_name);
|
||||
}
|
||||
#else
|
||||
static inline struct clk *caam_drv_identify_clk(struct device *dev,
|
||||
char *clk_name)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
#endif
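
/*
 * Added note with a small sketch (not part of the patch): returning NULL from
 * the non-i.MX stub works because the common clock framework treats a NULL
 * struct clk * as a dummy clock, so clk_prepare_enable() and
 * clk_disable_unprepare() become no-ops.  The probe path can therefore run
 * the same clock code on every platform.  caam_enable_ipg() below is a
 * hypothetical helper, not driver code.
 */
static inline int caam_enable_ipg(struct device *dev,
				  struct caam_drv_private *ctrlpriv)
{
	struct clk *clk = caam_drv_identify_clk(dev, "ipg");

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ctrlpriv->caam_ipg = clk;
	return clk_prepare_enable(clk);	/* returns 0 when clk is NULL */
}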
|
||||
|
||||
/*
|
||||
* Descriptor to instantiate RNG State Handle 0 in normal mode and
|
||||
* load the JDKEK, TDKEK and TDSK registers
|
||||
@ -121,7 +139,7 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
|
||||
flags |= DECO_JQCR_FOUR;
|
||||
|
||||
/* Instruct the DECO to execute it */
|
||||
wr_reg32(&deco->jr_ctl_hi, flags);
|
||||
setbits32(&deco->jr_ctl_hi, flags);
|
||||
|
||||
timeout = 10000000;
|
||||
do {
|
||||
@ -175,7 +193,7 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
|
||||
{
|
||||
struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
|
||||
struct caam_ctrl __iomem *ctrl;
|
||||
u32 *desc, status, rdsta_val;
|
||||
u32 *desc, status = 0, rdsta_val;
|
||||
int ret = 0, sh_idx;
|
||||
|
||||
ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
|
||||
@ -207,7 +225,8 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
|
||||
* CAAM eras), then try again.
|
||||
*/
|
||||
rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_IFMASK;
|
||||
if (status || !(rdsta_val & (1 << sh_idx)))
|
||||
if ((status && status != JRSTA_SSRC_JUMP_HALT_CC) ||
|
||||
!(rdsta_val & (1 << sh_idx)))
|
||||
ret = -EAGAIN;
|
||||
if (ret)
|
||||
break;
|
||||
@ -279,7 +298,7 @@ static int caam_remove(struct platform_device *pdev)
|
||||
struct device *ctrldev;
|
||||
struct caam_drv_private *ctrlpriv;
|
||||
struct caam_ctrl __iomem *ctrl;
|
||||
int ring, ret = 0;
|
||||
int ring;
|
||||
|
||||
ctrldev = &pdev->dev;
|
||||
ctrlpriv = dev_get_drvdata(ctrldev);
|
||||
@ -303,7 +322,13 @@ static int caam_remove(struct platform_device *pdev)
|
||||
/* Unmap controller region */
|
||||
iounmap(ctrl);
|
||||
|
||||
return ret;
|
||||
/* shut clocks off before finalizing shutdown */
|
||||
clk_disable_unprepare(ctrlpriv->caam_ipg);
|
||||
clk_disable_unprepare(ctrlpriv->caam_mem);
|
||||
clk_disable_unprepare(ctrlpriv->caam_aclk);
|
||||
clk_disable_unprepare(ctrlpriv->caam_emi_slow);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -370,14 +395,14 @@ static void kick_trng(struct platform_device *pdev, int ent_delay)
|
||||
int caam_get_era(void)
|
||||
{
|
||||
struct device_node *caam_node;
|
||||
for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
|
||||
const uint32_t *prop = (uint32_t *)of_get_property(caam_node,
|
||||
"fsl,sec-era",
|
||||
NULL);
|
||||
return prop ? *prop : -ENOTSUPP;
|
||||
}
|
||||
int ret;
|
||||
u32 prop;
|
||||
|
||||
return -ENOTSUPP;
|
||||
caam_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
|
||||
ret = of_property_read_u32(caam_node, "fsl,sec-era", &prop);
|
||||
of_node_put(caam_node);
|
||||
|
||||
return IS_ERR_VALUE(ret) ? -ENOTSUPP : prop;
|
||||
}
|
||||
EXPORT_SYMBOL(caam_get_era);
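
/*
 * Added note (not part of the patch): the rewrite drops the old single-pass
 * for_each_compatible_node() loop in favour of an explicit lookup.
 * of_node_put() balances the reference taken by of_find_compatible_node()
 * (and is safe on NULL), of_property_read_u32() simply fails if the node or
 * property is missing, and any such failure is mapped to -ENOTSUPP just as
 * the old "property missing" case was.
 */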
|
||||
|
||||
@ -390,6 +415,7 @@ static int caam_probe(struct platform_device *pdev)
|
||||
struct device_node *nprop, *np;
|
||||
struct caam_ctrl __iomem *ctrl;
|
||||
struct caam_drv_private *ctrlpriv;
|
||||
struct clk *clk;
|
||||
#ifdef CONFIG_DEBUG_FS
|
||||
struct caam_perfmon *perfmon;
|
||||
#endif
|
||||
@ -398,8 +424,7 @@ static int caam_probe(struct platform_device *pdev)
|
||||
int pg_size;
|
||||
int BLOCK_OFFSET = 0;
|
||||
|
||||
ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(struct caam_drv_private),
|
||||
GFP_KERNEL);
|
||||
ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(*ctrlpriv), GFP_KERNEL);
|
||||
if (!ctrlpriv)
|
||||
return -ENOMEM;
|
||||
|
||||
@ -408,12 +433,76 @@ static int caam_probe(struct platform_device *pdev)
|
||||
ctrlpriv->pdev = pdev;
|
||||
nprop = pdev->dev.of_node;
|
||||
|
||||
/* Enable clocking */
|
||||
clk = caam_drv_identify_clk(&pdev->dev, "ipg");
|
||||
if (IS_ERR(clk)) {
|
||||
ret = PTR_ERR(clk);
|
||||
dev_err(&pdev->dev,
|
||||
"can't identify CAAM ipg clk: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
ctrlpriv->caam_ipg = clk;
|
||||
|
||||
clk = caam_drv_identify_clk(&pdev->dev, "mem");
|
||||
if (IS_ERR(clk)) {
|
||||
ret = PTR_ERR(clk);
|
||||
dev_err(&pdev->dev,
|
||||
"can't identify CAAM mem clk: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
ctrlpriv->caam_mem = clk;
|
||||
|
||||
clk = caam_drv_identify_clk(&pdev->dev, "aclk");
|
||||
if (IS_ERR(clk)) {
|
||||
ret = PTR_ERR(clk);
|
||||
dev_err(&pdev->dev,
|
||||
"can't identify CAAM aclk clk: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
ctrlpriv->caam_aclk = clk;
|
||||
|
||||
clk = caam_drv_identify_clk(&pdev->dev, "emi_slow");
|
||||
if (IS_ERR(clk)) {
|
||||
ret = PTR_ERR(clk);
|
||||
dev_err(&pdev->dev,
|
||||
"can't identify CAAM emi_slow clk: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
ctrlpriv->caam_emi_slow = clk;
|
||||
|
||||
ret = clk_prepare_enable(ctrlpriv->caam_ipg);
|
||||
if (ret < 0) {
|
||||
dev_err(&pdev->dev, "can't enable CAAM ipg clock: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = clk_prepare_enable(ctrlpriv->caam_mem);
|
||||
if (ret < 0) {
|
||||
dev_err(&pdev->dev, "can't enable CAAM secure mem clock: %d\n",
|
||||
ret);
|
||||
goto disable_caam_ipg;
|
||||
}
|
||||
|
||||
ret = clk_prepare_enable(ctrlpriv->caam_aclk);
|
||||
if (ret < 0) {
|
||||
dev_err(&pdev->dev, "can't enable CAAM aclk clock: %d\n", ret);
|
||||
goto disable_caam_mem;
|
||||
}
|
||||
|
||||
ret = clk_prepare_enable(ctrlpriv->caam_emi_slow);
|
||||
if (ret < 0) {
|
||||
dev_err(&pdev->dev, "can't enable CAAM emi slow clock: %d\n",
|
||||
ret);
|
||||
goto disable_caam_aclk;
|
||||
}
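
	/*
	 * Aside, sketch only (not part of the patch): the same sequence could
	 * be written table-driven, keeping the unwinding in one place.  The
	 * clock names are the ones requested above; clks[] stands for the
	 * four struct clk * members of ctrlpriv.
	 *
	 *	static const char * const names[] = {
	 *		"ipg", "mem", "aclk", "emi_slow"
	 *	};
	 *
	 *	for (i = 0; i < ARRAY_SIZE(names); i++) {
	 *		clk = caam_drv_identify_clk(&pdev->dev,
	 *					    (char *)names[i]);
	 *		if (IS_ERR(clk)) {
	 *			ret = PTR_ERR(clk);
	 *			goto unwind;
	 *		}
	 *		clks[i] = clk;
	 *		ret = clk_prepare_enable(clk);
	 *		if (ret)
	 *			goto unwind;
	 *	}
	 *	...
	 * unwind:
	 *	while (--i >= 0)
	 *		clk_disable_unprepare(clks[i]);
	 */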
|
||||
|
||||
/* Get configuration properties from device tree */
|
||||
/* First, get register page */
|
||||
ctrl = of_iomap(nprop, 0);
|
||||
if (ctrl == NULL) {
|
||||
dev_err(dev, "caam: of_iomap() failed\n");
|
||||
return -ENOMEM;
|
||||
ret = -ENOMEM;
|
||||
goto disable_caam_emi_slow;
|
||||
}
|
||||
/* Finding the page size for using the CTPR_MS register */
|
||||
comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms);
|
||||
@ -444,8 +533,9 @@ static int caam_probe(struct platform_device *pdev)
|
||||
* Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
|
||||
* long pointers in master configuration register
|
||||
*/
|
||||
setbits32(&ctrl->mcr, MCFGR_WDENABLE |
|
||||
(sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
|
||||
clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK, MCFGR_AWCACHE_CACH |
|
||||
MCFGR_WDENABLE | (sizeof(dma_addr_t) == sizeof(u64) ?
|
||||
MCFGR_LONG_PTR : 0));
|
||||
|
||||
/*
|
||||
* Read the Compile Time paramters and SCFGR to determine
|
||||
@ -492,12 +582,11 @@ static int caam_probe(struct platform_device *pdev)
|
||||
of_device_is_compatible(np, "fsl,sec4.0-job-ring"))
|
||||
rspec++;
|
||||
|
||||
ctrlpriv->jrpdev = devm_kzalloc(&pdev->dev,
|
||||
sizeof(struct platform_device *) * rspec,
|
||||
GFP_KERNEL);
|
||||
ctrlpriv->jrpdev = devm_kcalloc(&pdev->dev, rspec,
|
||||
sizeof(*ctrlpriv->jrpdev), GFP_KERNEL);
|
||||
if (ctrlpriv->jrpdev == NULL) {
|
||||
iounmap(ctrl);
|
||||
return -ENOMEM;
|
||||
ret = -ENOMEM;
|
||||
goto iounmap_ctrl;
|
||||
}
|
||||
|
||||
ring = 0;
|
||||
@ -537,8 +626,8 @@ static int caam_probe(struct platform_device *pdev)
|
||||
/* If no QI and no rings specified, quit and go home */
|
||||
if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) {
|
||||
dev_err(dev, "no queues configured, terminating\n");
|
||||
caam_remove(pdev);
|
||||
return -ENOMEM;
|
||||
ret = -ENOMEM;
|
||||
goto caam_remove;
|
||||
}
|
||||
|
||||
cha_vid_ls = rd_reg32(&ctrl->perfmon.cha_id_ls);
|
||||
@ -595,8 +684,7 @@ static int caam_probe(struct platform_device *pdev)
|
||||
} while ((ret == -EAGAIN) && (ent_delay < RTSDCTL_ENT_DLY_MAX));
|
||||
if (ret) {
|
||||
dev_err(dev, "failed to instantiate RNG");
|
||||
caam_remove(pdev);
|
||||
return ret;
|
||||
goto caam_remove;
|
||||
}
|
||||
/*
|
||||
* Set handles init'ed by this module as the complement of the
|
||||
@ -700,6 +788,20 @@ static int caam_probe(struct platform_device *pdev)
|
||||
&ctrlpriv->ctl_tdsk_wrap);
|
||||
#endif
|
||||
return 0;
|
||||
|
||||
caam_remove:
|
||||
caam_remove(pdev);
|
||||
iounmap_ctrl:
|
||||
iounmap(ctrl);
|
||||
disable_caam_emi_slow:
|
||||
clk_disable_unprepare(ctrlpriv->caam_emi_slow);
|
||||
disable_caam_aclk:
|
||||
clk_disable_unprepare(ctrlpriv->caam_aclk);
|
||||
disable_caam_mem:
|
||||
clk_disable_unprepare(ctrlpriv->caam_mem);
|
||||
disable_caam_ipg:
|
||||
clk_disable_unprepare(ctrlpriv->caam_ipg);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static struct of_device_id caam_match[] = {
|
||||
|
@ -8,12 +8,29 @@
|
||||
#ifndef DESC_H
|
||||
#define DESC_H
|
||||
|
||||
/*
|
||||
* 16-byte hardware scatter/gather table
|
||||
* An 8-byte table exists in the hardware spec, but has never been
|
||||
* implemented to date. The 8/16 option is selected at RTL-compile-time.
|
||||
* and this selection is visible in the Compile Time Parameters Register
|
||||
*/
|
||||
|
||||
#define SEC4_SG_LEN_EXT 0x80000000 /* Entry points to table */
|
||||
#define SEC4_SG_LEN_FIN 0x40000000 /* Last ent in table */
|
||||
#define SEC4_SG_BPID_MASK 0x000000ff
|
||||
#define SEC4_SG_BPID_SHIFT 16
|
||||
#define SEC4_SG_LEN_MASK 0x3fffffff /* Excludes EXT and FINAL */
|
||||
#define SEC4_SG_OFFS_MASK 0x00001fff
|
||||
|
||||
struct sec4_sg_entry {
|
||||
#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
|
||||
u32 rsvd1;
|
||||
dma_addr_t ptr;
|
||||
#else
|
||||
u64 ptr;
|
||||
#define SEC4_SG_LEN_FIN 0x40000000
|
||||
#define SEC4_SG_LEN_EXT 0x80000000
|
||||
#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_IMX */
|
||||
u32 len;
|
||||
u8 reserved;
|
||||
u8 rsvd2;
|
||||
u8 buf_pool_id;
|
||||
u16 offset;
|
||||
};
|
||||
|
@ -367,7 +367,7 @@ do { \
|
||||
if (upper) \
|
||||
append_u64(desc, data); \
|
||||
else \
|
||||
append_u32(desc, data); \
|
||||
append_u32(desc, lower_32_bits(data)); \
|
||||
} while (0)
|
||||
|
||||
#define append_math_add_imm_u64(desc, dest, src0, src1, data) \
|
||||
|
@ -91,6 +91,11 @@ struct caam_drv_private {
|
||||
Handles of the RNG4 block are initialized
|
||||
by this driver */
|
||||
|
||||
struct clk *caam_ipg;
|
||||
struct clk *caam_mem;
|
||||
struct clk *caam_aclk;
|
||||
struct clk *caam_emi_slow;
|
||||
|
||||
/*
|
||||
* debugfs entries for developer view into driver/device
|
||||
* variables at runtime.
|
||||
|
@ -202,6 +202,13 @@ static void caam_jr_dequeue(unsigned long devarg)
|
||||
userdesc = jrp->entinfo[sw_idx].desc_addr_virt;
|
||||
userstatus = jrp->outring[hw_idx].jrstatus;
|
||||
|
||||
/*
|
||||
* Make sure all information from the job has been obtained
|
||||
* before telling CAAM that the job has been removed from the
|
||||
* output ring.
|
||||
*/
|
||||
mb();
|
||||
|
||||
/* set done */
|
||||
wr_reg32(&jrp->rregs->outring_rmvd, 1);
|
||||
|
||||
@ -351,12 +358,23 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
|
||||
|
||||
jrp->inpring[jrp->inp_ring_write_index] = desc_dma;
|
||||
|
||||
/*
|
||||
* Guarantee that the descriptor's DMA address has been written to
|
||||
* the next slot in the ring before the write index is updated, since
|
||||
* other cores may update this index independently.
|
||||
*/
|
||||
smp_wmb();
|
||||
|
||||
jrp->inp_ring_write_index = (jrp->inp_ring_write_index + 1) &
|
||||
(JOBR_DEPTH - 1);
|
||||
jrp->head = (head + 1) & (JOBR_DEPTH - 1);
|
||||
|
||||
/*
|
||||
* Ensure that all job information has been written before
|
||||
* notifying CAAM that a new job was added to the input ring.
|
||||
*/
|
||||
wmb();
|
||||
|
||||
wr_reg32(&jrp->rregs->inpring_jobadd, 1);
|
||||
|
||||
spin_unlock_bh(&jrp->inplock);
|
||||
@ -392,18 +410,17 @@ static int caam_jr_init(struct device *dev)
|
||||
goto out_free_irq;
|
||||
|
||||
error = -ENOMEM;
|
||||
jrp->inpring = dma_alloc_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH,
|
||||
&inpbusaddr, GFP_KERNEL);
|
||||
jrp->inpring = dma_alloc_coherent(dev, sizeof(*jrp->inpring) *
|
||||
JOBR_DEPTH, &inpbusaddr, GFP_KERNEL);
|
||||
if (!jrp->inpring)
|
||||
goto out_free_irq;
|
||||
|
||||
jrp->outring = dma_alloc_coherent(dev, sizeof(struct jr_outentry) *
|
||||
jrp->outring = dma_alloc_coherent(dev, sizeof(*jrp->outring) *
|
||||
JOBR_DEPTH, &outbusaddr, GFP_KERNEL);
|
||||
if (!jrp->outring)
|
||||
goto out_free_inpring;
|
||||
|
||||
jrp->entinfo = kzalloc(sizeof(struct caam_jrentry_info) * JOBR_DEPTH,
|
||||
GFP_KERNEL);
|
||||
jrp->entinfo = kcalloc(JOBR_DEPTH, sizeof(*jrp->entinfo), GFP_KERNEL);
|
||||
if (!jrp->entinfo)
|
||||
goto out_free_outring;
|
||||
|
||||
@ -461,8 +478,7 @@ static int caam_jr_probe(struct platform_device *pdev)
|
||||
int error;
|
||||
|
||||
jrdev = &pdev->dev;
|
||||
jrpriv = devm_kmalloc(jrdev, sizeof(struct caam_drv_private_jr),
|
||||
GFP_KERNEL);
|
||||
jrpriv = devm_kmalloc(jrdev, sizeof(*jrpriv), GFP_KERNEL);
|
||||
if (!jrpriv)
|
||||
return -ENOMEM;
|
||||
|
||||
|
@ -65,9 +65,31 @@
|
||||
*
|
||||
*/
|
||||
|
||||
#ifdef CONFIG_ARM
|
||||
/* These are common macros for Power, put here for ARM */
|
||||
#define setbits32(_addr, _v) writel((readl(_addr) | (_v)), (_addr))
|
||||
#define clrbits32(_addr, _v) writel((readl(_addr) & ~(_v)), (_addr))
|
||||
|
||||
#define out_arch(type, endian, a, v) __raw_write##type(cpu_to_##endian(v), a)
|
||||
#define in_arch(type, endian, a) endian##_to_cpu(__raw_read##type(a))
|
||||
|
||||
#define out_le32(a, v) out_arch(l, le32, a, v)
|
||||
#define in_le32(a) in_arch(l, le32, a)
|
||||
|
||||
#define out_be32(a, v) out_arch(l, be32, a, v)
|
||||
#define in_be32(a) in_arch(l, be32, a)
|
||||
|
||||
#define clrsetbits(type, addr, clear, set) \
|
||||
out_##type((addr), (in_##type(addr) & ~(clear)) | (set))
|
||||
|
||||
#define clrsetbits_be32(addr, clear, set) clrsetbits(be32, addr, clear, set)
|
||||
#define clrsetbits_le32(addr, clear, set) clrsetbits(le32, addr, clear, set)
|
||||
#endif
|
||||
|
||||
#ifdef __BIG_ENDIAN
|
||||
#define wr_reg32(reg, data) out_be32(reg, data)
|
||||
#define rd_reg32(reg) in_be32(reg)
|
||||
#define clrsetbits_32(addr, clear, set) clrsetbits_be32(addr, clear, set)
|
||||
#ifdef CONFIG_64BIT
|
||||
#define wr_reg64(reg, data) out_be64(reg, data)
|
||||
#define rd_reg64(reg) in_be64(reg)
|
||||
@ -76,6 +98,7 @@
|
||||
#ifdef __LITTLE_ENDIAN
|
||||
#define wr_reg32(reg, data) __raw_writel(data, reg)
|
||||
#define rd_reg32(reg) __raw_readl(reg)
|
||||
#define clrsetbits_32(addr, clear, set) clrsetbits_le32(addr, clear, set)
|
||||
#ifdef CONFIG_64BIT
|
||||
#define wr_reg64(reg, data) __raw_writeq(data, reg)
|
||||
#define rd_reg64(reg) __raw_readq(reg)
|
||||
@ -85,20 +108,31 @@
|
||||
|
||||
/*
|
||||
* The only users of these wr/rd_reg64 functions is the Job Ring (JR).
|
||||
* The DMA address registers in the JR are a pair of 32-bit registers.
|
||||
* The layout is:
|
||||
* The DMA address registers in the JR are handled differently depending on
|
||||
* platform:
|
||||
*
|
||||
* 1. All BE CAAM platforms and i.MX platforms (LE CAAM):
|
||||
*
|
||||
* base + 0x0000 : most-significant 32 bits
|
||||
* base + 0x0004 : least-significant 32 bits
|
||||
*
|
||||
* The 32-bit version of this core therefore has to write to base + 0x0004
|
||||
* to set the 32-bit wide DMA address. This seems to be independent of the
|
||||
* endianness of the written/read data.
|
||||
* to set the 32-bit wide DMA address.
|
||||
*
|
||||
* 2. All other LE CAAM platforms (LS1021A etc.)
|
||||
* base + 0x0000 : least-significant 32 bits
|
||||
* base + 0x0004 : most-significant 32 bits
|
||||
*/
|
||||
|
||||
#ifndef CONFIG_64BIT
|
||||
#if !defined(CONFIG_CRYPTO_DEV_FSL_CAAM_LE) || \
|
||||
defined(CONFIG_CRYPTO_DEV_FSL_CAAM_IMX)
|
||||
#define REG64_MS32(reg) ((u32 __iomem *)(reg))
|
||||
#define REG64_LS32(reg) ((u32 __iomem *)(reg) + 1)
|
||||
#else
|
||||
#define REG64_MS32(reg) ((u32 __iomem *)(reg) + 1)
|
||||
#define REG64_LS32(reg) ((u32 __iomem *)(reg))
|
||||
#endif
|
||||
|
||||
static inline void wr_reg64(u64 __iomem *reg, u64 data)
|
||||
{
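	/*
	 * The hunk is truncated here by the diff context.  A plausible body
	 * (a sketch based on the REG64_MS32/REG64_LS32 selectors defined
	 * above, not the verbatim patch text) writes the two 32-bit halves
	 * in the platform-appropriate order:
	 *
	 *	wr_reg32(REG64_MS32(reg), data >> 32);
	 *	wr_reg32(REG64_LS32(reg), data);
	 *
	 * with rd_reg64() reassembling the value the same way.
	 */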
|
||||
@ -133,18 +167,28 @@ struct jr_outentry {
|
||||
#define CHA_NUM_MS_DECONUM_SHIFT 24
|
||||
#define CHA_NUM_MS_DECONUM_MASK (0xfull << CHA_NUM_MS_DECONUM_SHIFT)
|
||||
|
||||
/* CHA Version IDs */
|
||||
/*
|
||||
* CHA version IDs / instantiation bitfields
|
||||
* Defined for use with the cha_id fields in perfmon, but the same shift/mask
|
||||
* selectors can be used to pull out the number of instantiated blocks within
|
||||
* cha_num fields in perfmon because the locations are the same.
|
||||
*/
|
||||
#define CHA_ID_LS_AES_SHIFT 0
|
||||
#define CHA_ID_LS_AES_MASK (0xfull << CHA_ID_LS_AES_SHIFT)
|
||||
#define CHA_ID_LS_AES_MASK (0xfull << CHA_ID_LS_AES_SHIFT)
|
||||
#define CHA_ID_LS_AES_LP (0x3ull << CHA_ID_LS_AES_SHIFT)
|
||||
#define CHA_ID_LS_AES_HP (0x4ull << CHA_ID_LS_AES_SHIFT)
|
||||
|
||||
#define CHA_ID_LS_DES_SHIFT 4
|
||||
#define CHA_ID_LS_DES_MASK (0xfull << CHA_ID_LS_DES_SHIFT)
|
||||
#define CHA_ID_LS_DES_MASK (0xfull << CHA_ID_LS_DES_SHIFT)
|
||||
|
||||
#define CHA_ID_LS_ARC4_SHIFT 8
|
||||
#define CHA_ID_LS_ARC4_MASK (0xfull << CHA_ID_LS_ARC4_SHIFT)
|
||||
|
||||
#define CHA_ID_LS_MD_SHIFT 12
|
||||
#define CHA_ID_LS_MD_MASK (0xfull << CHA_ID_LS_MD_SHIFT)
|
||||
#define CHA_ID_LS_MD_LP256 (0x0ull << CHA_ID_LS_MD_SHIFT)
|
||||
#define CHA_ID_LS_MD_LP512 (0x1ull << CHA_ID_LS_MD_SHIFT)
|
||||
#define CHA_ID_LS_MD_HP (0x2ull << CHA_ID_LS_MD_SHIFT)
|
||||
|
||||
#define CHA_ID_LS_RNG_SHIFT 16
|
||||
#define CHA_ID_LS_RNG_MASK (0xfull << CHA_ID_LS_RNG_SHIFT)
|
||||
@ -395,10 +439,16 @@ struct caam_ctrl {
|
||||
/* AXI read cache control */
|
||||
#define MCFGR_ARCACHE_SHIFT 12
|
||||
#define MCFGR_ARCACHE_MASK (0xf << MCFGR_ARCACHE_SHIFT)
|
||||
#define MCFGR_ARCACHE_BUFF (0x1 << MCFGR_ARCACHE_SHIFT)
|
||||
#define MCFGR_ARCACHE_CACH (0x2 << MCFGR_ARCACHE_SHIFT)
|
||||
#define MCFGR_ARCACHE_RALL (0x4 << MCFGR_ARCACHE_SHIFT)
|
||||
|
||||
/* AXI write cache control */
|
||||
#define MCFGR_AWCACHE_SHIFT 8
|
||||
#define MCFGR_AWCACHE_MASK (0xf << MCFGR_AWCACHE_SHIFT)
|
||||
#define MCFGR_AWCACHE_BUFF (0x1 << MCFGR_AWCACHE_SHIFT)
|
||||
#define MCFGR_AWCACHE_CACH (0x2 << MCFGR_AWCACHE_SHIFT)
|
||||
#define MCFGR_AWCACHE_WALL (0x8 << MCFGR_AWCACHE_SHIFT)
|
||||
|
||||
/* AXI pipeline depth */
|
||||
#define MCFGR_AXIPIPE_SHIFT 4
|
||||
|
@ -15,7 +15,6 @@ static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr,
|
||||
{
|
||||
sec4_sg_ptr->ptr = dma;
|
||||
sec4_sg_ptr->len = len;
|
||||
sec4_sg_ptr->reserved = 0;
|
||||
sec4_sg_ptr->buf_pool_id = 0;
|
||||
sec4_sg_ptr->offset = offset;
|
||||
#ifdef DEBUG
|
||||
@ -106,9 +105,15 @@ static inline void dma_unmap_sg_chained(
|
||||
{
|
||||
if (unlikely(chained)) {
|
||||
int i;
|
||||
struct scatterlist *tsg = sg;
|
||||
|
||||
/*
|
||||
* Use a local copy of the sg pointer to avoid moving the
|
||||
* head of the list pointed to by sg as we walk the list.
|
||||
*/
|
||||
for (i = 0; i < nents; i++) {
|
||||
dma_unmap_sg(dev, sg, 1, dir);
|
||||
sg = sg_next(sg);
|
||||
dma_unmap_sg(dev, tsg, 1, dir);
|
||||
tsg = sg_next(tsg);
|
||||
}
|
||||
} else if (nents) {
|
||||
dma_unmap_sg(dev, sg, nents, dir);
|
||||
@ -119,19 +124,23 @@ static inline int dma_map_sg_chained(
|
||||
struct device *dev, struct scatterlist *sg, unsigned int nents,
|
||||
enum dma_data_direction dir, bool chained)
|
||||
{
|
||||
struct scatterlist *first = sg;
|
||||
|
||||
if (unlikely(chained)) {
|
||||
int i;
|
||||
struct scatterlist *tsg = sg;
|
||||
|
||||
/*
|
||||
* Use a local copy of the sg pointer to avoid moving the
|
||||
* head of the list pointed to by sg as we walk the list.
|
||||
*/
|
||||
for (i = 0; i < nents; i++) {
|
||||
if (!dma_map_sg(dev, sg, 1, dir)) {
|
||||
dma_unmap_sg_chained(dev, first, i, dir,
|
||||
if (!dma_map_sg(dev, tsg, 1, dir)) {
|
||||
dma_unmap_sg_chained(dev, sg, i, dir,
|
||||
chained);
|
||||
nents = 0;
|
||||
break;
|
||||
}
|
||||
|
||||
sg = sg_next(sg);
|
||||
tsg = sg_next(tsg);
|
||||
}
|
||||
} else
|
||||
nents = dma_map_sg(dev, sg, nents, dir);
|
||||
|
@ -216,6 +216,7 @@ static const struct acpi_device_id ccp_acpi_match[] = {
|
||||
{ "AMDI0C00", 0 },
|
||||
{ },
|
||||
};
|
||||
MODULE_DEVICE_TABLE(acpi, ccp_acpi_match);
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_OF
|
||||
@ -223,6 +224,7 @@ static const struct of_device_id ccp_of_match[] = {
|
||||
{ .compatible = "amd,ccp-seattle-v1a" },
|
||||
{ },
|
||||
};
|
||||
MODULE_DEVICE_TABLE(of, ccp_of_match);
|
||||
#endif
|
||||
|
||||
static struct platform_driver ccp_platform_driver = {
|
||||
|
@ -334,7 +334,7 @@ static int img_hash_dma_init(struct img_hash_dev *hdev)
|
||||
|
||||
hdev->dma_lch = dma_request_slave_channel(hdev->dev, "tx");
|
||||
if (!hdev->dma_lch) {
|
||||
dev_err(hdev->dev, "Couldn't aquire a slave DMA channel.\n");
|
||||
dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n");
|
||||
return -EBUSY;
|
||||
}
|
||||
dma_conf.direction = DMA_MEM_TO_DEV;
|
||||
|
@ -156,7 +156,8 @@ struct ablk_ctx {
|
||||
};
|
||||
|
||||
struct aead_ctx {
|
||||
struct buffer_desc *buffer;
|
||||
struct buffer_desc *src;
|
||||
struct buffer_desc *dst;
|
||||
struct scatterlist ivlist;
|
||||
/* used when the hmac is not on one sg entry */
|
||||
u8 *hmac_virt;
|
||||
@ -198,6 +199,15 @@ struct ixp_alg {
|
||||
int registered;
|
||||
};
|
||||
|
||||
struct ixp_aead_alg {
|
||||
struct aead_alg crypto;
|
||||
const struct ix_hash_algo *hash;
|
||||
u32 cfg_enc;
|
||||
u32 cfg_dec;
|
||||
|
||||
int registered;
|
||||
};
|
||||
|
||||
static const struct ix_hash_algo hash_alg_md5 = {
|
||||
.cfgword = 0xAA010004,
|
||||
.icv = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
|
||||
@ -339,11 +349,11 @@ static void finish_scattered_hmac(struct crypt_ctl *crypt)
|
||||
struct aead_ctx *req_ctx = aead_request_ctx(req);
|
||||
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
|
||||
int authsize = crypto_aead_authsize(tfm);
|
||||
int decryptlen = req->cryptlen - authsize;
|
||||
int decryptlen = req->assoclen + req->cryptlen - authsize;
|
||||
|
||||
if (req_ctx->encrypt) {
|
||||
scatterwalk_map_and_copy(req_ctx->hmac_virt,
|
||||
req->src, decryptlen, authsize, 1);
|
||||
req->dst, decryptlen, authsize, 1);
|
||||
}
|
||||
dma_pool_free(buffer_pool, req_ctx->hmac_virt, crypt->icv_rev_aes);
|
||||
}
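
/*
 * Added note (not part of the patch): with the converted AEAD interface the
 * associated data is part of req->src/req->dst, so the ICV now sits at
 * offset assoclen + cryptlen - authsize and, on encryption, must be copied
 * into the destination scatterlist rather than the source.
 */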
|
||||
@ -364,7 +374,8 @@ static void one_packet(dma_addr_t phys)
|
||||
struct aead_request *req = crypt->data.aead_req;
|
||||
struct aead_ctx *req_ctx = aead_request_ctx(req);
|
||||
|
||||
free_buf_chain(dev, req_ctx->buffer, crypt->src_buf);
|
||||
free_buf_chain(dev, req_ctx->src, crypt->src_buf);
|
||||
free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
|
||||
if (req_ctx->hmac_virt) {
|
||||
finish_scattered_hmac(crypt);
|
||||
}
|
||||
@ -573,11 +584,10 @@ static int init_tfm_ablk(struct crypto_tfm *tfm)
|
||||
return init_tfm(tfm);
|
||||
}
|
||||
|
||||
static int init_tfm_aead(struct crypto_tfm *tfm)
|
||||
static int init_tfm_aead(struct crypto_aead *tfm)
|
||||
{
|
||||
crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
|
||||
sizeof(struct aead_ctx));
|
||||
return init_tfm(tfm);
|
||||
crypto_aead_set_reqsize(tfm, sizeof(struct aead_ctx));
|
||||
return init_tfm(crypto_aead_tfm(tfm));
|
||||
}
|
||||
|
||||
static void exit_tfm(struct crypto_tfm *tfm)
|
||||
@ -587,6 +597,11 @@ static void exit_tfm(struct crypto_tfm *tfm)
|
||||
free_sa_dir(&ctx->decrypt);
|
||||
}
|
||||
|
||||
static void exit_tfm_aead(struct crypto_aead *tfm)
|
||||
{
|
||||
exit_tfm(crypto_aead_tfm(tfm));
|
||||
}
|
||||
|
||||
static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
|
||||
int init_len, u32 ctx_addr, const u8 *key, int key_len)
|
||||
{
|
||||
@ -969,24 +984,6 @@ static int ablk_rfc3686_crypt(struct ablkcipher_request *req)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int hmac_inconsistent(struct scatterlist *sg, unsigned start,
|
||||
unsigned int nbytes)
|
||||
{
|
||||
int offset = 0;
|
||||
|
||||
if (!nbytes)
|
||||
return 0;
|
||||
|
||||
for (;;) {
|
||||
if (start < offset + sg->length)
|
||||
break;
|
||||
|
||||
offset += sg->length;
|
||||
sg = sg_next(sg);
|
||||
}
|
||||
return (start + nbytes > offset + sg->length);
|
||||
}
|
||||
|
||||
static int aead_perform(struct aead_request *req, int encrypt,
|
||||
int cryptoffset, int eff_cryptlen, u8 *iv)
|
||||
{
|
||||
@ -1002,6 +999,8 @@ static int aead_perform(struct aead_request *req, int encrypt,
|
||||
struct device *dev = &pdev->dev;
|
||||
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
|
||||
GFP_KERNEL : GFP_ATOMIC;
|
||||
enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
|
||||
unsigned int lastlen;
|
||||
|
||||
if (qmgr_stat_full(SEND_QID))
|
||||
return -EAGAIN;
|
||||
@ -1030,35 +1029,55 @@ static int aead_perform(struct aead_request *req, int encrypt,
|
||||
crypt->crypt_len = eff_cryptlen;
|
||||
|
||||
crypt->auth_offs = 0;
|
||||
crypt->auth_len = req->assoclen + ivsize + cryptlen;
|
||||
crypt->auth_len = req->assoclen + cryptlen;
|
||||
BUG_ON(ivsize && !req->iv);
|
||||
memcpy(crypt->iv, req->iv, ivsize);
|
||||
|
||||
req_ctx->dst = NULL;
|
||||
|
||||
if (req->src != req->dst) {
|
||||
BUG(); /* -ENOTSUP because of my laziness */
|
||||
struct buffer_desc dst_hook;
|
||||
|
||||
crypt->mode |= NPE_OP_NOT_IN_PLACE;
|
||||
src_direction = DMA_TO_DEVICE;
|
||||
|
||||
buf = chainup_buffers(dev, req->dst, crypt->auth_len,
|
||||
&dst_hook, flags, DMA_FROM_DEVICE);
|
||||
req_ctx->dst = dst_hook.next;
|
||||
crypt->dst_buf = dst_hook.phys_next;
|
||||
|
||||
if (!buf)
|
||||
goto free_buf_dst;
|
||||
|
||||
if (encrypt) {
|
||||
lastlen = buf->buf_len;
|
||||
if (lastlen >= authsize)
|
||||
crypt->icv_rev_aes = buf->phys_addr +
|
||||
buf->buf_len - authsize;
|
||||
}
|
||||
}
|
||||
|
||||
/* ASSOC data */
|
||||
buf = chainup_buffers(dev, req->assoc, req->assoclen, &src_hook,
|
||||
flags, DMA_TO_DEVICE);
|
||||
req_ctx->buffer = src_hook.next;
|
||||
buf = chainup_buffers(dev, req->src, crypt->auth_len,
|
||||
&src_hook, flags, src_direction);
|
||||
req_ctx->src = src_hook.next;
|
||||
crypt->src_buf = src_hook.phys_next;
|
||||
if (!buf)
|
||||
goto out;
|
||||
/* IV */
|
||||
sg_init_table(&req_ctx->ivlist, 1);
|
||||
sg_set_buf(&req_ctx->ivlist, iv, ivsize);
|
||||
buf = chainup_buffers(dev, &req_ctx->ivlist, ivsize, buf, flags,
|
||||
DMA_BIDIRECTIONAL);
|
||||
if (!buf)
|
||||
goto free_chain;
|
||||
if (unlikely(hmac_inconsistent(req->src, cryptlen, authsize))) {
|
||||
goto free_buf_src;
|
||||
|
||||
if (!encrypt || !req_ctx->dst) {
|
||||
lastlen = buf->buf_len;
|
||||
if (lastlen >= authsize)
|
||||
crypt->icv_rev_aes = buf->phys_addr +
|
||||
buf->buf_len - authsize;
|
||||
}
|
||||
|
||||
if (unlikely(lastlen < authsize)) {
|
||||
/* The 12 hmac bytes are scattered,
|
||||
* we need to copy them into a safe buffer */
|
||||
req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
|
||||
&crypt->icv_rev_aes);
|
||||
if (unlikely(!req_ctx->hmac_virt))
|
||||
goto free_chain;
|
||||
goto free_buf_src;
|
||||
if (!encrypt) {
|
||||
scatterwalk_map_and_copy(req_ctx->hmac_virt,
|
||||
req->src, cryptlen, authsize, 0);
|
||||
@ -1067,27 +1086,16 @@ static int aead_perform(struct aead_request *req, int encrypt,
|
||||
} else {
|
||||
req_ctx->hmac_virt = NULL;
|
||||
}
|
||||
/* Crypt */
|
||||
buf = chainup_buffers(dev, req->src, cryptlen + authsize, buf, flags,
|
||||
DMA_BIDIRECTIONAL);
|
||||
if (!buf)
|
||||
goto free_hmac_virt;
|
||||
if (!req_ctx->hmac_virt) {
|
||||
crypt->icv_rev_aes = buf->phys_addr + buf->buf_len - authsize;
|
||||
}
|
||||
|
||||
crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD;
|
||||
qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
|
||||
BUG_ON(qmgr_stat_overflow(SEND_QID));
|
||||
return -EINPROGRESS;
|
||||
free_hmac_virt:
|
||||
if (req_ctx->hmac_virt) {
|
||||
dma_pool_free(buffer_pool, req_ctx->hmac_virt,
|
||||
crypt->icv_rev_aes);
|
||||
}
|
||||
free_chain:
|
||||
free_buf_chain(dev, req_ctx->buffer, crypt->src_buf);
|
||||
out:
|
||||
|
||||
free_buf_src:
|
||||
free_buf_chain(dev, req_ctx->src, crypt->src_buf);
|
||||
free_buf_dst:
|
||||
free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
|
||||
crypt->ctl_flags = CTL_FLAG_UNUSED;
|
||||
return -ENOMEM;
|
||||
}
|
||||
@ -1173,40 +1181,12 @@ static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
|
||||
|
||||
static int aead_encrypt(struct aead_request *req)
|
||||
{
|
||||
unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
|
||||
return aead_perform(req, 1, req->assoclen + ivsize,
|
||||
req->cryptlen, req->iv);
|
||||
return aead_perform(req, 1, req->assoclen, req->cryptlen, req->iv);
|
||||
}
|
||||
|
||||
static int aead_decrypt(struct aead_request *req)
|
||||
{
|
||||
unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
|
||||
return aead_perform(req, 0, req->assoclen + ivsize,
|
||||
req->cryptlen, req->iv);
|
||||
}
|
||||
|
||||
static int aead_givencrypt(struct aead_givcrypt_request *req)
|
||||
{
|
||||
struct crypto_aead *tfm = aead_givcrypt_reqtfm(req);
|
||||
struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
|
||||
unsigned len, ivsize = crypto_aead_ivsize(tfm);
|
||||
__be64 seq;
|
||||
|
||||
/* copied from eseqiv.c */
|
||||
if (!ctx->salted) {
|
||||
get_random_bytes(ctx->salt, ivsize);
|
||||
ctx->salted = 1;
|
||||
}
|
||||
memcpy(req->areq.iv, ctx->salt, ivsize);
|
||||
len = ivsize;
|
||||
if (ivsize > sizeof(u64)) {
|
||||
memset(req->giv, 0, ivsize - sizeof(u64));
|
||||
len = sizeof(u64);
|
||||
}
|
||||
seq = cpu_to_be64(req->seq);
|
||||
memcpy(req->giv + ivsize - len, &seq, len);
|
||||
return aead_perform(&req->areq, 1, req->areq.assoclen,
|
||||
req->areq.cryptlen +ivsize, req->giv);
|
||||
return aead_perform(req, 0, req->assoclen, req->cryptlen, req->iv);
|
||||
}
|
||||
|
||||
static struct ixp_alg ixp4xx_algos[] = {
|
||||
@ -1319,80 +1299,77 @@ static struct ixp_alg ixp4xx_algos[] = {
|
||||
},
|
||||
.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
|
||||
.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
|
||||
}, {
|
||||
} };
|
||||
|
||||
static struct ixp_aead_alg ixp4xx_aeads[] = {
|
||||
{
|
||||
.crypto = {
|
||||
.cra_name = "authenc(hmac(md5),cbc(des))",
|
||||
.cra_blocksize = DES_BLOCK_SIZE,
|
||||
.cra_u = { .aead = {
|
||||
.ivsize = DES_BLOCK_SIZE,
|
||||
.maxauthsize = MD5_DIGEST_SIZE,
|
||||
}
|
||||
}
|
||||
.base = {
|
||||
.cra_name = "authenc(hmac(md5),cbc(des))",
|
||||
.cra_blocksize = DES_BLOCK_SIZE,
|
||||
},
|
||||
.ivsize = DES_BLOCK_SIZE,
|
||||
.maxauthsize = MD5_DIGEST_SIZE,
|
||||
},
|
||||
.hash = &hash_alg_md5,
|
||||
.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
|
||||
.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
|
||||
}, {
|
||||
.crypto = {
|
||||
.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
|
||||
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
|
||||
.cra_u = { .aead = {
|
||||
.ivsize = DES3_EDE_BLOCK_SIZE,
|
||||
.maxauthsize = MD5_DIGEST_SIZE,
|
||||
}
|
||||
}
|
||||
.base = {
|
||||
.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
|
||||
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
|
||||
},
|
||||
.ivsize = DES3_EDE_BLOCK_SIZE,
|
||||
.maxauthsize = MD5_DIGEST_SIZE,
|
||||
},
|
||||
.hash = &hash_alg_md5,
|
||||
.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
|
||||
.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
|
||||
}, {
|
||||
.crypto = {
|
||||
.cra_name = "authenc(hmac(sha1),cbc(des))",
|
||||
.cra_blocksize = DES_BLOCK_SIZE,
|
||||
.cra_u = { .aead = {
|
||||
.base = {
|
||||
.cra_name = "authenc(hmac(sha1),cbc(des))",
|
||||
.cra_blocksize = DES_BLOCK_SIZE,
|
||||
},
|
||||
.ivsize = DES_BLOCK_SIZE,
|
||||
.maxauthsize = SHA1_DIGEST_SIZE,
|
||||
}
|
||||
}
|
||||
},
|
||||
.hash = &hash_alg_sha1,
|
||||
.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
|
||||
.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
|
||||
}, {
|
||||
.crypto = {
|
||||
.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
|
||||
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
|
||||
.cra_u = { .aead = {
|
||||
.ivsize = DES3_EDE_BLOCK_SIZE,
|
||||
.maxauthsize = SHA1_DIGEST_SIZE,
|
||||
}
|
||||
}
|
||||
.base = {
|
||||
.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
|
||||
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
|
||||
},
|
||||
.ivsize = DES3_EDE_BLOCK_SIZE,
|
||||
.maxauthsize = SHA1_DIGEST_SIZE,
|
||||
},
|
||||
.hash = &hash_alg_sha1,
|
||||
.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
|
||||
.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
|
||||
}, {
|
||||
.crypto = {
|
||||
.cra_name = "authenc(hmac(md5),cbc(aes))",
|
||||
.cra_blocksize = AES_BLOCK_SIZE,
|
||||
.cra_u = { .aead = {
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
.maxauthsize = MD5_DIGEST_SIZE,
|
||||
}
|
||||
}
|
||||
.base = {
|
||||
.cra_name = "authenc(hmac(md5),cbc(aes))",
|
||||
.cra_blocksize = AES_BLOCK_SIZE,
|
||||
},
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
.maxauthsize = MD5_DIGEST_SIZE,
|
||||
},
|
||||
.hash = &hash_alg_md5,
|
||||
.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
|
||||
.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
|
||||
}, {
|
||||
.crypto = {
|
||||
.cra_name = "authenc(hmac(sha1),cbc(aes))",
|
||||
.cra_blocksize = AES_BLOCK_SIZE,
|
||||
.cra_u = { .aead = {
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
.maxauthsize = SHA1_DIGEST_SIZE,
|
||||
}
|
||||
}
|
||||
.base = {
|
||||
.cra_name = "authenc(hmac(sha1),cbc(aes))",
|
||||
.cra_blocksize = AES_BLOCK_SIZE,
|
||||
},
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
.maxauthsize = SHA1_DIGEST_SIZE,
|
||||
},
|
||||
.hash = &hash_alg_sha1,
|
||||
.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
|
||||
@ -1436,32 +1413,20 @@ static int __init ixp_module_init(void)
|
||||
if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES)) {
|
||||
continue;
|
||||
}
|
||||
if (!ixp4xx_algos[i].hash) {
|
||||
/* block ciphers */
|
||||
cra->cra_type = &crypto_ablkcipher_type;
|
||||
cra->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
|
||||
CRYPTO_ALG_KERN_DRIVER_ONLY |
|
||||
CRYPTO_ALG_ASYNC;
|
||||
if (!cra->cra_ablkcipher.setkey)
|
||||
cra->cra_ablkcipher.setkey = ablk_setkey;
|
||||
if (!cra->cra_ablkcipher.encrypt)
|
||||
cra->cra_ablkcipher.encrypt = ablk_encrypt;
|
||||
if (!cra->cra_ablkcipher.decrypt)
|
||||
cra->cra_ablkcipher.decrypt = ablk_decrypt;
|
||||
cra->cra_init = init_tfm_ablk;
|
||||
} else {
|
||||
/* authenc */
|
||||
cra->cra_type = &crypto_aead_type;
|
||||
cra->cra_flags = CRYPTO_ALG_TYPE_AEAD |
|
||||
CRYPTO_ALG_KERN_DRIVER_ONLY |
|
||||
CRYPTO_ALG_ASYNC;
|
||||
cra->cra_aead.setkey = aead_setkey;
|
||||
cra->cra_aead.setauthsize = aead_setauthsize;
|
||||
cra->cra_aead.encrypt = aead_encrypt;
|
||||
cra->cra_aead.decrypt = aead_decrypt;
|
||||
cra->cra_aead.givencrypt = aead_givencrypt;
|
||||
cra->cra_init = init_tfm_aead;
|
||||
}
|
||||
|
||||
/* block ciphers */
|
||||
cra->cra_type = &crypto_ablkcipher_type;
|
||||
cra->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
|
||||
CRYPTO_ALG_KERN_DRIVER_ONLY |
|
||||
CRYPTO_ALG_ASYNC;
|
||||
if (!cra->cra_ablkcipher.setkey)
|
||||
cra->cra_ablkcipher.setkey = ablk_setkey;
|
||||
if (!cra->cra_ablkcipher.encrypt)
|
||||
cra->cra_ablkcipher.encrypt = ablk_encrypt;
|
||||
if (!cra->cra_ablkcipher.decrypt)
|
||||
cra->cra_ablkcipher.decrypt = ablk_decrypt;
|
||||
cra->cra_init = init_tfm_ablk;
|
||||
|
||||
cra->cra_ctxsize = sizeof(struct ixp_ctx);
|
||||
cra->cra_module = THIS_MODULE;
|
||||
cra->cra_alignmask = 3;
|
||||
@ -1473,6 +1438,38 @@ static int __init ixp_module_init(void)
|
||||
else
|
||||
ixp4xx_algos[i].registered = 1;
|
||||
}
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
|
||||
struct aead_alg *cra = &ixp4xx_aeads[i].crypto;
|
||||
|
||||
if (snprintf(cra->base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
|
||||
"%s"IXP_POSTFIX, cra->base.cra_name) >=
|
||||
CRYPTO_MAX_ALG_NAME)
|
||||
continue;
|
||||
if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES))
|
||||
continue;
|
||||
|
||||
/* authenc */
|
||||
cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
|
||||
CRYPTO_ALG_ASYNC;
|
||||
cra->setkey = aead_setkey;
|
||||
cra->setauthsize = aead_setauthsize;
|
||||
cra->encrypt = aead_encrypt;
|
||||
cra->decrypt = aead_decrypt;
|
||||
cra->init = init_tfm_aead;
|
||||
cra->exit = exit_tfm_aead;
|
||||
|
||||
cra->base.cra_ctxsize = sizeof(struct ixp_ctx);
|
||||
cra->base.cra_module = THIS_MODULE;
|
||||
cra->base.cra_alignmask = 3;
|
||||
cra->base.cra_priority = 300;
|
||||
|
||||
if (crypto_register_aead(cra))
|
||||
printk(KERN_ERR "Failed to register '%s'\n",
|
||||
cra->base.cra_driver_name);
|
||||
else
|
||||
ixp4xx_aeads[i].registered = 1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
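
For illustration, a minimal sketch of how a caller reaches one of the AEAD
transforms registered above by crypto_register_aead() through the generic
crypto API. The helper name, the key format comment and the 12-byte authsize
are assumptions for the sketch, not taken from this commit:

#include <crypto/aead.h>
#include <linux/err.h>

static int example_get_ixp4xx_authenc(const u8 *key, unsigned int keylen)
{
	struct crypto_aead *tfm;
	int ret;

	tfm = crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* the key must already be packed in the authenc() combined format */
	ret = crypto_aead_setkey(tfm, key, keylen);
	if (!ret)
		ret = crypto_aead_setauthsize(tfm, 12);	/* truncated SHA-1 ICV */

	crypto_free_aead(tfm);
	return ret;
}
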
@ -1481,6 +1478,11 @@ static void __exit ixp_module_exit(void)
|
||||
int num = ARRAY_SIZE(ixp4xx_algos);
|
||||
int i;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
|
||||
if (ixp4xx_aeads[i].registered)
|
||||
crypto_unregister_aead(&ixp4xx_aeads[i].crypto);
|
||||
}
|
||||
|
||||
for (i=0; i< num; i++) {
|
||||
if (ixp4xx_algos[i].registered)
|
||||
crypto_unregister_alg(&ixp4xx_algos[i].crypto);
|
||||
|
@ -533,7 +533,6 @@ static struct platform_driver marvell_cesa = {
.probe = mv_cesa_probe,
.remove = mv_cesa_remove,
.driver = {
.owner = THIS_MODULE,
.name = "marvell-cesa",
.of_match_table = mv_cesa_of_match_table,
},
@ -14,11 +14,14 @@ config CRYPTO_DEV_NX_ENCRYPT
config CRYPTO_DEV_NX_COMPRESS
tristate "Compression acceleration support"
default y
select CRYPTO_ALGAPI
select 842_DECOMPRESS
help
Support for PowerPC Nest (NX) compression acceleration. This
module supports acceleration for compressing memory with the 842
algorithm. One of the platform drivers must be selected also.
If you choose 'M' here, this module will be called nx_compress.
algorithm using the cryptographic API. One of the platform
drivers must be selected also. If you choose 'M' here, this
module will be called nx_compress.

if CRYPTO_DEV_NX_COMPRESS

@ -42,14 +45,4 @@ config CRYPTO_DEV_NX_COMPRESS_POWERNV
|
||||
algorithm. This supports NX hardware on the PowerNV platform.
|
||||
If you choose 'M' here, this module will be called nx_compress_powernv.
|
||||
|
||||
config CRYPTO_DEV_NX_COMPRESS_CRYPTO
|
||||
tristate "Compression acceleration cryptographic interface"
|
||||
select CRYPTO_ALGAPI
|
||||
select 842_DECOMPRESS
|
||||
default y
|
||||
help
|
||||
Support for PowerPC Nest (NX) accelerators using the cryptographic
|
||||
API. If you choose 'M' here, this module will be called
|
||||
nx_compress_crypto.
|
||||
|
||||
endif
|
||||
|
@ -10,12 +10,8 @@ nx-crypto-objs := nx.o \
|
||||
nx-sha256.o \
|
||||
nx-sha512.o
|
||||
|
||||
obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS) += nx-compress.o nx-compress-platform.o
|
||||
obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_PSERIES) += nx-compress-pseries.o
|
||||
obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_POWERNV) += nx-compress-powernv.o
|
||||
obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_CRYPTO) += nx-compress-crypto.o
|
||||
obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_PSERIES) += nx-compress-pseries.o nx-compress.o
|
||||
obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_POWERNV) += nx-compress-powernv.o nx-compress.o
|
||||
nx-compress-objs := nx-842.o
|
||||
nx-compress-platform-objs := nx-842-platform.o
|
||||
nx-compress-pseries-objs := nx-842-pseries.o
|
||||
nx-compress-powernv-objs := nx-842-powernv.o
|
||||
nx-compress-crypto-objs := nx-842-crypto.o
|
||||
|
@ -1,580 +0,0 @@
|
||||
/*
|
||||
* Cryptographic API for the NX-842 hardware compression.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* Copyright (C) IBM Corporation, 2011-2015
|
||||
*
|
||||
* Original Authors: Robert Jennings <rcj@linux.vnet.ibm.com>
|
||||
* Seth Jennings <sjenning@linux.vnet.ibm.com>
|
||||
*
|
||||
* Rewrite: Dan Streetman <ddstreet@ieee.org>
|
||||
*
|
||||
* This is an interface to the NX-842 compression hardware in PowerPC
|
||||
* processors. Most of the complexity of this driver is due to the fact that
|
||||
* the NX-842 compression hardware requires the input and output data buffers
|
||||
* to be specifically aligned, to be a specific multiple in length, and within
|
||||
* specific minimum and maximum lengths. Those restrictions, provided by the
|
||||
* nx-842 driver via nx842_constraints, mean this driver must use bounce
|
||||
* buffers and headers to correct misaligned in or out buffers, and to split
|
||||
* input buffers that are too large.
|
||||
*
|
||||
* This driver will fall back to software decompression if the hardware
|
||||
* decompression fails, so this driver's decompression should never fail as
|
||||
* long as the provided compressed buffer is valid. Any compressed buffer
|
||||
* created by this driver will have a header (except ones where the input
|
||||
* perfectly matches the constraints); so users of this driver cannot simply
|
||||
* pass a compressed buffer created by this driver over to the 842 software
|
||||
* decompression library. Instead, users must use this driver to decompress;
|
||||
* if the hardware fails or is unavailable, the compressed buffer will be
|
||||
* parsed and the header removed, and the raw 842 buffer(s) passed to the 842
|
||||
* software decompression library.
|
||||
*
|
||||
* This does not fall back to software compression, however, since the caller
|
||||
* of this function is specifically requesting hardware compression; if the
|
||||
* hardware compression fails, the caller can fall back to software
|
||||
* compression, and the raw 842 compressed buffer that the software compressor
|
||||
* creates can be passed to this driver for hardware decompression; any
|
||||
* buffer without our specific header magic is assumed to be a raw 842 buffer
|
||||
* and passed directly to the hardware. Note that the software compression
|
||||
* library will produce a compressed buffer that is incompatible with the
|
||||
* hardware decompressor if the original input buffer length is not a multiple
|
||||
* of 8; if such a compressed buffer is passed to this driver for
|
||||
* decompression, the hardware will reject it and this driver will then pass
|
||||
* it over to the software library for decompression.
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
||||
|
||||
#include <linux/init.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/crypto.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <linux/sw842.h>
|
||||
#include <linux/ratelimit.h>
|
||||
|
||||
#include "nx-842.h"
|
||||
|
||||
/* The first 5 bits of this magic are 0x1f, which is an invalid 842 5-bit
|
||||
* template (see lib/842/842.h), so this magic number will never appear at
|
||||
* the start of a raw 842 compressed buffer. That is important, as any buffer
|
||||
* passed to us without this magic is assumed to be a raw 842 compressed
|
||||
* buffer, and passed directly to the hardware to decompress.
|
||||
*/
|
||||
#define NX842_CRYPTO_MAGIC (0xf842)
|
||||
#define NX842_CRYPTO_GROUP_MAX (0x20)
|
||||
#define NX842_CRYPTO_HEADER_SIZE(g) \
|
||||
(sizeof(struct nx842_crypto_header) + \
|
||||
sizeof(struct nx842_crypto_header_group) * (g))
|
||||
#define NX842_CRYPTO_HEADER_MAX_SIZE \
|
||||
NX842_CRYPTO_HEADER_SIZE(NX842_CRYPTO_GROUP_MAX)
|
||||
|
||||
/* bounce buffer size */
|
||||
#define BOUNCE_BUFFER_ORDER (2)
|
||||
#define BOUNCE_BUFFER_SIZE \
|
||||
((unsigned int)(PAGE_SIZE << BOUNCE_BUFFER_ORDER))
|
||||
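/* Worked example (assuming the common 4 KiB PAGE_SIZE, which this code does
 * not require): BOUNCE_BUFFER_SIZE = 4096 << 2 = 16384 bytes, i.e. each tfm
 * carries one 16 KiB source and one 16 KiB destination bounce buffer,
 * allocated in nx842_crypto_init() below.
 */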
|
||||
/* try longer on comp because we can fall back to sw decomp if hw is busy */
|
||||
#define COMP_BUSY_TIMEOUT (250) /* ms */
|
||||
#define DECOMP_BUSY_TIMEOUT (50) /* ms */
|
||||
|
||||
struct nx842_crypto_header_group {
|
||||
__be16 padding; /* unused bytes at start of group */
|
||||
__be32 compressed_length; /* compressed bytes in group */
|
||||
__be32 uncompressed_length; /* bytes after decompression */
|
||||
} __packed;
|
||||
|
||||
struct nx842_crypto_header {
|
||||
__be16 magic; /* NX842_CRYPTO_MAGIC */
|
||||
__be16 ignore; /* decompressed end bytes to ignore */
|
||||
u8 groups; /* total groups in this header */
|
||||
struct nx842_crypto_header_group group[];
|
||||
} __packed;
|
||||
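/* Size illustration, derived from the two packed structs above: each group
 * descriptor is 2 + 4 + 4 = 10 bytes and the fixed part is 2 + 2 + 1 = 5
 * bytes, so a buffer split into three groups carries
 * NX842_CRYPTO_HEADER_SIZE(3) = 5 + 3 * 10 = 35 bytes of header.
 */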
|
||||
struct nx842_crypto_param {
|
||||
u8 *in;
|
||||
unsigned int iremain;
|
||||
u8 *out;
|
||||
unsigned int oremain;
|
||||
unsigned int ototal;
|
||||
};
|
||||
|
||||
static int update_param(struct nx842_crypto_param *p,
|
||||
unsigned int slen, unsigned int dlen)
|
||||
{
|
||||
if (p->iremain < slen)
|
||||
return -EOVERFLOW;
|
||||
if (p->oremain < dlen)
|
||||
return -ENOSPC;
|
||||
|
||||
p->in += slen;
|
||||
p->iremain -= slen;
|
||||
p->out += dlen;
|
||||
p->oremain -= dlen;
|
||||
p->ototal += dlen;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct nx842_crypto_ctx {
|
||||
u8 *wmem;
|
||||
u8 *sbounce, *dbounce;
|
||||
|
||||
struct nx842_crypto_header header;
|
||||
struct nx842_crypto_header_group group[NX842_CRYPTO_GROUP_MAX];
|
||||
};
|
||||
|
||||
static int nx842_crypto_init(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
|
||||
ctx->wmem = kmalloc(nx842_workmem_size(), GFP_KERNEL);
|
||||
ctx->sbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER);
|
||||
ctx->dbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER);
|
||||
if (!ctx->wmem || !ctx->sbounce || !ctx->dbounce) {
|
||||
kfree(ctx->wmem);
|
||||
free_page((unsigned long)ctx->sbounce);
|
||||
free_page((unsigned long)ctx->dbounce);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void nx842_crypto_exit(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
|
||||
kfree(ctx->wmem);
|
||||
free_page((unsigned long)ctx->sbounce);
|
||||
free_page((unsigned long)ctx->dbounce);
|
||||
}
|
||||
|
||||
static int read_constraints(struct nx842_constraints *c)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = nx842_constraints(c);
|
||||
if (ret) {
|
||||
pr_err_ratelimited("could not get nx842 constraints : %d\n",
|
||||
ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* limit maximum, to always have enough bounce buffer to decompress */
|
||||
if (c->maximum > BOUNCE_BUFFER_SIZE) {
|
||||
c->maximum = BOUNCE_BUFFER_SIZE;
|
||||
pr_info_once("limiting nx842 maximum to %x\n", c->maximum);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int nx842_crypto_add_header(struct nx842_crypto_header *hdr, u8 *buf)
|
||||
{
|
||||
int s = NX842_CRYPTO_HEADER_SIZE(hdr->groups);
|
||||
|
||||
/* compress should have added space for header */
|
||||
if (s > be16_to_cpu(hdr->group[0].padding)) {
|
||||
pr_err("Internal error: no space for header\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
memcpy(buf, hdr, s);
|
||||
|
||||
print_hex_dump_debug("header ", DUMP_PREFIX_OFFSET, 16, 1, buf, s, 0);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int compress(struct nx842_crypto_ctx *ctx,
|
||||
struct nx842_crypto_param *p,
|
||||
struct nx842_crypto_header_group *g,
|
||||
struct nx842_constraints *c,
|
||||
u16 *ignore,
|
||||
unsigned int hdrsize)
|
||||
{
|
||||
unsigned int slen = p->iremain, dlen = p->oremain, tmplen;
|
||||
unsigned int adj_slen = slen;
|
||||
u8 *src = p->in, *dst = p->out;
|
||||
int ret, dskip = 0;
|
||||
ktime_t timeout;
|
||||
|
||||
if (p->iremain == 0)
|
||||
return -EOVERFLOW;
|
||||
|
||||
if (p->oremain == 0 || hdrsize + c->minimum > dlen)
|
||||
return -ENOSPC;
|
||||
|
||||
if (slen % c->multiple)
|
||||
adj_slen = round_up(slen, c->multiple);
|
||||
if (slen < c->minimum)
|
||||
adj_slen = c->minimum;
|
||||
if (slen > c->maximum)
|
||||
adj_slen = slen = c->maximum;
|
||||
if (adj_slen > slen || (u64)src % c->alignment) {
|
||||
adj_slen = min(adj_slen, BOUNCE_BUFFER_SIZE);
|
||||
slen = min(slen, BOUNCE_BUFFER_SIZE);
|
||||
if (adj_slen > slen)
|
||||
memset(ctx->sbounce + slen, 0, adj_slen - slen);
|
||||
memcpy(ctx->sbounce, src, slen);
|
||||
src = ctx->sbounce;
|
||||
slen = adj_slen;
|
||||
pr_debug("using comp sbounce buffer, len %x\n", slen);
|
||||
}
|
||||
|
||||
dst += hdrsize;
|
||||
dlen -= hdrsize;
|
||||
|
||||
if ((u64)dst % c->alignment) {
|
||||
dskip = (int)(PTR_ALIGN(dst, c->alignment) - dst);
|
||||
dst += dskip;
|
||||
dlen -= dskip;
|
||||
}
|
||||
if (dlen % c->multiple)
|
||||
dlen = round_down(dlen, c->multiple);
|
||||
if (dlen < c->minimum) {
|
||||
nospc:
|
||||
dst = ctx->dbounce;
|
||||
dlen = min(p->oremain, BOUNCE_BUFFER_SIZE);
|
||||
dlen = round_down(dlen, c->multiple);
|
||||
dskip = 0;
|
||||
pr_debug("using comp dbounce buffer, len %x\n", dlen);
|
||||
}
|
||||
if (dlen > c->maximum)
|
||||
dlen = c->maximum;
|
||||
|
||||
tmplen = dlen;
|
||||
timeout = ktime_add_ms(ktime_get(), COMP_BUSY_TIMEOUT);
|
||||
do {
|
||||
dlen = tmplen; /* reset dlen, if we're retrying */
|
||||
ret = nx842_compress(src, slen, dst, &dlen, ctx->wmem);
|
||||
/* possibly we should reduce the slen here, instead of
|
||||
* retrying with the dbounce buffer?
|
||||
*/
|
||||
if (ret == -ENOSPC && dst != ctx->dbounce)
|
||||
goto nospc;
|
||||
} while (ret == -EBUSY && ktime_before(ktime_get(), timeout));
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
dskip += hdrsize;
|
||||
|
||||
if (dst == ctx->dbounce)
|
||||
memcpy(p->out + dskip, dst, dlen);
|
||||
|
||||
g->padding = cpu_to_be16(dskip);
|
||||
g->compressed_length = cpu_to_be32(dlen);
|
||||
g->uncompressed_length = cpu_to_be32(slen);
|
||||
|
||||
if (p->iremain < slen) {
|
||||
*ignore = slen - p->iremain;
|
||||
slen = p->iremain;
|
||||
}
|
||||
|
||||
pr_debug("compress slen %x ignore %x dlen %x padding %x\n",
|
||||
slen, *ignore, dlen, dskip);
|
||||
|
||||
return update_param(p, slen, dskip + dlen);
|
||||
}
|
||||
|
||||
static int nx842_crypto_compress(struct crypto_tfm *tfm,
|
||||
const u8 *src, unsigned int slen,
|
||||
u8 *dst, unsigned int *dlen)
|
||||
{
|
||||
struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
struct nx842_crypto_header *hdr = &ctx->header;
|
||||
struct nx842_crypto_param p;
|
||||
struct nx842_constraints c;
|
||||
unsigned int groups, hdrsize, h;
|
||||
int ret, n;
|
||||
bool add_header;
|
||||
u16 ignore = 0;
|
||||
|
||||
p.in = (u8 *)src;
|
||||
p.iremain = slen;
|
||||
p.out = dst;
|
||||
p.oremain = *dlen;
|
||||
p.ototal = 0;
|
||||
|
||||
*dlen = 0;
|
||||
|
||||
ret = read_constraints(&c);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
groups = min_t(unsigned int, NX842_CRYPTO_GROUP_MAX,
|
||||
DIV_ROUND_UP(p.iremain, c.maximum));
|
||||
hdrsize = NX842_CRYPTO_HEADER_SIZE(groups);
|
||||
|
||||
/* skip adding header if the buffers meet all constraints */
|
||||
add_header = (p.iremain % c.multiple ||
|
||||
p.iremain < c.minimum ||
|
||||
p.iremain > c.maximum ||
|
||||
(u64)p.in % c.alignment ||
|
||||
p.oremain % c.multiple ||
|
||||
p.oremain < c.minimum ||
|
||||
p.oremain > c.maximum ||
|
||||
(u64)p.out % c.alignment);
|
||||
|
||||
hdr->magic = cpu_to_be16(NX842_CRYPTO_MAGIC);
|
||||
hdr->groups = 0;
|
||||
hdr->ignore = 0;
|
||||
|
||||
while (p.iremain > 0) {
|
||||
n = hdr->groups++;
|
||||
if (hdr->groups > NX842_CRYPTO_GROUP_MAX)
|
||||
return -ENOSPC;
|
||||
|
||||
/* header goes before first group */
|
||||
h = !n && add_header ? hdrsize : 0;
|
||||
|
||||
if (ignore)
|
||||
pr_warn("interal error, ignore is set %x\n", ignore);
|
||||
|
||||
ret = compress(ctx, &p, &hdr->group[n], &c, &ignore, h);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (!add_header && hdr->groups > 1) {
|
||||
pr_err("Internal error: No header but multiple groups\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* ignore indicates the input stream needed to be padded */
|
||||
hdr->ignore = cpu_to_be16(ignore);
|
||||
if (ignore)
|
||||
pr_debug("marked %d bytes as ignore\n", ignore);
|
||||
|
||||
if (add_header)
|
||||
ret = nx842_crypto_add_header(hdr, dst);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
*dlen = p.ototal;
|
||||
|
||||
pr_debug("compress total slen %x dlen %x\n", slen, *dlen);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int decompress(struct nx842_crypto_ctx *ctx,
|
||||
struct nx842_crypto_param *p,
|
||||
struct nx842_crypto_header_group *g,
|
||||
struct nx842_constraints *c,
|
||||
u16 ignore,
|
||||
bool usehw)
|
||||
{
|
||||
unsigned int slen = be32_to_cpu(g->compressed_length);
|
||||
unsigned int required_len = be32_to_cpu(g->uncompressed_length);
|
||||
unsigned int dlen = p->oremain, tmplen;
|
||||
unsigned int adj_slen = slen;
|
||||
u8 *src = p->in, *dst = p->out;
|
||||
u16 padding = be16_to_cpu(g->padding);
|
||||
int ret, spadding = 0, dpadding = 0;
|
||||
ktime_t timeout;
|
||||
|
||||
if (!slen || !required_len)
|
||||
return -EINVAL;
|
||||
|
||||
if (p->iremain <= 0 || padding + slen > p->iremain)
|
||||
return -EOVERFLOW;
|
||||
|
||||
if (p->oremain <= 0 || required_len - ignore > p->oremain)
|
||||
return -ENOSPC;
|
||||
|
||||
src += padding;
|
||||
|
||||
if (!usehw)
|
||||
goto usesw;
|
||||
|
||||
if (slen % c->multiple)
|
||||
adj_slen = round_up(slen, c->multiple);
|
||||
if (slen < c->minimum)
|
||||
adj_slen = c->minimum;
|
||||
if (slen > c->maximum)
|
||||
goto usesw;
|
||||
if (slen < adj_slen || (u64)src % c->alignment) {
|
||||
/* we can append padding bytes because the 842 format defines
|
||||
* an "end" template (see lib/842/842_decompress.c) and will
|
||||
* ignore any bytes following it.
|
||||
*/
|
||||
if (slen < adj_slen)
|
||||
memset(ctx->sbounce + slen, 0, adj_slen - slen);
|
||||
memcpy(ctx->sbounce, src, slen);
|
||||
src = ctx->sbounce;
|
||||
spadding = adj_slen - slen;
|
||||
slen = adj_slen;
|
||||
pr_debug("using decomp sbounce buffer, len %x\n", slen);
|
||||
}
|
||||
|
||||
if (dlen % c->multiple)
|
||||
dlen = round_down(dlen, c->multiple);
|
||||
if (dlen < required_len || (u64)dst % c->alignment) {
|
||||
dst = ctx->dbounce;
|
||||
dlen = min(required_len, BOUNCE_BUFFER_SIZE);
|
||||
pr_debug("using decomp dbounce buffer, len %x\n", dlen);
|
||||
}
|
||||
if (dlen < c->minimum)
|
||||
goto usesw;
|
||||
if (dlen > c->maximum)
|
||||
dlen = c->maximum;
|
||||
|
||||
tmplen = dlen;
|
||||
timeout = ktime_add_ms(ktime_get(), DECOMP_BUSY_TIMEOUT);
|
||||
do {
|
||||
dlen = tmplen; /* reset dlen, if we're retrying */
|
||||
ret = nx842_decompress(src, slen, dst, &dlen, ctx->wmem);
|
||||
} while (ret == -EBUSY && ktime_before(ktime_get(), timeout));
|
||||
if (ret) {
|
||||
usesw:
|
||||
/* reset everything, sw doesn't have constraints */
|
||||
src = p->in + padding;
|
||||
slen = be32_to_cpu(g->compressed_length);
|
||||
spadding = 0;
|
||||
dst = p->out;
|
||||
dlen = p->oremain;
|
||||
dpadding = 0;
|
||||
if (dlen < required_len) { /* have ignore bytes */
|
||||
dst = ctx->dbounce;
|
||||
dlen = BOUNCE_BUFFER_SIZE;
|
||||
}
|
||||
pr_info_ratelimited("using software 842 decompression\n");
|
||||
ret = sw842_decompress(src, slen, dst, &dlen);
|
||||
}
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
slen -= spadding;
|
||||
|
||||
dlen -= ignore;
|
||||
if (ignore)
|
||||
pr_debug("ignoring last %x bytes\n", ignore);
|
||||
|
||||
if (dst == ctx->dbounce)
|
||||
memcpy(p->out, dst, dlen);
|
||||
|
||||
pr_debug("decompress slen %x padding %x dlen %x ignore %x\n",
|
||||
slen, padding, dlen, ignore);
|
||||
|
||||
return update_param(p, slen + padding, dlen);
|
||||
}
|
||||
|
||||
static int nx842_crypto_decompress(struct crypto_tfm *tfm,
|
||||
const u8 *src, unsigned int slen,
|
||||
u8 *dst, unsigned int *dlen)
|
||||
{
|
||||
struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
struct nx842_crypto_header *hdr;
|
||||
struct nx842_crypto_param p;
|
||||
struct nx842_constraints c;
|
||||
int n, ret, hdr_len;
|
||||
u16 ignore = 0;
|
||||
bool usehw = true;
|
||||
|
||||
p.in = (u8 *)src;
|
||||
p.iremain = slen;
|
||||
p.out = dst;
|
||||
p.oremain = *dlen;
|
||||
p.ototal = 0;
|
||||
|
||||
*dlen = 0;
|
||||
|
||||
if (read_constraints(&c))
|
||||
usehw = false;
|
||||
|
||||
hdr = (struct nx842_crypto_header *)src;
|
||||
|
||||
/* If it doesn't start with our header magic number, assume it's a raw
|
||||
* 842 compressed buffer and pass it directly to the hardware driver
|
||||
*/
|
||||
if (be16_to_cpu(hdr->magic) != NX842_CRYPTO_MAGIC) {
|
||||
struct nx842_crypto_header_group g = {
|
||||
.padding = 0,
|
||||
.compressed_length = cpu_to_be32(p.iremain),
|
||||
.uncompressed_length = cpu_to_be32(p.oremain),
|
||||
};
|
||||
|
||||
ret = decompress(ctx, &p, &g, &c, 0, usehw);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
*dlen = p.ototal;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (!hdr->groups) {
|
||||
pr_err("header has no groups\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
if (hdr->groups > NX842_CRYPTO_GROUP_MAX) {
|
||||
pr_err("header has too many groups %x, max %x\n",
|
||||
hdr->groups, NX842_CRYPTO_GROUP_MAX);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
hdr_len = NX842_CRYPTO_HEADER_SIZE(hdr->groups);
|
||||
if (hdr_len > slen)
|
||||
return -EOVERFLOW;
|
||||
|
||||
memcpy(&ctx->header, src, hdr_len);
|
||||
hdr = &ctx->header;
|
||||
|
||||
for (n = 0; n < hdr->groups; n++) {
|
||||
/* ignore applies to last group */
|
||||
if (n + 1 == hdr->groups)
|
||||
ignore = be16_to_cpu(hdr->ignore);
|
||||
|
||||
ret = decompress(ctx, &p, &hdr->group[n], &c, ignore, usehw);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
*dlen = p.ototal;
|
||||
|
||||
pr_debug("decompress total slen %x dlen %x\n", slen, *dlen);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct crypto_alg alg = {
|
||||
.cra_name = "842",
|
||||
.cra_driver_name = "842-nx",
|
||||
.cra_priority = 300,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
|
||||
.cra_ctxsize = sizeof(struct nx842_crypto_ctx),
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_init = nx842_crypto_init,
|
||||
.cra_exit = nx842_crypto_exit,
|
||||
.cra_u = { .compress = {
|
||||
.coa_compress = nx842_crypto_compress,
|
||||
.coa_decompress = nx842_crypto_decompress } }
|
||||
};
|
||||
|
||||
static int __init nx842_crypto_mod_init(void)
|
||||
{
|
||||
return crypto_register_alg(&alg);
|
||||
}
|
||||
module_init(nx842_crypto_mod_init);
|
||||
|
||||
static void __exit nx842_crypto_mod_exit(void)
|
||||
{
|
||||
crypto_unregister_alg(&alg);
|
||||
}
|
||||
module_exit(nx842_crypto_mod_exit);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_DESCRIPTION("IBM PowerPC Nest (NX) 842 Hardware Compression Interface");
|
||||
MODULE_ALIAS_CRYPTO("842");
|
||||
MODULE_ALIAS_CRYPTO("842-nx");
|
||||
MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
|
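
The "842" algorithm that the removed nx-842-crypto.c used to register (and that
this commit re-registers from the powernv and pseries platform drivers) is
reached through the kernel's crypto_comp API. A minimal sketch of a compression
call, assuming caller-supplied buffers; the helper name is illustrative only:

#include <linux/crypto.h>
#include <linux/err.h>

static int example_842_compress(const u8 *src, unsigned int slen,
				u8 *dst, unsigned int dlen)
{
	struct crypto_comp *tfm;
	int ret;

	tfm = crypto_alloc_comp("842", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* on success, dlen is updated to the compressed length */
	ret = crypto_comp_compress(tfm, src, slen, dst, &dlen);

	crypto_free_comp(tfm);
	return ret;
}
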
@ -1,84 +0,0 @@
|
||||
|
||||
#include "nx-842.h"
|
||||
|
||||
/* this is needed, separate from the main nx-842.c driver, because that main
|
||||
* driver loads the platform drivers during its init(), and it expects one
|
||||
* (or none) of the platform drivers to set this pointer to its driver.
|
||||
* That means this pointer can't be in the main nx-842 driver, because it
|
||||
* wouldn't be accessible until after the main driver loaded, which wouldn't
|
||||
* be possible as it's waiting for the platform driver to load. So place it
|
||||
* here.
|
||||
*/
|
||||
static struct nx842_driver *driver;
|
||||
static DEFINE_SPINLOCK(driver_lock);
|
||||
|
||||
struct nx842_driver *nx842_platform_driver(void)
|
||||
{
|
||||
return driver;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(nx842_platform_driver);
|
||||
|
||||
bool nx842_platform_driver_set(struct nx842_driver *_driver)
|
||||
{
|
||||
bool ret = false;
|
||||
|
||||
spin_lock(&driver_lock);
|
||||
|
||||
if (!driver) {
|
||||
driver = _driver;
|
||||
ret = true;
|
||||
} else
|
||||
WARN(1, "can't set platform driver, already set to %s\n",
|
||||
driver->name);
|
||||
|
||||
spin_unlock(&driver_lock);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(nx842_platform_driver_set);
|
||||
|
||||
/* only call this from the platform driver exit function */
|
||||
void nx842_platform_driver_unset(struct nx842_driver *_driver)
|
||||
{
|
||||
spin_lock(&driver_lock);
|
||||
|
||||
if (driver == _driver)
|
||||
driver = NULL;
|
||||
else if (driver)
|
||||
WARN(1, "can't unset platform driver %s, currently set to %s\n",
|
||||
_driver->name, driver->name);
|
||||
else
|
||||
WARN(1, "can't unset platform driver, already unset\n");
|
||||
|
||||
spin_unlock(&driver_lock);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(nx842_platform_driver_unset);
|
||||
|
||||
bool nx842_platform_driver_get(void)
|
||||
{
|
||||
bool ret = false;
|
||||
|
||||
spin_lock(&driver_lock);
|
||||
|
||||
if (driver)
|
||||
ret = try_module_get(driver->owner);
|
||||
|
||||
spin_unlock(&driver_lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(nx842_platform_driver_get);
|
||||
|
||||
void nx842_platform_driver_put(void)
|
||||
{
|
||||
spin_lock(&driver_lock);
|
||||
|
||||
if (driver)
|
||||
module_put(driver->owner);
|
||||
|
||||
spin_unlock(&driver_lock);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(nx842_platform_driver_put);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
|
||||
MODULE_DESCRIPTION("842 H/W Compression platform driver");
|
@ -26,6 +26,8 @@
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
|
||||
MODULE_DESCRIPTION("842 H/W Compression driver for IBM PowerNV processors");
|
||||
MODULE_ALIAS_CRYPTO("842");
|
||||
MODULE_ALIAS_CRYPTO("842-nx");
|
||||
|
||||
#define WORKMEM_ALIGN (CRB_ALIGN)
|
||||
#define CSB_WAIT_MAX (5000) /* ms */
|
||||
@ -344,7 +346,8 @@ static int wait_for_csb(struct nx842_workmem *wmem,
|
||||
}
|
||||
|
||||
/* successful completion */
|
||||
pr_debug_ratelimited("Processed %u bytes in %lu us\n", csb->count,
|
||||
pr_debug_ratelimited("Processed %u bytes in %lu us\n",
|
||||
be32_to_cpu(csb->count),
|
||||
(unsigned long)ktime_us_delta(now, start));
|
||||
|
||||
return 0;
|
||||
@ -581,9 +584,29 @@ static struct nx842_driver nx842_powernv_driver = {
|
||||
.decompress = nx842_powernv_decompress,
|
||||
};
|
||||
|
||||
static int nx842_powernv_crypto_init(struct crypto_tfm *tfm)
|
||||
{
|
||||
return nx842_crypto_init(tfm, &nx842_powernv_driver);
|
||||
}
|
||||
|
||||
static struct crypto_alg nx842_powernv_alg = {
|
||||
.cra_name = "842",
|
||||
.cra_driver_name = "842-nx",
|
||||
.cra_priority = 300,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
|
||||
.cra_ctxsize = sizeof(struct nx842_crypto_ctx),
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_init = nx842_powernv_crypto_init,
|
||||
.cra_exit = nx842_crypto_exit,
|
||||
.cra_u = { .compress = {
|
||||
.coa_compress = nx842_crypto_compress,
|
||||
.coa_decompress = nx842_crypto_decompress } }
|
||||
};
|
||||
|
||||
static __init int nx842_powernv_init(void)
|
||||
{
|
||||
struct device_node *dn;
|
||||
int ret;
|
||||
|
||||
/* verify workmem size/align restrictions */
|
||||
BUILD_BUG_ON(WORKMEM_ALIGN % CRB_ALIGN);
|
||||
@ -594,17 +617,14 @@ static __init int nx842_powernv_init(void)
|
||||
BUILD_BUG_ON(DDE_BUFFER_ALIGN % DDE_BUFFER_SIZE_MULT);
|
||||
BUILD_BUG_ON(DDE_BUFFER_SIZE_MULT % DDE_BUFFER_LAST_MULT);
|
||||
|
||||
pr_info("loading\n");
|
||||
|
||||
for_each_compatible_node(dn, NULL, "ibm,power-nx")
|
||||
nx842_powernv_probe(dn);
|
||||
|
||||
if (!nx842_ct) {
|
||||
pr_err("no coprocessors found\n");
|
||||
if (!nx842_ct)
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
if (!nx842_platform_driver_set(&nx842_powernv_driver)) {
|
||||
ret = crypto_register_alg(&nx842_powernv_alg);
|
||||
if (ret) {
|
||||
struct nx842_coproc *coproc, *n;
|
||||
|
||||
list_for_each_entry_safe(coproc, n, &nx842_coprocs, list) {
|
||||
@ -612,11 +632,9 @@ static __init int nx842_powernv_init(void)
|
||||
kfree(coproc);
|
||||
}
|
||||
|
||||
return -EEXIST;
|
||||
return ret;
|
||||
}
|
||||
|
||||
pr_info("loaded\n");
|
||||
|
||||
return 0;
|
||||
}
|
||||
module_init(nx842_powernv_init);
|
||||
@ -625,13 +643,11 @@ static void __exit nx842_powernv_exit(void)
|
||||
{
|
||||
struct nx842_coproc *coproc, *n;
|
||||
|
||||
nx842_platform_driver_unset(&nx842_powernv_driver);
|
||||
crypto_unregister_alg(&nx842_powernv_alg);
|
||||
|
||||
list_for_each_entry_safe(coproc, n, &nx842_coprocs, list) {
|
||||
list_del(&coproc->list);
|
||||
kfree(coproc);
|
||||
}
|
||||
|
||||
pr_info("unloaded\n");
|
||||
}
|
||||
module_exit(nx842_powernv_exit);
|
||||
|
@ -29,6 +29,8 @@
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_AUTHOR("Robert Jennings <rcj@linux.vnet.ibm.com>");
|
||||
MODULE_DESCRIPTION("842 H/W Compression driver for IBM Power processors");
|
||||
MODULE_ALIAS_CRYPTO("842");
|
||||
MODULE_ALIAS_CRYPTO("842-nx");
|
||||
|
||||
static struct nx842_constraints nx842_pseries_constraints = {
|
||||
.alignment = DDE_BUFFER_ALIGN,
|
||||
@ -99,11 +101,6 @@ struct nx842_workmem {
|
||||
#define NX842_HW_PAGE_SIZE (4096)
|
||||
#define NX842_HW_PAGE_MASK (~(NX842_HW_PAGE_SIZE-1))
|
||||
|
||||
enum nx842_status {
|
||||
UNAVAILABLE,
|
||||
AVAILABLE
|
||||
};
|
||||
|
||||
struct ibm_nx842_counters {
|
||||
atomic64_t comp_complete;
|
||||
atomic64_t comp_failed;
|
||||
@ -121,7 +118,6 @@ static struct nx842_devdata {
|
||||
unsigned int max_sg_len;
|
||||
unsigned int max_sync_size;
|
||||
unsigned int max_sync_sg;
|
||||
enum nx842_status status;
|
||||
} __rcu *devdata;
|
||||
static DEFINE_SPINLOCK(devdata_mutex);
|
||||
|
||||
@ -230,9 +226,12 @@ static int nx842_validate_result(struct device *dev,
|
||||
switch (csb->completion_code) {
|
||||
case 0: /* Completed without error */
|
||||
break;
|
||||
case 64: /* Target bytes > Source bytes during compression */
|
||||
case 64: /* Compression ok, but output larger than input */
|
||||
dev_dbg(dev, "%s: output size larger than input size\n",
|
||||
__func__);
|
||||
break;
|
||||
case 13: /* Output buffer too small */
|
||||
dev_dbg(dev, "%s: Compression output larger than input\n",
|
||||
dev_dbg(dev, "%s: Out of space in output buffer\n",
|
||||
__func__);
|
||||
return -ENOSPC;
|
||||
case 66: /* Input data contains an illegal template field */
|
||||
@ -537,41 +536,36 @@ static int nx842_OF_set_defaults(struct nx842_devdata *devdata)
|
||||
devdata->max_sync_size = 0;
|
||||
devdata->max_sync_sg = 0;
|
||||
devdata->max_sg_len = 0;
|
||||
devdata->status = UNAVAILABLE;
|
||||
return 0;
|
||||
} else
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
/**
|
||||
* nx842_OF_upd_status -- Update the device info from OF status prop
|
||||
* nx842_OF_upd_status -- Check the device info from OF status prop
|
||||
*
|
||||
* The status property indicates if the accelerator is enabled. If the
|
||||
* device is in the OF tree it indicates that the hardware is present.
|
||||
* The status field indicates if the device is enabled when the status
|
||||
* is 'okay'. Otherwise the device driver will be disabled.
|
||||
*
|
||||
* @devdata - struct nx842_devdata to update
|
||||
* @prop - struct property point containing the maxsyncop for the update
|
||||
*
|
||||
* Returns:
|
||||
* 0 - Device is available
|
||||
* -EINVAL - Device is not available
|
||||
* -ENODEV - Device is not available
|
||||
*/
|
||||
static int nx842_OF_upd_status(struct nx842_devdata *devdata,
|
||||
struct property *prop) {
|
||||
int ret = 0;
|
||||
static int nx842_OF_upd_status(struct property *prop)
|
||||
{
|
||||
const char *status = (const char *)prop->value;
|
||||
|
||||
if (!strncmp(status, "okay", (size_t)prop->length)) {
|
||||
devdata->status = AVAILABLE;
|
||||
} else {
|
||||
dev_info(devdata->dev, "%s: status '%s' is not 'okay'\n",
|
||||
__func__, status);
|
||||
devdata->status = UNAVAILABLE;
|
||||
}
|
||||
if (!strncmp(status, "okay", (size_t)prop->length))
|
||||
return 0;
|
||||
if (!strncmp(status, "disabled", (size_t)prop->length))
|
||||
return -ENODEV;
|
||||
dev_info(devdata->dev, "%s: unknown status '%s'\n", __func__, status);
|
||||
|
||||
return ret;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -735,6 +729,10 @@ static int nx842_OF_upd(struct property *new_prop)
|
||||
int ret = 0;
|
||||
unsigned long flags;
|
||||
|
||||
new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS);
|
||||
if (!new_devdata)
|
||||
return -ENOMEM;
|
||||
|
||||
spin_lock_irqsave(&devdata_mutex, flags);
|
||||
old_devdata = rcu_dereference_check(devdata,
|
||||
lockdep_is_held(&devdata_mutex));
|
||||
@ -744,16 +742,10 @@ static int nx842_OF_upd(struct property *new_prop)
|
||||
if (!old_devdata || !of_node) {
|
||||
pr_err("%s: device is not available\n", __func__);
|
||||
spin_unlock_irqrestore(&devdata_mutex, flags);
|
||||
kfree(new_devdata);
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS);
|
||||
if (!new_devdata) {
|
||||
dev_err(old_devdata->dev, "%s: Could not allocate memory for device data\n", __func__);
|
||||
ret = -ENOMEM;
|
||||
goto error_out;
|
||||
}
|
||||
|
||||
memcpy(new_devdata, old_devdata, sizeof(*old_devdata));
|
||||
new_devdata->counters = old_devdata->counters;
|
||||
|
||||
@ -777,7 +769,7 @@ static int nx842_OF_upd(struct property *new_prop)
|
||||
goto out;
|
||||
|
||||
/* Perform property updates */
|
||||
ret = nx842_OF_upd_status(new_devdata, status);
|
||||
ret = nx842_OF_upd_status(status);
|
||||
if (ret)
|
||||
goto error_out;
|
||||
|
||||
@ -970,13 +962,43 @@ static struct nx842_driver nx842_pseries_driver = {
|
||||
.decompress = nx842_pseries_decompress,
|
||||
};
|
||||
|
||||
static int __init nx842_probe(struct vio_dev *viodev,
|
||||
const struct vio_device_id *id)
|
||||
static int nx842_pseries_crypto_init(struct crypto_tfm *tfm)
|
||||
{
|
||||
return nx842_crypto_init(tfm, &nx842_pseries_driver);
|
||||
}
|
||||
|
||||
static struct crypto_alg nx842_pseries_alg = {
|
||||
.cra_name = "842",
|
||||
.cra_driver_name = "842-nx",
|
||||
.cra_priority = 300,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
|
||||
.cra_ctxsize = sizeof(struct nx842_crypto_ctx),
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_init = nx842_pseries_crypto_init,
|
||||
.cra_exit = nx842_crypto_exit,
|
||||
.cra_u = { .compress = {
|
||||
.coa_compress = nx842_crypto_compress,
|
||||
.coa_decompress = nx842_crypto_decompress } }
|
||||
};
|
||||
|
||||
static int nx842_probe(struct vio_dev *viodev,
|
||||
const struct vio_device_id *id)
|
||||
{
|
||||
struct nx842_devdata *old_devdata, *new_devdata = NULL;
|
||||
unsigned long flags;
|
||||
int ret = 0;
|
||||
|
||||
new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS);
|
||||
if (!new_devdata)
|
||||
return -ENOMEM;
|
||||
|
||||
new_devdata->counters = kzalloc(sizeof(*new_devdata->counters),
|
||||
GFP_NOFS);
|
||||
if (!new_devdata->counters) {
|
||||
kfree(new_devdata);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&devdata_mutex, flags);
|
||||
old_devdata = rcu_dereference_check(devdata,
|
||||
lockdep_is_held(&devdata_mutex));
|
||||
@ -989,21 +1011,6 @@ static int __init nx842_probe(struct vio_dev *viodev,
|
||||
|
||||
dev_set_drvdata(&viodev->dev, NULL);
|
||||
|
||||
new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS);
|
||||
if (!new_devdata) {
|
||||
dev_err(&viodev->dev, "%s: Could not allocate memory for device data\n", __func__);
|
||||
ret = -ENOMEM;
|
||||
goto error_unlock;
|
||||
}
|
||||
|
||||
new_devdata->counters = kzalloc(sizeof(*new_devdata->counters),
|
||||
GFP_NOFS);
|
||||
if (!new_devdata->counters) {
|
||||
dev_err(&viodev->dev, "%s: Could not allocate memory for performance counters\n", __func__);
|
||||
ret = -ENOMEM;
|
||||
goto error_unlock;
|
||||
}
|
||||
|
||||
new_devdata->vdev = viodev;
|
||||
new_devdata->dev = &viodev->dev;
|
||||
nx842_OF_set_defaults(new_devdata);
|
||||
@ -1016,9 +1023,12 @@ static int __init nx842_probe(struct vio_dev *viodev,
|
||||
of_reconfig_notifier_register(&nx842_of_nb);
|
||||
|
||||
ret = nx842_OF_upd(NULL);
|
||||
if (ret && ret != -ENODEV) {
|
||||
dev_err(&viodev->dev, "could not parse device tree. %d\n", ret);
|
||||
ret = -1;
|
||||
if (ret)
|
||||
goto error;
|
||||
|
||||
ret = crypto_register_alg(&nx842_pseries_alg);
|
||||
if (ret) {
|
||||
dev_err(&viodev->dev, "could not register comp alg: %d\n", ret);
|
||||
goto error;
|
||||
}
|
||||
|
||||
@ -1043,7 +1053,7 @@ static int __init nx842_probe(struct vio_dev *viodev,
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int __exit nx842_remove(struct vio_dev *viodev)
|
||||
static int nx842_remove(struct vio_dev *viodev)
|
||||
{
|
||||
struct nx842_devdata *old_devdata;
|
||||
unsigned long flags;
|
||||
@ -1051,6 +1061,8 @@ static int __exit nx842_remove(struct vio_dev *viodev)
|
||||
pr_info("Removing IBM Power 842 compression device\n");
|
||||
sysfs_remove_group(&viodev->dev.kobj, &nx842_attribute_group);
|
||||
|
||||
crypto_unregister_alg(&nx842_pseries_alg);
|
||||
|
||||
spin_lock_irqsave(&devdata_mutex, flags);
|
||||
old_devdata = rcu_dereference_check(devdata,
|
||||
lockdep_is_held(&devdata_mutex));
|
||||
@ -1074,18 +1086,16 @@ static struct vio_device_id nx842_vio_driver_ids[] = {
|
||||
static struct vio_driver nx842_vio_driver = {
|
||||
.name = KBUILD_MODNAME,
|
||||
.probe = nx842_probe,
|
||||
.remove = __exit_p(nx842_remove),
|
||||
.remove = nx842_remove,
|
||||
.get_desired_dma = nx842_get_desired_dma,
|
||||
.id_table = nx842_vio_driver_ids,
|
||||
};
|
||||
|
||||
static int __init nx842_init(void)
|
||||
static int __init nx842_pseries_init(void)
|
||||
{
|
||||
struct nx842_devdata *new_devdata;
|
||||
int ret;
|
||||
|
||||
pr_info("Registering IBM Power 842 compression driver\n");
|
||||
|
||||
if (!of_find_compatible_node(NULL, NULL, "ibm,compression"))
|
||||
return -ENODEV;
|
||||
|
||||
@ -1095,7 +1105,6 @@ static int __init nx842_init(void)
|
||||
pr_err("Could not allocate memory for device data\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
new_devdata->status = UNAVAILABLE;
|
||||
RCU_INIT_POINTER(devdata, new_devdata);
|
||||
|
||||
ret = vio_register_driver(&nx842_vio_driver);
|
||||
@ -1106,24 +1115,18 @@ static int __init nx842_init(void)
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (!nx842_platform_driver_set(&nx842_pseries_driver)) {
|
||||
vio_unregister_driver(&nx842_vio_driver);
|
||||
kfree(new_devdata);
|
||||
return -EEXIST;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
module_init(nx842_init);
|
||||
module_init(nx842_pseries_init);
|
||||
|
||||
static void __exit nx842_exit(void)
|
||||
static void __exit nx842_pseries_exit(void)
|
||||
{
|
||||
struct nx842_devdata *old_devdata;
|
||||
unsigned long flags;
|
||||
|
||||
pr_info("Exiting IBM Power 842 compression driver\n");
|
||||
nx842_platform_driver_unset(&nx842_pseries_driver);
|
||||
crypto_unregister_alg(&nx842_pseries_alg);
|
||||
|
||||
spin_lock_irqsave(&devdata_mutex, flags);
|
||||
old_devdata = rcu_dereference_check(devdata,
|
||||
lockdep_is_held(&devdata_mutex));
|
||||
@ -1136,5 +1139,5 @@ static void __exit nx842_exit(void)
|
||||
vio_unregister_driver(&nx842_vio_driver);
|
||||
}
|
||||
|
||||
module_exit(nx842_exit);
|
||||
module_exit(nx842_pseries_exit);
|
||||
|
||||
|
@ -1,10 +1,5 @@
|
||||
/*
|
||||
* Driver frontend for IBM Power 842 compression accelerator
|
||||
*
|
||||
* Copyright (C) 2015 Dan Streetman, IBM Corp
|
||||
*
|
||||
* Designer of the Power data compression engine:
|
||||
* Bulent Abali <abali@us.ibm.com>
|
||||
* Cryptographic API for the NX-842 hardware compression.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
@ -15,89 +10,522 @@
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* Copyright (C) IBM Corporation, 2011-2015
|
||||
*
|
||||
* Designer of the Power data compression engine:
|
||||
* Bulent Abali <abali@us.ibm.com>
|
||||
*
|
||||
* Original Authors: Robert Jennings <rcj@linux.vnet.ibm.com>
|
||||
* Seth Jennings <sjenning@linux.vnet.ibm.com>
|
||||
*
|
||||
* Rewrite: Dan Streetman <ddstreet@ieee.org>
|
||||
*
|
||||
* This is an interface to the NX-842 compression hardware in PowerPC
|
||||
* processors. Most of the complexity of this driver is due to the fact that
|
||||
* the NX-842 compression hardware requires the input and output data buffers
|
||||
* to be specifically aligned, to be a specific multiple in length, and within
|
||||
* specific minimum and maximum lengths. Those restrictions, provided by the
|
||||
* nx-842 driver via nx842_constraints, mean this driver must use bounce
|
||||
* buffers and headers to correct misaligned in or out buffers, and to split
|
||||
* input buffers that are too large.
|
||||
*
|
||||
* This driver will fall back to software decompression if the hardware
|
||||
* decompression fails, so this driver's decompression should never fail as
|
||||
* long as the provided compressed buffer is valid. Any compressed buffer
|
||||
* created by this driver will have a header (except ones where the input
|
||||
* perfectly matches the constraints); so users of this driver cannot simply
|
||||
* pass a compressed buffer created by this driver over to the 842 software
|
||||
* decompression library. Instead, users must use this driver to decompress;
|
||||
* if the hardware fails or is unavailable, the compressed buffer will be
|
||||
* parsed and the header removed, and the raw 842 buffer(s) passed to the 842
|
||||
* software decompression library.
|
||||
*
|
||||
* This does not fall back to software compression, however, since the caller
|
||||
* of this function is specifically requesting hardware compression; if the
|
||||
* hardware compression fails, the caller can fall back to software
|
||||
* compression, and the raw 842 compressed buffer that the software compressor
|
||||
* creates can be passed to this driver for hardware decompression; any
|
||||
* buffer without our specific header magic is assumed to be a raw 842 buffer
|
||||
* and passed directly to the hardware. Note that the software compression
|
||||
* library will produce a compressed buffer that is incompatible with the
|
||||
* hardware decompressor if the original input buffer length is not a multiple
|
||||
* of 8; if such a compressed buffer is passed to this driver for
|
||||
* decompression, the hardware will reject it and this driver will then pass
|
||||
* it over to the software library for decompression.
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
||||
|
||||
#include <linux/vmalloc.h>
|
||||
#include <linux/sw842.h>
|
||||
#include <linux/spinlock.h>
|
||||
|
||||
#include "nx-842.h"
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
|
||||
MODULE_DESCRIPTION("842 H/W Compression driver for IBM Power processors");
|
||||
|
||||
/**
* nx842_constraints
*
* This provides the driver's constraints. Different nx842 implementations
* may have varying requirements. The constraints are:
* @alignment: All buffers should be aligned to this
* @multiple: All buffer lengths should be a multiple of this
* @minimum: Buffer lengths must not be less than this amount
* @maximum: Buffer lengths must not be more than this amount
*
* The constraints apply to all buffers and lengths, both input and output,
* for both compression and decompression, except for the minimum which
* only applies to compression input and decompression output; the
* compressed data can be less than the minimum constraint. It can be
* assumed that compressed data will always adhere to the multiple
* constraint.
*
* The driver may succeed even if these constraints are violated;
* however the driver can return failure or suffer reduced performance
* if any constraint is not met.
/* The first 5 bits of this magic are 0x1f, which is an invalid 842 5-bit
* template (see lib/842/842.h), so this magic number will never appear at
* the start of a raw 842 compressed buffer. That is important, as any buffer
* passed to us without this magic is assumed to be a raw 842 compressed
* buffer, and passed directly to the hardware to decompress.
*/
int nx842_constraints(struct nx842_constraints *c)
#define NX842_CRYPTO_MAGIC (0xf842)
#define NX842_CRYPTO_HEADER_SIZE(g) \
(sizeof(struct nx842_crypto_header) + \
sizeof(struct nx842_crypto_header_group) * (g))
#define NX842_CRYPTO_HEADER_MAX_SIZE \
NX842_CRYPTO_HEADER_SIZE(NX842_CRYPTO_GROUP_MAX)
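/*
 * Worked example, not part of this patch: with the __packed layouts declared
 * in nx-842.h below, sizeof(struct nx842_crypto_header) is 2 + 2 + 1 = 5
 * bytes and sizeof(struct nx842_crypto_header_group) is 2 + 4 + 4 = 10 bytes,
 * so NX842_CRYPTO_HEADER_SIZE(g) works out to 5 + 10 * g and
 * NX842_CRYPTO_HEADER_MAX_SIZE to 5 + 10 * 0x20 = 325 bytes.
 */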

/* bounce buffer size */
#define BOUNCE_BUFFER_ORDER (2)
#define BOUNCE_BUFFER_SIZE \
((unsigned int)(PAGE_SIZE << BOUNCE_BUFFER_ORDER))
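/*
 * Worked example, not part of this patch: with BOUNCE_BUFFER_ORDER of 2 this
 * is PAGE_SIZE << 2, i.e. 16 KiB on 4 KiB-page kernels; ppc64 configurations
 * built with 64 KiB pages would get 256 KiB bounce buffers instead.
 */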

/* try longer on comp because we can fallback to sw decomp if hw is busy */
#define COMP_BUSY_TIMEOUT (250) /* ms */
#define DECOMP_BUSY_TIMEOUT (50) /* ms */

struct nx842_crypto_param {
u8 *in;
unsigned int iremain;
u8 *out;
unsigned int oremain;
unsigned int ototal;
};

static int update_param(struct nx842_crypto_param *p,
unsigned int slen, unsigned int dlen)
{
memcpy(c, nx842_platform_driver()->constraints, sizeof(*c));
if (p->iremain < slen)
return -EOVERFLOW;
if (p->oremain < dlen)
return -ENOSPC;

p->in += slen;
p->iremain -= slen;
p->out += dlen;
p->oremain -= dlen;
p->ototal += dlen;

return 0;
}
|
||||
EXPORT_SYMBOL_GPL(nx842_constraints);
|
||||
|
||||
/**
|
||||
* nx842_workmem_size
|
||||
*
|
||||
* Get the amount of working memory the driver requires.
|
||||
*/
|
||||
size_t nx842_workmem_size(void)
|
||||
int nx842_crypto_init(struct crypto_tfm *tfm, struct nx842_driver *driver)
|
||||
{
|
||||
return nx842_platform_driver()->workmem_size;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(nx842_workmem_size);
|
||||
struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
|
||||
int nx842_compress(const unsigned char *in, unsigned int ilen,
|
||||
unsigned char *out, unsigned int *olen, void *wmem)
|
||||
{
|
||||
return nx842_platform_driver()->compress(in, ilen, out, olen, wmem);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(nx842_compress);
|
||||
|
||||
int nx842_decompress(const unsigned char *in, unsigned int ilen,
|
||||
unsigned char *out, unsigned int *olen, void *wmem)
|
||||
{
|
||||
return nx842_platform_driver()->decompress(in, ilen, out, olen, wmem);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(nx842_decompress);
|
||||
|
||||
static __init int nx842_init(void)
|
||||
{
|
||||
request_module("nx-compress-powernv");
|
||||
request_module("nx-compress-pseries");
|
||||
|
||||
/* we prevent loading if there's no platform driver, and we get the
|
||||
* module that set it so it won't unload, so we don't need to check
|
||||
* if it's set in any of the above functions
|
||||
*/
|
||||
if (!nx842_platform_driver_get()) {
|
||||
pr_err("no nx842 driver found.\n");
|
||||
return -ENODEV;
|
||||
spin_lock_init(&ctx->lock);
|
||||
ctx->driver = driver;
|
||||
ctx->wmem = kmalloc(driver->workmem_size, GFP_KERNEL);
|
||||
ctx->sbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER);
|
||||
ctx->dbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER);
|
||||
if (!ctx->wmem || !ctx->sbounce || !ctx->dbounce) {
|
||||
kfree(ctx->wmem);
|
||||
free_page((unsigned long)ctx->sbounce);
|
||||
free_page((unsigned long)ctx->dbounce);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
module_init(nx842_init);
|
||||
EXPORT_SYMBOL_GPL(nx842_crypto_init);
|
||||
|
||||
static void __exit nx842_exit(void)
|
||||
void nx842_crypto_exit(struct crypto_tfm *tfm)
|
||||
{
|
||||
nx842_platform_driver_put();
|
||||
struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
|
||||
kfree(ctx->wmem);
|
||||
free_page((unsigned long)ctx->sbounce);
|
||||
free_page((unsigned long)ctx->dbounce);
|
||||
}
|
||||
module_exit(nx842_exit);
|
||||
EXPORT_SYMBOL_GPL(nx842_crypto_exit);
|
||||
|
||||
static void check_constraints(struct nx842_constraints *c)
|
||||
{
|
||||
/* limit maximum, to always have enough bounce buffer to decompress */
|
||||
if (c->maximum > BOUNCE_BUFFER_SIZE)
|
||||
c->maximum = BOUNCE_BUFFER_SIZE;
|
||||
}
|
||||
|
||||
static int nx842_crypto_add_header(struct nx842_crypto_header *hdr, u8 *buf)
|
||||
{
|
||||
int s = NX842_CRYPTO_HEADER_SIZE(hdr->groups);
|
||||
|
||||
/* compress should have added space for header */
|
||||
if (s > be16_to_cpu(hdr->group[0].padding)) {
|
||||
pr_err("Internal error: no space for header\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
memcpy(buf, hdr, s);
|
||||
|
||||
print_hex_dump_debug("header ", DUMP_PREFIX_OFFSET, 16, 1, buf, s, 0);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int compress(struct nx842_crypto_ctx *ctx,
|
||||
struct nx842_crypto_param *p,
|
||||
struct nx842_crypto_header_group *g,
|
||||
struct nx842_constraints *c,
|
||||
u16 *ignore,
|
||||
unsigned int hdrsize)
|
||||
{
|
||||
unsigned int slen = p->iremain, dlen = p->oremain, tmplen;
|
||||
unsigned int adj_slen = slen;
|
||||
u8 *src = p->in, *dst = p->out;
|
||||
int ret, dskip = 0;
|
||||
ktime_t timeout;
|
||||
|
||||
if (p->iremain == 0)
|
||||
return -EOVERFLOW;
|
||||
|
||||
if (p->oremain == 0 || hdrsize + c->minimum > dlen)
|
||||
return -ENOSPC;
|
||||
|
||||
if (slen % c->multiple)
|
||||
adj_slen = round_up(slen, c->multiple);
|
||||
if (slen < c->minimum)
|
||||
adj_slen = c->minimum;
|
||||
if (slen > c->maximum)
|
||||
adj_slen = slen = c->maximum;
|
||||
if (adj_slen > slen || (u64)src % c->alignment) {
|
||||
adj_slen = min(adj_slen, BOUNCE_BUFFER_SIZE);
|
||||
slen = min(slen, BOUNCE_BUFFER_SIZE);
|
||||
if (adj_slen > slen)
|
||||
memset(ctx->sbounce + slen, 0, adj_slen - slen);
|
||||
memcpy(ctx->sbounce, src, slen);
|
||||
src = ctx->sbounce;
|
||||
slen = adj_slen;
|
||||
pr_debug("using comp sbounce buffer, len %x\n", slen);
|
||||
}
|
||||
|
||||
dst += hdrsize;
|
||||
dlen -= hdrsize;
|
||||
|
||||
if ((u64)dst % c->alignment) {
|
||||
dskip = (int)(PTR_ALIGN(dst, c->alignment) - dst);
|
||||
dst += dskip;
|
||||
dlen -= dskip;
|
||||
}
|
||||
if (dlen % c->multiple)
|
||||
dlen = round_down(dlen, c->multiple);
|
||||
if (dlen < c->minimum) {
|
||||
nospc:
|
||||
dst = ctx->dbounce;
|
||||
dlen = min(p->oremain, BOUNCE_BUFFER_SIZE);
|
||||
dlen = round_down(dlen, c->multiple);
|
||||
dskip = 0;
|
||||
pr_debug("using comp dbounce buffer, len %x\n", dlen);
|
||||
}
|
||||
if (dlen > c->maximum)
|
||||
dlen = c->maximum;
|
||||
|
||||
tmplen = dlen;
|
||||
timeout = ktime_add_ms(ktime_get(), COMP_BUSY_TIMEOUT);
|
||||
do {
|
||||
dlen = tmplen; /* reset dlen, if we're retrying */
|
||||
ret = ctx->driver->compress(src, slen, dst, &dlen, ctx->wmem);
|
||||
/* possibly we should reduce the slen here, instead of
|
||||
* retrying with the dbounce buffer?
|
||||
*/
|
||||
if (ret == -ENOSPC && dst != ctx->dbounce)
|
||||
goto nospc;
|
||||
} while (ret == -EBUSY && ktime_before(ktime_get(), timeout));
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
dskip += hdrsize;
|
||||
|
||||
if (dst == ctx->dbounce)
|
||||
memcpy(p->out + dskip, dst, dlen);
|
||||
|
||||
g->padding = cpu_to_be16(dskip);
|
||||
g->compressed_length = cpu_to_be32(dlen);
|
||||
g->uncompressed_length = cpu_to_be32(slen);
|
||||
|
||||
if (p->iremain < slen) {
|
||||
*ignore = slen - p->iremain;
|
||||
slen = p->iremain;
|
||||
}
|
||||
|
||||
pr_debug("compress slen %x ignore %x dlen %x padding %x\n",
|
||||
slen, *ignore, dlen, dskip);
|
||||
|
||||
return update_param(p, slen, dskip + dlen);
|
||||
}
|
||||
|
||||
int nx842_crypto_compress(struct crypto_tfm *tfm,
|
||||
const u8 *src, unsigned int slen,
|
||||
u8 *dst, unsigned int *dlen)
|
||||
{
|
||||
struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
struct nx842_crypto_header *hdr = &ctx->header;
|
||||
struct nx842_crypto_param p;
|
||||
struct nx842_constraints c = *ctx->driver->constraints;
|
||||
unsigned int groups, hdrsize, h;
|
||||
int ret, n;
|
||||
bool add_header;
|
||||
u16 ignore = 0;
|
||||
|
||||
check_constraints(&c);
|
||||
|
||||
p.in = (u8 *)src;
|
||||
p.iremain = slen;
|
||||
p.out = dst;
|
||||
p.oremain = *dlen;
|
||||
p.ototal = 0;
|
||||
|
||||
*dlen = 0;
|
||||
|
||||
groups = min_t(unsigned int, NX842_CRYPTO_GROUP_MAX,
|
||||
DIV_ROUND_UP(p.iremain, c.maximum));
|
||||
hdrsize = NX842_CRYPTO_HEADER_SIZE(groups);
|
||||
|
||||
spin_lock_bh(&ctx->lock);
|
||||
|
||||
/* skip adding header if the buffers meet all constraints */
|
||||
add_header = (p.iremain % c.multiple ||
|
||||
p.iremain < c.minimum ||
|
||||
p.iremain > c.maximum ||
|
||||
(u64)p.in % c.alignment ||
|
||||
p.oremain % c.multiple ||
|
||||
p.oremain < c.minimum ||
|
||||
p.oremain > c.maximum ||
|
||||
(u64)p.out % c.alignment);
|
||||
|
||||
hdr->magic = cpu_to_be16(NX842_CRYPTO_MAGIC);
|
||||
hdr->groups = 0;
|
||||
hdr->ignore = 0;
|
||||
|
||||
while (p.iremain > 0) {
|
||||
n = hdr->groups++;
|
||||
ret = -ENOSPC;
|
||||
if (hdr->groups > NX842_CRYPTO_GROUP_MAX)
|
||||
goto unlock;
|
||||
|
||||
/* header goes before first group */
|
||||
h = !n && add_header ? hdrsize : 0;
|
||||
|
||||
if (ignore)
|
||||
pr_warn("internal error, ignore is set %x\n", ignore);
|
||||
|
||||
ret = compress(ctx, &p, &hdr->group[n], &c, &ignore, h);
|
||||
if (ret)
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
if (!add_header && hdr->groups > 1) {
|
||||
pr_err("Internal error: No header but multiple groups\n");
|
||||
ret = -EINVAL;
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
/* ignore indicates the input stream needed to be padded */
|
||||
hdr->ignore = cpu_to_be16(ignore);
|
||||
if (ignore)
|
||||
pr_debug("marked %d bytes as ignore\n", ignore);
|
||||
|
||||
if (add_header)
|
||||
ret = nx842_crypto_add_header(hdr, dst);
|
||||
if (ret)
|
||||
goto unlock;
|
||||
|
||||
*dlen = p.ototal;
|
||||
|
||||
pr_debug("compress total slen %x dlen %x\n", slen, *dlen);
|
||||
|
||||
unlock:
|
||||
spin_unlock_bh(&ctx->lock);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(nx842_crypto_compress);
|
||||
|
||||
static int decompress(struct nx842_crypto_ctx *ctx,
|
||||
struct nx842_crypto_param *p,
|
||||
struct nx842_crypto_header_group *g,
|
||||
struct nx842_constraints *c,
|
||||
u16 ignore)
|
||||
{
|
||||
unsigned int slen = be32_to_cpu(g->compressed_length);
|
||||
unsigned int required_len = be32_to_cpu(g->uncompressed_length);
|
||||
unsigned int dlen = p->oremain, tmplen;
|
||||
unsigned int adj_slen = slen;
|
||||
u8 *src = p->in, *dst = p->out;
|
||||
u16 padding = be16_to_cpu(g->padding);
|
||||
int ret, spadding = 0, dpadding = 0;
|
||||
ktime_t timeout;
|
||||
|
||||
if (!slen || !required_len)
|
||||
return -EINVAL;
|
||||
|
||||
if (p->iremain <= 0 || padding + slen > p->iremain)
|
||||
return -EOVERFLOW;
|
||||
|
||||
if (p->oremain <= 0 || required_len - ignore > p->oremain)
|
||||
return -ENOSPC;
|
||||
|
||||
src += padding;
|
||||
|
||||
if (slen % c->multiple)
|
||||
adj_slen = round_up(slen, c->multiple);
|
||||
if (slen < c->minimum)
|
||||
adj_slen = c->minimum;
|
||||
if (slen > c->maximum)
|
||||
goto usesw;
|
||||
if (slen < adj_slen || (u64)src % c->alignment) {
|
||||
/* we can append padding bytes because the 842 format defines
|
||||
* an "end" template (see lib/842/842_decompress.c) and will
|
||||
* ignore any bytes following it.
|
||||
*/
|
||||
if (slen < adj_slen)
|
||||
memset(ctx->sbounce + slen, 0, adj_slen - slen);
|
||||
memcpy(ctx->sbounce, src, slen);
|
||||
src = ctx->sbounce;
|
||||
spadding = adj_slen - slen;
|
||||
slen = adj_slen;
|
||||
pr_debug("using decomp sbounce buffer, len %x\n", slen);
|
||||
}
|
||||
|
||||
if (dlen % c->multiple)
|
||||
dlen = round_down(dlen, c->multiple);
|
||||
if (dlen < required_len || (u64)dst % c->alignment) {
|
||||
dst = ctx->dbounce;
|
||||
dlen = min(required_len, BOUNCE_BUFFER_SIZE);
|
||||
pr_debug("using decomp dbounce buffer, len %x\n", dlen);
|
||||
}
|
||||
if (dlen < c->minimum)
|
||||
goto usesw;
|
||||
if (dlen > c->maximum)
|
||||
dlen = c->maximum;
|
||||
|
||||
tmplen = dlen;
|
||||
timeout = ktime_add_ms(ktime_get(), DECOMP_BUSY_TIMEOUT);
|
||||
do {
|
||||
dlen = tmplen; /* reset dlen, if we're retrying */
|
||||
ret = ctx->driver->decompress(src, slen, dst, &dlen, ctx->wmem);
|
||||
} while (ret == -EBUSY && ktime_before(ktime_get(), timeout));
|
||||
if (ret) {
|
||||
usesw:
|
||||
/* reset everything, sw doesn't have constraints */
|
||||
src = p->in + padding;
|
||||
slen = be32_to_cpu(g->compressed_length);
|
||||
spadding = 0;
|
||||
dst = p->out;
|
||||
dlen = p->oremain;
|
||||
dpadding = 0;
|
||||
if (dlen < required_len) { /* have ignore bytes */
|
||||
dst = ctx->dbounce;
|
||||
dlen = BOUNCE_BUFFER_SIZE;
|
||||
}
|
||||
pr_info_ratelimited("using software 842 decompression\n");
|
||||
ret = sw842_decompress(src, slen, dst, &dlen);
|
||||
}
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
slen -= spadding;
|
||||
|
||||
dlen -= ignore;
|
||||
if (ignore)
|
||||
pr_debug("ignoring last %x bytes\n", ignore);
|
||||
|
||||
if (dst == ctx->dbounce)
|
||||
memcpy(p->out, dst, dlen);
|
||||
|
||||
pr_debug("decompress slen %x padding %x dlen %x ignore %x\n",
|
||||
slen, padding, dlen, ignore);
|
||||
|
||||
return update_param(p, slen + padding, dlen);
|
||||
}
|
||||
|
||||
int nx842_crypto_decompress(struct crypto_tfm *tfm,
|
||||
const u8 *src, unsigned int slen,
|
||||
u8 *dst, unsigned int *dlen)
|
||||
{
|
||||
struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
struct nx842_crypto_header *hdr;
|
||||
struct nx842_crypto_param p;
|
||||
struct nx842_constraints c = *ctx->driver->constraints;
|
||||
int n, ret, hdr_len;
|
||||
u16 ignore = 0;
|
||||
|
||||
check_constraints(&c);
|
||||
|
||||
p.in = (u8 *)src;
|
||||
p.iremain = slen;
|
||||
p.out = dst;
|
||||
p.oremain = *dlen;
|
||||
p.ototal = 0;
|
||||
|
||||
*dlen = 0;
|
||||
|
||||
hdr = (struct nx842_crypto_header *)src;
|
||||
|
||||
spin_lock_bh(&ctx->lock);
|
||||
|
||||
/* If it doesn't start with our header magic number, assume it's a raw
|
||||
* 842 compressed buffer and pass it directly to the hardware driver
|
||||
*/
|
||||
if (be16_to_cpu(hdr->magic) != NX842_CRYPTO_MAGIC) {
|
||||
struct nx842_crypto_header_group g = {
|
||||
.padding = 0,
|
||||
.compressed_length = cpu_to_be32(p.iremain),
|
||||
.uncompressed_length = cpu_to_be32(p.oremain),
|
||||
};
|
||||
|
||||
ret = decompress(ctx, &p, &g, &c, 0);
|
||||
if (ret)
|
||||
goto unlock;
|
||||
|
||||
goto success;
|
||||
}
|
||||
|
||||
if (!hdr->groups) {
|
||||
pr_err("header has no groups\n");
|
||||
ret = -EINVAL;
|
||||
goto unlock;
|
||||
}
|
||||
if (hdr->groups > NX842_CRYPTO_GROUP_MAX) {
|
||||
pr_err("header has too many groups %x, max %x\n",
|
||||
hdr->groups, NX842_CRYPTO_GROUP_MAX);
|
||||
ret = -EINVAL;
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
hdr_len = NX842_CRYPTO_HEADER_SIZE(hdr->groups);
|
||||
if (hdr_len > slen) {
|
||||
ret = -EOVERFLOW;
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
memcpy(&ctx->header, src, hdr_len);
|
||||
hdr = &ctx->header;
|
||||
|
||||
for (n = 0; n < hdr->groups; n++) {
|
||||
/* ignore applies to last group */
|
||||
if (n + 1 == hdr->groups)
|
||||
ignore = be16_to_cpu(hdr->ignore);
|
||||
|
||||
ret = decompress(ctx, &p, &hdr->group[n], &c, ignore);
|
||||
if (ret)
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
success:
|
||||
*dlen = p.ototal;
|
||||
|
||||
pr_debug("decompress total slen %x dlen %x\n", slen, *dlen);
|
||||
|
||||
ret = 0;
|
||||
|
||||
unlock:
|
||||
spin_unlock_bh(&ctx->lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(nx842_crypto_decompress);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_DESCRIPTION("IBM PowerPC Nest (NX) 842 Hardware Compression Driver");
|
||||
MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
|
||||
|
@ -3,8 +3,9 @@
|
||||
#define __NX_842_H__
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/sw842.h>
|
||||
#include <linux/crypto.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/io.h>
|
||||
@ -104,6 +105,25 @@ static inline unsigned long nx842_get_pa(void *addr)
|
||||
#define GET_FIELD(v, m) (((v) & (m)) >> MASK_LSH(m))
|
||||
#define SET_FIELD(v, m, val) (((v) & ~(m)) | (((val) << MASK_LSH(m)) & (m)))
|
||||
|
||||
/**
|
||||
* This provides the driver's constraints. Different nx842 implementations
|
||||
* may have varying requirements. The constraints are:
|
||||
* @alignment: All buffers should be aligned to this
|
||||
* @multiple: All buffer lengths should be a multiple of this
|
||||
* @minimum: Buffer lengths must not be less than this amount
|
||||
* @maximum: Buffer lengths must not be more than this amount
|
||||
*
|
||||
* The constraints apply to all buffers and lengths, both input and output,
|
||||
* for both compression and decompression, except for the minimum which
|
||||
* only applies to compression input and decompression output; the
|
||||
* compressed data can be less than the minimum constraint. It can be
|
||||
* assumed that compressed data will always adhere to the multiple
|
||||
* constraint.
|
||||
*
|
||||
* The driver may succeed even if these constraints are violated;
|
||||
* however the driver can return failure or suffer reduced performance
|
||||
* if any constraint is not met.
|
||||
*/
|
||||
struct nx842_constraints {
|
||||
int alignment;
|
||||
int multiple;
|
||||
@ -126,19 +146,40 @@ struct nx842_driver {
|
||||
void *wrkmem);
|
||||
};
|
||||
|
||||
struct nx842_driver *nx842_platform_driver(void);
|
||||
bool nx842_platform_driver_set(struct nx842_driver *driver);
|
||||
void nx842_platform_driver_unset(struct nx842_driver *driver);
|
||||
bool nx842_platform_driver_get(void);
|
||||
void nx842_platform_driver_put(void);
|
||||
struct nx842_crypto_header_group {
|
||||
__be16 padding; /* unused bytes at start of group */
|
||||
__be32 compressed_length; /* compressed bytes in group */
|
||||
__be32 uncompressed_length; /* bytes after decompression */
|
||||
} __packed;
|
||||
|
||||
size_t nx842_workmem_size(void);
|
||||
struct nx842_crypto_header {
|
||||
__be16 magic; /* NX842_CRYPTO_MAGIC */
|
||||
__be16 ignore; /* decompressed end bytes to ignore */
|
||||
u8 groups; /* total groups in this header */
|
||||
struct nx842_crypto_header_group group[];
|
||||
} __packed;
|
||||
|
||||
int nx842_constraints(struct nx842_constraints *constraints);
|
||||
#define NX842_CRYPTO_GROUP_MAX (0x20)
|
||||
|
||||
int nx842_compress(const unsigned char *in, unsigned int in_len,
|
||||
unsigned char *out, unsigned int *out_len, void *wrkmem);
|
||||
int nx842_decompress(const unsigned char *in, unsigned int in_len,
|
||||
unsigned char *out, unsigned int *out_len, void *wrkmem);
|
||||
struct nx842_crypto_ctx {
|
||||
spinlock_t lock;
|
||||
|
||||
u8 *wmem;
|
||||
u8 *sbounce, *dbounce;
|
||||
|
||||
struct nx842_crypto_header header;
|
||||
struct nx842_crypto_header_group group[NX842_CRYPTO_GROUP_MAX];
|
||||
|
||||
struct nx842_driver *driver;
|
||||
};
|
||||
|
||||
int nx842_crypto_init(struct crypto_tfm *tfm, struct nx842_driver *driver);
|
||||
void nx842_crypto_exit(struct crypto_tfm *tfm);
|
||||
int nx842_crypto_compress(struct crypto_tfm *tfm,
|
||||
const u8 *src, unsigned int slen,
|
||||
u8 *dst, unsigned int *dlen);
|
||||
int nx842_crypto_decompress(struct crypto_tfm *tfm,
|
||||
const u8 *src, unsigned int slen,
|
||||
u8 *dst, unsigned int *dlen);
|
||||
|
||||
#endif /* __NX_842_H__ */
|
||||
|
@ -94,8 +94,6 @@ static int ccm_aes_nx_setauthsize(struct crypto_aead *tfm,
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
crypto_aead_crt(tfm)->authsize = authsize;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -111,8 +109,6 @@ static int ccm4309_aes_nx_setauthsize(struct crypto_aead *tfm,
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
crypto_aead_crt(tfm)->authsize = authsize;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -174,6 +170,7 @@ static int generate_pat(u8 *iv,
|
||||
struct nx_crypto_ctx *nx_ctx,
|
||||
unsigned int authsize,
|
||||
unsigned int nbytes,
|
||||
unsigned int assoclen,
|
||||
u8 *out)
|
||||
{
|
||||
struct nx_sg *nx_insg = nx_ctx->in_sg;
|
||||
@ -200,16 +197,16 @@ static int generate_pat(u8 *iv,
|
||||
* greater than 2^32.
|
||||
*/
|
||||
|
||||
if (!req->assoclen) {
|
||||
if (!assoclen) {
|
||||
b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
|
||||
} else if (req->assoclen <= 14) {
|
||||
} else if (assoclen <= 14) {
|
||||
/* if associated data is 14 bytes or less, we do 1 GCM
|
||||
* operation on 2 AES blocks, B0 (stored in the csbcpb) and B1,
|
||||
* which is fed in through the source buffers here */
|
||||
b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
|
||||
b1 = nx_ctx->priv.ccm.iauth_tag;
|
||||
iauth_len = req->assoclen;
|
||||
} else if (req->assoclen <= 65280) {
|
||||
iauth_len = assoclen;
|
||||
} else if (assoclen <= 65280) {
|
||||
/* if associated data is less than (2^16 - 2^8), we construct
|
||||
* B1 differently and feed in the associated data to a CCA
|
||||
* operation */
|
||||
@ -223,7 +220,7 @@ static int generate_pat(u8 *iv,
|
||||
}
|
||||
|
||||
/* generate B0 */
|
||||
rc = generate_b0(iv, req->assoclen, authsize, nbytes, b0);
|
||||
rc = generate_b0(iv, assoclen, authsize, nbytes, b0);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
@ -233,22 +230,22 @@ static int generate_pat(u8 *iv,
|
||||
*/
|
||||
if (b1) {
|
||||
memset(b1, 0, 16);
|
||||
if (req->assoclen <= 65280) {
|
||||
*(u16 *)b1 = (u16)req->assoclen;
|
||||
scatterwalk_map_and_copy(b1 + 2, req->assoc, 0,
|
||||
if (assoclen <= 65280) {
|
||||
*(u16 *)b1 = assoclen;
|
||||
scatterwalk_map_and_copy(b1 + 2, req->src, 0,
|
||||
iauth_len, SCATTERWALK_FROM_SG);
|
||||
} else {
|
||||
*(u16 *)b1 = (u16)(0xfffe);
|
||||
*(u32 *)&b1[2] = (u32)req->assoclen;
|
||||
scatterwalk_map_and_copy(b1 + 6, req->assoc, 0,
|
||||
*(u32 *)&b1[2] = assoclen;
|
||||
scatterwalk_map_and_copy(b1 + 6, req->src, 0,
|
||||
iauth_len, SCATTERWALK_FROM_SG);
|
||||
}
|
||||
}
|
||||
|
||||
/* now copy any remaining AAD to scatterlist and call nx... */
|
||||
if (!req->assoclen) {
|
||||
if (!assoclen) {
|
||||
return rc;
|
||||
} else if (req->assoclen <= 14) {
|
||||
} else if (assoclen <= 14) {
|
||||
unsigned int len = 16;
|
||||
|
||||
nx_insg = nx_build_sg_list(nx_insg, b1, &len, nx_ctx->ap->sglen);
|
||||
@ -280,7 +277,7 @@ static int generate_pat(u8 *iv,
|
||||
return rc;
|
||||
|
||||
atomic_inc(&(nx_ctx->stats->aes_ops));
|
||||
atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));
|
||||
atomic64_add(assoclen, &nx_ctx->stats->aes_bytes);
|
||||
|
||||
} else {
|
||||
unsigned int processed = 0, to_process;
|
||||
@ -294,15 +291,15 @@ static int generate_pat(u8 *iv,
|
||||
nx_ctx->ap->databytelen/NX_PAGE_SIZE);
|
||||
|
||||
do {
|
||||
to_process = min_t(u32, req->assoclen - processed,
|
||||
to_process = min_t(u32, assoclen - processed,
|
||||
nx_ctx->ap->databytelen);
|
||||
|
||||
nx_insg = nx_walk_and_build(nx_ctx->in_sg,
|
||||
nx_ctx->ap->sglen,
|
||||
req->assoc, processed,
|
||||
req->src, processed,
|
||||
&to_process);
|
||||
|
||||
if ((to_process + processed) < req->assoclen) {
|
||||
if ((to_process + processed) < assoclen) {
|
||||
NX_CPB_FDM(nx_ctx->csbcpb_aead) |=
|
||||
NX_FDM_INTERMEDIATE;
|
||||
} else {
|
||||
@ -328,11 +325,10 @@ static int generate_pat(u8 *iv,
|
||||
NX_CPB_FDM(nx_ctx->csbcpb_aead) |= NX_FDM_CONTINUATION;
|
||||
|
||||
atomic_inc(&(nx_ctx->stats->aes_ops));
|
||||
atomic64_add(req->assoclen,
|
||||
&(nx_ctx->stats->aes_bytes));
|
||||
atomic64_add(assoclen, &nx_ctx->stats->aes_bytes);
|
||||
|
||||
processed += to_process;
|
||||
} while (processed < req->assoclen);
|
||||
} while (processed < assoclen);
|
||||
|
||||
result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;
|
||||
}
|
||||
@ -343,7 +339,8 @@ static int generate_pat(u8 *iv,
|
||||
}
|
||||
|
||||
static int ccm_nx_decrypt(struct aead_request *req,
|
||||
struct blkcipher_desc *desc)
|
||||
struct blkcipher_desc *desc,
|
||||
unsigned int assoclen)
|
||||
{
|
||||
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
|
||||
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
|
||||
@ -360,10 +357,10 @@ static int ccm_nx_decrypt(struct aead_request *req,
|
||||
|
||||
/* copy out the auth tag to compare with later */
|
||||
scatterwalk_map_and_copy(priv->oauth_tag,
|
||||
req->src, nbytes, authsize,
|
||||
req->src, nbytes + req->assoclen, authsize,
|
||||
SCATTERWALK_FROM_SG);
|
||||
|
||||
rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes,
|
||||
rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, assoclen,
|
||||
csbcpb->cpb.aes_ccm.in_pat_or_b0);
|
||||
if (rc)
|
||||
goto out;
|
||||
@ -383,8 +380,8 @@ static int ccm_nx_decrypt(struct aead_request *req,
|
||||
NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
|
||||
|
||||
rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
|
||||
&to_process, processed,
|
||||
csbcpb->cpb.aes_ccm.iv_or_ctr);
|
||||
&to_process, processed + req->assoclen,
|
||||
csbcpb->cpb.aes_ccm.iv_or_ctr);
|
||||
if (rc)
|
||||
goto out;
|
||||
|
||||
@ -420,7 +417,8 @@ static int ccm_nx_decrypt(struct aead_request *req,
|
||||
}
|
||||
|
||||
static int ccm_nx_encrypt(struct aead_request *req,
|
||||
struct blkcipher_desc *desc)
|
||||
struct blkcipher_desc *desc,
|
||||
unsigned int assoclen)
|
||||
{
|
||||
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
|
||||
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
|
||||
@ -432,7 +430,7 @@ static int ccm_nx_encrypt(struct aead_request *req,
|
||||
|
||||
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
|
||||
|
||||
rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes,
|
||||
rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, assoclen,
|
||||
csbcpb->cpb.aes_ccm.in_pat_or_b0);
|
||||
if (rc)
|
||||
goto out;
|
||||
@ -451,7 +449,7 @@ static int ccm_nx_encrypt(struct aead_request *req,
|
||||
NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
|
||||
|
||||
rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
|
||||
&to_process, processed,
|
||||
&to_process, processed + req->assoclen,
|
||||
csbcpb->cpb.aes_ccm.iv_or_ctr);
|
||||
if (rc)
|
||||
goto out;
|
||||
@ -483,7 +481,7 @@ static int ccm_nx_encrypt(struct aead_request *req,
|
||||
|
||||
/* copy out the auth tag */
|
||||
scatterwalk_map_and_copy(csbcpb->cpb.aes_ccm.out_pat_or_mac,
|
||||
req->dst, nbytes, authsize,
|
||||
req->dst, nbytes + req->assoclen, authsize,
|
||||
SCATTERWALK_TO_SG);
|
||||
|
||||
out:
|
||||
@ -503,9 +501,8 @@ static int ccm4309_aes_nx_encrypt(struct aead_request *req)
|
||||
memcpy(iv + 4, req->iv, 8);
|
||||
|
||||
desc.info = iv;
|
||||
desc.tfm = (struct crypto_blkcipher *)req->base.tfm;
|
||||
|
||||
return ccm_nx_encrypt(req, &desc);
|
||||
return ccm_nx_encrypt(req, &desc, req->assoclen - 8);
|
||||
}
|
||||
|
||||
static int ccm_aes_nx_encrypt(struct aead_request *req)
|
||||
@ -514,13 +511,12 @@ static int ccm_aes_nx_encrypt(struct aead_request *req)
|
||||
int rc;
|
||||
|
||||
desc.info = req->iv;
|
||||
desc.tfm = (struct crypto_blkcipher *)req->base.tfm;
|
||||
|
||||
rc = crypto_ccm_check_iv(desc.info);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
return ccm_nx_encrypt(req, &desc);
|
||||
return ccm_nx_encrypt(req, &desc, req->assoclen);
|
||||
}
|
||||
|
||||
static int ccm4309_aes_nx_decrypt(struct aead_request *req)
|
||||
@ -535,9 +531,8 @@ static int ccm4309_aes_nx_decrypt(struct aead_request *req)
|
||||
memcpy(iv + 4, req->iv, 8);
|
||||
|
||||
desc.info = iv;
|
||||
desc.tfm = (struct crypto_blkcipher *)req->base.tfm;
|
||||
|
||||
return ccm_nx_decrypt(req, &desc);
|
||||
return ccm_nx_decrypt(req, &desc, req->assoclen - 8);
|
||||
}
|
||||
|
||||
static int ccm_aes_nx_decrypt(struct aead_request *req)
|
||||
@ -546,13 +541,12 @@ static int ccm_aes_nx_decrypt(struct aead_request *req)
|
||||
int rc;
|
||||
|
||||
desc.info = req->iv;
|
||||
desc.tfm = (struct crypto_blkcipher *)req->base.tfm;
|
||||
|
||||
rc = crypto_ccm_check_iv(desc.info);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
return ccm_nx_decrypt(req, &desc);
|
||||
return ccm_nx_decrypt(req, &desc, req->assoclen);
|
||||
}
|
||||
|
||||
/* tell the block cipher walk routines that this is a stream cipher by
|
||||
@ -560,47 +554,42 @@ static int ccm_aes_nx_decrypt(struct aead_request *req)
|
||||
* during encrypt/decrypt doesn't solve this problem, because it calls
|
||||
* blkcipher_walk_done under the covers, which doesn't use walk->blocksize,
|
||||
* but instead uses this tfm->blocksize. */
|
||||
struct crypto_alg nx_ccm_aes_alg = {
|
||||
.cra_name = "ccm(aes)",
|
||||
.cra_driver_name = "ccm-aes-nx",
|
||||
.cra_priority = 300,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_AEAD |
|
||||
CRYPTO_ALG_NEED_FALLBACK,
|
||||
.cra_blocksize = 1,
|
||||
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
|
||||
.cra_type = &crypto_aead_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_init = nx_crypto_ctx_aes_ccm_init,
|
||||
.cra_exit = nx_crypto_ctx_exit,
|
||||
.cra_aead = {
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
.maxauthsize = AES_BLOCK_SIZE,
|
||||
.setkey = ccm_aes_nx_set_key,
|
||||
.setauthsize = ccm_aes_nx_setauthsize,
|
||||
.encrypt = ccm_aes_nx_encrypt,
|
||||
.decrypt = ccm_aes_nx_decrypt,
|
||||
}
|
||||
struct aead_alg nx_ccm_aes_alg = {
|
||||
.base = {
|
||||
.cra_name = "ccm(aes)",
|
||||
.cra_driver_name = "ccm-aes-nx",
|
||||
.cra_priority = 300,
|
||||
.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
|
||||
.cra_blocksize = 1,
|
||||
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
|
||||
.cra_module = THIS_MODULE,
|
||||
},
|
||||
.init = nx_crypto_ctx_aes_ccm_init,
|
||||
.exit = nx_crypto_ctx_aead_exit,
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
.maxauthsize = AES_BLOCK_SIZE,
|
||||
.setkey = ccm_aes_nx_set_key,
|
||||
.setauthsize = ccm_aes_nx_setauthsize,
|
||||
.encrypt = ccm_aes_nx_encrypt,
|
||||
.decrypt = ccm_aes_nx_decrypt,
|
||||
};
|
||||
|
||||
struct crypto_alg nx_ccm4309_aes_alg = {
|
||||
.cra_name = "rfc4309(ccm(aes))",
|
||||
.cra_driver_name = "rfc4309-ccm-aes-nx",
|
||||
.cra_priority = 300,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_AEAD |
|
||||
CRYPTO_ALG_NEED_FALLBACK,
|
||||
.cra_blocksize = 1,
|
||||
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
|
||||
.cra_type = &crypto_nivaead_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_init = nx_crypto_ctx_aes_ccm_init,
|
||||
.cra_exit = nx_crypto_ctx_exit,
|
||||
.cra_aead = {
|
||||
.ivsize = 8,
|
||||
.maxauthsize = AES_BLOCK_SIZE,
|
||||
.setkey = ccm4309_aes_nx_set_key,
|
||||
.setauthsize = ccm4309_aes_nx_setauthsize,
|
||||
.encrypt = ccm4309_aes_nx_encrypt,
|
||||
.decrypt = ccm4309_aes_nx_decrypt,
|
||||
.geniv = "seqiv",
|
||||
}
|
||||
struct aead_alg nx_ccm4309_aes_alg = {
|
||||
.base = {
|
||||
.cra_name = "rfc4309(ccm(aes))",
|
||||
.cra_driver_name = "rfc4309-ccm-aes-nx",
|
||||
.cra_priority = 300,
|
||||
.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
|
||||
.cra_blocksize = 1,
|
||||
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
|
||||
.cra_module = THIS_MODULE,
|
||||
},
|
||||
.init = nx_crypto_ctx_aes_ccm_init,
|
||||
.exit = nx_crypto_ctx_aead_exit,
|
||||
.ivsize = 8,
|
||||
.maxauthsize = AES_BLOCK_SIZE,
|
||||
.setkey = ccm4309_aes_nx_set_key,
|
||||
.setauthsize = ccm4309_aes_nx_setauthsize,
|
||||
.encrypt = ccm4309_aes_nx_encrypt,
|
||||
.decrypt = ccm4309_aes_nx_decrypt,
|
||||
};
|
||||
|
@ -144,27 +144,6 @@ static int ctr3686_aes_nx_crypt(struct blkcipher_desc *desc,
|
||||
return ctr_aes_nx_crypt(desc, dst, src, nbytes);
|
||||
}
|
||||
|
||||
struct crypto_alg nx_ctr_aes_alg = {
|
||||
.cra_name = "ctr(aes)",
|
||||
.cra_driver_name = "ctr-aes-nx",
|
||||
.cra_priority = 300,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
|
||||
.cra_blocksize = 1,
|
||||
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_init = nx_crypto_ctx_aes_ctr_init,
|
||||
.cra_exit = nx_crypto_ctx_exit,
|
||||
.cra_blkcipher = {
|
||||
.min_keysize = AES_MIN_KEY_SIZE,
|
||||
.max_keysize = AES_MAX_KEY_SIZE,
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
.setkey = ctr_aes_nx_set_key,
|
||||
.encrypt = ctr_aes_nx_crypt,
|
||||
.decrypt = ctr_aes_nx_crypt,
|
||||
}
|
||||
};
|
||||
|
||||
struct crypto_alg nx_ctr3686_aes_alg = {
|
||||
.cra_name = "rfc3686(ctr(aes))",
|
||||
.cra_driver_name = "rfc3686-ctr-aes-nx",
|
||||
|
@ -21,11 +21,9 @@
|
||||
|
||||
#include <crypto/internal/aead.h>
|
||||
#include <crypto/aes.h>
|
||||
#include <crypto/algapi.h>
|
||||
#include <crypto/scatterwalk.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/crypto.h>
|
||||
#include <asm/vio.h>
|
||||
|
||||
#include "nx_csbcpb.h"
|
||||
@ -36,7 +34,7 @@ static int gcm_aes_nx_set_key(struct crypto_aead *tfm,
|
||||
const u8 *in_key,
|
||||
unsigned int key_len)
|
||||
{
|
||||
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
|
||||
struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm);
|
||||
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
|
||||
struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
|
||||
|
||||
@ -75,7 +73,7 @@ static int gcm4106_aes_nx_set_key(struct crypto_aead *tfm,
|
||||
const u8 *in_key,
|
||||
unsigned int key_len)
|
||||
{
|
||||
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
|
||||
struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm);
|
||||
char *nonce = nx_ctx->priv.gcm.nonce;
|
||||
int rc;
|
||||
|
||||
@ -110,13 +108,14 @@ static int gcm4106_aes_nx_setauthsize(struct crypto_aead *tfm,
|
||||
|
||||
static int nx_gca(struct nx_crypto_ctx *nx_ctx,
|
||||
struct aead_request *req,
|
||||
u8 *out)
|
||||
u8 *out,
|
||||
unsigned int assoclen)
|
||||
{
|
||||
int rc;
|
||||
struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
|
||||
struct scatter_walk walk;
|
||||
struct nx_sg *nx_sg = nx_ctx->in_sg;
|
||||
unsigned int nbytes = req->assoclen;
|
||||
unsigned int nbytes = assoclen;
|
||||
unsigned int processed = 0, to_process;
|
||||
unsigned int max_sg_len;
|
||||
|
||||
@ -167,7 +166,7 @@ static int nx_gca(struct nx_crypto_ctx *nx_ctx,
|
||||
NX_CPB_FDM(csbcpb_aead) |= NX_FDM_CONTINUATION;
|
||||
|
||||
atomic_inc(&(nx_ctx->stats->aes_ops));
|
||||
atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));
|
||||
atomic64_add(assoclen, &(nx_ctx->stats->aes_bytes));
|
||||
|
||||
processed += to_process;
|
||||
} while (processed < nbytes);
|
||||
@ -177,13 +176,15 @@ static int nx_gca(struct nx_crypto_ctx *nx_ctx,
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int gmac(struct aead_request *req, struct blkcipher_desc *desc)
|
||||
static int gmac(struct aead_request *req, struct blkcipher_desc *desc,
|
||||
unsigned int assoclen)
|
||||
{
|
||||
int rc;
|
||||
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
|
||||
struct nx_crypto_ctx *nx_ctx =
|
||||
crypto_aead_ctx(crypto_aead_reqtfm(req));
|
||||
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
|
||||
struct nx_sg *nx_sg;
|
||||
unsigned int nbytes = req->assoclen;
|
||||
unsigned int nbytes = assoclen;
|
||||
unsigned int processed = 0, to_process;
|
||||
unsigned int max_sg_len;
|
||||
|
||||
@ -238,7 +239,7 @@ static int gmac(struct aead_request *req, struct blkcipher_desc *desc)
|
||||
NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
|
||||
|
||||
atomic_inc(&(nx_ctx->stats->aes_ops));
|
||||
atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));
|
||||
atomic64_add(assoclen, &(nx_ctx->stats->aes_bytes));
|
||||
|
||||
processed += to_process;
|
||||
} while (processed < nbytes);
|
||||
@ -253,7 +254,8 @@ static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc,
|
||||
int enc)
|
||||
{
|
||||
int rc;
|
||||
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
|
||||
struct nx_crypto_ctx *nx_ctx =
|
||||
crypto_aead_ctx(crypto_aead_reqtfm(req));
|
||||
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
|
||||
char out[AES_BLOCK_SIZE];
|
||||
struct nx_sg *in_sg, *out_sg;
|
||||
@ -314,9 +316,11 @@ static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc,
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
|
||||
static int gcm_aes_nx_crypt(struct aead_request *req, int enc,
|
||||
unsigned int assoclen)
|
||||
{
|
||||
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
|
||||
struct nx_crypto_ctx *nx_ctx =
|
||||
crypto_aead_ctx(crypto_aead_reqtfm(req));
|
||||
struct nx_gcm_rctx *rctx = aead_request_ctx(req);
|
||||
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
|
||||
struct blkcipher_desc desc;
|
||||
@ -332,10 +336,10 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
|
||||
*(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1;
|
||||
|
||||
if (nbytes == 0) {
|
||||
if (req->assoclen == 0)
|
||||
if (assoclen == 0)
|
||||
rc = gcm_empty(req, &desc, enc);
|
||||
else
|
||||
rc = gmac(req, &desc);
|
||||
rc = gmac(req, &desc, assoclen);
|
||||
if (rc)
|
||||
goto out;
|
||||
else
|
||||
@ -343,9 +347,10 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
|
||||
}
|
||||
|
||||
/* Process associated data */
|
||||
csbcpb->cpb.aes_gcm.bit_length_aad = req->assoclen * 8;
|
||||
if (req->assoclen) {
|
||||
rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad);
|
||||
csbcpb->cpb.aes_gcm.bit_length_aad = assoclen * 8;
|
||||
if (assoclen) {
|
||||
rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad,
|
||||
assoclen);
|
||||
if (rc)
|
||||
goto out;
|
||||
}
|
||||
@ -363,7 +368,6 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
|
||||
to_process = nbytes - processed;
|
||||
|
||||
csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;
|
||||
desc.tfm = (struct crypto_blkcipher *) req->base.tfm;
|
||||
rc = nx_build_sg_lists(nx_ctx, &desc, req->dst,
|
||||
req->src, &to_process,
|
||||
processed + req->assoclen,
|
||||
@ -430,7 +434,7 @@ static int gcm_aes_nx_encrypt(struct aead_request *req)
|
||||
|
||||
memcpy(iv, req->iv, 12);
|
||||
|
||||
return gcm_aes_nx_crypt(req, 1);
|
||||
return gcm_aes_nx_crypt(req, 1, req->assoclen);
|
||||
}
|
||||
|
||||
static int gcm_aes_nx_decrypt(struct aead_request *req)
|
||||
@ -440,12 +444,13 @@ static int gcm_aes_nx_decrypt(struct aead_request *req)
|
||||
|
||||
memcpy(iv, req->iv, 12);
|
||||
|
||||
return gcm_aes_nx_crypt(req, 0);
|
||||
return gcm_aes_nx_crypt(req, 0, req->assoclen);
|
||||
}
|
||||
|
||||
static int gcm4106_aes_nx_encrypt(struct aead_request *req)
|
||||
{
|
||||
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
|
||||
struct nx_crypto_ctx *nx_ctx =
|
||||
crypto_aead_ctx(crypto_aead_reqtfm(req));
|
||||
struct nx_gcm_rctx *rctx = aead_request_ctx(req);
|
||||
char *iv = rctx->iv;
|
||||
char *nonce = nx_ctx->priv.gcm.nonce;
|
||||
@ -453,12 +458,16 @@ static int gcm4106_aes_nx_encrypt(struct aead_request *req)
|
||||
memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
|
||||
memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);
|
||||
|
||||
return gcm_aes_nx_crypt(req, 1);
|
||||
if (req->assoclen < 8)
|
||||
return -EINVAL;
|
||||
|
||||
return gcm_aes_nx_crypt(req, 1, req->assoclen - 8);
|
||||
}
|
||||
|
||||
static int gcm4106_aes_nx_decrypt(struct aead_request *req)
|
||||
{
|
||||
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
|
||||
struct nx_crypto_ctx *nx_ctx =
|
||||
crypto_aead_ctx(crypto_aead_reqtfm(req));
|
||||
struct nx_gcm_rctx *rctx = aead_request_ctx(req);
|
||||
char *iv = rctx->iv;
|
||||
char *nonce = nx_ctx->priv.gcm.nonce;
|
||||
@ -466,7 +475,10 @@ static int gcm4106_aes_nx_decrypt(struct aead_request *req)
|
||||
memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
|
||||
memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);
|
||||
|
||||
return gcm_aes_nx_crypt(req, 0);
|
||||
if (req->assoclen < 8)
|
||||
return -EINVAL;
|
||||
|
||||
return gcm_aes_nx_crypt(req, 0, req->assoclen - 8);
|
||||
}
|
||||
|
||||
/* tell the block cipher walk routines that this is a stream cipher by
|
||||
|
@ -596,13 +596,9 @@ static int nx_register_algs(void)
|
||||
if (rc)
|
||||
goto out_unreg_ecb;
|
||||
|
||||
rc = nx_register_alg(&nx_ctr_aes_alg, NX_FC_AES, NX_MODE_AES_CTR);
|
||||
if (rc)
|
||||
goto out_unreg_cbc;
|
||||
|
||||
rc = nx_register_alg(&nx_ctr3686_aes_alg, NX_FC_AES, NX_MODE_AES_CTR);
|
||||
if (rc)
|
||||
goto out_unreg_ctr;
|
||||
goto out_unreg_cbc;
|
||||
|
||||
rc = nx_register_aead(&nx_gcm_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
|
||||
if (rc)
|
||||
@ -612,11 +608,11 @@ static int nx_register_algs(void)
|
||||
if (rc)
|
||||
goto out_unreg_gcm;
|
||||
|
||||
rc = nx_register_alg(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
|
||||
rc = nx_register_aead(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
|
||||
if (rc)
|
||||
goto out_unreg_gcm4106;
|
||||
|
||||
rc = nx_register_alg(&nx_ccm4309_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
|
||||
rc = nx_register_aead(&nx_ccm4309_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
|
||||
if (rc)
|
||||
goto out_unreg_ccm;
|
||||
|
||||
@ -644,17 +640,15 @@ static int nx_register_algs(void)
|
||||
nx_unregister_shash(&nx_shash_sha256_alg, NX_FC_SHA, NX_MODE_SHA,
|
||||
NX_PROPS_SHA256);
|
||||
out_unreg_ccm4309:
|
||||
nx_unregister_alg(&nx_ccm4309_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
|
||||
nx_unregister_aead(&nx_ccm4309_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
|
||||
out_unreg_ccm:
|
||||
nx_unregister_alg(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
|
||||
nx_unregister_aead(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
|
||||
out_unreg_gcm4106:
|
||||
nx_unregister_aead(&nx_gcm4106_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
|
||||
out_unreg_gcm:
|
||||
nx_unregister_aead(&nx_gcm_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
|
||||
out_unreg_ctr3686:
|
||||
nx_unregister_alg(&nx_ctr3686_aes_alg, NX_FC_AES, NX_MODE_AES_CTR);
|
||||
out_unreg_ctr:
|
||||
nx_unregister_alg(&nx_ctr_aes_alg, NX_FC_AES, NX_MODE_AES_CTR);
|
||||
out_unreg_cbc:
|
||||
nx_unregister_alg(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC);
|
||||
out_unreg_ecb:
|
||||
@ -711,11 +705,10 @@ static int nx_crypto_ctx_init(struct nx_crypto_ctx *nx_ctx, u32 fc, u32 mode)
|
||||
}
|
||||
|
||||
/* entry points from the crypto tfm initializers */
|
||||
int nx_crypto_ctx_aes_ccm_init(struct crypto_tfm *tfm)
|
||||
int nx_crypto_ctx_aes_ccm_init(struct crypto_aead *tfm)
|
||||
{
|
||||
crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
|
||||
sizeof(struct nx_ccm_rctx));
|
||||
return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
|
||||
crypto_aead_set_reqsize(tfm, sizeof(struct nx_ccm_rctx));
|
||||
return nx_crypto_ctx_init(crypto_aead_ctx(tfm), NX_FC_AES,
|
||||
NX_MODE_AES_CCM);
|
||||
}
|
||||
|
||||
@ -813,16 +806,15 @@ static int nx_remove(struct vio_dev *viodev)
|
||||
NX_FC_SHA, NX_MODE_SHA, NX_PROPS_SHA256);
|
||||
nx_unregister_shash(&nx_shash_sha256_alg,
|
||||
NX_FC_SHA, NX_MODE_SHA, NX_PROPS_SHA512);
|
||||
nx_unregister_alg(&nx_ccm4309_aes_alg,
|
||||
NX_FC_AES, NX_MODE_AES_CCM);
|
||||
nx_unregister_alg(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
|
||||
nx_unregister_aead(&nx_ccm4309_aes_alg,
|
||||
NX_FC_AES, NX_MODE_AES_CCM);
|
||||
nx_unregister_aead(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
|
||||
nx_unregister_aead(&nx_gcm4106_aes_alg,
|
||||
NX_FC_AES, NX_MODE_AES_GCM);
|
||||
nx_unregister_aead(&nx_gcm_aes_alg,
|
||||
NX_FC_AES, NX_MODE_AES_GCM);
|
||||
nx_unregister_alg(&nx_ctr3686_aes_alg,
|
||||
NX_FC_AES, NX_MODE_AES_CTR);
|
||||
nx_unregister_alg(&nx_ctr_aes_alg, NX_FC_AES, NX_MODE_AES_CTR);
|
||||
nx_unregister_alg(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC);
|
||||
nx_unregister_alg(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB);
|
||||
}
|
||||
|
@ -149,8 +149,10 @@ struct nx_crypto_ctx {
|
||||
} priv;
|
||||
};
|
||||
|
||||
struct crypto_aead;
|
||||
|
||||
/* prototypes */
|
||||
int nx_crypto_ctx_aes_ccm_init(struct crypto_tfm *tfm);
|
||||
int nx_crypto_ctx_aes_ccm_init(struct crypto_aead *tfm);
|
||||
int nx_crypto_ctx_aes_gcm_init(struct crypto_aead *tfm);
|
||||
int nx_crypto_ctx_aes_xcbc_init(struct crypto_tfm *tfm);
|
||||
int nx_crypto_ctx_aes_ctr_init(struct crypto_tfm *tfm);
|
||||
@ -187,10 +189,9 @@ extern struct crypto_alg nx_cbc_aes_alg;
|
||||
extern struct crypto_alg nx_ecb_aes_alg;
|
||||
extern struct aead_alg nx_gcm_aes_alg;
|
||||
extern struct aead_alg nx_gcm4106_aes_alg;
|
||||
extern struct crypto_alg nx_ctr_aes_alg;
|
||||
extern struct crypto_alg nx_ctr3686_aes_alg;
|
||||
extern struct crypto_alg nx_ccm_aes_alg;
|
||||
extern struct crypto_alg nx_ccm4309_aes_alg;
|
||||
extern struct aead_alg nx_ccm_aes_alg;
|
||||
extern struct aead_alg nx_ccm4309_aes_alg;
|
||||
extern struct shash_alg nx_shash_aes_xcbc_alg;
|
||||
extern struct shash_alg nx_shash_sha512_alg;
|
||||
extern struct shash_alg nx_shash_sha256_alg;
|
||||
|
@ -52,29 +52,30 @@
|
||||
#define AES_REG_IV(dd, x) ((dd)->pdata->iv_ofs + ((x) * 0x04))
|
||||
|
||||
#define AES_REG_CTRL(dd) ((dd)->pdata->ctrl_ofs)
|
||||
#define AES_REG_CTRL_CTR_WIDTH_MASK (3 << 7)
|
||||
#define AES_REG_CTRL_CTR_WIDTH_32 (0 << 7)
|
||||
#define AES_REG_CTRL_CTR_WIDTH_64 (1 << 7)
|
||||
#define AES_REG_CTRL_CTR_WIDTH_96 (2 << 7)
|
||||
#define AES_REG_CTRL_CTR_WIDTH_128 (3 << 7)
|
||||
#define AES_REG_CTRL_CTR (1 << 6)
|
||||
#define AES_REG_CTRL_CBC (1 << 5)
|
||||
#define AES_REG_CTRL_KEY_SIZE (3 << 3)
|
||||
#define AES_REG_CTRL_DIRECTION (1 << 2)
|
||||
#define AES_REG_CTRL_INPUT_READY (1 << 1)
|
||||
#define AES_REG_CTRL_OUTPUT_READY (1 << 0)
|
||||
#define AES_REG_CTRL_CTR_WIDTH_MASK GENMASK(8, 7)
|
||||
#define AES_REG_CTRL_CTR_WIDTH_32 0
|
||||
#define AES_REG_CTRL_CTR_WIDTH_64 BIT(7)
|
||||
#define AES_REG_CTRL_CTR_WIDTH_96 BIT(8)
|
||||
#define AES_REG_CTRL_CTR_WIDTH_128 GENMASK(8, 7)
|
||||
#define AES_REG_CTRL_CTR BIT(6)
|
||||
#define AES_REG_CTRL_CBC BIT(5)
|
||||
#define AES_REG_CTRL_KEY_SIZE GENMASK(4, 3)
|
||||
#define AES_REG_CTRL_DIRECTION BIT(2)
|
||||
#define AES_REG_CTRL_INPUT_READY BIT(1)
|
||||
#define AES_REG_CTRL_OUTPUT_READY BIT(0)
|
||||
#define AES_REG_CTRL_MASK GENMASK(24, 2)
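/*
 * Worked example, not part of this patch: the BIT()/GENMASK() forms above are
 * numerically identical to the shifted constants they replace, e.g.
 * GENMASK(8, 7) == 0x180 == (3 << 7), BIT(7) == (1 << 7), and
 * GENMASK(4, 3) == 0x18 == (3 << 3); AES_REG_CTRL_MASK covers bits 2..24.
 */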
|
||||
|
||||
#define AES_REG_DATA_N(dd, x) ((dd)->pdata->data_ofs + ((x) * 0x04))
|
||||
|
||||
#define AES_REG_REV(dd) ((dd)->pdata->rev_ofs)
|
||||
|
||||
#define AES_REG_MASK(dd) ((dd)->pdata->mask_ofs)
|
||||
#define AES_REG_MASK_SIDLE (1 << 6)
|
||||
#define AES_REG_MASK_START (1 << 5)
|
||||
#define AES_REG_MASK_DMA_OUT_EN (1 << 3)
|
||||
#define AES_REG_MASK_DMA_IN_EN (1 << 2)
|
||||
#define AES_REG_MASK_SOFTRESET (1 << 1)
|
||||
#define AES_REG_AUTOIDLE (1 << 0)
|
||||
#define AES_REG_MASK_SIDLE BIT(6)
|
||||
#define AES_REG_MASK_START BIT(5)
|
||||
#define AES_REG_MASK_DMA_OUT_EN BIT(3)
|
||||
#define AES_REG_MASK_DMA_IN_EN BIT(2)
|
||||
#define AES_REG_MASK_SOFTRESET BIT(1)
|
||||
#define AES_REG_AUTOIDLE BIT(0)
|
||||
|
||||
#define AES_REG_LENGTH_N(x) (0x54 + ((x) * 0x04))
|
||||
|
||||
@ -254,7 +255,7 @@ static int omap_aes_write_ctrl(struct omap_aes_dev *dd)
|
||||
{
|
||||
unsigned int key32;
|
||||
int i, err;
|
||||
u32 val, mask = 0;
|
||||
u32 val;
|
||||
|
||||
err = omap_aes_hw_init(dd);
|
||||
if (err)
|
||||
@ -274,17 +275,13 @@ static int omap_aes_write_ctrl(struct omap_aes_dev *dd)
|
||||
val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3);
|
||||
if (dd->flags & FLAGS_CBC)
|
||||
val |= AES_REG_CTRL_CBC;
|
||||
if (dd->flags & FLAGS_CTR) {
|
||||
if (dd->flags & FLAGS_CTR)
|
||||
val |= AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_128;
|
||||
mask = AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_MASK;
|
||||
}
|
||||
|
||||
if (dd->flags & FLAGS_ENCRYPT)
|
||||
val |= AES_REG_CTRL_DIRECTION;
|
||||
|
||||
mask |= AES_REG_CTRL_CBC | AES_REG_CTRL_DIRECTION |
|
||||
AES_REG_CTRL_KEY_SIZE;
|
||||
|
||||
omap_aes_write_mask(dd, AES_REG_CTRL(dd), val, mask);
|
||||
omap_aes_write_mask(dd, AES_REG_CTRL(dd), val, AES_REG_CTRL_MASK);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -558,6 +555,9 @@ static int omap_aes_check_aligned(struct scatterlist *sg, int total)
|
||||
{
|
||||
int len = 0;
|
||||
|
||||
if (!IS_ALIGNED(total, AES_BLOCK_SIZE))
|
||||
return -EINVAL;
|
||||
|
||||
while (sg) {
|
||||
if (!IS_ALIGNED(sg->offset, 4))
|
||||
return -1;
|
||||
@ -577,9 +577,10 @@ static int omap_aes_check_aligned(struct scatterlist *sg, int total)
|
||||
static int omap_aes_copy_sgs(struct omap_aes_dev *dd)
|
||||
{
|
||||
void *buf_in, *buf_out;
|
||||
int pages;
|
||||
int pages, total;
|
||||
|
||||
pages = get_order(dd->total);
|
||||
total = ALIGN(dd->total, AES_BLOCK_SIZE);
|
||||
pages = get_order(total);
|
||||
|
||||
buf_in = (void *)__get_free_pages(GFP_ATOMIC, pages);
|
||||
buf_out = (void *)__get_free_pages(GFP_ATOMIC, pages);
|
||||
@ -594,11 +595,11 @@ static int omap_aes_copy_sgs(struct omap_aes_dev *dd)
|
||||
sg_copy_buf(buf_in, dd->in_sg, 0, dd->total, 0);
|
||||
|
||||
sg_init_table(&dd->in_sgl, 1);
|
||||
sg_set_buf(&dd->in_sgl, buf_in, dd->total);
|
||||
sg_set_buf(&dd->in_sgl, buf_in, total);
|
||||
dd->in_sg = &dd->in_sgl;
|
||||
|
||||
sg_init_table(&dd->out_sgl, 1);
|
||||
sg_set_buf(&dd->out_sgl, buf_out, dd->total);
|
||||
sg_set_buf(&dd->out_sgl, buf_out, total);
|
||||
dd->out_sg = &dd->out_sgl;
|
||||
|
||||
return 0;
|
||||
@ -611,7 +612,7 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd,
|
||||
struct omap_aes_ctx *ctx;
|
||||
struct omap_aes_reqctx *rctx;
|
||||
unsigned long flags;
|
||||
int err, ret = 0;
|
||||
int err, ret = 0, len;
|
||||
|
||||
spin_lock_irqsave(&dd->lock, flags);
|
||||
if (req)
|
||||
@ -650,8 +651,9 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd,
|
||||
dd->sgs_copied = 0;
|
||||
}
|
||||
|
||||
dd->in_sg_len = scatterwalk_bytes_sglen(dd->in_sg, dd->total);
|
||||
dd->out_sg_len = scatterwalk_bytes_sglen(dd->out_sg, dd->total);
|
||||
len = ALIGN(dd->total, AES_BLOCK_SIZE);
|
||||
dd->in_sg_len = scatterwalk_bytes_sglen(dd->in_sg, len);
|
||||
dd->out_sg_len = scatterwalk_bytes_sglen(dd->out_sg, len);
|
||||
BUG_ON(dd->in_sg_len < 0 || dd->out_sg_len < 0);
|
||||
|
||||
rctx = ablkcipher_request_ctx(req);
|
||||
@ -678,7 +680,7 @@ static void omap_aes_done_task(unsigned long data)
|
||||
{
|
||||
struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
|
||||
void *buf_in, *buf_out;
|
||||
int pages;
|
||||
int pages, len;
|
||||
|
||||
pr_debug("enter done_task\n");
|
||||
|
||||
@ -697,7 +699,8 @@ static void omap_aes_done_task(unsigned long data)
|
||||
|
||||
sg_copy_buf(buf_out, dd->orig_out, 0, dd->total_save, 1);
|
||||
|
||||
pages = get_order(dd->total_save);
|
||||
len = ALIGN(dd->total_save, AES_BLOCK_SIZE);
|
||||
pages = get_order(len);
|
||||
free_pages((unsigned long)buf_in, pages);
|
||||
free_pages((unsigned long)buf_out, pages);
|
||||
}
|
||||
@ -726,11 +729,6 @@ static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
|
||||
!!(mode & FLAGS_ENCRYPT),
|
||||
!!(mode & FLAGS_CBC));
|
||||
|
||||
if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
|
||||
pr_err("request size is not exact amount of AES blocks\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
dd = omap_aes_find_dev(ctx);
|
||||
if (!dd)
|
||||
return -ENODEV;
|
||||
@ -833,7 +831,7 @@ static struct crypto_alg algs_ecb_cbc[] = {
|
||||
{
|
||||
.cra_name = "ecb(aes)",
|
||||
.cra_driver_name = "ecb-aes-omap",
|
||||
.cra_priority = 100,
|
||||
.cra_priority = 300,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
|
||||
CRYPTO_ALG_KERN_DRIVER_ONLY |
|
||||
CRYPTO_ALG_ASYNC,
|
||||
@ -855,7 +853,7 @@ static struct crypto_alg algs_ecb_cbc[] = {
|
||||
{
|
||||
.cra_name = "cbc(aes)",
|
||||
.cra_driver_name = "cbc-aes-omap",
|
||||
.cra_priority = 100,
|
||||
.cra_priority = 300,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
|
||||
CRYPTO_ALG_KERN_DRIVER_ONLY |
|
||||
CRYPTO_ALG_ASYNC,
|
||||
@ -881,7 +879,7 @@ static struct crypto_alg algs_ctr[] = {
|
||||
{
|
||||
.cra_name = "ctr(aes)",
|
||||
.cra_driver_name = "ctr-aes-omap",
|
||||
.cra_priority = 100,
|
||||
.cra_priority = 300,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
|
||||
CRYPTO_ALG_KERN_DRIVER_ONLY |
|
||||
CRYPTO_ALG_ASYNC,
|
||||
@ -1046,9 +1044,7 @@ static irqreturn_t omap_aes_irq(int irq, void *dev_id)
|
||||
}
|
||||
}
|
||||
|
||||
dd->total -= AES_BLOCK_SIZE;
|
||||
|
||||
BUG_ON(dd->total < 0);
|
||||
dd->total -= min_t(size_t, AES_BLOCK_SIZE, dd->total);
|
||||
|
||||
/* Clear IRQ status */
|
||||
status &= ~AES_REG_IRQ_DATA_OUT;
|
||||
|
@ -99,11 +99,16 @@ struct spacc_req {
|
||||
dma_addr_t src_addr, dst_addr;
|
||||
struct spacc_ddt *src_ddt, *dst_ddt;
|
||||
void (*complete)(struct spacc_req *req);
|
||||
};
|
||||
|
||||
/* AEAD specific bits. */
|
||||
u8 *giv;
|
||||
size_t giv_len;
|
||||
dma_addr_t giv_pa;
|
||||
struct spacc_aead {
|
||||
unsigned long ctrl_default;
|
||||
unsigned long type;
|
||||
struct aead_alg alg;
|
||||
struct spacc_engine *engine;
|
||||
struct list_head entry;
|
||||
int key_offs;
|
||||
int iv_offs;
|
||||
};
|
||||
|
||||
struct spacc_engine {
|
||||
@ -121,6 +126,9 @@ struct spacc_engine {
|
||||
struct spacc_alg *algs;
|
||||
unsigned num_algs;
|
||||
struct list_head registered_algs;
|
||||
struct spacc_aead *aeads;
|
||||
unsigned num_aeads;
|
||||
struct list_head registered_aeads;
|
||||
size_t cipher_pg_sz;
|
||||
size_t hash_pg_sz;
|
||||
const char *name;
|
||||
@ -174,8 +182,6 @@ struct spacc_aead_ctx {
|
||||
u8 cipher_key_len;
|
||||
u8 hash_key_len;
|
||||
struct crypto_aead *sw_cipher;
|
||||
size_t auth_size;
|
||||
u8 salt[AES_BLOCK_SIZE];
|
||||
};
|
||||
|
||||
static int spacc_ablk_submit(struct spacc_req *req);
|
||||
@ -185,6 +191,11 @@ static inline struct spacc_alg *to_spacc_alg(struct crypto_alg *alg)
|
||||
return alg ? container_of(alg, struct spacc_alg, alg) : NULL;
|
||||
}
|
||||
|
||||
static inline struct spacc_aead *to_spacc_aead(struct aead_alg *alg)
|
||||
{
|
||||
return container_of(alg, struct spacc_aead, alg);
|
||||
}
|
||||
|
||||
static inline int spacc_fifo_cmd_full(struct spacc_engine *engine)
|
||||
{
|
||||
u32 fifo_stat = readl(engine->regs + SPA_FIFO_STAT_REG_OFFSET);
|
||||
@ -310,120 +321,117 @@ static struct spacc_ddt *spacc_sg_to_ddt(struct spacc_engine *engine,
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static int spacc_aead_make_ddts(struct spacc_req *req, u8 *giv)
|
||||
static int spacc_aead_make_ddts(struct aead_request *areq)
|
||||
{
|
||||
struct aead_request *areq = container_of(req->req, struct aead_request,
|
||||
base);
|
||||
struct crypto_aead *aead = crypto_aead_reqtfm(areq);
|
||||
struct spacc_req *req = aead_request_ctx(areq);
|
||||
struct spacc_engine *engine = req->engine;
|
||||
struct spacc_ddt *src_ddt, *dst_ddt;
|
||||
unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(areq));
|
||||
unsigned nents = sg_count(areq->src, areq->cryptlen);
|
||||
unsigned total;
|
||||
dma_addr_t iv_addr;
|
||||
unsigned int src_nents, dst_nents;
|
||||
struct scatterlist *cur;
|
||||
int i, dst_ents, src_ents, assoc_ents;
|
||||
u8 *iv = giv ? giv : areq->iv;
|
||||
int i, dst_ents, src_ents;
|
||||
|
||||
total = areq->assoclen + areq->cryptlen;
|
||||
if (req->is_encrypt)
|
||||
total += crypto_aead_authsize(aead);
|
||||
|
||||
src_nents = sg_count(areq->src, total);
|
||||
if (src_nents + 1 > MAX_DDT_LEN)
|
||||
return -E2BIG;
|
||||
|
||||
dst_nents = 0;
|
||||
if (areq->src != areq->dst) {
|
||||
dst_nents = sg_count(areq->dst, total);
|
||||
if (src_nents + 1 > MAX_DDT_LEN)
|
||||
return -E2BIG;
|
||||
}
|
||||
|
||||
src_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->src_addr);
|
||||
if (!src_ddt)
|
||||
return -ENOMEM;
|
||||
goto err;
|
||||
|
||||
dst_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->dst_addr);
|
||||
if (!dst_ddt) {
|
||||
dma_pool_free(engine->req_pool, src_ddt, req->src_addr);
|
||||
return -ENOMEM;
|
||||
}
|
||||
if (!dst_ddt)
|
||||
goto err_free_src;
|
||||
|
||||
req->src_ddt = src_ddt;
|
||||
req->dst_ddt = dst_ddt;
|
||||
|
||||
assoc_ents = dma_map_sg(engine->dev, areq->assoc,
|
||||
sg_count(areq->assoc, areq->assoclen), DMA_TO_DEVICE);
|
||||
if (areq->src != areq->dst) {
|
||||
src_ents = dma_map_sg(engine->dev, areq->src, nents,
|
||||
if (dst_nents) {
|
||||
src_ents = dma_map_sg(engine->dev, areq->src, src_nents,
|
||||
DMA_TO_DEVICE);
|
||||
dst_ents = dma_map_sg(engine->dev, areq->dst, nents,
|
||||
if (!src_ents)
|
||||
goto err_free_dst;
|
||||
|
||||
dst_ents = dma_map_sg(engine->dev, areq->dst, dst_nents,
|
||||
DMA_FROM_DEVICE);
|
||||
|
||||
if (!dst_ents) {
|
||||
dma_unmap_sg(engine->dev, areq->src, src_nents,
|
||||
DMA_TO_DEVICE);
|
||||
goto err_free_dst;
|
||||
}
|
||||
} else {
|
||||
src_ents = dma_map_sg(engine->dev, areq->src, nents,
|
||||
src_ents = dma_map_sg(engine->dev, areq->src, src_nents,
|
||||
DMA_BIDIRECTIONAL);
|
||||
dst_ents = 0;
|
||||
if (!src_ents)
|
||||
goto err_free_dst;
|
||||
dst_ents = src_ents;
|
||||
}
|
||||
|
||||
/*
|
||||
* Map the IV/GIV. For the GIV it needs to be bidirectional as it is
|
||||
* formed by the crypto block and sent as the ESP IV for IPSEC.
|
||||
*/
|
||||
iv_addr = dma_map_single(engine->dev, iv, ivsize,
|
||||
giv ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
|
||||
req->giv_pa = iv_addr;
|
||||
|
||||
/*
|
||||
* Map the associated data. For decryption we don't copy the
|
||||
* associated data.
|
||||
*/
|
||||
total = areq->assoclen;
|
||||
for_each_sg(areq->assoc, cur, assoc_ents, i) {
|
||||
unsigned len = sg_dma_len(cur);
|
||||
|
||||
if (len > total)
|
||||
len = total;
|
||||
|
||||
total -= len;
|
||||
|
||||
ddt_set(src_ddt++, sg_dma_address(cur), len);
|
||||
if (req->is_encrypt)
|
||||
ddt_set(dst_ddt++, sg_dma_address(cur), len);
|
||||
}
|
||||
ddt_set(src_ddt++, iv_addr, ivsize);
|
||||
|
||||
if (giv || req->is_encrypt)
|
||||
ddt_set(dst_ddt++, iv_addr, ivsize);
|
||||
|
||||
/*
|
||||
* Now map in the payload for the source and destination and terminate
|
||||
* with the NULL pointers.
|
||||
*/
|
||||
for_each_sg(areq->src, cur, src_ents, i) {
|
||||
for_each_sg(areq->src, cur, src_ents, i)
|
||||
ddt_set(src_ddt++, sg_dma_address(cur), sg_dma_len(cur));
|
||||
if (areq->src == areq->dst)
|
||||
ddt_set(dst_ddt++, sg_dma_address(cur),
|
||||
sg_dma_len(cur));
|
||||
}
|
||||
|
||||
for_each_sg(areq->dst, cur, dst_ents, i)
|
||||
ddt_set(dst_ddt++, sg_dma_address(cur),
|
||||
sg_dma_len(cur));
|
||||
/* For decryption we need to skip the associated data. */
|
||||
total = req->is_encrypt ? 0 : areq->assoclen;
|
||||
for_each_sg(areq->dst, cur, dst_ents, i) {
|
||||
unsigned len = sg_dma_len(cur);
|
||||
|
||||
if (len <= total) {
|
||||
total -= len;
|
||||
continue;
|
||||
}
|
||||
|
||||
ddt_set(dst_ddt++, sg_dma_address(cur) + total, len - total);
|
||||
}
|
||||
|
||||
ddt_set(src_ddt, 0, 0);
|
||||
ddt_set(dst_ddt, 0, 0);
|
||||
|
||||
return 0;
|
||||
|
||||
err_free_dst:
|
||||
dma_pool_free(engine->req_pool, dst_ddt, req->dst_addr);
|
||||
err_free_src:
|
||||
dma_pool_free(engine->req_pool, src_ddt, req->src_addr);
|
||||
err:
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
static void spacc_aead_free_ddts(struct spacc_req *req)
|
||||
{
|
||||
struct aead_request *areq = container_of(req->req, struct aead_request,
|
||||
base);
|
||||
struct spacc_alg *alg = to_spacc_alg(req->req->tfm->__crt_alg);
|
||||
struct spacc_ablk_ctx *aead_ctx = crypto_tfm_ctx(req->req->tfm);
|
||||
struct crypto_aead *aead = crypto_aead_reqtfm(areq);
|
||||
unsigned total = areq->assoclen + areq->cryptlen +
|
||||
(req->is_encrypt ? crypto_aead_authsize(aead) : 0);
|
||||
struct spacc_aead_ctx *aead_ctx = crypto_aead_ctx(aead);
|
||||
struct spacc_engine *engine = aead_ctx->generic.engine;
|
||||
unsigned ivsize = alg->alg.cra_aead.ivsize;
|
||||
unsigned nents = sg_count(areq->src, areq->cryptlen);
|
||||
unsigned nents = sg_count(areq->src, total);
|
||||
|
||||
if (areq->src != areq->dst) {
|
||||
dma_unmap_sg(engine->dev, areq->src, nents, DMA_TO_DEVICE);
|
||||
dma_unmap_sg(engine->dev, areq->dst,
|
||||
sg_count(areq->dst, areq->cryptlen),
|
||||
sg_count(areq->dst, total),
|
||||
DMA_FROM_DEVICE);
|
||||
} else
|
||||
dma_unmap_sg(engine->dev, areq->src, nents, DMA_BIDIRECTIONAL);
|
||||
|
||||
dma_unmap_sg(engine->dev, areq->assoc,
|
||||
sg_count(areq->assoc, areq->assoclen), DMA_TO_DEVICE);
|
||||
|
||||
dma_unmap_single(engine->dev, req->giv_pa, ivsize, DMA_BIDIRECTIONAL);
|
||||
|
||||
dma_pool_free(engine->req_pool, req->src_ddt, req->src_addr);
|
||||
dma_pool_free(engine->req_pool, req->dst_ddt, req->dst_addr);
|
||||
}
|
||||
@ -438,65 +446,22 @@ static void spacc_free_ddt(struct spacc_req *req, struct spacc_ddt *ddt,
|
||||
dma_pool_free(req->engine->req_pool, ddt, ddt_addr);
|
||||
}
|
||||
|
||||
/*
|
||||
* Set key for a DES operation in an AEAD cipher. This also performs weak key
|
||||
* checking if required.
|
||||
*/
|
||||
static int spacc_aead_des_setkey(struct crypto_aead *aead, const u8 *key,
|
||||
unsigned int len)
|
||||
{
|
||||
struct crypto_tfm *tfm = crypto_aead_tfm(aead);
|
||||
struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
u32 tmp[DES_EXPKEY_WORDS];
|
||||
|
||||
if (unlikely(!des_ekey(tmp, key)) &&
|
||||
(crypto_aead_get_flags(aead)) & CRYPTO_TFM_REQ_WEAK_KEY) {
|
||||
tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
memcpy(ctx->cipher_key, key, len);
|
||||
ctx->cipher_key_len = len;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Set the key for the AES block cipher component of the AEAD transform. */
|
||||
static int spacc_aead_aes_setkey(struct crypto_aead *aead, const u8 *key,
|
||||
unsigned int len)
|
||||
{
|
||||
struct crypto_tfm *tfm = crypto_aead_tfm(aead);
|
||||
struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
|
||||
/*
|
||||
* IPSec engine only supports 128 and 256 bit AES keys. If we get a
|
||||
* request for any other size (192 bits) then we need to do a software
|
||||
* fallback.
|
||||
*/
|
||||
if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256) {
|
||||
/*
|
||||
* Set the fallback transform to use the same request flags as
|
||||
* the hardware transform.
|
||||
*/
|
||||
ctx->sw_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
|
||||
ctx->sw_cipher->base.crt_flags |=
|
||||
tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
|
||||
return crypto_aead_setkey(ctx->sw_cipher, key, len);
|
||||
}
|
||||
|
||||
memcpy(ctx->cipher_key, key, len);
|
||||
ctx->cipher_key_len = len;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
|
||||
struct spacc_alg *alg = to_spacc_alg(tfm->base.__crt_alg);
|
||||
struct crypto_authenc_keys keys;
|
||||
int err = -EINVAL;
|
||||
int err;
|
||||
|
||||
crypto_aead_clear_flags(ctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
|
||||
crypto_aead_set_flags(ctx->sw_cipher, crypto_aead_get_flags(tfm) &
|
||||
CRYPTO_TFM_REQ_MASK);
|
||||
err = crypto_aead_setkey(ctx->sw_cipher, key, keylen);
|
||||
crypto_aead_clear_flags(tfm, CRYPTO_TFM_RES_MASK);
|
||||
crypto_aead_set_flags(tfm, crypto_aead_get_flags(ctx->sw_cipher) &
|
||||
CRYPTO_TFM_RES_MASK);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
|
||||
goto badkey;
|
||||
@ -507,14 +472,8 @@ static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
|
||||
if (keys.authkeylen > sizeof(ctx->hash_ctx))
|
||||
goto badkey;
|
||||
|
||||
if ((alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==
|
||||
SPA_CTRL_CIPH_ALG_AES)
|
||||
err = spacc_aead_aes_setkey(tfm, keys.enckey, keys.enckeylen);
|
||||
else
|
||||
err = spacc_aead_des_setkey(tfm, keys.enckey, keys.enckeylen);
|
||||
|
||||
if (err)
|
||||
goto badkey;
|
||||
memcpy(ctx->cipher_key, keys.enckey, keys.enckeylen);
|
||||
ctx->cipher_key_len = keys.enckeylen;
|
||||
|
||||
memcpy(ctx->hash_ctx, keys.authkey, keys.authkeylen);
|
||||
ctx->hash_key_len = keys.authkeylen;
|
||||
@ -531,9 +490,7 @@ static int spacc_aead_setauthsize(struct crypto_aead *tfm,
|
||||
{
|
||||
struct spacc_aead_ctx *ctx = crypto_tfm_ctx(crypto_aead_tfm(tfm));
|
||||
|
||||
ctx->auth_size = authsize;
|
||||
|
||||
return 0;
|
||||
return crypto_aead_setauthsize(ctx->sw_cipher, authsize);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -541,15 +498,13 @@ static int spacc_aead_setauthsize(struct crypto_aead *tfm,
|
||||
* be completed in hardware because the hardware may not support certain key
|
||||
* sizes. In these cases we need to complete the request in software.
|
||||
*/
|
||||
static int spacc_aead_need_fallback(struct spacc_req *req)
|
||||
static int spacc_aead_need_fallback(struct aead_request *aead_req)
|
||||
{
|
||||
struct aead_request *aead_req;
|
||||
struct crypto_tfm *tfm = req->req->tfm;
|
||||
struct crypto_alg *alg = req->req->tfm->__crt_alg;
|
||||
struct spacc_alg *spacc_alg = to_spacc_alg(alg);
|
||||
struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
|
||||
struct aead_alg *alg = crypto_aead_alg(aead);
|
||||
struct spacc_aead *spacc_alg = to_spacc_aead(alg);
|
||||
struct spacc_aead_ctx *ctx = crypto_aead_ctx(aead);
|
||||
|
||||
aead_req = container_of(req->req, struct aead_request, base);
|
||||
/*
|
||||
* If we have a non-supported key-length, then we need to do a
|
||||
* software fallback.
|
||||
@ -568,22 +523,17 @@ static int spacc_aead_do_fallback(struct aead_request *req, unsigned alg_type,
|
||||
{
|
||||
struct crypto_tfm *old_tfm = crypto_aead_tfm(crypto_aead_reqtfm(req));
|
||||
struct spacc_aead_ctx *ctx = crypto_tfm_ctx(old_tfm);
|
||||
int err;
|
||||
struct aead_request *subreq = aead_request_ctx(req);
|
||||
|
||||
if (ctx->sw_cipher) {
|
||||
/*
|
||||
* Change the request to use the software fallback transform,
|
||||
* and once the ciphering has completed, put the old transform
|
||||
* back into the request.
|
||||
*/
|
||||
aead_request_set_tfm(req, ctx->sw_cipher);
|
||||
err = is_encrypt ? crypto_aead_encrypt(req) :
|
||||
crypto_aead_decrypt(req);
|
||||
aead_request_set_tfm(req, __crypto_aead_cast(old_tfm));
|
||||
} else
|
||||
err = -EINVAL;
|
||||
aead_request_set_tfm(subreq, ctx->sw_cipher);
|
||||
aead_request_set_callback(subreq, req->base.flags,
|
||||
req->base.complete, req->base.data);
|
||||
aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
|
||||
req->iv);
|
||||
aead_request_set_ad(subreq, req->assoclen);
|
||||
|
||||
return err;
|
||||
return is_encrypt ? crypto_aead_encrypt(subreq) :
|
||||
crypto_aead_decrypt(subreq);
|
||||
}
|
||||
|
||||
static void spacc_aead_complete(struct spacc_req *req)
|
||||
@ -594,18 +544,19 @@ static void spacc_aead_complete(struct spacc_req *req)
|
||||
|
||||
static int spacc_aead_submit(struct spacc_req *req)
|
||||
{
|
||||
struct crypto_tfm *tfm = req->req->tfm;
|
||||
struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
struct crypto_alg *alg = req->req->tfm->__crt_alg;
|
||||
struct spacc_alg *spacc_alg = to_spacc_alg(alg);
|
||||
struct spacc_engine *engine = ctx->generic.engine;
|
||||
u32 ctrl, proc_len, assoc_len;
|
||||
struct aead_request *aead_req =
|
||||
container_of(req->req, struct aead_request, base);
|
||||
struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
|
||||
unsigned int authsize = crypto_aead_authsize(aead);
|
||||
struct spacc_aead_ctx *ctx = crypto_aead_ctx(aead);
|
||||
struct aead_alg *alg = crypto_aead_alg(aead);
|
||||
struct spacc_aead *spacc_alg = to_spacc_aead(alg);
|
||||
struct spacc_engine *engine = ctx->generic.engine;
|
||||
u32 ctrl, proc_len, assoc_len;
|
||||
|
||||
req->result = -EINPROGRESS;
|
||||
req->ctx_id = spacc_load_ctx(&ctx->generic, ctx->cipher_key,
|
||||
ctx->cipher_key_len, aead_req->iv, alg->cra_aead.ivsize,
|
||||
ctx->cipher_key_len, aead_req->iv, crypto_aead_ivsize(aead),
|
||||
ctx->hash_ctx, ctx->hash_key_len);
|
||||
|
||||
/* Set the source and destination DDT pointers. */
|
||||
@ -616,26 +567,16 @@ static int spacc_aead_submit(struct spacc_req *req)
|
||||
assoc_len = aead_req->assoclen;
|
||||
proc_len = aead_req->cryptlen + assoc_len;
|
||||
|
||||
/*
|
||||
* If we aren't generating an IV, then we need to include the IV in the
|
||||
* associated data so that it is included in the hash.
|
||||
*/
|
||||
if (!req->giv) {
|
||||
assoc_len += crypto_aead_ivsize(crypto_aead_reqtfm(aead_req));
|
||||
proc_len += crypto_aead_ivsize(crypto_aead_reqtfm(aead_req));
|
||||
} else
|
||||
proc_len += req->giv_len;
|
||||
|
||||
/*
|
||||
* If we are decrypting, we need to take the length of the ICV out of
|
||||
* the processing length.
|
||||
*/
|
||||
if (!req->is_encrypt)
|
||||
proc_len -= ctx->auth_size;
|
||||
proc_len -= authsize;
|
||||
|
||||
writel(proc_len, engine->regs + SPA_PROC_LEN_REG_OFFSET);
|
||||
writel(assoc_len, engine->regs + SPA_AAD_LEN_REG_OFFSET);
|
||||
writel(ctx->auth_size, engine->regs + SPA_ICV_LEN_REG_OFFSET);
|
||||
writel(authsize, engine->regs + SPA_ICV_LEN_REG_OFFSET);
|
||||
writel(0, engine->regs + SPA_ICV_OFFSET_REG_OFFSET);
|
||||
writel(0, engine->regs + SPA_AUX_INFO_REG_OFFSET);
|
||||
|
||||
@ -674,32 +615,29 @@ static void spacc_push(struct spacc_engine *engine)
|
||||
/*
|
||||
* Setup an AEAD request for processing. This will configure the engine, load
|
||||
* the context and then start the packet processing.
|
||||
*
|
||||
* @giv Pointer to destination address for a generated IV. If the
|
||||
* request does not need to generate an IV then this should be set to NULL.
|
||||
*/
|
||||
static int spacc_aead_setup(struct aead_request *req, u8 *giv,
|
||||
static int spacc_aead_setup(struct aead_request *req,
|
||||
unsigned alg_type, bool is_encrypt)
|
||||
{
|
||||
struct crypto_alg *alg = req->base.tfm->__crt_alg;
|
||||
struct spacc_engine *engine = to_spacc_alg(alg)->engine;
|
||||
struct crypto_aead *aead = crypto_aead_reqtfm(req);
|
||||
struct aead_alg *alg = crypto_aead_alg(aead);
|
||||
struct spacc_engine *engine = to_spacc_aead(alg)->engine;
|
||||
struct spacc_req *dev_req = aead_request_ctx(req);
|
||||
int err = -EINPROGRESS;
|
||||
int err;
|
||||
unsigned long flags;
|
||||
unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
|
||||
|
||||
dev_req->giv = giv;
|
||||
dev_req->giv_len = ivsize;
|
||||
dev_req->req = &req->base;
|
||||
dev_req->is_encrypt = is_encrypt;
|
||||
dev_req->result = -EBUSY;
|
||||
dev_req->engine = engine;
|
||||
dev_req->complete = spacc_aead_complete;
|
||||
|
||||
if (unlikely(spacc_aead_need_fallback(dev_req)))
|
||||
if (unlikely(spacc_aead_need_fallback(req) ||
|
||||
((err = spacc_aead_make_ddts(req)) == -E2BIG)))
|
||||
return spacc_aead_do_fallback(req, alg_type, is_encrypt);
|
||||
|
||||
spacc_aead_make_ddts(dev_req, dev_req->giv);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
err = -EINPROGRESS;
|
||||
spin_lock_irqsave(&engine->hw_lock, flags);
|
||||
@ -728,70 +666,44 @@ static int spacc_aead_setup(struct aead_request *req, u8 *giv,
|
||||
static int spacc_aead_encrypt(struct aead_request *req)
|
||||
{
|
||||
struct crypto_aead *aead = crypto_aead_reqtfm(req);
|
||||
struct crypto_tfm *tfm = crypto_aead_tfm(aead);
|
||||
struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);
|
||||
struct spacc_aead *alg = to_spacc_aead(crypto_aead_alg(aead));
|
||||
|
||||
return spacc_aead_setup(req, NULL, alg->type, 1);
|
||||
}
|
||||
|
||||
static int spacc_aead_givencrypt(struct aead_givcrypt_request *req)
|
||||
{
|
||||
struct crypto_aead *tfm = aead_givcrypt_reqtfm(req);
|
||||
struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
|
||||
size_t ivsize = crypto_aead_ivsize(tfm);
|
||||
struct spacc_alg *alg = to_spacc_alg(tfm->base.__crt_alg);
|
||||
unsigned len;
|
||||
__be64 seq;
|
||||
|
||||
memcpy(req->areq.iv, ctx->salt, ivsize);
|
||||
len = ivsize;
|
||||
if (ivsize > sizeof(u64)) {
|
||||
memset(req->giv, 0, ivsize - sizeof(u64));
|
||||
len = sizeof(u64);
|
||||
}
|
||||
seq = cpu_to_be64(req->seq);
|
||||
memcpy(req->giv + ivsize - len, &seq, len);
|
||||
|
||||
return spacc_aead_setup(&req->areq, req->giv, alg->type, 1);
|
||||
return spacc_aead_setup(req, alg->type, 1);
|
||||
}
|
||||
|
||||
static int spacc_aead_decrypt(struct aead_request *req)
|
||||
{
|
||||
struct crypto_aead *aead = crypto_aead_reqtfm(req);
|
||||
struct crypto_tfm *tfm = crypto_aead_tfm(aead);
|
||||
struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);
|
||||
struct spacc_aead *alg = to_spacc_aead(crypto_aead_alg(aead));
|
||||
|
||||
return spacc_aead_setup(req, NULL, alg->type, 0);
|
||||
return spacc_aead_setup(req, alg->type, 0);
|
||||
}
|
||||
|
||||
/*
|
||||
* Initialise a new AEAD context. This is responsible for allocating the
|
||||
* fallback cipher and initialising the context.
|
||||
*/
|
||||
static int spacc_aead_cra_init(struct crypto_tfm *tfm)
|
||||
static int spacc_aead_cra_init(struct crypto_aead *tfm)
|
||||
{
|
||||
struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
struct crypto_alg *alg = tfm->__crt_alg;
|
||||
struct spacc_alg *spacc_alg = to_spacc_alg(alg);
|
||||
struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
|
||||
struct aead_alg *alg = crypto_aead_alg(tfm);
|
||||
struct spacc_aead *spacc_alg = to_spacc_aead(alg);
|
||||
struct spacc_engine *engine = spacc_alg->engine;
|
||||
|
||||
ctx->generic.flags = spacc_alg->type;
|
||||
ctx->generic.engine = engine;
|
||||
ctx->sw_cipher = crypto_alloc_aead(alg->cra_name, 0,
|
||||
CRYPTO_ALG_ASYNC |
|
||||
ctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
|
||||
CRYPTO_ALG_NEED_FALLBACK);
|
||||
if (IS_ERR(ctx->sw_cipher)) {
|
||||
dev_warn(engine->dev, "failed to allocate fallback for %s\n",
|
||||
alg->cra_name);
|
||||
ctx->sw_cipher = NULL;
|
||||
}
|
||||
if (IS_ERR(ctx->sw_cipher))
|
||||
return PTR_ERR(ctx->sw_cipher);
|
||||
ctx->generic.key_offs = spacc_alg->key_offs;
|
||||
ctx->generic.iv_offs = spacc_alg->iv_offs;
|
||||
|
||||
get_random_bytes(ctx->salt, sizeof(ctx->salt));
|
||||
|
||||
crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
|
||||
sizeof(struct spacc_req));
|
||||
crypto_aead_set_reqsize(
|
||||
tfm,
|
||||
max(sizeof(struct spacc_req),
|
||||
sizeof(struct aead_request) +
|
||||
crypto_aead_reqsize(ctx->sw_cipher)));
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -800,13 +712,11 @@ static int spacc_aead_cra_init(struct crypto_tfm *tfm)
|
||||
* Destructor for an AEAD context. This is called when the transform is freed
|
||||
* and must free the fallback cipher.
|
||||
*/
|
||||
static void spacc_aead_cra_exit(struct crypto_tfm *tfm)
|
||||
static void spacc_aead_cra_exit(struct crypto_aead *tfm)
|
||||
{
|
||||
struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
|
||||
|
||||
if (ctx->sw_cipher)
|
||||
crypto_free_aead(ctx->sw_cipher);
|
||||
ctx->sw_cipher = NULL;
|
||||
crypto_free_aead(ctx->sw_cipher);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1458,180 +1368,188 @@ static struct spacc_alg ipsec_engine_algs[] = {
|
||||
.cra_exit = spacc_ablk_cra_exit,
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
static struct spacc_aead ipsec_engine_aeads[] = {
|
||||
{
|
||||
.ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC |
|
||||
SPA_CTRL_HASH_ALG_SHA | SPA_CTRL_HASH_MODE_HMAC,
|
||||
.ctrl_default = SPA_CTRL_CIPH_ALG_AES |
|
||||
SPA_CTRL_CIPH_MODE_CBC |
|
||||
SPA_CTRL_HASH_ALG_SHA |
|
||||
SPA_CTRL_HASH_MODE_HMAC,
|
||||
.key_offs = 0,
|
||||
.iv_offs = AES_MAX_KEY_SIZE,
|
||||
.alg = {
|
||||
.cra_name = "authenc(hmac(sha1),cbc(aes))",
|
||||
.cra_driver_name = "authenc-hmac-sha1-cbc-aes-picoxcell",
|
||||
.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_AEAD |
|
||||
CRYPTO_ALG_ASYNC |
|
||||
CRYPTO_ALG_KERN_DRIVER_ONLY,
|
||||
.cra_blocksize = AES_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct spacc_aead_ctx),
|
||||
.cra_type = &crypto_aead_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_aead = {
|
||||
.setkey = spacc_aead_setkey,
|
||||
.setauthsize = spacc_aead_setauthsize,
|
||||
.encrypt = spacc_aead_encrypt,
|
||||
.decrypt = spacc_aead_decrypt,
|
||||
.givencrypt = spacc_aead_givencrypt,
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
.maxauthsize = SHA1_DIGEST_SIZE,
|
||||
.base = {
|
||||
.cra_name = "authenc(hmac(sha1),cbc(aes))",
|
||||
.cra_driver_name = "authenc-hmac-sha1-"
|
||||
"cbc-aes-picoxcell",
|
||||
.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
|
||||
.cra_flags = CRYPTO_ALG_ASYNC |
|
||||
CRYPTO_ALG_NEED_FALLBACK |
|
||||
CRYPTO_ALG_KERN_DRIVER_ONLY,
|
||||
.cra_blocksize = AES_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct spacc_aead_ctx),
|
||||
.cra_module = THIS_MODULE,
|
||||
},
|
||||
.cra_init = spacc_aead_cra_init,
|
||||
.cra_exit = spacc_aead_cra_exit,
|
||||
.setkey = spacc_aead_setkey,
|
||||
.setauthsize = spacc_aead_setauthsize,
|
||||
.encrypt = spacc_aead_encrypt,
|
||||
.decrypt = spacc_aead_decrypt,
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
.maxauthsize = SHA1_DIGEST_SIZE,
|
||||
.init = spacc_aead_cra_init,
|
||||
.exit = spacc_aead_cra_exit,
|
||||
},
|
||||
},
|
||||
{
|
||||
.ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC |
|
||||
.ctrl_default = SPA_CTRL_CIPH_ALG_AES |
|
||||
SPA_CTRL_CIPH_MODE_CBC |
|
||||
SPA_CTRL_HASH_ALG_SHA256 |
|
||||
SPA_CTRL_HASH_MODE_HMAC,
|
||||
.key_offs = 0,
|
||||
.iv_offs = AES_MAX_KEY_SIZE,
|
||||
.alg = {
|
||||
.cra_name = "authenc(hmac(sha256),cbc(aes))",
|
||||
.cra_driver_name = "authenc-hmac-sha256-cbc-aes-picoxcell",
|
||||
.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_AEAD |
|
||||
CRYPTO_ALG_ASYNC |
|
||||
CRYPTO_ALG_KERN_DRIVER_ONLY,
|
||||
.cra_blocksize = AES_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct spacc_aead_ctx),
|
||||
.cra_type = &crypto_aead_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_aead = {
|
||||
.setkey = spacc_aead_setkey,
|
||||
.setauthsize = spacc_aead_setauthsize,
|
||||
.encrypt = spacc_aead_encrypt,
|
||||
.decrypt = spacc_aead_decrypt,
|
||||
.givencrypt = spacc_aead_givencrypt,
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
.maxauthsize = SHA256_DIGEST_SIZE,
|
||||
.base = {
|
||||
.cra_name = "authenc(hmac(sha256),cbc(aes))",
|
||||
.cra_driver_name = "authenc-hmac-sha256-"
|
||||
"cbc-aes-picoxcell",
|
||||
.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
|
||||
.cra_flags = CRYPTO_ALG_ASYNC |
|
||||
CRYPTO_ALG_NEED_FALLBACK |
|
||||
CRYPTO_ALG_KERN_DRIVER_ONLY,
|
||||
.cra_blocksize = AES_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct spacc_aead_ctx),
|
||||
.cra_module = THIS_MODULE,
|
||||
},
|
||||
.cra_init = spacc_aead_cra_init,
|
||||
.cra_exit = spacc_aead_cra_exit,
|
||||
.setkey = spacc_aead_setkey,
|
||||
.setauthsize = spacc_aead_setauthsize,
|
||||
.encrypt = spacc_aead_encrypt,
|
||||
.decrypt = spacc_aead_decrypt,
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
.maxauthsize = SHA256_DIGEST_SIZE,
|
||||
.init = spacc_aead_cra_init,
|
||||
.exit = spacc_aead_cra_exit,
|
||||
},
|
||||
},
|
||||
{
|
||||
.key_offs = 0,
|
||||
.iv_offs = AES_MAX_KEY_SIZE,
|
||||
.ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC |
|
||||
SPA_CTRL_HASH_ALG_MD5 | SPA_CTRL_HASH_MODE_HMAC,
|
||||
.ctrl_default = SPA_CTRL_CIPH_ALG_AES |
|
||||
SPA_CTRL_CIPH_MODE_CBC |
|
||||
SPA_CTRL_HASH_ALG_MD5 |
|
||||
SPA_CTRL_HASH_MODE_HMAC,
|
||||
.alg = {
|
||||
.cra_name = "authenc(hmac(md5),cbc(aes))",
|
||||
.cra_driver_name = "authenc-hmac-md5-cbc-aes-picoxcell",
|
||||
.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_AEAD |
|
||||
CRYPTO_ALG_ASYNC |
|
||||
CRYPTO_ALG_KERN_DRIVER_ONLY,
|
||||
.cra_blocksize = AES_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct spacc_aead_ctx),
|
||||
.cra_type = &crypto_aead_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_aead = {
|
||||
.setkey = spacc_aead_setkey,
|
||||
.setauthsize = spacc_aead_setauthsize,
|
||||
.encrypt = spacc_aead_encrypt,
|
||||
.decrypt = spacc_aead_decrypt,
|
||||
.givencrypt = spacc_aead_givencrypt,
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
.maxauthsize = MD5_DIGEST_SIZE,
|
||||
.base = {
|
||||
.cra_name = "authenc(hmac(md5),cbc(aes))",
|
||||
.cra_driver_name = "authenc-hmac-md5-"
|
||||
"cbc-aes-picoxcell",
|
||||
.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
|
||||
.cra_flags = CRYPTO_ALG_ASYNC |
|
||||
CRYPTO_ALG_NEED_FALLBACK |
|
||||
CRYPTO_ALG_KERN_DRIVER_ONLY,
|
||||
.cra_blocksize = AES_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct spacc_aead_ctx),
|
||||
.cra_module = THIS_MODULE,
|
||||
},
|
||||
.cra_init = spacc_aead_cra_init,
|
||||
.cra_exit = spacc_aead_cra_exit,
|
||||
.setkey = spacc_aead_setkey,
|
||||
.setauthsize = spacc_aead_setauthsize,
|
||||
.encrypt = spacc_aead_encrypt,
|
||||
.decrypt = spacc_aead_decrypt,
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
.maxauthsize = MD5_DIGEST_SIZE,
|
||||
.init = spacc_aead_cra_init,
|
||||
.exit = spacc_aead_cra_exit,
|
||||
},
|
||||
},
|
||||
{
|
||||
.key_offs = DES_BLOCK_SIZE,
|
||||
.iv_offs = 0,
|
||||
.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC |
|
||||
SPA_CTRL_HASH_ALG_SHA | SPA_CTRL_HASH_MODE_HMAC,
|
||||
.ctrl_default = SPA_CTRL_CIPH_ALG_DES |
|
||||
SPA_CTRL_CIPH_MODE_CBC |
|
||||
SPA_CTRL_HASH_ALG_SHA |
|
||||
SPA_CTRL_HASH_MODE_HMAC,
|
||||
.alg = {
|
||||
.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
|
||||
.cra_driver_name = "authenc-hmac-sha1-cbc-3des-picoxcell",
|
||||
.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_AEAD |
|
||||
CRYPTO_ALG_ASYNC |
|
||||
CRYPTO_ALG_KERN_DRIVER_ONLY,
|
||||
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct spacc_aead_ctx),
|
||||
.cra_type = &crypto_aead_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_aead = {
|
||||
.setkey = spacc_aead_setkey,
|
||||
.setauthsize = spacc_aead_setauthsize,
|
||||
.encrypt = spacc_aead_encrypt,
|
||||
.decrypt = spacc_aead_decrypt,
|
||||
.givencrypt = spacc_aead_givencrypt,
|
||||
.ivsize = DES3_EDE_BLOCK_SIZE,
|
||||
.maxauthsize = SHA1_DIGEST_SIZE,
|
||||
.base = {
|
||||
.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
|
||||
.cra_driver_name = "authenc-hmac-sha1-"
|
||||
"cbc-3des-picoxcell",
|
||||
.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
|
||||
.cra_flags = CRYPTO_ALG_ASYNC |
|
||||
CRYPTO_ALG_NEED_FALLBACK |
|
||||
CRYPTO_ALG_KERN_DRIVER_ONLY,
|
||||
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct spacc_aead_ctx),
|
||||
.cra_module = THIS_MODULE,
|
||||
},
|
||||
.cra_init = spacc_aead_cra_init,
|
||||
.cra_exit = spacc_aead_cra_exit,
|
||||
.setkey = spacc_aead_setkey,
|
||||
.setauthsize = spacc_aead_setauthsize,
|
||||
.encrypt = spacc_aead_encrypt,
|
||||
.decrypt = spacc_aead_decrypt,
|
||||
.ivsize = DES3_EDE_BLOCK_SIZE,
|
||||
.maxauthsize = SHA1_DIGEST_SIZE,
|
||||
.init = spacc_aead_cra_init,
|
||||
.exit = spacc_aead_cra_exit,
|
||||
},
|
||||
},
|
||||
{
|
||||
.key_offs = DES_BLOCK_SIZE,
|
||||
.iv_offs = 0,
|
||||
.ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC |
|
||||
.ctrl_default = SPA_CTRL_CIPH_ALG_AES |
|
||||
SPA_CTRL_CIPH_MODE_CBC |
|
||||
SPA_CTRL_HASH_ALG_SHA256 |
|
||||
SPA_CTRL_HASH_MODE_HMAC,
|
||||
.alg = {
|
||||
.cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
|
||||
.cra_driver_name = "authenc-hmac-sha256-cbc-3des-picoxcell",
|
||||
.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_AEAD |
|
||||
CRYPTO_ALG_ASYNC |
|
||||
CRYPTO_ALG_KERN_DRIVER_ONLY,
|
||||
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct spacc_aead_ctx),
|
||||
.cra_type = &crypto_aead_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_aead = {
|
||||
.setkey = spacc_aead_setkey,
|
||||
.setauthsize = spacc_aead_setauthsize,
|
||||
.encrypt = spacc_aead_encrypt,
|
||||
.decrypt = spacc_aead_decrypt,
|
||||
.givencrypt = spacc_aead_givencrypt,
|
||||
.ivsize = DES3_EDE_BLOCK_SIZE,
|
||||
.maxauthsize = SHA256_DIGEST_SIZE,
|
||||
.base = {
|
||||
.cra_name = "authenc(hmac(sha256),"
|
||||
"cbc(des3_ede))",
|
||||
.cra_driver_name = "authenc-hmac-sha256-"
|
||||
"cbc-3des-picoxcell",
|
||||
.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
|
||||
.cra_flags = CRYPTO_ALG_ASYNC |
|
||||
CRYPTO_ALG_NEED_FALLBACK |
|
||||
CRYPTO_ALG_KERN_DRIVER_ONLY,
|
||||
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct spacc_aead_ctx),
|
||||
.cra_module = THIS_MODULE,
|
||||
},
|
||||
.cra_init = spacc_aead_cra_init,
|
||||
.cra_exit = spacc_aead_cra_exit,
|
||||
.setkey = spacc_aead_setkey,
|
||||
.setauthsize = spacc_aead_setauthsize,
|
||||
.encrypt = spacc_aead_encrypt,
|
||||
.decrypt = spacc_aead_decrypt,
|
||||
.ivsize = DES3_EDE_BLOCK_SIZE,
|
||||
.maxauthsize = SHA256_DIGEST_SIZE,
|
||||
.init = spacc_aead_cra_init,
|
||||
.exit = spacc_aead_cra_exit,
|
||||
},
|
||||
},
|
||||
{
|
||||
.key_offs = DES_BLOCK_SIZE,
|
||||
.iv_offs = 0,
|
||||
.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC |
|
||||
SPA_CTRL_HASH_ALG_MD5 | SPA_CTRL_HASH_MODE_HMAC,
|
||||
.ctrl_default = SPA_CTRL_CIPH_ALG_DES |
|
||||
SPA_CTRL_CIPH_MODE_CBC |
|
||||
SPA_CTRL_HASH_ALG_MD5 |
|
||||
SPA_CTRL_HASH_MODE_HMAC,
|
||||
.alg = {
|
||||
.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
|
||||
.cra_driver_name = "authenc-hmac-md5-cbc-3des-picoxcell",
|
||||
.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_AEAD |
|
||||
CRYPTO_ALG_ASYNC |
|
||||
CRYPTO_ALG_KERN_DRIVER_ONLY,
|
||||
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct spacc_aead_ctx),
|
||||
.cra_type = &crypto_aead_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_aead = {
|
||||
.setkey = spacc_aead_setkey,
|
||||
.setauthsize = spacc_aead_setauthsize,
|
||||
.encrypt = spacc_aead_encrypt,
|
||||
.decrypt = spacc_aead_decrypt,
|
||||
.givencrypt = spacc_aead_givencrypt,
|
||||
.ivsize = DES3_EDE_BLOCK_SIZE,
|
||||
.maxauthsize = MD5_DIGEST_SIZE,
|
||||
.base = {
|
||||
.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
|
||||
.cra_driver_name = "authenc-hmac-md5-"
|
||||
"cbc-3des-picoxcell",
|
||||
.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
|
||||
.cra_flags = CRYPTO_ALG_ASYNC |
|
||||
CRYPTO_ALG_NEED_FALLBACK |
|
||||
CRYPTO_ALG_KERN_DRIVER_ONLY,
|
||||
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct spacc_aead_ctx),
|
||||
.cra_module = THIS_MODULE,
|
||||
},
|
||||
.cra_init = spacc_aead_cra_init,
|
||||
.cra_exit = spacc_aead_cra_exit,
|
||||
.setkey = spacc_aead_setkey,
|
||||
.setauthsize = spacc_aead_setauthsize,
|
||||
.encrypt = spacc_aead_encrypt,
|
||||
.decrypt = spacc_aead_decrypt,
|
||||
.ivsize = DES3_EDE_BLOCK_SIZE,
|
||||
.maxauthsize = MD5_DIGEST_SIZE,
|
||||
.init = spacc_aead_cra_init,
|
||||
.exit = spacc_aead_cra_exit,
|
||||
},
|
||||
},
|
||||
};
|
||||
@ -1707,6 +1625,8 @@ static int spacc_probe(struct platform_device *pdev)
|
||||
engine->fifo_sz = SPACC_CRYPTO_IPSEC_FIFO_SZ;
|
||||
engine->algs = ipsec_engine_algs;
|
||||
engine->num_algs = ARRAY_SIZE(ipsec_engine_algs);
|
||||
engine->aeads = ipsec_engine_aeads;
|
||||
engine->num_aeads = ARRAY_SIZE(ipsec_engine_aeads);
|
||||
} else if (spacc_is_compatible(pdev, "picochip,spacc-l2")) {
|
||||
engine->max_ctxs = SPACC_CRYPTO_L2_MAX_CTXS;
|
||||
engine->cipher_pg_sz = SPACC_CRYPTO_L2_CIPHER_PG_SZ;
|
||||
@ -1815,17 +1735,40 @@ static int spacc_probe(struct platform_device *pdev)
|
||||
engine->algs[i].alg.cra_name);
|
||||
}
|
||||
|
||||
INIT_LIST_HEAD(&engine->registered_aeads);
|
||||
for (i = 0; i < engine->num_aeads; ++i) {
|
||||
engine->aeads[i].engine = engine;
|
||||
err = crypto_register_aead(&engine->aeads[i].alg);
|
||||
if (!err) {
|
||||
list_add_tail(&engine->aeads[i].entry,
|
||||
&engine->registered_aeads);
|
||||
ret = 0;
|
||||
}
|
||||
if (err)
|
||||
dev_err(engine->dev, "failed to register alg \"%s\"\n",
|
||||
engine->aeads[i].alg.base.cra_name);
|
||||
else
|
||||
dev_dbg(engine->dev, "registered alg \"%s\"\n",
|
||||
engine->aeads[i].alg.base.cra_name);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int spacc_remove(struct platform_device *pdev)
|
||||
{
|
||||
struct spacc_aead *aead, *an;
|
||||
struct spacc_alg *alg, *next;
|
||||
struct spacc_engine *engine = platform_get_drvdata(pdev);
|
||||
|
||||
del_timer_sync(&engine->packet_timeout);
|
||||
device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh);
|
||||
|
||||
list_for_each_entry_safe(aead, an, &engine->registered_aeads, entry) {
|
||||
list_del(&aead->entry);
|
||||
crypto_unregister_aead(&aead->alg);
|
||||
}
|
||||
|
||||
list_for_each_entry_safe(alg, next, &engine->registered_algs, entry) {
|
||||
list_del(&alg->entry);
|
||||
crypto_unregister_alg(&alg->alg);
|
||||
|
@ -3,11 +3,13 @@ config CRYPTO_DEV_QAT
select CRYPTO_AEAD
select CRYPTO_AUTHENC
select CRYPTO_BLKCIPHER
select CRYPTO_AKCIPHER
select CRYPTO_HMAC
select CRYPTO_SHA1
select CRYPTO_SHA256
select CRYPTO_SHA512
select FW_LOADER
select ASN1

config CRYPTO_DEV_QAT_DH895xCC
tristate "Support for Intel(R) DH895xCC"
@ -19,3 +21,16 @@ config CRYPTO_DEV_QAT_DH895xCC

To compile this as a module, choose M here: the module
will be called qat_dh895xcc.

config CRYPTO_DEV_QAT_DH895xCCVF
tristate "Support for Intel(R) DH895xCC Virtual Function"
depends on X86 && PCI
select PCI_IOV
select CRYPTO_DEV_QAT

help
Support for Intel(R) DH895xcc with Intel(R) QuickAssist Technology
Virtual Function for accelerating crypto and compression workloads.

To compile this as a module, choose M here: the module
will be called qat_dh895xccvf.
@ -1,2 +1,3 @@
obj-$(CONFIG_CRYPTO_DEV_QAT) += qat_common/
obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc/
obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf/
1
drivers/crypto/qat/qat_common/.gitignore
vendored
Normal file
@ -0,0 +1 @@
*-asn1.[ch]
@ -1,3 +1,6 @@
$(obj)/qat_rsakey-asn1.o: $(obj)/qat_rsakey-asn1.c $(obj)/qat_rsakey-asn1.h
clean-files += qat_rsakey-asn1.c qat_rsakey-asn1.h

obj-$(CONFIG_CRYPTO_DEV_QAT) += intel_qat.o
intel_qat-objs := adf_cfg.o \
adf_ctl_drv.o \
@ -6,9 +9,14 @@ intel_qat-objs := adf_cfg.o \
adf_accel_engine.o \
adf_aer.o \
adf_transport.o \
adf_admin.o \
adf_hw_arbiter.o \
qat_crypto.o \
qat_algs.o \
qat_rsakey-asn1.o \
qat_asym_algs.o \
qat_uclo.o \
qat_hal.o

intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o
intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_pf2vf_msg.o
@ -46,13 +46,17 @@
|
||||
*/
|
||||
#ifndef ADF_ACCEL_DEVICES_H_
|
||||
#define ADF_ACCEL_DEVICES_H_
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/ratelimit.h>
|
||||
#include "adf_cfg_common.h"
|
||||
|
||||
#define ADF_DH895XCC_DEVICE_NAME "dh895xcc"
|
||||
#define ADF_DH895XCCVF_DEVICE_NAME "dh895xccvf"
|
||||
#define ADF_DH895XCC_PCI_DEVICE_ID 0x435
|
||||
#define ADF_DH895XCCIOV_PCI_DEVICE_ID 0x443
|
||||
#define ADF_PCI_MAX_BARS 3
|
||||
#define ADF_DEVICE_NAME_LENGTH 32
|
||||
#define ADF_ETR_MAX_RINGS_PER_BANK 16
|
||||
@ -79,6 +83,7 @@ struct adf_bar {
|
||||
struct adf_accel_msix {
|
||||
struct msix_entry *entries;
|
||||
char **names;
|
||||
u32 num_entries;
|
||||
} __packed;
|
||||
|
||||
struct adf_accel_pci {
|
||||
@ -99,6 +104,7 @@ enum dev_sku_info {
|
||||
DEV_SKU_2,
|
||||
DEV_SKU_3,
|
||||
DEV_SKU_4,
|
||||
DEV_SKU_VF,
|
||||
DEV_SKU_UNKNOWN,
|
||||
};
|
||||
|
||||
@ -113,6 +119,8 @@ static inline const char *get_sku_info(enum dev_sku_info info)
|
||||
return "SKU3";
|
||||
case DEV_SKU_4:
|
||||
return "SKU4";
|
||||
case DEV_SKU_VF:
|
||||
return "SKUVF";
|
||||
case DEV_SKU_UNKNOWN:
|
||||
default:
|
||||
break;
|
||||
@ -135,23 +143,29 @@ struct adf_hw_device_data {
|
||||
struct adf_hw_device_class *dev_class;
|
||||
uint32_t (*get_accel_mask)(uint32_t fuse);
|
||||
uint32_t (*get_ae_mask)(uint32_t fuse);
|
||||
uint32_t (*get_sram_bar_id)(struct adf_hw_device_data *self);
|
||||
uint32_t (*get_misc_bar_id)(struct adf_hw_device_data *self);
|
||||
uint32_t (*get_etr_bar_id)(struct adf_hw_device_data *self);
|
||||
uint32_t (*get_num_aes)(struct adf_hw_device_data *self);
|
||||
uint32_t (*get_num_accels)(struct adf_hw_device_data *self);
|
||||
uint32_t (*get_pf2vf_offset)(uint32_t i);
|
||||
uint32_t (*get_vintmsk_offset)(uint32_t i);
|
||||
enum dev_sku_info (*get_sku)(struct adf_hw_device_data *self);
|
||||
void (*hw_arb_ring_enable)(struct adf_etr_ring_data *ring);
|
||||
void (*hw_arb_ring_disable)(struct adf_etr_ring_data *ring);
|
||||
int (*alloc_irq)(struct adf_accel_dev *accel_dev);
|
||||
void (*free_irq)(struct adf_accel_dev *accel_dev);
|
||||
void (*enable_error_correction)(struct adf_accel_dev *accel_dev);
|
||||
int (*init_admin_comms)(struct adf_accel_dev *accel_dev);
|
||||
void (*exit_admin_comms)(struct adf_accel_dev *accel_dev);
|
||||
int (*send_admin_init)(struct adf_accel_dev *accel_dev);
|
||||
int (*init_arb)(struct adf_accel_dev *accel_dev);
|
||||
void (*exit_arb)(struct adf_accel_dev *accel_dev);
|
||||
void (*get_arb_mapping)(struct adf_accel_dev *accel_dev,
|
||||
const uint32_t **cfg);
|
||||
void (*disable_iov)(struct adf_accel_dev *accel_dev);
|
||||
void (*enable_ints)(struct adf_accel_dev *accel_dev);
|
||||
int (*enable_vf2pf_comms)(struct adf_accel_dev *accel_dev);
|
||||
const char *fw_name;
|
||||
uint32_t pci_dev_id;
|
||||
const char *fw_mmp_name;
|
||||
uint32_t fuses;
|
||||
uint32_t accel_capabilities_mask;
|
||||
uint16_t accel_mask;
|
||||
@ -163,6 +177,7 @@ struct adf_hw_device_data {
|
||||
uint8_t num_accel;
|
||||
uint8_t num_logical_accel;
|
||||
uint8_t num_engines;
|
||||
uint8_t min_iov_compat_ver;
|
||||
} __packed;
|
||||
|
||||
/* CSR write macro */
|
||||
@ -184,6 +199,16 @@ struct icp_qat_fw_loader_handle;
|
||||
struct adf_fw_loader_data {
|
||||
struct icp_qat_fw_loader_handle *fw_loader;
|
||||
const struct firmware *uof_fw;
|
||||
const struct firmware *mmp_fw;
|
||||
};
|
||||
|
||||
struct adf_accel_vf_info {
|
||||
struct adf_accel_dev *accel_dev;
|
||||
struct tasklet_struct vf2pf_bh_tasklet;
|
||||
struct mutex pf2vf_lock; /* protect CSR access for PF2VF messages */
|
||||
struct ratelimit_state vf2pf_ratelimit;
|
||||
u32 vf_nr;
|
||||
bool init;
|
||||
};
|
||||
|
||||
struct adf_accel_dev {
|
||||
@ -199,6 +224,21 @@ struct adf_accel_dev {
|
||||
struct list_head list;
|
||||
struct module *owner;
|
||||
struct adf_accel_pci accel_pci_dev;
|
||||
union {
|
||||
struct {
|
||||
/* vf_info is non-zero when SR-IOV is init'ed */
|
||||
struct adf_accel_vf_info *vf_info;
|
||||
} pf;
|
||||
struct {
|
||||
char *irq_name;
|
||||
struct tasklet_struct pf2vf_bh_tasklet;
|
||||
struct mutex vf2pf_lock; /* protect CSR access */
|
||||
struct completion iov_msg_completion;
|
||||
uint8_t compatible;
|
||||
uint8_t pf_version;
|
||||
} vf;
|
||||
};
|
||||
bool is_vf;
|
||||
uint8_t accel_id;
|
||||
} __packed;
|
||||
#endif
|
||||
|
@ -55,24 +55,36 @@ int adf_ae_fw_load(struct adf_accel_dev *accel_dev)
|
||||
{
|
||||
struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
|
||||
struct adf_hw_device_data *hw_device = accel_dev->hw_device;
|
||||
void *uof_addr;
|
||||
uint32_t uof_size;
|
||||
void *uof_addr, *mmp_addr;
|
||||
u32 uof_size, mmp_size;
|
||||
|
||||
if (!hw_device->fw_name)
|
||||
return 0;
|
||||
|
||||
if (request_firmware(&loader_data->mmp_fw, hw_device->fw_mmp_name,
|
||||
&accel_dev->accel_pci_dev.pci_dev->dev)) {
|
||||
dev_err(&GET_DEV(accel_dev), "Failed to load MMP firmware %s\n",
|
||||
hw_device->fw_mmp_name);
|
||||
return -EFAULT;
|
||||
}
|
||||
if (request_firmware(&loader_data->uof_fw, hw_device->fw_name,
|
||||
&accel_dev->accel_pci_dev.pci_dev->dev)) {
|
||||
dev_err(&GET_DEV(accel_dev), "Failed to load firmware %s\n",
|
||||
dev_err(&GET_DEV(accel_dev), "Failed to load UOF firmware %s\n",
|
||||
hw_device->fw_name);
|
||||
return -EFAULT;
|
||||
goto out_err;
|
||||
}
|
||||
|
||||
uof_size = loader_data->uof_fw->size;
|
||||
uof_addr = (void *)loader_data->uof_fw->data;
|
||||
mmp_size = loader_data->mmp_fw->size;
|
||||
mmp_addr = (void *)loader_data->mmp_fw->data;
|
||||
qat_uclo_wr_mimage(loader_data->fw_loader, mmp_addr, mmp_size);
|
||||
if (qat_uclo_map_uof_obj(loader_data->fw_loader, uof_addr, uof_size)) {
|
||||
dev_err(&GET_DEV(accel_dev), "Failed to map UOF\n");
|
||||
goto out_err;
|
||||
}
|
||||
if (qat_uclo_wr_all_uimage(loader_data->fw_loader)) {
|
||||
dev_err(&GET_DEV(accel_dev), "Failed to map UOF\n");
|
||||
dev_err(&GET_DEV(accel_dev), "Failed to load UOF\n");
|
||||
goto out_err;
|
||||
}
|
||||
return 0;
|
||||
@ -85,11 +97,17 @@ int adf_ae_fw_load(struct adf_accel_dev *accel_dev)
|
||||
void adf_ae_fw_release(struct adf_accel_dev *accel_dev)
|
||||
{
|
||||
struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
|
||||
struct adf_hw_device_data *hw_device = accel_dev->hw_device;
|
||||
|
||||
if (!hw_device->fw_name)
|
||||
return;
|
||||
|
||||
qat_uclo_del_uof_obj(loader_data->fw_loader);
|
||||
qat_hal_deinit(loader_data->fw_loader);
|
||||
release_firmware(loader_data->uof_fw);
|
||||
release_firmware(loader_data->mmp_fw);
|
||||
loader_data->uof_fw = NULL;
|
||||
loader_data->mmp_fw = NULL;
|
||||
loader_data->fw_loader = NULL;
|
||||
}
|
||||
|
||||
@ -99,6 +117,9 @@ int adf_ae_start(struct adf_accel_dev *accel_dev)
|
||||
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
|
||||
uint32_t ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev);
|
||||
|
||||
if (!hw_data->fw_name)
|
||||
return 0;
|
||||
|
||||
for (ae = 0, ae_ctr = 0; ae < max_aes; ae++) {
|
||||
if (hw_data->ae_mask & (1 << ae)) {
|
||||
qat_hal_start(loader_data->fw_loader, ae, 0xFF);
|
||||
@ -117,6 +138,9 @@ int adf_ae_stop(struct adf_accel_dev *accel_dev)
|
||||
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
|
||||
uint32_t ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev);
|
||||
|
||||
if (!hw_data->fw_name)
|
||||
return 0;
|
||||
|
||||
for (ae = 0, ae_ctr = 0; ae < max_aes; ae++) {
|
||||
if (hw_data->ae_mask & (1 << ae)) {
|
||||
qat_hal_stop(loader_data->fw_loader, ae, 0xFF);
|
||||
@ -143,6 +167,10 @@ static int adf_ae_reset(struct adf_accel_dev *accel_dev, int ae)
|
||||
int adf_ae_init(struct adf_accel_dev *accel_dev)
|
||||
{
|
||||
struct adf_fw_loader_data *loader_data;
|
||||
struct adf_hw_device_data *hw_device = accel_dev->hw_device;
|
||||
|
||||
if (!hw_device->fw_name)
|
||||
return 0;
|
||||
|
||||
loader_data = kzalloc(sizeof(*loader_data), GFP_KERNEL);
|
||||
if (!loader_data)
|
||||
@ -166,6 +194,10 @@ int adf_ae_init(struct adf_accel_dev *accel_dev)
|
||||
int adf_ae_shutdown(struct adf_accel_dev *accel_dev)
|
||||
{
|
||||
struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
|
||||
struct adf_hw_device_data *hw_device = accel_dev->hw_device;
|
||||
|
||||
if (!hw_device->fw_name)
|
||||
return 0;
|
||||
|
||||
qat_hal_deinit(loader_data->fw_loader);
|
||||
kfree(accel_dev->fw_loader);
|
||||
|
290
drivers/crypto/qat/qat_common/adf_admin.c
Normal file
@ -0,0 +1,290 @@
|
||||
/*
|
||||
This file is provided under a dual BSD/GPLv2 license. When using or
|
||||
redistributing this file, you may do so under either license.
|
||||
|
||||
GPL LICENSE SUMMARY
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of version 2 of the GNU General Public License as
|
||||
published by the Free Software Foundation.
|
||||
|
||||
This program is distributed in the hope that it will be useful, but
|
||||
WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
General Public License for more details.
|
||||
|
||||
Contact Information:
|
||||
qat-linux@intel.com
|
||||
|
||||
BSD LICENSE
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Intel Corporation nor the names of its
|
||||
contributors may be used to endorse or promote products derived
|
||||
from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
#include <linux/types.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include "adf_accel_devices.h"
|
||||
#include "icp_qat_fw_init_admin.h"
|
||||
|
||||
/* Admin Messages Registers */
|
||||
#define ADF_DH895XCC_ADMINMSGUR_OFFSET (0x3A000 + 0x574)
|
||||
#define ADF_DH895XCC_ADMINMSGLR_OFFSET (0x3A000 + 0x578)
|
||||
#define ADF_DH895XCC_MAILBOX_BASE_OFFSET 0x20970
|
||||
#define ADF_DH895XCC_MAILBOX_STRIDE 0x1000
|
||||
#define ADF_ADMINMSG_LEN 32
|
||||
|
||||
static const u8 const_tab[1024] = {
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x03, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x01,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x13, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x02, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x13, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13,
|
||||
0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x33, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0xfe, 0xdc, 0xba, 0x98, 0x76,
|
||||
0x54, 0x32, 0x10, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x67, 0x45, 0x23, 0x01, 0xef, 0xcd, 0xab,
|
||||
0x89, 0x98, 0xba, 0xdc, 0xfe, 0x10, 0x32, 0x54, 0x76, 0xc3, 0xd2, 0xe1, 0xf0,
|
||||
0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc1, 0x05, 0x9e,
|
||||
0xd8, 0x36, 0x7c, 0xd5, 0x07, 0x30, 0x70, 0xdd, 0x17, 0xf7, 0x0e, 0x59, 0x39,
|
||||
0xff, 0xc0, 0x0b, 0x31, 0x68, 0x58, 0x15, 0x11, 0x64, 0xf9, 0x8f, 0xa7, 0xbe,
|
||||
0xfa, 0x4f, 0xa4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6a, 0x09, 0xe6, 0x67, 0xbb, 0x67, 0xae,
|
||||
0x85, 0x3c, 0x6e, 0xf3, 0x72, 0xa5, 0x4f, 0xf5, 0x3a, 0x51, 0x0e, 0x52, 0x7f,
|
||||
0x9b, 0x05, 0x68, 0x8c, 0x1f, 0x83, 0xd9, 0xab, 0x5b, 0xe0, 0xcd, 0x19, 0x05,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0xcb, 0xbb, 0x9d, 0x5d, 0xc1, 0x05, 0x9e, 0xd8, 0x62, 0x9a, 0x29,
|
||||
0x2a, 0x36, 0x7c, 0xd5, 0x07, 0x91, 0x59, 0x01, 0x5a, 0x30, 0x70, 0xdd, 0x17,
|
||||
0x15, 0x2f, 0xec, 0xd8, 0xf7, 0x0e, 0x59, 0x39, 0x67, 0x33, 0x26, 0x67, 0xff,
|
||||
0xc0, 0x0b, 0x31, 0x8e, 0xb4, 0x4a, 0x87, 0x68, 0x58, 0x15, 0x11, 0xdb, 0x0c,
|
||||
0x2e, 0x0d, 0x64, 0xf9, 0x8f, 0xa7, 0x47, 0xb5, 0x48, 0x1d, 0xbe, 0xfa, 0x4f,
|
||||
0xa4, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x6a, 0x09, 0xe6, 0x67, 0xf3, 0xbc, 0xc9, 0x08, 0xbb,
|
||||
0x67, 0xae, 0x85, 0x84, 0xca, 0xa7, 0x3b, 0x3c, 0x6e, 0xf3, 0x72, 0xfe, 0x94,
|
||||
0xf8, 0x2b, 0xa5, 0x4f, 0xf5, 0x3a, 0x5f, 0x1d, 0x36, 0xf1, 0x51, 0x0e, 0x52,
|
||||
0x7f, 0xad, 0xe6, 0x82, 0xd1, 0x9b, 0x05, 0x68, 0x8c, 0x2b, 0x3e, 0x6c, 0x1f,
|
||||
0x1f, 0x83, 0xd9, 0xab, 0xfb, 0x41, 0xbd, 0x6b, 0x5b, 0xe0, 0xcd, 0x19, 0x13,
|
||||
0x7e, 0x21, 0x79, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
|
||||
|
||||
struct adf_admin_comms {
|
||||
dma_addr_t phy_addr;
|
||||
dma_addr_t const_tbl_addr;
|
||||
void *virt_addr;
|
||||
void __iomem *mailbox_addr;
|
||||
struct mutex lock; /* protects adf_admin_comms struct */
|
||||
};
|
||||
|
||||
static int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev, u32 ae,
|
||||
void *in, void *out)
|
||||
{
|
||||
struct adf_admin_comms *admin = accel_dev->admin;
|
||||
int offset = ae * ADF_ADMINMSG_LEN * 2;
|
||||
void __iomem *mailbox = admin->mailbox_addr;
|
||||
int mb_offset = ae * ADF_DH895XCC_MAILBOX_STRIDE;
|
||||
int times, received;
|
||||
|
||||
mutex_lock(&admin->lock);
|
||||
|
||||
if (ADF_CSR_RD(mailbox, mb_offset) == 1) {
|
||||
mutex_unlock(&admin->lock);
|
||||
return -EAGAIN;
|
||||
}
|
||||
|
||||
memcpy(admin->virt_addr + offset, in, ADF_ADMINMSG_LEN);
|
||||
ADF_CSR_WR(mailbox, mb_offset, 1);
|
||||
received = 0;
|
||||
for (times = 0; times < 50; times++) {
|
||||
msleep(20);
|
||||
if (ADF_CSR_RD(mailbox, mb_offset) == 0) {
|
||||
received = 1;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (received)
|
||||
memcpy(out, admin->virt_addr + offset +
|
||||
ADF_ADMINMSG_LEN, ADF_ADMINMSG_LEN);
|
||||
else
|
||||
dev_err(&GET_DEV(accel_dev),
|
||||
"Failed to send admin msg to accelerator\n");
|
||||
|
||||
mutex_unlock(&admin->lock);
|
||||
return received ? 0 : -EFAULT;
|
||||
}
|
||||
|
||||
static int adf_send_admin_cmd(struct adf_accel_dev *accel_dev, int cmd)
|
||||
{
|
||||
struct adf_hw_device_data *hw_device = accel_dev->hw_device;
|
||||
struct icp_qat_fw_init_admin_req req;
|
||||
struct icp_qat_fw_init_admin_resp resp;
|
||||
int i;
|
||||
|
||||
memset(&req, 0, sizeof(struct icp_qat_fw_init_admin_req));
|
||||
req.init_admin_cmd_id = cmd;
|
||||
|
||||
if (cmd == ICP_QAT_FW_CONSTANTS_CFG) {
|
||||
req.init_cfg_sz = 1024;
|
||||
req.init_cfg_ptr = accel_dev->admin->const_tbl_addr;
|
||||
}
|
||||
for (i = 0; i < hw_device->get_num_aes(hw_device); i++) {
|
||||
memset(&resp, 0, sizeof(struct icp_qat_fw_init_admin_resp));
|
||||
if (adf_put_admin_msg_sync(accel_dev, i, &req, &resp) ||
|
||||
resp.init_resp_hdr.status)
|
||||
return -EFAULT;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* adf_send_admin_init() - Function sends init message to FW
|
||||
* @accel_dev: Pointer to acceleration device.
|
||||
*
|
||||
* Function sends admin init message to the FW
|
||||
*
|
||||
* Return: 0 on success, error code otherwise.
|
||||
*/
|
||||
int adf_send_admin_init(struct adf_accel_dev *accel_dev)
|
||||
{
|
||||
int ret = adf_send_admin_cmd(accel_dev, ICP_QAT_FW_INIT_ME);
|
||||
|
||||
if (ret)
|
||||
return ret;
|
||||
return adf_send_admin_cmd(accel_dev, ICP_QAT_FW_CONSTANTS_CFG);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(adf_send_admin_init);

int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
{
	struct adf_admin_comms *admin;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_bar *pmisc =
		&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
	void __iomem *csr = pmisc->virt_addr;
	void __iomem *mailbox = csr + ADF_DH895XCC_MAILBOX_BASE_OFFSET;
	u64 reg_val;

	admin = kzalloc_node(sizeof(*accel_dev->admin), GFP_KERNEL,
			     dev_to_node(&GET_DEV(accel_dev)));
	if (!admin)
		return -ENOMEM;
	admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
					       &admin->phy_addr, GFP_KERNEL);
	if (!admin->virt_addr) {
		dev_err(&GET_DEV(accel_dev), "Failed to allocate dma buff\n");
		kfree(admin);
		return -ENOMEM;
	}

	admin->const_tbl_addr = dma_map_single(&GET_DEV(accel_dev),
					       (void *) const_tab, 1024,
					       DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&GET_DEV(accel_dev),
				       admin->const_tbl_addr))) {
		dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
				  admin->virt_addr, admin->phy_addr);
		kfree(admin);
		return -ENOMEM;
	}
	reg_val = (u64)admin->phy_addr;
	ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGUR_OFFSET, reg_val >> 32);
	ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGLR_OFFSET, reg_val);
	mutex_init(&admin->lock);
	admin->mailbox_addr = mailbox;
	accel_dev->admin = admin;
	return 0;
}
EXPORT_SYMBOL_GPL(adf_init_admin_comms);

void adf_exit_admin_comms(struct adf_accel_dev *accel_dev)
{
	struct adf_admin_comms *admin = accel_dev->admin;

	if (!admin)
		return;

	if (admin->virt_addr)
		dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
				  admin->virt_addr, admin->phy_addr);

	dma_unmap_single(&GET_DEV(accel_dev), admin->const_tbl_addr, 1024,
			 DMA_TO_DEVICE);
	mutex_destroy(&admin->lock);
	kfree(admin);
	accel_dev->admin = NULL;
}
EXPORT_SYMBOL_GPL(adf_exit_admin_comms);
|
@ -91,6 +91,9 @@ static void adf_dev_restore(struct adf_accel_dev *accel_dev)
|
||||
dev_info(&GET_DEV(accel_dev), "Resetting device qat_dev%d\n",
|
||||
accel_dev->accel_id);
|
||||
|
||||
if (!parent)
|
||||
parent = pdev;
|
||||
|
||||
if (!pci_wait_for_pending_transaction(pdev))
|
||||
dev_info(&GET_DEV(accel_dev),
|
||||
"Transaction still in progress. Proceeding\n");
|
||||
@ -206,7 +209,7 @@ static struct pci_error_handlers adf_err_handler = {
|
||||
* QAT acceleration device accel_dev.
|
||||
* To be used by QAT device specific drivers.
|
||||
*
|
||||
* Return: 0 on success, error code othewise.
|
||||
* Return: 0 on success, error code otherwise.
|
||||
*/
|
||||
int adf_enable_aer(struct adf_accel_dev *accel_dev, struct pci_driver *adf)
|
||||
{
|
||||
|
@ -123,7 +123,7 @@ static const struct file_operations qat_dev_cfg_fops = {
|
||||
* The table stores device specific config values.
|
||||
* To be used by QAT device specific drivers.
|
||||
*
|
||||
* Return: 0 on success, error code othewise.
|
||||
* Return: 0 on success, error code otherwise.
|
||||
*/
|
||||
int adf_cfg_dev_add(struct adf_accel_dev *accel_dev)
|
||||
{
|
||||
@ -178,6 +178,9 @@ void adf_cfg_dev_remove(struct adf_accel_dev *accel_dev)
|
||||
{
|
||||
struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg;
|
||||
|
||||
if (!dev_cfg_data)
|
||||
return;
|
||||
|
||||
down_write(&dev_cfg_data->lock);
|
||||
adf_cfg_section_del_all(&dev_cfg_data->sec_list);
|
||||
up_write(&dev_cfg_data->lock);
|
||||
@ -276,7 +279,7 @@ static int adf_cfg_key_val_get(struct adf_accel_dev *accel_dev,
|
||||
* in the given acceleration device
|
||||
* To be used by QAT device specific drivers.
|
||||
*
|
||||
* Return: 0 on success, error code othewise.
|
||||
* Return: 0 on success, error code otherwise.
|
||||
*/
|
||||
int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev,
|
||||
const char *section_name,
|
||||
@ -327,7 +330,7 @@ EXPORT_SYMBOL_GPL(adf_cfg_add_key_value_param);
|
||||
* will be stored.
|
||||
* To be used by QAT device specific drivers.
|
||||
*
|
||||
* Return: 0 on success, error code othewise.
|
||||
* Return: 0 on success, error code otherwise.
|
||||
*/
|
||||
int adf_cfg_section_add(struct adf_accel_dev *accel_dev, const char *name)
|
||||
{
|
||||
|
@ -60,7 +60,7 @@
|
||||
#define ADF_CFG_NO_DEVICE 0xFF
|
||||
#define ADF_CFG_AFFINITY_WHATEVER 0xFF
|
||||
#define MAX_DEVICE_NAME_SIZE 32
|
||||
#define ADF_MAX_DEVICES 32
|
||||
#define ADF_MAX_DEVICES (32 * 32)
|
||||
|
||||
enum adf_cfg_val_type {
|
||||
ADF_DEC,
|
||||
@ -71,6 +71,7 @@ enum adf_cfg_val_type {
|
||||
enum adf_device_type {
|
||||
DEV_UNKNOWN = 0,
|
||||
DEV_DH895XCC,
|
||||
DEV_DH895XCCVF,
|
||||
};
|
||||
|
||||
struct adf_dev_status_info {
|
||||
|
@ -54,8 +54,8 @@
|
||||
#include "icp_qat_hal.h"
|
||||
|
||||
#define ADF_MAJOR_VERSION 0
|
||||
#define ADF_MINOR_VERSION 1
|
||||
#define ADF_BUILD_VERSION 3
|
||||
#define ADF_MINOR_VERSION 2
|
||||
#define ADF_BUILD_VERSION 0
|
||||
#define ADF_DRV_VERSION __stringify(ADF_MAJOR_VERSION) "." \
|
||||
__stringify(ADF_MINOR_VERSION) "." \
|
||||
__stringify(ADF_BUILD_VERSION)
|
||||
@ -91,9 +91,13 @@ struct service_hndl {
|
||||
unsigned long start_status;
|
||||
char *name;
|
||||
struct list_head list;
|
||||
int admin;
|
||||
};
|
||||
|
||||
static inline int get_current_node(void)
|
||||
{
|
||||
return topology_physical_package_id(smp_processor_id());
|
||||
}
|
||||
|
||||
int adf_service_register(struct service_hndl *service);
|
||||
int adf_service_unregister(struct service_hndl *service);
|
||||
|
||||
@ -102,13 +106,24 @@ int adf_dev_start(struct adf_accel_dev *accel_dev);
|
||||
int adf_dev_stop(struct adf_accel_dev *accel_dev);
|
||||
void adf_dev_shutdown(struct adf_accel_dev *accel_dev);
|
||||
|
||||
void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
|
||||
void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
|
||||
int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr);
|
||||
void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev);
|
||||
int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev);
|
||||
void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info);
|
||||
void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data);
|
||||
void adf_clean_vf_map(bool);
|
||||
|
||||
int adf_ctl_dev_register(void);
|
||||
void adf_ctl_dev_unregister(void);
|
||||
int adf_processes_dev_register(void);
|
||||
void adf_processes_dev_unregister(void);
|
||||
|
||||
int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev);
|
||||
void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev);
|
||||
int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev,
|
||||
struct adf_accel_dev *pf);
|
||||
void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev,
|
||||
struct adf_accel_dev *pf);
|
||||
struct list_head *adf_devmgr_get_head(void);
|
||||
struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id);
|
||||
struct adf_accel_dev *adf_devmgr_get_first(void);
|
||||
@ -130,6 +145,12 @@ int adf_enable_aer(struct adf_accel_dev *accel_dev, struct pci_driver *adf);
|
||||
void adf_disable_aer(struct adf_accel_dev *accel_dev);
|
||||
int adf_init_aer(void);
|
||||
void adf_exit_aer(void);
|
||||
int adf_init_admin_comms(struct adf_accel_dev *accel_dev);
|
||||
void adf_exit_admin_comms(struct adf_accel_dev *accel_dev);
|
||||
int adf_send_admin_init(struct adf_accel_dev *accel_dev);
|
||||
int adf_init_arb(struct adf_accel_dev *accel_dev);
|
||||
void adf_exit_arb(struct adf_accel_dev *accel_dev);
|
||||
void adf_update_ring_arb(struct adf_etr_ring_data *ring);
|
||||
|
||||
int adf_dev_get(struct adf_accel_dev *accel_dev);
|
||||
void adf_dev_put(struct adf_accel_dev *accel_dev);
|
||||
@ -141,10 +162,13 @@ int qat_crypto_unregister(void);
|
||||
struct qat_crypto_instance *qat_crypto_get_instance_node(int node);
|
||||
void qat_crypto_put_instance(struct qat_crypto_instance *inst);
|
||||
void qat_alg_callback(void *resp);
|
||||
void qat_alg_asym_callback(void *resp);
|
||||
int qat_algs_init(void);
|
||||
void qat_algs_exit(void);
|
||||
int qat_algs_register(void);
|
||||
int qat_algs_unregister(void);
|
||||
int qat_asym_algs_register(void);
|
||||
void qat_asym_algs_unregister(void);
|
||||
|
||||
int qat_hal_init(struct adf_accel_dev *accel_dev);
|
||||
void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle);
|
||||
@ -196,4 +220,23 @@ int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle);
|
||||
void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle);
|
||||
int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
|
||||
void *addr_ptr, int mem_size);
|
||||
void qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle,
|
||||
void *addr_ptr, int mem_size);
|
||||
#if defined(CONFIG_PCI_IOV)
|
||||
int adf_sriov_configure(struct pci_dev *pdev, int numvfs);
|
||||
void adf_disable_sriov(struct adf_accel_dev *accel_dev);
|
||||
void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
|
||||
uint32_t vf_mask);
|
||||
void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
|
||||
uint32_t vf_mask);
|
||||
#else
|
||||
static inline int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void adf_disable_sriov(struct adf_accel_dev *accel_dev)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
#endif
|
||||
|
@ -398,10 +398,9 @@ static int adf_ctl_ioctl_get_status(struct file *fp, unsigned int cmd,
|
||||
}
|
||||
|
||||
accel_dev = adf_devmgr_get_dev_by_id(dev_info.accel_id);
|
||||
if (!accel_dev) {
|
||||
pr_err("QAT: Device %d not found\n", dev_info.accel_id);
|
||||
if (!accel_dev)
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
hw_data = accel_dev->hw_device;
|
||||
dev_info.state = adf_dev_started(accel_dev) ? DEV_UP : DEV_DOWN;
|
||||
dev_info.num_ae = hw_data->get_num_aes(hw_data);
|
||||
@ -495,6 +494,7 @@ static void __exit adf_unregister_ctl_device_driver(void)
|
||||
adf_exit_aer();
|
||||
qat_crypto_unregister();
|
||||
qat_algs_exit();
|
||||
adf_clean_vf_map(false);
|
||||
mutex_destroy(&adf_ctl_lock);
|
||||
}
|
||||
|
||||
|
@ -50,21 +50,125 @@
|
||||
#include "adf_common_drv.h"
|
||||
|
||||
static LIST_HEAD(accel_table);
|
||||
static LIST_HEAD(vfs_table);
|
||||
static DEFINE_MUTEX(table_lock);
|
||||
static uint32_t num_devices;
|
||||
|
||||
struct vf_id_map {
|
||||
u32 bdf;
|
||||
u32 id;
|
||||
u32 fake_id;
|
||||
bool attached;
|
||||
struct list_head list;
|
||||
};
|
||||
|
||||
static int adf_get_vf_id(struct adf_accel_dev *vf)
|
||||
{
|
||||
return ((7 * (PCI_SLOT(accel_to_pci_dev(vf)->devfn) - 1)) +
|
||||
PCI_FUNC(accel_to_pci_dev(vf)->devfn) +
|
||||
(PCI_SLOT(accel_to_pci_dev(vf)->devfn) - 1));
|
||||
}
|
||||
|
||||
static int adf_get_vf_num(struct adf_accel_dev *vf)
|
||||
{
|
||||
return (accel_to_pci_dev(vf)->bus->number << 8) | adf_get_vf_id(vf);
|
||||
}
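
A quick standalone illustration of the arithmetic in adf_get_vf_id() and adf_get_vf_num() above, with made-up bus/slot/function values in place of a real pci_dev; the helper name and the example numbers are hypothetical.

#include <stdio.h>

static int example_vf_id(int slot, int func)
{
	/* Same expression as adf_get_vf_id():
	 * 7 * (slot - 1) + func + (slot - 1), i.e. 8 * (slot - 1) + func. */
	return (7 * (slot - 1)) + func + (slot - 1);
}

int main(void)
{
	int bus = 0x3f, slot = 2, func = 5;
	int id = example_vf_id(slot, func);	/* 8 * 1 + 5 = 13 */

	/* adf_get_vf_num() combines the bus number with the id: 0x3f0d here. */
	printf("vf id = %d, vf num = 0x%x\n", id, (bus << 8) | id);
	return 0;
}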
|
||||
|
||||
static struct vf_id_map *adf_find_vf(u32 bdf)
|
||||
{
|
||||
struct list_head *itr;
|
||||
|
||||
list_for_each(itr, &vfs_table) {
|
||||
struct vf_id_map *ptr =
|
||||
list_entry(itr, struct vf_id_map, list);
|
||||
|
||||
if (ptr->bdf == bdf)
|
||||
return ptr;
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static int adf_get_vf_real_id(u32 fake)
|
||||
{
|
||||
struct list_head *itr;
|
||||
|
||||
list_for_each(itr, &vfs_table) {
|
||||
struct vf_id_map *ptr =
|
||||
list_entry(itr, struct vf_id_map, list);
|
||||
if (ptr->fake_id == fake)
|
||||
return ptr->id;
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
/**
|
||||
* adf_clean_vf_map() - Cleans VF id mappings
|
||||
*
|
||||
* Function cleans internal ids for virtual functions.
|
||||
* @vf: flag indicating whether the mapping is cleaned
|
||||
* for vfs only or for vfs and pfs
|
||||
*/
|
||||
void adf_clean_vf_map(bool vf)
|
||||
{
|
||||
struct vf_id_map *map;
|
||||
struct list_head *ptr, *tmp;
|
||||
|
||||
mutex_lock(&table_lock);
|
||||
list_for_each_safe(ptr, tmp, &vfs_table) {
|
||||
map = list_entry(ptr, struct vf_id_map, list);
|
||||
if (map->bdf != -1)
|
||||
num_devices--;
|
||||
|
||||
if (vf && map->bdf == -1)
|
||||
continue;
|
||||
|
||||
list_del(ptr);
|
||||
kfree(map);
|
||||
}
|
||||
mutex_unlock(&table_lock);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(adf_clean_vf_map);
|
||||
|
||||
/**
|
||||
* adf_devmgr_update_class_index() - Update internal index
|
||||
* @hw_data: Pointer to internal device data.
|
||||
*
|
||||
* Function updates internal dev index for VFs
|
||||
*/
|
||||
void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data)
|
||||
{
|
||||
struct adf_hw_device_class *class = hw_data->dev_class;
|
||||
struct list_head *itr;
|
||||
int i = 0;
|
||||
|
||||
list_for_each(itr, &accel_table) {
|
||||
struct adf_accel_dev *ptr =
|
||||
list_entry(itr, struct adf_accel_dev, list);
|
||||
|
||||
if (ptr->hw_device->dev_class == class)
|
||||
ptr->hw_device->instance_id = i++;
|
||||
|
||||
if (i == class->instances)
|
||||
break;
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(adf_devmgr_update_class_index);
|
||||
|
||||
/**
|
||||
* adf_devmgr_add_dev() - Add accel_dev to the acceleration framework
|
||||
* @accel_dev: Pointer to acceleration device.
|
||||
* @pf: Corresponding PF if the accel_dev is a VF
|
||||
*
|
||||
* Function adds acceleration device to the acceleration framework.
|
||||
* To be used by QAT device specific drivers.
|
||||
*
|
||||
* Return: 0 on success, error code othewise.
|
||||
* Return: 0 on success, error code otherwise.
|
||||
*/
|
||||
int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev)
|
||||
int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev,
|
||||
struct adf_accel_dev *pf)
|
||||
{
|
||||
struct list_head *itr;
|
||||
int ret = 0;
|
||||
|
||||
if (num_devices == ADF_MAX_DEVICES) {
|
||||
dev_err(&GET_DEV(accel_dev), "Only support up to %d devices\n",
|
||||
@ -73,20 +177,77 @@ int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev)
|
||||
}
|
||||
|
||||
mutex_lock(&table_lock);
|
||||
list_for_each(itr, &accel_table) {
|
||||
struct adf_accel_dev *ptr =
|
||||
atomic_set(&accel_dev->ref_count, 0);
|
||||
|
||||
/* PF on host or VF on guest */
|
||||
if (!accel_dev->is_vf || (accel_dev->is_vf && !pf)) {
|
||||
struct vf_id_map *map;
|
||||
|
||||
list_for_each(itr, &accel_table) {
|
||||
struct adf_accel_dev *ptr =
|
||||
list_entry(itr, struct adf_accel_dev, list);
|
||||
|
||||
if (ptr == accel_dev) {
|
||||
mutex_unlock(&table_lock);
|
||||
return -EEXIST;
|
||||
if (ptr == accel_dev) {
|
||||
ret = -EEXIST;
|
||||
goto unlock;
|
||||
}
|
||||
}
|
||||
|
||||
list_add_tail(&accel_dev->list, &accel_table);
|
||||
accel_dev->accel_id = num_devices++;
|
||||
|
||||
map = kzalloc(sizeof(*map), GFP_KERNEL);
|
||||
if (!map) {
|
||||
ret = -ENOMEM;
|
||||
goto unlock;
|
||||
}
|
||||
map->bdf = ~0;
|
||||
map->id = accel_dev->accel_id;
|
||||
map->fake_id = map->id;
|
||||
map->attached = true;
|
||||
list_add_tail(&map->list, &vfs_table);
|
||||
} else if (accel_dev->is_vf && pf) {
|
||||
/* VF on host */
|
||||
struct adf_accel_vf_info *vf_info;
|
||||
struct vf_id_map *map;
|
||||
|
||||
vf_info = pf->pf.vf_info + adf_get_vf_id(accel_dev);
|
||||
|
||||
map = adf_find_vf(adf_get_vf_num(accel_dev));
|
||||
if (map) {
|
||||
struct vf_id_map *next;
|
||||
|
||||
accel_dev->accel_id = map->id;
|
||||
list_add_tail(&accel_dev->list, &accel_table);
|
||||
map->fake_id++;
|
||||
map->attached = true;
|
||||
next = list_next_entry(map, list);
|
||||
while (next && &next->list != &vfs_table) {
|
||||
next->fake_id++;
|
||||
next = list_next_entry(next, list);
|
||||
}
|
||||
|
||||
ret = 0;
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
map = kzalloc(sizeof(*map), GFP_KERNEL);
|
||||
if (!map) {
|
||||
ret = -ENOMEM;
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
accel_dev->accel_id = num_devices++;
|
||||
list_add_tail(&accel_dev->list, &accel_table);
|
||||
map->bdf = adf_get_vf_num(accel_dev);
|
||||
map->id = accel_dev->accel_id;
|
||||
map->fake_id = map->id;
|
||||
map->attached = true;
|
||||
list_add_tail(&map->list, &vfs_table);
|
||||
}
|
||||
atomic_set(&accel_dev->ref_count, 0);
|
||||
list_add_tail(&accel_dev->list, &accel_table);
|
||||
accel_dev->accel_id = num_devices++;
|
||||
unlock:
|
||||
mutex_unlock(&table_lock);
|
||||
return 0;
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(adf_devmgr_add_dev);
|
||||
|
||||
@ -98,17 +259,37 @@ struct list_head *adf_devmgr_get_head(void)
|
||||
/**
|
||||
* adf_devmgr_rm_dev() - Remove accel_dev from the acceleration framework.
|
||||
* @accel_dev: Pointer to acceleration device.
|
||||
* @pf: Corresponding PF if the accel_dev is a VF
|
||||
*
|
||||
* Function removes acceleration device from the acceleration framework.
|
||||
* To be used by QAT device specific drivers.
|
||||
*
|
||||
* Return: void
|
||||
*/
|
||||
void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev)
|
||||
void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev,
|
||||
struct adf_accel_dev *pf)
|
||||
{
|
||||
mutex_lock(&table_lock);
|
||||
if (!accel_dev->is_vf || (accel_dev->is_vf && !pf)) {
|
||||
num_devices--;
|
||||
} else if (accel_dev->is_vf && pf) {
|
||||
struct vf_id_map *map, *next;
|
||||
|
||||
map = adf_find_vf(adf_get_vf_num(accel_dev));
|
||||
if (!map) {
|
||||
dev_err(&GET_DEV(accel_dev), "Failed to find VF map\n");
|
||||
goto unlock;
|
||||
}
|
||||
map->fake_id--;
|
||||
map->attached = false;
|
||||
next = list_next_entry(map, list);
|
||||
while (next && &next->list != &vfs_table) {
|
||||
next->fake_id--;
|
||||
next = list_next_entry(next, list);
|
||||
}
|
||||
}
|
||||
unlock:
|
||||
list_del(&accel_dev->list);
|
||||
num_devices--;
|
||||
mutex_unlock(&table_lock);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(adf_devmgr_rm_dev);
|
||||
@ -154,17 +335,24 @@ EXPORT_SYMBOL_GPL(adf_devmgr_pci_to_accel_dev);
|
||||
struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id)
|
||||
{
|
||||
struct list_head *itr;
|
||||
int real_id;
|
||||
|
||||
mutex_lock(&table_lock);
|
||||
real_id = adf_get_vf_real_id(id);
|
||||
if (real_id < 0)
|
||||
goto unlock;
|
||||
|
||||
id = real_id;
|
||||
|
||||
list_for_each(itr, &accel_table) {
|
||||
struct adf_accel_dev *ptr =
|
||||
list_entry(itr, struct adf_accel_dev, list);
|
||||
|
||||
if (ptr->accel_id == id) {
|
||||
mutex_unlock(&table_lock);
|
||||
return ptr;
|
||||
}
|
||||
}
|
||||
unlock:
|
||||
mutex_unlock(&table_lock);
|
||||
return NULL;
|
||||
}
|
||||
@ -180,21 +368,52 @@ int adf_devmgr_verify_id(uint32_t id)
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
void adf_devmgr_get_num_dev(uint32_t *num)
|
||||
static int adf_get_num_dettached_vfs(void)
|
||||
{
|
||||
struct list_head *itr;
|
||||
int vfs = 0;
|
||||
|
||||
*num = 0;
|
||||
list_for_each(itr, &accel_table) {
|
||||
(*num)++;
|
||||
mutex_lock(&table_lock);
|
||||
list_for_each(itr, &vfs_table) {
|
||||
struct vf_id_map *ptr =
|
||||
list_entry(itr, struct vf_id_map, list);
|
||||
if (ptr->bdf != ~0 && !ptr->attached)
|
||||
vfs++;
|
||||
}
|
||||
mutex_unlock(&table_lock);
|
||||
return vfs;
|
||||
}
|
||||
|
||||
void adf_devmgr_get_num_dev(uint32_t *num)
|
||||
{
|
||||
*num = num_devices - adf_get_num_dettached_vfs();
|
||||
}
|
||||
|
||||
/**
|
||||
* adf_dev_in_use() - Check whether accel_dev is currently in use
|
||||
* @accel_dev: Pointer to acceleration device.
|
||||
*
|
||||
* To be used by QAT device specific drivers.
|
||||
*
|
||||
* Return: 1 when device is in use, 0 otherwise.
|
||||
*/
|
||||
int adf_dev_in_use(struct adf_accel_dev *accel_dev)
|
||||
{
|
||||
return atomic_read(&accel_dev->ref_count) != 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(adf_dev_in_use);
|
||||
|
||||
/**
|
||||
* adf_dev_get() - Increment accel_dev reference count
|
||||
* @accel_dev: Pointer to acceleration device.
|
||||
*
|
||||
* Increment the accel_dev refcount and if this is the first time
|
||||
* incrementing it during this period the accel_dev is in use,
|
||||
* increment the module refcount too.
|
||||
* To be used by QAT device specific drivers.
|
||||
*
|
||||
* Return: 0 when successful, EFAULT when fail to bump module refcount
|
||||
*/
|
||||
int adf_dev_get(struct adf_accel_dev *accel_dev)
|
||||
{
|
||||
if (atomic_add_return(1, &accel_dev->ref_count) == 1)
|
||||
@ -202,19 +421,50 @@ int adf_dev_get(struct adf_accel_dev *accel_dev)
|
||||
return -EFAULT;
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(adf_dev_get);
|
||||
|
||||
/**
|
||||
* adf_dev_put() - Decrement accel_dev reference count
|
||||
* @accel_dev: Pointer to acceleration device.
|
||||
*
|
||||
* Decrement the accel_dev refcount and if this is the last time
|
||||
* decrementing it during this period the accel_dev is in use,
|
||||
* decrement the module refcount too.
|
||||
* To be used by QAT device specific drivers.
|
||||
*
|
||||
* Return: void
|
||||
*/
|
||||
void adf_dev_put(struct adf_accel_dev *accel_dev)
|
||||
{
|
||||
if (atomic_sub_return(1, &accel_dev->ref_count) == 0)
|
||||
module_put(accel_dev->owner);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(adf_dev_put);
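
A hypothetical usage sketch of the get/put pairing documented above; do_work() is a placeholder, not a real kernel API.

static int example_with_device(struct adf_accel_dev *accel_dev)
{
	int ret;

	/* Takes a device reference and, on the first get, a module reference. */
	if (adf_dev_get(accel_dev))
		return -EFAULT;

	ret = do_work(accel_dev);	/* placeholder for the actual work */

	/* Drops the device reference; the last put releases the module too. */
	adf_dev_put(accel_dev);
	return ret;
}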
|
||||
|
||||
/**
|
||||
* adf_devmgr_in_reset() - Check whether device is in reset
|
||||
* @accel_dev: Pointer to acceleration device.
|
||||
*
|
||||
* To be used by QAT device specific drivers.
|
||||
*
|
||||
* Return: 1 when the device is being reset, 0 otherwise.
|
||||
*/
|
||||
int adf_devmgr_in_reset(struct adf_accel_dev *accel_dev)
|
||||
{
|
||||
return test_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(adf_devmgr_in_reset);
|
||||
|
||||
/**
|
||||
* adf_dev_started() - Check whether device has started
|
||||
* @accel_dev: Pointer to acceleration device.
|
||||
*
|
||||
* To be used by QAT device specific drivers.
|
||||
*
|
||||
* Return: 1 when the device has started, 0 otherwise
|
||||
*/
|
||||
int adf_dev_started(struct adf_accel_dev *accel_dev)
|
||||
{
|
||||
return test_bit(ADF_STATUS_STARTED, &accel_dev->status);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(adf_dev_started);
|
||||
|
@ -44,9 +44,8 @@
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
#include <adf_accel_devices.h>
|
||||
#include <adf_transport_internal.h>
|
||||
#include "adf_drv.h"
|
||||
#include "adf_accel_devices.h"
|
||||
#include "adf_transport_internal.h"
|
||||
|
||||
#define ADF_ARB_NUM 4
|
||||
#define ADF_ARB_REQ_RING_NUM 8
|
||||
@ -58,7 +57,6 @@
|
||||
#define ADF_ARB_RO_EN_OFFSET 0x090
|
||||
#define ADF_ARB_WQCFG_OFFSET 0x100
|
||||
#define ADF_ARB_WRK_2_SER_MAP_OFFSET 0x180
|
||||
#define ADF_ARB_WRK_2_SER_MAP 10
|
||||
#define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C
|
||||
|
||||
#define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \
|
||||
@ -89,10 +87,11 @@
|
||||
|
||||
int adf_init_arb(struct adf_accel_dev *accel_dev)
|
||||
{
|
||||
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
|
||||
void __iomem *csr = accel_dev->transport->banks[0].csr_addr;
|
||||
uint32_t arb_cfg = 0x1 << 31 | 0x4 << 4 | 0x1;
|
||||
uint32_t arb, i;
|
||||
const uint32_t *thd_2_arb_cfg;
|
||||
u32 arb_cfg = 0x1 << 31 | 0x4 << 4 | 0x1;
|
||||
u32 arb, i;
|
||||
const u32 *thd_2_arb_cfg;
|
||||
|
||||
/* Service arb configured for 32 bytes responses and
|
||||
* ring flow control check enabled. */
|
||||
@ -109,30 +108,39 @@ int adf_init_arb(struct adf_accel_dev *accel_dev)
|
||||
WRITE_CSR_ARB_RESPORDERING(csr, i, 0xFFFFFFFF);
|
||||
|
||||
/* Setup worker queue registers */
|
||||
for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++)
|
||||
for (i = 0; i < hw_data->num_engines; i++)
|
||||
WRITE_CSR_ARB_WQCFG(csr, i, i);
|
||||
|
||||
/* Map worker threads to service arbiters */
|
||||
adf_get_arbiter_mapping(accel_dev, &thd_2_arb_cfg);
|
||||
hw_data->get_arb_mapping(accel_dev, &thd_2_arb_cfg);
|
||||
|
||||
if (!thd_2_arb_cfg)
|
||||
return -EFAULT;
|
||||
|
||||
for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++)
|
||||
for (i = 0; i < hw_data->num_engines; i++)
|
||||
WRITE_CSR_ARB_WRK_2_SER_MAP(csr, i, *(thd_2_arb_cfg + i));
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(adf_init_arb);
|
||||
|
||||
void adf_update_ring_arb_enable(struct adf_etr_ring_data *ring)
|
||||
/**
|
||||
* adf_update_ring_arb() - update ring arbitration register
|
||||
* @ring: Pointer to ring data.
|
||||
*
|
||||
* Function enables or disables rings for/from arbitration.
|
||||
*/
|
||||
void adf_update_ring_arb(struct adf_etr_ring_data *ring)
|
||||
{
|
||||
WRITE_CSR_ARB_RINGSRVARBEN(ring->bank->csr_addr,
|
||||
ring->bank->bank_number,
|
||||
ring->bank->ring_mask & 0xFF);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(adf_update_ring_arb);
|
||||
|
||||
void adf_exit_arb(struct adf_accel_dev *accel_dev)
|
||||
{
|
||||
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
|
||||
void __iomem *csr;
|
||||
unsigned int i;
|
||||
|
||||
@ -146,14 +154,15 @@ void adf_exit_arb(struct adf_accel_dev *accel_dev)
|
||||
WRITE_CSR_ARB_SARCONFIG(csr, i, 0);
|
||||
|
||||
/* Shutdown work queue */
|
||||
for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++)
|
||||
for (i = 0; i < hw_data->num_engines; i++)
|
||||
WRITE_CSR_ARB_WQCFG(csr, i, 0);
|
||||
|
||||
/* Unmap worker threads to service arbiters */
|
||||
for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++)
|
||||
for (i = 0; i < hw_data->num_engines; i++)
|
||||
WRITE_CSR_ARB_WRK_2_SER_MAP(csr, i, 0);
|
||||
|
||||
/* Disable arbitration on all rings */
|
||||
for (i = 0; i < GET_MAX_BANKS(accel_dev); i++)
|
||||
WRITE_CSR_ARB_RINGSRVARBEN(csr, i, 0);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(adf_exit_arb);
|
@ -69,7 +69,7 @@ static void adf_service_add(struct service_hndl *service)
|
||||
* Function adds the acceleration service to the acceleration framework.
|
||||
* To be used by QAT device specific drivers.
|
||||
*
|
||||
* Return: 0 on success, error code othewise.
|
||||
* Return: 0 on success, error code otherwise.
|
||||
*/
|
||||
int adf_service_register(struct service_hndl *service)
|
||||
{
|
||||
@ -94,7 +94,7 @@ static void adf_service_remove(struct service_hndl *service)
|
||||
* Function remove the acceleration service from the acceleration framework.
|
||||
* To be used by QAT device specific drivers.
|
||||
*
|
||||
* Return: 0 on success, error code othewise.
|
||||
* Return: 0 on success, error code otherwise.
|
||||
*/
|
||||
int adf_service_unregister(struct service_hndl *service)
|
||||
{
|
||||
@ -114,7 +114,7 @@ EXPORT_SYMBOL_GPL(adf_service_unregister);
|
||||
* Initialize the ring data structures and the admin comms and arbitration
|
||||
* services.
|
||||
*
|
||||
* Return: 0 on success, error code othewise.
|
||||
* Return: 0 on success, error code otherwise.
|
||||
*/
|
||||
int adf_dev_init(struct adf_accel_dev *accel_dev)
|
||||
{
|
||||
@ -177,20 +177,6 @@ int adf_dev_init(struct adf_accel_dev *accel_dev)
|
||||
*/
|
||||
list_for_each(list_itr, &service_table) {
|
||||
service = list_entry(list_itr, struct service_hndl, list);
|
||||
if (!service->admin)
|
||||
continue;
|
||||
if (service->event_hld(accel_dev, ADF_EVENT_INIT)) {
|
||||
dev_err(&GET_DEV(accel_dev),
|
||||
"Failed to initialise service %s\n",
|
||||
service->name);
|
||||
return -EFAULT;
|
||||
}
|
||||
set_bit(accel_dev->accel_id, &service->init_status);
|
||||
}
|
||||
list_for_each(list_itr, &service_table) {
|
||||
service = list_entry(list_itr, struct service_hndl, list);
|
||||
if (service->admin)
|
||||
continue;
|
||||
if (service->event_hld(accel_dev, ADF_EVENT_INIT)) {
|
||||
dev_err(&GET_DEV(accel_dev),
|
||||
"Failed to initialise service %s\n",
|
||||
@ -201,6 +187,7 @@ int adf_dev_init(struct adf_accel_dev *accel_dev)
|
||||
}
|
||||
|
||||
hw_data->enable_error_correction(accel_dev);
|
||||
hw_data->enable_vf2pf_comms(accel_dev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -214,10 +201,11 @@ EXPORT_SYMBOL_GPL(adf_dev_init);
|
||||
* is ready to be used.
|
||||
* To be used by QAT device specific drivers.
|
||||
*
|
||||
* Return: 0 on success, error code othewise.
|
||||
* Return: 0 on success, error code otherwise.
|
||||
*/
|
||||
int adf_dev_start(struct adf_accel_dev *accel_dev)
|
||||
{
|
||||
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
|
||||
struct service_hndl *service;
|
||||
struct list_head *list_itr;
|
||||
|
||||
@ -229,22 +217,13 @@ int adf_dev_start(struct adf_accel_dev *accel_dev)
|
||||
}
|
||||
set_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);
|
||||
|
||||
list_for_each(list_itr, &service_table) {
|
||||
service = list_entry(list_itr, struct service_hndl, list);
|
||||
if (!service->admin)
|
||||
continue;
|
||||
if (service->event_hld(accel_dev, ADF_EVENT_START)) {
|
||||
dev_err(&GET_DEV(accel_dev),
|
||||
"Failed to start service %s\n",
|
||||
service->name);
|
||||
return -EFAULT;
|
||||
}
|
||||
set_bit(accel_dev->accel_id, &service->start_status);
|
||||
if (hw_data->send_admin_init(accel_dev)) {
|
||||
dev_err(&GET_DEV(accel_dev), "Failed to send init message\n");
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
list_for_each(list_itr, &service_table) {
|
||||
service = list_entry(list_itr, struct service_hndl, list);
|
||||
if (service->admin)
|
||||
continue;
|
||||
if (service->event_hld(accel_dev, ADF_EVENT_START)) {
|
||||
dev_err(&GET_DEV(accel_dev),
|
||||
"Failed to start service %s\n",
|
||||
@ -257,7 +236,8 @@ int adf_dev_start(struct adf_accel_dev *accel_dev)
|
||||
clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
|
||||
set_bit(ADF_STATUS_STARTED, &accel_dev->status);
|
||||
|
||||
if (qat_algs_register()) {
|
||||
if (!list_empty(&accel_dev->crypto_list) &&
|
||||
(qat_algs_register() || qat_asym_algs_register())) {
|
||||
dev_err(&GET_DEV(accel_dev),
|
||||
"Failed to register crypto algs\n");
|
||||
set_bit(ADF_STATUS_STARTING, &accel_dev->status);
|
||||
@ -276,7 +256,7 @@ EXPORT_SYMBOL_GPL(adf_dev_start);
|
||||
* is shutting down.
|
||||
* To be used by QAT device specific drivers.
|
||||
*
|
||||
* Return: 0 on success, error code othewise.
|
||||
* Return: 0 on success, error code otherwise.
|
||||
*/
|
||||
int adf_dev_stop(struct adf_accel_dev *accel_dev)
|
||||
{
|
||||
@ -292,14 +272,15 @@ int adf_dev_stop(struct adf_accel_dev *accel_dev)
|
||||
clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
|
||||
clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
|
||||
|
||||
if (qat_algs_unregister())
|
||||
if (!list_empty(&accel_dev->crypto_list) && qat_algs_unregister())
|
||||
dev_err(&GET_DEV(accel_dev),
|
||||
"Failed to unregister crypto algs\n");
|
||||
|
||||
if (!list_empty(&accel_dev->crypto_list))
|
||||
qat_asym_algs_unregister();
|
||||
|
||||
list_for_each(list_itr, &service_table) {
|
||||
service = list_entry(list_itr, struct service_hndl, list);
|
||||
if (service->admin)
|
||||
continue;
|
||||
if (!test_bit(accel_dev->accel_id, &service->start_status))
|
||||
continue;
|
||||
ret = service->event_hld(accel_dev, ADF_EVENT_STOP);
|
||||
@ -310,19 +291,6 @@ int adf_dev_stop(struct adf_accel_dev *accel_dev)
|
||||
clear_bit(accel_dev->accel_id, &service->start_status);
|
||||
}
|
||||
}
|
||||
list_for_each(list_itr, &service_table) {
|
||||
service = list_entry(list_itr, struct service_hndl, list);
|
||||
if (!service->admin)
|
||||
continue;
|
||||
if (!test_bit(accel_dev->accel_id, &service->start_status))
|
||||
continue;
|
||||
if (service->event_hld(accel_dev, ADF_EVENT_STOP))
|
||||
dev_err(&GET_DEV(accel_dev),
|
||||
"Failed to shutdown service %s\n",
|
||||
service->name);
|
||||
else
|
||||
clear_bit(accel_dev->accel_id, &service->start_status);
|
||||
}
|
||||
|
||||
if (wait)
|
||||
msleep(100);
|
||||
@ -373,21 +341,6 @@ void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
|
||||
|
||||
list_for_each(list_itr, &service_table) {
|
||||
service = list_entry(list_itr, struct service_hndl, list);
|
||||
if (service->admin)
|
||||
continue;
|
||||
if (!test_bit(accel_dev->accel_id, &service->init_status))
|
||||
continue;
|
||||
if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN))
|
||||
dev_err(&GET_DEV(accel_dev),
|
||||
"Failed to shutdown service %s\n",
|
||||
service->name);
|
||||
else
|
||||
clear_bit(accel_dev->accel_id, &service->init_status);
|
||||
}
|
||||
list_for_each(list_itr, &service_table) {
|
||||
service = list_entry(list_itr, struct service_hndl, list);
|
||||
if (!service->admin)
|
||||
continue;
|
||||
if (!test_bit(accel_dev->accel_id, &service->init_status))
|
||||
continue;
|
||||
if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN))
|
||||
@ -413,6 +366,7 @@ void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
|
||||
if (hw_data->exit_admin_comms)
|
||||
hw_data->exit_admin_comms(accel_dev);
|
||||
|
||||
hw_data->disable_iov(accel_dev);
|
||||
adf_cleanup_etr_data(accel_dev);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(adf_dev_shutdown);
|
||||
@ -424,17 +378,6 @@ int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev)
|
||||
|
||||
list_for_each(list_itr, &service_table) {
|
||||
service = list_entry(list_itr, struct service_hndl, list);
|
||||
if (service->admin)
|
||||
continue;
|
||||
if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING))
|
||||
dev_err(&GET_DEV(accel_dev),
|
||||
"Failed to restart service %s.\n",
|
||||
service->name);
|
||||
}
|
||||
list_for_each(list_itr, &service_table) {
|
||||
service = list_entry(list_itr, struct service_hndl, list);
|
||||
if (!service->admin)
|
||||
continue;
|
||||
if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING))
|
||||
dev_err(&GET_DEV(accel_dev),
|
||||
"Failed to restart service %s.\n",
|
||||
@ -450,17 +393,6 @@ int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev)
|
||||
|
||||
list_for_each(list_itr, &service_table) {
|
||||
service = list_entry(list_itr, struct service_hndl, list);
|
||||
if (service->admin)
|
||||
continue;
|
||||
if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED))
|
||||
dev_err(&GET_DEV(accel_dev),
|
||||
"Failed to restart service %s.\n",
|
||||
service->name);
|
||||
}
|
||||
list_for_each(list_itr, &service_table) {
|
||||
service = list_entry(list_itr, struct service_hndl, list);
|
||||
if (!service->admin)
|
||||
continue;
|
||||
if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED))
|
||||
dev_err(&GET_DEV(accel_dev),
|
||||
"Failed to restart service %s.\n",
|
||||
|
drivers/crypto/qat/qat_common/adf_pf2vf_msg.c (new file, 438 lines)
@ -0,0 +1,438 @@
|
||||
/*
|
||||
This file is provided under a dual BSD/GPLv2 license. When using or
|
||||
redistributing this file, you may do so under either license.
|
||||
|
||||
GPL LICENSE SUMMARY
|
||||
Copyright(c) 2015 Intel Corporation.
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of version 2 of the GNU General Public License as
|
||||
published by the Free Software Foundation.
|
||||
|
||||
This program is distributed in the hope that it will be useful, but
|
||||
WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
General Public License for more details.
|
||||
|
||||
Contact Information:
|
||||
qat-linux@intel.com
|
||||
|
||||
BSD LICENSE
|
||||
Copyright(c) 2015 Intel Corporation.
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Intel Corporation nor the names of its
|
||||
contributors may be used to endorse or promote products derived
|
||||
from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#include <linux/pci.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/delay.h>
|
||||
#include "adf_accel_devices.h"
|
||||
#include "adf_common_drv.h"
|
||||
#include "adf_pf2vf_msg.h"
|
||||
|
||||
#define ADF_DH895XCC_EP_OFFSET 0x3A000
|
||||
#define ADF_DH895XCC_ERRMSK3 (ADF_DH895XCC_EP_OFFSET + 0x1C)
|
||||
#define ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask) ((vf_mask & 0xFFFF) << 9)
|
||||
#define ADF_DH895XCC_ERRMSK5 (ADF_DH895XCC_EP_OFFSET + 0xDC)
|
||||
#define ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask) (vf_mask >> 16)
|
||||
|
||||
/**
|
||||
* adf_enable_pf2vf_interrupts() - Enable PF to VF interrupts
|
||||
* @accel_dev: Pointer to acceleration device.
|
||||
*
|
||||
* Function enables PF to VF interrupts
|
||||
*/
|
||||
void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
|
||||
{
|
||||
struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
|
||||
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
|
||||
void __iomem *pmisc_bar_addr =
|
||||
pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
|
||||
|
||||
ADF_CSR_WR(pmisc_bar_addr, hw_data->get_vintmsk_offset(0), 0x0);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(adf_enable_pf2vf_interrupts);
|
||||
|
||||
/**
|
||||
* adf_disable_pf2vf_interrupts() - Disable PF to VF interrupts
|
||||
* @accel_dev: Pointer to acceleration device.
|
||||
*
|
||||
* Function disables PF to VF interrupts
|
||||
*/
|
||||
void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
|
||||
{
|
||||
struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
|
||||
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
|
||||
void __iomem *pmisc_bar_addr =
|
||||
pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
|
||||
|
||||
ADF_CSR_WR(pmisc_bar_addr, hw_data->get_vintmsk_offset(0), 0x2);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(adf_disable_pf2vf_interrupts);
|
||||
|
||||
void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
|
||||
u32 vf_mask)
|
||||
{
|
||||
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
|
||||
struct adf_bar *pmisc =
|
||||
&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
|
||||
void __iomem *pmisc_addr = pmisc->virt_addr;
|
||||
u32 reg;
|
||||
|
||||
/* Enable VF2PF Messaging Ints - VFs 1 through 16 per vf_mask[15:0] */
|
||||
if (vf_mask & 0xFFFF) {
|
||||
reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK3);
|
||||
reg &= ~ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask);
|
||||
ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK3, reg);
|
||||
}
|
||||
|
||||
/* Enable VF2PF Messaging Ints - VFs 17 through 32 per vf_mask[31:16] */
|
||||
if (vf_mask >> 16) {
|
||||
reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK5);
|
||||
reg &= ~ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask);
|
||||
ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK5, reg);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* adf_disable_vf2pf_interrupts() - Disable VF to PF interrupts
|
||||
* @accel_dev: Pointer to acceleration device.
|
||||
*
|
||||
* Function disables VF to PF interrupts
|
||||
*/
|
||||
void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
|
||||
{
|
||||
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
|
||||
struct adf_bar *pmisc =
|
||||
&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
|
||||
void __iomem *pmisc_addr = pmisc->virt_addr;
|
||||
u32 reg;
|
||||
|
||||
/* Disable VF2PF interrupts for VFs 1 through 16 per vf_mask[15:0] */
|
||||
if (vf_mask & 0xFFFF) {
|
||||
reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK3) |
|
||||
ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask);
|
||||
ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK3, reg);
|
||||
}
|
||||
|
||||
/* Disable VF2PF interrupts for VFs 17 through 32 per vf_mask[31:16] */
|
||||
if (vf_mask >> 16) {
|
||||
reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK5) |
|
||||
ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask);
|
||||
ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK5, reg);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(adf_disable_vf2pf_interrupts);
|
||||
|
||||
static int __adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
|
||||
{
|
||||
struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
|
||||
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
|
||||
void __iomem *pmisc_bar_addr =
|
||||
pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
|
||||
u32 val, pf2vf_offset, count = 0;
|
||||
u32 local_in_use_mask, local_in_use_pattern;
|
||||
u32 remote_in_use_mask, remote_in_use_pattern;
|
||||
struct mutex *lock; /* lock preventing concurrent access to the CSR */
|
||||
u32 int_bit;
|
||||
int ret = 0;
|
||||
|
||||
if (accel_dev->is_vf) {
|
||||
pf2vf_offset = hw_data->get_pf2vf_offset(0);
|
||||
lock = &accel_dev->vf.vf2pf_lock;
|
||||
local_in_use_mask = ADF_VF2PF_IN_USE_BY_VF_MASK;
|
||||
local_in_use_pattern = ADF_VF2PF_IN_USE_BY_VF;
|
||||
remote_in_use_mask = ADF_PF2VF_IN_USE_BY_PF_MASK;
|
||||
remote_in_use_pattern = ADF_PF2VF_IN_USE_BY_PF;
|
||||
int_bit = ADF_VF2PF_INT;
|
||||
} else {
|
||||
pf2vf_offset = hw_data->get_pf2vf_offset(vf_nr);
|
||||
lock = &accel_dev->pf.vf_info[vf_nr].pf2vf_lock;
|
||||
local_in_use_mask = ADF_PF2VF_IN_USE_BY_PF_MASK;
|
||||
local_in_use_pattern = ADF_PF2VF_IN_USE_BY_PF;
|
||||
remote_in_use_mask = ADF_VF2PF_IN_USE_BY_VF_MASK;
|
||||
remote_in_use_pattern = ADF_VF2PF_IN_USE_BY_VF;
|
||||
int_bit = ADF_PF2VF_INT;
|
||||
}
|
||||
|
||||
mutex_lock(lock);
|
||||
|
||||
/* Check if PF2VF CSR is in use by remote function */
|
||||
val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
|
||||
if ((val & remote_in_use_mask) == remote_in_use_pattern) {
|
||||
dev_dbg(&GET_DEV(accel_dev),
|
||||
"PF2VF CSR in use by remote function\n");
|
||||
ret = -EBUSY;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* Attempt to get ownership of PF2VF CSR */
|
||||
msg &= ~local_in_use_mask;
|
||||
msg |= local_in_use_pattern;
|
||||
ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, msg);
|
||||
|
||||
/* Wait in case remote func also attempting to get ownership */
|
||||
msleep(ADF_IOV_MSG_COLLISION_DETECT_DELAY);
|
||||
|
||||
val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
|
||||
if ((val & local_in_use_mask) != local_in_use_pattern) {
|
||||
dev_dbg(&GET_DEV(accel_dev),
|
||||
"PF2VF CSR in use by remote - collision detected\n");
|
||||
ret = -EBUSY;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/*
|
||||
* This function now owns the PV2VF CSR. The IN_USE_BY pattern must
|
||||
* remain in the PF2VF CSR for all writes including ACK from remote
|
||||
* until this local function relinquishes the CSR. Send the message
|
||||
* by interrupting the remote.
|
||||
*/
|
||||
ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, msg | int_bit);
|
||||
|
||||
/* Wait for confirmation from remote func it received the message */
|
||||
do {
|
||||
msleep(ADF_IOV_MSG_ACK_DELAY);
|
||||
val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
|
||||
} while ((val & int_bit) && (count++ < ADF_IOV_MSG_ACK_MAX_RETRY));
|
||||
|
||||
if (val & int_bit) {
|
||||
dev_dbg(&GET_DEV(accel_dev), "ACK not received from remote\n");
|
||||
val &= ~int_bit;
|
||||
ret = -EIO;
|
||||
}
|
||||
|
||||
/* Finished with PF2VF CSR; relinquish it and leave msg in CSR */
|
||||
ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, val & ~local_in_use_mask);
|
||||
out:
|
||||
mutex_unlock(lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* adf_iov_putmsg() - send PF2VF message
|
||||
* @accel_dev: Pointer to acceleration device.
|
||||
* @msg: Message to send
|
||||
* @vf_nr: VF number to which the message will be sent
|
||||
*
|
||||
* Function sends a message from the PF to a VF
|
||||
*
|
||||
* Return: 0 on success, error code otherwise.
|
||||
*/
|
||||
int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
|
||||
{
|
||||
u32 count = 0;
|
||||
int ret;
|
||||
|
||||
do {
|
||||
ret = __adf_iov_putmsg(accel_dev, msg, vf_nr);
|
||||
if (ret)
|
||||
msleep(ADF_IOV_MSG_RETRY_DELAY);
|
||||
} while (ret && (count++ < ADF_IOV_MSG_MAX_RETRIES));
|
||||
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(adf_iov_putmsg);
|
||||
|
||||
void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info)
|
||||
{
|
||||
struct adf_accel_dev *accel_dev = vf_info->accel_dev;
|
||||
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
|
||||
int bar_id = hw_data->get_misc_bar_id(hw_data);
|
||||
struct adf_bar *pmisc = &GET_BARS(accel_dev)[bar_id];
|
||||
void __iomem *pmisc_addr = pmisc->virt_addr;
|
||||
u32 msg, resp = 0, vf_nr = vf_info->vf_nr;
|
||||
|
||||
/* Read message from the VF */
|
||||
msg = ADF_CSR_RD(pmisc_addr, hw_data->get_pf2vf_offset(vf_nr));
|
||||
|
||||
/* To ACK, clear the VF2PFINT bit */
|
||||
msg &= ~ADF_VF2PF_INT;
|
||||
ADF_CSR_WR(pmisc_addr, hw_data->get_pf2vf_offset(vf_nr), msg);
|
||||
|
||||
if (!(msg & ADF_VF2PF_MSGORIGIN_SYSTEM))
|
||||
/* Ignore legacy non-system (non-kernel) VF2PF messages */
|
||||
goto err;
|
||||
|
||||
switch ((msg & ADF_VF2PF_MSGTYPE_MASK) >> ADF_VF2PF_MSGTYPE_SHIFT) {
|
||||
case ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ:
|
||||
{
|
||||
u8 vf_compat_ver = msg >> ADF_VF2PF_COMPAT_VER_REQ_SHIFT;
|
||||
|
||||
resp = (ADF_PF2VF_MSGORIGIN_SYSTEM |
|
||||
(ADF_PF2VF_MSGTYPE_VERSION_RESP <<
|
||||
ADF_PF2VF_MSGTYPE_SHIFT) |
|
||||
(ADF_PFVF_COMPATIBILITY_VERSION <<
|
||||
ADF_PF2VF_VERSION_RESP_VERS_SHIFT));
|
||||
|
||||
dev_dbg(&GET_DEV(accel_dev),
|
||||
"Compatibility Version Request from VF%d vers=%u\n",
|
||||
vf_nr + 1, vf_compat_ver);
|
||||
|
||||
if (vf_compat_ver < hw_data->min_iov_compat_ver) {
|
||||
dev_err(&GET_DEV(accel_dev),
|
||||
"VF (vers %d) incompatible with PF (vers %d)\n",
|
||||
vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
|
||||
resp |= ADF_PF2VF_VF_INCOMPATIBLE <<
|
||||
ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
|
||||
} else if (vf_compat_ver > ADF_PFVF_COMPATIBILITY_VERSION) {
|
||||
dev_err(&GET_DEV(accel_dev),
|
||||
"VF (vers %d) compat with PF (vers %d) unkn.\n",
|
||||
vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
|
||||
resp |= ADF_PF2VF_VF_COMPAT_UNKNOWN <<
|
||||
ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
|
||||
} else {
|
||||
dev_dbg(&GET_DEV(accel_dev),
|
||||
"VF (vers %d) compatible with PF (vers %d)\n",
|
||||
vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
|
||||
resp |= ADF_PF2VF_VF_COMPATIBLE <<
|
||||
ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
|
||||
}
|
||||
}
|
||||
break;
|
||||
case ADF_VF2PF_MSGTYPE_VERSION_REQ:
|
||||
dev_dbg(&GET_DEV(accel_dev),
|
||||
"Legacy VersionRequest received from VF%d 0x%x\n",
|
||||
vf_nr + 1, msg);
|
||||
resp = (ADF_PF2VF_MSGORIGIN_SYSTEM |
|
||||
(ADF_PF2VF_MSGTYPE_VERSION_RESP <<
|
||||
ADF_PF2VF_MSGTYPE_SHIFT) |
|
||||
(ADF_PFVF_COMPATIBILITY_VERSION <<
|
||||
ADF_PF2VF_VERSION_RESP_VERS_SHIFT));
|
||||
resp |= ADF_PF2VF_VF_COMPATIBLE <<
|
||||
ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
|
||||
/* Set legacy major and minor version num */
|
||||
resp |= 1 << ADF_PF2VF_MAJORVERSION_SHIFT |
|
||||
1 << ADF_PF2VF_MINORVERSION_SHIFT;
|
||||
break;
|
||||
case ADF_VF2PF_MSGTYPE_INIT:
|
||||
{
|
||||
dev_dbg(&GET_DEV(accel_dev),
|
||||
"Init message received from VF%d 0x%x\n",
|
||||
vf_nr + 1, msg);
|
||||
vf_info->init = true;
|
||||
}
|
||||
break;
|
||||
case ADF_VF2PF_MSGTYPE_SHUTDOWN:
|
||||
{
|
||||
dev_dbg(&GET_DEV(accel_dev),
|
||||
"Shutdown message received from VF%d 0x%x\n",
|
||||
vf_nr + 1, msg);
|
||||
vf_info->init = false;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
goto err;
|
||||
}
|
||||
|
||||
if (resp && adf_iov_putmsg(accel_dev, resp, vf_nr))
|
||||
dev_err(&GET_DEV(accel_dev), "Failed to send response to VF\n");
|
||||
|
||||
/* re-enable interrupt on PF from this VF */
|
||||
adf_enable_vf2pf_interrupts(accel_dev, (1 << vf_nr));
|
||||
return;
|
||||
err:
|
||||
dev_dbg(&GET_DEV(accel_dev), "Unknown message from VF%d (0x%x);\n",
|
||||
vf_nr + 1, msg);
|
||||
}
|
||||
|
||||
void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev)
|
||||
{
|
||||
struct adf_accel_vf_info *vf;
|
||||
u32 msg = (ADF_PF2VF_MSGORIGIN_SYSTEM |
|
||||
(ADF_PF2VF_MSGTYPE_RESTARTING << ADF_PF2VF_MSGTYPE_SHIFT));
|
||||
int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev));
|
||||
|
||||
for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) {
|
||||
if (vf->init && adf_iov_putmsg(accel_dev, msg, i))
|
||||
dev_err(&GET_DEV(accel_dev),
|
||||
"Failed to send restarting msg to VF%d\n", i);
|
||||
}
|
||||
}
|
||||
|
||||
static int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev)
|
||||
{
|
||||
unsigned long timeout = msecs_to_jiffies(ADF_IOV_MSG_RESP_TIMEOUT);
|
||||
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
|
||||
u32 msg = 0;
|
||||
int ret;
|
||||
|
||||
msg = ADF_VF2PF_MSGORIGIN_SYSTEM;
|
||||
msg |= ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ << ADF_VF2PF_MSGTYPE_SHIFT;
|
||||
msg |= ADF_PFVF_COMPATIBILITY_VERSION << ADF_VF2PF_COMPAT_VER_REQ_SHIFT;
|
||||
BUILD_BUG_ON(ADF_PFVF_COMPATIBILITY_VERSION > 255);
|
||||
|
||||
/* Send request from VF to PF */
|
||||
ret = adf_iov_putmsg(accel_dev, msg, 0);
|
||||
if (ret) {
|
||||
dev_err(&GET_DEV(accel_dev),
|
||||
"Failed to send Compatibility Version Request.\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Wait for response */
|
||||
if (!wait_for_completion_timeout(&accel_dev->vf.iov_msg_completion,
|
||||
timeout)) {
|
||||
dev_err(&GET_DEV(accel_dev),
|
||||
"IOV request/response message timeout expired\n");
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
/* Response from PF received, check compatibility */
|
||||
switch (accel_dev->vf.compatible) {
|
||||
case ADF_PF2VF_VF_COMPATIBLE:
|
||||
break;
|
||||
case ADF_PF2VF_VF_COMPAT_UNKNOWN:
|
||||
/* VF is newer than PF and decides whether it is compatible */
|
||||
if (accel_dev->vf.pf_version >= hw_data->min_iov_compat_ver)
|
||||
break;
|
||||
/* fall through */
|
||||
case ADF_PF2VF_VF_INCOMPATIBLE:
|
||||
dev_err(&GET_DEV(accel_dev),
|
||||
"PF (vers %d) and VF (vers %d) are not compatible\n",
|
||||
accel_dev->vf.pf_version,
|
||||
ADF_PFVF_COMPATIBILITY_VERSION);
|
||||
return -EINVAL;
|
||||
default:
|
||||
dev_err(&GET_DEV(accel_dev),
|
||||
"Invalid response from PF; assume not compatible\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* adf_enable_vf2pf_comms() - Function enables communication from vf to pf
|
||||
*
|
||||
* @accel_dev: Pointer to acceleration device virtual function.
|
||||
*
|
||||
* Return: 0 on success, error code otherwise.
|
||||
*/
|
||||
int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
|
||||
{
|
||||
adf_enable_pf2vf_interrupts(accel_dev);
|
||||
return adf_vf2pf_request_version(accel_dev);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(adf_enable_vf2pf_comms);
|
drivers/crypto/qat/qat_common/adf_pf2vf_msg.h (new file, 146 lines)
@ -0,0 +1,146 @@
|
||||
/*
|
||||
This file is provided under a dual BSD/GPLv2 license. When using or
|
||||
redistributing this file, you may do so under either license.
|
||||
|
||||
GPL LICENSE SUMMARY
|
||||
Copyright(c) 2015 Intel Corporation.
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of version 2 of the GNU General Public License as
|
||||
published by the Free Software Foundation.
|
||||
|
||||
This program is distributed in the hope that it will be useful, but
|
||||
WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
General Public License for more details.
|
||||
|
||||
Contact Information:
|
||||
qat-linux@intel.com
|
||||
|
||||
BSD LICENSE
|
||||
Copyright(c) 2015 Intel Corporation.
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Intel Corporation nor the names of its
|
||||
contributors may be used to endorse or promote products derived
|
||||
from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
#ifndef ADF_PF2VF_MSG_H
|
||||
#define ADF_PF2VF_MSG_H
|
||||
|
||||
/*
 * PF<->VF Messaging
 * The PF has an array of 32-bit PF2VF registers, one for each VF. The
 * PF can access all these registers; each VF can access only the one
 * register associated with that particular VF.
 *
 * The register is functionally split into two parts:
 * The bottom half is for PF->VF messages. In particular when the first
 * bit of this register (bit 0) gets set an interrupt will be triggered
 * in the respective VF.
 * The top half is for VF->PF messages. In particular when the first bit
 * of this half of the register (bit 16) gets set an interrupt will be triggered
 * in the PF.
 *
 * The remaining bits within this register are available to encode messages
 * and implement a collision control mechanism to prevent concurrent use of
 * the PF2VF register by both the PF and VF.
 *
 *   31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16
 *  _______________________________________________
 * |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
 * +-----------------------------------------------+
 *  \___________________________/ \_________/ ^   ^
 *              ^                      ^      |   |
 *              |                      |      |   VF2PF Int
 *              |                      |      Message Origin
 *              |                      Message Type
 *              Message-specific Data/Reserved
 *
 *   15 14 13 12 11 10  9  8  7  6  5  4  3  2  1  0
 *  _______________________________________________
 * |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
 * +-----------------------------------------------+
 *  \___________________________/ \_________/ ^   ^
 *              ^                      ^      |   |
 *              |                      |      |   PF2VF Int
 *              |                      |      Message Origin
 *              |                      Message Type
 *              Message-specific Data/Reserved
 *
 * Message Origin (Should always be 1)
 * A legacy out-of-tree QAT driver allowed for a set of messages not supported
 * by this driver; these had a Msg Origin of 0 and are ignored by this driver.
 *
 * When a PF or VF attempts to send a message in the lower or upper 16 bits,
 * respectively, the other 16 bits are written to first with a defined
 * IN_USE_BY pattern as part of a collision control scheme (see adf_iov_putmsg).
 */
|
||||
|
||||
#define ADF_PFVF_COMPATIBILITY_VERSION 0x1 /* PF<->VF compat */
|
||||
|
||||
/* PF->VF messages */
|
||||
#define ADF_PF2VF_INT BIT(0)
|
||||
#define ADF_PF2VF_MSGORIGIN_SYSTEM BIT(1)
|
||||
#define ADF_PF2VF_MSGTYPE_MASK 0x0000003C
|
||||
#define ADF_PF2VF_MSGTYPE_SHIFT 2
|
||||
#define ADF_PF2VF_MSGTYPE_RESTARTING 0x01
|
||||
#define ADF_PF2VF_MSGTYPE_VERSION_RESP 0x02
|
||||
#define ADF_PF2VF_IN_USE_BY_PF 0x6AC20000
|
||||
#define ADF_PF2VF_IN_USE_BY_PF_MASK 0xFFFE0000
|
||||
|
||||
/* PF->VF Version Response */
|
||||
#define ADF_PF2VF_VERSION_RESP_VERS_MASK 0x00003FC0
|
||||
#define ADF_PF2VF_VERSION_RESP_VERS_SHIFT 6
|
||||
#define ADF_PF2VF_VERSION_RESP_RESULT_MASK 0x0000C000
|
||||
#define ADF_PF2VF_VERSION_RESP_RESULT_SHIFT 14
|
||||
#define ADF_PF2VF_MINORVERSION_SHIFT 6
|
||||
#define ADF_PF2VF_MAJORVERSION_SHIFT 10
|
||||
#define ADF_PF2VF_VF_COMPATIBLE 1
|
||||
#define ADF_PF2VF_VF_INCOMPATIBLE 2
|
||||
#define ADF_PF2VF_VF_COMPAT_UNKNOWN 3
|
||||
|
||||
/* VF->PF messages */
|
||||
#define ADF_VF2PF_IN_USE_BY_VF 0x00006AC2
|
||||
#define ADF_VF2PF_IN_USE_BY_VF_MASK 0x0000FFFE
|
||||
#define ADF_VF2PF_INT BIT(16)
|
||||
#define ADF_VF2PF_MSGORIGIN_SYSTEM BIT(17)
|
||||
#define ADF_VF2PF_MSGTYPE_MASK 0x003C0000
|
||||
#define ADF_VF2PF_MSGTYPE_SHIFT 18
|
||||
#define ADF_VF2PF_MSGTYPE_INIT 0x3
|
||||
#define ADF_VF2PF_MSGTYPE_SHUTDOWN 0x4
|
||||
#define ADF_VF2PF_MSGTYPE_VERSION_REQ 0x5
|
||||
#define ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ 0x6
|
||||
|
||||
/* VF->PF Compatible Version Request */
|
||||
#define ADF_VF2PF_COMPAT_VER_REQ_SHIFT 22
|
||||
|
||||
/* Collision detection */
|
||||
#define ADF_IOV_MSG_COLLISION_DETECT_DELAY 10
|
||||
#define ADF_IOV_MSG_ACK_DELAY 2
|
||||
#define ADF_IOV_MSG_ACK_MAX_RETRY 100
|
||||
#define ADF_IOV_MSG_RETRY_DELAY 5
|
||||
#define ADF_IOV_MSG_MAX_RETRIES 3
|
||||
#define ADF_IOV_MSG_RESP_TIMEOUT (ADF_IOV_MSG_ACK_DELAY * \
|
||||
ADF_IOV_MSG_ACK_MAX_RETRY + \
|
||||
ADF_IOV_MSG_COLLISION_DETECT_DELAY)
|
||||
#endif /* ADF_IOV_MSG_H */
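
As a worked example of the layout described at the top of this header, the compatibility-version request built in adf_vf2pf_request_version() expands to the following bits; the values are computed from the defines above.

/*
 * msg  = ADF_VF2PF_MSGORIGIN_SYSTEM                         = 0x00020000 (bit 17)
 * msg |= ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ << 18 (0x6 << 18) = 0x00180000
 * msg |= ADF_PFVF_COMPATIBILITY_VERSION   << 22 (0x1 << 22) = 0x00400000
 *                                                             ----------
 * msg                                                        = 0x005a0000
 *
 * __adf_iov_putmsg() then ORs in ADF_VF2PF_IN_USE_BY_VF (0x6AC2) for the
 * collision-control handshake and ADF_VF2PF_INT (bit 16) to raise the
 * interrupt in the PF.
 */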
|
Some files were not shown because too many files have changed in this diff.