Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
commit bfc484fe6a
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto updates from Herbert Xu:
 "API:
   - Delay boot-up self-test for built-in algorithms

  Algorithms:
   - Remove fallback path on arm64 as SIMD now runs with softirq off

  Drivers:
   - Add Keem Bay OCS ECC Driver"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (61 commits)
  crypto: testmgr - fix wrong key length for pkcs1pad
  crypto: pcrypt - Delay write to padata->info
  crypto: ccp - Make use of the helper macro kthread_run()
  crypto: sa2ul - Use the defined variable to clean code
  crypto: s5p-sss - Add error handling in s5p_aes_probe()
  crypto: keembay-ocs-ecc - Add Keem Bay OCS ECC Driver
  dt-bindings: crypto: Add Keem Bay ECC bindings
  crypto: ecc - Export additional helper functions
  crypto: ecc - Move ecc.h to include/crypto/internal
  crypto: engine - Add KPP Support to Crypto Engine
  crypto: api - Do not create test larvals if manager is disabled
  crypto: tcrypt - fix skcipher multi-buffer tests for 1420B blocks
  hwrng: s390 - replace snprintf in show functions with sysfs_emit
  crypto: octeontx2 - set assoclen in aead_do_fallback()
  crypto: ccp - Fix whitespace in sev_cmd_buffer_len()
  hwrng: mtk - Force runtime pm ops for sleep ops
  crypto: testmgr - Only disable migration in crypto_disable_simd_for_test()
  crypto: qat - share adf_enable_pf2vf_comms() from adf_pf2vf_msg.c
  crypto: qat - extract send and wait from adf_vf2pf_request_version()
  crypto: qat - add VF and PF wrappers to common send function
  ...
@@ -69,6 +69,8 @@ the crypto engine via one of:

* crypto_transfer_hash_request_to_engine()

* crypto_transfer_kpp_request_to_engine()

* crypto_transfer_skcipher_request_to_engine()

At the end of the request process, a call to one of the following functions is needed:

@@ -79,4 +81,6 @@ At the end of the request process, a call to one of the following functions is needed:

* crypto_finalize_hash_request()

* crypto_finalize_kpp_request()

* crypto_finalize_skcipher_request()
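A rough usage sketch may help here (illustrative only, not code from this commit: the my_* names and context layout are hypothetical). A crypto-engine-backed KPP driver queues requests from its KPP entry point and finalizes them from its do_one_request handler:

    #include <linux/kernel.h>
    #include <crypto/engine.h>
    #include <crypto/kpp.h>

    /* Hypothetical driver state; only the engine pointer matters here. */
    struct my_kpp_ctx {
        struct crypto_engine *engine;
    };

    /* Runs in the engine thread for each queued request. */
    static int my_kpp_do_one_request(struct crypto_engine *engine, void *areq)
    {
        struct kpp_request *req = container_of(areq, struct kpp_request, base);
        int err = 0;

        /* ... program the hardware and wait for it to finish ... */

        /* Hand the completed request back to the engine. */
        crypto_finalize_kpp_request(engine, req, err);
        return 0;
    }

    /* KPP entry point: just hand the request to the engine queue. */
    static int my_kpp_compute_shared_secret(struct kpp_request *req)
    {
        struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
        struct my_kpp_ctx *ctx = kpp_tfm_ctx(tfm);

        return crypto_transfer_kpp_request_to_engine(ctx->engine, req);
    }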
@@ -0,0 +1,47 @@ (new file: Documentation/devicetree/bindings/crypto/intel,keembay-ocs-ecc.yaml)

# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/crypto/intel,keembay-ocs-ecc.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: Intel Keem Bay OCS ECC Device Tree Bindings

maintainers:
  - Daniele Alessandrelli <daniele.alessandrelli@intel.com>
  - Prabhjot Khurana <prabhjot.khurana@intel.com>

description:
  The Intel Keem Bay Offload and Crypto Subsystem (OCS) Elliptic Curve
  Cryptography (ECC) device provides hardware acceleration for elliptic curve
  cryptography using the NIST P-256 and NIST P-384 elliptic curves.

properties:
  compatible:
    const: intel,keembay-ocs-ecc

  reg:
    maxItems: 1

  interrupts:
    maxItems: 1

  clocks:
    maxItems: 1

required:
  - compatible
  - reg
  - interrupts
  - clocks

additionalProperties: false

examples:
  - |
    #include <dt-bindings/interrupt-controller/arm-gic.h>
    crypto@30001000 {
      compatible = "intel,keembay-ocs-ecc";
      reg = <0x30001000 0x1000>;
      interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
      clocks = <&scmi_clk 95>;
    };
MAINTAINERS | 11

@@ -9556,6 +9556,17 @@ F: drivers/crypto/keembay/keembay-ocs-aes-core.c
F: drivers/crypto/keembay/ocs-aes.c
F: drivers/crypto/keembay/ocs-aes.h

INTEL KEEM BAY OCS ECC CRYPTO DRIVER
M: Daniele Alessandrelli <daniele.alessandrelli@intel.com>
M: Prabhjot Khurana <prabhjot.khurana@intel.com>
M: Mark Gross <mgross@linux.intel.com>
S: Maintained
F: Documentation/devicetree/bindings/crypto/intel,keembay-ocs-ecc.yaml
F: drivers/crypto/keembay/Kconfig
F: drivers/crypto/keembay/Makefile
F: drivers/crypto/keembay/keembay-ocs-ecc.c
F: drivers/crypto/keembay/ocs-ecc-curve-defs.h

INTEL KEEM BAY OCS HCU CRYPTO DRIVER
M: Daniele Alessandrelli <daniele.alessandrelli@intel.com>
M: Declan Murphy <declan.murphy@intel.com>
@@ -88,16 +88,12 @@ config CRYPTO_AES_ARM64_CE_BLK
    depends on KERNEL_MODE_NEON
    select CRYPTO_SKCIPHER
    select CRYPTO_AES_ARM64_CE
    select CRYPTO_AES_ARM64
    select CRYPTO_SIMD

config CRYPTO_AES_ARM64_NEON_BLK
    tristate "AES in ECB/CBC/CTR/XTS modes using NEON instructions"
    depends on KERNEL_MODE_NEON
    select CRYPTO_SKCIPHER
    select CRYPTO_AES_ARM64
    select CRYPTO_LIB_AES
    select CRYPTO_SIMD

config CRYPTO_CHACHA20_NEON
    tristate "ChaCha20, XChaCha20, and XChaCha12 stream ciphers using NEON instructions"

@@ -122,8 +118,6 @@ config CRYPTO_AES_ARM64_BS
    depends on KERNEL_MODE_NEON
    select CRYPTO_SKCIPHER
    select CRYPTO_AES_ARM64_NEON_BLK
    select CRYPTO_AES_ARM64
    select CRYPTO_LIB_AES
    select CRYPTO_SIMD

endif
@@ -12,22 +12,21 @@
    .arch   armv8-a+crypto

    /*
     * void ce_aes_ccm_auth_data(u8 mac[], u8 const in[], u32 abytes,
     *                           u32 *macp, u8 const rk[], u32 rounds);
     * u32 ce_aes_ccm_auth_data(u8 mac[], u8 const in[], u32 abytes,
     *                          u32 macp, u8 const rk[], u32 rounds);
     */
SYM_FUNC_START(ce_aes_ccm_auth_data)
    ldr     w8, [x3]                        /* leftover from prev round? */
    ld1     {v0.16b}, [x0]                  /* load mac */
    cbz     w8, 1f
    sub     w8, w8, #16
    cbz     w3, 1f
    sub     w3, w3, #16
    eor     v1.16b, v1.16b, v1.16b
0:  ldrb    w7, [x1], #1                    /* get 1 byte of input */
    subs    w2, w2, #1
    add     w8, w8, #1
    add     w3, w3, #1
    ins     v1.b[0], w7
    ext     v1.16b, v1.16b, v1.16b, #1      /* rotate in the input bytes */
    beq     8f                              /* out of input? */
    cbnz    w8, 0b
    cbnz    w3, 0b
    eor     v0.16b, v0.16b, v1.16b
1:  ld1     {v3.4s}, [x4]                   /* load first round key */
    prfm    pldl1strm, [x1]

@@ -62,7 +61,7 @@ SYM_FUNC_START(ce_aes_ccm_auth_data)
    beq     10f
    adds    w2, w2, #16
    beq     10f
    mov     w8, w2
    mov     w3, w2
7:  ldrb    w7, [x1], #1
    umov    w6, v0.b[0]
    eor     w6, w6, w7

@@ -71,15 +70,15 @@ SYM_FUNC_START(ce_aes_ccm_auth_data)
    beq     10f
    ext     v0.16b, v0.16b, v0.16b, #1      /* rotate out the mac bytes */
    b       7b
8:  cbz     w8, 91f
    mov     w7, w8
    add     w8, w8, #16
8:  cbz     w3, 91f
    mov     w7, w3
    add     w3, w3, #16
9:  ext     v1.16b, v1.16b, v1.16b, #1
    adds    w7, w7, #1
    bne     9b
91: eor     v0.16b, v0.16b, v1.16b
    st1     {v0.16b}, [x0]
10: str     w8, [x3]
10: mov     w0, w3
    ret
SYM_FUNC_END(ce_aes_ccm_auth_data)

@@ -124,6 +123,7 @@ SYM_FUNC_START(ce_aes_ccm_final)
SYM_FUNC_END(ce_aes_ccm_final)

    .macro  aes_ccm_do_crypt,enc
    cbz     x2, 5f
    ldr     x8, [x6, #8]                    /* load lower ctr */
    ld1     {v0.16b}, [x5]                  /* load mac */
CPU_LE( rev     x8, x8 )                    /* keep swabbed ctr in reg */
@@ -6,12 +6,10 @@
 */

#include <asm/neon.h>
#include <asm/simd.h>
#include <asm/unaligned.h>
#include <crypto/aes.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <linux/module.h>

@@ -29,8 +27,8 @@ static int num_rounds(struct crypto_aes_ctx *ctx)
    return 6 + ctx->key_length / 4;
}

asmlinkage void ce_aes_ccm_auth_data(u8 mac[], u8 const in[], u32 abytes,
                                     u32 *macp, u32 const rk[], u32 rounds);
asmlinkage u32 ce_aes_ccm_auth_data(u8 mac[], u8 const in[], u32 abytes,
                                    u32 macp, u32 const rk[], u32 rounds);

asmlinkage void ce_aes_ccm_encrypt(u8 out[], u8 const in[], u32 cbytes,
                                   u32 const rk[], u32 rounds, u8 mac[],

@@ -96,41 +94,6 @@ static int ccm_init_mac(struct aead_request *req, u8 maciv[], u32 msglen)
    return 0;
}

static void ccm_update_mac(struct crypto_aes_ctx *key, u8 mac[], u8 const in[],
                           u32 abytes, u32 *macp)
{
    if (crypto_simd_usable()) {
        kernel_neon_begin();
        ce_aes_ccm_auth_data(mac, in, abytes, macp, key->key_enc,
                             num_rounds(key));
        kernel_neon_end();
    } else {
        if (*macp > 0 && *macp < AES_BLOCK_SIZE) {
            int added = min(abytes, AES_BLOCK_SIZE - *macp);

            crypto_xor(&mac[*macp], in, added);

            *macp += added;
            in += added;
            abytes -= added;
        }

        while (abytes >= AES_BLOCK_SIZE) {
            aes_encrypt(key, mac, mac);
            crypto_xor(mac, in, AES_BLOCK_SIZE);

            in += AES_BLOCK_SIZE;
            abytes -= AES_BLOCK_SIZE;
        }

        if (abytes > 0) {
            aes_encrypt(key, mac, mac);
            crypto_xor(mac, in, abytes);
            *macp = abytes;
        }
    }
}

static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[])
{
    struct crypto_aead *aead = crypto_aead_reqtfm(req);

@@ -150,7 +113,8 @@ static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[])
        ltag.len = 6;
    }

    ccm_update_mac(ctx, mac, (u8 *)&ltag, ltag.len, &macp);
    macp = ce_aes_ccm_auth_data(mac, (u8 *)&ltag, ltag.len, macp,
                                ctx->key_enc, num_rounds(ctx));
    scatterwalk_start(&walk, req->src);

    do {

@@ -161,8 +125,16 @@ static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[])
            scatterwalk_start(&walk, sg_next(walk.sg));
            n = scatterwalk_clamp(&walk, len);
        }
        n = min_t(u32, n, SZ_4K); /* yield NEON at least every 4k */
        p = scatterwalk_map(&walk);
        ccm_update_mac(ctx, mac, p, n, &macp);

        macp = ce_aes_ccm_auth_data(mac, p, n, macp, ctx->key_enc,
                                    num_rounds(ctx));

        if (len / SZ_4K > (len - n) / SZ_4K) {
            kernel_neon_end();
            kernel_neon_begin();
        }
        len -= n;

        scatterwalk_unmap(p);

@@ -171,54 +143,6 @@ static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[])
    } while (len);
}

static int ccm_crypt_fallback(struct skcipher_walk *walk, u8 mac[], u8 iv0[],
                              struct crypto_aes_ctx *ctx, bool enc)
{
    u8 buf[AES_BLOCK_SIZE];
    int err = 0;

    while (walk->nbytes) {
        int blocks = walk->nbytes / AES_BLOCK_SIZE;
        u32 tail = walk->nbytes % AES_BLOCK_SIZE;
        u8 *dst = walk->dst.virt.addr;
        u8 *src = walk->src.virt.addr;
        u32 nbytes = walk->nbytes;

        if (nbytes == walk->total && tail > 0) {
            blocks++;
            tail = 0;
        }

        do {
            u32 bsize = AES_BLOCK_SIZE;

            if (nbytes < AES_BLOCK_SIZE)
                bsize = nbytes;

            crypto_inc(walk->iv, AES_BLOCK_SIZE);
            aes_encrypt(ctx, buf, walk->iv);
            aes_encrypt(ctx, mac, mac);
            if (enc)
                crypto_xor(mac, src, bsize);
            crypto_xor_cpy(dst, src, buf, bsize);
            if (!enc)
                crypto_xor(mac, dst, bsize);
            dst += bsize;
            src += bsize;
            nbytes -= bsize;
        } while (--blocks);

        err = skcipher_walk_done(walk, tail);
    }

    if (!err) {
        aes_encrypt(ctx, buf, iv0);
        aes_encrypt(ctx, mac, mac);
        crypto_xor(mac, buf, AES_BLOCK_SIZE);
    }
    return err;
}

static int ccm_encrypt(struct aead_request *req)
{
    struct crypto_aead *aead = crypto_aead_reqtfm(req);

@@ -233,42 +157,42 @@ static int ccm_encrypt(struct aead_request *req)
    if (err)
        return err;

    if (req->assoclen)
        ccm_calculate_auth_mac(req, mac);

    /* preserve the original iv for the final round */
    memcpy(buf, req->iv, AES_BLOCK_SIZE);

    err = skcipher_walk_aead_encrypt(&walk, req, false);

    if (crypto_simd_usable()) {
        while (walk.nbytes) {
            u32 tail = walk.nbytes % AES_BLOCK_SIZE;

            if (walk.nbytes == walk.total)
                tail = 0;

            kernel_neon_begin();
            ce_aes_ccm_encrypt(walk.dst.virt.addr,
                               walk.src.virt.addr,
                               walk.nbytes - tail, ctx->key_enc,
                               num_rounds(ctx), mac, walk.iv);
            kernel_neon_end();

            err = skcipher_walk_done(&walk, tail);
        }
        if (!err) {
            kernel_neon_begin();
            ce_aes_ccm_final(mac, buf, ctx->key_enc,
                             num_rounds(ctx));
            kernel_neon_end();
        }
    } else {
        err = ccm_crypt_fallback(&walk, mac, buf, ctx, true);
    }
    if (err)
    if (unlikely(err))
        return err;

    kernel_neon_begin();

    if (req->assoclen)
        ccm_calculate_auth_mac(req, mac);

    do {
        u32 tail = walk.nbytes % AES_BLOCK_SIZE;

        if (walk.nbytes == walk.total)
            tail = 0;

        ce_aes_ccm_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
                           walk.nbytes - tail, ctx->key_enc,
                           num_rounds(ctx), mac, walk.iv);

        if (walk.nbytes == walk.total)
            ce_aes_ccm_final(mac, buf, ctx->key_enc, num_rounds(ctx));

        kernel_neon_end();

        if (walk.nbytes) {
            err = skcipher_walk_done(&walk, tail);
            if (unlikely(err))
                return err;
            if (unlikely(walk.nbytes))
                kernel_neon_begin();
        }
    } while (walk.nbytes);

    /* copy authtag to end of dst */
    scatterwalk_map_and_copy(mac, req->dst, req->assoclen + req->cryptlen,
                             crypto_aead_authsize(aead), 1);

@@ -291,43 +215,42 @@ static int ccm_decrypt(struct aead_request *req)
    if (err)
        return err;

    if (req->assoclen)
        ccm_calculate_auth_mac(req, mac);

    /* preserve the original iv for the final round */
    memcpy(buf, req->iv, AES_BLOCK_SIZE);

    err = skcipher_walk_aead_decrypt(&walk, req, false);

    if (crypto_simd_usable()) {
        while (walk.nbytes) {
            u32 tail = walk.nbytes % AES_BLOCK_SIZE;

            if (walk.nbytes == walk.total)
                tail = 0;

            kernel_neon_begin();
            ce_aes_ccm_decrypt(walk.dst.virt.addr,
                               walk.src.virt.addr,
                               walk.nbytes - tail, ctx->key_enc,
                               num_rounds(ctx), mac, walk.iv);
            kernel_neon_end();

            err = skcipher_walk_done(&walk, tail);
        }
        if (!err) {
            kernel_neon_begin();
            ce_aes_ccm_final(mac, buf, ctx->key_enc,
                             num_rounds(ctx));
            kernel_neon_end();
        }
    } else {
        err = ccm_crypt_fallback(&walk, mac, buf, ctx, false);
    }

    if (err)
    if (unlikely(err))
        return err;

    kernel_neon_begin();

    if (req->assoclen)
        ccm_calculate_auth_mac(req, mac);

    do {
        u32 tail = walk.nbytes % AES_BLOCK_SIZE;

        if (walk.nbytes == walk.total)
            tail = 0;

        ce_aes_ccm_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
                           walk.nbytes - tail, ctx->key_enc,
                           num_rounds(ctx), mac, walk.iv);

        if (walk.nbytes == walk.total)
            ce_aes_ccm_final(mac, buf, ctx->key_enc, num_rounds(ctx));

        kernel_neon_end();

        if (walk.nbytes) {
            err = skcipher_walk_done(&walk, tail);
            if (unlikely(err))
                return err;
            if (unlikely(walk.nbytes))
                kernel_neon_begin();
        }
    } while (walk.nbytes);

    /* compare calculated auth tag with the stored one */
    scatterwalk_map_and_copy(buf, req->src,
                             req->assoclen + req->cryptlen - authsize,
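One detail worth unpacking: because the NEON unit is now held across the whole walk, the glue code caps each chunk at SZ_4K and uses the quotient test above to decide when to briefly release it. The quotient len / SZ_4K drops exactly when consuming n bytes crosses a 4 KiB boundary, which is where kernel_neon_end()/kernel_neon_begin() lets other NEON users run. A standalone restatement of that test (illustrative helper, not kernel source):

    #include <linux/types.h>
    #include <linux/sizes.h>

    /*
     * True when consuming 'n' bytes out of the remaining 'len' crosses at
     * least one 4 KiB boundary -- the point at which the real driver yields
     * the SIMD unit before continuing.
     */
    static bool crosses_4k_boundary(u32 len, u32 n)
    {
        return len / SZ_4K > (len - n) / SZ_4K;
    }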
@@ -444,7 +444,7 @@ static int __maybe_unused essiv_cbc_decrypt(struct skcipher_request *req)
    return err ?: cbc_decrypt_walk(req, &walk);
}

static int ctr_encrypt(struct skcipher_request *req)
static int __maybe_unused ctr_encrypt(struct skcipher_request *req)
{
    struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
    struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);

@@ -485,29 +485,6 @@ static int ctr_encrypt(struct skcipher_request *req)
    return err;
}

static void ctr_encrypt_one(struct crypto_skcipher *tfm, const u8 *src, u8 *dst)
{
    const struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
    unsigned long flags;

    /*
     * Temporarily disable interrupts to avoid races where
     * cachelines are evicted when the CPU is interrupted
     * to do something else.
     */
    local_irq_save(flags);
    aes_encrypt(ctx, dst, src);
    local_irq_restore(flags);
}

static int __maybe_unused ctr_encrypt_sync(struct skcipher_request *req)
{
    if (!crypto_simd_usable())
        return crypto_ctr_encrypt_walk(req, ctr_encrypt_one);

    return ctr_encrypt(req);
}

static int __maybe_unused xts_encrypt(struct skcipher_request *req)
{
    struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);

@@ -656,10 +633,9 @@ static int __maybe_unused xts_decrypt(struct skcipher_request *req)
static struct skcipher_alg aes_algs[] = { {
#if defined(USE_V8_CRYPTO_EXTENSIONS) || !IS_ENABLED(CONFIG_CRYPTO_AES_ARM64_BS)
    .base = {
        .cra_name          = "__ecb(aes)",
        .cra_driver_name   = "__ecb-aes-" MODE,
        .cra_name          = "ecb(aes)",
        .cra_driver_name   = "ecb-aes-" MODE,
        .cra_priority      = PRIO,
        .cra_flags         = CRYPTO_ALG_INTERNAL,
        .cra_blocksize     = AES_BLOCK_SIZE,
        .cra_ctxsize       = sizeof(struct crypto_aes_ctx),
        .cra_module        = THIS_MODULE,

@@ -671,10 +647,9 @@ static struct skcipher_alg aes_algs[] = { {
    .decrypt = ecb_decrypt,
}, {
    .base = {
        .cra_name          = "__cbc(aes)",
        .cra_driver_name   = "__cbc-aes-" MODE,
        .cra_name          = "cbc(aes)",
        .cra_driver_name   = "cbc-aes-" MODE,
        .cra_priority      = PRIO,
        .cra_flags         = CRYPTO_ALG_INTERNAL,
        .cra_blocksize     = AES_BLOCK_SIZE,
        .cra_ctxsize       = sizeof(struct crypto_aes_ctx),
        .cra_module        = THIS_MODULE,

@@ -687,10 +662,9 @@ static struct skcipher_alg aes_algs[] = { {
    .decrypt = cbc_decrypt,
}, {
    .base = {
        .cra_name          = "__ctr(aes)",
        .cra_driver_name   = "__ctr-aes-" MODE,
        .cra_name          = "ctr(aes)",
        .cra_driver_name   = "ctr-aes-" MODE,
        .cra_priority      = PRIO,
        .cra_flags         = CRYPTO_ALG_INTERNAL,
        .cra_blocksize     = 1,
        .cra_ctxsize       = sizeof(struct crypto_aes_ctx),
        .cra_module        = THIS_MODULE,

@@ -704,26 +678,9 @@ static struct skcipher_alg aes_algs[] = { {
    .decrypt = ctr_encrypt,
}, {
    .base = {
        .cra_name          = "ctr(aes)",
        .cra_driver_name   = "ctr-aes-" MODE,
        .cra_priority      = PRIO - 1,
        .cra_blocksize     = 1,
        .cra_ctxsize       = sizeof(struct crypto_aes_ctx),
        .cra_module        = THIS_MODULE,
    },
    .min_keysize = AES_MIN_KEY_SIZE,
    .max_keysize = AES_MAX_KEY_SIZE,
    .ivsize      = AES_BLOCK_SIZE,
    .chunksize   = AES_BLOCK_SIZE,
    .setkey      = skcipher_aes_setkey,
    .encrypt     = ctr_encrypt_sync,
    .decrypt     = ctr_encrypt_sync,
}, {
    .base = {
        .cra_name          = "__xts(aes)",
        .cra_driver_name   = "__xts-aes-" MODE,
        .cra_name          = "xts(aes)",
        .cra_driver_name   = "xts-aes-" MODE,
        .cra_priority      = PRIO,
        .cra_flags         = CRYPTO_ALG_INTERNAL,
        .cra_blocksize     = AES_BLOCK_SIZE,
        .cra_ctxsize       = sizeof(struct crypto_aes_xts_ctx),
        .cra_module        = THIS_MODULE,

@@ -738,10 +695,9 @@ static struct skcipher_alg aes_algs[] = { {
}, {
#endif
    .base = {
        .cra_name          = "__cts(cbc(aes))",
        .cra_driver_name   = "__cts-cbc-aes-" MODE,
        .cra_name          = "cts(cbc(aes))",
        .cra_driver_name   = "cts-cbc-aes-" MODE,
        .cra_priority      = PRIO,
        .cra_flags         = CRYPTO_ALG_INTERNAL,
        .cra_blocksize     = AES_BLOCK_SIZE,
        .cra_ctxsize       = sizeof(struct crypto_aes_ctx),
        .cra_module        = THIS_MODULE,

@@ -755,10 +711,9 @@ static struct skcipher_alg aes_algs[] = { {
    .decrypt = cts_cbc_decrypt,
}, {
    .base = {
        .cra_name          = "__essiv(cbc(aes),sha256)",
        .cra_driver_name   = "__essiv-cbc-aes-sha256-" MODE,
        .cra_name          = "essiv(cbc(aes),sha256)",
        .cra_driver_name   = "essiv-cbc-aes-sha256-" MODE,
        .cra_priority      = PRIO + 1,
        .cra_flags         = CRYPTO_ALG_INTERNAL,
        .cra_blocksize     = AES_BLOCK_SIZE,
        .cra_ctxsize       = sizeof(struct crypto_aes_essiv_cbc_ctx),
        .cra_module        = THIS_MODULE,

@@ -997,28 +952,15 @@ static struct shash_alg mac_algs[] = { {
    .descsize = sizeof(struct mac_desc_ctx),
} };

static struct simd_skcipher_alg *aes_simd_algs[ARRAY_SIZE(aes_algs)];

static void aes_exit(void)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(aes_simd_algs); i++)
        if (aes_simd_algs[i])
            simd_skcipher_free(aes_simd_algs[i]);

    crypto_unregister_shashes(mac_algs, ARRAY_SIZE(mac_algs));
    crypto_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
}

static int __init aes_init(void)
{
    struct simd_skcipher_alg *simd;
    const char *basename;
    const char *algname;
    const char *drvname;
    int err;
    int i;

    err = crypto_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
    if (err)

@@ -1028,26 +970,8 @@ static int __init aes_init(void)
    if (err)
        goto unregister_ciphers;

    for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
        if (!(aes_algs[i].base.cra_flags & CRYPTO_ALG_INTERNAL))
            continue;

        algname = aes_algs[i].base.cra_name + 2;
        drvname = aes_algs[i].base.cra_driver_name + 2;
        basename = aes_algs[i].base.cra_driver_name;
        simd = simd_skcipher_create_compat(algname, drvname, basename);
        err = PTR_ERR(simd);
        if (IS_ERR(simd))
            goto unregister_simds;

        aes_simd_algs[i] = simd;
    }

    return 0;

unregister_simds:
    aes_exit();
    return err;
unregister_ciphers:
    crypto_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
    return err;
@@ -63,11 +63,6 @@ struct aesbs_cbc_ctx {
    u32 enc[AES_MAX_KEYLENGTH_U32];
};

struct aesbs_ctr_ctx {
    struct aesbs_ctx key;   /* must be first member */
    struct crypto_aes_ctx fallback;
};

struct aesbs_xts_ctx {
    struct aesbs_ctx key;
    u32 twkey[AES_MAX_KEYLENGTH_U32];

@@ -207,25 +202,6 @@ static int cbc_decrypt(struct skcipher_request *req)
    return err;
}

static int aesbs_ctr_setkey_sync(struct crypto_skcipher *tfm, const u8 *in_key,
                                 unsigned int key_len)
{
    struct aesbs_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
    int err;

    err = aes_expandkey(&ctx->fallback, in_key, key_len);
    if (err)
        return err;

    ctx->key.rounds = 6 + key_len / 4;

    kernel_neon_begin();
    aesbs_convert_key(ctx->key.rk, ctx->fallback.key_enc, ctx->key.rounds);
    kernel_neon_end();

    return 0;
}

static int ctr_encrypt(struct skcipher_request *req)
{
    struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);

@@ -292,29 +268,6 @@ static int aesbs_xts_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
    return aesbs_setkey(tfm, in_key, key_len);
}

static void ctr_encrypt_one(struct crypto_skcipher *tfm, const u8 *src, u8 *dst)
{
    struct aesbs_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
    unsigned long flags;

    /*
     * Temporarily disable interrupts to avoid races where
     * cachelines are evicted when the CPU is interrupted
     * to do something else.
     */
    local_irq_save(flags);
    aes_encrypt(&ctx->fallback, dst, src);
    local_irq_restore(flags);
}

static int ctr_encrypt_sync(struct skcipher_request *req)
{
    if (!crypto_simd_usable())
        return crypto_ctr_encrypt_walk(req, ctr_encrypt_one);

    return ctr_encrypt(req);
}

static int __xts_crypt(struct skcipher_request *req, bool encrypt,
                       void (*fn)(u8 out[], u8 const in[], u8 const rk[],
                                  int rounds, int blocks, u8 iv[]))

@@ -431,13 +384,12 @@ static int xts_decrypt(struct skcipher_request *req)
}

static struct skcipher_alg aes_algs[] = { {
    .base.cra_name        = "__ecb(aes)",
    .base.cra_driver_name = "__ecb-aes-neonbs",
    .base.cra_name        = "ecb(aes)",
    .base.cra_driver_name = "ecb-aes-neonbs",
    .base.cra_priority    = 250,
    .base.cra_blocksize   = AES_BLOCK_SIZE,
    .base.cra_ctxsize     = sizeof(struct aesbs_ctx),
    .base.cra_module      = THIS_MODULE,
    .base.cra_flags       = CRYPTO_ALG_INTERNAL,

    .min_keysize = AES_MIN_KEY_SIZE,
    .max_keysize = AES_MAX_KEY_SIZE,

@@ -446,13 +398,12 @@ static struct skcipher_alg aes_algs[] = { {
    .encrypt = ecb_encrypt,
    .decrypt = ecb_decrypt,
}, {
    .base.cra_name        = "__cbc(aes)",
    .base.cra_driver_name = "__cbc-aes-neonbs",
    .base.cra_name        = "cbc(aes)",
    .base.cra_driver_name = "cbc-aes-neonbs",
    .base.cra_priority    = 250,
    .base.cra_blocksize   = AES_BLOCK_SIZE,
    .base.cra_ctxsize     = sizeof(struct aesbs_cbc_ctx),
    .base.cra_module      = THIS_MODULE,
    .base.cra_flags       = CRYPTO_ALG_INTERNAL,

    .min_keysize = AES_MIN_KEY_SIZE,
    .max_keysize = AES_MAX_KEY_SIZE,

@@ -462,13 +413,12 @@ static struct skcipher_alg aes_algs[] = { {
    .encrypt = cbc_encrypt,
    .decrypt = cbc_decrypt,
}, {
    .base.cra_name        = "__ctr(aes)",
    .base.cra_driver_name = "__ctr-aes-neonbs",
    .base.cra_name        = "ctr(aes)",
    .base.cra_driver_name = "ctr-aes-neonbs",
    .base.cra_priority    = 250,
    .base.cra_blocksize   = 1,
    .base.cra_ctxsize     = sizeof(struct aesbs_ctx),
    .base.cra_module      = THIS_MODULE,
    .base.cra_flags       = CRYPTO_ALG_INTERNAL,

    .min_keysize = AES_MIN_KEY_SIZE,
    .max_keysize = AES_MAX_KEY_SIZE,

@@ -479,29 +429,12 @@ static struct skcipher_alg aes_algs[] = { {
    .encrypt = ctr_encrypt,
    .decrypt = ctr_encrypt,
}, {
    .base.cra_name        = "ctr(aes)",
    .base.cra_driver_name = "ctr-aes-neonbs",
    .base.cra_priority    = 250 - 1,
    .base.cra_blocksize   = 1,
    .base.cra_ctxsize     = sizeof(struct aesbs_ctr_ctx),
    .base.cra_module      = THIS_MODULE,

    .min_keysize = AES_MIN_KEY_SIZE,
    .max_keysize = AES_MAX_KEY_SIZE,
    .chunksize   = AES_BLOCK_SIZE,
    .walksize    = 8 * AES_BLOCK_SIZE,
    .ivsize      = AES_BLOCK_SIZE,
    .setkey      = aesbs_ctr_setkey_sync,
    .encrypt     = ctr_encrypt_sync,
    .decrypt     = ctr_encrypt_sync,
}, {
    .base.cra_name        = "__xts(aes)",
    .base.cra_driver_name = "__xts-aes-neonbs",
    .base.cra_name        = "xts(aes)",
    .base.cra_driver_name = "xts-aes-neonbs",
    .base.cra_priority    = 250,
    .base.cra_blocksize   = AES_BLOCK_SIZE,
    .base.cra_ctxsize     = sizeof(struct aesbs_xts_ctx),
    .base.cra_module      = THIS_MODULE,
    .base.cra_flags       = CRYPTO_ALG_INTERNAL,

    .min_keysize = 2 * AES_MIN_KEY_SIZE,
    .max_keysize = 2 * AES_MAX_KEY_SIZE,

@@ -512,54 +445,17 @@ static struct skcipher_alg aes_algs[] = { {
    .decrypt = xts_decrypt,
} };

static struct simd_skcipher_alg *aes_simd_algs[ARRAY_SIZE(aes_algs)];

static void aes_exit(void)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(aes_simd_algs); i++)
        if (aes_simd_algs[i])
            simd_skcipher_free(aes_simd_algs[i]);

    crypto_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
}

static int __init aes_init(void)
{
    struct simd_skcipher_alg *simd;
    const char *basename;
    const char *algname;
    const char *drvname;
    int err;
    int i;

    if (!cpu_have_named_feature(ASIMD))
        return -ENODEV;

    err = crypto_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
    if (err)
        return err;

    for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
        if (!(aes_algs[i].base.cra_flags & CRYPTO_ALG_INTERNAL))
            continue;

        algname = aes_algs[i].base.cra_name + 2;
        drvname = aes_algs[i].base.cra_driver_name + 2;
        basename = aes_algs[i].base.cra_driver_name;
        simd = simd_skcipher_create_compat(algname, drvname, basename);
        err = PTR_ERR(simd);
        if (IS_ERR(simd))
            goto unregister_simds;

        aes_simd_algs[i] = simd;
    }
    return 0;

unregister_simds:
    aes_exit();
    return err;
    return crypto_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
}

module_init(aes_init);
@@ -362,84 +362,36 @@ static int gcm_encrypt(struct aead_request *req)

    err = skcipher_walk_aead_encrypt(&walk, req, false);

    if (likely(crypto_simd_usable())) {
        do {
            const u8 *src = walk.src.virt.addr;
            u8 *dst = walk.dst.virt.addr;
            int nbytes = walk.nbytes;

            tag = (u8 *)&lengths;

            if (unlikely(nbytes > 0 && nbytes < AES_BLOCK_SIZE)) {
                src = dst = memcpy(buf + sizeof(buf) - nbytes,
                                   src, nbytes);
            } else if (nbytes < walk.total) {
                nbytes &= ~(AES_BLOCK_SIZE - 1);
                tag = NULL;
            }

            kernel_neon_begin();
            pmull_gcm_encrypt(nbytes, dst, src, ctx->ghash_key.h,
                              dg, iv, ctx->aes_key.key_enc, nrounds,
                              tag);
            kernel_neon_end();

            if (unlikely(!nbytes))
                break;

            if (unlikely(nbytes > 0 && nbytes < AES_BLOCK_SIZE))
                memcpy(walk.dst.virt.addr,
                       buf + sizeof(buf) - nbytes, nbytes);

            err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
        } while (walk.nbytes);
    } else {
        while (walk.nbytes >= AES_BLOCK_SIZE) {
            int blocks = walk.nbytes / AES_BLOCK_SIZE;
            const u8 *src = walk.src.virt.addr;
            u8 *dst = walk.dst.virt.addr;
            int remaining = blocks;

            do {
                aes_encrypt(&ctx->aes_key, buf, iv);
                crypto_xor_cpy(dst, src, buf, AES_BLOCK_SIZE);
                crypto_inc(iv, AES_BLOCK_SIZE);

                dst += AES_BLOCK_SIZE;
                src += AES_BLOCK_SIZE;
            } while (--remaining > 0);

            ghash_do_update(blocks, dg, walk.dst.virt.addr,
                            &ctx->ghash_key, NULL);

            err = skcipher_walk_done(&walk,
                                     walk.nbytes % AES_BLOCK_SIZE);
        }

        /* handle the tail */
        if (walk.nbytes) {
            aes_encrypt(&ctx->aes_key, buf, iv);

            crypto_xor_cpy(walk.dst.virt.addr, walk.src.virt.addr,
                           buf, walk.nbytes);

            memcpy(buf, walk.dst.virt.addr, walk.nbytes);
            memset(buf + walk.nbytes, 0, sizeof(buf) - walk.nbytes);
        }
    do {
        const u8 *src = walk.src.virt.addr;
        u8 *dst = walk.dst.virt.addr;
        int nbytes = walk.nbytes;

        tag = (u8 *)&lengths;
        ghash_do_update(1, dg, tag, &ctx->ghash_key,
                        walk.nbytes ? buf : NULL);

        if (walk.nbytes)
            err = skcipher_walk_done(&walk, 0);
        if (unlikely(nbytes > 0 && nbytes < AES_BLOCK_SIZE)) {
            src = dst = memcpy(buf + sizeof(buf) - nbytes,
                               src, nbytes);
        } else if (nbytes < walk.total) {
            nbytes &= ~(AES_BLOCK_SIZE - 1);
            tag = NULL;
        }

        put_unaligned_be64(dg[1], tag);
        put_unaligned_be64(dg[0], tag + 8);
        put_unaligned_be32(1, iv + GCM_IV_SIZE);
        aes_encrypt(&ctx->aes_key, iv, iv);
        crypto_xor(tag, iv, AES_BLOCK_SIZE);
    }
        kernel_neon_begin();
        pmull_gcm_encrypt(nbytes, dst, src, ctx->ghash_key.h,
                          dg, iv, ctx->aes_key.key_enc, nrounds,
                          tag);
        kernel_neon_end();

        if (unlikely(!nbytes))
            break;

        if (unlikely(nbytes > 0 && nbytes < AES_BLOCK_SIZE))
            memcpy(walk.dst.virt.addr,
                   buf + sizeof(buf) - nbytes, nbytes);

        err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
    } while (walk.nbytes);

    if (err)
        return err;

@@ -464,6 +416,7 @@ static int gcm_decrypt(struct aead_request *req)
    u64 dg[2] = {};
    be128 lengths;
    u8 *tag;
    int ret;
    int err;

    lengths.a = cpu_to_be64(req->assoclen * 8);

@@ -481,101 +434,41 @@ static int gcm_decrypt(struct aead_request *req)

    err = skcipher_walk_aead_decrypt(&walk, req, false);

    if (likely(crypto_simd_usable())) {
        int ret;

        do {
            const u8 *src = walk.src.virt.addr;
            u8 *dst = walk.dst.virt.addr;
            int nbytes = walk.nbytes;

            tag = (u8 *)&lengths;

            if (unlikely(nbytes > 0 && nbytes < AES_BLOCK_SIZE)) {
                src = dst = memcpy(buf + sizeof(buf) - nbytes,
                                   src, nbytes);
            } else if (nbytes < walk.total) {
                nbytes &= ~(AES_BLOCK_SIZE - 1);
                tag = NULL;
            }

            kernel_neon_begin();
            ret = pmull_gcm_decrypt(nbytes, dst, src,
                                    ctx->ghash_key.h,
                                    dg, iv, ctx->aes_key.key_enc,
                                    nrounds, tag, otag, authsize);
            kernel_neon_end();

            if (unlikely(!nbytes))
                break;

            if (unlikely(nbytes > 0 && nbytes < AES_BLOCK_SIZE))
                memcpy(walk.dst.virt.addr,
                       buf + sizeof(buf) - nbytes, nbytes);

            err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
        } while (walk.nbytes);

        if (err)
            return err;
        if (ret)
            return -EBADMSG;
    } else {
        while (walk.nbytes >= AES_BLOCK_SIZE) {
            int blocks = walk.nbytes / AES_BLOCK_SIZE;
            const u8 *src = walk.src.virt.addr;
            u8 *dst = walk.dst.virt.addr;

            ghash_do_update(blocks, dg, walk.src.virt.addr,
                            &ctx->ghash_key, NULL);

            do {
                aes_encrypt(&ctx->aes_key, buf, iv);
                crypto_xor_cpy(dst, src, buf, AES_BLOCK_SIZE);
                crypto_inc(iv, AES_BLOCK_SIZE);

                dst += AES_BLOCK_SIZE;
                src += AES_BLOCK_SIZE;
            } while (--blocks > 0);

            err = skcipher_walk_done(&walk,
                                     walk.nbytes % AES_BLOCK_SIZE);
        }

        /* handle the tail */
        if (walk.nbytes) {
            memcpy(buf, walk.src.virt.addr, walk.nbytes);
            memset(buf + walk.nbytes, 0, sizeof(buf) - walk.nbytes);
        }
    do {
        const u8 *src = walk.src.virt.addr;
        u8 *dst = walk.dst.virt.addr;
        int nbytes = walk.nbytes;

        tag = (u8 *)&lengths;
        ghash_do_update(1, dg, tag, &ctx->ghash_key,
                        walk.nbytes ? buf : NULL);

        if (walk.nbytes) {
            aes_encrypt(&ctx->aes_key, buf, iv);

            crypto_xor_cpy(walk.dst.virt.addr, walk.src.virt.addr,
                           buf, walk.nbytes);

            err = skcipher_walk_done(&walk, 0);
        if (unlikely(nbytes > 0 && nbytes < AES_BLOCK_SIZE)) {
            src = dst = memcpy(buf + sizeof(buf) - nbytes,
                               src, nbytes);
        } else if (nbytes < walk.total) {
            nbytes &= ~(AES_BLOCK_SIZE - 1);
            tag = NULL;
        }

        if (err)
            return err;
        kernel_neon_begin();
        ret = pmull_gcm_decrypt(nbytes, dst, src, ctx->ghash_key.h,
                                dg, iv, ctx->aes_key.key_enc,
                                nrounds, tag, otag, authsize);
        kernel_neon_end();

        put_unaligned_be64(dg[1], tag);
        put_unaligned_be64(dg[0], tag + 8);
        put_unaligned_be32(1, iv + GCM_IV_SIZE);
        aes_encrypt(&ctx->aes_key, iv, iv);
        crypto_xor(tag, iv, AES_BLOCK_SIZE);
        if (unlikely(!nbytes))
            break;

        if (crypto_memneq(tag, otag, authsize)) {
            memzero_explicit(tag, AES_BLOCK_SIZE);
            return -EBADMSG;
        }
    }
    return 0;
        if (unlikely(nbytes > 0 && nbytes < AES_BLOCK_SIZE))
            memcpy(walk.dst.virt.addr,
                   buf + sizeof(buf) - nbytes, nbytes);

        err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
    } while (walk.nbytes);

    if (err)
        return err;

    return ret ? -EBADMSG : 0;
}

static struct aead_alg gcm_aes_alg = {
@@ -866,7 +866,7 @@ static int xts_crypt(struct skcipher_request *req, bool encrypt)
        req = &subreq;

        err = skcipher_walk_virt(&walk, req, false);
        if (err)
        if (!walk.nbytes)
            return err;
    } else {
        tail = 0;
@@ -233,12 +233,12 @@ config CRYPTO_DH

config CRYPTO_ECC
    tristate
    select CRYPTO_RNG_DEFAULT

config CRYPTO_ECDH
    tristate "ECDH algorithm"
    select CRYPTO_ECC
    select CRYPTO_KPP
    select CRYPTO_RNG_DEFAULT
    help
      Generic implementation of the ECDH algorithm
crypto/algapi.c | 123

@@ -216,6 +216,32 @@ void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list,
}
EXPORT_SYMBOL_GPL(crypto_remove_spawns);

static struct crypto_larval *crypto_alloc_test_larval(struct crypto_alg *alg)
{
    struct crypto_larval *larval;

    if (!IS_ENABLED(CONFIG_CRYPTO_MANAGER))
        return NULL;

    larval = crypto_larval_alloc(alg->cra_name,
                                 alg->cra_flags | CRYPTO_ALG_TESTED, 0);
    if (IS_ERR(larval))
        return larval;

    larval->adult = crypto_mod_get(alg);
    if (!larval->adult) {
        kfree(larval);
        return ERR_PTR(-ENOENT);
    }

    refcount_set(&larval->alg.cra_refcnt, 1);
    memcpy(larval->alg.cra_driver_name, alg->cra_driver_name,
           CRYPTO_MAX_ALG_NAME);
    larval->alg.cra_priority = alg->cra_priority;

    return larval;
}

static struct crypto_larval *__crypto_register_alg(struct crypto_alg *alg)
{
    struct crypto_alg *q;

@@ -250,31 +276,20 @@ static struct crypto_larval *__crypto_register_alg(struct crypto_alg *alg)
        goto err;
    }

    larval = crypto_larval_alloc(alg->cra_name,
                                 alg->cra_flags | CRYPTO_ALG_TESTED, 0);
    larval = crypto_alloc_test_larval(alg);
    if (IS_ERR(larval))
        goto out;

    ret = -ENOENT;
    larval->adult = crypto_mod_get(alg);
    if (!larval->adult)
        goto free_larval;

    refcount_set(&larval->alg.cra_refcnt, 1);
    memcpy(larval->alg.cra_driver_name, alg->cra_driver_name,
           CRYPTO_MAX_ALG_NAME);
    larval->alg.cra_priority = alg->cra_priority;

    list_add(&alg->cra_list, &crypto_alg_list);
    list_add(&larval->alg.cra_list, &crypto_alg_list);

    if (larval)
        list_add(&larval->alg.cra_list, &crypto_alg_list);

    crypto_stats_init(alg);

out:
    return larval;

free_larval:
    kfree(larval);
err:
    larval = ERR_PTR(ret);
    goto out;

@@ -389,29 +404,10 @@ void crypto_remove_final(struct list_head *list)
}
EXPORT_SYMBOL_GPL(crypto_remove_final);

static void crypto_wait_for_test(struct crypto_larval *larval)
{
    int err;

    err = crypto_probing_notify(CRYPTO_MSG_ALG_REGISTER, larval->adult);
    if (err != NOTIFY_STOP) {
        if (WARN_ON(err != NOTIFY_DONE))
            goto out;
        crypto_alg_tested(larval->alg.cra_driver_name, 0);
    }

    err = wait_for_completion_killable(&larval->completion);
    WARN_ON(err);
    if (!err)
        crypto_notify(CRYPTO_MSG_ALG_LOADED, larval);

out:
    crypto_larval_kill(&larval->alg);
}

int crypto_register_alg(struct crypto_alg *alg)
{
    struct crypto_larval *larval;
    bool test_started;
    int err;

    alg->cra_flags &= ~CRYPTO_ALG_DEAD;

@@ -421,12 +417,16 @@ int crypto_register_alg(struct crypto_alg *alg)

    down_write(&crypto_alg_sem);
    larval = __crypto_register_alg(alg);
    test_started = static_key_enabled(&crypto_boot_test_finished);
    if (!IS_ERR_OR_NULL(larval))
        larval->test_started = test_started;
    up_write(&crypto_alg_sem);

    if (IS_ERR(larval))
    if (IS_ERR_OR_NULL(larval))
        return PTR_ERR(larval);

    crypto_wait_for_test(larval);
    if (test_started)
        crypto_wait_for_test(larval);
    return 0;
}
EXPORT_SYMBOL_GPL(crypto_register_alg);

@@ -632,6 +632,8 @@ int crypto_register_instance(struct crypto_template *tmpl,
    larval = __crypto_register_alg(&inst->alg);
    if (IS_ERR(larval))
        goto unlock;
    else if (larval)
        larval->test_started = true;

    hlist_add_head(&inst->list, &tmpl->instances);
    inst->tmpl = tmpl;

@@ -640,7 +642,7 @@ int crypto_register_instance(struct crypto_template *tmpl,
    up_write(&crypto_alg_sem);

    err = PTR_ERR(larval);
    if (IS_ERR(larval))
    if (IS_ERR_OR_NULL(larval))
        goto err;

    crypto_wait_for_test(larval);

@@ -1261,9 +1263,48 @@ void crypto_stats_skcipher_decrypt(unsigned int cryptlen, int ret,
EXPORT_SYMBOL_GPL(crypto_stats_skcipher_decrypt);
#endif

static void __init crypto_start_tests(void)
{
    for (;;) {
        struct crypto_larval *larval = NULL;
        struct crypto_alg *q;

        down_write(&crypto_alg_sem);

        list_for_each_entry(q, &crypto_alg_list, cra_list) {
            struct crypto_larval *l;

            if (!crypto_is_larval(q))
                continue;

            l = (void *)q;

            if (!crypto_is_test_larval(l))
                continue;

            if (l->test_started)
                continue;

            l->test_started = true;
            larval = l;
            break;
        }

        up_write(&crypto_alg_sem);

        if (!larval)
            break;

        crypto_wait_for_test(larval);
    }

    static_branch_enable(&crypto_boot_test_finished);
}

static int __init crypto_algapi_init(void)
{
    crypto_init_proc();
    crypto_start_tests();
    return 0;
}

@@ -1272,7 +1313,11 @@ static void __exit crypto_algapi_exit(void)
    crypto_exit_proc();
}

module_init(crypto_algapi_init);
/*
 * We run this at late_initcall so that all the built-in algorithms
 * have had a chance to register themselves first.
 */
late_initcall(crypto_algapi_init);
module_exit(crypto_algapi_exit);

MODULE_LICENSE("GPL");
crypto/api.c | 50

@@ -12,6 +12,7 @@

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/module.h>

@@ -30,6 +31,9 @@ EXPORT_SYMBOL_GPL(crypto_alg_sem);
BLOCKING_NOTIFIER_HEAD(crypto_chain);
EXPORT_SYMBOL_GPL(crypto_chain);

DEFINE_STATIC_KEY_FALSE(crypto_boot_test_finished);
EXPORT_SYMBOL_GPL(crypto_boot_test_finished);

static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg);

struct crypto_alg *crypto_mod_get(struct crypto_alg *alg)

@@ -47,11 +51,6 @@ void crypto_mod_put(struct crypto_alg *alg)
}
EXPORT_SYMBOL_GPL(crypto_mod_put);

static inline int crypto_is_test_larval(struct crypto_larval *larval)
{
    return larval->alg.cra_driver_name[0];
}

static struct crypto_alg *__crypto_alg_lookup(const char *name, u32 type,
                                              u32 mask)
{

@@ -163,11 +162,52 @@ void crypto_larval_kill(struct crypto_alg *alg)
}
EXPORT_SYMBOL_GPL(crypto_larval_kill);

void crypto_wait_for_test(struct crypto_larval *larval)
{
    int err;

    err = crypto_probing_notify(CRYPTO_MSG_ALG_REGISTER, larval->adult);
    if (WARN_ON_ONCE(err != NOTIFY_STOP))
        goto out;

    err = wait_for_completion_killable(&larval->completion);
    WARN_ON(err);
    if (!err)
        crypto_notify(CRYPTO_MSG_ALG_LOADED, larval);

out:
    crypto_larval_kill(&larval->alg);
}
EXPORT_SYMBOL_GPL(crypto_wait_for_test);

static void crypto_start_test(struct crypto_larval *larval)
{
    if (!crypto_is_test_larval(larval))
        return;

    if (larval->test_started)
        return;

    down_write(&crypto_alg_sem);
    if (larval->test_started) {
        up_write(&crypto_alg_sem);
        return;
    }

    larval->test_started = true;
    up_write(&crypto_alg_sem);

    crypto_wait_for_test(larval);
}

static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg)
{
    struct crypto_larval *larval = (void *)alg;
    long timeout;

    if (!static_branch_likely(&crypto_boot_test_finished))
        crypto_start_test(larval);

    timeout = wait_for_completion_killable_timeout(
        &larval->completion, 60 * HZ);
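The crypto_start_test() path above is a double-checked "start once" idiom: an unlocked fast check, a re-check under crypto_alg_sem, and the actual work run outside the lock. A reduced sketch of that shape (illustrative only, not from this commit; names simplified):

    #include <linux/mutex.h>
    #include <linux/types.h>

    static DEFINE_MUTEX(test_lock);
    static bool test_started;

    static void start_test_once(void (*run_test)(void))
    {
        if (test_started)           /* cheap unlocked fast path */
            return;

        mutex_lock(&test_lock);
        if (test_started) {         /* re-check under the lock */
            mutex_unlock(&test_lock);
            return;
        }
        test_started = true;
        mutex_unlock(&test_lock);

        /* Run outside the lock, as crypto_wait_for_test() is above. */
        run_test();
    }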
@@ -327,6 +327,19 @@ int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);

/**
 * crypto_transfer_kpp_request_to_engine - transfer one kpp_request to list
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request need to be listed into the engine queue
 */
int crypto_transfer_kpp_request_to_engine(struct crypto_engine *engine,
                                          struct kpp_request *req)
{
    return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_kpp_request_to_engine);

/**
 * crypto_transfer_skcipher_request_to_engine - transfer one skcipher_request
 * to list into the engine queue

@@ -382,6 +395,19 @@ void crypto_finalize_hash_request(struct crypto_engine *engine,
}
EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);

/**
 * crypto_finalize_kpp_request - finalize one kpp_request if the request is done
 * @engine: the hardware engine
 * @req: the request need to be finalized
 * @err: error number
 */
void crypto_finalize_kpp_request(struct crypto_engine *engine,
                                 struct kpp_request *req, int err)
{
    return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_kpp_request);

/**
 * crypto_finalize_skcipher_request - finalize one skcipher_request if
 * the request is done
@@ -2003,7 +2003,7 @@ static inline int __init drbg_healthcheck_sanity(void)
#define OUTBUFLEN 16
    unsigned char buf[OUTBUFLEN];
    struct drbg_state *drbg = NULL;
    int ret = -EFAULT;
    int ret;
    int rc = -EFAULT;
    bool pr = false;
    int coreref = 0;
crypto/ecc.c | 14

@@ -32,10 +32,10 @@
#include <linux/fips.h>
#include <crypto/ecdh.h>
#include <crypto/rng.h>
#include <crypto/internal/ecc.h>
#include <asm/unaligned.h>
#include <linux/ratelimit.h>

#include "ecc.h"
#include "ecc_curve_defs.h"

typedef struct {

@@ -81,7 +81,7 @@ static void ecc_free_digits_space(u64 *space)
    kfree_sensitive(space);
}

static struct ecc_point *ecc_alloc_point(unsigned int ndigits)
struct ecc_point *ecc_alloc_point(unsigned int ndigits)
{
    struct ecc_point *p = kmalloc(sizeof(*p), GFP_KERNEL);

@@ -106,8 +106,9 @@ static struct ecc_point *ecc_alloc_point(unsigned int ndigits)
    kfree(p);
    return NULL;
}
EXPORT_SYMBOL(ecc_alloc_point);

static void ecc_free_point(struct ecc_point *p)
void ecc_free_point(struct ecc_point *p)
{
    if (!p)
        return;

@@ -116,6 +117,7 @@ static void ecc_free_point(struct ecc_point *p)
    kfree_sensitive(p->y);
    kfree_sensitive(p);
}
EXPORT_SYMBOL(ecc_free_point);

static void vli_clear(u64 *vli, unsigned int ndigits)
{

@@ -165,7 +167,7 @@ static unsigned int vli_num_digits(const u64 *vli, unsigned int ndigits)
}

/* Counts the number of bits required for vli. */
static unsigned int vli_num_bits(const u64 *vli, unsigned int ndigits)
unsigned int vli_num_bits(const u64 *vli, unsigned int ndigits)
{
    unsigned int i, num_digits;
    u64 digit;

@@ -180,6 +182,7 @@ static unsigned int vli_num_bits(const u64 *vli, unsigned int ndigits)

    return ((num_digits - 1) * 64 + i);
}
EXPORT_SYMBOL(vli_num_bits);

/* Set dest from unaligned bit string src. */
void vli_from_be64(u64 *dest, const void *src, unsigned int ndigits)

@@ -1062,11 +1065,12 @@ EXPORT_SYMBOL(vli_mod_inv);
/* ------ Point operations ------ */

/* Returns true if p_point is the point at infinity, false otherwise. */
static bool ecc_point_is_zero(const struct ecc_point *point)
bool ecc_point_is_zero(const struct ecc_point *point)
{
    return (vli_is_zero(point->x, point->ndigits) &&
            vli_is_zero(point->y, point->ndigits));
}
EXPORT_SYMBOL(ecc_point_is_zero);

/* Point multiplication algorithm using Montgomery's ladder with co-Z
 * coordinates. From https://eprint.iacr.org/2011/338.pdf
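With these helpers exported (and, per the pull list, ecc.h moved to include/crypto/internal), code outside crypto/ecc.c -- such as the new Keem Bay OCS ECC driver -- can work with curve points directly. A hedged sketch of what a consumer might look like (hypothetical function, error handling trimmed, not taken from the actual driver):

    #include <linux/errno.h>
    #include <linux/printk.h>
    #include <linux/string.h>
    #include <crypto/internal/ecc.h>

    /* Hypothetical consumer of the newly exported point helpers. */
    static int example_load_point(const u64 *x, const u64 *y,
                                  unsigned int ndigits)
    {
        struct ecc_point *p;
        int ret = 0;

        p = ecc_alloc_point(ndigits);
        if (!p)
            return -ENOMEM;

        memcpy(p->x, x, ndigits * sizeof(u64));
        memcpy(p->y, y, ndigits * sizeof(u64));

        /* The point at infinity is not a usable public key. */
        if (ecc_point_is_zero(p))
            ret = -EINVAL;
        else
            pr_debug("x occupies %u bits\n", vli_num_bits(p->x, ndigits));

        ecc_free_point(p);
        return ret;
    }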
@@ -6,11 +6,11 @@
 */

#include <linux/module.h>
#include <crypto/internal/ecc.h>
#include <crypto/internal/kpp.h>
#include <crypto/kpp.h>
#include <crypto/ecdh.h>
#include <linux/scatterlist.h>
#include "ecc.h"

struct ecdh_ctx {
    unsigned int curve_id;

@@ -5,12 +5,12 @@

#include <linux/module.h>
#include <crypto/internal/akcipher.h>
#include <crypto/internal/ecc.h>
#include <crypto/akcipher.h>
#include <crypto/ecdh.h>
#include <linux/asn1_decoder.h>
#include <linux/scatterlist.h>

#include "ecc.h"
#include "ecdsasignature.asn1.h"

struct ecc_ctx {

@@ -20,12 +20,12 @@
#include <linux/crypto.h>
#include <crypto/streebog.h>
#include <crypto/internal/akcipher.h>
#include <crypto/internal/ecc.h>
#include <crypto/akcipher.h>
#include <linux/oid_registry.h>
#include <linux/scatterlist.h>
#include "ecrdsa_params.asn1.h"
#include "ecrdsa_pub_key.asn1.h"
#include "ecc.h"
#include "ecrdsa_defs.h"

#define ECRDSA_MAX_SIG_SIZE (2 * 512 / 8)

@@ -13,7 +13,7 @@
#ifndef _CRYTO_ECRDSA_DEFS_H
#define _CRYTO_ECRDSA_DEFS_H

#include "ecc.h"
#include <crypto/internal/ecc.h>

#define ECRDSA_MAX_SIG_SIZE (2 * 512 / 8)
#define ECRDSA_MAX_DIGITS (512 / 64)
@@ -10,6 +10,7 @@

#include <crypto/algapi.h>
#include <linux/completion.h>
#include <linux/jump_label.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/notifier.h>

@@ -27,6 +28,7 @@ struct crypto_larval {
    struct crypto_alg *adult;
    struct completion completion;
    u32 mask;
    bool test_started;
};

enum {

@@ -45,6 +47,8 @@ extern struct list_head crypto_alg_list;
extern struct rw_semaphore crypto_alg_sem;
extern struct blocking_notifier_head crypto_chain;

DECLARE_STATIC_KEY_FALSE(crypto_boot_test_finished);

#ifdef CONFIG_PROC_FS
void __init crypto_init_proc(void);
void __exit crypto_exit_proc(void);

@@ -70,6 +74,7 @@ struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask);

struct crypto_larval *crypto_larval_alloc(const char *name, u32 type, u32 mask);
void crypto_larval_kill(struct crypto_alg *alg);
void crypto_wait_for_test(struct crypto_larval *larval);
void crypto_alg_tested(const char *name, int err);

void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list,

@@ -156,5 +161,10 @@ static inline void crypto_yield(u32 flags)
    cond_resched();
}

static inline int crypto_is_test_larval(struct crypto_larval *larval)
{
    return larval->alg.cra_driver_name[0];
}

#endif /* _CRYPTO_INTERNAL_H */
@ -125,7 +125,7 @@ struct rand_data {
* This test complies with SP800-90B section 4.4.2.
***************************************************************************/

/**
/*
* Reset the APT counter
*
* @ec [in] Reference to entropy collector
@ -138,7 +138,7 @@ static void jent_apt_reset(struct rand_data *ec, unsigned int delta_masked)
ec->apt_observations = 0;
}

/**
/*
* Insert a new entropy event into APT
*
* @ec [in] Reference to entropy collector
@ -182,7 +182,7 @@ static void jent_apt_insert(struct rand_data *ec, unsigned int delta_masked)
* the end. The caller of the Jitter RNG is informed with an error code.
***************************************************************************/

/**
/*
* Repetition Count Test as defined in SP800-90B section 4.4.1
*
* @ec [in] Reference to entropy collector
@ -223,7 +223,7 @@ static void jent_rct_insert(struct rand_data *ec, int stuck)
}
}

/**
/*
* Is there an RCT health test failure?
*
* @ec [in] Reference to entropy collector
@ -246,7 +246,7 @@ static inline __u64 jent_delta(__u64 prev, __u64 next)
(JENT_UINT64_MAX - prev + 1 + next);
}

/**
/*
* Stuck test by checking the:
* 1st derivative of the jitter measurement (time delta)
* 2nd derivative of the jitter measurement (delta of time deltas)
@ -288,7 +288,7 @@ static int jent_stuck(struct rand_data *ec, __u64 current_delta)
return 0;
}

/**
/*
* Report any health test failures
*
* @ec [in] Reference to entropy collector
@ -310,7 +310,7 @@ static int jent_health_failure(struct rand_data *ec)
* Noise sources
***************************************************************************/

/**
/*
* Update of the loop count used for the next round of
* an entropy collection.
*
@ -353,7 +353,7 @@ static __u64 jent_loop_shuffle(struct rand_data *ec,
return (shuffle + (1<<min));
}

/**
/*
* CPU Jitter noise source -- this is the noise source based on the CPU
* execution time jitter
*
@ -435,7 +435,7 @@ static void jent_lfsr_time(struct rand_data *ec, __u64 time, __u64 loop_cnt,
ec->data = new;
}

/**
/*
* Memory Access noise source -- this is a noise source based on variations in
* memory access times
*
@ -500,7 +500,7 @@ static void jent_memaccess(struct rand_data *ec, __u64 loop_cnt)
/***************************************************************************
* Start of entropy processing logic
***************************************************************************/
/**
/*
* This is the heart of the entropy generation: calculate time deltas and
* use the CPU jitter in the time deltas. The jitter is injected into the
* entropy pool.
@ -539,7 +539,7 @@ static int jent_measure_jitter(struct rand_data *ec)
return stuck;
}

/**
/*
* Generator of one 64 bit random number
* Function fills rand_data->data
*
@ -566,7 +566,7 @@ static void jent_gen_entropy(struct rand_data *ec)
}
}

/**
/*
* Entry function: Obtain entropy for the caller.
*
* This function invokes the entropy gathering logic as often to generate

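The jent_delta() hunk above only changes comment style, but the wrap-safe delta it documents is worth spelling out. A minimal sketch of the same arithmetic; demo_delta() and DEMO_UINT64_MAX are illustrative names, not the Jitter RNG API:

#include <linux/types.h>

#define DEMO_UINT64_MAX ((__u64)(~((__u64)0)))

/* Sketch: compute the distance between two timestamps even when the
 * counter wrapped between the two reads.
 */
static __u64 demo_delta(__u64 prev, __u64 next)
{
	return (prev < next) ? (next - prev) :
			       (DEMO_UINT64_MAX - prev + 1 + next);
}
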
@ -78,12 +78,14 @@ static void pcrypt_aead_enc(struct padata_priv *padata)
{
struct pcrypt_request *preq = pcrypt_padata_request(padata);
struct aead_request *req = pcrypt_request_ctx(preq);
int ret;

padata->info = crypto_aead_encrypt(req);
ret = crypto_aead_encrypt(req);

if (padata->info == -EINPROGRESS)
if (ret == -EINPROGRESS)
return;

padata->info = ret;
padata_do_serial(padata);
}

@ -123,12 +125,14 @@ static void pcrypt_aead_dec(struct padata_priv *padata)
{
struct pcrypt_request *preq = pcrypt_padata_request(padata);
struct aead_request *req = pcrypt_request_ctx(preq);
int ret;

padata->info = crypto_aead_decrypt(req);
ret = crypto_aead_decrypt(req);

if (padata->info == -EINPROGRESS)
if (ret == -EINPROGRESS)
return;

padata->info = ret;
padata_do_serial(padata);
}

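The two pcrypt hunks above exist to delay the write to padata->info: the return code is held in a local so a transient -EINPROGRESS is never published to the serialization machinery. A minimal sketch of that pattern, with hypothetical do_async_op()/serialize() stand-ins rather than the padata API:

#include <linux/errno.h>

struct demo_job {
	int status;
};

/* do_async_op() and serialize() are hypothetical stand-ins. */
extern int do_async_op(struct demo_job *job);
extern void serialize(struct demo_job *job);

static void run_async_step(struct demo_job *job)
{
	int ret = do_async_op(job);	/* may complete asynchronously */

	if (ret == -EINPROGRESS)
		return;			/* the completion callback publishes later */

	job->status = ret;		/* only ever publish a final result */
	serialize(job);
}
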
@ -1333,7 +1333,7 @@ static void test_mb_skcipher_speed(const char *algo, int enc, int secs,

if (bs > XBUFSIZE * PAGE_SIZE) {
pr_err("template (%u) too big for buffer (%lu)\n",
*b_size, XBUFSIZE * PAGE_SIZE);
bs, XBUFSIZE * PAGE_SIZE);
goto out;
}

@ -1386,8 +1386,7 @@ static void test_mb_skcipher_speed(const char *algo, int enc, int secs,
memset(cur->xbuf[p], 0xff, k);

skcipher_request_set_crypt(cur->req, cur->sg,
cur->sg, *b_size,
iv);
cur->sg, bs, iv);
}

if (secs) {

@ -1061,14 +1061,14 @@ static void generate_random_testvec_config(struct testvec_config *cfg,

static void crypto_disable_simd_for_test(void)
{
preempt_disable();
migrate_disable();
__this_cpu_write(crypto_simd_disabled_for_test, true);
}

static void crypto_reenable_simd_for_test(void)
{
__this_cpu_write(crypto_simd_disabled_for_test, false);
preempt_enable();
migrate_enable();
}

/*

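The testmgr hunk above swaps preempt_disable() for migrate_disable(): pinning the task to its current CPU is enough to keep the per-CPU test flag coherent, without shutting off preemption. A minimal sketch of the same pairing around a per-CPU variable; my_flag and the helpers are illustrative:

#include <linux/percpu.h>
#include <linux/preempt.h>	/* migrate_disable()/migrate_enable() */

static DEFINE_PER_CPU(bool, my_flag);

/* Sketch: migrate_disable() keeps the task on this CPU, so the
 * this_cpu access below stays stable while remaining preemptible.
 */
static void set_cpu_local_flag(void)
{
	migrate_disable();
	__this_cpu_write(my_flag, true);
}

static void clear_cpu_local_flag(void)
{
	__this_cpu_write(my_flag, false);
	migrate_enable();
}
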
@ -1201,7 +1201,7 @@ static const struct akcipher_testvec pkcs1pad_rsa_tv_template[] = {
"\xd1\x86\x48\x55\xce\x83\xee\x8e\x51\xc7\xde\x32\x12\x47\x7d\x46"
"\xb8\x35\xdf\x41\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00"
"\x02\x01\x00",
.key_len = 804,
.key_len = 803,
/*
* m is SHA256 hash of following message:
* "\x49\x41\xbe\x0a\x0c\xc9\xf6\x35\x51\xe4\x27\x56\x13\x71\x4b\xd0"

@ -63,7 +63,7 @@ config HW_RANDOM_AMD

config HW_RANDOM_ATMEL
tristate "Atmel Random Number Generator support"
depends on ARCH_AT91 && HAVE_CLK && OF
depends on (ARCH_AT91 || COMPILE_TEST) && HAVE_CLK && OF
default HW_RANDOM
help
This driver provides kernel-side support for the Random Number
@ -87,7 +87,7 @@ config HW_RANDOM_BA431
config HW_RANDOM_BCM2835
tristate "Broadcom BCM2835/BCM63xx Random Number Generator support"
depends on ARCH_BCM2835 || ARCH_BCM_NSP || ARCH_BCM_5301X || \
ARCH_BCM_63XX || BCM63XX || BMIPS_GENERIC
ARCH_BCM_63XX || BCM63XX || BMIPS_GENERIC || COMPILE_TEST
default HW_RANDOM
help
This driver provides kernel-side support for the Random Number
@ -100,7 +100,7 @@ config HW_RANDOM_BCM2835

config HW_RANDOM_IPROC_RNG200
tristate "Broadcom iProc/STB RNG200 support"
depends on ARCH_BCM_IPROC || ARCH_BCM2835 || ARCH_BRCMSTB
depends on ARCH_BCM_IPROC || ARCH_BCM2835 || ARCH_BRCMSTB || COMPILE_TEST
default HW_RANDOM
help
This driver provides kernel-side support for the RNG200
@ -165,7 +165,7 @@ config HW_RANDOM_IXP4XX

config HW_RANDOM_OMAP
tristate "OMAP Random Number Generator support"
depends on ARCH_OMAP16XX || ARCH_OMAP2PLUS || ARCH_MVEBU || ARCH_K3
depends on ARCH_OMAP16XX || ARCH_OMAP2PLUS || ARCH_MVEBU || ARCH_K3 || COMPILE_TEST
default HW_RANDOM
help
This driver provides kernel-side support for the Random Number
@ -179,7 +179,7 @@ config HW_RANDOM_OMAP

config HW_RANDOM_OMAP3_ROM
tristate "OMAP3 ROM Random Number Generator support"
depends on ARCH_OMAP3
depends on ARCH_OMAP3 || COMPILE_TEST
default HW_RANDOM
help
This driver provides kernel-side support for the Random Number
@ -298,7 +298,7 @@ config HW_RANDOM_INGENIC_TRNG

config HW_RANDOM_NOMADIK
tristate "ST-Ericsson Nomadik Random Number Generator support"
depends on ARCH_NOMADIK
depends on ARCH_NOMADIK || COMPILE_TEST
default HW_RANDOM
help
This driver provides kernel-side support for the Random Number

@ -42,13 +42,11 @@ static int ixp4xx_rng_probe(struct platform_device *pdev)
{
void __iomem * rng_base;
struct device *dev = &pdev->dev;
struct resource *res;

if (!cpu_is_ixp46x()) /* includes IXP455 */
return -ENOSYS;

res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
rng_base = devm_ioremap_resource(dev, res);
rng_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rng_base))
return PTR_ERR(rng_base);

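The ixp4xx hunk above is the common devm_platform_ioremap_resource() conversion: one helper replaces the platform_get_resource() + devm_ioremap_resource() pair. A minimal probe sketch of the resulting shape; demo_probe() is illustrative:

#include <linux/platform_device.h>
#include <linux/err.h>

/* Sketch: fetch and map the first MEM resource in a single call. */
static int demo_probe(struct platform_device *pdev)
{
	void __iomem *base;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* ... program registers through base ... */
	return 0;
}
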
@ -54,9 +54,10 @@ static int meson_rng_probe(struct platform_device *pdev)
if (IS_ERR(data->base))
return PTR_ERR(data->base);

data->core_clk = devm_clk_get(dev, "core");
data->core_clk = devm_clk_get_optional(dev, "core");
if (IS_ERR(data->core_clk))
data->core_clk = NULL;
return dev_err_probe(dev, PTR_ERR(data->core_clk),
"Failed to get core clock\n");

if (data->core_clk) {
ret = clk_prepare_enable(data->core_clk);

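The meson hunk above relies on devm_clk_get_optional() returning NULL, not an error, when the clock is simply absent, so only genuine failures reach dev_err_probe(). A minimal sketch of that error-handling shape; demo_get_clock() is illustrative:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Sketch: a missing optional clock yields *clk == NULL, which the
 * clk API treats as a no-op consumer; real errors are reported once
 * through dev_err_probe() (which also handles -EPROBE_DEFER quietly).
 */
static int demo_get_clock(struct device *dev, struct clk **clk)
{
	*clk = devm_clk_get_optional(dev, "core");
	if (IS_ERR(*clk))
		return dev_err_probe(dev, PTR_ERR(*clk),
				     "Failed to get core clock\n");

	return 0;
}
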
@ -166,8 +166,13 @@ static int mtk_rng_runtime_resume(struct device *dev)
return mtk_rng_init(&priv->rng);
}

static UNIVERSAL_DEV_PM_OPS(mtk_rng_pm_ops, mtk_rng_runtime_suspend,
mtk_rng_runtime_resume, NULL);
static const struct dev_pm_ops mtk_rng_pm_ops = {
SET_RUNTIME_PM_OPS(mtk_rng_runtime_suspend,
mtk_rng_runtime_resume, NULL)
SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
pm_runtime_force_resume)
};

#define MTK_RNG_PM_OPS (&mtk_rng_pm_ops)
#else /* CONFIG_PM */
#define MTK_RNG_PM_OPS NULL

@ -111,7 +111,7 @@ static ssize_t trng_counter_show(struct device *dev,
#if IS_ENABLED(CONFIG_ARCH_RANDOM)
u64 arch_counter = atomic64_read(&s390_arch_random_counter);

return snprintf(buf, PAGE_SIZE,
return sysfs_emit(buf,
"trng: %llu\n"
"hwrng: %llu\n"
"arch: %llu\n"
@ -119,7 +119,7 @@ static ssize_t trng_counter_show(struct device *dev,
dev_counter, hwrng_counter, arch_counter,
dev_counter + hwrng_counter + arch_counter);
#else
return snprintf(buf, PAGE_SIZE,
return sysfs_emit(buf,
"trng: %llu\n"
"hwrng: %llu\n"
"total: %llu\n",

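The s390 trng hunks above are part of the tree-wide move from snprintf(buf, PAGE_SIZE, ...) to sysfs_emit(), which bakes the one-page sysfs contract into the helper itself. A minimal show-callback sketch; demo_show() is illustrative:

#include <linux/device.h>
#include <linux/sysfs.h>

/* Sketch: sysfs_emit() knows buf is a full sysfs page, so callers no
 * longer pass (and can no longer get wrong) the buffer length.
 */
static ssize_t demo_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	return sysfs_emit(buf, "value: %d\n", 42);
}
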
@ -1153,16 +1153,27 @@ static struct caam_akcipher_alg caam_rsa = {
int caam_pkc_init(struct device *ctrldev)
{
struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
u32 pk_inst;
u32 pk_inst, pkha;
int err;
init_done = false;

/* Determine public key hardware accelerator presence. */
if (priv->era < 10)
if (priv->era < 10) {
pk_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
else
pk_inst = rd_reg32(&priv->ctrl->vreg.pkha) & CHA_VER_NUM_MASK;
} else {
pkha = rd_reg32(&priv->ctrl->vreg.pkha);
pk_inst = pkha & CHA_VER_NUM_MASK;

/*
* Newer CAAMs support partially disabled functionality. If this is the
* case, the number is non-zero, but this bit is set to indicate that
* no encryption or decryption is supported. Only signing and verifying
* is supported.
*/
if (pkha & CHA_VER_MISC_PKHA_NO_CRYPT)
pk_inst = 0;
}

/* Do not register algorithms if PKHA is not present. */
if (!pk_inst)

@ -322,6 +322,9 @@ struct version_regs {
/* CHA Miscellaneous Information - AESA_MISC specific */
#define CHA_VER_MISC_AES_GCM BIT(1 + CHA_VER_MISC_SHIFT)

/* CHA Miscellaneous Information - PKHA_MISC specific */
#define CHA_VER_MISC_PKHA_NO_CRYPT BIT(7 + CHA_VER_MISC_SHIFT)

/*
* caam_perfmon - Performance Monitor/Secure Memory Status/
* CAAM Global Status/Component Version IDs

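The caampkc hunk above reads the PKHA version register once and treats a crypt-disabled accelerator as absent. A minimal sketch of that capability check; the DEMO_* masks are illustrative, not the CAAM field layout (which lives in the regs.h hunk above):

#include <linux/bits.h>
#include <linux/types.h>

#define DEMO_PKHA_NUM_MASK	GENMASK(3, 0)	/* instance count */
#define DEMO_PKHA_NO_CRYPT	BIT(7)		/* sign/verify only */

/* Sketch: a non-zero instance count with the NO_CRYPT bit set means the
 * block exists but cannot encrypt or decrypt, so treat it as unusable.
 */
static bool demo_pkha_usable(u32 pkha_reg)
{
	if (pkha_reg & DEMO_PKHA_NO_CRYPT)
		return false;

	return (pkha_reg & DEMO_PKHA_NUM_MASK) != 0;
}
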
@ -467,8 +467,8 @@ static int ccp_init(struct ccp_device *ccp)

cmd_q = &ccp->cmd_q[i];

kthread = kthread_create(ccp_cmd_queue_thread, cmd_q,
"%s-q%u", ccp->name, cmd_q->id);
kthread = kthread_run(ccp_cmd_queue_thread, cmd_q,
"%s-q%u", ccp->name, cmd_q->id);
if (IS_ERR(kthread)) {
dev_err(dev, "error creating queue thread (%ld)\n",
PTR_ERR(kthread));
@ -477,7 +477,6 @@ static int ccp_init(struct ccp_device *ccp)
}

cmd_q->kthread = kthread;
wake_up_process(kthread);
}

dev_dbg(dev, "Enabling interrupts...\n");

@ -950,8 +950,8 @@ static int ccp5_init(struct ccp_device *ccp)

cmd_q = &ccp->cmd_q[i];

kthread = kthread_create(ccp_cmd_queue_thread, cmd_q,
"%s-q%u", ccp->name, cmd_q->id);
kthread = kthread_run(ccp_cmd_queue_thread, cmd_q,
"%s-q%u", ccp->name, cmd_q->id);
if (IS_ERR(kthread)) {
dev_err(dev, "error creating queue thread (%ld)\n",
PTR_ERR(kthread));
@ -960,7 +960,6 @@ static int ccp5_init(struct ccp_device *ccp)
}

cmd_q->kthread = kthread;
wake_up_process(kthread);
}

dev_dbg(dev, "Enabling interrupts...\n");

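Both ccp hunks above make the same substitution: kthread_run() is kthread_create() plus wake_up_process() in one call. A minimal sketch; start_worker() is illustrative:

#include <linux/kthread.h>
#include <linux/err.h>

/* Sketch: the returned task is already running, so no separate
 * wake_up_process() is needed.
 */
static int start_worker(int (*fn)(void *), void *data, int id)
{
	struct task_struct *task;

	task = kthread_run(fn, data, "demo-q%d", id);
	if (IS_ERR(task))
		return PTR_ERR(task);

	return 0;
}
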
@ -134,7 +134,7 @@ static int sev_cmd_buffer_len(int cmd)
case SEV_CMD_DOWNLOAD_FIRMWARE: return sizeof(struct sev_data_download_firmware);
case SEV_CMD_GET_ID: return sizeof(struct sev_data_get_id);
case SEV_CMD_ATTESTATION_REPORT: return sizeof(struct sev_data_attestation_report);
case SEV_CMD_SEND_CANCEL: return sizeof(struct sev_data_send_cancel);
case SEV_CMD_SEND_CANCEL: return sizeof(struct sev_data_send_cancel);
default: return 0;
}

@ -103,7 +103,8 @@ MODULE_DEVICE_TABLE(of, arm_ccree_dev_of_match);
static void init_cc_cache_params(struct cc_drvdata *drvdata)
{
struct device *dev = drvdata_to_dev(drvdata);
u32 cache_params, ace_const, val, mask;
u32 cache_params, ace_const, val;
u64 mask;

/* compute CC_AXIM_CACHE_PARAMS */
cache_params = cc_ioread(drvdata, CC_REG(AXIM_CACHE_PARAMS));

@ -233,6 +233,8 @@
#define QM_DBG_WRITE_LEN 1024
#define QM_DBG_TMP_BUF_LEN 22
#define QM_PCI_COMMAND_INVALID ~0
#define QM_RESET_STOP_TX_OFFSET 1
#define QM_RESET_STOP_RX_OFFSET 2

#define WAIT_PERIOD 20
#define REMOVE_WAIT_DELAY 10
@ -883,6 +885,20 @@ static irqreturn_t qm_mb_cmd_irq(int irq, void *data)
return IRQ_HANDLED;
}

static void qm_set_qp_disable(struct hisi_qp *qp, int offset)
{
u32 *addr;

if (qp->is_in_kernel)
return;

addr = (u32 *)(qp->qdma.va + qp->qdma.size) - offset;
*addr = 1;

/* make sure setup is completed */
mb();
}

static irqreturn_t qm_aeq_irq(int irq, void *data)
{
struct hisi_qm *qm = data;
@ -2467,6 +2483,15 @@ static void *qm_get_avail_sqe(struct hisi_qp *qp)
return qp->sqe + sq_tail * qp->qm->sqe_size;
}

static void hisi_qm_unset_hw_reset(struct hisi_qp *qp)
{
u64 *addr;

/* Use last 64 bits of DUS to reset status. */
addr = (u64 *)(qp->qdma.va + qp->qdma.size) - QM_RESET_STOP_TX_OFFSET;
*addr = 0;
}

static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type)
{
struct device *dev = &qm->pdev->dev;
@ -2492,7 +2517,7 @@ static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type)
}

qp = &qm->qp_array[qp_id];

hisi_qm_unset_hw_reset(qp);
memset(qp->cqe, 0, sizeof(struct qm_cqe) * QM_Q_DEPTH);

qp->event_cb = NULL;
@ -2912,6 +2937,14 @@ static int hisi_qm_get_available_instances(struct uacce_device *uacce)
return hisi_qm_get_free_qp_num(uacce->priv);
}

static void hisi_qm_set_hw_reset(struct hisi_qm *qm, int offset)
{
int i;

for (i = 0; i < qm->qp_num; i++)
qm_set_qp_disable(&qm->qp_array[i], offset);
}

static int hisi_qm_uacce_get_queue(struct uacce_device *uacce,
unsigned long arg,
struct uacce_queue *q)
@ -3094,7 +3127,7 @@ static int qm_alloc_uacce(struct hisi_qm *qm)
if (IS_ERR(uacce))
return PTR_ERR(uacce);

if (uacce->flags & UACCE_DEV_SVA && qm->mode == UACCE_MODE_SVA) {
if (uacce->flags & UACCE_DEV_SVA) {
qm->use_sva = true;
} else {
/* only consider sva case */
@ -3122,8 +3155,10 @@ static int qm_alloc_uacce(struct hisi_qm *qm)
else
mmio_page_nr = qm->db_interval / PAGE_SIZE;

/* Add one more page for device or qp status */
dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * QM_Q_DEPTH +
sizeof(struct qm_cqe) * QM_Q_DEPTH) >> PAGE_SHIFT;
sizeof(struct qm_cqe) * QM_Q_DEPTH + PAGE_SIZE) >>
PAGE_SHIFT;

uacce->qf_pg_num[UACCE_QFRT_MMIO] = mmio_page_nr;
uacce->qf_pg_num[UACCE_QFRT_DUS] = dus_page_nr;
@ -3367,8 +3402,10 @@ void hisi_qm_uninit(struct hisi_qm *qm)

qm_irq_unregister(qm);
hisi_qm_pci_uninit(qm);
uacce_remove(qm->uacce);
qm->uacce = NULL;
if (qm->use_sva) {
uacce_remove(qm->uacce);
qm->uacce = NULL;
}

up_write(&qm->qps_lock);
}
@ -3682,11 +3719,13 @@ int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r)

if (qm->status.stop_reason == QM_SOFT_RESET ||
qm->status.stop_reason == QM_FLR) {
hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
ret = qm_stop_started_qp(qm);
if (ret < 0) {
dev_err(dev, "Failed to stop started qp!\n");
goto err_unlock;
}
hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
}

/* Mask eq and aeq irq */
@ -4185,7 +4224,7 @@ static ssize_t qm_qos_value_init(const char *buf, unsigned long *val)
return -EINVAL;
}

ret = sscanf(buf, "%ld", val);
ret = sscanf(buf, "%lu", val);
if (ret != QM_QOS_VAL_NUM)
return -EINVAL;

@ -5045,6 +5084,8 @@ static int qm_controller_reset(struct hisi_qm *qm)

ret = qm_controller_reset_prepare(qm);
if (ret) {
hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
clear_bit(QM_RST_SCHED, &qm->misc_ctl);
return ret;
}
@ -5131,6 +5172,8 @@ void hisi_qm_reset_prepare(struct pci_dev *pdev)
ret = hisi_qm_stop(qm, QM_FLR);
if (ret) {
pci_err(pdev, "Failed to stop QM, ret = %d.\n", ret);
hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
return;
}

@ -5314,9 +5357,14 @@ static void qm_pf_reset_vf_prepare(struct hisi_qm *qm,
atomic_set(&qm->status.flags, QM_STOP);
cmd = QM_VF_PREPARE_FAIL;
goto err_prepare;
} else {
goto out;
}

err_prepare:
hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
out:
pci_save_state(pdev);
ret = qm->ops->ping_pf(qm, cmd);
if (ret)
@ -5777,9 +5825,11 @@ int hisi_qm_init(struct hisi_qm *qm)
goto err_irq_register;
}

ret = qm_alloc_uacce(qm);
if (ret < 0)
dev_warn(dev, "fail to alloc uacce (%d)\n", ret);
if (qm->mode == UACCE_MODE_SVA) {
ret = qm_alloc_uacce(qm);
if (ret < 0)
dev_warn(dev, "fail to alloc uacce (%d)\n", ret);
}

ret = hisi_qm_memory_init(qm);
if (ret)
@ -5792,8 +5842,10 @@ int hisi_qm_init(struct hisi_qm *qm)
return 0;

err_alloc_uacce:
uacce_remove(qm->uacce);
qm->uacce = NULL;
if (qm->use_sva) {
uacce_remove(qm->uacce);
qm->uacce = NULL;
}
err_irq_register:
qm_irq_unregister(qm);
err_pci_init:

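The hisi_qm hunks above park per-queue stop flags in the last words of the queue's DMA region, at fixed offsets from the end, so user space sharing the region can observe a reset in progress. A minimal sketch of the flag placement; struct demo_queue and the DEMO_* offsets are illustrative, not the hisi_qm layout:

#include <linux/types.h>
#include <asm/barrier.h>

#define DEMO_STOP_TX_OFFSET 1	/* last u32 of the region */
#define DEMO_STOP_RX_OFFSET 2	/* second-to-last u32 */

struct demo_queue {
	void *va;	/* base of the shared DMA region */
	size_t size;	/* region size in bytes */
};

/* Sketch: write a flag word counted back from the end of the region,
 * then order the store before the reset proceeds.
 */
static void demo_set_stop_flag(struct demo_queue *q, int offset)
{
	u32 *addr = (u32 *)(q->va + q->size) - offset;

	*addr = 1;
	mb();
}
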
@ -218,7 +218,7 @@ static const struct debugfs_reg32 hzip_dfx_regs[] = {
{"HZIP_AVG_DELAY ", 0x28ull},
{"HZIP_MEM_VISIBLE_DATA ", 0x30ull},
{"HZIP_MEM_VISIBLE_ADDR ", 0x34ull},
{"HZIP_COMSUMED_BYTE ", 0x38ull},
{"HZIP_CONSUMED_BYTE ", 0x38ull},
{"HZIP_PRODUCED_BYTE ", 0x40ull},
{"HZIP_COMP_INF ", 0x70ull},
{"HZIP_PRE_OUT ", 0x78ull},

@ -674,14 +674,12 @@ static int img_hash_digest(struct ahash_request *req)
static int img_hash_cra_init(struct crypto_tfm *tfm, const char *alg_name)
{
struct img_hash_ctx *ctx = crypto_tfm_ctx(tfm);
int err = -ENOMEM;

ctx->fallback = crypto_alloc_ahash(alg_name, 0,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ctx->fallback)) {
pr_err("img_hash: Could not load fallback driver.\n");
err = PTR_ERR(ctx->fallback);
goto err;
return PTR_ERR(ctx->fallback);
}
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct img_hash_request_ctx) +
@ -689,9 +687,6 @@ static int img_hash_cra_init(struct crypto_tfm *tfm, const char *alg_name)
IMG_HASH_DMA_THRESHOLD);

return 0;

err:
return err;
}

static int img_hash_cra_md5_init(struct crypto_tfm *tfm)

@ -39,6 +39,25 @@ config CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS

Intel does not recommend use of CTS mode with AES/SM4.

config CRYPTO_DEV_KEEMBAY_OCS_ECC
tristate "Support for Intel Keem Bay OCS ECC HW acceleration"
depends on ARCH_KEEMBAY || COMPILE_TEST
depends on OF || COMPILE_TEST
depends on HAS_IOMEM
select CRYPTO_ECDH
select CRYPTO_ENGINE
help
Support for Intel Keem Bay Offload and Crypto Subsystem (OCS)
Elliptic Curve Cryptography (ECC) hardware acceleration for use with
Crypto API.

Provides OCS acceleration for ECDH-256 and ECDH-384.

Say Y or M if you are compiling for the Intel Keem Bay SoC. The
module will be called keembay-ocs-ecc.

If unsure, say N.

config CRYPTO_DEV_KEEMBAY_OCS_HCU
tristate "Support for Intel Keem Bay OCS HCU HW acceleration"
select CRYPTO_HASH

@ -4,5 +4,7 @@
obj-$(CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4) += keembay-ocs-aes.o
keembay-ocs-aes-objs := keembay-ocs-aes-core.o ocs-aes.o

obj-$(CONFIG_CRYPTO_DEV_KEEMBAY_OCS_ECC) += keembay-ocs-ecc.o

obj-$(CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU) += keembay-ocs-hcu.o
keembay-ocs-hcu-objs := keembay-ocs-hcu-core.o ocs-hcu.o

drivers/crypto/keembay/keembay-ocs-ecc.c (new file, 1017 lines)
File diff suppressed because it is too large

@ -615,7 +615,6 @@ static struct platform_driver marvell_cesa = {
};
module_platform_driver(marvell_cesa);

MODULE_ALIAS("platform:mv_crypto");
MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
MODULE_AUTHOR("Arnaud Ebalard <arno@natisbad.org>");
MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");

@ -1274,6 +1274,7 @@ static int aead_do_fallback(struct aead_request *req, bool is_enc)
req->base.complete, req->base.data);
aead_request_set_crypt(&rctx->fbk_req, req->src,
req->dst, req->cryptlen, req->iv);
aead_request_set_ad(&rctx->fbk_req, req->assoclen);
ret = is_enc ? crypto_aead_encrypt(&rctx->fbk_req) :
crypto_aead_decrypt(&rctx->fbk_req);
} else {

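The octeontx2 hunk above adds the missing aead_request_set_ad() when handing a request to the fallback cipher: a complete hand-off copies the callback, the crypt parameters and the associated-data length. A minimal sketch; demo_aead_fallback() is illustrative:

#include <crypto/aead.h>

/* Sketch: forward every relevant field of the original request to the
 * fallback request before invoking the fallback transform.
 */
static int demo_aead_fallback(struct aead_request *req,
			      struct aead_request *fbk_req, bool enc)
{
	aead_request_set_callback(fbk_req, req->base.flags,
				  req->base.complete, req->base.data);
	aead_request_set_crypt(fbk_req, req->src, req->dst,
			       req->cryptlen, req->iv);
	aead_request_set_ad(fbk_req, req->assoclen);

	return enc ? crypto_aead_encrypt(fbk_req) :
		     crypto_aead_decrypt(fbk_req);
}
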
@ -1,5 +1,6 @@
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2020 Intel Corporation */
#include <linux/iopoll.h>
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_pf2vf_msg.h>
@ -161,7 +162,36 @@ static void adf_enable_ints(struct adf_accel_dev *accel_dev)
ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_MASK_OFFSET, 0);
}

static int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev)
static int adf_init_device(struct adf_accel_dev *accel_dev)
{
void __iomem *addr;
u32 status;
u32 csr;
int ret;

addr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr;

/* Temporarily mask PM interrupt */
csr = ADF_CSR_RD(addr, ADF_4XXX_ERRMSK2);
csr |= ADF_4XXX_PM_SOU;
ADF_CSR_WR(addr, ADF_4XXX_ERRMSK2, csr);

/* Set DRV_ACTIVE bit to power up the device */
ADF_CSR_WR(addr, ADF_4XXX_PM_INTERRUPT, ADF_4XXX_PM_DRV_ACTIVE);

/* Poll status register to make sure the device is powered up */
ret = read_poll_timeout(ADF_CSR_RD, status,
status & ADF_4XXX_PM_INIT_STATE,
ADF_4XXX_PM_POLL_DELAY_US,
ADF_4XXX_PM_POLL_TIMEOUT_US, true, addr,
ADF_4XXX_PM_STATUS);
if (ret)
dev_err(&GET_DEV(accel_dev), "Failed to power up the device\n");

return ret;
}

static int pfvf_comms_disabled(struct adf_accel_dev *accel_dev)
{
return 0;
}
@ -215,6 +245,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data)
hw_data->exit_arb = adf_exit_arb;
hw_data->get_arb_mapping = adf_get_arbiter_mapping;
hw_data->enable_ints = adf_enable_ints;
hw_data->init_device = adf_init_device;
hw_data->reset_device = adf_reset_flr;
hw_data->admin_ae_mask = ADF_4XXX_ADMIN_AE_MASK;
hw_data->uof_get_num_objs = uof_get_num_objs;
@ -222,7 +253,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data)
hw_data->uof_get_ae_mask = uof_get_ae_mask;
hw_data->set_msix_rttable = set_msix_default_rttable;
hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer;
hw_data->enable_pfvf_comms = adf_enable_pf2vf_comms;
hw_data->enable_pfvf_comms = pfvf_comms_disabled;
hw_data->disable_iov = adf_disable_sriov;
hw_data->min_iov_compat_ver = ADF_PFVF_COMPAT_THIS_VERSION;

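The adf_init_device() hunk above implements a power-up handshake: assert a driver-active bit, then poll a status register with read_poll_timeout() until an init-done bit appears. A minimal sketch with illustrative DEMO_* offsets (the real ones are in the hw_data.h hunk below):

#include <linux/iopoll.h>
#include <linux/io.h>
#include <linux/bits.h>

#define DEMO_CTRL	0x28
#define DEMO_STATUS	0x0C
#define DEMO_DRV_ACTIVE	BIT(20)
#define DEMO_INIT_DONE	BIT(21)

/* Sketch: signal "driver active", then poll every 20us for up to one
 * second until the hardware reports it finished powering up.
 */
static int demo_power_up(void __iomem *base)
{
	u32 status;

	writel(DEMO_DRV_ACTIVE, base + DEMO_CTRL);

	return read_poll_timeout(readl, status, status & DEMO_INIT_DONE,
				 20, 1000000, true, base + DEMO_STATUS);
}
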
@ -62,6 +62,16 @@
#define ADF_4XXX_ADMINMSGLR_OFFSET (0x500578)
#define ADF_4XXX_MAILBOX_BASE_OFFSET (0x600970)

/* Power management */
#define ADF_4XXX_PM_POLL_DELAY_US 20
#define ADF_4XXX_PM_POLL_TIMEOUT_US USEC_PER_SEC
#define ADF_4XXX_PM_STATUS (0x50A00C)
#define ADF_4XXX_PM_INTERRUPT (0x50A028)
#define ADF_4XXX_PM_DRV_ACTIVE BIT(20)
#define ADF_4XXX_PM_INIT_STATE BIT(21)
/* Power management source in ERRSOU2 and ERRMSK2 */
#define ADF_4XXX_PM_SOU BIT(18)

/* Firmware Binaries */
#define ADF_4XXX_FW "qat_4xxx.bin"
#define ADF_4XXX_MMP "qat_4xxx_mmp.bin"

@ -48,34 +48,6 @@ static u32 get_ae_mask(struct adf_hw_device_data *self)
return ~(fuses | straps) & ADF_C3XXX_ACCELENGINES_MASK;
}

static u32 get_num_accels(struct adf_hw_device_data *self)
{
u32 i, ctr = 0;

if (!self || !self->accel_mask)
return 0;

for (i = 0; i < ADF_C3XXX_MAX_ACCELERATORS; i++) {
if (self->accel_mask & (1 << i))
ctr++;
}
return ctr;
}

static u32 get_num_aes(struct adf_hw_device_data *self)
{
u32 i, ctr = 0;

if (!self || !self->ae_mask)
return 0;

for (i = 0; i < ADF_C3XXX_MAX_ACCELENGINES; i++) {
if (self->ae_mask & (1 << i))
ctr++;
}
return ctr;
}

static u32 get_misc_bar_id(struct adf_hw_device_data *self)
{
return ADF_C3XXX_PMISC_BAR;
@ -88,12 +60,12 @@ static u32 get_etr_bar_id(struct adf_hw_device_data *self)

static u32 get_sram_bar_id(struct adf_hw_device_data *self)
{
return 0;
return ADF_C3XXX_SRAM_BAR;
}

static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
{
int aes = get_num_aes(self);
int aes = self->get_num_aes(self);

if (aes == 6)
return DEV_SKU_4;
@ -106,41 +78,6 @@ static const u32 *adf_get_arbiter_mapping(void)
return thrd_to_arb_map;
}

static u32 get_pf2vf_offset(u32 i)
{
return ADF_C3XXX_PF2VF_OFFSET(i);
}

static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
{
struct adf_hw_device_data *hw_device = accel_dev->hw_device;
struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_C3XXX_PMISC_BAR];
unsigned long accel_mask = hw_device->accel_mask;
unsigned long ae_mask = hw_device->ae_mask;
void __iomem *csr = misc_bar->virt_addr;
unsigned int val, i;

/* Enable Accel Engine error detection & correction */
for_each_set_bit(i, &ae_mask, GET_MAX_ACCELENGINES(accel_dev)) {
val = ADF_CSR_RD(csr, ADF_C3XXX_AE_CTX_ENABLES(i));
val |= ADF_C3XXX_ENABLE_AE_ECC_ERR;
ADF_CSR_WR(csr, ADF_C3XXX_AE_CTX_ENABLES(i), val);
val = ADF_CSR_RD(csr, ADF_C3XXX_AE_MISC_CONTROL(i));
val |= ADF_C3XXX_ENABLE_AE_ECC_PARITY_CORR;
ADF_CSR_WR(csr, ADF_C3XXX_AE_MISC_CONTROL(i), val);
}

/* Enable shared memory error detection & correction */
for_each_set_bit(i, &accel_mask, ADF_C3XXX_MAX_ACCELERATORS) {
val = ADF_CSR_RD(csr, ADF_C3XXX_UERRSSMSH(i));
val |= ADF_C3XXX_ERRSSMSH_EN;
ADF_CSR_WR(csr, ADF_C3XXX_UERRSSMSH(i), val);
val = ADF_CSR_RD(csr, ADF_C3XXX_CERRSSMSH(i));
val |= ADF_C3XXX_ERRSSMSH_EN;
ADF_CSR_WR(csr, ADF_C3XXX_CERRSSMSH(i), val);
}
}

static void adf_enable_ints(struct adf_accel_dev *accel_dev)
{
void __iomem *addr;
@ -154,13 +91,6 @@ static void adf_enable_ints(struct adf_accel_dev *accel_dev)
ADF_C3XXX_SMIA1_MASK);
}

static int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev)
{
spin_lock_init(&accel_dev->pf.vf2pf_ints_lock);

return 0;
}

static void configure_iov_threads(struct adf_accel_dev *accel_dev, bool enable)
{
adf_gen2_cfg_iov_thds(accel_dev, enable,
@ -177,16 +107,16 @@ void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data)
hw_data->num_accel = ADF_C3XXX_MAX_ACCELERATORS;
hw_data->num_logical_accel = 1;
hw_data->num_engines = ADF_C3XXX_MAX_ACCELENGINES;
hw_data->tx_rx_gap = ADF_C3XXX_RX_RINGS_OFFSET;
hw_data->tx_rings_mask = ADF_C3XXX_TX_RINGS_MASK;
hw_data->tx_rx_gap = ADF_GEN2_RX_RINGS_OFFSET;
hw_data->tx_rings_mask = ADF_GEN2_TX_RINGS_MASK;
hw_data->alloc_irq = adf_isr_resource_alloc;
hw_data->free_irq = adf_isr_resource_free;
hw_data->enable_error_correction = adf_enable_error_correction;
hw_data->enable_error_correction = adf_gen2_enable_error_correction;
hw_data->get_accel_mask = get_accel_mask;
hw_data->get_ae_mask = get_ae_mask;
hw_data->get_accel_cap = adf_gen2_get_accel_cap;
hw_data->get_num_accels = get_num_accels;
hw_data->get_num_aes = get_num_aes;
hw_data->get_num_accels = adf_gen2_get_num_accels;
hw_data->get_num_aes = adf_gen2_get_num_aes;
hw_data->get_sram_bar_id = get_sram_bar_id;
hw_data->get_etr_bar_id = get_etr_bar_id;
hw_data->get_misc_bar_id = get_misc_bar_id;
@ -205,7 +135,10 @@ void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data)
hw_data->enable_ints = adf_enable_ints;
hw_data->reset_device = adf_reset_flr;
hw_data->set_ssm_wdtimer = adf_gen2_set_ssm_wdtimer;
hw_data->get_pf2vf_offset = get_pf2vf_offset;
hw_data->get_pf2vf_offset = adf_gen2_get_pf2vf_offset;
hw_data->get_vf2pf_sources = adf_gen2_get_vf2pf_sources;
hw_data->enable_vf2pf_interrupts = adf_gen2_enable_vf2pf_interrupts;
hw_data->disable_vf2pf_interrupts = adf_gen2_disable_vf2pf_interrupts;
hw_data->enable_pfvf_comms = adf_enable_pf2vf_comms;
hw_data->disable_iov = adf_disable_sriov;
hw_data->min_iov_compat_ver = ADF_PFVF_COMPAT_THIS_VERSION;

@ -6,8 +6,7 @@
/* PCIe configuration space */
#define ADF_C3XXX_PMISC_BAR 0
#define ADF_C3XXX_ETR_BAR 1
#define ADF_C3XXX_RX_RINGS_OFFSET 8
#define ADF_C3XXX_TX_RINGS_MASK 0xFF
#define ADF_C3XXX_SRAM_BAR 0
#define ADF_C3XXX_MAX_ACCELERATORS 3
#define ADF_C3XXX_MAX_ACCELENGINES 6
#define ADF_C3XXX_ACCELERATORS_REG_OFFSET 16
@ -19,16 +18,6 @@
#define ADF_C3XXX_SMIA0_MASK 0xFFFF
#define ADF_C3XXX_SMIA1_MASK 0x1
#define ADF_C3XXX_SOFTSTRAP_CSR_OFFSET 0x2EC
/* Error detection and correction */
#define ADF_C3XXX_AE_CTX_ENABLES(i) (i * 0x1000 + 0x20818)
#define ADF_C3XXX_AE_MISC_CONTROL(i) (i * 0x1000 + 0x20960)
#define ADF_C3XXX_ENABLE_AE_ECC_ERR BIT(28)
#define ADF_C3XXX_ENABLE_AE_ECC_PARITY_CORR (BIT(24) | BIT(12))
#define ADF_C3XXX_UERRSSMSH(i) (i * 0x4000 + 0x18)
#define ADF_C3XXX_CERRSSMSH(i) (i * 0x4000 + 0x10)
#define ADF_C3XXX_ERRSSMSH_EN BIT(3)

#define ADF_C3XXX_PF2VF_OFFSET(i) (0x3A000 + 0x280 + ((i) * 0x04))

/* AE to function mapping */
#define ADF_C3XXX_AE2FUNC_MAP_GRP_A_NUM_REGS 48

@ -48,34 +48,6 @@ static u32 get_ae_mask(struct adf_hw_device_data *self)
return ~(fuses | straps) & ADF_C62X_ACCELENGINES_MASK;
}

static u32 get_num_accels(struct adf_hw_device_data *self)
{
u32 i, ctr = 0;

if (!self || !self->accel_mask)
return 0;

for (i = 0; i < ADF_C62X_MAX_ACCELERATORS; i++) {
if (self->accel_mask & (1 << i))
ctr++;
}
return ctr;
}

static u32 get_num_aes(struct adf_hw_device_data *self)
{
u32 i, ctr = 0;

if (!self || !self->ae_mask)
return 0;

for (i = 0; i < ADF_C62X_MAX_ACCELENGINES; i++) {
if (self->ae_mask & (1 << i))
ctr++;
}
return ctr;
}

static u32 get_misc_bar_id(struct adf_hw_device_data *self)
{
return ADF_C62X_PMISC_BAR;
@ -93,7 +65,7 @@ static u32 get_sram_bar_id(struct adf_hw_device_data *self)

static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
{
int aes = get_num_aes(self);
int aes = self->get_num_aes(self);

if (aes == 8)
return DEV_SKU_2;
@ -108,41 +80,6 @@ static const u32 *adf_get_arbiter_mapping(void)
return thrd_to_arb_map;
}

static u32 get_pf2vf_offset(u32 i)
{
return ADF_C62X_PF2VF_OFFSET(i);
}

static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
{
struct adf_hw_device_data *hw_device = accel_dev->hw_device;
struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_C62X_PMISC_BAR];
unsigned long accel_mask = hw_device->accel_mask;
unsigned long ae_mask = hw_device->ae_mask;
void __iomem *csr = misc_bar->virt_addr;
unsigned int val, i;

/* Enable Accel Engine error detection & correction */
for_each_set_bit(i, &ae_mask, GET_MAX_ACCELENGINES(accel_dev)) {
val = ADF_CSR_RD(csr, ADF_C62X_AE_CTX_ENABLES(i));
val |= ADF_C62X_ENABLE_AE_ECC_ERR;
ADF_CSR_WR(csr, ADF_C62X_AE_CTX_ENABLES(i), val);
val = ADF_CSR_RD(csr, ADF_C62X_AE_MISC_CONTROL(i));
val |= ADF_C62X_ENABLE_AE_ECC_PARITY_CORR;
ADF_CSR_WR(csr, ADF_C62X_AE_MISC_CONTROL(i), val);
}

/* Enable shared memory error detection & correction */
for_each_set_bit(i, &accel_mask, ADF_C62X_MAX_ACCELERATORS) {
val = ADF_CSR_RD(csr, ADF_C62X_UERRSSMSH(i));
val |= ADF_C62X_ERRSSMSH_EN;
ADF_CSR_WR(csr, ADF_C62X_UERRSSMSH(i), val);
val = ADF_CSR_RD(csr, ADF_C62X_CERRSSMSH(i));
val |= ADF_C62X_ERRSSMSH_EN;
ADF_CSR_WR(csr, ADF_C62X_CERRSSMSH(i), val);
}
}

static void adf_enable_ints(struct adf_accel_dev *accel_dev)
{
void __iomem *addr;
@ -156,13 +93,6 @@ static void adf_enable_ints(struct adf_accel_dev *accel_dev)
ADF_C62X_SMIA1_MASK);
}

static int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev)
{
spin_lock_init(&accel_dev->pf.vf2pf_ints_lock);

return 0;
}

static void configure_iov_threads(struct adf_accel_dev *accel_dev, bool enable)
{
adf_gen2_cfg_iov_thds(accel_dev, enable,
@ -179,16 +109,16 @@ void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data)
hw_data->num_accel = ADF_C62X_MAX_ACCELERATORS;
hw_data->num_logical_accel = 1;
hw_data->num_engines = ADF_C62X_MAX_ACCELENGINES;
hw_data->tx_rx_gap = ADF_C62X_RX_RINGS_OFFSET;
hw_data->tx_rings_mask = ADF_C62X_TX_RINGS_MASK;
hw_data->tx_rx_gap = ADF_GEN2_RX_RINGS_OFFSET;
hw_data->tx_rings_mask = ADF_GEN2_TX_RINGS_MASK;
hw_data->alloc_irq = adf_isr_resource_alloc;
hw_data->free_irq = adf_isr_resource_free;
hw_data->enable_error_correction = adf_enable_error_correction;
hw_data->enable_error_correction = adf_gen2_enable_error_correction;
hw_data->get_accel_mask = get_accel_mask;
hw_data->get_ae_mask = get_ae_mask;
hw_data->get_accel_cap = adf_gen2_get_accel_cap;
hw_data->get_num_accels = get_num_accels;
hw_data->get_num_aes = get_num_aes;
hw_data->get_num_accels = adf_gen2_get_num_accels;
hw_data->get_num_aes = adf_gen2_get_num_aes;
hw_data->get_sram_bar_id = get_sram_bar_id;
hw_data->get_etr_bar_id = get_etr_bar_id;
hw_data->get_misc_bar_id = get_misc_bar_id;
@ -207,7 +137,10 @@ void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data)
hw_data->enable_ints = adf_enable_ints;
hw_data->reset_device = adf_reset_flr;
hw_data->set_ssm_wdtimer = adf_gen2_set_ssm_wdtimer;
hw_data->get_pf2vf_offset = get_pf2vf_offset;
hw_data->get_pf2vf_offset = adf_gen2_get_pf2vf_offset;
hw_data->get_vf2pf_sources = adf_gen2_get_vf2pf_sources;
hw_data->enable_vf2pf_interrupts = adf_gen2_enable_vf2pf_interrupts;
hw_data->disable_vf2pf_interrupts = adf_gen2_disable_vf2pf_interrupts;
hw_data->enable_pfvf_comms = adf_enable_pf2vf_comms;
hw_data->disable_iov = adf_disable_sriov;
hw_data->min_iov_compat_ver = ADF_PFVF_COMPAT_THIS_VERSION;

@ -7,8 +7,6 @@
#define ADF_C62X_SRAM_BAR 0
#define ADF_C62X_PMISC_BAR 1
#define ADF_C62X_ETR_BAR 2
#define ADF_C62X_RX_RINGS_OFFSET 8
#define ADF_C62X_TX_RINGS_MASK 0xFF
#define ADF_C62X_MAX_ACCELERATORS 5
#define ADF_C62X_MAX_ACCELENGINES 10
#define ADF_C62X_ACCELERATORS_REG_OFFSET 16
@ -20,16 +18,6 @@
#define ADF_C62X_SMIA0_MASK 0xFFFF
#define ADF_C62X_SMIA1_MASK 0x1
#define ADF_C62X_SOFTSTRAP_CSR_OFFSET 0x2EC
/* Error detection and correction */
#define ADF_C62X_AE_CTX_ENABLES(i) (i * 0x1000 + 0x20818)
#define ADF_C62X_AE_MISC_CONTROL(i) (i * 0x1000 + 0x20960)
#define ADF_C62X_ENABLE_AE_ECC_ERR BIT(28)
#define ADF_C62X_ENABLE_AE_ECC_PARITY_CORR (BIT(24) | BIT(12))
#define ADF_C62X_UERRSSMSH(i) (i * 0x4000 + 0x18)
#define ADF_C62X_CERRSSMSH(i) (i * 0x4000 + 0x10)
#define ADF_C62X_ERRSSMSH_EN BIT(3)

#define ADF_C62X_PF2VF_OFFSET(i) (0x3A000 + 0x280 + ((i) * 0x04))

/* AE to function mapping */
#define ADF_C62X_AE2FUNC_MAP_GRP_A_NUM_REGS 80

@ -42,13 +42,17 @@ struct adf_bar {
resource_size_t base_addr;
void __iomem *virt_addr;
resource_size_t size;
} __packed;
};

struct adf_irq {
bool enabled;
char name[ADF_MAX_MSIX_VECTOR_NAME];
};

struct adf_accel_msix {
struct msix_entry *entries;
char **names;
struct adf_irq *irqs;
u32 num_entries;
} __packed;
};

struct adf_accel_pci {
struct pci_dev *pci_dev;
@ -56,7 +60,7 @@ struct adf_accel_pci {
struct adf_bar pci_bars[ADF_PCI_MAX_BARS];
u8 revid;
u8 sku;
} __packed;
};

enum dev_state {
DEV_DOWN = 0,
@ -96,7 +100,7 @@ struct adf_hw_device_class {
const char *name;
const enum adf_device_type type;
u32 instances;
} __packed;
};

struct arb_info {
u32 arb_cfg;
@ -166,12 +170,18 @@ struct adf_hw_device_data {
int (*init_arb)(struct adf_accel_dev *accel_dev);
void (*exit_arb)(struct adf_accel_dev *accel_dev);
const u32 *(*get_arb_mapping)(void);
int (*init_device)(struct adf_accel_dev *accel_dev);
void (*disable_iov)(struct adf_accel_dev *accel_dev);
void (*configure_iov_threads)(struct adf_accel_dev *accel_dev,
bool enable);
void (*enable_ints)(struct adf_accel_dev *accel_dev);
void (*set_ssm_wdtimer)(struct adf_accel_dev *accel_dev);
int (*enable_pfvf_comms)(struct adf_accel_dev *accel_dev);
u32 (*get_vf2pf_sources)(void __iomem *pmisc_addr);
void (*enable_vf2pf_interrupts)(void __iomem *pmisc_bar_addr,
u32 vf_mask);
void (*disable_vf2pf_interrupts)(void __iomem *pmisc_bar_addr,
u32 vf_mask);
void (*reset_device)(struct adf_accel_dev *accel_dev);
void (*set_msix_rttable)(struct adf_accel_dev *accel_dev);
char *(*uof_get_name)(u32 obj_num);
@ -195,7 +205,7 @@ struct adf_hw_device_data {
u8 num_logical_accel;
u8 num_engines;
u8 min_iov_compat_ver;
} __packed;
};

/* CSR write macro */
#define ADF_CSR_WR(csr_base, csr_offset, val) \
@ -251,7 +261,8 @@ struct adf_accel_dev {
struct adf_accel_vf_info *vf_info;
} pf;
struct {
char *irq_name;
bool irq_enabled;
char irq_name[ADF_MAX_MSIX_VECTOR_NAME];
struct tasklet_struct pf2vf_bh_tasklet;
struct mutex vf2pf_lock; /* protect CSR access */
struct completion iov_msg_completion;
@ -261,5 +272,5 @@ struct adf_accel_dev {
};
bool is_vf;
u32 accel_id;
} __packed;
};
#endif

@ -62,7 +62,6 @@ int adf_dev_start(struct adf_accel_dev *accel_dev);
void adf_dev_stop(struct adf_accel_dev *accel_dev);
void adf_dev_shutdown(struct adf_accel_dev *accel_dev);

int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr);
void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev);
int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev);
void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info);
@ -197,10 +196,11 @@ void adf_disable_vf2pf_interrupts_irq(struct adf_accel_dev *accel_dev,
u32 vf_mask);
void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
u32 vf_mask);
int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev);
void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
void adf_schedule_vf2pf_handler(struct adf_accel_vf_info *vf_info);

int adf_send_vf2pf_msg(struct adf_accel_dev *accel_dev, u32 msg);
int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev);
void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev);
int adf_init_pf_wq(void);
@ -211,6 +211,11 @@ void adf_flush_vf_wq(struct adf_accel_dev *accel_dev);
#else
#define adf_sriov_configure NULL

static inline int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev)
{
return 0;
}

static inline void adf_disable_sriov(struct adf_accel_dev *accel_dev)
{
}

@ -4,6 +4,104 @@
#include "icp_qat_hw.h"
#include <linux/pci.h>

#define ADF_GEN2_PF2VF_OFFSET(i) (0x3A000 + 0x280 + ((i) * 0x04))

u32 adf_gen2_get_pf2vf_offset(u32 i)
{
return ADF_GEN2_PF2VF_OFFSET(i);
}
EXPORT_SYMBOL_GPL(adf_gen2_get_pf2vf_offset);

u32 adf_gen2_get_vf2pf_sources(void __iomem *pmisc_addr)
{
u32 errsou3, errmsk3, vf_int_mask;

/* Get the interrupt sources triggered by VFs */
errsou3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRSOU3);
vf_int_mask = ADF_GEN2_ERR_REG_VF2PF(errsou3);

/* To avoid adding duplicate entries to work queue, clear
* vf_int_mask_sets bits that are already masked in ERRMSK register.
*/
errmsk3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3);
vf_int_mask &= ~ADF_GEN2_ERR_REG_VF2PF(errmsk3);

return vf_int_mask;
}
EXPORT_SYMBOL_GPL(adf_gen2_get_vf2pf_sources);

void adf_gen2_enable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
{
/* Enable VF2PF Messaging Ints - VFs 0 through 15 per vf_mask[15:0] */
if (vf_mask & 0xFFFF) {
u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3)
& ~ADF_GEN2_ERR_MSK_VF2PF(vf_mask);
ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, val);
}
}
EXPORT_SYMBOL_GPL(adf_gen2_enable_vf2pf_interrupts);

void adf_gen2_disable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
{
/* Disable VF2PF interrupts for VFs 0 through 15 per vf_mask[15:0] */
if (vf_mask & 0xFFFF) {
u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3)
| ADF_GEN2_ERR_MSK_VF2PF(vf_mask);
ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, val);
}
}
EXPORT_SYMBOL_GPL(adf_gen2_disable_vf2pf_interrupts);

u32 adf_gen2_get_num_accels(struct adf_hw_device_data *self)
{
if (!self || !self->accel_mask)
return 0;

return hweight16(self->accel_mask);
}
EXPORT_SYMBOL_GPL(adf_gen2_get_num_accels);

u32 adf_gen2_get_num_aes(struct adf_hw_device_data *self)
{
if (!self || !self->ae_mask)
return 0;

return hweight32(self->ae_mask);
}
EXPORT_SYMBOL_GPL(adf_gen2_get_num_aes);

void adf_gen2_enable_error_correction(struct adf_accel_dev *accel_dev)
{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct adf_bar *misc_bar = &GET_BARS(accel_dev)
[hw_data->get_misc_bar_id(hw_data)];
unsigned long accel_mask = hw_data->accel_mask;
unsigned long ae_mask = hw_data->ae_mask;
void __iomem *csr = misc_bar->virt_addr;
unsigned int val, i;

/* Enable Accel Engine error detection & correction */
for_each_set_bit(i, &ae_mask, hw_data->num_engines) {
val = ADF_CSR_RD(csr, ADF_GEN2_AE_CTX_ENABLES(i));
val |= ADF_GEN2_ENABLE_AE_ECC_ERR;
ADF_CSR_WR(csr, ADF_GEN2_AE_CTX_ENABLES(i), val);
val = ADF_CSR_RD(csr, ADF_GEN2_AE_MISC_CONTROL(i));
val |= ADF_GEN2_ENABLE_AE_ECC_PARITY_CORR;
ADF_CSR_WR(csr, ADF_GEN2_AE_MISC_CONTROL(i), val);
}

/* Enable shared memory error detection & correction */
for_each_set_bit(i, &accel_mask, hw_data->num_accel) {
val = ADF_CSR_RD(csr, ADF_GEN2_UERRSSMSH(i));
val |= ADF_GEN2_ERRSSMSH_EN;
ADF_CSR_WR(csr, ADF_GEN2_UERRSSMSH(i), val);
val = ADF_CSR_RD(csr, ADF_GEN2_CERRSSMSH(i));
val |= ADF_GEN2_ERRSSMSH_EN;
ADF_CSR_WR(csr, ADF_GEN2_CERRSSMSH(i), val);
}
}
EXPORT_SYMBOL_GPL(adf_gen2_enable_error_correction);

void adf_gen2_cfg_iov_thds(struct adf_accel_dev *accel_dev, bool enable,
int num_a_regs, int num_b_regs)
{

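The gen2 helpers above also replace the open-coded bit-counting loops deleted from the c3xxx and c62x drivers with hweight16()/hweight32(). A one-line sketch of the equivalence; demo_count_enabled() is illustrative:

#include <linux/bitops.h>
#include <linux/types.h>

/* Sketch: the loop "for each bit set in mask, ctr++" is just a
 * population count.
 */
static u32 demo_count_enabled(u32 mask)
{
	return hweight32(mask);
}
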
@ -22,6 +22,8 @@
#define ADF_RING_CSR_INT_FLAG_AND_COL 0x184
#define ADF_RING_CSR_INT_COL_CTL_ENABLE 0x80000000
#define ADF_RING_BUNDLE_SIZE 0x1000
#define ADF_GEN2_RX_RINGS_OFFSET 8
#define ADF_GEN2_TX_RINGS_MASK 0xFF

#define BUILD_RING_BASE_ADDR(addr, size) \
(((addr) >> 6) & (GENMASK_ULL(63, 0) << (size)))
@ -125,6 +127,31 @@ do { \
#define ADF_SSMWDT(i) (ADF_SSMWDT_OFFSET + ((i) * 0x4000))
#define ADF_SSMWDTPKE(i) (ADF_SSMWDTPKE_OFFSET + ((i) * 0x4000))

/* Error detection and correction */
#define ADF_GEN2_AE_CTX_ENABLES(i) ((i) * 0x1000 + 0x20818)
#define ADF_GEN2_AE_MISC_CONTROL(i) ((i) * 0x1000 + 0x20960)
#define ADF_GEN2_ENABLE_AE_ECC_ERR BIT(28)
#define ADF_GEN2_ENABLE_AE_ECC_PARITY_CORR (BIT(24) | BIT(12))
#define ADF_GEN2_UERRSSMSH(i) ((i) * 0x4000 + 0x18)
#define ADF_GEN2_CERRSSMSH(i) ((i) * 0x4000 + 0x10)
#define ADF_GEN2_ERRSSMSH_EN BIT(3)

/* VF2PF interrupts */
#define ADF_GEN2_ERRSOU3 (0x3A000 + 0x0C)
#define ADF_GEN2_ERRSOU5 (0x3A000 + 0xD8)
#define ADF_GEN2_ERRMSK3 (0x3A000 + 0x1C)
#define ADF_GEN2_ERRMSK5 (0x3A000 + 0xDC)
#define ADF_GEN2_ERR_REG_VF2PF(vf_src) (((vf_src) & 0x01FFFE00) >> 9)
#define ADF_GEN2_ERR_MSK_VF2PF(vf_mask) (((vf_mask) & 0xFFFF) << 9)

u32 adf_gen2_get_pf2vf_offset(u32 i);
u32 adf_gen2_get_vf2pf_sources(void __iomem *pmisc_bar);
void adf_gen2_enable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask);
void adf_gen2_disable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask);

u32 adf_gen2_get_num_accels(struct adf_hw_device_data *self);
u32 adf_gen2_get_num_aes(struct adf_hw_device_data *self);
void adf_gen2_enable_error_correction(struct adf_accel_dev *accel_dev);
void adf_gen2_cfg_iov_thds(struct adf_accel_dev *accel_dev, bool enable,
int num_a_regs, int num_b_regs);
void adf_gen2_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops);

@ -79,6 +79,11 @@ int adf_dev_init(struct adf_accel_dev *accel_dev)
return -EFAULT;
}

if (hw_data->init_device && hw_data->init_device(accel_dev)) {
dev_err(&GET_DEV(accel_dev), "Failed to initialize device\n");
return -EFAULT;
}

if (hw_data->init_admin_comms && hw_data->init_admin_comms(accel_dev)) {
dev_err(&GET_DEV(accel_dev), "Failed initialize admin comms\n");
return -EFAULT;

@ -16,46 +16,31 @@
|
||||
#include "adf_transport_internal.h"
|
||||
|
||||
#define ADF_MAX_NUM_VFS 32
|
||||
#define ADF_ERRSOU3 (0x3A000 + 0x0C)
|
||||
#define ADF_ERRSOU5 (0x3A000 + 0xD8)
|
||||
#define ADF_ERRMSK3 (0x3A000 + 0x1C)
|
||||
#define ADF_ERRMSK5 (0x3A000 + 0xDC)
|
||||
#define ADF_ERR_REG_VF2PF_L(vf_src) (((vf_src) & 0x01FFFE00) >> 9)
|
||||
#define ADF_ERR_REG_VF2PF_U(vf_src) (((vf_src) & 0x0000FFFF) << 16)
|
||||
|
||||
static int adf_enable_msix(struct adf_accel_dev *accel_dev)
|
||||
{
|
||||
struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
|
||||
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
|
||||
u32 msix_num_entries = 1;
|
||||
u32 msix_num_entries = hw_data->num_banks + 1;
|
||||
int ret;
|
||||
|
||||
if (hw_data->set_msix_rttable)
|
||||
hw_data->set_msix_rttable(accel_dev);
|
||||
|
||||
/* If SR-IOV is disabled, add entries for each bank */
|
||||
if (!accel_dev->pf.vf_info) {
|
||||
int i;
|
||||
|
||||
msix_num_entries += hw_data->num_banks;
|
||||
for (i = 0; i < msix_num_entries; i++)
|
||||
pci_dev_info->msix_entries.entries[i].entry = i;
|
||||
} else {
|
||||
pci_dev_info->msix_entries.entries[0].entry =
|
||||
hw_data->num_banks;
|
||||
}
|
||||
|
||||
if (pci_enable_msix_exact(pci_dev_info->pci_dev,
|
||||
pci_dev_info->msix_entries.entries,
|
||||
msix_num_entries)) {
|
||||
dev_err(&GET_DEV(accel_dev), "Failed to enable MSI-X IRQ(s)\n");
|
||||
return -EFAULT;
|
||||
ret = pci_alloc_irq_vectors(pci_dev_info->pci_dev, msix_num_entries,
|
||||
msix_num_entries, PCI_IRQ_MSIX);
|
||||
if (unlikely(ret < 0)) {
|
||||
dev_err(&GET_DEV(accel_dev),
|
||||
"Failed to allocate %d MSI-X vectors\n",
|
||||
msix_num_entries);
|
||||
return ret;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void adf_disable_msix(struct adf_accel_pci *pci_dev_info)
|
||||
{
|
||||
pci_disable_msix(pci_dev_info->pci_dev);
|
||||
pci_free_irq_vectors(pci_dev_info->pci_dev);
|
||||
}
|
||||
|
||||
static irqreturn_t adf_msix_isr_bundle(int irq, void *bank_ptr)
|
||||
@ -80,22 +65,10 @@ static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
|
||||
struct adf_bar *pmisc =
|
||||
&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
|
||||
void __iomem *pmisc_addr = pmisc->virt_addr;
|
||||
u32 errsou3, errsou5, errmsk3, errmsk5;
|
||||
unsigned long vf_mask;
|
||||
|
||||
/* Get the interrupt sources triggered by VFs */
|
||||
errsou3 = ADF_CSR_RD(pmisc_addr, ADF_ERRSOU3);
|
||||
errsou5 = ADF_CSR_RD(pmisc_addr, ADF_ERRSOU5);
|
||||
vf_mask = ADF_ERR_REG_VF2PF_L(errsou3);
|
||||
vf_mask |= ADF_ERR_REG_VF2PF_U(errsou5);
|
||||
|
||||
/* To avoid adding duplicate entries to work queue, clear
|
||||
* vf_int_mask_sets bits that are already masked in ERRMSK register.
|
||||
*/
|
||||
errmsk3 = ADF_CSR_RD(pmisc_addr, ADF_ERRMSK3);
|
||||
errmsk5 = ADF_CSR_RD(pmisc_addr, ADF_ERRMSK5);
|
||||
vf_mask &= ~ADF_ERR_REG_VF2PF_L(errmsk3);
|
||||
vf_mask &= ~ADF_ERR_REG_VF2PF_U(errmsk5);
|
||||
vf_mask = hw_data->get_vf2pf_sources(pmisc_addr);
|
||||
|
||||
if (vf_mask) {
|
||||
struct adf_accel_vf_info *vf_info;
|
||||
@ -135,13 +108,39 @@ static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
|
||||
return IRQ_NONE;
|
||||
}
|
||||
|
||||
static void adf_free_irqs(struct adf_accel_dev *accel_dev)
|
||||
{
|
||||
struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
|
||||
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
|
||||
struct adf_irq *irqs = pci_dev_info->msix_entries.irqs;
|
||||
struct adf_etr_data *etr_data = accel_dev->transport;
|
||||
int clust_irq = hw_data->num_banks;
|
||||
int irq, i = 0;
|
||||
|
||||
if (pci_dev_info->msix_entries.num_entries > 1) {
|
||||
for (i = 0; i < hw_data->num_banks; i++) {
|
||||
if (irqs[i].enabled) {
|
||||
irq = pci_irq_vector(pci_dev_info->pci_dev, i);
|
||||
irq_set_affinity_hint(irq, NULL);
|
||||
free_irq(irq, &etr_data->banks[i]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (irqs[i].enabled) {
|
||||
irq = pci_irq_vector(pci_dev_info->pci_dev, clust_irq);
|
||||
free_irq(irq, accel_dev);
|
||||
}
|
||||
}
|
||||
|
||||
static int adf_request_irqs(struct adf_accel_dev *accel_dev)
|
||||
{
|
||||
struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
|
||||
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
|
||||
struct msix_entry *msixe = pci_dev_info->msix_entries.entries;
|
||||
struct adf_irq *irqs = pci_dev_info->msix_entries.irqs;
|
||||
struct adf_etr_data *etr_data = accel_dev->transport;
|
||||
int ret, i = 0;
|
||||
int clust_irq = hw_data->num_banks;
|
||||
int ret, irq, i = 0;
|
||||
char *name;
|
||||
|
||||
/* Request msix irq for all banks unless SR-IOV enabled */
|
||||
@ -150,105 +149,82 @@ static int adf_request_irqs(struct adf_accel_dev *accel_dev)
|
||||
struct adf_etr_bank_data *bank = &etr_data->banks[i];
|
||||
unsigned int cpu, cpus = num_online_cpus();
|
||||
|
||||
name = *(pci_dev_info->msix_entries.names + i);
|
||||
name = irqs[i].name;
|
||||
snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
|
||||
"qat%d-bundle%d", accel_dev->accel_id, i);
|
||||
ret = request_irq(msixe[i].vector,
|
||||
adf_msix_isr_bundle, 0, name, bank);
|
||||
irq = pci_irq_vector(pci_dev_info->pci_dev, i);
|
||||
if (unlikely(irq < 0)) {
|
||||
dev_err(&GET_DEV(accel_dev),
|
||||
"Failed to get IRQ number of device vector %d - %s\n",
|
||||
i, name);
|
||||
ret = irq;
|
||||
goto err;
|
||||
}
|
||||
ret = request_irq(irq, adf_msix_isr_bundle, 0,
|
||||
&name[0], bank);
|
||||
if (ret) {
|
||||
dev_err(&GET_DEV(accel_dev),
|
||||
"failed to enable irq %d for %s\n",
|
||||
msixe[i].vector, name);
|
||||
return ret;
|
||||
"Failed to allocate IRQ %d for %s\n",
|
||||
irq, name);
|
||||
goto err;
|
||||
}
|
||||
|
||||
cpu = ((accel_dev->accel_id * hw_data->num_banks) +
|
||||
i) % cpus;
|
||||
irq_set_affinity_hint(msixe[i].vector,
|
||||
get_cpu_mask(cpu));
|
||||
irq_set_affinity_hint(irq, get_cpu_mask(cpu));
|
||||
irqs[i].enabled = true;
|
||||
}
|
||||
}
|
||||
|
||||
/* Request msix irq for AE */
|
||||
name = *(pci_dev_info->msix_entries.names + i);
|
||||
name = irqs[i].name;
|
||||
snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
|
||||
"qat%d-ae-cluster", accel_dev->accel_id);
|
||||
ret = request_irq(msixe[i].vector, adf_msix_isr_ae, 0, name, accel_dev);
|
||||
irq = pci_irq_vector(pci_dev_info->pci_dev, clust_irq);
|
||||
if (unlikely(irq < 0)) {
|
||||
dev_err(&GET_DEV(accel_dev),
|
||||
"Failed to get IRQ number of device vector %d - %s\n",
|
||||
i, name);
|
||||
ret = irq;
|
||||
goto err;
|
||||
}
|
||||
ret = request_irq(irq, adf_msix_isr_ae, 0, &name[0], accel_dev);
|
||||
if (ret) {
|
||||
dev_err(&GET_DEV(accel_dev),
|
||||
"failed to enable irq %d, for %s\n",
|
||||
msixe[i].vector, name);
|
||||
return ret;
|
||||
"Failed to allocate IRQ %d for %s\n", irq, name);
|
||||
goto err;
|
||||
}
|
||||
irqs[i].enabled = true;
|
||||
return ret;
|
||||
err:
|
||||
adf_free_irqs(accel_dev);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void adf_free_irqs(struct adf_accel_dev *accel_dev)
|
||||
static int adf_isr_alloc_msix_vectors_data(struct adf_accel_dev *accel_dev)
|
||||
{
|
||||
struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
|
||||
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
|
||||
struct msix_entry *msixe = pci_dev_info->msix_entries.entries;
|
||||
struct adf_etr_data *etr_data = accel_dev->transport;
|
||||
int i = 0;
|
||||
|
||||
if (pci_dev_info->msix_entries.num_entries > 1) {
|
||||
for (i = 0; i < hw_data->num_banks; i++) {
|
||||
irq_set_affinity_hint(msixe[i].vector, NULL);
|
||||
free_irq(msixe[i].vector, &etr_data->banks[i]);
|
||||
}
|
||||
}
|
||||
irq_set_affinity_hint(msixe[i].vector, NULL);
|
||||
free_irq(msixe[i].vector, accel_dev);
|
||||
}
|
||||
|
||||
static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev)
|
||||
{
|
||||
int i;
|
||||
char **names;
|
||||
struct msix_entry *entries;
|
||||
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
|
||||
u32 msix_num_entries = 1;
|
||||
struct adf_irq *irqs;
|
||||
|
||||
/* If SR-IOV is disabled (vf_info is NULL), add entries for each bank */
|
||||
if (!accel_dev->pf.vf_info)
|
||||
msix_num_entries += hw_data->num_banks;
|
||||
|
||||
entries = kcalloc_node(msix_num_entries, sizeof(*entries),
|
||||
GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev)));
|
||||
if (!entries)
|
||||
irqs = kzalloc_node(msix_num_entries * sizeof(*irqs),
|
||||
GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev)));
|
||||
if (!irqs)
|
||||
return -ENOMEM;
|
||||
|
||||
names = kcalloc(msix_num_entries, sizeof(char *), GFP_KERNEL);
|
||||
if (!names) {
|
||||
kfree(entries);
|
||||
return -ENOMEM;
|
||||
}
|
||||
for (i = 0; i < msix_num_entries; i++) {
|
||||
*(names + i) = kzalloc(ADF_MAX_MSIX_VECTOR_NAME, GFP_KERNEL);
|
||||
if (!(*(names + i)))
|
||||
goto err;
|
||||
}
|
||||
accel_dev->accel_pci_dev.msix_entries.num_entries = msix_num_entries;
|
||||
accel_dev->accel_pci_dev.msix_entries.entries = entries;
|
||||
accel_dev->accel_pci_dev.msix_entries.names = names;
|
||||
accel_dev->accel_pci_dev.msix_entries.irqs = irqs;
|
||||
return 0;
|
||||
err:
|
||||
for (i = 0; i < msix_num_entries; i++)
|
||||
kfree(*(names + i));
|
||||
kfree(entries);
|
||||
kfree(names);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
static void adf_isr_free_msix_entry_table(struct adf_accel_dev *accel_dev)
|
||||
static void adf_isr_free_msix_vectors_data(struct adf_accel_dev *accel_dev)
|
||||
{
|
||||
char **names = accel_dev->accel_pci_dev.msix_entries.names;
|
||||
int i;
|
||||
|
||||
kfree(accel_dev->accel_pci_dev.msix_entries.entries);
|
||||
for (i = 0; i < accel_dev->accel_pci_dev.msix_entries.num_entries; i++)
|
||||
kfree(*(names + i));
|
||||
kfree(names);
|
||||
kfree(accel_dev->accel_pci_dev.msix_entries.irqs);
|
||||
accel_dev->accel_pci_dev.msix_entries.irqs = NULL;
|
||||
}
|
||||
|
||||
static int adf_setup_bh(struct adf_accel_dev *accel_dev)
|
||||
@ -287,7 +263,7 @@ void adf_isr_resource_free(struct adf_accel_dev *accel_dev)
|
||||
adf_free_irqs(accel_dev);
|
||||
adf_cleanup_bh(accel_dev);
|
||||
adf_disable_msix(&accel_dev->accel_pci_dev);
|
||||
adf_isr_free_msix_entry_table(accel_dev);
|
||||
adf_isr_free_msix_vectors_data(accel_dev);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(adf_isr_resource_free);
|
||||
|
||||
@ -303,7 +279,7 @@ int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = adf_isr_alloc_msix_entry_table(accel_dev);
|
||||
ret = adf_isr_alloc_msix_vectors_data(accel_dev);
|
||||
if (ret)
|
||||
goto err_out;
|
||||
|
||||
@ -328,7 +304,7 @@ int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
|
||||
adf_disable_msix(&accel_dev->accel_pci_dev);
|
||||
|
||||
err_free_msix_table:
|
||||
adf_isr_free_msix_entry_table(accel_dev);
|
||||
adf_isr_free_msix_vectors_data(accel_dev);
|
||||
|
||||
err_out:
|
||||
return ret;
|
||||
|
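The net effect of the hunks above is that each MSI-X vector now carries its own bookkeeping (a name buffer plus an enabled flag in struct adf_irq), so the shared err: path can release only vectors that were actually requested. A minimal userspace sketch of that pattern, with stub functions standing in for request_irq()/free_irq() (this is not QAT code):

/* Sketch: mark vectors enabled only after a successful request. */
#include <stdbool.h>
#include <stdio.h>

#define NAME_LEN 16

struct sketch_irq {
	char name[NAME_LEN];
	bool enabled;
};

static int irq_request(const char *name) { (void)name; return 0; } /* stub */
static void irq_release(const char *name) { printf("free %s\n", name); }

static void free_irqs(struct sketch_irq *irqs, int n)
{
	for (int i = 0; i < n; i++)
		if (irqs[i].enabled)           /* skip never-requested vectors */
			irq_release(irqs[i].name);
}

static int request_irqs(struct sketch_irq *irqs, int n)
{
	for (int i = 0; i < n; i++) {
		snprintf(irqs[i].name, NAME_LEN, "bundle%d", i);
		if (irq_request(irqs[i].name))
			goto err;
		irqs[i].enabled = true;        /* mark only after success */
	}
	return 0;
err:
	free_irqs(irqs, n);                    /* frees only enabled vectors */
	return -1;
}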
@ -5,82 +5,51 @@
#include "adf_common_drv.h"
#include "adf_pf2vf_msg.h"

#define ADF_DH895XCC_EP_OFFSET 0x3A000
#define ADF_DH895XCC_ERRMSK3 (ADF_DH895XCC_EP_OFFSET + 0x1C)
#define ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask) ((vf_mask & 0xFFFF) << 9)
#define ADF_DH895XCC_ERRMSK5 (ADF_DH895XCC_EP_OFFSET + 0xDC)
#define ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask) (vf_mask >> 16)

static void __adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
					  u32 vf_mask)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_bar *pmisc =
		&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
	void __iomem *pmisc_addr = pmisc->virt_addr;
	u32 reg;

	/* Enable VF2PF Messaging Ints - VFs 1 through 16 per vf_mask[15:0] */
	if (vf_mask & 0xFFFF) {
		reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK3);
		reg &= ~ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask);
		ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK3, reg);
	}

	/* Enable VF2PF Messaging Ints - VFs 17 through 32 per vf_mask[31:16] */
	if (vf_mask >> 16) {
		reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK5);
		reg &= ~ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask);
		ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK5, reg);
	}
}
#define ADF_PFVF_MSG_COLLISION_DETECT_DELAY 10
#define ADF_PFVF_MSG_ACK_DELAY 2
#define ADF_PFVF_MSG_ACK_MAX_RETRY 100
#define ADF_PFVF_MSG_RETRY_DELAY 5
#define ADF_PFVF_MSG_MAX_RETRIES 3
#define ADF_PFVF_MSG_RESP_TIMEOUT (ADF_PFVF_MSG_ACK_DELAY * \
				   ADF_PFVF_MSG_ACK_MAX_RETRY + \
				   ADF_PFVF_MSG_COLLISION_DETECT_DELAY)

void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u32 misc_bar_id = hw_data->get_misc_bar_id(hw_data);
	struct adf_bar *pmisc = &GET_BARS(accel_dev)[misc_bar_id];
	void __iomem *pmisc_addr = pmisc->virt_addr;
	unsigned long flags;

	spin_lock_irqsave(&accel_dev->pf.vf2pf_ints_lock, flags);
	__adf_enable_vf2pf_interrupts(accel_dev, vf_mask);
	hw_data->enable_vf2pf_interrupts(pmisc_addr, vf_mask);
	spin_unlock_irqrestore(&accel_dev->pf.vf2pf_ints_lock, flags);
}

static void __adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
					   u32 vf_mask)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_bar *pmisc =
		&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
	void __iomem *pmisc_addr = pmisc->virt_addr;
	u32 reg;

	/* Disable VF2PF interrupts for VFs 1 through 16 per vf_mask[15:0] */
	if (vf_mask & 0xFFFF) {
		reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK3) |
			ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask);
		ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK3, reg);
	}

	/* Disable VF2PF interrupts for VFs 17 through 32 per vf_mask[31:16] */
	if (vf_mask >> 16) {
		reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK5) |
			ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask);
		ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK5, reg);
	}
}

void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u32 misc_bar_id = hw_data->get_misc_bar_id(hw_data);
	struct adf_bar *pmisc = &GET_BARS(accel_dev)[misc_bar_id];
	void __iomem *pmisc_addr = pmisc->virt_addr;
	unsigned long flags;

	spin_lock_irqsave(&accel_dev->pf.vf2pf_ints_lock, flags);
	__adf_disable_vf2pf_interrupts(accel_dev, vf_mask);
	hw_data->disable_vf2pf_interrupts(pmisc_addr, vf_mask);
	spin_unlock_irqrestore(&accel_dev->pf.vf2pf_ints_lock, flags);
}

void adf_disable_vf2pf_interrupts_irq(struct adf_accel_dev *accel_dev, u32 vf_mask)
void adf_disable_vf2pf_interrupts_irq(struct adf_accel_dev *accel_dev,
				      u32 vf_mask)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u32 misc_bar_id = hw_data->get_misc_bar_id(hw_data);
	struct adf_bar *pmisc = &GET_BARS(accel_dev)[misc_bar_id];
	void __iomem *pmisc_addr = pmisc->virt_addr;

	spin_lock(&accel_dev->pf.vf2pf_ints_lock);
	__adf_disable_vf2pf_interrupts(accel_dev, vf_mask);
	hw_data->disable_vf2pf_interrupts(pmisc_addr, vf_mask);
	spin_unlock(&accel_dev->pf.vf2pf_ints_lock);
}

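Both helper pairs above are read-modify-write updates on two split mask registers: the low half of the 32-bit VF mask is applied to ERRMSK3 and the high half to ERRMSK5, and a cleared mask bit means the interrupt is unmasked. A standalone sketch of that masking logic, with plain variables standing in for the CSRs (bit positions taken from the macros above):

/* Sketch: split-register VF2PF interrupt masking. */
#include <stdint.h>

static uint32_t errmsk3, errmsk5;   /* stand-ins for ERRMSK3/ERRMSK5 */

#define VF2PF_L(vf_mask) (((vf_mask) & 0xFFFF) << 9) /* low VFs -> ERRMSK3 */
#define VF2PF_U(vf_mask) ((vf_mask) >> 16)           /* high VFs -> ERRMSK5 */

static void enable_vf2pf(uint32_t vf_mask)
{
	if (vf_mask & 0xFFFF)
		errmsk3 &= ~VF2PF_L(vf_mask);   /* clear mask bit = enabled */
	if (vf_mask >> 16)
		errmsk5 &= ~VF2PF_U(vf_mask);
}

static void disable_vf2pf(uint32_t vf_mask)
{
	if (vf_mask & 0xFFFF)
		errmsk3 |= VF2PF_L(vf_mask);    /* set mask bit = masked */
	if (vf_mask >> 16)
		errmsk5 |= VF2PF_U(vf_mask);
}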
@ -117,44 +86,33 @@ static int __adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)

	mutex_lock(lock);

	/* Check if PF2VF CSR is in use by remote function */
	/* Check if the PFVF CSR is in use by remote function */
	val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
	if ((val & remote_in_use_mask) == remote_in_use_pattern) {
		dev_dbg(&GET_DEV(accel_dev),
			"PF2VF CSR in use by remote function\n");
			"PFVF CSR in use by remote function\n");
		ret = -EBUSY;
		goto out;
	}

	/* Attempt to get ownership of PF2VF CSR */
	msg &= ~local_in_use_mask;
	msg |= local_in_use_pattern;
	ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, msg);

	/* Wait in case remote func also attempting to get ownership */
	msleep(ADF_IOV_MSG_COLLISION_DETECT_DELAY);

	val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
	if ((val & local_in_use_mask) != local_in_use_pattern) {
		dev_dbg(&GET_DEV(accel_dev),
			"PF2VF CSR in use by remote - collision detected\n");
		ret = -EBUSY;
		goto out;
	}

	/*
	 * This function now owns the PV2VF CSR. The IN_USE_BY pattern must
	 * remain in the PF2VF CSR for all writes including ACK from remote
	 * until this local function relinquishes the CSR. Send the message
	 * by interrupting the remote.
	 */
	/* Attempt to get ownership of the PFVF CSR */
	ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, msg | int_bit);

	/* Wait for confirmation from remote func it received the message */
	do {
		msleep(ADF_IOV_MSG_ACK_DELAY);
		msleep(ADF_PFVF_MSG_ACK_DELAY);
		val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
	} while ((val & int_bit) && (count++ < ADF_IOV_MSG_ACK_MAX_RETRY));
	} while ((val & int_bit) && (count++ < ADF_PFVF_MSG_ACK_MAX_RETRY));

	if (val != msg) {
		dev_dbg(&GET_DEV(accel_dev),
			"Collision - PFVF CSR overwritten by remote function\n");
		ret = -EIO;
		goto out;
	}

	if (val & int_bit) {
		dev_dbg(&GET_DEV(accel_dev), "ACK not received from remote\n");
@ -162,7 +120,7 @@ static int __adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
		ret = -EIO;
	}

	/* Finished with PF2VF CSR; relinquish it and leave msg in CSR */
	/* Finished with the PFVF CSR; relinquish it and leave msg in CSR */
	ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, val & ~local_in_use_mask);
out:
	mutex_unlock(lock);
@ -170,16 +128,17 @@ static int __adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
}

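__adf_iov_putmsg() implements a small ownership protocol over a single shared CSR: claim the register with an in-use pattern, re-read it to detect a concurrent claim, raise the interrupt bit, and poll until the remote side clears that bit as its ACK. A single-threaded userspace model of the same sequence follows; the field layout here is illustrative, not the real PFVF message format, and with no remote side in the model the ACK never arrives, so the call reports failure (the point is the sequence):

/* Model: claim / collision-check / ring / poll-for-ACK / relinquish. */
#include <stdint.h>
#include <stdio.h>

#define LOCAL_IN_USE_MASK  0x00C0u
#define LOCAL_IN_USE       0x0040u
#define REMOTE_IN_USE_MASK 0x0030u
#define REMOTE_IN_USE      0x0010u
#define INT_BIT            0x0001u

static uint32_t csr;    /* stands in for the shared PFVF CSR */

static int putmsg(uint32_t msg)
{
	int retry;

	/* Busy if the remote side has already claimed the word */
	if ((csr & REMOTE_IN_USE_MASK) == REMOTE_IN_USE)
		return -1;

	/* Claim it, then re-read: a collision overwrites our pattern */
	msg = (msg & ~LOCAL_IN_USE_MASK) | LOCAL_IN_USE;
	csr = msg;
	if ((csr & LOCAL_IN_USE_MASK) != LOCAL_IN_USE)
		return -1;

	/* Ring the remote; it ACKs by clearing INT_BIT */
	csr = msg | INT_BIT;
	for (retry = 0; retry < 100 && (csr & INT_BIT); retry++)
		;   /* the real code sleeps ADF_PFVF_MSG_ACK_DELAY ms per poll */

	/* Relinquish ownership, leaving the payload in the register */
	csr &= ~LOCAL_IN_USE_MASK;
	return (csr & INT_BIT) ? -1 : 0;
}

int main(void)
{
	printf("putmsg -> %d\n", putmsg(0x1200));
	return 0;
}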
/**
 * adf_iov_putmsg() - send PF2VF message
 * adf_iov_putmsg() - send PFVF message
 * @accel_dev:  Pointer to acceleration device.
 * @msg:	Message to send
 * @vf_nr:	VF number to which the message will be sent
 * @vf_nr:	VF number to which the message will be sent if on PF, ignored
 *		otherwise
 *
 * Function sends a message from the PF to a VF
 * Function sends a message through the PFVF channel
 *
 * Return: 0 on success, error code otherwise.
 */
int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
static int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
{
	u32 count = 0;
	int ret;
@ -187,12 +146,77 @@ int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
	do {
		ret = __adf_iov_putmsg(accel_dev, msg, vf_nr);
		if (ret)
			msleep(ADF_IOV_MSG_RETRY_DELAY);
	} while (ret && (count++ < ADF_IOV_MSG_MAX_RETRIES));
			msleep(ADF_PFVF_MSG_RETRY_DELAY);
	} while (ret && (count++ < ADF_PFVF_MSG_MAX_RETRIES));

	return ret;
}

/**
 * adf_send_pf2vf_msg() - send PF to VF message
 * @accel_dev:	Pointer to acceleration device
 * @vf_nr:	VF number to which the message will be sent
 * @msg:	Message to send
 *
 * This function allows the PF to send a message to a specific VF.
 *
 * Return: 0 on success, error code otherwise.
 */
static int adf_send_pf2vf_msg(struct adf_accel_dev *accel_dev, u8 vf_nr, u32 msg)
{
	return adf_iov_putmsg(accel_dev, msg, vf_nr);
}

/**
 * adf_send_vf2pf_msg() - send VF to PF message
 * @accel_dev:	Pointer to acceleration device
 * @msg:	Message to send
 *
 * This function allows the VF to send a message to the PF.
 *
 * Return: 0 on success, error code otherwise.
 */
int adf_send_vf2pf_msg(struct adf_accel_dev *accel_dev, u32 msg)
{
	return adf_iov_putmsg(accel_dev, msg, 0);
}

/**
 * adf_send_vf2pf_req() - send VF2PF request message
 * @accel_dev:	Pointer to acceleration device.
 * @msg:	Request message to send
 *
 * This function sends a message that requires a response from the VF to the PF
 * and waits for a reply.
 *
 * Return: 0 on success, error code otherwise.
 */
static int adf_send_vf2pf_req(struct adf_accel_dev *accel_dev, u32 msg)
{
	unsigned long timeout = msecs_to_jiffies(ADF_PFVF_MSG_RESP_TIMEOUT);
	int ret;

	reinit_completion(&accel_dev->vf.iov_msg_completion);

	/* Send request from VF to PF */
	ret = adf_send_vf2pf_msg(accel_dev, msg);
	if (ret) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to send request msg to PF\n");
		return ret;
	}

	/* Wait for response */
	if (!wait_for_completion_timeout(&accel_dev->vf.iov_msg_completion,
					 timeout)) {
		dev_err(&GET_DEV(accel_dev),
			"PFVF request/response message timeout expired\n");
		return -EIO;
	}

	return 0;
}

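The ADF_PFVF_MSG_RESP_TIMEOUT used by adf_send_vf2pf_req() is derived directly from the polling constants: the worst case is ACK_MAX_RETRY polls at ACK_DELAY ms each, plus the one-off collision-detect delay. A quick check of the arithmetic with the values defined above:

/* Check: 2 ms * 100 polls + 10 ms settle = 210 ms before -EIO. */
#include <stdio.h>

#define ACK_DELAY              2    /* ms per ACK poll */
#define ACK_MAX_RETRY          100  /* polls before giving up */
#define COLLISION_DETECT_DELAY 10   /* ms settle time after claiming the CSR */

int main(void)
{
	int resp_timeout = ACK_DELAY * ACK_MAX_RETRY + COLLISION_DETECT_DELAY;

	printf("ADF_PFVF_MSG_RESP_TIMEOUT = %d ms\n", resp_timeout);
	return 0;
}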
void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info)
{
	struct adf_accel_dev *accel_dev = vf_info->accel_dev;
@ -204,6 +228,11 @@ void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info)

	/* Read message from the VF */
	msg = ADF_CSR_RD(pmisc_addr, hw_data->get_pf2vf_offset(vf_nr));
	if (!(msg & ADF_VF2PF_INT)) {
		dev_info(&GET_DEV(accel_dev),
			 "Spurious VF2PF interrupt, msg %X. Ignored\n", msg);
		goto out;
	}

	/* To ACK, clear the VF2PFINT bit */
	msg &= ~ADF_VF2PF_INT;
@ -284,9 +313,10 @@ void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info)
		goto err;
	}

	if (resp && adf_iov_putmsg(accel_dev, resp, vf_nr))
	if (resp && adf_send_pf2vf_msg(accel_dev, vf_nr, resp))
		dev_err(&GET_DEV(accel_dev), "Failed to send response to VF\n");

out:
	/* re-enable interrupt on PF from this VF */
	adf_enable_vf2pf_interrupts(accel_dev, (1 << vf_nr));

@ -304,7 +334,7 @@ void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev)
	int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev));

	for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) {
		if (vf->init && adf_iov_putmsg(accel_dev, msg, i))
		if (vf->init && adf_send_pf2vf_msg(accel_dev, i, msg))
			dev_err(&GET_DEV(accel_dev),
				"Failed to send restarting msg to VF%d\n", i);
	}
@ -312,7 +342,6 @@ void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev)

static int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev)
{
	unsigned long timeout = msecs_to_jiffies(ADF_IOV_MSG_RESP_TIMEOUT);
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u32 msg = 0;
	int ret;
@ -322,24 +351,13 @@ static int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev)
	msg |= ADF_PFVF_COMPAT_THIS_VERSION << ADF_VF2PF_COMPAT_VER_REQ_SHIFT;
	BUILD_BUG_ON(ADF_PFVF_COMPAT_THIS_VERSION > 255);

	reinit_completion(&accel_dev->vf.iov_msg_completion);

	/* Send request from VF to PF */
	ret = adf_iov_putmsg(accel_dev, msg, 0);
	ret = adf_send_vf2pf_req(accel_dev, msg);
	if (ret) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to send Compatibility Version Request.\n");
		return ret;
	}

	/* Wait for response */
	if (!wait_for_completion_timeout(&accel_dev->vf.iov_msg_completion,
					 timeout)) {
		dev_err(&GET_DEV(accel_dev),
			"IOV request/response message timeout expired\n");
		return -EIO;
	}

	/* Response from PF received, check compatibility */
	switch (accel_dev->vf.compatible) {
	case ADF_PF2VF_VF_COMPATIBLE:
@ -378,3 +396,21 @@ int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
	return adf_vf2pf_request_version(accel_dev);
}
EXPORT_SYMBOL_GPL(adf_enable_vf2pf_comms);

/**
 * adf_enable_pf2vf_comms() - Function enables communication from pf to vf
 *
 * @accel_dev: Pointer to acceleration device virtual function.
 *
 * This function carries out the necessary steps to setup and start the PFVF
 * communication channel, if any.
 *
 * Return: 0 on success, error code otherwise.
 */
int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev)
{
	spin_lock_init(&accel_dev->pf.vf2pf_ints_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(adf_enable_pf2vf_comms);

@ -90,13 +90,4 @@
/* VF->PF Compatible Version Request */
#define ADF_VF2PF_COMPAT_VER_REQ_SHIFT	22

/* Collision detection */
#define ADF_IOV_MSG_COLLISION_DETECT_DELAY	10
#define ADF_IOV_MSG_ACK_DELAY			2
#define ADF_IOV_MSG_ACK_MAX_RETRY		100
#define ADF_IOV_MSG_RETRY_DELAY			5
#define ADF_IOV_MSG_MAX_RETRIES			3
#define ADF_IOV_MSG_RESP_TIMEOUT	(ADF_IOV_MSG_ACK_DELAY * \
					 ADF_IOV_MSG_ACK_MAX_RETRY + \
					 ADF_IOV_MSG_COLLISION_DETECT_DELAY)
#endif /* ADF_IOV_MSG_H */

@ -17,7 +17,7 @@ int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev)
	u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
		(ADF_VF2PF_MSGTYPE_INIT << ADF_VF2PF_MSGTYPE_SHIFT));

	if (adf_iov_putmsg(accel_dev, msg, 0)) {
	if (adf_send_vf2pf_msg(accel_dev, msg)) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to send Init event to PF\n");
		return -EFAULT;
@ -41,7 +41,7 @@ void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev)
		(ADF_VF2PF_MSGTYPE_SHUTDOWN << ADF_VF2PF_MSGTYPE_SHIFT));

	if (test_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status))
		if (adf_iov_putmsg(accel_dev, msg, 0))
		if (adf_send_vf2pf_msg(accel_dev, msg))
			dev_err(&GET_DEV(accel_dev),
				"Failed to send Shutdown event to PF\n");
}

@ -53,27 +53,22 @@ EXPORT_SYMBOL_GPL(adf_disable_pf2vf_interrupts);
static int adf_enable_msi(struct adf_accel_dev *accel_dev)
{
	struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
	int stat = pci_enable_msi(pci_dev_info->pci_dev);

	if (stat) {
	int stat = pci_alloc_irq_vectors(pci_dev_info->pci_dev, 1, 1,
					 PCI_IRQ_MSI);
	if (unlikely(stat < 0)) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to enable MSI interrupts\n");
			"Failed to enable MSI interrupt: %d\n", stat);
		return stat;
	}

	accel_dev->vf.irq_name = kzalloc(ADF_MAX_MSIX_VECTOR_NAME, GFP_KERNEL);
	if (!accel_dev->vf.irq_name)
		return -ENOMEM;

	return stat;
	return 0;
}

static void adf_disable_msi(struct adf_accel_dev *accel_dev)
{
	struct pci_dev *pdev = accel_to_pci_dev(accel_dev);

	kfree(accel_dev->vf.irq_name);
	pci_disable_msi(pdev);
	pci_free_irq_vectors(pdev);
}

static void adf_dev_stop_async(struct work_struct *work)
@ -101,6 +96,11 @@ static void adf_pf2vf_bh_handler(void *data)

	/* Read the message from PF */
	msg = ADF_CSR_RD(pmisc_bar_addr, hw_data->get_pf2vf_offset(0));
	if (!(msg & ADF_PF2VF_INT)) {
		dev_info(&GET_DEV(accel_dev),
			 "Spurious PF2VF interrupt, msg %X. Ignored\n", msg);
		goto out;
	}

	if (!(msg & ADF_PF2VF_MSGORIGIN_SYSTEM))
		/* Ignore legacy non-system (non-kernel) PF2VF messages */
@ -149,6 +149,7 @@ static void adf_pf2vf_bh_handler(void *data)
	msg &= ~ADF_PF2VF_INT;
	ADF_CSR_WR(pmisc_bar_addr, hw_data->get_pf2vf_offset(0), msg);

out:
	/* Re-enable PF2VF interrupts */
	adf_enable_pf2vf_interrupts(accel_dev);
	return;
@ -240,6 +241,7 @@ static int adf_request_msi_irq(struct adf_accel_dev *accel_dev)
	}
	cpu = accel_dev->accel_id % num_online_cpus();
	irq_set_affinity_hint(pdev->irq, get_cpu_mask(cpu));
	accel_dev->vf.irq_enabled = true;

	return ret;
}
@ -271,8 +273,10 @@ void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev)
{
	struct pci_dev *pdev = accel_to_pci_dev(accel_dev);

	irq_set_affinity_hint(pdev->irq, NULL);
	free_irq(pdev->irq, (void *)accel_dev);
	if (accel_dev->vf.irq_enabled) {
		irq_set_affinity_hint(pdev->irq, NULL);
		free_irq(pdev->irq, accel_dev);
	}
	adf_cleanup_bh(accel_dev);
	adf_cleanup_pf2vf_bh(accel_dev);
	adf_disable_msi(accel_dev);
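The adf_enable_msi()/adf_disable_msi() change above is the standard conversion from the deprecated pci_enable_msi()/pci_disable_msi() pair to pci_alloc_irq_vectors() with PCI_IRQ_MSI (requesting exactly one vector) and pci_free_irq_vectors(). Condensed to its skeleton (a kernel-style sketch; the my_dev_* names are placeholders):

/* Sketch: single-MSI setup with the modern vector-allocation API. */
#include <linux/pci.h>

static int my_dev_enable_msi(struct pci_dev *pdev)
{
	int nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);

	if (nvec < 0)
		return nvec;    /* negative errno, no vectors allocated */
	return 0;
}

static void my_dev_disable_msi(struct pci_dev *pdev)
{
	pci_free_irq_vectors(pdev);   /* undoes pci_alloc_irq_vectors() */
}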
@ -35,34 +35,6 @@ static u32 get_ae_mask(struct adf_hw_device_data *self)
	return ~fuses & ADF_DH895XCC_ACCELENGINES_MASK;
}

static u32 get_num_accels(struct adf_hw_device_data *self)
{
	u32 i, ctr = 0;

	if (!self || !self->accel_mask)
		return 0;

	for (i = 0; i < ADF_DH895XCC_MAX_ACCELERATORS; i++) {
		if (self->accel_mask & (1 << i))
			ctr++;
	}
	return ctr;
}

static u32 get_num_aes(struct adf_hw_device_data *self)
{
	u32 i, ctr = 0;

	if (!self || !self->ae_mask)
		return 0;

	for (i = 0; i < ADF_DH895XCC_MAX_ACCELENGINES; i++) {
		if (self->ae_mask & (1 << i))
			ctr++;
	}
	return ctr;
}

static u32 get_misc_bar_id(struct adf_hw_device_data *self)
{
	return ADF_DH895XCC_PMISC_BAR;
@ -126,41 +98,6 @@ static const u32 *adf_get_arbiter_mapping(void)
	return thrd_to_arb_map;
}

static u32 get_pf2vf_offset(u32 i)
{
	return ADF_DH895XCC_PF2VF_OFFSET(i);
}

static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
	struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR];
	unsigned long accel_mask = hw_device->accel_mask;
	unsigned long ae_mask = hw_device->ae_mask;
	void __iomem *csr = misc_bar->virt_addr;
	unsigned int val, i;

	/* Enable Accel Engine error detection & correction */
	for_each_set_bit(i, &ae_mask, GET_MAX_ACCELENGINES(accel_dev)) {
		val = ADF_CSR_RD(csr, ADF_DH895XCC_AE_CTX_ENABLES(i));
		val |= ADF_DH895XCC_ENABLE_AE_ECC_ERR;
		ADF_CSR_WR(csr, ADF_DH895XCC_AE_CTX_ENABLES(i), val);
		val = ADF_CSR_RD(csr, ADF_DH895XCC_AE_MISC_CONTROL(i));
		val |= ADF_DH895XCC_ENABLE_AE_ECC_PARITY_CORR;
		ADF_CSR_WR(csr, ADF_DH895XCC_AE_MISC_CONTROL(i), val);
	}

	/* Enable shared memory error detection & correction */
	for_each_set_bit(i, &accel_mask, ADF_DH895XCC_MAX_ACCELERATORS) {
		val = ADF_CSR_RD(csr, ADF_DH895XCC_UERRSSMSH(i));
		val |= ADF_DH895XCC_ERRSSMSH_EN;
		ADF_CSR_WR(csr, ADF_DH895XCC_UERRSSMSH(i), val);
		val = ADF_CSR_RD(csr, ADF_DH895XCC_CERRSSMSH(i));
		val |= ADF_DH895XCC_ERRSSMSH_EN;
		ADF_CSR_WR(csr, ADF_DH895XCC_CERRSSMSH(i), val);
	}
}

static void adf_enable_ints(struct adf_accel_dev *accel_dev)
{
	void __iomem *addr;
@ -175,11 +112,50 @@ static void adf_enable_ints(struct adf_accel_dev *accel_dev)
			   ADF_DH895XCC_SMIA1_MASK);
}

static int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev)
static u32 get_vf2pf_sources(void __iomem *pmisc_bar)
{
	spin_lock_init(&accel_dev->pf.vf2pf_ints_lock);
	u32 errsou5, errmsk5, vf_int_mask;

	return 0;
	vf_int_mask = adf_gen2_get_vf2pf_sources(pmisc_bar);

	/* Get the interrupt sources triggered by VFs, but to avoid duplicates
	 * in the work queue, clear vf_int_mask_sets bits that are already
	 * masked in ERRMSK register.
	 */
	errsou5 = ADF_CSR_RD(pmisc_bar, ADF_GEN2_ERRSOU5);
	errmsk5 = ADF_CSR_RD(pmisc_bar, ADF_GEN2_ERRMSK5);
	vf_int_mask |= ADF_DH895XCC_ERR_REG_VF2PF_U(errsou5);
	vf_int_mask &= ~ADF_DH895XCC_ERR_REG_VF2PF_U(errmsk5);

	return vf_int_mask;
}

static void enable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
{
	/* Enable VF2PF Messaging Ints - VFs 0 through 15 per vf_mask[15:0] */
	adf_gen2_enable_vf2pf_interrupts(pmisc_addr, vf_mask);

	/* Enable VF2PF Messaging Ints - VFs 16 through 31 per vf_mask[31:16] */
	if (vf_mask >> 16) {
		u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK5)
			  & ~ADF_DH895XCC_ERR_MSK_VF2PF_U(vf_mask);

		ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, val);
	}
}

static void disable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
{
	/* Disable VF2PF interrupts for VFs 0 through 15 per vf_mask[15:0] */
	adf_gen2_disable_vf2pf_interrupts(pmisc_addr, vf_mask);

	/* Disable VF2PF interrupts for VFs 16 through 31 per vf_mask[31:16] */
	if (vf_mask >> 16) {
		u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK5)
			  | ADF_DH895XCC_ERR_MSK_VF2PF_U(vf_mask);

		ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, val);
	}
}

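The deleted get_num_accels()/get_num_aes() loops above simply count set bits in a mask, which is what the shared adf_gen2_* replacements do as well. A userspace equivalent of that counting, using the compiler's population-count builtin in place of the kernel's hweight helpers:

/* Sketch: bit-count a capability mask instead of looping over bits. */
#include <stdint.h>
#include <stdio.h>

static uint32_t count_enabled(uint32_t mask)
{
	return (uint32_t)__builtin_popcount(mask);   /* hweight32 analogue */
}

int main(void)
{
	uint32_t ae_mask = 0x0FFF;   /* example: 12 engines enabled */

	printf("engines = %u\n", count_enabled(ae_mask));
	return 0;
}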
static void configure_iov_threads(struct adf_accel_dev *accel_dev, bool enable)
@ -198,16 +174,16 @@ void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
	hw_data->num_accel = ADF_DH895XCC_MAX_ACCELERATORS;
	hw_data->num_logical_accel = 1;
	hw_data->num_engines = ADF_DH895XCC_MAX_ACCELENGINES;
	hw_data->tx_rx_gap = ADF_DH895XCC_RX_RINGS_OFFSET;
	hw_data->tx_rings_mask = ADF_DH895XCC_TX_RINGS_MASK;
	hw_data->tx_rx_gap = ADF_GEN2_RX_RINGS_OFFSET;
	hw_data->tx_rings_mask = ADF_GEN2_TX_RINGS_MASK;
	hw_data->alloc_irq = adf_isr_resource_alloc;
	hw_data->free_irq = adf_isr_resource_free;
	hw_data->enable_error_correction = adf_enable_error_correction;
	hw_data->enable_error_correction = adf_gen2_enable_error_correction;
	hw_data->get_accel_mask = get_accel_mask;
	hw_data->get_ae_mask = get_ae_mask;
	hw_data->get_accel_cap = get_accel_cap;
	hw_data->get_num_accels = get_num_accels;
	hw_data->get_num_aes = get_num_aes;
	hw_data->get_num_accels = adf_gen2_get_num_accels;
	hw_data->get_num_aes = adf_gen2_get_num_aes;
	hw_data->get_etr_bar_id = get_etr_bar_id;
	hw_data->get_misc_bar_id = get_misc_bar_id;
	hw_data->get_admin_info = adf_gen2_get_admin_info;
@ -225,7 +201,10 @@ void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
	hw_data->get_arb_mapping = adf_get_arbiter_mapping;
	hw_data->enable_ints = adf_enable_ints;
	hw_data->reset_device = adf_reset_sbr;
	hw_data->get_pf2vf_offset = get_pf2vf_offset;
	hw_data->get_pf2vf_offset = adf_gen2_get_pf2vf_offset;
	hw_data->get_vf2pf_sources = get_vf2pf_sources;
	hw_data->enable_vf2pf_interrupts = enable_vf2pf_interrupts;
	hw_data->disable_vf2pf_interrupts = disable_vf2pf_interrupts;
	hw_data->enable_pfvf_comms = adf_enable_pf2vf_comms;
	hw_data->disable_iov = adf_disable_sriov;
	hw_data->min_iov_compat_ver = ADF_PFVF_COMPAT_THIS_VERSION;

@ -7,8 +7,6 @@
#define ADF_DH895XCC_SRAM_BAR 0
#define ADF_DH895XCC_PMISC_BAR 1
#define ADF_DH895XCC_ETR_BAR 2
#define ADF_DH895XCC_RX_RINGS_OFFSET 8
#define ADF_DH895XCC_TX_RINGS_MASK 0xFF
#define ADF_DH895XCC_FUSECTL_SKU_MASK 0x300000
#define ADF_DH895XCC_FUSECTL_SKU_SHIFT 20
#define ADF_DH895XCC_FUSECTL_SKU_1 0x0
@ -25,16 +23,10 @@
#define ADF_DH895XCC_SMIAPF1_MASK_OFFSET (0x3A000 + 0x30)
#define ADF_DH895XCC_SMIA0_MASK 0xFFFFFFFF
#define ADF_DH895XCC_SMIA1_MASK 0x1
/* Error detection and correction */
#define ADF_DH895XCC_AE_CTX_ENABLES(i) (i * 0x1000 + 0x20818)
#define ADF_DH895XCC_AE_MISC_CONTROL(i) (i * 0x1000 + 0x20960)
#define ADF_DH895XCC_ENABLE_AE_ECC_ERR BIT(28)
#define ADF_DH895XCC_ENABLE_AE_ECC_PARITY_CORR (BIT(24) | BIT(12))
#define ADF_DH895XCC_UERRSSMSH(i) (i * 0x4000 + 0x18)
#define ADF_DH895XCC_CERRSSMSH(i) (i * 0x4000 + 0x10)
#define ADF_DH895XCC_ERRSSMSH_EN BIT(3)

#define ADF_DH895XCC_PF2VF_OFFSET(i)	(0x3A000 + 0x280 + ((i) * 0x04))
/* Masks for VF2PF interrupts */
#define ADF_DH895XCC_ERR_REG_VF2PF_U(vf_src)	(((vf_src) & 0x0000FFFF) << 16)
#define ADF_DH895XCC_ERR_MSK_VF2PF_U(vf_mask)	((vf_mask) >> 16)

/* AE to function mapping */
#define ADF_DH895XCC_AE2FUNC_MAP_GRP_A_NUM_REGS 96

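The two new VF2PF mask macros deserve a second look: ERRSOU5/ERRMSK5 keep the sources for VFs 16-31 in their low 16 bits, so a 32-bit per-VF mask is shifted down on the way into the register and back up on the way out. A small self-check with the macros copied verbatim:

/* Check: VF 20's bit round-trips through the upper-half register. */
#include <stdint.h>
#include <stdio.h>

#define ERR_REG_VF2PF_U(vf_src)  (((vf_src) & 0x0000FFFF) << 16)
#define ERR_MSK_VF2PF_U(vf_mask) ((vf_mask) >> 16)

int main(void)
{
	uint32_t vf_mask = UINT32_C(1) << 20;   /* VF 20 */

	/* Register bit for VF 20 is bit 4 of ERRMSK5 ... */
	printf("reg bits = 0x%08x\n", ERR_MSK_VF2PF_U(vf_mask));
	/* ... and mapping a raw ERRSOU5 read back restores the VF bit */
	printf("vf bits  = 0x%08x\n",
	       ERR_REG_VF2PF_U(ERR_MSK_VF2PF_U(vf_mask)));
	return 0;
}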
@ -2171,6 +2171,8 @@ static int s5p_aes_probe(struct platform_device *pdev)

	variant = find_s5p_sss_version(pdev);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;

	/*
	 * Note: HASH and PRNG uses the same registers in secss, avoid

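The s5p-sss hunk adds the missing check on platform_get_resource(), which returns NULL when no MEM resource is described for the device. The pattern in isolation (a kernel-style sketch; my_probe is a placeholder):

/* Sketch: never pass an unchecked platform resource downstream. */
#include <linux/platform_device.h>

static int my_probe(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (!res)               /* no MEM resource described for the device */
		return -EINVAL;

	/* ... continue, e.g. with devm_ioremap_resource(&pdev->dev, res) ... */
	return 0;
}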
@ -2412,8 +2412,7 @@ static int sa_ul_probe(struct platform_device *pdev)
	pm_runtime_enable(dev);
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "%s: failed to get sync: %d\n", __func__,
			ret);
		dev_err(dev, "%s: failed to get sync: %d\n", __func__, ret);
		pm_runtime_disable(dev);
		return ret;
	}
@ -2435,16 +2434,16 @@ static int sa_ul_probe(struct platform_device *pdev)

	sa_register_algos(dev_data);

	ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
	ret = of_platform_populate(node, NULL, NULL, dev);
	if (ret)
		goto release_dma;

	device_for_each_child(&pdev->dev, &pdev->dev, sa_link_child);
	device_for_each_child(dev, dev, sa_link_child);

	return 0;

release_dma:
	sa_unregister_algos(&pdev->dev);
	sa_unregister_algos(dev);

	dma_release_channel(dev_data->dma_rx2);
	dma_release_channel(dev_data->dma_rx1);
@ -2453,8 +2452,8 @@ static int sa_ul_probe(struct platform_device *pdev)
destroy_dma_pool:
	dma_pool_destroy(dev_data->sc_pool);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);

	return ret;
}

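The sa2ul hunks are a pure cleanup: the probe function already defines struct device *dev = &pdev->dev;, so every remaining &pdev->dev collapses to dev. Before/after in miniature (a sketch; the my_probe_* functions are placeholders):

/* Sketch: reuse the local device alias instead of repeating &pdev->dev. */
#include <linux/device.h>
#include <linux/platform_device.h>

static int my_probe_before(struct platform_device *pdev)
{
	dev_info(&pdev->dev, "probing\n");   /* repeated &pdev->dev */
	return 0;
}

static int my_probe_after(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;     /* defined once */

	dev_info(dev, "probing\n");
	return 0;
}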
@ -16,6 +16,7 @@
|
||||
#include <crypto/akcipher.h>
|
||||
#include <crypto/hash.h>
|
||||
#include <crypto/skcipher.h>
|
||||
#include <crypto/kpp.h>
|
||||
|
||||
#define ENGINE_NAME_LEN 30
|
||||
/*
|
||||
@ -96,6 +97,8 @@ int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
|
||||
struct akcipher_request *req);
|
||||
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
|
||||
struct ahash_request *req);
|
||||
int crypto_transfer_kpp_request_to_engine(struct crypto_engine *engine,
|
||||
struct kpp_request *req);
|
||||
int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
|
||||
struct skcipher_request *req);
|
||||
void crypto_finalize_aead_request(struct crypto_engine *engine,
|
||||
@ -104,6 +107,8 @@ void crypto_finalize_akcipher_request(struct crypto_engine *engine,
|
||||
struct akcipher_request *req, int err);
|
||||
void crypto_finalize_hash_request(struct crypto_engine *engine,
|
||||
struct ahash_request *req, int err);
|
||||
void crypto_finalize_kpp_request(struct crypto_engine *engine,
|
||||
struct kpp_request *req, int err);
|
||||
void crypto_finalize_skcipher_request(struct crypto_engine *engine,
|
||||
struct skcipher_request *req, int err);
|
||||
int crypto_engine_start(struct crypto_engine *engine);
|
||||
|
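With the declarations above, a KPP driver plugs into the crypto engine the same way the existing akcipher and hash users do: the algorithm entry point queues the request, and the engine's per-request callback completes it. A sketch under those assumptions (the my_* names and the hardware call are placeholders, not part of the API):

/* Sketch: KPP request flow through the crypto engine. */
#include <crypto/engine.h>
#include <crypto/kpp.h>

struct my_kpp_dev {
	struct crypto_engine *engine;
	/* ... hardware state ... */
};

static struct my_kpp_dev *my_get_dev(struct kpp_request *req); /* driver lookup */
static int my_hw_process(struct kpp_request *req);             /* drive the HW */

static int my_kpp_generate_public_key(struct kpp_request *req)
{
	struct my_kpp_dev *dd = my_get_dev(req);

	/* Queue the request; the engine calls back when it is scheduled */
	return crypto_transfer_kpp_request_to_engine(dd->engine, req);
}

static int my_kpp_do_one_request(struct crypto_engine *engine, void *areq)
{
	struct kpp_request *req = container_of(areq, struct kpp_request, base);
	int err = my_hw_process(req);

	/* Mark the request complete; err is 0 or a negative errno */
	crypto_finalize_kpp_request(engine, req, err);
	return 0;
}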
@ -225,6 +225,41 @@ void vli_mod_inv(u64 *result, const u64 *input, const u64 *mod,
void vli_mod_mult_slow(u64 *result, const u64 *left, const u64 *right,
		       const u64 *mod, unsigned int ndigits);

/**
 * vli_num_bits() - Counts the number of bits required for vli.
 *
 * @vli:		vli to check.
 * @ndigits:		Length of the @vli
 *
 * Return: The number of bits required to represent @vli.
 */
unsigned int vli_num_bits(const u64 *vli, unsigned int ndigits);

/**
 * ecc_alloc_point() - Allocate ECC point.
 *
 * @ndigits:		Length of vlis in u64 qwords.
 *
 * Return: Pointer to the allocated point or NULL if allocation failed.
 */
struct ecc_point *ecc_alloc_point(unsigned int ndigits);

/**
 * ecc_free_point() - Free ECC point.
 *
 * @p:			The point to free.
 */
void ecc_free_point(struct ecc_point *p);

/**
 * ecc_point_is_zero() - Check if point is zero.
 *
 * @point:		Point to check for zero.
 *
 * Return: true if point is the point at infinity, false otherwise.
 */
bool ecc_point_is_zero(const struct ecc_point *point);

/**
 * ecc_point_mult_shamir() - Add two points multiplied by scalars
 *
@ -242,4 +277,5 @@ void ecc_point_mult_shamir(const struct ecc_point *result,
			   const u64 *x, const struct ecc_point *p,
			   const u64 *y, const struct ecc_point *q,
			   const struct ecc_curve *curve);

#endif
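vli_num_bits() as documented above reduces to: find the most significant non-zero 64-bit digit, then add that digit's bit length to 64 bits for each digit below it. A userspace model of that computation (the kernel implementation may differ in detail):

/* Model: bit length of a little-endian array of u64 digits. */
#include <stdint.h>
#include <stdio.h>

static unsigned int num_bits(const uint64_t *vli, unsigned int ndigits)
{
	while (ndigits && !vli[ndigits - 1])
		ndigits--;                       /* skip leading zero digits */
	if (!ndigits)
		return 0;
	return 64 * ndigits - (unsigned int)__builtin_clzll(vli[ndigits - 1]);
}

int main(void)
{
	uint64_t v[4] = { 0x1, 0x0, 0x0, 0x0 };  /* value 1 -> 1 bit */

	printf("%u\n", num_bits(v, 4));
	return 0;
}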
@ -15,7 +15,7 @@ static const u32 fk[4] = {
	0xa3b1bac6, 0x56aa3350, 0x677d9197, 0xb27022dc
};

static const u32 __cacheline_aligned ck[32] = {
static const u32 ____cacheline_aligned ck[32] = {
	0x00070e15, 0x1c232a31, 0x383f464d, 0x545b6269,
	0x70777e85, 0x8c939aa1, 0xa8afb6bd, 0xc4cbd2d9,
	0xe0e7eef5, 0xfc030a11, 0x181f262d, 0x343b4249,
@ -26,7 +26,7 @@ static const u32 __cacheline_aligned ck[32] = {
	0x10171e25, 0x2c333a41, 0x484f565d, 0x646b7279
};

static const u8 __cacheline_aligned sbox[256] = {
static const u8 ____cacheline_aligned sbox[256] = {
	0xd6, 0x90, 0xe9, 0xfe, 0xcc, 0xe1, 0x3d, 0xb7,
	0x16, 0xb6, 0x14, 0xc2, 0x28, 0xfb, 0x2c, 0x05,
	0x2b, 0x67, 0x9a, 0x76, 0x2a, 0xbe, 0x04, 0xc3,
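The sm4 hunks swap __cacheline_aligned for ____cacheline_aligned on two const tables. In the kernel, the two-underscore form also places the object in a dedicated data section, which is wrong for read-only tables; the four-underscore form is plain cache-line alignment, as in this userspace analogue (64-byte line size assumed):

/* Analogue: alignment attribute only, no section placement. */
#include <stdint.h>
#include <stdio.h>

#define CACHELINE 64   /* assumed L1 line size */

static const uint8_t sbox_demo[256] __attribute__((aligned(CACHELINE))) = {
	0xd6, 0x90, 0xe9, 0xfe,   /* ... remaining entries elided ... */
};

int main(void)
{
	printf("aligned: %d\n", (int)(((uintptr_t)sbox_demo % CACHELINE) == 0));
	return 0;
}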