Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
Synced 2025-01-04 04:02:26 +00:00
3382c44f0c
Having switched from tasklets to workqueues, we are no longer limited to atomic APIs and can now convert the spinlock to a mutex. This, along with the tasklet-to-workqueue conversion, gives us a ~15% improvement in cryptsetup benchmarks for AES encryption. While at it: use guards to simplify the locking code.

Signed-off-by: Bartosz Golaszewski <bartosz.golaszewski@linaro.org>
Reviewed-by: Neil Armstrong <neil.armstrong@linaro.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
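For illustration, the shape of the change might look like the sketch below. The function bodies and names are illustrative only (based on the fields declared in core.h further down), not the driver's exact code: guard() from <linux/cleanup.h> releases the mutex automatically at end of scope, and completion is deferred with schedule_work() instead of tasklet_schedule().

#include <linux/cleanup.h>
#include <linux/workqueue.h>
#include <crypto/algapi.h>

#include "core.h"

/* Illustrative sketch: enqueue a request under the new mutex.
 * guard(mutex) takes qce->lock here and drops it automatically
 * when the function returns, removing the explicit unlock paths
 * the spinlock version needed.
 */
static int qce_enqueue_sketch(struct qce_device *qce,
			      struct crypto_async_request *req)
{
	guard(mutex)(&qce->lock);

	return crypto_enqueue_request(&qce->queue, req);
}

/* Illustrative sketch: completion now runs from process context,
 * scheduled onto the system workqueue instead of a tasklet.
 */
static void qce_req_done_sketch(struct qce_device *qce, int ret)
{
	qce->result = ret;
	schedule_work(&qce->done_work);	/* was tasklet_schedule() */
}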
65 lines
1.8 KiB
C
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
 */

#ifndef _CORE_H_
#define _CORE_H_

#include <linux/mutex.h>
#include <linux/workqueue.h>

#include "dma.h"
/**
 * struct qce_device - crypto engine device structure
 * @queue: crypto request queue
 * @lock: mutex protecting @queue and @req
 * @done_work: work run to complete the current request
 * @req: current active request
 * @result: result of current transform
 * @base: virtual IO base
 * @dev: pointer to device structure
 * @core: core device clock
 * @iface: interface clock
 * @bus: bus clock
 * @mem_path: memory interconnect path
 * @dma: pointer to dma data
 * @burst_size: the crypto burst size
 * @pipe_pair_id: which pipe pair id the device is using
 * @async_req_enqueue: invoked by every algorithm to enqueue a request
 * @async_req_done: invoked by every algorithm to finish its request
 */
struct qce_device {
	struct crypto_queue queue;
	struct mutex lock;
	struct work_struct done_work;
	struct crypto_async_request *req;
	int result;
	void __iomem *base;
	struct device *dev;
	struct clk *core, *iface, *bus;
	struct icc_path *mem_path;
	struct qce_dma_data dma;
	int burst_size;
	unsigned int pipe_pair_id;
	int (*async_req_enqueue)(struct qce_device *qce,
				 struct crypto_async_request *req);
	void (*async_req_done)(struct qce_device *qce, int ret);
};

/**
 * struct qce_algo_ops - algorithm operations per crypto type
 * @type: should be CRYPTO_ALG_TYPE_XXX
 * @register_algs: invoked by core to register the algorithms
 * @unregister_algs: invoked by core to unregister the algorithms
 * @async_req_handle: invoked by core to handle an enqueued request
 */
struct qce_algo_ops {
	u32 type;
	int (*register_algs)(struct qce_device *qce);
	void (*unregister_algs)(struct qce_device *qce);
	int (*async_req_handle)(struct crypto_async_request *async_req);
};

#endif /* _CORE_H_ */
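For context, each algorithm backend hands the core one of these ops tables. A minimal hypothetical sketch, with placeholder function names that are not the driver's actual symbols:

#include <linux/crypto.h>

#include "core.h"

/* Hypothetical ops table for one crypto type; the core calls
 * register_algs()/unregister_algs() when the device comes and
 * goes, and async_req_handle() for each request it dequeues.
 */
static int example_register_algs(struct qce_device *qce)
{
	/* register this type's algorithms with the crypto API */
	return 0;
}

static void example_unregister_algs(struct qce_device *qce)
{
	/* drop the registrations again on teardown */
}

static int example_async_req_handle(struct crypto_async_request *async_req)
{
	/* program the hardware for the dequeued request */
	return 0;
}

const struct qce_algo_ops example_ops = {
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.register_algs = example_register_algs,
	.unregister_algs = example_unregister_algs,
	.async_req_handle = example_async_req_handle,
};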