iio: buffer-dmaengine: Support specifying buffer direction

Update the devm_iio_dmaengine_buffer_setup() function to support
specifying the buffer direction. The direction-aware variant is named
devm_iio_dmaengine_buffer_setup_ext(); the old name is kept as a macro
that defaults to input buffers.

Update the iio_dmaengine_buffer_submit_block() function to handle
output buffers as well as input buffers.
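
As an illustration (not part of this patch), a hypothetical DAC driver
could now request an output DMA buffer through the new API; the "tx"
channel name and the foo_dac_* naming below are assumptions made for
this sketch only:

    #include <linux/iio/buffer-dmaengine.h>

    /* Sketch: attach an output (memory-to-device) DMA buffer. */
    static int foo_dac_setup_buffer(struct device *dev,
                                    struct iio_dev *indio_dev)
    {
            return devm_iio_dmaengine_buffer_setup_ext(dev, indio_dev, "tx",
                                                       IIO_BUFFER_DIRECTION_OUT);
    }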

Signed-off-by: Paul Cercueil <paul@crapouillou.net>
Reviewed-by: Alexandru Ardelean <ardeleanalex@gmail.com>
Signed-off-by: Nuno Sa <nuno.sa@analog.com>
Link: https://lore.kernel.org/r/20240419-iio-backend-axi-dac-v4-4-5ca45b4de294@analog.com
Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Author:    Paul Cercueil <paul@crapouillou.net>
Date:      2024-04-19 10:25:37 +02:00
Committer: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Parent:    fb09febafd
Commit:    c1b9156658
2 changed files with 49 additions and 20 deletions

drivers/iio/buffer/industrialio-buffer-dmaengine.c

@@ -64,14 +64,25 @@ static int iio_dmaengine_buffer_submit_block(struct iio_dma_buffer_queue *queue,
 	struct dmaengine_buffer *dmaengine_buffer =
 		iio_buffer_to_dmaengine_buffer(&queue->buffer);
 	struct dma_async_tx_descriptor *desc;
+	enum dma_transfer_direction dma_dir;
+	size_t max_size;
 	dma_cookie_t cookie;
 
-	block->bytes_used = min(block->size, dmaengine_buffer->max_size);
-	block->bytes_used = round_down(block->bytes_used,
-			dmaengine_buffer->align);
+	max_size = min(block->size, dmaengine_buffer->max_size);
+	max_size = round_down(max_size, dmaengine_buffer->align);
+
+	if (queue->buffer.direction == IIO_BUFFER_DIRECTION_IN) {
+		block->bytes_used = max_size;
+		dma_dir = DMA_DEV_TO_MEM;
+	} else {
+		dma_dir = DMA_MEM_TO_DEV;
+	}
+
+	if (!block->bytes_used || block->bytes_used > max_size)
+		return -EINVAL;
 
 	desc = dmaengine_prep_slave_single(dmaengine_buffer->chan,
-		block->phys_addr, block->bytes_used, DMA_DEV_TO_MEM,
+		block->phys_addr, block->bytes_used, dma_dir,
 		DMA_PREP_INTERRUPT);
 	if (!desc)
 		return -ENOMEM;
@@ -229,9 +240,10 @@ void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
 }
 EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_free, IIO_DMAENGINE_BUFFER);
 
-struct iio_buffer *iio_dmaengine_buffer_setup(struct device *dev,
-					      struct iio_dev *indio_dev,
-					      const char *channel)
+struct iio_buffer *iio_dmaengine_buffer_setup_ext(struct device *dev,
+						  struct iio_dev *indio_dev,
+						  const char *channel,
+						  enum iio_buffer_direction dir)
 {
 	struct iio_buffer *buffer;
 	int ret;
@@ -242,6 +254,8 @@ struct iio_buffer *iio_dmaengine_buffer_setup(struct device *dev,
 
 	indio_dev->modes |= INDIO_BUFFER_HARDWARE;
 
+	buffer->direction = dir;
+
 	ret = iio_device_attach_buffer(indio_dev, buffer);
 	if (ret) {
 		iio_dmaengine_buffer_free(buffer);
@@ -250,7 +264,7 @@ struct iio_buffer *iio_dmaengine_buffer_setup(struct device *dev,
 
 	return buffer;
 }
-EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_setup, IIO_DMAENGINE_BUFFER);
+EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_setup_ext, IIO_DMAENGINE_BUFFER);
 
 static void __devm_iio_dmaengine_buffer_free(void *buffer)
 {
@@ -258,30 +272,32 @@ static void __devm_iio_dmaengine_buffer_free(void *buffer)
 }
 
 /**
- * devm_iio_dmaengine_buffer_setup() - Setup a DMA buffer for an IIO device
+ * devm_iio_dmaengine_buffer_setup_ext() - Setup a DMA buffer for an IIO device
  * @dev: Parent device for the buffer
  * @indio_dev: IIO device to which to attach this buffer.
  * @channel: DMA channel name, typically "rx".
+ * @dir: Direction of buffer (in or out)
  *
  * This allocates a new IIO buffer with devm_iio_dmaengine_buffer_alloc()
  * and attaches it to an IIO device with iio_device_attach_buffer().
  * It also appends the INDIO_BUFFER_HARDWARE mode to the supported modes of the
  * IIO device.
  */
-int devm_iio_dmaengine_buffer_setup(struct device *dev,
-				    struct iio_dev *indio_dev,
-				    const char *channel)
+int devm_iio_dmaengine_buffer_setup_ext(struct device *dev,
+					struct iio_dev *indio_dev,
+					const char *channel,
+					enum iio_buffer_direction dir)
 {
 	struct iio_buffer *buffer;
 
-	buffer = iio_dmaengine_buffer_setup(dev, indio_dev, channel);
+	buffer = iio_dmaengine_buffer_setup_ext(dev, indio_dev, channel, dir);
 	if (IS_ERR(buffer))
 		return PTR_ERR(buffer);
 
 	return devm_add_action_or_reset(dev, __devm_iio_dmaengine_buffer_free,
 					buffer);
 }
-EXPORT_SYMBOL_NS_GPL(devm_iio_dmaengine_buffer_setup, IIO_DMAENGINE_BUFFER);
+EXPORT_SYMBOL_NS_GPL(devm_iio_dmaengine_buffer_setup_ext, IIO_DMAENGINE_BUFFER);
 
 MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
 MODULE_DESCRIPTION("DMA buffer for the IIO framework");

include/linux/iio/buffer-dmaengine.h

@@ -7,15 +7,28 @@
 #ifndef __IIO_DMAENGINE_H__
 #define __IIO_DMAENGINE_H__
 
+#include <linux/iio/buffer.h>
+
 struct iio_dev;
 struct device;
 
 void iio_dmaengine_buffer_free(struct iio_buffer *buffer);
-struct iio_buffer *iio_dmaengine_buffer_setup(struct device *dev,
-					      struct iio_dev *indio_dev,
-					      const char *channel);
-int devm_iio_dmaengine_buffer_setup(struct device *dev,
-				    struct iio_dev *indio_dev,
-				    const char *channel);
+struct iio_buffer *iio_dmaengine_buffer_setup_ext(struct device *dev,
+						  struct iio_dev *indio_dev,
+						  const char *channel,
+						  enum iio_buffer_direction dir);
+
+#define iio_dmaengine_buffer_setup(dev, indio_dev, channel)	\
+	iio_dmaengine_buffer_setup_ext(dev, indio_dev, channel,	\
+				       IIO_BUFFER_DIRECTION_IN)
+
+int devm_iio_dmaengine_buffer_setup_ext(struct device *dev,
+					struct iio_dev *indio_dev,
+					const char *channel,
+					enum iio_buffer_direction dir);
+
+#define devm_iio_dmaengine_buffer_setup(dev, indio_dev, channel)	\
+	devm_iio_dmaengine_buffer_setup_ext(dev, indio_dev, channel,	\
+					    IIO_BUFFER_DIRECTION_IN)
 
 #endif
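
For reference, an existing capture-only call site does not need to change;
after this patch the old helper name simply expands to the _ext variant
with IIO_BUFFER_DIRECTION_IN. The "rx" channel name and foo_adc_* naming
below are illustrative only:

    /* Unchanged capture-path setup, now routed through the _ext helper. */
    static int foo_adc_setup_buffer(struct device *dev,
                                    struct iio_dev *indio_dev)
    {
            /*
             * Expands to devm_iio_dmaengine_buffer_setup_ext(dev, indio_dev,
             * "rx", IIO_BUFFER_DIRECTION_IN) after this patch.
             */
            return devm_iio_dmaengine_buffer_setup(dev, indio_dev, "rx");
    }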