mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
Merge branch 'topic/core' into for-linus
commit a365c96854
@@ -482,7 +482,8 @@ int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
         device = chan->device;
 
         /* check if the channel supports slave transactions */
-        if (!test_bit(DMA_SLAVE, device->cap_mask.bits))
+        if (!(test_bit(DMA_SLAVE, device->cap_mask.bits) ||
+              test_bit(DMA_CYCLIC, device->cap_mask.bits)))
                 return -ENXIO;
 
         /*
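
Note on the hunk above: dma_get_slave_caps() now also reports capabilities for channels that only advertise DMA_CYCLIC rather than DMA_SLAVE. A minimal client-side sketch (the check_cyclic_caps() helper is hypothetical and not part of this commit) of how a caller might query a cyclic-only channel before setting up a transfer:

#include <linux/dmaengine.h>
#include <linux/bitops.h>

/* Hypothetical helper: verify a cyclic-capable channel supports dev-to-mem */
static int check_cyclic_caps(struct dma_chan *chan)
{
        struct dma_slave_caps caps;
        int ret;

        ret = dma_get_slave_caps(chan, &caps);
        if (ret)
                return ret;     /* neither slave- nor cyclic-capable */

        /* make sure the device-to-memory direction we need is supported */
        if (!(caps.directions & BIT(DMA_DEV_TO_MEM)))
                return -EINVAL;

        return 0;
}
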
@@ -865,12 +866,12 @@ static bool device_has_all_tx_types(struct dma_device *device)
                 return false;
         #endif
 
-        #if defined(CONFIG_ASYNC_MEMCPY) || defined(CONFIG_ASYNC_MEMCPY_MODULE)
+        #if IS_ENABLED(CONFIG_ASYNC_MEMCPY)
         if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
                 return false;
         #endif
 
-        #if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
+        #if IS_ENABLED(CONFIG_ASYNC_XOR)
         if (!dma_has_cap(DMA_XOR, device->cap_mask))
                 return false;
 
@@ -880,7 +881,7 @@ static bool device_has_all_tx_types(struct dma_device *device)
         #endif
         #endif
 
-        #if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
+        #if IS_ENABLED(CONFIG_ASYNC_PQ)
         if (!dma_has_cap(DMA_PQ, device->cap_mask))
                 return false;
 
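
The three #if hunks above swap the open-coded defined(CONFIG_FOO) || defined(CONFIG_FOO_MODULE) test for IS_ENABLED(CONFIG_FOO) from <linux/kconfig.h>, which evaluates to 1 when the option is built in (=y) or built as a module (=m). A small illustrative sketch of the equivalence (the HAVE_* macros are hypothetical):

#include <linux/kconfig.h>

/* old style: spells out both the built-in and the modular case */
#if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
#define HAVE_ASYNC_XOR_OLD 1
#endif

/* new style: IS_ENABLED() covers =y and =m with one readable check */
#if IS_ENABLED(CONFIG_ASYNC_XOR)
#define HAVE_ASYNC_XOR_NEW 1
#endif
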
@@ -240,8 +240,9 @@ struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
         struct of_phandle_args  dma_spec;
         struct of_dma           *ofdma;
         struct dma_chan         *chan;
-        int                     count, i;
+        int                     count, i, start;
         int                     ret_no_channel = -ENODEV;
+        static atomic_t         last_index;
 
         if (!np || !name) {
                 pr_err("%s: not enough information provided\n", __func__);
@@ -259,8 +260,15 @@ struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
                 return ERR_PTR(-ENODEV);
         }
 
+        /*
+         * approximate an average distribution across multiple
+         * entries with the same name
+         */
+        start = atomic_inc_return(&last_index);
         for (i = 0; i < count; i++) {
-                if (of_dma_match_channel(np, name, i, &dma_spec))
+                if (of_dma_match_channel(np, name,
+                                         (i + start) % count,
+                                         &dma_spec))
                         continue;
 
                 mutex_lock(&of_dma_lock);
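
The two of-dma hunks above rotate the starting index of the channel lookup so that requests for the same name are spread across all matching entries instead of always landing on the first one: successive requests begin at different entries, approximating round-robin hand-out of identically named channels. A standalone userspace sketch (assuming a hypothetical count of 3 entries) of the probe order produced by (i + start) % count:

#include <stdio.h>

/*
 * Standalone illustration, not kernel code: print the probe order as the
 * per-request start value advances, mirroring the rotation added to
 * of_dma_request_slave_channel() above.
 */
int main(void)
{
        const int count = 3;    /* identically named DMA entries (assumed) */
        int start, i;

        for (start = 1; start <= 3; start++) {  /* atomic_inc_return() results */
                printf("request %d probes entries:", start);
                for (i = 0; i < count; i++)
                        printf(" %d", (i + start) % count);
                printf("\n");
        }
        return 0;
}
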
@@ -804,6 +804,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
         sg_dma_address(&sg) = buf;
         sg_dma_len(&sg) = len;
 
+        if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
+                return NULL;
+
         return chan->device->device_prep_slave_sg(chan, &sg, 1,
                                                   dir, flags, NULL);
 }
@@ -812,6 +815,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg(
         struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
         enum dma_transfer_direction dir, unsigned long flags)
 {
+        if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
+                return NULL;
+
         return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
                                                   dir, flags, NULL);
 }
@@ -823,6 +829,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_rio_sg(
         enum dma_transfer_direction dir, unsigned long flags,
         struct rio_dma_ext *rio_ext)
 {
+        if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
+                return NULL;
+
         return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
                                                   dir, flags, rio_ext);
 }
@@ -833,6 +842,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic(
         size_t period_len, enum dma_transfer_direction dir,
         unsigned long flags)
 {
+        if (!chan || !chan->device || !chan->device->device_prep_dma_cyclic)
+                return NULL;
+
         return chan->device->device_prep_dma_cyclic(chan, buf_addr, buf_len,
                                                     period_len, dir, flags);
 }
@@ -841,6 +853,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
         struct dma_chan *chan, struct dma_interleaved_template *xt,
         unsigned long flags)
 {
+        if (!chan || !chan->device || !chan->device->device_prep_interleaved_dma)
+                return NULL;
+
         return chan->device->device_prep_interleaved_dma(chan, xt, flags);
 }
 
@@ -848,7 +863,7 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memset(
         struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
         unsigned long flags)
 {
-        if (!chan || !chan->device)
+        if (!chan || !chan->device || !chan->device->device_prep_dma_memset)
                 return NULL;
 
         return chan->device->device_prep_dma_memset(chan, dest, value,
@@ -861,6 +876,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg(
         struct scatterlist *src_sg, unsigned int src_nents,
         unsigned long flags)
 {
+        if (!chan || !chan->device || !chan->device->device_prep_dma_sg)
+                return NULL;
+
         return chan->device->device_prep_dma_sg(chan, dst_sg, dst_nents,
                                         src_sg, src_nents, flags);
 }