Merge branch 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine.git

commit 4322e341f7
Stephen Rothwell, 2024-12-20 09:41:52 +11:00
9 changed files with 43 additions and 29 deletions


@@ -7,9 +7,9 @@
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/dma-map-ops.h>
#include <linux/platform_device.h>
#include <linux/platform_data/amd_qdma.h>
#include <linux/regmap.h>
@@ -492,18 +492,9 @@ static int qdma_device_verify(struct qdma_device *qdev)
static int qdma_device_setup(struct qdma_device *qdev)
{
struct device *dev = &qdev->pdev->dev;
u32 ring_sz = QDMA_DEFAULT_RING_SIZE;
int ret = 0;
while (dev && get_dma_ops(dev))
dev = dev->parent;
if (!dev) {
qdma_err(qdev, "dma device not found");
return -EINVAL;
}
set_dma_ops(&qdev->pdev->dev, get_dma_ops(dev));
ret = qdma_setup_fmap_context(qdev);
if (ret) {
qdma_err(qdev, "Failed setup fmap context");
@@ -548,11 +539,12 @@ static void qdma_free_queue_resources(struct dma_chan *chan)
{
struct qdma_queue *queue = to_qdma_queue(chan);
struct qdma_device *qdev = queue->qdev;
struct device *dev = qdev->dma_dev.dev;
struct qdma_platdata *pdata;
qdma_clear_queue_context(queue);
vchan_free_chan_resources(&queue->vchan);
dma_free_coherent(dev, queue->ring_size * QDMA_MM_DESC_SIZE,
pdata = dev_get_platdata(&qdev->pdev->dev);
dma_free_coherent(pdata->dma_dev, queue->ring_size * QDMA_MM_DESC_SIZE,
queue->desc_base, queue->dma_desc_base);
}
@@ -565,6 +557,7 @@ static int qdma_alloc_queue_resources(struct dma_chan *chan)
struct qdma_queue *queue = to_qdma_queue(chan);
struct qdma_device *qdev = queue->qdev;
struct qdma_ctxt_sw_desc desc;
struct qdma_platdata *pdata;
size_t size;
int ret;
@@ -572,8 +565,9 @@ static int qdma_alloc_queue_resources(struct dma_chan *chan)
if (ret)
return ret;
pdata = dev_get_platdata(&qdev->pdev->dev);
size = queue->ring_size * QDMA_MM_DESC_SIZE;
queue->desc_base = dma_alloc_coherent(qdev->dma_dev.dev, size,
queue->desc_base = dma_alloc_coherent(pdata->dma_dev, size,
&queue->dma_desc_base,
GFP_KERNEL);
if (!queue->desc_base) {
@@ -588,7 +582,7 @@ static int qdma_alloc_queue_resources(struct dma_chan *chan)
if (ret) {
qdma_err(qdev, "Failed to setup SW desc ctxt for %s",
chan->name);
dma_free_coherent(qdev->dma_dev.dev, size, queue->desc_base,
dma_free_coherent(pdata->dma_dev, size, queue->desc_base,
queue->dma_desc_base);
return ret;
}
@@ -948,8 +942,9 @@ static int qdma_init_error_irq(struct qdma_device *qdev)
static int qdmam_alloc_qintr_rings(struct qdma_device *qdev)
{
u32 ctxt[QDMA_CTXT_REGMAP_LEN];
struct qdma_platdata *pdata = dev_get_platdata(&qdev->pdev->dev);
struct device *dev = &qdev->pdev->dev;
u32 ctxt[QDMA_CTXT_REGMAP_LEN];
struct qdma_intr_ring *ring;
struct qdma_ctxt_intr intr_ctxt;
u32 vector;
@@ -969,7 +964,8 @@ static int qdmam_alloc_qintr_rings(struct qdma_device *qdev)
ring->msix_id = qdev->err_irq_idx + i + 1;
ring->ridx = i;
ring->color = 1;
ring->base = dmam_alloc_coherent(dev, QDMA_INTR_RING_SIZE,
ring->base = dmam_alloc_coherent(pdata->dma_dev,
QDMA_INTR_RING_SIZE,
&ring->dev_base, GFP_KERNEL);
if (!ring->base) {
qdma_err(qdev, "Failed to alloc intr ring %d", i);


@@ -153,6 +153,8 @@ static int admac_alloc_sram_carveout(struct admac_data *ad,
{
struct admac_sram *sram;
int i, ret = 0, nblocks;
ad->txcache.size = readl_relaxed(ad->base + REG_TX_SRAM_SIZE);
ad->rxcache.size = readl_relaxed(ad->base + REG_RX_SRAM_SIZE);
if (dir == DMA_MEM_TO_DEV)
sram = &ad->txcache;
@@ -912,12 +914,7 @@ static int admac_probe(struct platform_device *pdev)
goto free_irq;
}
ad->txcache.size = readl_relaxed(ad->base + REG_TX_SRAM_SIZE);
ad->rxcache.size = readl_relaxed(ad->base + REG_RX_SRAM_SIZE);
dev_info(&pdev->dev, "Audio DMA Controller\n");
dev_info(&pdev->dev, "imprint %x TX cache %u RX cache %u\n",
readl_relaxed(ad->base + REG_IMPRINT), ad->txcache.size, ad->rxcache.size);
return 0;


@@ -1363,6 +1363,8 @@ at_xdmac_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
return NULL;
desc = at_xdmac_memset_create_desc(chan, atchan, dest, len, value);
if (!desc)
return NULL;
list_add_tail(&desc->desc_node, &desc->descs_list);
desc->tx_dma_desc.cookie = -EBUSY;


@@ -8,13 +8,15 @@
static bool dw_dma_acpi_filter(struct dma_chan *chan, void *param)
{
struct dw_dma *dw = to_dw_dma(chan->device);
struct dw_dma_chip_pdata *data = dev_get_drvdata(dw->dma.dev);
struct acpi_dma_spec *dma_spec = param;
struct dw_dma_slave slave = {
.dma_dev = dma_spec->dev,
.src_id = dma_spec->slave_id,
.dst_id = dma_spec->slave_id,
.m_master = 0,
.p_master = 1,
.m_master = data->m_master,
.p_master = data->p_master,
};
return dw_dma_filter(chan, &slave);
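
Since dw_dma_acpi_filter() now takes the master numbers from the chip pdata stored as driver data, whatever glue code registers the ACPI DMA controller has to publish that pdata first, which is what the reordering in dw_pci_probe() further below enforces. A minimal sketch of that contract; the helper name is purely illustrative:

	#include <linux/device.h>

	#include "internal.h"	/* assumed to provide struct dw_dma_chip_pdata and the ACPI helpers */

	/*
	 * Illustrative only: make the chip pdata visible through drvdata before
	 * the ACPI DMA controller goes live, so a request arriving right after
	 * registration cannot see a NULL pointer in dw_dma_acpi_filter().
	 */
	static void example_publish_and_register(struct device *dev,
						 struct dw_dma_chip_pdata *data)
	{
		dev_set_drvdata(dev, data);
		dw_dma_acpi_controller_register(data->chip->dw);
	}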


@@ -51,11 +51,15 @@ struct dw_dma_chip_pdata {
int (*probe)(struct dw_dma_chip *chip);
int (*remove)(struct dw_dma_chip *chip);
struct dw_dma_chip *chip;
u8 m_master;
u8 p_master;
};
static __maybe_unused const struct dw_dma_chip_pdata dw_dma_chip_pdata = {
.probe = dw_dma_probe,
.remove = dw_dma_remove,
.m_master = 0,
.p_master = 1,
};
static const struct dw_dma_platform_data idma32_pdata = {
@@ -72,6 +76,8 @@ static __maybe_unused const struct dw_dma_chip_pdata idma32_chip_pdata = {
.pdata = &idma32_pdata,
.probe = idma32_dma_probe,
.remove = idma32_dma_remove,
.m_master = 0,
.p_master = 0,
};
static const struct dw_dma_platform_data xbar_pdata = {
@@ -88,6 +94,8 @@ static __maybe_unused const struct dw_dma_chip_pdata xbar_chip_pdata = {
.pdata = &xbar_pdata,
.probe = idma32_dma_probe,
.remove = idma32_dma_remove,
.m_master = 0,
.p_master = 0,
};
#endif /* _DMA_DW_INTERNAL_H */


@@ -56,10 +56,10 @@ static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
if (ret)
return ret;
dw_dma_acpi_controller_register(chip->dw);
pci_set_drvdata(pdev, data);
dw_dma_acpi_controller_register(chip->dw);
return 0;
}


@@ -31,7 +31,7 @@
#define LDMA_ASK_VALID BIT(2)
#define LDMA_START BIT(3) /* DMA start operation */
#define LDMA_STOP BIT(4) /* DMA stop operation */
#define LDMA_CONFIG_MASK GENMASK(4, 0) /* DMA controller config bits mask */
#define LDMA_CONFIG_MASK GENMASK_ULL(4, 0) /* DMA controller config bits mask */
/* Bitfields in ndesc_addr field of HW descriptor */
#define LDMA_DESC_EN BIT(0) /*1: The next descriptor is valid */
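
The point of switching to GENMASK_ULL() is the type width: GENMASK() has the width of unsigned long, while GENMASK_ULL() is always 64 bits wide, which matters once the mask is combined with 64-bit register values. A small illustrative snippet (mirroring the 5-bit config field above, not taken from the driver):

	#include <linux/bits.h>
	#include <linux/types.h>

	/* Clear the low five configuration bits of a 64-bit register value. */
	static u64 clear_config_bits(u64 val)
	{
		/*
		 * With plain GENMASK(4, 0), ~mask is 0xffffffe0 on a 32-bit
		 * build and zero-extends to 0x00000000ffffffe0 when promoted
		 * to u64, wiping bits 63:32 of val as a side effect.
		 * GENMASK_ULL(4, 0) keeps the full 64-bit width, so only
		 * bits 4:0 are cleared regardless of BITS_PER_LONG.
		 */
		return val & ~GENMASK_ULL(4, 0);
	}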


@@ -84,7 +84,7 @@ enum dma_transfer_direction {
DMA_TRANS_NONE,
};
/**
/*
* Interleaved Transfer Request
* ----------------------------
* A chunk is a collection of contiguous bytes to be transferred.
@@ -223,7 +223,7 @@ enum sum_check_bits {
};
/**
* enum pq_check_flags - result of async_{xor,pq}_zero_sum operations
* enum sum_check_flags - result of async_{xor,pq}_zero_sum operations
* @SUM_CHECK_P_RESULT - 1 if xor zero sum error, 0 otherwise
* @SUM_CHECK_Q_RESULT - 1 if reed-solomon zero sum error, 0 otherwise
*/
@@ -286,7 +286,7 @@ typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;
* pointer to the engine's metadata area
* 4. Read out the metadata from the pointer
*
* Note: the two mode is not compatible and clients must use one mode for a
* Warning: the two modes are not compatible and clients must use one mode for a
* descriptor.
*/
enum dma_desc_metadata_mode {
@@ -594,9 +594,13 @@ struct dma_descriptor_metadata_ops {
* @phys: physical address of the descriptor
* @chan: target channel for this operation
* @tx_submit: accept the descriptor, assign ordered cookie and mark the
* @desc_free: driver's callback function to free a reusable descriptor
* after completion
* descriptor pending. To be pushed on .issue_pending() call
* @callback: routine to call after this operation is complete
* @callback_result: error result from a DMA transaction
* @callback_param: general parameter to pass to the callback routine
* @unmap: hook for generic DMA unmap data
* @desc_metadata_mode: core managed metadata mode to protect mixed use of
* DESC_METADATA_CLIENT or DESC_METADATA_ENGINE. Otherwise
* DESC_METADATA_NONE
@@ -827,6 +831,9 @@ struct dma_filter {
* @device_prep_dma_memset: prepares a memset operation
* @device_prep_dma_memset_sg: prepares a memset operation over a scatter list
* @device_prep_dma_interrupt: prepares an end of chain interrupt operation
* @device_prep_peripheral_dma_vec: prepares a scatter-gather DMA transfer,
where the address and size of each segment are located in one entry of
* the dma_vec array.
* @device_prep_slave_sg: prepares a slave dma operation
* @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio.
* The function takes a buffer of size buf_len. The callback function will
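
Clients reach the @device_prep_peripheral_dma_vec callback documented above through the dmaengine_prep_peripheral_dma_vec() helper. A hedged client-side sketch, assuming the channel has already been configured with dmaengine_slave_config() and both segments are already DMA-mapped:

	#include <linux/dmaengine.h>
	#include <linux/kernel.h>

	/* Hand two pre-mapped segments to a peripheral as a single transaction. */
	static int submit_two_segments(struct dma_chan *chan,
				       dma_addr_t seg0, size_t len0,
				       dma_addr_t seg1, size_t len1)
	{
		struct dma_vec vecs[] = {
			{ .addr = seg0, .len = len0 },
			{ .addr = seg1, .len = len1 },
		};
		struct dma_async_tx_descriptor *desc;
		dma_cookie_t cookie;

		desc = dmaengine_prep_peripheral_dma_vec(chan, vecs, ARRAY_SIZE(vecs),
							 DMA_MEM_TO_DEV,
							 DMA_PREP_INTERRUPT);
		if (!desc)
			return -ENOMEM;

		cookie = dmaengine_submit(desc);
		if (dma_submit_error(cookie))
			return -EIO;

		dma_async_issue_pending(chan);
		return 0;
	}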


@@ -26,11 +26,13 @@ struct dma_slave_map;
* @max_mm_channels: Maximum number of MM DMA channels in each direction
* @device_map: DMA slave map
* @irq_index: The index of first IRQ
* @dma_dev: The device pointer for dma operations
*/
struct qdma_platdata {
u32 max_mm_channels;
u32 irq_index;
struct dma_slave_map *device_map;
struct device *dma_dev;
};
#endif /* _PLATDATA_AMD_QDMA_H */
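
For context on the new @dma_dev member: the parent that actually owns the DMA mapping (typically the PCIe function behind the QDMA IP) is now passed in through platform data, and the driver allocates its descriptor rings against that device instead of the platform device, as the earlier qdma_alloc_queue_resources() and qdma_free_queue_resources() hunks show. A hedged sketch of how a parent driver might wire this up; the platform device name, channel count and IRQ index are illustrative assumptions, not taken from an in-tree user:

	#include <linux/pci.h>
	#include <linux/platform_device.h>
	#include <linux/platform_data/amd_qdma.h>

	/*
	 * Illustrative only: spawn the QDMA platform device from a PCIe parent
	 * and point dma_dev at the device that should back dma_alloc_coherent().
	 */
	static struct platform_device *example_spawn_qdma(struct pci_dev *pdev,
							  u32 first_msix)
	{
		struct qdma_platdata pdata = {
			.max_mm_channels = 2,		/* assumed */
			.irq_index	 = first_msix,	/* assumed */
			.dma_dev	 = &pdev->dev,
		};

		/* "amd-qdma" is assumed to match the platform driver name. */
		return platform_device_register_data(&pdev->dev, "amd-qdma",
						     PLATFORM_DEVID_AUTO,
						     &pdata, sizeof(pdata));
	}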