dmaengine fixes for v6.13
Driver fixes for:
 - Kernel doc warning documentation fixes
 - apple driver fix for register access
 - amd driver dropping private dma_ops
 - freescale cleanup path fix
 - refcount fix for mv_xor driver
 - null pointer deref fix for at_xdmac driver
 - GENMASK to GENMASK_ULL fix for loongson2 apb driver
 - Tegra driver fix for correcting dma status

-----BEGIN PGP SIGNATURE-----

iQIzBAABCgAdFiEE+vs47OPLdNbVcHzyfBQHDyUjg0cFAmdtW8IACgkQfBQHDyUj
g0fWEQ/5AddE/OIlxBai1K+diRcTxrgHJ6rMl2+4gF3Odhf3+bCnGS8rnYxwzIzO
eRgovAhJsR5zc519KIPvdm9nj/MlxzCCUktQX/5smLvLrlGqdlPor14/SMsm5ePS
w1BX550Ho2YoI1mvlcW4Kj4Kvy6e2KAgBZs0gKsHLyS90KLsi9mENmt12RmYtPns
DKkzy+R8q8Xf5GKAwPGFutbBsCzXocJmodcEbKNnYut9+Wcn3wao1ZUscrXEfT7X
0AvAT+9rOTIItSHD4NSRqUmcqvcQqIY9ESJtntEABqes3/CBtr0dovZBoJvbpnu2
iVAkQBeBi0Le9Wprs6q5ODr5BZsHYqjskstMXS48m3CUkShLORtwTJh+4fVDFPFE
AuGn55nSfXQn/pbFbIoCbU8HzJmNlhFTtuvPji5PgLDi5st2aU3AjZcqdW+zGg9l
Cwc+1A7AQ+SaZ3iY9aeNeckaeSefyDQiMH5P/1a/SLois8tGLaXJfi/t6vGDiuXp
NlJJZ5GrMDlN3eKajDU2+qj0rBv8j6tYaL9VeQ3h/5UP1tlhm9gOMionri2do/hF
XRioKQo8aFW5WadmPVBRhC65x83b3TUDf/KOgDhDh53VjZLNp+c2yAlzkFaUJofb
EQblwTuASWnA7QnqqUT9juPLBBylbuNaxa0CiB8NCUUkx+vVTfA=
=1jRj
-----END PGP SIGNATURE-----

Merge tag 'dmaengine-fix-6.13' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine

Pull dmaengine fixes from Vinod Koul:
 "Bunch of minor driver fixes for drivers in this cycle:

   - Kernel doc warning documentation fixes

   - apple driver fix for register access

   - amd driver dropping private dma_ops

   - freescale cleanup path fix

   - refcount fix for mv_xor driver

   - null pointer deref fix for at_xdmac driver

   - GENMASK to GENMASK_ULL fix for loongson2 apb driver

   - Tegra driver fix for correcting dma status"

* tag 'dmaengine-fix-6.13' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine:
  dmaengine: tegra: Return correct DMA status when paused
  dmaengine: mv_xor: fix child node refcount handling in early exit
  dmaengine: fsl-edma: implement the cleanup path of fsl_edma3_attach_pd()
  dmaengine: amd: qdma: Remove using the private get and set dma_ops APIs
  dmaengine: apple-admac: Avoid accessing registers in probe
  linux/dmaengine.h: fix a few kernel-doc warnings
  dmaengine: loongson2-apb: Change GENMASK to GENMASK_ULL
  dmaengine: dw: Select only supported masters for ACPI devices
  dmaengine: at_xdmac: avoid null_prt_deref in at_xdmac_prep_dma_memset
commit 23db0ed34f
diff --git a/drivers/dma/amd/qdma/qdma.c b/drivers/dma/amd/qdma/qdma.c
@@ -7,9 +7,9 @@
 #include <linux/bitfield.h>
 #include <linux/bitops.h>
 #include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
 #include <linux/module.h>
 #include <linux/mod_devicetable.h>
-#include <linux/dma-map-ops.h>
 #include <linux/platform_device.h>
 #include <linux/platform_data/amd_qdma.h>
 #include <linux/regmap.h>
@@ -492,18 +492,9 @@ static int qdma_device_verify(struct qdma_device *qdev)
 
 static int qdma_device_setup(struct qdma_device *qdev)
 {
-        struct device *dev = &qdev->pdev->dev;
         u32 ring_sz = QDMA_DEFAULT_RING_SIZE;
         int ret = 0;
 
-        while (dev && get_dma_ops(dev))
-                dev = dev->parent;
-        if (!dev) {
-                qdma_err(qdev, "dma device not found");
-                return -EINVAL;
-        }
-        set_dma_ops(&qdev->pdev->dev, get_dma_ops(dev));
-
         ret = qdma_setup_fmap_context(qdev);
         if (ret) {
                 qdma_err(qdev, "Failed setup fmap context");
@@ -548,11 +539,12 @@ static void qdma_free_queue_resources(struct dma_chan *chan)
 {
         struct qdma_queue *queue = to_qdma_queue(chan);
         struct qdma_device *qdev = queue->qdev;
-        struct device *dev = qdev->dma_dev.dev;
+        struct qdma_platdata *pdata;
 
         qdma_clear_queue_context(queue);
         vchan_free_chan_resources(&queue->vchan);
-        dma_free_coherent(dev, queue->ring_size * QDMA_MM_DESC_SIZE,
+        pdata = dev_get_platdata(&qdev->pdev->dev);
+        dma_free_coherent(pdata->dma_dev, queue->ring_size * QDMA_MM_DESC_SIZE,
                           queue->desc_base, queue->dma_desc_base);
 }
 
@@ -565,6 +557,7 @@ static int qdma_alloc_queue_resources(struct dma_chan *chan)
         struct qdma_queue *queue = to_qdma_queue(chan);
         struct qdma_device *qdev = queue->qdev;
         struct qdma_ctxt_sw_desc desc;
+        struct qdma_platdata *pdata;
         size_t size;
         int ret;
 
@@ -572,8 +565,9 @@ static int qdma_alloc_queue_resources(struct dma_chan *chan)
         if (ret)
                 return ret;
 
+        pdata = dev_get_platdata(&qdev->pdev->dev);
         size = queue->ring_size * QDMA_MM_DESC_SIZE;
-        queue->desc_base = dma_alloc_coherent(qdev->dma_dev.dev, size,
+        queue->desc_base = dma_alloc_coherent(pdata->dma_dev, size,
                                               &queue->dma_desc_base,
                                               GFP_KERNEL);
         if (!queue->desc_base) {
@@ -588,7 +582,7 @@ static int qdma_alloc_queue_resources(struct dma_chan *chan)
         if (ret) {
                 qdma_err(qdev, "Failed to setup SW desc ctxt for %s",
                          chan->name);
-                dma_free_coherent(qdev->dma_dev.dev, size, queue->desc_base,
+                dma_free_coherent(pdata->dma_dev, size, queue->desc_base,
                                   queue->dma_desc_base);
                 return ret;
         }
@@ -948,8 +942,9 @@ static int qdma_init_error_irq(struct qdma_device *qdev)
 
 static int qdmam_alloc_qintr_rings(struct qdma_device *qdev)
 {
-        u32 ctxt[QDMA_CTXT_REGMAP_LEN];
+        struct qdma_platdata *pdata = dev_get_platdata(&qdev->pdev->dev);
         struct device *dev = &qdev->pdev->dev;
+        u32 ctxt[QDMA_CTXT_REGMAP_LEN];
         struct qdma_intr_ring *ring;
         struct qdma_ctxt_intr intr_ctxt;
         u32 vector;
@@ -969,7 +964,8 @@ static int qdmam_alloc_qintr_rings(struct qdma_device *qdev)
                 ring->msix_id = qdev->err_irq_idx + i + 1;
                 ring->ridx = i;
                 ring->color = 1;
-                ring->base = dmam_alloc_coherent(dev, QDMA_INTR_RING_SIZE,
+                ring->base = dmam_alloc_coherent(pdata->dma_dev,
+                                                 QDMA_INTR_RING_SIZE,
                                                  &ring->dev_base, GFP_KERNEL);
                 if (!ring->base) {
                         qdma_err(qdev, "Failed to alloc intr ring %d", i);
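What replaces the dma_ops juggling (a minimal sketch, not the driver's full code): get_dma_ops()/set_dma_ops() are private to the DMA-mapping core, so the parent that creates the platform device now hands down the DMA-capable device in struct qdma_platdata (see the include/linux/platform_data/amd_qdma.h hunk at the end of this diff), and allocations go straight to it:

#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/platform_data/amd_qdma.h>

/* Illustrative helper: allocate a descriptor ring against the DMA
 * device passed via platform data instead of copying a parent's
 * dma_ops onto this device.
 */
static void *example_alloc_ring(struct platform_device *pdev, size_t size,
                                dma_addr_t *dma_handle)
{
        struct qdma_platdata *pdata = dev_get_platdata(&pdev->dev);

        return dma_alloc_coherent(pdata->dma_dev, size, dma_handle,
                                  GFP_KERNEL);
}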
diff --git a/drivers/dma/apple-admac.c b/drivers/dma/apple-admac.c
@@ -153,6 +153,8 @@ static int admac_alloc_sram_carveout(struct admac_data *ad,
 {
         struct admac_sram *sram;
         int i, ret = 0, nblocks;
+        ad->txcache.size = readl_relaxed(ad->base + REG_TX_SRAM_SIZE);
+        ad->rxcache.size = readl_relaxed(ad->base + REG_RX_SRAM_SIZE);
 
         if (dir == DMA_MEM_TO_DEV)
                 sram = &ad->txcache;
@@ -912,12 +914,7 @@ static int admac_probe(struct platform_device *pdev)
                 goto free_irq;
         }
 
-        ad->txcache.size = readl_relaxed(ad->base + REG_TX_SRAM_SIZE);
-        ad->rxcache.size = readl_relaxed(ad->base + REG_RX_SRAM_SIZE);
-
         dev_info(&pdev->dev, "Audio DMA Controller\n");
-        dev_info(&pdev->dev, "imprint %x TX cache %u RX cache %u\n",
-                 readl_relaxed(ad->base + REG_IMPRINT), ad->txcache.size, ad->rxcache.size);
 
         return 0;
 
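Context for the apple-admac change (the sketch below is illustrative, not the driver's code): register reads in probe can run before the controller is powered and clocked, so the SRAM-size reads move into admac_alloc_sram_carveout(), which only executes once a channel is actually used. The deferral pattern, reduced to its core:

#include <linux/io.h>
#include <linux/types.h>

/* Hypothetical helper: read a hardware size register on first use
 * rather than at probe time; 'base', 'reg' and 'cached' stand in for
 * the driver's own state.
 */
static u32 example_read_size_on_first_use(void __iomem *base,
                                          unsigned long reg, u32 *cached)
{
        if (!*cached)
                *cached = readl_relaxed(base + reg);

        return *cached;
}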
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
@@ -1363,6 +1363,8 @@ at_xdmac_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
                 return NULL;
 
         desc = at_xdmac_memset_create_desc(chan, atchan, dest, len, value);
+        if (!desc)
+                return NULL;
         list_add_tail(&desc->desc_node, &desc->descs_list);
 
         desc->tx_dma_desc.cookie = -EBUSY;
diff --git a/drivers/dma/dw/acpi.c b/drivers/dma/dw/acpi.c
@@ -8,13 +8,15 @@
 
 static bool dw_dma_acpi_filter(struct dma_chan *chan, void *param)
 {
+        struct dw_dma *dw = to_dw_dma(chan->device);
+        struct dw_dma_chip_pdata *data = dev_get_drvdata(dw->dma.dev);
         struct acpi_dma_spec *dma_spec = param;
         struct dw_dma_slave slave = {
                 .dma_dev = dma_spec->dev,
                 .src_id = dma_spec->slave_id,
                 .dst_id = dma_spec->slave_id,
-                .m_master = 0,
-                .p_master = 1,
+                .m_master = data->m_master,
+                .p_master = data->p_master,
         };
 
         return dw_dma_filter(chan, &slave);
diff --git a/drivers/dma/dw/internal.h b/drivers/dma/dw/internal.h
@@ -51,11 +51,15 @@ struct dw_dma_chip_pdata {
         int (*probe)(struct dw_dma_chip *chip);
         int (*remove)(struct dw_dma_chip *chip);
         struct dw_dma_chip *chip;
+        u8 m_master;
+        u8 p_master;
 };
 
 static __maybe_unused const struct dw_dma_chip_pdata dw_dma_chip_pdata = {
         .probe = dw_dma_probe,
         .remove = dw_dma_remove,
+        .m_master = 0,
+        .p_master = 1,
 };
 
 static const struct dw_dma_platform_data idma32_pdata = {
@@ -72,6 +76,8 @@ static __maybe_unused const struct dw_dma_chip_pdata idma32_chip_pdata = {
         .pdata = &idma32_pdata,
         .probe = idma32_dma_probe,
         .remove = idma32_dma_remove,
+        .m_master = 0,
+        .p_master = 0,
 };
 
 static const struct dw_dma_platform_data xbar_pdata = {
@@ -88,6 +94,8 @@ static __maybe_unused const struct dw_dma_chip_pdata xbar_chip_pdata = {
         .pdata = &xbar_pdata,
         .probe = idma32_dma_probe,
         .remove = idma32_dma_remove,
+        .m_master = 0,
+        .p_master = 0,
 };
 
 #endif /* _DMA_DW_INTERNAL_H */
diff --git a/drivers/dma/dw/pci.c b/drivers/dma/dw/pci.c
@@ -56,10 +56,10 @@ static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
         if (ret)
                 return ret;
 
-        dw_dma_acpi_controller_register(chip->dw);
-
         pci_set_drvdata(pdev, data);
 
+        dw_dma_acpi_controller_register(chip->dw);
+
         return 0;
 }
 
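The pci.c reorder matters because dw_dma_acpi_filter() above now calls dev_get_drvdata(): drvdata has to be published before the ACPI controller is registered and the filter can fire. A reduced sketch of the constraint (the registration helper here is hypothetical):

#include <linux/pci.h>

static void example_register_acpi_controller(struct pci_dev *pdev); /* hypothetical */

static int example_probe(struct pci_dev *pdev, void *data)
{
        /* Publish drvdata first: callbacks triggered by registration
         * may read it immediately.
         */
        pci_set_drvdata(pdev, data);
        example_register_acpi_controller(pdev);

        return 0;
}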
diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h
@@ -166,6 +166,7 @@ struct fsl_edma_chan {
         struct work_struct              issue_worker;
         struct platform_device          *pdev;
         struct device                   *pd_dev;
+        struct device_link              *pd_dev_link;
         u32                             srcid;
         struct clk                      *clk;
         int                             priority;
diff --git a/drivers/dma/fsl-edma-main.c b/drivers/dma/fsl-edma-main.c
@@ -417,10 +417,33 @@ static const struct of_device_id fsl_edma_dt_ids[] = {
 };
 MODULE_DEVICE_TABLE(of, fsl_edma_dt_ids);
 
+static void fsl_edma3_detach_pd(struct fsl_edma_engine *fsl_edma)
+{
+        struct fsl_edma_chan *fsl_chan;
+        int i;
+
+        for (i = 0; i < fsl_edma->n_chans; i++) {
+                if (fsl_edma->chan_masked & BIT(i))
+                        continue;
+                fsl_chan = &fsl_edma->chans[i];
+                if (fsl_chan->pd_dev_link)
+                        device_link_del(fsl_chan->pd_dev_link);
+                if (fsl_chan->pd_dev) {
+                        dev_pm_domain_detach(fsl_chan->pd_dev, false);
+                        pm_runtime_dont_use_autosuspend(fsl_chan->pd_dev);
+                        pm_runtime_set_suspended(fsl_chan->pd_dev);
+                }
+        }
+}
+
+static void devm_fsl_edma3_detach_pd(void *data)
+{
+        fsl_edma3_detach_pd(data);
+}
+
 static int fsl_edma3_attach_pd(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
 {
         struct fsl_edma_chan *fsl_chan;
-        struct device_link *link;
         struct device *pd_chan;
         struct device *dev;
         int i;
@@ -436,15 +459,16 @@ static int fsl_edma3_attach_pd(struct platform_device *pdev, struct fsl_edma_eng
                 pd_chan = dev_pm_domain_attach_by_id(dev, i);
                 if (IS_ERR_OR_NULL(pd_chan)) {
                         dev_err(dev, "Failed attach pd %d\n", i);
-                        return -EINVAL;
+                        goto detach;
                 }
 
-                link = device_link_add(dev, pd_chan, DL_FLAG_STATELESS |
+                fsl_chan->pd_dev_link = device_link_add(dev, pd_chan, DL_FLAG_STATELESS |
                                              DL_FLAG_PM_RUNTIME |
                                              DL_FLAG_RPM_ACTIVE);
-                if (!link) {
+                if (!fsl_chan->pd_dev_link) {
                         dev_err(dev, "Failed to add device_link to %d\n", i);
-                        return -EINVAL;
+                        dev_pm_domain_detach(pd_chan, false);
+                        goto detach;
                 }
 
                 fsl_chan->pd_dev = pd_chan;
@@ -455,6 +479,10 @@ static int fsl_edma3_attach_pd(struct platform_device *pdev, struct fsl_edma_eng
         }
 
         return 0;
+
+detach:
+        fsl_edma3_detach_pd(fsl_edma);
+        return -EINVAL;
 }
 
 static int fsl_edma_probe(struct platform_device *pdev)
@@ -544,6 +572,9 @@ static int fsl_edma_probe(struct platform_device *pdev)
                 ret = fsl_edma3_attach_pd(pdev, fsl_edma);
                 if (ret)
                         return ret;
+                ret = devm_add_action_or_reset(&pdev->dev, devm_fsl_edma3_detach_pd, fsl_edma);
+                if (ret)
+                        return ret;
         }
 
         if (drvdata->flags & FSL_EDMA_DRV_TCD64)
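The shape of the fsl-edma fix, reduced to a generic sketch (all example_* names are hypothetical; devm_add_action_or_reset() is the real API): unwind partial attachment through a goto path, then register the same teardown as a devm action so it also runs on later probe failures and on unbind:

#include <linux/device.h>

struct example_state {
        int attached;   /* bookkeeping for what example_attach() did */
};

static void example_detach(void *data)
{
        struct example_state *st = data;

        /* Must tolerate partially attached state, since the goto-based
         * unwind in attach can invoke it early.
         */
        if (st->attached)
                st->attached = 0;
}

static int example_attach(struct device *dev, struct example_state *st)
{
        /* ... dev_pm_domain_attach_by_id() / device_link_add() loop,
         * jumping to a 'detach' label on failure, as in the diff above ...
         */
        st->attached = 1;

        return devm_add_action_or_reset(dev, example_detach, st);
}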
diff --git a/drivers/dma/loongson2-apb-dma.c b/drivers/dma/loongson2-apb-dma.c
@@ -31,7 +31,7 @@
 #define LDMA_ASK_VALID          BIT(2)
 #define LDMA_START              BIT(3) /* DMA start operation */
 #define LDMA_STOP               BIT(4) /* DMA stop operation */
-#define LDMA_CONFIG_MASK        GENMASK(4, 0) /* DMA controller config bits mask */
+#define LDMA_CONFIG_MASK        GENMASK_ULL(4, 0) /* DMA controller config bits mask */
 
 /* Bitfields in ndesc_addr field of HW descriptor */
 #define LDMA_DESC_EN            BIT(0)  /*1: The next descriptor is valid */
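Why GENMASK_ULL matters here (a minimal illustration, not the driver's code): GENMASK() builds an unsigned long, which is 32 bits on 32-bit kernels, so negating it and applying it to a 64-bit value silently clears bits 63:32. GENMASK_ULL() keeps the mask 64 bits wide:

#include <linux/bits.h>
#include <linux/types.h>

/* Update only the low five config bits of a 64-bit value. With
 * GENMASK(4, 0) on a 32-bit build, ~mask would be 0xffffffe0 and the
 * AND below would also wipe bits 63:32 of 'val'.
 */
static u64 example_set_config(u64 val, u64 cfg)
{
        return (val & ~GENMASK_ULL(4, 0)) | (cfg & GENMASK_ULL(4, 0));
}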
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
@@ -1388,6 +1388,7 @@ static int mv_xor_probe(struct platform_device *pdev)
                         irq = irq_of_parse_and_map(np, 0);
                         if (!irq) {
                                 ret = -ENODEV;
+                                of_node_put(np);
                                 goto err_channel_add;
                         }
 
@@ -1396,6 +1397,7 @@ static int mv_xor_probe(struct platform_device *pdev)
                         if (IS_ERR(chan)) {
                                 ret = PTR_ERR(chan);
                                 irq_dispose_mapping(irq);
+                                of_node_put(np);
                                 goto err_channel_add;
                         }
 
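The rule behind the mv_xor fix, as a generic sketch (not the driver's exact code): for_each_child_of_node() holds a reference on each child node it yields and drops it on the next iteration, so every early exit from the loop body must drop the current node's reference itself:

#include <linux/errno.h>
#include <linux/of.h>
#include <linux/of_irq.h>

static int example_map_child_irqs(struct device_node *parent)
{
        struct device_node *np;

        for_each_child_of_node(parent, np) {
                unsigned int irq = irq_of_parse_and_map(np, 0);

                if (!irq) {
                        of_node_put(np);        /* drop the iterator's reference */
                        return -ENODEV;
                }
                /* ... use irq ... */
        }

        return 0;
}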
diff --git a/drivers/dma/tegra186-gpc-dma.c b/drivers/dma/tegra186-gpc-dma.c
@@ -231,6 +231,7 @@ struct tegra_dma_channel {
         bool config_init;
         char name[30];
         enum dma_transfer_direction sid_dir;
+        enum dma_status status;
         int id;
         int irq;
         int slave_id;
@@ -393,6 +394,8 @@ static int tegra_dma_pause(struct tegra_dma_channel *tdc)
                 tegra_dma_dump_chan_regs(tdc);
         }
 
+        tdc->status = DMA_PAUSED;
+
         return ret;
 }
 
@@ -419,6 +422,8 @@ static void tegra_dma_resume(struct tegra_dma_channel *tdc)
         val = tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSRE);
         val &= ~TEGRA_GPCDMA_CHAN_CSRE_PAUSE;
         tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSRE, val);
+
+        tdc->status = DMA_IN_PROGRESS;
 }
 
 static int tegra_dma_device_resume(struct dma_chan *dc)
@@ -544,6 +549,7 @@ static void tegra_dma_xfer_complete(struct tegra_dma_channel *tdc)
 
         tegra_dma_sid_free(tdc);
         tdc->dma_desc = NULL;
+        tdc->status = DMA_COMPLETE;
 }
 
 static void tegra_dma_chan_decode_error(struct tegra_dma_channel *tdc,
@@ -716,6 +722,7 @@ static int tegra_dma_terminate_all(struct dma_chan *dc)
                 tdc->dma_desc = NULL;
         }
 
+        tdc->status = DMA_COMPLETE;
         tegra_dma_sid_free(tdc);
         vchan_get_all_descriptors(&tdc->vc, &head);
         spin_unlock_irqrestore(&tdc->vc.lock, flags);
@@ -769,6 +776,9 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
         if (ret == DMA_COMPLETE)
                 return ret;
 
+        if (tdc->status == DMA_PAUSED)
+                ret = DMA_PAUSED;
+
         spin_lock_irqsave(&tdc->vc.lock, flags);
         vd = vchan_find_desc(&tdc->vc, cookie);
         if (vd) {
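The dmaengine convention the tegra change implements, as a generic sketch (example_chan_is_paused() is hypothetical; dma_cookie_status() is the real helper from the driver-internal drivers/dma/dmaengine.h): tx_status should report DMA_PAUSED for an in-flight descriptor on a paused channel, which requires the driver to track pause state itself:

#include <linux/dmaengine.h>
#include "dmaengine.h"          /* driver-internal: dma_cookie_status() */

static bool example_chan_is_paused(struct dma_chan *c);  /* hypothetical */

static enum dma_status example_tx_status(struct dma_chan *c,
                                         dma_cookie_t cookie,
                                         struct dma_tx_state *txstate)
{
        enum dma_status ret = dma_cookie_status(c, cookie, txstate);

        if (ret == DMA_COMPLETE)
                return ret;

        /* Not complete yet: surface the channel's pause state. */
        if (example_chan_is_paused(c))
                ret = DMA_PAUSED;

        return ret;
}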
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
@@ -84,7 +84,7 @@ enum dma_transfer_direction {
         DMA_TRANS_NONE,
 };
 
-/**
+/*
  * Interleaved Transfer Request
  * ----------------------------
  * A chunk is collection of contiguous bytes to be transferred.
@@ -223,7 +223,7 @@ enum sum_check_bits {
 };
 
 /**
- * enum pq_check_flags - result of async_{xor,pq}_zero_sum operations
+ * enum sum_check_flags - result of async_{xor,pq}_zero_sum operations
  * @SUM_CHECK_P_RESULT - 1 if xor zero sum error, 0 otherwise
  * @SUM_CHECK_Q_RESULT - 1 if reed-solomon zero sum error, 0 otherwise
  */
@@ -286,7 +286,7 @@ typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;
  *      pointer to the engine's metadata area
  * 4. Read out the metadata from the pointer
  *
- * Note: the two mode is not compatible and clients must use one mode for a
+ * Warning: the two modes are not compatible and clients must use one mode for a
  * descriptor.
  */
 enum dma_desc_metadata_mode {
@@ -594,9 +594,13 @@ struct dma_descriptor_metadata_ops {
  * @phys: physical address of the descriptor
  * @chan: target channel for this operation
  * @tx_submit: accept the descriptor, assign ordered cookie and mark the
+ * @desc_free: driver's callback function to free a resusable descriptor
+ *      after completion
  *      descriptor pending. To be pushed on .issue_pending() call
  * @callback: routine to call after this operation is complete
+ * @callback_result: error result from a DMA transaction
+ * @callback_param: general parameter to pass to the callback routine
  * @unmap: hook for generic DMA unmap data
  * @desc_metadata_mode: core managed metadata mode to protect mixed use of
  *      DESC_METADATA_CLIENT or DESC_METADATA_ENGINE. Otherwise
  *      DESC_METADATA_NONE
@@ -827,6 +831,9 @@ struct dma_filter {
  * @device_prep_dma_memset: prepares a memset operation
  * @device_prep_dma_memset_sg: prepares a memset operation over a scatter list
  * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
+ * @device_prep_peripheral_dma_vec: prepares a scatter-gather DMA transfer,
+ *      where the address and size of each segment is located in one entry of
+ *      the dma_vec array.
  * @device_prep_slave_sg: prepares a slave dma operation
  * @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio.
  *      The function takes a buffer of size buf_len. The callback function will
diff --git a/include/linux/platform_data/amd_qdma.h b/include/linux/platform_data/amd_qdma.h
@@ -26,11 +26,13 @@ struct dma_slave_map;
  * @max_mm_channels: Maximum number of MM DMA channels in each direction
  * @device_map: DMA slave map
  * @irq_index: The index of first IRQ
+ * @dma_dev: The device pointer for dma operations
  */
 struct qdma_platdata {
         u32                     max_mm_channels;
         u32                     irq_index;
         struct dma_slave_map    *device_map;
+        struct device           *dma_dev;
 };
 
 #endif /* _PLATDATA_AMD_QDMA_H */