Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
(synced 2025-01-15 21:23:23 +00:00)

commit ff914bc51a
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine.git
Documentation/devicetree/bindings/dma/allwinner,sun4i-a10-dma.yaml

@@ -22,7 +22,9 @@ properties:
           number.

   compatible:
-    const: allwinner,sun4i-a10-dma
+    enum:
+      - allwinner,sun4i-a10-dma
+      - allwinner,suniv-f1c100s-dma

   reg:
     maxItems: 1
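For reference, a device tree node using the new compatible might look as follows; the unit address, interrupt number, and clock phandle are illustrative placeholders, not values taken from this binding change:

    dma: dma-controller@1c02000 {
        compatible = "allwinner,suniv-f1c100s-dma";
        reg = <0x01c02000 0x1000>;
        interrupts = <18>;
        clocks = <&ccu CLK_BUS_DMA>;
        #dma-cells = <2>;
    };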
Documentation/devicetree/bindings/dma/qcom,gpi.yaml

@@ -25,7 +25,9 @@ properties:
       - items:
           - enum:
               - qcom,qcm2290-gpi-dma
+              - qcom,qcs8300-gpi-dma
               - qcom,qdu1000-gpi-dma
+              - qcom,sa8775p-gpi-dma
               - qcom,sar2130p-gpi-dma
               - qcom,sc7280-gpi-dma
               - qcom,sdx75-gpi-dma
@@ -35,10 +37,12 @@ properties:
               - qcom,sm8450-gpi-dma
               - qcom,sm8550-gpi-dma
               - qcom,sm8650-gpi-dma
+              - qcom,sm8750-gpi-dma
               - qcom,x1e80100-gpi-dma
           - const: qcom,sm6350-gpi-dma
       - items:
           - enum:
+              - qcom,qcs615-gpi-dma
               - qcom,sdm670-gpi-dma
               - qcom,sm6125-gpi-dma
               - qcom,sm8150-gpi-dma
MAINTAINERS (10 lines changed)

@@ -988,6 +988,12 @@ L:	linux-edac@vger.kernel.org
 S:	Supported
 F:	drivers/ras/amd/atl/*

+AMD AE4DMA DRIVER
+M:	Basavaraj Natikar <Basavaraj.Natikar@amd.com>
+L:	dmaengine@vger.kernel.org
+S:	Supported
+F:	drivers/dma/amd/ae4dma/
+
 AMD AXI W1 DRIVER
 M:	Kris Chaplin <kris.chaplin@amd.com>
 R:	Thomas Delev <thomas.delev@amd.com>
@@ -1172,8 +1178,8 @@ F: tools/power/x86/amd_pstate_tracer/amd_pstate_trace.py
 AMD PTDMA DRIVER
 M:	Basavaraj Natikar <Basavaraj.Natikar@amd.com>
 L:	dmaengine@vger.kernel.org
-S:	Maintained
-F:	drivers/dma/ptdma/
+S:	Supported
+F:	drivers/dma/amd/ptdma/

 AMD QDMA DRIVER
 M:	Nishad Saraf <nishads@amd.com>
drivers/dma/Kconfig

@@ -162,8 +162,8 @@ config DMA_SA11X0

 config DMA_SUN4I
 	tristate "Allwinner A10 DMA SoCs support"
-	depends on MACH_SUN4I || MACH_SUN5I || MACH_SUN7I
-	default (MACH_SUN4I || MACH_SUN5I || MACH_SUN7I)
+	depends on MACH_SUN4I || MACH_SUN5I || MACH_SUN7I || MACH_SUNIV
+	default (MACH_SUN4I || MACH_SUN5I || MACH_SUN7I || MACH_SUNIV)
 	select DMA_ENGINE
 	select DMA_VIRTUAL_CHANNELS
 	help
@@ -740,8 +740,6 @@ source "drivers/dma/bestcomm/Kconfig"

 source "drivers/dma/mediatek/Kconfig"

-source "drivers/dma/ptdma/Kconfig"
-
 source "drivers/dma/qcom/Kconfig"

 source "drivers/dma/dw/Kconfig"
drivers/dma/Makefile

@@ -16,7 +16,6 @@ obj-$(CONFIG_DMATEST) += dmatest.o
 obj-$(CONFIG_ALTERA_MSGDMA) += altera-msgdma.o
 obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
 obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
-obj-$(CONFIG_AMD_PTDMA) += ptdma/
 obj-$(CONFIG_APPLE_ADMAC) += apple-admac.o
 obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
 obj-$(CONFIG_AT_XDMAC) += at_xdmac.o
drivers/dma/amd/Kconfig

@@ -1,4 +1,32 @@
 # SPDX-License-Identifier: GPL-2.0-only
+#
+
+config AMD_AE4DMA
+	tristate "AMD AE4DMA Engine"
+	depends on (X86_64 || COMPILE_TEST) && PCI
+	depends on AMD_PTDMA
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Enable support for the AMD AE4DMA controller. This controller
+	  provides DMA capabilities to perform high bandwidth memory to
+	  memory and IO copy operations. It performs DMA transfer through
+	  queue-based descriptor management. This DMA controller is intended
+	  to be used with AMD Non-Transparent Bridge devices and not for
+	  general purpose peripheral DMA.
+
+config AMD_PTDMA
+	tristate "AMD PassThru DMA Engine"
+	depends on X86_64 && PCI
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Enable support for the AMD PTDMA controller. This controller
+	  provides DMA capabilities to perform high bandwidth memory to
+	  memory and IO copy operations. It performs DMA transfer through
+	  queue-based descriptor management. This DMA controller is intended
+	  to be used with AMD Non-Transparent Bridge devices and not for
+	  general purpose peripheral DMA.

 config AMD_QDMA
 	tristate "AMD Queue-based DMA"
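A minimal .config fragment to build both drivers as modules; note the new AMD_AE4DMA option depends on AMD_PTDMA, so both must be enabled:

    CONFIG_AMD_PTDMA=m
    CONFIG_AMD_AE4DMA=m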
drivers/dma/amd/Makefile

@@ -1,3 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0

+obj-$(CONFIG_AMD_AE4DMA) += ae4dma/
+obj-$(CONFIG_AMD_PTDMA) += ptdma/
 obj-$(CONFIG_AMD_QDMA) += qdma/
drivers/dma/amd/ae4dma/Makefile (new file, 10 lines)

@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# AMD AE4DMA driver
+#
+
+obj-$(CONFIG_AMD_AE4DMA) += ae4dma.o
+
+ae4dma-objs := ae4dma-dev.o
+
+ae4dma-$(CONFIG_PCI) += ae4dma-pci.o
drivers/dma/amd/ae4dma/ae4dma-dev.c (new file, 157 lines)

@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD AE4DMA driver
+ *
+ * Copyright (c) 2024, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Basavaraj Natikar <Basavaraj.Natikar@amd.com>
+ */
+
+#include "ae4dma.h"
+
+static unsigned int max_hw_q = 1;
+module_param(max_hw_q, uint, 0444);
+MODULE_PARM_DESC(max_hw_q, "max hw queues supported by engine (any non-zero value, default: 1)");
+
+static void ae4_pending_work(struct work_struct *work)
+{
+	struct ae4_cmd_queue *ae4cmd_q = container_of(work, struct ae4_cmd_queue, p_work.work);
+	struct pt_cmd_queue *cmd_q = &ae4cmd_q->cmd_q;
+	struct pt_cmd *cmd;
+	u32 cridx;
+
+	for (;;) {
+		wait_event_interruptible(ae4cmd_q->q_w,
+					 ((atomic64_read(&ae4cmd_q->done_cnt)) <
+					  atomic64_read(&ae4cmd_q->intr_cnt)));
+
+		atomic64_inc(&ae4cmd_q->done_cnt);
+
+		mutex_lock(&ae4cmd_q->cmd_lock);
+		cridx = readl(cmd_q->reg_control + AE4_RD_IDX_OFF);
+		while ((ae4cmd_q->dridx != cridx) && !list_empty(&ae4cmd_q->cmd)) {
+			cmd = list_first_entry(&ae4cmd_q->cmd, struct pt_cmd, entry);
+			list_del(&cmd->entry);
+
+			ae4_check_status_error(ae4cmd_q, ae4cmd_q->dridx);
+			cmd->pt_cmd_callback(cmd->data, cmd->ret);
+
+			ae4cmd_q->q_cmd_count--;
+			ae4cmd_q->dridx = (ae4cmd_q->dridx + 1) % CMD_Q_LEN;
+
+			complete_all(&ae4cmd_q->cmp);
+		}
+		mutex_unlock(&ae4cmd_q->cmd_lock);
+	}
+}
+
+static irqreturn_t ae4_core_irq_handler(int irq, void *data)
+{
+	struct ae4_cmd_queue *ae4cmd_q = data;
+	struct pt_cmd_queue *cmd_q;
+	struct pt_device *pt;
+	u32 status;
+
+	cmd_q = &ae4cmd_q->cmd_q;
+	pt = cmd_q->pt;
+
+	pt->total_interrupts++;
+	atomic64_inc(&ae4cmd_q->intr_cnt);
+
+	status = readl(cmd_q->reg_control + AE4_INTR_STS_OFF);
+	if (status & BIT(0)) {
+		status &= GENMASK(31, 1);
+		writel(status, cmd_q->reg_control + AE4_INTR_STS_OFF);
+	}
+
+	wake_up(&ae4cmd_q->q_w);
+
+	return IRQ_HANDLED;
+}
+
+void ae4_destroy_work(struct ae4_device *ae4)
+{
+	struct ae4_cmd_queue *ae4cmd_q;
+	int i;
+
+	for (i = 0; i < ae4->cmd_q_count; i++) {
+		ae4cmd_q = &ae4->ae4cmd_q[i];
+
+		if (!ae4cmd_q->pws)
+			break;
+
+		cancel_delayed_work_sync(&ae4cmd_q->p_work);
+		destroy_workqueue(ae4cmd_q->pws);
+	}
+}
+
+int ae4_core_init(struct ae4_device *ae4)
+{
+	struct pt_device *pt = &ae4->pt;
+	struct ae4_cmd_queue *ae4cmd_q;
+	struct device *dev = pt->dev;
+	struct pt_cmd_queue *cmd_q;
+	int i, ret = 0;
+
+	writel(max_hw_q, pt->io_regs);
+
+	for (i = 0; i < max_hw_q; i++) {
+		ae4cmd_q = &ae4->ae4cmd_q[i];
+		ae4cmd_q->id = ae4->cmd_q_count;
+		ae4->cmd_q_count++;
+
+		cmd_q = &ae4cmd_q->cmd_q;
+		cmd_q->pt = pt;
+
+		cmd_q->reg_control = pt->io_regs + ((i + 1) * AE4_Q_SZ);
+
+		ret = devm_request_irq(dev, ae4->ae4_irq[i], ae4_core_irq_handler, 0,
+				       dev_name(pt->dev), ae4cmd_q);
+		if (ret)
+			return ret;
+
+		cmd_q->qsize = Q_SIZE(sizeof(struct ae4dma_desc));
+
+		cmd_q->qbase = dmam_alloc_coherent(dev, cmd_q->qsize, &cmd_q->qbase_dma,
+						   GFP_KERNEL);
+		if (!cmd_q->qbase)
+			return -ENOMEM;
+	}
+
+	for (i = 0; i < ae4->cmd_q_count; i++) {
+		ae4cmd_q = &ae4->ae4cmd_q[i];
+
+		cmd_q = &ae4cmd_q->cmd_q;
+
+		cmd_q->reg_control = pt->io_regs + ((i + 1) * AE4_Q_SZ);
+
+		/* Update the device registers with queue information. */
+		writel(CMD_Q_LEN, cmd_q->reg_control + AE4_MAX_IDX_OFF);
+
+		cmd_q->qdma_tail = cmd_q->qbase_dma;
+		writel(lower_32_bits(cmd_q->qdma_tail), cmd_q->reg_control + AE4_Q_BASE_L_OFF);
+		writel(upper_32_bits(cmd_q->qdma_tail), cmd_q->reg_control + AE4_Q_BASE_H_OFF);
+
+		INIT_LIST_HEAD(&ae4cmd_q->cmd);
+		init_waitqueue_head(&ae4cmd_q->q_w);
+
+		ae4cmd_q->pws = alloc_ordered_workqueue("ae4dma_%d", WQ_MEM_RECLAIM, ae4cmd_q->id);
+		if (!ae4cmd_q->pws) {
+			ae4_destroy_work(ae4);
+			return -ENOMEM;
+		}
+		INIT_DELAYED_WORK(&ae4cmd_q->p_work, ae4_pending_work);
+		queue_delayed_work(ae4cmd_q->pws, &ae4cmd_q->p_work, usecs_to_jiffies(100));
+
+		init_completion(&ae4cmd_q->cmp);
+	}
+
+	ret = pt_dmaengine_register(pt);
+	if (ret)
+		ae4_destroy_work(ae4);
+	else
+		ptdma_debugfs_setup(pt);
+
+	return ret;
+}
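Since max_hw_q is a read-only (0444) module parameter, a larger queue count can only be requested at load time. An illustrative invocation; the queue count here is an example only, bounded by MAX_AE4_HW_QUEUES = 16 in ae4dma.h:

    modprobe ae4dma max_hw_q=4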
drivers/dma/amd/ae4dma/ae4dma-pci.c (new file, 158 lines)

@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD AE4DMA driver
+ *
+ * Copyright (c) 2024, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Basavaraj Natikar <Basavaraj.Natikar@amd.com>
+ */
+
+#include "ae4dma.h"
+
+static int ae4_get_irqs(struct ae4_device *ae4)
+{
+	struct ae4_msix *ae4_msix = ae4->ae4_msix;
+	struct pt_device *pt = &ae4->pt;
+	struct device *dev = pt->dev;
+	struct pci_dev *pdev;
+	int i, v, ret;
+
+	pdev = to_pci_dev(dev);
+
+	for (v = 0; v < ARRAY_SIZE(ae4_msix->msix_entry); v++)
+		ae4_msix->msix_entry[v].entry = v;
+
+	ret = pci_alloc_irq_vectors(pdev, v, v, PCI_IRQ_MSIX);
+	if (ret != v) {
+		if (ret > 0)
+			pci_free_irq_vectors(pdev);
+
+		dev_err(dev, "could not enable MSI-X (%d), trying MSI\n", ret);
+		ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
+		if (ret < 0) {
+			dev_err(dev, "could not enable MSI (%d)\n", ret);
+			return ret;
+		}
+
+		ret = pci_irq_vector(pdev, 0);
+		if (ret < 0) {
+			pci_free_irq_vectors(pdev);
+			return ret;
+		}
+
+		for (i = 0; i < MAX_AE4_HW_QUEUES; i++)
+			ae4->ae4_irq[i] = ret;
+
+	} else {
+		ae4_msix->msix_count = ret;
+		for (i = 0; i < MAX_AE4_HW_QUEUES; i++)
+			ae4->ae4_irq[i] = ae4_msix->msix_entry[i].vector;
+	}
+
+	return ret;
+}
+
+static void ae4_free_irqs(struct ae4_device *ae4)
+{
+	struct ae4_msix *ae4_msix = ae4->ae4_msix;
+	struct pt_device *pt = &ae4->pt;
+	struct device *dev = pt->dev;
+	struct pci_dev *pdev;
+
+	pdev = to_pci_dev(dev);
+
+	if (ae4_msix && (ae4_msix->msix_count || ae4->ae4_irq[MAX_AE4_HW_QUEUES - 1]))
+		pci_free_irq_vectors(pdev);
+}
+
+static void ae4_deinit(struct ae4_device *ae4)
+{
+	ae4_free_irqs(ae4);
+}
+
+static int ae4_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	struct device *dev = &pdev->dev;
+	struct ae4_device *ae4;
+	struct pt_device *pt;
+	int bar_mask;
+	int ret = 0;
+
+	ae4 = devm_kzalloc(dev, sizeof(*ae4), GFP_KERNEL);
+	if (!ae4)
+		return -ENOMEM;
+
+	ae4->ae4_msix = devm_kzalloc(dev, sizeof(struct ae4_msix), GFP_KERNEL);
+	if (!ae4->ae4_msix)
+		return -ENOMEM;
+
+	ret = pcim_enable_device(pdev);
+	if (ret)
+		goto ae4_error;
+
+	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
+	ret = pcim_iomap_regions(pdev, bar_mask, "ae4dma");
+	if (ret)
+		goto ae4_error;
+
+	pt = &ae4->pt;
+	pt->dev = dev;
+	pt->ver = AE4_DMA_VERSION;
+
+	pt->io_regs = pcim_iomap_table(pdev)[0];
+	if (!pt->io_regs) {
+		ret = -ENOMEM;
+		goto ae4_error;
+	}
+
+	ret = ae4_get_irqs(ae4);
+	if (ret < 0)
+		goto ae4_error;
+
+	pci_set_master(pdev);
+
+	dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+
+	dev_set_drvdata(dev, ae4);
+
+	ret = ae4_core_init(ae4);
+	if (ret)
+		goto ae4_error;
+
+	return 0;
+
+ae4_error:
+	ae4_deinit(ae4);
+
+	return ret;
+}
+
+static void ae4_pci_remove(struct pci_dev *pdev)
+{
+	struct ae4_device *ae4 = dev_get_drvdata(&pdev->dev);
+
+	ae4_destroy_work(ae4);
+	ae4_deinit(ae4);
+}
+
+static const struct pci_device_id ae4_pci_table[] = {
+	{ PCI_VDEVICE(AMD, 0x14C8), },
+	{ PCI_VDEVICE(AMD, 0x14DC), },
+	{ PCI_VDEVICE(AMD, 0x149B), },
+	/* Last entry must be zero */
+	{ 0, }
+};
+MODULE_DEVICE_TABLE(pci, ae4_pci_table);
+
+static struct pci_driver ae4_pci_driver = {
+	.name = "ae4dma",
+	.id_table = ae4_pci_table,
+	.probe = ae4_pci_probe,
+	.remove = ae4_pci_remove,
+};
+
+module_pci_driver(ae4_pci_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("AMD AE4DMA driver");
drivers/dma/amd/ae4dma/ae4dma.h (new file, 100 lines)

@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * AMD AE4DMA driver
+ *
+ * Copyright (c) 2024, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Basavaraj Natikar <Basavaraj.Natikar@amd.com>
+ */
+#ifndef __AE4DMA_H__
+#define __AE4DMA_H__
+
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/dmapool.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+
+#include "../ptdma/ptdma.h"
+#include "../../virt-dma.h"
+
+#define MAX_AE4_HW_QUEUES	16
+
+#define AE4_DESC_COMPLETED	0x03
+
+#define AE4_MAX_IDX_OFF		0x08
+#define AE4_RD_IDX_OFF		0x0c
+#define AE4_WR_IDX_OFF		0x10
+#define AE4_INTR_STS_OFF	0x14
+#define AE4_Q_BASE_L_OFF	0x18
+#define AE4_Q_BASE_H_OFF	0x1c
+#define AE4_Q_SZ		0x20
+
+#define AE4_DMA_VERSION		4
+#define CMD_AE4_DESC_DW0_VAL	2
+
+struct ae4_msix {
+	int msix_count;
+	struct msix_entry msix_entry[MAX_AE4_HW_QUEUES];
+};
+
+struct ae4_cmd_queue {
+	struct ae4_device *ae4;
+	struct pt_cmd_queue cmd_q;
+	struct list_head cmd;
+	/* protect command operations */
+	struct mutex cmd_lock;
+	struct delayed_work p_work;
+	struct workqueue_struct *pws;
+	struct completion cmp;
+	wait_queue_head_t q_w;
+	atomic64_t intr_cnt;
+	atomic64_t done_cnt;
+	u64 q_cmd_count;
+	u32 dridx;
+	u32 tail_wi;
+	u32 id;
+};
+
+union dwou {
+	u32 dw0;
+	struct dword0 {
+		u8	byte0;
+		u8	byte1;
+		u16	timestamp;
+	} dws;
+};
+
+struct dword1 {
+	u8	status;
+	u8	err_code;
+	u16	desc_id;
+};
+
+struct ae4dma_desc {
+	union dwou dwouv;
+	struct dword1 dw1;
+	u32 length;
+	u32 rsvd;
+	u32 src_hi;
+	u32 src_lo;
+	u32 dst_hi;
+	u32 dst_lo;
+};
+
+struct ae4_device {
+	struct pt_device pt;
+	struct ae4_msix *ae4_msix;
+	struct ae4_cmd_queue ae4cmd_q[MAX_AE4_HW_QUEUES];
+	unsigned int ae4_irq[MAX_AE4_HW_QUEUES];
+	unsigned int cmd_q_count;
+};
+
+int ae4_core_init(struct ae4_device *ae4);
+void ae4_destroy_work(struct ae4_device *ae4);
+void ae4_check_status_error(struct ae4_cmd_queue *ae4cmd_q, int idx);
+#endif
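The descriptor laid out above is eight 32-bit words (two control dwords, length, a reserved word, and 64-bit source/destination addresses split into hi/lo halves), i.e. 32 bytes. A hypothetical build-time check, not part of the patch, could pin that down:

    static_assert(sizeof(struct ae4dma_desc) == 8 * sizeof(u32));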
drivers/dma/amd/ptdma/ptdma-debugfs.c

@@ -13,6 +13,7 @@
 #include <linux/seq_file.h>

 #include "ptdma.h"
+#include "../ae4dma/ae4dma.h"

 /* DebugFS helpers */
 #define	RI_VERSION_NUM	0x0000003F
@@ -23,11 +24,19 @@
 static int pt_debugfs_info_show(struct seq_file *s, void *p)
 {
 	struct pt_device *pt = s->private;
+	struct ae4_device *ae4;
 	unsigned int regval;

 	seq_printf(s, "Device name: %s\n", dev_name(pt->dev));
-	seq_printf(s, "   # Queues: %d\n", 1);
-	seq_printf(s, "     # Cmds: %d\n", pt->cmd_count);
+
+	if (pt->ver == AE4_DMA_VERSION) {
+		ae4 = container_of(pt, struct ae4_device, pt);
+		seq_printf(s, "   # Queues: %d\n", ae4->cmd_q_count);
+		seq_printf(s, "   # Cmds per queue: %d\n", CMD_Q_LEN);
+	} else {
+		seq_printf(s, "   # Queues: %d\n", 1);
+		seq_printf(s, "     # Cmds: %d\n", pt->cmd_count);
+	}

 	regval = ioread32(pt->io_regs + CMD_PT_VERSION);

@@ -55,6 +64,7 @@ static int pt_debugfs_stats_show(struct seq_file *s, void *p)
 static int pt_debugfs_queue_show(struct seq_file *s, void *p)
 {
 	struct pt_cmd_queue *cmd_q = s->private;
+	struct pt_device *pt;
 	unsigned int regval;

 	if (!cmd_q)
@@ -62,18 +72,24 @@ static int pt_debugfs_queue_show(struct seq_file *s, void *p)

 	seq_printf(s, "    Pass-Thru: %ld\n", cmd_q->total_pt_ops);

-	regval = ioread32(cmd_q->reg_control + 0x000C);
-
-	seq_puts(s, "   Enabled Interrupts:");
-	if (regval & INT_EMPTY_QUEUE)
-		seq_puts(s, " EMPTY");
-	if (regval & INT_QUEUE_STOPPED)
-		seq_puts(s, " STOPPED");
-	if (regval & INT_ERROR)
-		seq_puts(s, " ERROR");
-	if (regval & INT_COMPLETION)
-		seq_puts(s, " COMPLETION");
-	seq_puts(s, "\n");
+	pt = cmd_q->pt;
+	if (pt->ver == AE4_DMA_VERSION) {
+		regval = readl(cmd_q->reg_control + 0x4);
+		seq_printf(s, " Enabled Interrupts:: status 0x%x\n", regval);
+	} else {
+		regval = ioread32(cmd_q->reg_control + 0x000C);
+
+		seq_puts(s, "   Enabled Interrupts:");
+		if (regval & INT_EMPTY_QUEUE)
+			seq_puts(s, " EMPTY");
+		if (regval & INT_QUEUE_STOPPED)
+			seq_puts(s, " STOPPED");
+		if (regval & INT_ERROR)
+			seq_puts(s, " ERROR");
+		if (regval & INT_COMPLETION)
+			seq_puts(s, " COMPLETION");
+		seq_puts(s, "\n");
+	}

 	return 0;
 }
@@ -84,8 +100,12 @@ DEFINE_SHOW_ATTRIBUTE(pt_debugfs_stats);

 void ptdma_debugfs_setup(struct pt_device *pt)
 {
-	struct pt_cmd_queue *cmd_q;
 	struct dentry *debugfs_q_instance;
+	struct ae4_cmd_queue *ae4cmd_q;
+	struct pt_cmd_queue *cmd_q;
+	struct ae4_device *ae4;
+	char name[30];
+	int i;

 	if (!debugfs_initialized())
 		return;
@@ -96,11 +116,28 @@ void ptdma_debugfs_setup(struct pt_device *pt)
 	debugfs_create_file("stats", 0400, pt->dma_dev.dbg_dev_root, pt,
 			    &pt_debugfs_stats_fops);

-	cmd_q = &pt->cmd_q;
-
-	debugfs_q_instance =
-		debugfs_create_dir("q", pt->dma_dev.dbg_dev_root);
-
-	debugfs_create_file("stats", 0400, debugfs_q_instance, cmd_q,
-			    &pt_debugfs_queue_fops);
+	if (pt->ver == AE4_DMA_VERSION) {
+		ae4 = container_of(pt, struct ae4_device, pt);
+		for (i = 0; i < ae4->cmd_q_count; i++) {
+			ae4cmd_q = &ae4->ae4cmd_q[i];
+			cmd_q = &ae4cmd_q->cmd_q;
+
+			memset(name, 0, sizeof(name));
+			snprintf(name, 29, "q%d", ae4cmd_q->id);
+
+			debugfs_q_instance =
+				debugfs_create_dir(name, pt->dma_dev.dbg_dev_root);
+
+			debugfs_create_file("stats", 0400, debugfs_q_instance, cmd_q,
+					    &pt_debugfs_queue_fops);
+		}
+	} else {
+		debugfs_q_instance =
+			debugfs_create_dir("q", pt->dma_dev.dbg_dev_root);
+		cmd_q = &pt->cmd_q;
+		debugfs_create_file("stats", 0400, debugfs_q_instance, cmd_q,
+				    &pt_debugfs_queue_fops);
+	}
 }
+EXPORT_SYMBOL_GPL(ptdma_debugfs_setup);
drivers/dma/amd/ptdma/ptdma-dmaengine.c

@@ -9,9 +9,58 @@
  * Author: Gary R Hook <gary.hook@amd.com>
  */

+#include <linux/bitfield.h>
 #include "ptdma.h"
-#include "../dmaengine.h"
-#include "../virt-dma.h"
+#include "../ae4dma/ae4dma.h"
+#include "../../dmaengine.h"
+
+static char *ae4_error_codes[] = {
+	"",
+	"ERR 01: INVALID HEADER DW0",
+	"ERR 02: INVALID STATUS",
+	"ERR 03: INVALID LENGTH - 4 BYTE ALIGNMENT",
+	"ERR 04: INVALID SRC ADDR - 4 BYTE ALIGNMENT",
+	"ERR 05: INVALID DST ADDR - 4 BYTE ALIGNMENT",
+	"ERR 06: INVALID ALIGNMENT",
+	"ERR 07: INVALID DESCRIPTOR",
+};
+
+static void ae4_log_error(struct pt_device *d, int e)
+{
+	/* ERR 01 - 07 represents Invalid AE4 errors */
+	if (e <= 7)
+		dev_info(d->dev, "AE4DMA error: %s (0x%x)\n", ae4_error_codes[e], e);
+	/* ERR 08 - 15 represents Invalid Descriptor errors */
+	else if (e > 7 && e <= 15)
+		dev_info(d->dev, "AE4DMA error: %s (0x%x)\n", "INVALID DESCRIPTOR", e);
+	/* ERR 16 - 31 represents Firmware errors */
+	else if (e > 15 && e <= 31)
+		dev_info(d->dev, "AE4DMA error: %s (0x%x)\n", "FIRMWARE ERROR", e);
+	/* ERR 32 - 63 represents Fatal errors */
+	else if (e > 31 && e <= 63)
+		dev_info(d->dev, "AE4DMA error: %s (0x%x)\n", "FATAL ERROR", e);
+	/* ERR 64 - 255 represents PTE errors */
+	else if (e > 63 && e <= 255)
+		dev_info(d->dev, "AE4DMA error: %s (0x%x)\n", "PTE ERROR", e);
+	else
+		dev_info(d->dev, "Unknown AE4DMA error");
+}
+
+void ae4_check_status_error(struct ae4_cmd_queue *ae4cmd_q, int idx)
+{
+	struct pt_cmd_queue *cmd_q = &ae4cmd_q->cmd_q;
+	struct ae4dma_desc desc;
+	u8 status;
+
+	memcpy(&desc, &cmd_q->qbase[idx], sizeof(struct ae4dma_desc));
+	status = desc.dw1.status;
+	if (status && status != AE4_DESC_COMPLETED) {
+		cmd_q->cmd_error = desc.dw1.err_code;
+		if (cmd_q->cmd_error)
+			ae4_log_error(cmd_q->pt, cmd_q->cmd_error);
+	}
+}
+EXPORT_SYMBOL_GPL(ae4_check_status_error);

 static inline struct pt_dma_chan *to_pt_chan(struct dma_chan *dma_chan)
 {
@@ -45,7 +94,71 @@ static void pt_do_cleanup(struct virt_dma_desc *vd)
 	kmem_cache_free(pt->dma_desc_cache, desc);
 }

-static int pt_dma_start_desc(struct pt_dma_desc *desc)
+static struct pt_cmd_queue *pt_get_cmd_queue(struct pt_device *pt, struct pt_dma_chan *chan)
+{
+	struct ae4_cmd_queue *ae4cmd_q;
+	struct pt_cmd_queue *cmd_q;
+	struct ae4_device *ae4;
+
+	if (pt->ver == AE4_DMA_VERSION) {
+		ae4 = container_of(pt, struct ae4_device, pt);
+		ae4cmd_q = &ae4->ae4cmd_q[chan->id];
+		cmd_q = &ae4cmd_q->cmd_q;
+	} else {
+		cmd_q = &pt->cmd_q;
+	}
+
+	return cmd_q;
+}
+
+static int ae4_core_execute_cmd(struct ae4dma_desc *desc, struct ae4_cmd_queue *ae4cmd_q)
+{
+	bool soc = FIELD_GET(DWORD0_SOC, desc->dwouv.dw0);
+	struct pt_cmd_queue *cmd_q = &ae4cmd_q->cmd_q;
+
+	if (soc) {
+		desc->dwouv.dw0 |= FIELD_PREP(DWORD0_IOC, desc->dwouv.dw0);
+		desc->dwouv.dw0 &= ~DWORD0_SOC;
+	}
+
+	mutex_lock(&ae4cmd_q->cmd_lock);
+	memcpy(&cmd_q->qbase[ae4cmd_q->tail_wi], desc, sizeof(struct ae4dma_desc));
+	ae4cmd_q->q_cmd_count++;
+	ae4cmd_q->tail_wi = (ae4cmd_q->tail_wi + 1) % CMD_Q_LEN;
+	writel(ae4cmd_q->tail_wi, cmd_q->reg_control + AE4_WR_IDX_OFF);
+	mutex_unlock(&ae4cmd_q->cmd_lock);
+
+	wake_up(&ae4cmd_q->q_w);
+
+	return 0;
+}
+
+static int pt_core_perform_passthru_ae4(struct pt_cmd_queue *cmd_q,
+					struct pt_passthru_engine *pt_engine)
+{
+	struct ae4_cmd_queue *ae4cmd_q = container_of(cmd_q, struct ae4_cmd_queue, cmd_q);
+	struct ae4dma_desc desc;
+
+	cmd_q->cmd_error = 0;
+	cmd_q->total_pt_ops++;
+	memset(&desc, 0, sizeof(desc));
+	desc.dwouv.dws.byte0 = CMD_AE4_DESC_DW0_VAL;
+
+	desc.dw1.status = 0;
+	desc.dw1.err_code = 0;
+	desc.dw1.desc_id = 0;
+
+	desc.length = pt_engine->src_len;
+
+	desc.src_lo = upper_32_bits(pt_engine->src_dma);
+	desc.src_hi = lower_32_bits(pt_engine->src_dma);
+	desc.dst_lo = upper_32_bits(pt_engine->dst_dma);
+	desc.dst_hi = lower_32_bits(pt_engine->dst_dma);
+
+	return ae4_core_execute_cmd(&desc, ae4cmd_q);
+}
+
+static int pt_dma_start_desc(struct pt_dma_desc *desc, struct pt_dma_chan *chan)
 {
 	struct pt_passthru_engine *pt_engine;
 	struct pt_device *pt;
@@ -56,13 +169,18 @@ static int pt_dma_start_desc(struct pt_dma_desc *desc)

 	pt_cmd = &desc->pt_cmd;
 	pt = pt_cmd->pt;
-	cmd_q = &pt->cmd_q;
+
+	cmd_q = pt_get_cmd_queue(pt, chan);
+
 	pt_engine = &pt_cmd->passthru;

 	pt->tdata.cmd = pt_cmd;

 	/* Execute the command */
-	pt_cmd->ret = pt_core_perform_passthru(cmd_q, pt_engine);
+	if (pt->ver == AE4_DMA_VERSION)
+		pt_cmd->ret = pt_core_perform_passthru_ae4(cmd_q, pt_engine);
+	else
+		pt_cmd->ret = pt_core_perform_passthru(cmd_q, pt_engine);

 	return 0;
 }
@@ -151,7 +269,7 @@ static void pt_cmd_callback(void *data, int err)
 		if (!desc)
 			break;

-		ret = pt_dma_start_desc(desc);
+		ret = pt_dma_start_desc(desc, chan);
 		if (!ret)
 			break;

@@ -186,7 +304,10 @@ static struct pt_dma_desc *pt_create_desc(struct dma_chan *dma_chan,
 {
 	struct pt_dma_chan *chan = to_pt_chan(dma_chan);
 	struct pt_passthru_engine *pt_engine;
+	struct pt_device *pt = chan->pt;
+	struct ae4_cmd_queue *ae4cmd_q;
 	struct pt_dma_desc *desc;
+	struct ae4_device *ae4;
 	struct pt_cmd *pt_cmd;

 	desc = pt_alloc_dma_desc(chan, flags);
@@ -194,7 +315,7 @@ static struct pt_dma_desc *pt_create_desc(struct dma_chan *dma_chan,
 		return NULL;

 	pt_cmd = &desc->pt_cmd;
-	pt_cmd->pt = chan->pt;
+	pt_cmd->pt = pt;
 	pt_engine = &pt_cmd->passthru;
 	pt_cmd->engine = PT_ENGINE_PASSTHRU;
 	pt_engine->src_dma = src;
@@ -205,6 +326,14 @@ static struct pt_dma_desc *pt_create_desc(struct dma_chan *dma_chan,

 	desc->len = len;

+	if (pt->ver == AE4_DMA_VERSION) {
+		ae4 = container_of(pt, struct ae4_device, pt);
+		ae4cmd_q = &ae4->ae4cmd_q[chan->id];
+		mutex_lock(&ae4cmd_q->cmd_lock);
+		list_add_tail(&pt_cmd->entry, &ae4cmd_q->cmd);
+		mutex_unlock(&ae4cmd_q->cmd_lock);
+	}
+
 	return desc;
 }

@@ -258,24 +387,43 @@ static void pt_issue_pending(struct dma_chan *dma_chan)
 		pt_cmd_callback(desc, 0);
 }

+static void pt_check_status_trans_ae4(struct pt_device *pt, struct pt_cmd_queue *cmd_q)
+{
+	struct ae4_cmd_queue *ae4cmd_q = container_of(cmd_q, struct ae4_cmd_queue, cmd_q);
+	int i;
+
+	for (i = 0; i < CMD_Q_LEN; i++)
+		ae4_check_status_error(ae4cmd_q, i);
+}
+
 static enum dma_status
 pt_tx_status(struct dma_chan *c, dma_cookie_t cookie,
 	     struct dma_tx_state *txstate)
 {
-	struct pt_device *pt = to_pt_chan(c)->pt;
-	struct pt_cmd_queue *cmd_q = &pt->cmd_q;
+	struct pt_dma_chan *chan = to_pt_chan(c);
+	struct pt_device *pt = chan->pt;
+	struct pt_cmd_queue *cmd_q;
+
+	cmd_q = pt_get_cmd_queue(pt, chan);
+
+	if (pt->ver == AE4_DMA_VERSION)
+		pt_check_status_trans_ae4(pt, cmd_q);
+	else
+		pt_check_status_trans(pt, cmd_q);

-	pt_check_status_trans(pt, cmd_q);
 	return dma_cookie_status(c, cookie, txstate);
 }

 static int pt_pause(struct dma_chan *dma_chan)
 {
 	struct pt_dma_chan *chan = to_pt_chan(dma_chan);
+	struct pt_device *pt = chan->pt;
+	struct pt_cmd_queue *cmd_q;
 	unsigned long flags;

 	spin_lock_irqsave(&chan->vc.lock, flags);
-	pt_stop_queue(&chan->pt->cmd_q);
+	cmd_q = pt_get_cmd_queue(pt, chan);
+	pt_stop_queue(cmd_q);
 	spin_unlock_irqrestore(&chan->vc.lock, flags);

 	return 0;
@@ -285,10 +433,13 @@ static int pt_resume(struct dma_chan *dma_chan)
 {
 	struct pt_dma_chan *chan = to_pt_chan(dma_chan);
 	struct pt_dma_desc *desc = NULL;
+	struct pt_device *pt = chan->pt;
+	struct pt_cmd_queue *cmd_q;
 	unsigned long flags;

 	spin_lock_irqsave(&chan->vc.lock, flags);
-	pt_start_queue(&chan->pt->cmd_q);
+	cmd_q = pt_get_cmd_queue(pt, chan);
+	pt_start_queue(cmd_q);
 	desc = pt_next_dma_desc(chan);
 	spin_unlock_irqrestore(&chan->vc.lock, flags);

@@ -302,11 +453,17 @@ static int pt_resume(struct dma_chan *dma_chan)
 static int pt_terminate_all(struct dma_chan *dma_chan)
 {
 	struct pt_dma_chan *chan = to_pt_chan(dma_chan);
+	struct pt_device *pt = chan->pt;
+	struct pt_cmd_queue *cmd_q;
 	unsigned long flags;
-	struct pt_cmd_queue *cmd_q = &chan->pt->cmd_q;
 	LIST_HEAD(head);

-	iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_control + 0x0010);
+	cmd_q = pt_get_cmd_queue(pt, chan);
+	if (pt->ver == AE4_DMA_VERSION)
+		pt_stop_queue(cmd_q);
+	else
+		iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_control + 0x0010);
+
 	spin_lock_irqsave(&chan->vc.lock, flags);
 	vchan_get_all_descriptors(&chan->vc, &head);
 	spin_unlock_irqrestore(&chan->vc.lock, flags);
@@ -319,14 +476,24 @@ static int pt_terminate_all(struct dma_chan *dma_chan)

 int pt_dmaengine_register(struct pt_device *pt)
 {
-	struct pt_dma_chan *chan;
 	struct dma_device *dma_dev = &pt->dma_dev;
-	char *cmd_cache_name;
+	struct ae4_cmd_queue *ae4cmd_q = NULL;
+	struct ae4_device *ae4 = NULL;
+	struct pt_dma_chan *chan;
 	char *desc_cache_name;
-	int ret;
+	char *cmd_cache_name;
+	int ret, i;
+
+	if (pt->ver == AE4_DMA_VERSION)
+		ae4 = container_of(pt, struct ae4_device, pt);
+
+	if (ae4)
+		pt->pt_dma_chan = devm_kcalloc(pt->dev, ae4->cmd_q_count,
+					       sizeof(*pt->pt_dma_chan), GFP_KERNEL);
+	else
+		pt->pt_dma_chan = devm_kzalloc(pt->dev, sizeof(*pt->pt_dma_chan),
+					       GFP_KERNEL);

-	pt->pt_dma_chan = devm_kzalloc(pt->dev, sizeof(*pt->pt_dma_chan),
-				       GFP_KERNEL);
 	if (!pt->pt_dma_chan)
 		return -ENOMEM;

@@ -368,9 +535,6 @@ int pt_dmaengine_register(struct pt_device *pt)

 	INIT_LIST_HEAD(&dma_dev->channels);

-	chan = pt->pt_dma_chan;
-	chan->pt = pt;
-
 	/* Set base and prep routines */
 	dma_dev->device_free_chan_resources = pt_free_chan_resources;
 	dma_dev->device_prep_dma_memcpy = pt_prep_dma_memcpy;
@@ -382,8 +546,21 @@ int pt_dmaengine_register(struct pt_device *pt)
 	dma_dev->device_terminate_all = pt_terminate_all;
 	dma_dev->device_synchronize = pt_synchronize;

-	chan->vc.desc_free = pt_do_cleanup;
-	vchan_init(&chan->vc, dma_dev);
+	if (ae4) {
+		for (i = 0; i < ae4->cmd_q_count; i++) {
+			chan = pt->pt_dma_chan + i;
+			ae4cmd_q = &ae4->ae4cmd_q[i];
+			chan->id = ae4cmd_q->id;
+			chan->pt = pt;
+			chan->vc.desc_free = pt_do_cleanup;
+			vchan_init(&chan->vc, dma_dev);
+		}
+	} else {
+		chan = pt->pt_dma_chan;
+		chan->pt = pt;
+		chan->vc.desc_free = pt_do_cleanup;
+		vchan_init(&chan->vc, dma_dev);
+	}

 	ret = dma_async_device_register(dma_dev);
 	if (ret)
@@ -399,6 +576,7 @@ err_cache:

 	return ret;
 }
+EXPORT_SYMBOL_GPL(pt_dmaengine_register);

 void pt_dmaengine_unregister(struct pt_device *pt)
 {
drivers/dma/amd/ptdma/ptdma.h

@@ -22,7 +22,7 @@
 #include <linux/wait.h>
 #include <linux/dmapool.h>

-#include "../virt-dma.h"
+#include "../../virt-dma.h"

 #define MAX_PT_NAME_LEN			16
 #define MAX_DMAPOOL_NAME_LEN		32
@@ -184,6 +184,7 @@ struct pt_dma_desc {
 struct pt_dma_chan {
 	struct virt_dma_chan vc;
 	struct pt_device *pt;
+	u32 id;
 };

 struct pt_cmd_queue {
@@ -262,6 +263,7 @@ struct pt_device {
 	unsigned long total_interrupts;

 	struct pt_tasklet_data tdata;
+	int ver;
 };

 /*
drivers/dma/amd/qdma/qdma.c

@@ -283,16 +283,20 @@ static int qdma_check_queue_status(struct qdma_device *qdev,

 static int qdma_clear_queue_context(const struct qdma_queue *queue)
 {
-	enum qdma_ctxt_type h2c_types[] = { QDMA_CTXT_DESC_SW_H2C,
-					    QDMA_CTXT_DESC_HW_H2C,
-					    QDMA_CTXT_DESC_CR_H2C,
-					    QDMA_CTXT_PFTCH, };
-	enum qdma_ctxt_type c2h_types[] = { QDMA_CTXT_DESC_SW_C2H,
-					    QDMA_CTXT_DESC_HW_C2H,
-					    QDMA_CTXT_DESC_CR_C2H,
-					    QDMA_CTXT_PFTCH, };
+	static const enum qdma_ctxt_type h2c_types[] = {
+		QDMA_CTXT_DESC_SW_H2C,
+		QDMA_CTXT_DESC_HW_H2C,
+		QDMA_CTXT_DESC_CR_H2C,
+		QDMA_CTXT_PFTCH,
+	};
+	static const enum qdma_ctxt_type c2h_types[] = {
+		QDMA_CTXT_DESC_SW_C2H,
+		QDMA_CTXT_DESC_HW_C2H,
+		QDMA_CTXT_DESC_CR_C2H,
+		QDMA_CTXT_PFTCH,
+	};
 	struct qdma_device *qdev = queue->qdev;
-	enum qdma_ctxt_type *type;
+	const enum qdma_ctxt_type *type;
 	int ret, num, i;

 	if (queue->dir == DMA_MEM_TO_DEV) {
drivers/dma/idxd/cdev.c

@@ -28,7 +28,6 @@ struct idxd_cdev_context {
  * global to avoid conflict file names.
  */
 static DEFINE_IDA(file_ida);
-static DEFINE_MUTEX(ida_lock);

 /*
  * ictx is an array based off of accelerator types. enum idxd_type
@@ -123,9 +122,7 @@ static void idxd_file_dev_release(struct device *dev)
 	struct idxd_device *idxd = wq->idxd;
 	int rc;

-	mutex_lock(&ida_lock);
 	ida_free(&file_ida, ctx->id);
-	mutex_unlock(&ida_lock);

 	/* Wait for in-flight operations to complete. */
 	if (wq_shared(wq)) {
@@ -284,9 +281,7 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
 	}

 	idxd_cdev = wq->idxd_cdev;
-	mutex_lock(&ida_lock);
 	ctx->id = ida_alloc(&file_ida, GFP_KERNEL);
-	mutex_unlock(&ida_lock);
 	if (ctx->id < 0) {
 		dev_warn(dev, "ida alloc failure\n");
 		goto failed_ida;
drivers/dma/idxd/init.c

@@ -78,6 +78,8 @@ static struct pci_device_id idxd_pci_tbl[] = {
 	{ PCI_DEVICE_DATA(INTEL, IAX_SPR0, &idxd_driver_data[IDXD_TYPE_IAX]) },
 	/* IAA on DMR platforms */
 	{ PCI_DEVICE_DATA(INTEL, IAA_DMR, &idxd_driver_data[IDXD_TYPE_IAX]) },
+	/* IAA PTL platforms */
+	{ PCI_DEVICE_DATA(INTEL, IAA_PTL, &idxd_driver_data[IDXD_TYPE_IAX]) },
 	{ 0, }
 };
 MODULE_DEVICE_TABLE(pci, idxd_pci_tbl);
drivers/dma/idxd/registers.h

@@ -9,6 +9,7 @@
 #define PCI_DEVICE_ID_INTEL_DSA_GNRD	0x11fb
 #define PCI_DEVICE_ID_INTEL_DSA_DMR	0x1212
 #define PCI_DEVICE_ID_INTEL_IAA_DMR	0x1216
+#define PCI_DEVICE_ID_INTEL_IAA_PTL	0xb02d

 #define DEVICE_VERSION_1		0x100
 #define DEVICE_VERSION_2		0x200
drivers/dma/ptdma/Kconfig (deleted, moved to drivers/dma/amd/Kconfig)

@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-config AMD_PTDMA
-	tristate "AMD PassThru DMA Engine"
-	depends on X86_64 && PCI
-	select DMA_ENGINE
-	select DMA_VIRTUAL_CHANNELS
-	help
-	  Enable support for the AMD PTDMA controller. This controller
-	  provides DMA capabilities to perform high bandwidth memory to
-	  memory and IO copy operations. It performs DMA transfer through
-	  queue-based descriptor management. This DMA controller is intended
-	  to be used with AMD Non-Transparent Bridge devices and not for
-	  general purpose peripheral DMA.
drivers/dma/sh/rcar-dmac.c

@@ -2023,6 +2023,10 @@ static const struct of_device_id rcar_dmac_of_ids[] = {
 		.compatible = "renesas,rcar-gen4-dmac",
 		.data = &rcar_gen4_dmac_data,
 	}, {
+		/*
+		 * Backward compatibility for between v5.12 - v5.19
+		 * which didn't combined with "renesas,rcar-gen4-dmac"
+		 */
 		.compatible = "renesas,dmac-r8a779a0",
 		.data = &rcar_gen4_dmac_data,
 	},
drivers/dma/sun4i-dma.c

@@ -13,7 +13,9 @@
 #include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/of_dma.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
+#include <linux/reset.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>

@@ -31,12 +33,21 @@
 #define SUN4I_DMA_CFG_SRC_ADDR_MODE(mode)	((mode) << 5)
 #define SUN4I_DMA_CFG_SRC_DRQ_TYPE(type)	(type)

+#define SUNIV_DMA_CFG_DST_DATA_WIDTH(width)	((width) << 24)
+#define SUNIV_DMA_CFG_SRC_DATA_WIDTH(width)	((width) << 8)
+
+#define SUN4I_MAX_BURST		8
+#define SUNIV_MAX_BURST		4
+
 /** Normal DMA register values **/

 /* Normal DMA source/destination data request type values */
 #define SUN4I_NDMA_DRQ_TYPE_SDRAM	0x16
 #define SUN4I_NDMA_DRQ_TYPE_LIMIT	(0x1F + 1)

+#define SUNIV_NDMA_DRQ_TYPE_SDRAM	0x11
+#define SUNIV_NDMA_DRQ_TYPE_LIMIT	(0x17 + 1)
+
 /** Normal DMA register layout **/

 /* Dedicated DMA source/destination address mode values */
@@ -50,6 +61,9 @@
 #define SUN4I_NDMA_CFG_BYTE_COUNT_MODE_REMAIN	BIT(15)
 #define SUN4I_NDMA_CFG_SRC_NON_SECURE		BIT(6)

+#define SUNIV_NDMA_CFG_CONT_MODE		BIT(29)
+#define SUNIV_NDMA_CFG_WAIT_STATE(n)		((n) << 26)
+
 /** Dedicated DMA register values **/

 /* Dedicated DMA source/destination address mode values */
@@ -62,6 +76,9 @@
 #define SUN4I_DDMA_DRQ_TYPE_SDRAM	0x1
 #define SUN4I_DDMA_DRQ_TYPE_LIMIT	(0x1F + 1)

+#define SUNIV_DDMA_DRQ_TYPE_SDRAM	0x1
+#define SUNIV_DDMA_DRQ_TYPE_LIMIT	(0x9 + 1)
+
 /** Dedicated DMA register layout **/

 /* Dedicated DMA configuration register layout */
@@ -115,6 +132,11 @@
 #define SUN4I_DMA_NR_MAX_VCHANS	\
 	(SUN4I_NDMA_NR_MAX_VCHANS + SUN4I_DDMA_NR_MAX_VCHANS)

+#define SUNIV_NDMA_NR_MAX_CHANNELS	4
+#define SUNIV_DDMA_NR_MAX_CHANNELS	4
+#define SUNIV_NDMA_NR_MAX_VCHANS	(24 * 2 - 1)
+#define SUNIV_DDMA_NR_MAX_VCHANS	10
+
 /* This set of SUN4I_DDMA timing parameters were found experimentally while
  * working with the SPI driver and seem to make it behave correctly */
 #define SUN4I_DDMA_MAGIC_SPI_PARAMETERS \
@@ -132,6 +154,33 @@
 #define SUN4I_DDMA_MAX_SEG_SIZE	SZ_16M
 #define SUN4I_DMA_MAX_SEG_SIZE	SUN4I_NDMA_MAX_SEG_SIZE

+/*
+ * Hardware channels / ports representation
+ *
+ * The hardware is used in several SoCs, with differing numbers
+ * of channels and endpoints. This structure ties those numbers
+ * to a certain compatible string.
+ */
+struct sun4i_dma_config {
+	u32 ndma_nr_max_channels;
+	u32 ndma_nr_max_vchans;
+
+	u32 ddma_nr_max_channels;
+	u32 ddma_nr_max_vchans;
+
+	u32 dma_nr_max_channels;
+
+	void (*set_dst_data_width)(u32 *p_cfg, s8 data_width);
+	void (*set_src_data_width)(u32 *p_cfg, s8 data_width);
+	int (*convert_burst)(u32 maxburst);
+
+	u8 ndma_drq_sdram;
+	u8 ddma_drq_sdram;
+
+	u8 max_burst;
+	bool has_reset;
+};
+
 struct sun4i_dma_pchan {
 	/* Register base of channel */
 	void __iomem *base;
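To illustrate how the new callbacks and limits tie together, a per-compatible entry for the F1C100s might be populated as below; this is a sketch assembled from the constants and helpers this patch introduces, not necessarily the driver's actual table entry:

    static const struct sun4i_dma_config suniv_f1c100s_dma_cfg = {
            .ndma_nr_max_channels = SUNIV_NDMA_NR_MAX_CHANNELS,
            .ndma_nr_max_vchans   = SUNIV_NDMA_NR_MAX_VCHANS,
            .ddma_nr_max_channels = SUNIV_DDMA_NR_MAX_CHANNELS,
            .ddma_nr_max_vchans   = SUNIV_DDMA_NR_MAX_VCHANS,
            .dma_nr_max_channels  = SUNIV_NDMA_NR_MAX_CHANNELS + SUNIV_DDMA_NR_MAX_CHANNELS,
            .set_dst_data_width   = set_dst_data_width_f1c100s,
            .set_src_data_width   = set_src_data_width_f1c100s,
            .convert_burst        = convert_burst_f1c100s,
            .ndma_drq_sdram       = SUNIV_NDMA_DRQ_TYPE_SDRAM,
            .ddma_drq_sdram       = SUNIV_DDMA_DRQ_TYPE_SDRAM,
            .max_burst            = SUNIV_MAX_BURST,
            .has_reset            = true,
    };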
@ -170,7 +219,7 @@ struct sun4i_dma_contract {
|
|||||||
};
|
};
|
||||||
|
|
||||||
struct sun4i_dma_dev {
|
struct sun4i_dma_dev {
|
||||||
DECLARE_BITMAP(pchans_used, SUN4I_DMA_NR_MAX_CHANNELS);
|
unsigned long *pchans_used;
|
||||||
struct dma_device slave;
|
struct dma_device slave;
|
||||||
struct sun4i_dma_pchan *pchans;
|
struct sun4i_dma_pchan *pchans;
|
||||||
struct sun4i_dma_vchan *vchans;
|
struct sun4i_dma_vchan *vchans;
|
||||||
@ -178,6 +227,8 @@ struct sun4i_dma_dev {
|
|||||||
struct clk *clk;
|
struct clk *clk;
|
||||||
int irq;
|
int irq;
|
||||||
spinlock_t lock;
|
spinlock_t lock;
|
||||||
|
const struct sun4i_dma_config *cfg;
|
||||||
|
struct reset_control *rst;
|
||||||
};
|
};
|
||||||
|
|
||||||
static struct sun4i_dma_dev *to_sun4i_dma_dev(struct dma_device *dev)
|
static struct sun4i_dma_dev *to_sun4i_dma_dev(struct dma_device *dev)
|
||||||
@ -200,7 +251,27 @@ static struct device *chan2dev(struct dma_chan *chan)
|
|||||||
return &chan->dev->device;
|
return &chan->dev->device;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int convert_burst(u32 maxburst)
|
static void set_dst_data_width_a10(u32 *p_cfg, s8 data_width)
|
||||||
|
{
|
||||||
|
*p_cfg |= SUN4I_DMA_CFG_DST_DATA_WIDTH(data_width);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void set_src_data_width_a10(u32 *p_cfg, s8 data_width)
|
||||||
|
{
|
||||||
|
*p_cfg |= SUN4I_DMA_CFG_SRC_DATA_WIDTH(data_width);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void set_dst_data_width_f1c100s(u32 *p_cfg, s8 data_width)
|
||||||
|
{
|
||||||
|
*p_cfg |= SUNIV_DMA_CFG_DST_DATA_WIDTH(data_width);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void set_src_data_width_f1c100s(u32 *p_cfg, s8 data_width)
|
||||||
|
{
|
||||||
|
*p_cfg |= SUNIV_DMA_CFG_SRC_DATA_WIDTH(data_width);
|
||||||
|
}
|
||||||
|
|
||||||
|
static int convert_burst_a10(u32 maxburst)
|
||||||
{
|
{
|
||||||
if (maxburst > 8)
|
if (maxburst > 8)
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
@ -209,6 +280,15 @@ static int convert_burst(u32 maxburst)
|
|||||||
return (maxburst >> 2);
|
return (maxburst >> 2);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int convert_burst_f1c100s(u32 maxburst)
|
||||||
|
{
|
||||||
|
if (maxburst > 4)
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
|
/* 1 -> 0, 4 -> 1 */
|
||||||
|
return (maxburst >> 2);
|
||||||
|
}
|
||||||
|
|
||||||
static int convert_buswidth(enum dma_slave_buswidth addr_width)
|
static int convert_buswidth(enum dma_slave_buswidth addr_width)
|
||||||
{
|
{
|
||||||
if (addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES)
|
if (addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES)
|
||||||
@ -233,15 +313,15 @@ static struct sun4i_dma_pchan *find_and_use_pchan(struct sun4i_dma_dev *priv,
|
|||||||
int i, max;
|
int i, max;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* pchans 0-SUN4I_NDMA_NR_MAX_CHANNELS are normal, and
|
* pchans 0-priv->cfg->ndma_nr_max_channels are normal, and
|
||||||
* SUN4I_NDMA_NR_MAX_CHANNELS+ are dedicated ones
|
* priv->cfg->ndma_nr_max_channels+ are dedicated ones
|
||||||
*/
|
*/
|
||||||
if (vchan->is_dedicated) {
|
if (vchan->is_dedicated) {
|
||||||
i = SUN4I_NDMA_NR_MAX_CHANNELS;
|
i = priv->cfg->ndma_nr_max_channels;
|
||||||
max = SUN4I_DMA_NR_MAX_CHANNELS;
|
max = priv->cfg->dma_nr_max_channels;
|
||||||
} else {
|
} else {
|
||||||
i = 0;
|
i = 0;
|
||||||
max = SUN4I_NDMA_NR_MAX_CHANNELS;
|
max = priv->cfg->ndma_nr_max_channels;
|
||||||
}
|
}
|
||||||
|
|
||||||
spin_lock_irqsave(&priv->lock, flags);
|
spin_lock_irqsave(&priv->lock, flags);
|
||||||
@@ -444,6 +524,7 @@ generate_ndma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
 		      size_t len, struct dma_slave_config *sconfig,
 		      enum dma_transfer_direction direction)
 {
+	struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
 	struct sun4i_dma_promise *promise;
 	int ret;
 
@@ -467,13 +548,13 @@ generate_ndma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
 		 sconfig->src_addr_width, sconfig->dst_addr_width);
 
 	/* Source burst */
-	ret = convert_burst(sconfig->src_maxburst);
+	ret = priv->cfg->convert_burst(sconfig->src_maxburst);
 	if (ret < 0)
 		goto fail;
 	promise->cfg |= SUN4I_DMA_CFG_SRC_BURST_LENGTH(ret);
 
 	/* Destination burst */
-	ret = convert_burst(sconfig->dst_maxburst);
+	ret = priv->cfg->convert_burst(sconfig->dst_maxburst);
 	if (ret < 0)
 		goto fail;
 	promise->cfg |= SUN4I_DMA_CFG_DST_BURST_LENGTH(ret);
@@ -482,13 +563,13 @@ generate_ndma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
 	ret = convert_buswidth(sconfig->src_addr_width);
 	if (ret < 0)
 		goto fail;
-	promise->cfg |= SUN4I_DMA_CFG_SRC_DATA_WIDTH(ret);
+	priv->cfg->set_src_data_width(&promise->cfg, ret);
 
 	/* Destination bus width */
 	ret = convert_buswidth(sconfig->dst_addr_width);
 	if (ret < 0)
 		goto fail;
-	promise->cfg |= SUN4I_DMA_CFG_DST_DATA_WIDTH(ret);
+	priv->cfg->set_dst_data_width(&promise->cfg, ret);
 
 	return promise;
 
@@ -510,6 +591,7 @@ static struct sun4i_dma_promise *
 generate_ddma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
 		      size_t len, struct dma_slave_config *sconfig)
 {
+	struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
 	struct sun4i_dma_promise *promise;
 	int ret;
 
@@ -524,13 +606,13 @@ generate_ddma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
 		       SUN4I_DDMA_CFG_BYTE_COUNT_MODE_REMAIN;
 
 	/* Source burst */
-	ret = convert_burst(sconfig->src_maxburst);
+	ret = priv->cfg->convert_burst(sconfig->src_maxburst);
 	if (ret < 0)
 		goto fail;
 	promise->cfg |= SUN4I_DMA_CFG_SRC_BURST_LENGTH(ret);
 
 	/* Destination burst */
-	ret = convert_burst(sconfig->dst_maxburst);
+	ret = priv->cfg->convert_burst(sconfig->dst_maxburst);
 	if (ret < 0)
 		goto fail;
 	promise->cfg |= SUN4I_DMA_CFG_DST_BURST_LENGTH(ret);
@@ -539,13 +621,13 @@ generate_ddma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
 	ret = convert_buswidth(sconfig->src_addr_width);
 	if (ret < 0)
 		goto fail;
-	promise->cfg |= SUN4I_DMA_CFG_SRC_DATA_WIDTH(ret);
+	priv->cfg->set_src_data_width(&promise->cfg, ret);
 
 	/* Destination bus width */
 	ret = convert_buswidth(sconfig->dst_addr_width);
 	if (ret < 0)
 		goto fail;
-	promise->cfg |= SUN4I_DMA_CFG_DST_DATA_WIDTH(ret);
+	priv->cfg->set_dst_data_width(&promise->cfg, ret);
 
 	return promise;
 
@@ -622,6 +704,7 @@ static struct dma_async_tx_descriptor *
 sun4i_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
 			  dma_addr_t src, size_t len, unsigned long flags)
 {
+	struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
 	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
 	struct dma_slave_config *sconfig = &vchan->cfg;
 	struct sun4i_dma_promise *promise;
@@ -638,8 +721,8 @@ sun4i_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
 	 */
 	sconfig->src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 	sconfig->dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
-	sconfig->src_maxburst = 8;
-	sconfig->dst_maxburst = 8;
+	sconfig->src_maxburst = priv->cfg->max_burst;
+	sconfig->dst_maxburst = priv->cfg->max_burst;
 
 	if (vchan->is_dedicated)
 		promise = generate_ddma_promise(chan, src, dest, len, sconfig);
@@ -654,11 +737,13 @@ sun4i_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
 
 	/* Configure memcpy mode */
 	if (vchan->is_dedicated) {
-		promise->cfg |= SUN4I_DMA_CFG_SRC_DRQ_TYPE(SUN4I_DDMA_DRQ_TYPE_SDRAM) |
-				SUN4I_DMA_CFG_DST_DRQ_TYPE(SUN4I_DDMA_DRQ_TYPE_SDRAM);
+		promise->cfg |=
+			SUN4I_DMA_CFG_SRC_DRQ_TYPE(priv->cfg->ddma_drq_sdram) |
+			SUN4I_DMA_CFG_DST_DRQ_TYPE(priv->cfg->ddma_drq_sdram);
 	} else {
-		promise->cfg |= SUN4I_DMA_CFG_SRC_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM) |
-				SUN4I_DMA_CFG_DST_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM);
+		promise->cfg |=
+			SUN4I_DMA_CFG_SRC_DRQ_TYPE(priv->cfg->ndma_drq_sdram) |
+			SUN4I_DMA_CFG_DST_DRQ_TYPE(priv->cfg->ndma_drq_sdram);
 	}
 
 	/* Fill the contract with our only promise */
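After this change a memcpy promise is assembled entirely from per-variant values: the SDRAM DRQ type for both endpoints comes from `priv->cfg`, while the burst and width fields are filled in by the callbacks shown earlier. A rough sketch of how such a cfg word composes, with invented field shifts that only illustrate the idea:

#include <stdint.h>
#include <stdio.h>

/* Illustrative shifts only; the real layout is in the driver's
 * register macros and is not reproduced here. */
#define CFG_SRC_DRQ(t)    ((uint32_t)(t) << 0)
#define CFG_SRC_BURST(b)  ((uint32_t)(b) << 7)
#define CFG_DST_DRQ(t)    ((uint32_t)(t) << 16)
#define CFG_DST_BURST(b)  ((uint32_t)(b) << 23)

int main(void)
{
	uint32_t drq_sdram = 0x1;	/* hypothetical per-variant value */
	uint32_t burst = 8 >> 2;	/* max_burst 8 encoded as 2 */
	uint32_t cfg = CFG_SRC_DRQ(drq_sdram) | CFG_SRC_BURST(burst) |
		       CFG_DST_DRQ(drq_sdram) | CFG_DST_BURST(burst);

	printf("memcpy cfg word: 0x%08x\n", cfg);
	return 0;
}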
@@ -673,6 +758,7 @@ sun4i_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf, size_t len,
 			  size_t period_len, enum dma_transfer_direction dir,
 			  unsigned long flags)
 {
+	struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
 	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
 	struct dma_slave_config *sconfig = &vchan->cfg;
 	struct sun4i_dma_promise *promise;
@@ -696,11 +782,11 @@ sun4i_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf, size_t len,
 	if (vchan->is_dedicated) {
 		io_mode = SUN4I_DDMA_ADDR_MODE_IO;
 		linear_mode = SUN4I_DDMA_ADDR_MODE_LINEAR;
-		ram_type = SUN4I_DDMA_DRQ_TYPE_SDRAM;
+		ram_type = priv->cfg->ddma_drq_sdram;
 	} else {
 		io_mode = SUN4I_NDMA_ADDR_MODE_IO;
 		linear_mode = SUN4I_NDMA_ADDR_MODE_LINEAR;
-		ram_type = SUN4I_NDMA_DRQ_TYPE_SDRAM;
+		ram_type = priv->cfg->ndma_drq_sdram;
 	}
 
 	if (dir == DMA_MEM_TO_DEV) {
@@ -793,6 +879,7 @@ sun4i_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 			unsigned int sg_len, enum dma_transfer_direction dir,
 			unsigned long flags, void *context)
 {
+	struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
 	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
 	struct dma_slave_config *sconfig = &vchan->cfg;
 	struct sun4i_dma_promise *promise;
@@ -818,11 +905,11 @@ sun4i_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	if (vchan->is_dedicated) {
 		io_mode = SUN4I_DDMA_ADDR_MODE_IO;
 		linear_mode = SUN4I_DDMA_ADDR_MODE_LINEAR;
-		ram_type = SUN4I_DDMA_DRQ_TYPE_SDRAM;
+		ram_type = priv->cfg->ddma_drq_sdram;
 	} else {
 		io_mode = SUN4I_NDMA_ADDR_MODE_IO;
 		linear_mode = SUN4I_NDMA_ADDR_MODE_LINEAR;
-		ram_type = SUN4I_NDMA_DRQ_TYPE_SDRAM;
+		ram_type = priv->cfg->ndma_drq_sdram;
 	}
 
 	if (dir == DMA_MEM_TO_DEV)
@@ -1150,6 +1237,10 @@ static int sun4i_dma_probe(struct platform_device *pdev)
 	if (!priv)
 		return -ENOMEM;
 
+	priv->cfg = of_device_get_match_data(&pdev->dev);
+	if (!priv->cfg)
+		return -ENODEV;
+
 	priv->base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(priv->base))
 		return PTR_ERR(priv->base);
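`of_device_get_match_data()` returns the `.data` pointer attached to whichever compatible string matched, so the probe above can refuse to bind (-ENODEV) if it somehow runs without a known variant. A self-contained model of that lookup, using plain strings instead of the OF machinery:

#include <stdio.h>
#include <string.h>

struct dma_config { const char *name; int max_burst; };

static const struct dma_config a10_cfg     = { "a10", 8 };
static const struct dma_config f1c100s_cfg = { "f1c100s", 4 };

struct of_match { const char *compatible; const void *data; };

static const struct of_match match[] = {
	{ "allwinner,sun4i-a10-dma", &a10_cfg },
	{ "allwinner,suniv-f1c100s-dma", &f1c100s_cfg },
	{ NULL, NULL },	/* sentinel, as in the driver's table */
};

static const void *get_match_data(const char *compatible)
{
	for (const struct of_match *m = match; m->compatible; m++)
		if (!strcmp(m->compatible, compatible))
			return m->data;
	return NULL;
}

int main(void)
{
	const struct dma_config *cfg =
		get_match_data("allwinner,suniv-f1c100s-dma");
	if (!cfg)
		return 1;	/* mirrors the -ENODEV bail-out in probe */
	printf("%s: max burst %d\n", cfg->name, cfg->max_burst);
	return 0;
}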
@@ -1164,6 +1255,13 @@ static int sun4i_dma_probe(struct platform_device *pdev)
 		return PTR_ERR(priv->clk);
 	}
 
+	if (priv->cfg->has_reset) {
+		priv->rst = devm_reset_control_get_exclusive_deasserted(&pdev->dev, NULL);
+		if (IS_ERR(priv->rst))
+			return dev_err_probe(&pdev->dev, PTR_ERR(priv->rst),
+					     "Failed to get reset control\n");
+	}
+
 	platform_set_drvdata(pdev, priv);
 	spin_lock_init(&priv->lock);
 
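Only the F1C100s variant sits behind a reset line, so the reset control is requested conditionally and a failure aborts probing only when the variant actually needs it. A minimal sketch of that gating pattern, with illustrative names standing in for the reset API:

#include <stdio.h>

struct variant_cfg { int has_reset; };

static int acquire_reset(void) { return 0; /* pretend success */ }

static int probe(const struct variant_cfg *cfg)
{
	if (cfg->has_reset) {
		int ret = acquire_reset();
		if (ret)
			return ret;	/* fail only when the IP needs the reset */
	}
	return 0;
}

int main(void)
{
	struct variant_cfg a10     = { .has_reset = 0 };
	struct variant_cfg f1c100s = { .has_reset = 1 };

	printf("a10: %d, f1c100s: %d\n", probe(&a10), probe(&f1c100s));
	return 0;
}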
@@ -1197,23 +1295,26 @@ static int sun4i_dma_probe(struct platform_device *pdev)
 
 	priv->slave.dev = &pdev->dev;
 
-	priv->pchans = devm_kcalloc(&pdev->dev, SUN4I_DMA_NR_MAX_CHANNELS,
+	priv->pchans = devm_kcalloc(&pdev->dev, priv->cfg->dma_nr_max_channels,
 				    sizeof(struct sun4i_dma_pchan), GFP_KERNEL);
 	priv->vchans = devm_kcalloc(&pdev->dev, SUN4I_DMA_NR_MAX_VCHANS,
 				    sizeof(struct sun4i_dma_vchan), GFP_KERNEL);
-	if (!priv->vchans || !priv->pchans)
+	priv->pchans_used = devm_kcalloc(&pdev->dev,
+					 BITS_TO_LONGS(priv->cfg->dma_nr_max_channels),
+					 sizeof(unsigned long), GFP_KERNEL);
+	if (!priv->vchans || !priv->pchans || !priv->pchans_used)
 		return -ENOMEM;
 
 	/*
-	 * [0..SUN4I_NDMA_NR_MAX_CHANNELS) are normal pchans, and
-	 * [SUN4I_NDMA_NR_MAX_CHANNELS..SUN4I_DMA_NR_MAX_CHANNELS) are
+	 * [0..priv->cfg->ndma_nr_max_channels) are normal pchans, and
+	 * [priv->cfg->ndma_nr_max_channels..priv->cfg->dma_nr_max_channels) are
 	 * dedicated ones
 	 */
-	for (i = 0; i < SUN4I_NDMA_NR_MAX_CHANNELS; i++)
+	for (i = 0; i < priv->cfg->ndma_nr_max_channels; i++)
 		priv->pchans[i].base = priv->base +
 			SUN4I_NDMA_CHANNEL_REG_BASE(i);
 
-	for (j = 0; i < SUN4I_DMA_NR_MAX_CHANNELS; i++, j++) {
+	for (j = 0; i < priv->cfg->dma_nr_max_channels; i++, j++) {
 		priv->pchans[i].base = priv->base +
 			SUN4I_DDMA_CHANNEL_REG_BASE(j);
 		priv->pchans[i].is_dedicated = 1;
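`pchans_used` is a bitmap with one bit per physical channel, and `BITS_TO_LONGS()` rounds the bit count up to whole `unsigned long` words so `devm_kcalloc()` can size the allocation. A standalone re-derivation of that macro (not the kernel's header, but the same arithmetic):

#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
	/* 16 channels fit in a single long on 32- and 64-bit hosts alike */
	printf("longs for 16 bits: %zu\n", BITS_TO_LONGS(16));
	printf("longs for 70 bits: %zu\n", BITS_TO_LONGS(70));
	return 0;
}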
@@ -1284,8 +1385,51 @@ static void sun4i_dma_remove(struct platform_device *pdev)
 	clk_disable_unprepare(priv->clk);
 }
 
+static struct sun4i_dma_config sun4i_a10_dma_cfg = {
+	.ndma_nr_max_channels = SUN4I_NDMA_NR_MAX_CHANNELS,
+	.ndma_nr_max_vchans = SUN4I_NDMA_NR_MAX_VCHANS,
+
+	.ddma_nr_max_channels = SUN4I_DDMA_NR_MAX_CHANNELS,
+	.ddma_nr_max_vchans = SUN4I_DDMA_NR_MAX_VCHANS,
+
+	.dma_nr_max_channels = SUN4I_DMA_NR_MAX_CHANNELS,
+
+	.set_dst_data_width = set_dst_data_width_a10,
+	.set_src_data_width = set_src_data_width_a10,
+	.convert_burst = convert_burst_a10,
+
+	.ndma_drq_sdram = SUN4I_NDMA_DRQ_TYPE_SDRAM,
+	.ddma_drq_sdram = SUN4I_DDMA_DRQ_TYPE_SDRAM,
+
+	.max_burst = SUN4I_MAX_BURST,
+	.has_reset = false,
+};
+
+static struct sun4i_dma_config suniv_f1c100s_dma_cfg = {
+	.ndma_nr_max_channels = SUNIV_NDMA_NR_MAX_CHANNELS,
+	.ndma_nr_max_vchans = SUNIV_NDMA_NR_MAX_VCHANS,
+
+	.ddma_nr_max_channels = SUNIV_DDMA_NR_MAX_CHANNELS,
+	.ddma_nr_max_vchans = SUNIV_DDMA_NR_MAX_VCHANS,
+
+	.dma_nr_max_channels = SUNIV_NDMA_NR_MAX_CHANNELS +
+			       SUNIV_DDMA_NR_MAX_CHANNELS,
+
+	.set_dst_data_width = set_dst_data_width_f1c100s,
+	.set_src_data_width = set_src_data_width_f1c100s,
+	.convert_burst = convert_burst_f1c100s,
+
+	.ndma_drq_sdram = SUNIV_NDMA_DRQ_TYPE_SDRAM,
+	.ddma_drq_sdram = SUNIV_DDMA_DRQ_TYPE_SDRAM,
+
+	.max_burst = SUNIV_MAX_BURST,
+	.has_reset = true,
+};
+
 static const struct of_device_id sun4i_dma_match[] = {
-	{ .compatible = "allwinner,sun4i-a10-dma" },
+	{ .compatible = "allwinner,sun4i-a10-dma", .data = &sun4i_a10_dma_cfg },
+	{ .compatible = "allwinner,suniv-f1c100s-dma",
+	  .data = &suniv_f1c100s_dma_cfg },
 	{ /* sentinel */ },
 };
 MODULE_DEVICE_TABLE(of, sun4i_dma_match);
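The `struct sun4i_dma_config` definition itself lands in an earlier hunk of the patch that is not shown here; its likely shape can be inferred from the two initializers and the callback definitions above (kernel types from <linux/types.h>; exact field types are an educated guess):

#include <linux/types.h>

struct sun4i_dma_config {
	u32 ndma_nr_max_channels;
	u32 ndma_nr_max_vchans;

	u32 ddma_nr_max_channels;
	u32 ddma_nr_max_vchans;

	u32 dma_nr_max_channels;

	void (*set_dst_data_width)(u32 *p_cfg, s8 data_width);
	void (*set_src_data_width)(u32 *p_cfg, s8 data_width);
	int (*convert_burst)(u32 maxburst);

	u8 ndma_drq_sdram;
	u8 ddma_drq_sdram;

	u8 max_burst;
	bool has_reset;
};

The remaining hunks of this merge touch the Xilinx xdma and VDMA drivers rather than sun4i-dma.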
@@ -390,15 +390,11 @@ static int xdma_xfer_start(struct xdma_chan *xchan)
  */
 static int xdma_xfer_stop(struct xdma_chan *xchan)
 {
-	int ret;
 	struct xdma_device *xdev = xchan->xdev_hdl;
 
 	/* clear run stop bit to prevent any further auto-triggering */
-	ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1C,
-			   CHAN_CTRL_RUN_STOP);
-	if (ret)
-		return ret;
-	return ret;
+	return regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1C,
+			    CHAN_CTRL_RUN_STOP);
 }
 
 /**
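The xdma change is a small cleanup: the old body stored the `regmap_write()` result, tested it, and then returned it on both paths anyway, so the conditional was dead weight. A standalone before/after illustration of why the two forms behave identically:

#include <stdio.h>

static int do_write(void) { return 0; /* stands in for regmap_write() */ }

static int stop_old(void)
{
	int ret = do_write();

	if (ret)
		return ret;
	return ret;	/* both paths return ret: the branch is redundant */
}

static int stop_new(void)
{
	return do_write();	/* same behaviour, less code */
}

int main(void)
{
	printf("old: %d, new: %d\n", stop_old(), stop_new());
	return 0;
}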
@@ -1404,16 +1404,18 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 
 	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
 
-	j = chan->desc_submitcount;
-	reg = dma_read(chan, XILINX_DMA_REG_PARK_PTR);
-	if (chan->direction == DMA_MEM_TO_DEV) {
-		reg &= ~XILINX_DMA_PARK_PTR_RD_REF_MASK;
-		reg |= j << XILINX_DMA_PARK_PTR_RD_REF_SHIFT;
-	} else {
-		reg &= ~XILINX_DMA_PARK_PTR_WR_REF_MASK;
-		reg |= j << XILINX_DMA_PARK_PTR_WR_REF_SHIFT;
+	if (config->park) {
+		j = chan->desc_submitcount;
+		reg = dma_read(chan, XILINX_DMA_REG_PARK_PTR);
+		if (chan->direction == DMA_MEM_TO_DEV) {
+			reg &= ~XILINX_DMA_PARK_PTR_RD_REF_MASK;
+			reg |= j << XILINX_DMA_PARK_PTR_RD_REF_SHIFT;
+		} else {
+			reg &= ~XILINX_DMA_PARK_PTR_WR_REF_MASK;
+			reg |= j << XILINX_DMA_PARK_PTR_WR_REF_SHIFT;
+		}
+		dma_write(chan, XILINX_DMA_REG_PARK_PTR, reg);
 	}
-	dma_write(chan, XILINX_DMA_REG_PARK_PTR, reg);
 
 	/* Start the hardware */
 	xilinx_dma_start(chan);
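The VDMA change wraps the park-pointer programming in `if (config->park)` so the PARK_PTR register is only read-modify-written when park mode is actually enabled, instead of unconditionally on every transfer start. A toy model of gating a mode-specific register write, with a plain variable standing in for the hardware register:

#include <stdbool.h>
#include <stdio.h>

struct chan_cfg { bool park; };

static unsigned int park_ptr_reg;	/* stands in for XILINX_DMA_REG_PARK_PTR */

static void start_transfer(const struct chan_cfg *cfg, unsigned int frame)
{
	if (cfg->park)			/* gate the write on park mode */
		park_ptr_reg = frame;
	/* ... start the hardware either way ... */
}

int main(void)
{
	struct chan_cfg circular = { .park = false };
	struct chan_cfg parked   = { .park = true };

	start_transfer(&circular, 3);
	printf("after circular: %u\n", park_ptr_reg);	/* untouched: 0 */
	start_transfer(&parked, 3);
	printf("after parked:   %u\n", park_ptr_reg);	/* programmed: 3 */
	return 0;
}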