Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
dmaengine second set of fixes for v6.8
Driver fixes for:

 - dw-edma fixes to improve driver and remote HDMA setup

 - fsl-edma and fsl-qdma fixes for SoC hang, irq init and byte
   calculations, and sparse fixes

 - idxd: safe user copy of completion record fix

 - ptdma: consistent DMA mask fix

-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEE+vs47OPLdNbVcHzyfBQHDyUjg0cFAmXkqM4ACgkQfBQHDyUj
g0flMg//bgnkYdt/Y4nDbJxPobQHSOLl86GbD9mIddeocwdsA/QQ4uz/WzN+rngY
0M7/7Zfq+RMq6lfesLMJ5DXycYBGIMQYZ1ItpIL4Xw3sSo2i6qFn18dSqbesRpkB
bsfdrbqCEHWwghkBszoTGXGx24XRdQzTUmMziL9jqdIvXXf5HCKeGSZnC5L6ssxs
JfdCee7ZSPK6gOVlBlN8zjzG8ZI2bwH+B1okyZwWljorPInr8UQ3ysR3neEUab6O
JMzVmp46LhTQmHEZxzBbfR18rbVhr0xRRs78UwJPhPrgp9PFOkxdLl9lGDh8Fa2x
jApi9cl5rSrW2bEv5fe8k5EM64G9G5arJU9F6+Goqm39ftDTktKBPhwmIR/NqOZF
AHmW2c+0trDMOg099oWd+ozgbMkdbrdjf6BA9vSLKsXLTFHDZEA1fEKifJP5NCSQ
ZQJVpQn0wiBOQSJxpr4mbk/n5JeKzt/uyQSF8Qo8Kp9OWXiNrjWzJ0bZnVafkTlU
E91WGiGjYQmh28DV432IM07IXVKLtIMa/BXuWMhOZY+/HUJK/AaRStxEB5kkLJDm
EExDe23Rviu0lXDW4cH+R14d4L/9EJY87Ynm2p85rOj/rnwW9gWyu3BwD3aBXa2e
yKlng355WCBISMl8wXpeMQe1/yaxf48YLdrmdLD95bT0w5Dc0gc=
=mX0o
-----END PGP SIGNATURE-----

Merge tag 'dmaengine-fix2-6.8' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine

Pull dmaengine fixes from Vinod Koul:

 - dw-edma fixes to improve driver and remote HDMA setup

 - fsl-edma and fsl-qdma fixes for SoC hang, irq init and byte
   calculations, and sparse fixes

 - idxd: safe user copy of completion record fix

 - ptdma: consistent DMA mask fix

* tag 'dmaengine-fix2-6.8' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine:
  dmaengine: ptdma: use consistent DMA masks
  dmaengine: fsl-qdma: add __iomem and struct in union to fix sparse warning
  dmaengine: idxd: Ensure safe user copy of completion record
  dmaengine: fsl-edma: correct max_segment_size setting
  dmaengine: idxd: Remove shadow Event Log head stored in idxd
  dmaengine: fsl-edma: correct calculation of 'nbytes' in multi-fifo scenario
  dmaengine: fsl-qdma: init irq after reg initialization
  dmaengine: fsl-qdma: fix SoC may hang on 16 byte unaligned read
  dmaengine: dw-edma: eDMA: Add sync read before starting the DMA transfer in remote setup
  dmaengine: dw-edma: HDMA: Add sync read before starting the DMA transfer in remote setup
  dmaengine: dw-edma: Add HDMA remote interrupt configuration
  dmaengine: dw-edma: HDMA_V0_REMOTEL_STOP_INT_EN typo fix
  dmaengine: dw-edma: Fix wrong interrupt bit set for HDMA
  dmaengine: dw-edma: Fix the ch_count hdma callback
This commit is contained in: d57dd2d24d
--- a/drivers/dma/dw-edma/dw-edma-v0-core.c
+++ b/drivers/dma/dw-edma/dw-edma-v0-core.c
@@ -346,6 +346,20 @@ static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
 		dw_edma_v0_write_ll_link(chunk, i, control, chunk->ll_region.paddr);
 }
 
+static void dw_edma_v0_sync_ll_data(struct dw_edma_chunk *chunk)
+{
+	/*
+	 * In case of remote eDMA engine setup, the DW PCIe RP/EP internal
+	 * configuration registers and application memory are normally accessed
+	 * over different buses. Ensure LL-data reaches the memory before the
+	 * doorbell register is toggled by issuing the dummy-read from the remote
+	 * LL memory in a hope that the MRd TLP will return only after the
+	 * last MWr TLP is completed
+	 */
+	if (!(chunk->chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL))
+		readl(chunk->ll_region.vaddr.io);
+}
+
 static void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
 {
 	struct dw_edma_chan *chan = chunk->chan;
@@ -412,6 +426,9 @@ static void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
 		SET_CH_32(dw, chan->dir, chan->id, llp.msb,
 			  upper_32_bits(chunk->ll_region.paddr));
 	}
+
+	dw_edma_v0_sync_ll_data(chunk);
+
 	/* Doorbell */
 	SET_RW_32(dw, chan->dir, doorbell,
 		  FIELD_PREP(EDMA_V0_DOORBELL_CH_MASK, chan->id));
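The comment above captures a general PCIe ordering rule: memory writes (MWr TLPs) are posted, so nothing guarantees they have landed by the time the driver rings the doorbell over a different bus. A read of the just-written region forces completion, because the read completion cannot pass the earlier writes. A minimal sketch of the pattern, with hypothetical ll_mem/doorbell pointers rather than the driver's real layout:

	/* Illustrative only: flush posted writes with a dummy read, then start. */
	static void ring_doorbell_flushed(void __iomem *ll_mem,
					  void __iomem *doorbell, u32 val)
	{
		(void)readl(ll_mem);	/* MRd returns only after prior MWr TLPs land */
		writel(val, doorbell);	/* engine now sees consistent LL data */
	}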
--- a/drivers/dma/dw-edma/dw-hdma-v0-core.c
+++ b/drivers/dma/dw-edma/dw-hdma-v0-core.c
@@ -65,18 +65,12 @@ static void dw_hdma_v0_core_off(struct dw_edma *dw)
 
 static u16 dw_hdma_v0_core_ch_count(struct dw_edma *dw, enum dw_edma_dir dir)
 {
-	u32 num_ch = 0;
-	int id;
-
-	for (id = 0; id < HDMA_V0_MAX_NR_CH; id++) {
-		if (GET_CH_32(dw, id, dir, ch_en) & BIT(0))
-			num_ch++;
-	}
-
-	if (num_ch > HDMA_V0_MAX_NR_CH)
-		num_ch = HDMA_V0_MAX_NR_CH;
-
-	return (u16)num_ch;
+	/*
+	 * The HDMA IP have no way to know the number of hardware channels
+	 * available, we set it to maximum channels and let the platform
+	 * set the right number of channels.
+	 */
+	return HDMA_V0_MAX_NR_CH;
 }
 
 static enum dma_status dw_hdma_v0_core_ch_status(struct dw_edma_chan *chan)
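Since the IP cannot report its own channel count, the platform/glue driver is expected to supply the real number. A hedged sketch of what that might look like, assuming the ll_wr_cnt/ll_rd_cnt fields of struct dw_edma_chip from include/linux/dma/edma.h (the counts here are made up):

	/* Hypothetical platform glue: declare how many channels are wired up. */
	chip->ll_wr_cnt = 4;	/* write channels present in this hardware */
	chip->ll_rd_cnt = 4;	/* read channels present in this hardware */
	ret = dw_edma_probe(chip);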
@@ -228,6 +222,20 @@ static void dw_hdma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
 		dw_hdma_v0_write_ll_link(chunk, i, control, chunk->ll_region.paddr);
 }
 
+static void dw_hdma_v0_sync_ll_data(struct dw_edma_chunk *chunk)
+{
+	/*
+	 * In case of remote HDMA engine setup, the DW PCIe RP/EP internal
+	 * configuration registers and application memory are normally accessed
+	 * over different buses. Ensure LL-data reaches the memory before the
+	 * doorbell register is toggled by issuing the dummy-read from the remote
+	 * LL memory in a hope that the MRd TLP will return only after the
+	 * last MWr TLP is completed
+	 */
+	if (!(chunk->chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL))
+		readl(chunk->ll_region.vaddr.io);
+}
+
 static void dw_hdma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
 {
 	struct dw_edma_chan *chan = chunk->chan;
@@ -242,7 +250,9 @@ static void dw_hdma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
 	/* Interrupt enable&unmask - done, abort */
 	tmp = GET_CH_32(dw, chan->dir, chan->id, int_setup) |
 	      HDMA_V0_STOP_INT_MASK | HDMA_V0_ABORT_INT_MASK |
-	      HDMA_V0_LOCAL_STOP_INT_EN | HDMA_V0_LOCAL_STOP_INT_EN;
+	      HDMA_V0_LOCAL_STOP_INT_EN | HDMA_V0_LOCAL_ABORT_INT_EN;
+	if (!(dw->chip->flags & DW_EDMA_CHIP_LOCAL))
+		tmp |= HDMA_V0_REMOTE_STOP_INT_EN | HDMA_V0_REMOTE_ABORT_INT_EN;
 	SET_CH_32(dw, chan->dir, chan->id, int_setup, tmp);
 	/* Channel control */
 	SET_CH_32(dw, chan->dir, chan->id, control1, HDMA_V0_LINKLIST_EN);
@@ -256,6 +266,9 @@ static void dw_hdma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
 	/* Set consumer cycle */
 	SET_CH_32(dw, chan->dir, chan->id, cycle_sync,
 		  HDMA_V0_CONSUMER_CYCLE_STAT | HDMA_V0_CONSUMER_CYCLE_BIT);
+
+	dw_hdma_v0_sync_ll_data(chunk);
+
 	/* Doorbell */
 	SET_CH_32(dw, chan->dir, chan->id, doorbell, HDMA_V0_DOORBELL_START);
 }
--- a/drivers/dma/dw-edma/dw-hdma-v0-regs.h
+++ b/drivers/dma/dw-edma/dw-hdma-v0-regs.h
@@ -15,7 +15,7 @@
 #define HDMA_V0_LOCAL_ABORT_INT_EN	BIT(6)
 #define HDMA_V0_REMOTE_ABORT_INT_EN	BIT(5)
 #define HDMA_V0_LOCAL_STOP_INT_EN	BIT(4)
-#define HDMA_V0_REMOTEL_STOP_INT_EN	BIT(3)
+#define HDMA_V0_REMOTE_STOP_INT_EN	BIT(3)
 #define HDMA_V0_ABORT_INT_MASK		BIT(2)
 #define HDMA_V0_STOP_INT_MASK		BIT(0)
 #define HDMA_V0_LINKLIST_EN		BIT(0)
--- a/drivers/dma/fsl-edma-common.c
+++ b/drivers/dma/fsl-edma-common.c
@@ -503,7 +503,7 @@ void fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan,
 	if (fsl_chan->is_multi_fifo) {
 		/* set mloff to support multiple fifo */
 		burst = cfg->direction == DMA_DEV_TO_MEM ?
-				cfg->src_addr_width : cfg->dst_addr_width;
+				cfg->src_maxburst : cfg->dst_maxburst;
 		nbytes |= EDMA_V3_TCD_NBYTES_MLOFF(-(burst * 4));
 		/* enable DMLOE/SMLOE */
 		if (cfg->direction == DMA_MEM_TO_DEV) {
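The minor-loop offset has to rewind the whole burst, measured in bytes, after each minor loop; the address width only describes a single bus beat. A worked example with assumed values (not taken from the driver):

	/* dst_maxburst = 8 words of 4 bytes each: rewind 8 * 4 = 32 bytes.
	 * The old code used dst_addr_width (4), rewinding only 16 bytes and
	 * mis-stepping multi-FIFO transfers. */
	burst = cfg->dst_maxburst;                          /* 8 */
	nbytes |= EDMA_V3_TCD_NBYTES_MLOFF(-(burst * 4));   /* MLOFF = -32 */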
--- a/drivers/dma/fsl-edma-common.h
+++ b/drivers/dma/fsl-edma-common.h
@@ -30,8 +30,9 @@
 #define EDMA_TCD_ATTR_SSIZE(x)		(((x) & GENMASK(2, 0)) << 8)
 #define EDMA_TCD_ATTR_SMOD(x)		(((x) & GENMASK(4, 0)) << 11)
 
-#define EDMA_TCD_CITER_CITER(x)		((x) & GENMASK(14, 0))
-#define EDMA_TCD_BITER_BITER(x)		((x) & GENMASK(14, 0))
+#define EDMA_TCD_ITER_MASK		GENMASK(14, 0)
+#define EDMA_TCD_CITER_CITER(x)		((x) & EDMA_TCD_ITER_MASK)
+#define EDMA_TCD_BITER_BITER(x)		((x) & EDMA_TCD_ITER_MASK)
 
 #define EDMA_TCD_CSR_START		BIT(0)
 #define EDMA_TCD_CSR_INT_MAJOR		BIT(1)
--- a/drivers/dma/fsl-edma-main.c
+++ b/drivers/dma/fsl-edma-main.c
@@ -10,6 +10,7 @@
  */
 
 #include <dt-bindings/dma/fsl-edma.h>
+#include <linux/bitfield.h>
 #include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/clk.h>
@@ -582,7 +583,8 @@ static int fsl_edma_probe(struct platform_device *pdev)
 					DMAENGINE_ALIGN_32_BYTES;
 
 	/* Per worst case 'nbytes = 1' take CITER as the max_seg_size */
-	dma_set_max_seg_size(fsl_edma->dma_dev.dev, 0x3fff);
+	dma_set_max_seg_size(fsl_edma->dma_dev.dev,
+			     FIELD_GET(EDMA_TCD_ITER_MASK, EDMA_TCD_ITER_MASK));
 
 	fsl_edma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
 
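CITER/BITER are 15-bit fields, so in the worst case of nbytes = 1 the largest contiguous segment equals the field's maximum value. FIELD_GET(mask, mask) is a compact way to compute that maximum; the arithmetic, for confirmation:

	/* EDMA_TCD_ITER_MASK = GENMASK(14, 0) = 0x7fff
	 * FIELD_GET(EDMA_TCD_ITER_MASK, EDMA_TCD_ITER_MASK)
	 *   = (0x7fff & 0x7fff) >> 0 = 32767
	 * so the old hard-coded 0x3fff (16383) halved the usable segment size. */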
--- a/drivers/dma/fsl-qdma.c
+++ b/drivers/dma/fsl-qdma.c
@@ -109,6 +109,7 @@
 #define FSL_QDMA_CMD_WTHROTL_OFFSET	20
 #define FSL_QDMA_CMD_DSEN_OFFSET	19
 #define FSL_QDMA_CMD_LWC_OFFSET		16
+#define FSL_QDMA_CMD_PF			BIT(17)
 
 /* Field definition for Descriptor status */
 #define QDMA_CCDF_STATUS_RTE		BIT(5)
@@ -160,6 +161,10 @@ struct fsl_qdma_format {
 			u8 __reserved1[2];
 			u8 cfg8b_w1;
 		} __packed;
+		struct {
+			__le32 __reserved2;
+			__le32 cmd;
+		} __packed;
 		__le64 data;
 	};
 } __packed;
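The anonymous struct gives sparse a typed __le32 window onto the low half of the 64-bit word, so the command can be stored without casts. The same pattern in miniature, with a made-up struct name:

	/* Sketch: one union, two views - raw 64-bit and typed halves. */
	struct demo_desc {
		union {
			struct {
				__le32 __reserved2;
				__le32 cmd;
			} __packed;
			__le64 data;
		};
	} __packed;

	static void demo_set_cmd(struct demo_desc *d, u32 host_cmd)
	{
		d->cmd = cpu_to_le32(host_cmd);	/* sparse-clean: __le32 = __le32 */
	}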
@@ -354,7 +359,6 @@ static void fsl_qdma_free_chan_resources(struct dma_chan *chan)
 static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
 				      dma_addr_t dst, dma_addr_t src, u32 len)
 {
-	u32 cmd;
 	struct fsl_qdma_format *sdf, *ddf;
 	struct fsl_qdma_format *ccdf, *csgf_desc, *csgf_src, *csgf_dest;
 
@@ -383,14 +387,11 @@ static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
 	/* This entry is the last entry. */
 	qdma_csgf_set_f(csgf_dest, len);
 	/* Descriptor Buffer */
-	cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE <<
-			  FSL_QDMA_CMD_RWTTYPE_OFFSET);
-	sdf->data = QDMA_SDDF_CMD(cmd);
+	sdf->cmd = cpu_to_le32((FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET) |
+			       FSL_QDMA_CMD_PF);
 
-	cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE <<
-			  FSL_QDMA_CMD_RWTTYPE_OFFSET);
-	cmd |= cpu_to_le32(FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET);
-	ddf->data = QDMA_SDDF_CMD(cmd);
+	ddf->cmd = cpu_to_le32((FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET) |
+			       (FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET));
 }
 
 /*
@@ -624,7 +625,7 @@ static int fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
 
 static int
 fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma,
-				 void *block,
+				 __iomem void *block,
 				 int id)
 {
 	bool duplicate;
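The __iomem annotation tells sparse the pointer refers to MMIO space: the checker then accepts ioread*/iowrite* accessors on it and flags plain dereferences. A minimal sketch with a hypothetical helper:

	/* Demo only: accessor use is legal on __iomem; *block would warn. */
	static u32 demo_peek(void __iomem *block, int off)
	{
		return ioread32(block + off);
	}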
@@ -1196,10 +1197,6 @@ static int fsl_qdma_probe(struct platform_device *pdev)
 	if (!fsl_qdma->queue)
 		return -ENOMEM;
 
-	ret = fsl_qdma_irq_init(pdev, fsl_qdma);
-	if (ret)
-		return ret;
-
 	fsl_qdma->irq_base = platform_get_irq_byname(pdev, "qdma-queue0");
 	if (fsl_qdma->irq_base < 0)
 		return fsl_qdma->irq_base;
@@ -1238,19 +1235,22 @@ static int fsl_qdma_probe(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, fsl_qdma);
 
-	ret = dma_async_device_register(&fsl_qdma->dma_dev);
-	if (ret) {
-		dev_err(&pdev->dev,
-			"Can't register NXP Layerscape qDMA engine.\n");
-		return ret;
-	}
-
 	ret = fsl_qdma_reg_init(fsl_qdma);
 	if (ret) {
 		dev_err(&pdev->dev, "Can't Initialize the qDMA engine.\n");
 		return ret;
 	}
 
+	ret = fsl_qdma_irq_init(pdev, fsl_qdma);
+	if (ret)
+		return ret;
+
+	ret = dma_async_device_register(&fsl_qdma->dma_dev);
+	if (ret) {
+		dev_err(&pdev->dev, "Can't register NXP Layerscape qDMA engine.\n");
+		return ret;
+	}
+
 	return 0;
 }
 
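The reorder restores a common probe convention: program/reset the hardware first, install interrupt handlers second, and expose the device to clients last, so neither an early IRQ nor an early client call can observe a half-initialized engine. Condensed restatement of the hunk above (error paths elided, sketch only):

	ret = fsl_qdma_reg_init(fsl_qdma);	/* 1. reset/program registers   */
	ret = fsl_qdma_irq_init(pdev, fsl_qdma);	/* 2. handlers may fire safely  */
	ret = dma_async_device_register(&fsl_qdma->dma_dev);	/* 3. become visible */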
--- a/drivers/dma/idxd/cdev.c
+++ b/drivers/dma/idxd/cdev.c
@@ -345,7 +345,7 @@ static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid)
 	spin_lock(&evl->lock);
 	status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
 	t = status.tail;
-	h = evl->head;
+	h = status.head;
 	size = evl->size;
 
 	while (h != t) {
--- a/drivers/dma/idxd/debugfs.c
+++ b/drivers/dma/idxd/debugfs.c
@@ -68,9 +68,9 @@ static int debugfs_evl_show(struct seq_file *s, void *d)
 
 	spin_lock(&evl->lock);
 
-	h = evl->head;
 	evl_status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
 	t = evl_status.tail;
+	h = evl_status.head;
 	evl_size = evl->size;
 
 	seq_printf(s, "Event Log head %u tail %u interrupt pending %u\n\n",
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -300,7 +300,6 @@ struct idxd_evl {
 	unsigned int log_size;
 	/* The number of entries in the event log. */
 	u16 size;
-	u16 head;
 	unsigned long *bmap;
 	bool batch_fail[IDXD_MAX_BATCH_IDENT];
 };
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -343,7 +343,9 @@ static void idxd_cleanup_internals(struct idxd_device *idxd)
 static int idxd_init_evl(struct idxd_device *idxd)
 {
 	struct device *dev = &idxd->pdev->dev;
+	unsigned int evl_cache_size;
 	struct idxd_evl *evl;
+	const char *idxd_name;
 
 	if (idxd->hw.gen_cap.evl_support == 0)
 		return 0;
@@ -355,9 +357,16 @@ static int idxd_init_evl(struct idxd_device *idxd)
 	spin_lock_init(&evl->lock);
 	evl->size = IDXD_EVL_SIZE_MIN;
 
-	idxd->evl_cache = kmem_cache_create(dev_name(idxd_confdev(idxd)),
-					    sizeof(struct idxd_evl_fault) + evl_ent_size(idxd),
-					    0, 0, NULL);
+	idxd_name = dev_name(idxd_confdev(idxd));
+	evl_cache_size = sizeof(struct idxd_evl_fault) + evl_ent_size(idxd);
+	/*
+	 * Since completion record in evl_cache will be copied to user
+	 * when handling completion record page fault, need to create
+	 * the cache suitable for user copy.
+	 */
+	idxd->evl_cache = kmem_cache_create_usercopy(idxd_name, evl_cache_size,
+						     0, 0, 0, evl_cache_size,
+						     NULL);
 	if (!idxd->evl_cache) {
 		kfree(evl);
 		return -ENOMEM;
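kmem_cache_create_usercopy() takes the same arguments as kmem_cache_create() plus a whitelisted window (useroffset, usersize) that copy_to_user()/copy_from_user() may touch when hardened usercopy is enabled; here the window spans the whole object. A minimal sketch with a hypothetical record type:

	/* Whitelist the entire object for user copies. */
	struct kmem_cache *cache;

	cache = kmem_cache_create_usercopy("demo_cache",
					   sizeof(struct demo_rec),	/* size */
					   0, 0,			/* align, flags */
					   0,				/* useroffset */
					   sizeof(struct demo_rec),	/* usersize */
					   NULL);			/* ctor */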
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -367,9 +367,9 @@ static void process_evl_entries(struct idxd_device *idxd)
 	/* Clear interrupt pending bit */
 	iowrite32(evl_status.bits_upper32,
 		  idxd->reg_base + IDXD_EVLSTATUS_OFFSET + sizeof(u32));
-	h = evl->head;
 	evl_status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
 	t = evl_status.tail;
+	h = evl_status.head;
 	size = idxd->evl->size;
 
 	while (h != t) {
@@ -378,7 +378,6 @@ static void process_evl_entries(struct idxd_device *idxd)
 		h = (h + 1) % size;
 	}
 
-	evl->head = h;
 	evl_status.head = h;
 	iowrite32(evl_status.bits_lower32, idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
 	spin_unlock(&evl->lock);
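Both irq.c hunks, together with the cdev.c and debugfs.c changes above, converge on one pattern: head and tail are always taken from the live EVLSTATUS register rather than a software shadow, so hardware and driver state cannot drift. The shared shape, condensed as a sketch:

	evl_status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
	h = evl_status.head;		/* no cached evl->head anymore */
	t = evl_status.tail;
	while (h != t) {
		/* ... process entry h ... */
		h = (h + 1) % size;
	}
	evl_status.head = h;		/* write progress straight back */
	iowrite32(evl_status.bits_lower32, idxd->reg_base + IDXD_EVLSTATUS_OFFSET);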
--- a/drivers/dma/ptdma/ptdma-dmaengine.c
+++ b/drivers/dma/ptdma/ptdma-dmaengine.c
@@ -385,8 +385,6 @@ int pt_dmaengine_register(struct pt_device *pt)
 	chan->vc.desc_free = pt_do_cleanup;
 	vchan_init(&chan->vc, dma_dev);
 
-	dma_set_mask_and_coherent(pt->dev, DMA_BIT_MASK(64));
-
 	ret = dma_async_device_register(dma_dev);
 	if (ret)
 		goto err_reg;
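The deleted line was a second, conflicting mask assignment: pt_dmaengine_register() forced a 64-bit mask after the probe path had already chosen one (presumably the mask set at PCI probe time; the 48-bit width below is an assumption, not taken from this diff). The convention the fix restores, in sketch form:

	/* Set the streaming and coherent masks once, at probe, and check the
	 * result; everything registered later simply inherits the choice. */
	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));	/* assumed width */
	if (ret)
		return ret;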