Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git (synced 2025-01-01 02:36:02 +00:00)
dmaengine: Fix spelling mistakes
Correct spelling mistakes in the DMA engine to improve readability and clarity without altering functionality.

Signed-off-by: Amit Vadhavana <av2082000@gmail.com>
Reviewed-by: Kees Cook <kees@kernel.org>
Reviewed-by: Dave Jiang <dave.jiang@intel.com>
Reviewed-by: Fenghua Yu <fenghua.yu@intel.com>
Link: https://lore.kernel.org/r/20240831172949.13189-1-av2082000@gmail.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
parent 5d318b5959
commit a688efea0f
@@ -112,7 +112,7 @@ static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp,
 }
 
 /**
- * acpi_dma_parse_csrt - parse CSRT to exctract additional DMA resources
+ * acpi_dma_parse_csrt - parse CSRT to extract additional DMA resources
  * @adev: ACPI device to match with
  * @adma: struct acpi_dma of the given DMA controller
  *
@@ -305,7 +305,7 @@ EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_free);
  * found.
  *
  * Return:
- * 0, if no information is avaiable, -1 on mismatch, and 1 otherwise.
+ * 0, if no information is available, -1 on mismatch, and 1 otherwise.
  */
 static int acpi_dma_update_dma_spec(struct acpi_dma *adma,
 		struct acpi_dma_spec *dma_spec)
@@ -153,7 +153,7 @@ struct msgdma_extended_desc {
 /**
  * struct msgdma_sw_desc - implements a sw descriptor
  * @async_tx: support for the async_tx api
- * @hw_desc: assosiated HW descriptor
+ * @hw_desc: associated HW descriptor
  * @node: node to move from the free list to the tx list
  * @tx_list: transmit list node
  */
@@ -511,7 +511,7 @@ static void msgdma_copy_one(struct msgdma_device *mdev,
	 * of the DMA controller. The descriptor will get flushed to the
	 * FIFO, once the last word (control word) is written. Since we
	 * are not 100% sure that memcpy() writes all word in the "correct"
-	 * oder (address from low to high) on all architectures, we make
+	 * order (address from low to high) on all architectures, we make
	 * sure this control word is written last by single coding it and
	 * adding some write-barriers here.
	 */
@@ -2,7 +2,7 @@
 /*
  * Copyright (c) 2006 ARM Ltd.
  * Copyright (c) 2010 ST-Ericsson SA
- * Copyirght (c) 2017 Linaro Ltd.
+ * Copyright (c) 2017 Linaro Ltd.
  *
  * Author: Peter Pearse <peter.pearse@arm.com>
  * Author: Linus Walleij <linus.walleij@linaro.org>
@@ -339,7 +339,7 @@ static inline u8 convert_buswidth(enum dma_slave_buswidth addr_width)
  * @regs: memory mapped register base
  * @clk: dma controller clock
  * @save_imr: interrupt mask register that is saved on suspend/resume cycle
- * @all_chan_mask: all channels availlable in a mask
+ * @all_chan_mask: all channels available in a mask
  * @lli_pool: hw lli table
  * @memset_pool: hw memset pool
  * @chan: channels table to store at_dma_chan structures
@@ -668,7 +668,7 @@ static inline u32 atc_calc_bytes_left(u32 current_len, u32 ctrla)
  * CTRLA is read in turn, next the DSCR is read a second time. If the two
  * consecutive read values of the DSCR are the same then we assume both refers
  * to the very same LLI as well as the CTRLA value read inbetween does. For
- * cyclic tranfers, the assumption is that a full loop is "not so fast". If the
+ * cyclic transfers, the assumption is that a full loop is "not so fast". If the
  * two DSCR values are different, we read again the CTRLA then the DSCR till two
  * consecutive read values from DSCR are equal or till the maximum trials is
  * reach. This algorithm is very unlikely not to find a stable value for DSCR.
@@ -700,7 +700,7 @@ static int atc_get_llis_residue(struct at_dma_chan *atchan,
			break;

		/*
-		 * DSCR has changed inside the DMA controller, so the previouly
+		 * DSCR has changed inside the DMA controller, so the previously
		 * read value of CTRLA may refer to an already processed
		 * descriptor hence could be outdated. We need to update ctrla
		 * to match the current descriptor.
@@ -15,7 +15,7 @@
  * number of hardware rings over one or more SBA hardware devices. By
  * design, the internal buffer size of SBA hardware device is limited
  * but all offload operations supported by SBA can be broken down into
- * multiple small size requests and executed parallely on multiple SBA
+ * multiple small size requests and executed parallelly on multiple SBA
  * hardware devices for achieving high through-put.
  *
  * The Broadcom SBA RAID driver does not require any register programming
@@ -135,7 +135,7 @@ struct sba_device {
	u32 max_xor_srcs;
	u32 max_resp_pool_size;
	u32 max_cmds_pool_size;
-	/* Maibox client and Mailbox channels */
+	/* Mailbox client and Mailbox channels */
	struct mbox_client client;
	struct mbox_chan *mchan;
	struct device *mbox_dev;
@@ -369,7 +369,7 @@ static struct bcm2835_desc *bcm2835_dma_create_cb_chain(
	/* the last frame requires extra flags */
	d->cb_list[d->frames - 1].cb->info |= finalextrainfo;

-	/* detect a size missmatch */
+	/* detect a size mismatch */
	if (buf_len && (d->size != buf_len))
		goto error_cb;

@@ -841,7 +841,7 @@ static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
	desc = container_of(tx, struct ep93xx_dma_desc, txd);

	/*
-	 * If nothing is currently prosessed, we push this descriptor
+	 * If nothing is currently processed, we push this descriptor
	 * directly to the hardware. Otherwise we put the descriptor
	 * to the pending queue.
	 */
@@ -1025,7 +1025,7 @@ ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
  * @chan: channel
  * @sgl: list of buffers to transfer
  * @sg_len: number of entries in @sgl
- * @dir: direction of tha DMA transfer
+ * @dir: direction of the DMA transfer
  * @flags: flags for the descriptor
  * @context: operation context (ignored)
  *
@@ -12,8 +12,8 @@ struct dpaa2_qdma_sd_d {
	u32 rsv:32;
	union {
		struct {
-			u32 ssd:12; /* souce stride distance */
-			u32 sss:12; /* souce stride size */
+			u32 ssd:12; /* source stride distance */
+			u32 sss:12; /* source stride size */
			u32 rsv1:8;
		} sdf;
		struct {
@@ -48,7 +48,7 @@ struct dpaa2_qdma_sd_d {
 #define QDMA_SER_DISABLE (8) /* no notification */
 #define QDMA_SER_CTX BIT(8) /* notification by FQD_CTX[fqid] */
 #define QDMA_SER_DEST (2 << 8) /* notification by destination desc */
-#define QDMA_SER_BOTH (3 << 8) /* soruce and dest notification */
+#define QDMA_SER_BOTH (3 << 8) /* source and dest notification */
 #define QDMA_FD_SPF_ENALBE BIT(30) /* source prefetch enable */
 
 #define QMAN_FD_VA_ENABLE BIT(14) /* Address used is virtual address */
@@ -677,7 +677,7 @@ static void hisi_dma_init_hw_qp(struct hisi_dma_dev *hdma_dev, u32 index)
	writel_relaxed(tmp, addr);

	/*
-	 * 0 - dma should process FLR whith CPU.
+	 * 0 - dma should process FLR with CPU.
	 * 1 - dma not process FLR, only cpu process FLR.
	 */
	addr = q_base + HISI_DMA_HIP09_DMA_FLR_DISABLE +
@@ -290,7 +290,7 @@ static void idma64_desc_fill(struct idma64_chan *idma64c,
		desc->length += hw->len;
	} while (i);

-	/* Trigger an interrupt after the last block is transfered */
+	/* Trigger an interrupt after the last block is transferred */
	lli->ctllo |= IDMA64C_CTLL_INT_EN;

	/* Disable LLP transfer in the last block */
@@ -364,7 +364,7 @@ static size_t idma64_active_desc_size(struct idma64_chan *idma64c)
	if (!i)
		return bytes;

-	/* The current chunk is not fully transfered yet */
+	/* The current chunk is not fully transferred yet */
	bytes += desc->hw[--i].len;

	return bytes - IDMA64C_CTLH_BLOCK_TS(ctlhi);
@@ -134,7 +134,7 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
	 * completing the descriptor will return desc to allocator and
	 * the desc can be acquired by a different process and the
	 * desc->list can be modified. Delete desc from list so the
-	 * list trasversing does not get corrupted by the other process.
+	 * list traversing does not get corrupted by the other process.
	 */
	list_for_each_entry_safe(d, t, &flist, list) {
		list_del_init(&d->list);
@@ -905,7 +905,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma)

	op = IOAT_OP_XOR_VAL;

-	/* validate the sources with the destintation page */
+	/* validate the sources with the destination page */
	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
		xor_val_srcs[i] = xor_srcs[i];
	xor_val_srcs[i] = dest;
@@ -107,7 +107,7 @@
  * If header mode is set in DMA descriptor,
  * If bit 30 is disabled, HDR_LEN must be configured according to channel
  * requirement.
- * If bit 30 is enabled(checksum with heade mode), HDR_LEN has no need to
+ * If bit 30 is enabled(checksum with header mode), HDR_LEN has no need to
  * be configured. It will enable check sum for switch
  * If header mode is not set in DMA descriptor,
  * This register setting doesn't matter
@@ -33,11 +33,11 @@
 #define LDMA_STOP BIT(4) /* DMA stop operation */
 #define LDMA_CONFIG_MASK GENMASK(4, 0) /* DMA controller config bits mask */
 
-/* Bitfields in ndesc_addr field of HW decriptor */
+/* Bitfields in ndesc_addr field of HW descriptor */
 #define LDMA_DESC_EN BIT(0) /*1: The next descriptor is valid */
 #define LDMA_DESC_ADDR_LOW GENMASK(31, 1)
 
-/* Bitfields in cmd field of HW decriptor */
+/* Bitfields in cmd field of HW descriptor */
 #define LDMA_INT BIT(1) /* Enable DMA interrupts */
 #define LDMA_DATA_DIRECTION BIT(12) /* 1: write to device, 0: read from device */
 
@@ -518,7 +518,7 @@ mtk_cqdma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dest,
		/* setup dma channel */
		cvd[i]->ch = c;

-		/* setup sourece, destination, and length */
+		/* setup source, destination, and length */
		tlen = (len > MTK_CQDMA_MAX_LEN) ? MTK_CQDMA_MAX_LEN : len;
		cvd[i]->len = tlen;
		cvd[i]->src = src;
@@ -617,7 +617,7 @@ static int mtk_cqdma_alloc_chan_resources(struct dma_chan *c)
	u32 i, min_refcnt = U32_MAX, refcnt;
	unsigned long flags;

-	/* allocate PC with the minimun refcount */
+	/* allocate PC with the minimum refcount */
	for (i = 0; i < cqdma->dma_channels; ++i) {
		refcnt = refcount_read(&cqdma->pc[i]->refcnt);
		if (refcnt < min_refcnt) {
@@ -226,7 +226,7 @@ struct mtk_hsdma_soc {
  * @pc_refcnt: Track how many VCs are using the PC
  * @lock: Lock protect agaisting multiple VCs access PC
  * @soc: The pointer to area holding differences among
- *       vaious platform
+ *       various platform
  */
 struct mtk_hsdma_device {
	struct dma_device ddev;
@@ -414,7 +414,7 @@ mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
	if (!mv_chan_is_busy(mv_chan)) {
		u32 current_desc = mv_chan_get_current_desc(mv_chan);
		/*
-		 * and the curren desc is the end of the chain before
+		 * and the current desc is the end of the chain before
		 * the append, then we need to start the channel
		 */
		if (current_desc == old_chain_tail->async_tx.phys)
@@ -1074,7 +1074,7 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
	if (!mv_chan->dma_desc_pool_virt)
		return ERR_PTR(-ENOMEM);

-	/* discover transaction capabilites from the platform data */
+	/* discover transaction capabilities from the platform data */
	dma_dev->cap_mask = cap_mask;

	INIT_LIST_HEAD(&dma_dev->channels);
@@ -99,7 +99,7 @@ struct mv_xor_device {
  * @common: common dmaengine channel object members
  * @slots_allocated: records the actual size of the descriptor slot pool
  * @irq_tasklet: bottom half where mv_xor_slot_cleanup runs
- * @op_in_desc: new mode of driver, each op is writen to descriptor.
+ * @op_in_desc: new mode of driver, each op is written to descriptor.
  */
 struct mv_xor_chan {
	int pending;
@@ -175,7 +175,7 @@ struct mv_xor_v2_device {
  * struct mv_xor_v2_sw_desc - implements a xor SW descriptor
  * @idx: descriptor index
  * @async_tx: support for the async_tx api
- * @hw_desc: assosiated HW descriptor
+ * @hw_desc: associated HW descriptor
  * @free_list: node of the free SW descriprots list
  */
 struct mv_xor_v2_sw_desc {
@@ -897,7 +897,7 @@ static int nbpf_config(struct dma_chan *dchan,
	/*
	 * We could check config->slave_id to match chan->terminal here,
	 * but with DT they would be coming from the same source, so
-	 * such a check would be superflous
+	 * such a check would be superfluous
	 */

	chan->slave_dst_addr = config->dst_addr;
@@ -26,7 +26,7 @@ static DEFINE_MUTEX(of_dma_lock);
  *
  * Finds a DMA controller with matching device node and number for dma cells
  * in a list of registered DMA controllers. If a match is found a valid pointer
- * to the DMA data stored is retuned. A NULL pointer is returned if no match is
+ * to the DMA data stored is returned. A NULL pointer is returned if no match is
  * found.
  */
 static struct of_dma *of_dma_find_controller(const struct of_phandle_args *dma_spec)
@@ -342,7 +342,7 @@ EXPORT_SYMBOL_GPL(of_dma_simple_xlate);
  *
  * This function can be used as the of xlate callback for DMA driver which wants
  * to match the channel based on the channel id. When using this xlate function
- * the #dma-cells propety of the DMA controller dt node needs to be set to 1.
+ * the #dma-cells property of the DMA controller dt node needs to be set to 1.
  * The data parameter of of_dma_controller_register must be a pointer to the
  * dma_device struct the function should match upon.
  *
@@ -1156,7 +1156,7 @@ static int owl_dma_probe(struct platform_device *pdev)
	}

	/*
-	 * Eventhough the DMA controller is capable of generating 4
+	 * Even though the DMA controller is capable of generating 4
	 * IRQ's for DMA priority feature, we only use 1 IRQ for
	 * simplification.
	 */
@@ -9,7 +9,7 @@
  */
 
 /*
- * This driver supports the asynchrounous DMA copy and RAID engines available
+ * This driver supports the asynchronous DMA copy and RAID engines available
  * on the AMCC PPC440SPe Processors.
  * Based on the Intel Xscale(R) family of I/O Processors (IOP 32x, 33x, 134x)
  * ADMA driver written by D.Williams.
@@ -14,7 +14,7 @@
 
 /* Number of elements in the array with statical CDBs */
 #define MAX_STAT_DMA_CDBS 16
-/* Number of DMA engines available on the contoller */
+/* Number of DMA engines available on the controller */
 #define DMA_ENGINES_NUM 2
 
 /* Maximum h/w supported number of destinations */
@@ -192,7 +192,7 @@ struct pt_cmd_queue {
	/* Queue dma pool */
	struct dma_pool *dma_pool;

-	/* Queue base address (not neccessarily aligned)*/
+	/* Queue base address (not necessarily aligned)*/
	struct ptdma_desc *qbase;

	/* Aligned queue start address (per requirement) */
@@ -440,7 +440,7 @@ static void bam_reset(struct bam_device *bdev)
	val |= BAM_EN;
	writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));

-	/* set descriptor threshhold, start with 4 bytes */
+	/* set descriptor threshold, start with 4 bytes */
	writel_relaxed(DEFAULT_CNT_THRSHLD,
			bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));

@@ -667,7 +667,7 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
	for_each_sg(sgl, sg, sg_len, i)
		num_alloc += DIV_ROUND_UP(sg_dma_len(sg), BAM_FIFO_SIZE);

-	/* allocate enough room to accomodate the number of entries */
+	/* allocate enough room to accommodate the number of entries */
	async_desc = kzalloc(struct_size(async_desc, desc, num_alloc),
			     GFP_NOWAIT);

@@ -1856,7 +1856,7 @@ static void gpi_issue_pending(struct dma_chan *chan)

	read_lock_irqsave(&gpii->pm_lock, pm_lock_flags);

-	/* move all submitted discriptors to issued list */
+	/* move all submitted descriptors to issued list */
	spin_lock_irqsave(&gchan->vc.lock, flags);
	if (vchan_issue_pending(&gchan->vc))
		vd = list_last_entry(&gchan->vc.desc_issued,
@@ -650,7 +650,7 @@ static enum dma_status adm_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
	/*
	 * residue is either the full length if it is in the issued list, or 0
	 * if it is in progress. We have no reliable way of determining
-	 * anything inbetween
+	 * anything in between
	 */
	dma_set_residue(txstate, residue);

@@ -318,7 +318,7 @@ static void sh_dmae_setup_xfer(struct shdma_chan *schan,
 }
 
 /*
- * Find a slave channel configuration from the contoller list by either a slave
+ * Find a slave channel configuration from the controller list by either a slave
  * ID in the non-DT case, or by a MID/RID value in the DT case
  */
 static const struct sh_dmae_slave_config *dmae_find_slave(
@@ -4,7 +4,7 @@
 #define STE_DMA40_H
 
 /*
- * Maxium size for a single dma descriptor
+ * Maximum size for a single dma descriptor
  * Size is limited to 16 bits.
  * Size is in the units of addr-widths (1,2,4,8 bytes)
  * Larger transfers will be split up to multiple linked desc
@@ -369,7 +369,7 @@ struct d40_phy_lli_bidir {
  * @lcsp02: Either maps to register lcsp0 if src or lcsp2 if dst.
  * @lcsp13: Either maps to register lcsp1 if src or lcsp3 if dst.
  *
- * This struct must be 8 bytes aligned since it will be accessed directy by
+ * This struct must be 8 bytes aligned since it will be accessed directly by
  * the DMA. Never add any none hw mapped registers to this struct.
  */
 
@@ -463,7 +463,7 @@ static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,

	/*
	 * If interrupt is pending then do nothing as the ISR will handle
-	 * the programing for new request.
+	 * the programming for new request.
	 */
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		dev_err(tdc2dev(tdc),
@@ -1742,7 +1742,7 @@ static int xgene_dma_probe(struct platform_device *pdev)
	/* Initialize DMA channels software state */
	xgene_dma_init_channels(pdma);

-	/* Configue DMA rings */
+	/* Configure DMA rings */
	ret = xgene_dma_init_rings(pdma);
	if (ret)
		goto err_clk_enable;
@@ -149,7 +149,7 @@ struct xilinx_dpdma_chan;
  * @addr_ext: upper 16 bit of 48 bit address (next_desc and src_addr)
  * @next_desc: next descriptor 32 bit address
  * @src_addr: payload source address (1st page, 32 LSB)
- * @addr_ext_23: payload source address (3nd and 3rd pages, 16 LSBs)
+ * @addr_ext_23: payload source address (2nd and 3rd pages, 16 LSBs)
  * @addr_ext_45: payload source address (4th and 5th pages, 16 LSBs)
  * @src_addr2: payload source address (2nd page, 32 LSB)
  * @src_addr3: payload source address (3rd page, 32 LSB)
@@ -210,7 +210,7 @@ struct xilinx_dpdma_tx_desc {
  * @vchan: virtual DMA channel
  * @reg: register base address
  * @id: channel ID
- * @wait_to_stop: queue to wait for outstanding transacitons before stopping
+ * @wait_to_stop: queue to wait for outstanding transactions before stopping
  * @running: true if the channel is running
  * @first_frame: flag for the first frame of stream
  * @video_group: flag if multi-channel operation is needed for video channels