Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Synced 2025-01-12 00:00:00 +00:00

commit f0dad6e701
Merge branch 'dma_complete' into next
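
This merge brings the dmaengine subsystem's rename of the DMA_SUCCESS status value to DMA_COMPLETE into next. Every hunk below makes the same mechanical substitution: the enum dma_status declaration in include/linux/dmaengine.h is renamed, and the core helpers (dma_wait_for_async_tx(), dma_async_is_complete(), the stub inlines), the async_tx layer, dmatest, the individual DMA drivers (pl08x, at_hdmac, coh901318, cppi41, jz4740, dw, edma, imx-dma, imx-sdma, intel_mid, ioat, iop-adma, k3, mmp_tdma, mv_xor, mxs, omap, ppc440spe, sa11x0, shdma, ste_dma40, tegra, txx9), and the remaining users (the sh-sci serial driver and the TCP receive-offload path) are updated to match.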
@@ -128,7 +128,7 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
                 }
                 device->device_issue_pending(chan);
         } else {
-                if (dma_wait_for_async_tx(depend_tx) != DMA_SUCCESS)
+                if (dma_wait_for_async_tx(depend_tx) != DMA_COMPLETE)
                         panic("%s: DMA error waiting for depend_tx\n",
                               __func__);
                 tx->tx_submit(tx);

@@ -280,7 +280,7 @@ void async_tx_quiesce(struct dma_async_tx_descriptor **tx)
                  * we are referring to the correct operation
                  */
                 BUG_ON(async_tx_test_ack(*tx));
-                if (dma_wait_for_async_tx(*tx) != DMA_SUCCESS)
+                if (dma_wait_for_async_tx(*tx) != DMA_COMPLETE)
                         panic("%s: DMA error waiting for transaction\n",
                               __func__);
                 async_tx_ack(*tx);

@@ -1252,7 +1252,7 @@ static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan,
         size_t bytes = 0;

         ret = dma_cookie_status(chan, cookie, txstate);
-        if (ret == DMA_SUCCESS)
+        if (ret == DMA_COMPLETE)
                 return ret;

         /*

@@ -1267,7 +1267,7 @@ static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan,

         spin_lock_irqsave(&plchan->vc.lock, flags);
         ret = dma_cookie_status(chan, cookie, txstate);
-        if (ret != DMA_SUCCESS) {
+        if (ret != DMA_COMPLETE) {
                 vd = vchan_find_desc(&plchan->vc, cookie);
                 if (vd) {
                         /* On the issued list, so hasn't been processed yet */

@@ -1102,7 +1102,7 @@ atc_tx_status(struct dma_chan *chan,
         int bytes = 0;

         ret = dma_cookie_status(chan, cookie, txstate);
-        if (ret == DMA_SUCCESS)
+        if (ret == DMA_COMPLETE)
                 return ret;
         /*
          * There's no point calculating the residue if there's

@@ -2369,7 +2369,7 @@ coh901318_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
         enum dma_status ret;

         ret = dma_cookie_status(chan, cookie, txstate);
-        if (ret == DMA_SUCCESS)
+        if (ret == DMA_COMPLETE)
                 return ret;

         dma_set_residue(txstate, coh901318_get_bytes_left(chan));

@@ -353,7 +353,7 @@ static enum dma_status cppi41_dma_tx_status(struct dma_chan *chan,

         /* lock */
         ret = dma_cookie_status(chan, cookie, txstate);
-        if (txstate && ret == DMA_SUCCESS)
+        if (txstate && ret == DMA_COMPLETE)
                 txstate->residue = c->residue;
         /* unlock */

@@ -491,7 +491,7 @@ static enum dma_status jz4740_dma_tx_status(struct dma_chan *c,
         unsigned long flags;

         status = dma_cookie_status(c, cookie, state);
-        if (status == DMA_SUCCESS || !state)
+        if (status == DMA_COMPLETE || !state)
                 return status;

         spin_lock_irqsave(&chan->vchan.lock, flags);

@@ -1062,7 +1062,7 @@ dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
         unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

         if (!tx)
-                return DMA_SUCCESS;
+                return DMA_COMPLETE;

         while (tx->cookie == -EBUSY) {
                 if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
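
The two dmaengine core hunks above convert the synchronous wait path. A minimal caller sketch under the new name (tx is assumed to be a previously submitted descriptor; only dma_wait_for_async_tx() and DMA_COMPLETE are from this diff):

        /* Block until the descriptor retires, then check the renamed status. */
        if (dma_wait_for_async_tx(tx) != DMA_COMPLETE)
                pr_err("%s: descriptor did not complete\n", __func__);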
@@ -740,7 +740,7 @@ static int dmatest_func(void *data)
                                           len, 0);
                        failed_tests++;
                        continue;
-                } else if (status != DMA_SUCCESS) {
+                } else if (status != DMA_COMPLETE) {
                        enum dmatest_error_type type = (status == DMA_ERROR) ?
                                DMATEST_ET_DMA_ERROR : DMATEST_ET_DMA_IN_PROGRESS;
                        thread_result_add(info, result, type,

@@ -1098,13 +1098,13 @@ dwc_tx_status(struct dma_chan *chan,
         enum dma_status ret;

         ret = dma_cookie_status(chan, cookie, txstate);
-        if (ret == DMA_SUCCESS)
+        if (ret == DMA_COMPLETE)
                 return ret;

         dwc_scan_descriptors(to_dw_dma(chan->device), dwc);

         ret = dma_cookie_status(chan, cookie, txstate);
-        if (ret != DMA_SUCCESS)
+        if (ret != DMA_COMPLETE)
                 dma_set_residue(txstate, dwc_get_residue(dwc));

         if (dwc->paused && ret == DMA_IN_PROGRESS)
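
The dw_dmac hunk above has the shape that repeats across most of the driver conversions below: ask the cookie bookkeeping first, return early on completion, otherwise compute a residue. A minimal sketch of that post-rename pattern, with hypothetical my_* names standing in for driver specifics (the dmaengine calls are the ones used throughout these hunks):

        static enum dma_status my_tx_status(struct dma_chan *chan,
                                            dma_cookie_t cookie,
                                            struct dma_tx_state *txstate)
        {
                enum dma_status ret = dma_cookie_status(chan, cookie, txstate);

                if (ret == DMA_COMPLETE)        /* formerly DMA_SUCCESS */
                        return ret;

                /* Still pending: report how many bytes remain. */
                dma_set_residue(txstate, my_residue(chan, cookie));
                return ret;
        }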
@@ -640,7 +640,7 @@ static enum dma_status edma_tx_status(struct dma_chan *chan,
         unsigned long flags;

         ret = dma_cookie_status(chan, cookie, txstate);
-        if (ret == DMA_SUCCESS || !txstate)
+        if (ret == DMA_COMPLETE || !txstate)
                 return ret;

         spin_lock_irqsave(&echan->vchan.lock, flags);

@@ -771,7 +771,7 @@ static int imxdma_alloc_chan_resources(struct dma_chan *chan)
                 desc->desc.tx_submit = imxdma_tx_submit;
                 /* txd.flags will be overwritten in prep funcs */
                 desc->desc.flags = DMA_CTRL_ACK;
-                desc->status = DMA_SUCCESS;
+                desc->status = DMA_COMPLETE;

                 list_add_tail(&desc->node, &imxdmac->ld_free);
                 imxdmac->descs_allocated++;

@@ -638,7 +638,7 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
         if (error)
                 sdmac->status = DMA_ERROR;
         else
-                sdmac->status = DMA_SUCCESS;
+                sdmac->status = DMA_COMPLETE;

         dma_cookie_complete(&sdmac->desc);
         if (sdmac->desc.callback)

@@ -309,7 +309,7 @@ static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
                 callback_txd(param_txd);
         }
         if (midc->raw_tfr) {
-                desc->status = DMA_SUCCESS;
+                desc->status = DMA_COMPLETE;
                 if (desc->lli != NULL) {
                         pci_pool_free(desc->lli_pool, desc->lli,
                                       desc->lli_phys);

@@ -481,7 +481,7 @@ static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan,
         enum dma_status ret;

         ret = dma_cookie_status(chan, cookie, txstate);
-        if (ret != DMA_SUCCESS) {
+        if (ret != DMA_COMPLETE) {
                 spin_lock_bh(&midc->lock);
                 midc_scan_descriptors(to_middma_device(chan->device), midc);
                 spin_unlock_bh(&midc->lock);

@@ -733,7 +733,7 @@ ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
         enum dma_status ret;

         ret = dma_cookie_status(c, cookie, txstate);
-        if (ret == DMA_SUCCESS)
+        if (ret == DMA_COMPLETE)
                 return ret;

         device->cleanup_fn((unsigned long) c);

@@ -859,7 +859,7 @@ int ioat_dma_self_test(struct ioatdma_device *device)

         if (tmo == 0 ||
             dma->device_tx_status(dma_chan, cookie, NULL)
-                        != DMA_SUCCESS) {
+                        != DMA_COMPLETE) {
                 dev_err(dev, "Self-test copy timed out, disabling\n");
                 err = -ENODEV;
                 goto unmap_dma;

@@ -807,7 +807,7 @@ ioat3_tx_status(struct dma_chan *c, dma_cookie_t cookie,
         enum dma_status ret;

         ret = dma_cookie_status(c, cookie, txstate);
-        if (ret == DMA_SUCCESS)
+        if (ret == DMA_COMPLETE)
                 return ret;

         ioat3_cleanup(ioat);

@@ -1468,7 +1468,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)

         tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

-        if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
+        if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
                 dev_err(dev, "Self-test xor timed out\n");
                 err = -ENODEV;
                 goto dma_unmap;

@@ -1530,7 +1530,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)

         tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

-        if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
+        if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
                 dev_err(dev, "Self-test validate timed out\n");
                 err = -ENODEV;
                 goto dma_unmap;

@@ -1577,7 +1577,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)

         tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

-        if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
+        if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
                 dev_err(dev, "Self-test 2nd validate timed out\n");
                 err = -ENODEV;
                 goto dma_unmap;

@@ -864,7 +864,7 @@ static enum dma_status iop_adma_status(struct dma_chan *chan,
         int ret;

         ret = dma_cookie_status(chan, cookie, txstate);
-        if (ret == DMA_SUCCESS)
+        if (ret == DMA_COMPLETE)
                 return ret;

         iop_adma_slot_cleanup(iop_chan);

@@ -983,7 +983,7 @@ static int iop_adma_memcpy_self_test(struct iop_adma_device *device)
         msleep(1);

         if (iop_adma_status(dma_chan, cookie, NULL) !=
-                        DMA_SUCCESS) {
+                        DMA_COMPLETE) {
                 dev_err(dma_chan->device->dev,
                         "Self-test copy timed out, disabling\n");
                 err = -ENODEV;

@@ -1083,7 +1083,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
         msleep(8);

         if (iop_adma_status(dma_chan, cookie, NULL) !=
-                        DMA_SUCCESS) {
+                        DMA_COMPLETE) {
                 dev_err(dma_chan->device->dev,
                         "Self-test xor timed out, disabling\n");
                 err = -ENODEV;

@@ -1129,7 +1129,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
         iop_adma_issue_pending(dma_chan);
         msleep(8);

-        if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
+        if (iop_adma_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
                 dev_err(dma_chan->device->dev,
                         "Self-test zero sum timed out, disabling\n");
                 err = -ENODEV;

@@ -1158,7 +1158,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
         iop_adma_issue_pending(dma_chan);
         msleep(8);

-        if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
+        if (iop_adma_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
                 dev_err(dma_chan->device->dev,
                         "Self-test non-zero sum timed out, disabling\n");
                 err = -ENODEV;

@@ -1254,7 +1254,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
         msleep(8);

         if (iop_adma_status(dma_chan, cookie, NULL) !=
-            DMA_SUCCESS) {
+            DMA_COMPLETE) {
                 dev_err(dev, "Self-test pq timed out, disabling\n");
                 err = -ENODEV;
                 goto free_resources;

@@ -1291,7 +1291,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
         msleep(8);

         if (iop_adma_status(dma_chan, cookie, NULL) !=
-            DMA_SUCCESS) {
+            DMA_COMPLETE) {
                 dev_err(dev, "Self-test pq-zero-sum timed out, disabling\n");
                 err = -ENODEV;
                 goto free_resources;

@@ -1323,7 +1323,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
         msleep(8);

         if (iop_adma_status(dma_chan, cookie, NULL) !=
-            DMA_SUCCESS) {
+            DMA_COMPLETE) {
                 dev_err(dev, "Self-test !pq-zero-sum timed out, disabling\n");
                 err = -ENODEV;
                 goto free_resources;

@@ -344,7 +344,7 @@ static enum dma_status k3_dma_tx_status(struct dma_chan *chan,
         size_t bytes = 0;

         ret = dma_cookie_status(&c->vc.chan, cookie, state);
-        if (ret == DMA_SUCCESS)
+        if (ret == DMA_COMPLETE)
                 return ret;

         spin_lock_irqsave(&c->vc.lock, flags);

@@ -163,7 +163,7 @@ static void mmp_tdma_disable_chan(struct mmp_tdma_chan *tdmac)
         /* disable irq */
         writel(0, tdmac->reg_base + TDIMR);

-        tdmac->status = DMA_SUCCESS;
+        tdmac->status = DMA_COMPLETE;
 }

 static void mmp_tdma_resume_chan(struct mmp_tdma_chan *tdmac)

@@ -398,7 +398,7 @@ static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic(
         int num_periods = buf_len / period_len;
         int i = 0, buf = 0;

-        if (tdmac->status != DMA_SUCCESS)
+        if (tdmac->status != DMA_COMPLETE)
                 return NULL;

         if (period_len > TDMA_MAX_XFER_BYTES) {

@@ -532,7 +532,7 @@ static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev,
         tdmac->idx = idx;
         tdmac->type = type;
         tdmac->reg_base = (unsigned long)tdev->base + idx * 4;
-        tdmac->status = DMA_SUCCESS;
+        tdmac->status = DMA_COMPLETE;
         tdev->tdmac[tdmac->idx] = tdmac;
         tasklet_init(&tdmac->tasklet, dma_do_tasklet, (unsigned long)tdmac);

@@ -749,7 +749,7 @@ static enum dma_status mv_xor_status(struct dma_chan *chan,
         enum dma_status ret;

         ret = dma_cookie_status(chan, cookie, txstate);
-        if (ret == DMA_SUCCESS) {
+        if (ret == DMA_COMPLETE) {
                 mv_xor_clean_completed_slots(mv_chan);
                 return ret;
         }

@@ -874,7 +874,7 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
         msleep(1);

         if (mv_xor_status(dma_chan, cookie, NULL) !=
-            DMA_SUCCESS) {
+            DMA_COMPLETE) {
                 dev_err(dma_chan->device->dev,
                         "Self-test copy timed out, disabling\n");
                 err = -ENODEV;

@@ -968,7 +968,7 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
         msleep(8);

         if (mv_xor_status(dma_chan, cookie, NULL) !=
-            DMA_SUCCESS) {
+            DMA_COMPLETE) {
                 dev_err(dma_chan->device->dev,
                         "Self-test xor timed out, disabling\n");
                 err = -ENODEV;

@@ -224,7 +224,7 @@ static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)

 static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan)
 {
-        mxs_chan->status = DMA_SUCCESS;
+        mxs_chan->status = DMA_COMPLETE;
 }

 static void mxs_dma_pause_chan(struct mxs_dma_chan *mxs_chan)

@@ -312,12 +312,12 @@ static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id)
                 if (mxs_chan->flags & MXS_DMA_SG_LOOP)
                         mxs_chan->status = DMA_IN_PROGRESS;
                 else
-                        mxs_chan->status = DMA_SUCCESS;
+                        mxs_chan->status = DMA_COMPLETE;
         }

         stat1 &= ~(1 << channel);

-        if (mxs_chan->status == DMA_SUCCESS)
+        if (mxs_chan->status == DMA_COMPLETE)
                 dma_cookie_complete(&mxs_chan->desc);

         /* schedule tasklet on this channel */
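
Note the cyclic special case in the mxs interrupt handler above: a channel in MXS_DMA_SG_LOOP (cyclic) mode is left in DMA_IN_PROGRESS because its descriptor never retires, so only non-cyclic completions become DMA_COMPLETE and go on to complete the cookie.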
@@ -248,7 +248,7 @@ static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
         unsigned long flags;

         ret = dma_cookie_status(chan, cookie, txstate);
-        if (ret == DMA_SUCCESS || !txstate)
+        if (ret == DMA_COMPLETE || !txstate)
                 return ret;

         spin_lock_irqsave(&c->vc.lock, flags);

@@ -3891,7 +3891,7 @@ static enum dma_status ppc440spe_adma_tx_status(struct dma_chan *chan,

         ppc440spe_chan = to_ppc440spe_adma_chan(chan);
         ret = dma_cookie_status(chan, cookie, txstate);
-        if (ret == DMA_SUCCESS)
+        if (ret == DMA_COMPLETE)
                 return ret;

         ppc440spe_adma_slot_cleanup(ppc440spe_chan);

@@ -436,7 +436,7 @@ static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan,
         enum dma_status ret;

         ret = dma_cookie_status(&c->vc.chan, cookie, state);
-        if (ret == DMA_SUCCESS)
+        if (ret == DMA_COMPLETE)
                 return ret;

         if (!state)

@@ -724,7 +724,7 @@ static enum dma_status shdma_tx_status(struct dma_chan *chan,
          * If we don't find cookie on the queue, it has been aborted and we have
          * to report error
          */
-        if (status != DMA_SUCCESS) {
+        if (status != DMA_COMPLETE) {
                 struct shdma_desc *sdesc;
                 status = DMA_ERROR;
                 list_for_each_entry(sdesc, &schan->ld_queue, node)

@@ -2627,7 +2627,7 @@ static enum dma_status d40_tx_status(struct dma_chan *chan,
         }

         ret = dma_cookie_status(chan, cookie, txstate);
-        if (ret != DMA_SUCCESS)
+        if (ret != DMA_COMPLETE)
                 dma_set_residue(txstate, stedma40_residue(chan));

         if (d40_is_paused(d40c))

@@ -570,7 +570,7 @@ static void handle_once_dma_done(struct tegra_dma_channel *tdc,

         list_del(&sgreq->node);
         if (sgreq->last_sg) {
-                dma_desc->dma_status = DMA_SUCCESS;
+                dma_desc->dma_status = DMA_COMPLETE;
                 dma_cookie_complete(&dma_desc->txd);
                 if (!dma_desc->cb_count)
                         list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);

@@ -768,7 +768,7 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
         unsigned int residual;

         ret = dma_cookie_status(dc, cookie, txstate);
-        if (ret == DMA_SUCCESS)
+        if (ret == DMA_COMPLETE)
                 return ret;

         spin_lock_irqsave(&tdc->lock, flags);

@@ -962,8 +962,8 @@ txx9dmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
         enum dma_status ret;

         ret = dma_cookie_status(chan, cookie, txstate);
-        if (ret == DMA_SUCCESS)
-                return DMA_SUCCESS;
+        if (ret == DMA_COMPLETE)
+                return DMA_COMPLETE;

         spin_lock_bh(&dc->lock);
         txx9dmac_scan_descriptors(dc);

@@ -1433,7 +1433,7 @@ static void work_fn_rx(struct work_struct *work)
         desc = s->desc_rx[new];

         if (dma_async_is_tx_complete(s->chan_rx, s->active_rx, NULL, NULL) !=
-            DMA_SUCCESS) {
+            DMA_COMPLETE) {
                 /* Handle incomplete DMA receive */
                 struct dma_chan *chan = s->chan_rx;
                 struct shdma_desc *sh_desc = container_of(desc,

@@ -45,13 +45,13 @@ static inline int dma_submit_error(dma_cookie_t cookie)

 /**
  * enum dma_status - DMA transaction status
- * @DMA_SUCCESS: transaction completed successfully
+ * @DMA_COMPLETE: transaction completed
  * @DMA_IN_PROGRESS: transaction not yet processed
  * @DMA_PAUSED: transaction is paused
  * @DMA_ERROR: transaction failed
  */
 enum dma_status {
-        DMA_SUCCESS,
+        DMA_COMPLETE,
         DMA_IN_PROGRESS,
         DMA_PAUSED,
         DMA_ERROR,
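
The header hunk above is the heart of the series. DMA_COMPLETE replaces DMA_SUCCESS in place as the first enumerator, so the numeric value (0) is unchanged; only the name is, since a transfer that has finished is "complete" whether or not it also succeeded. A sketch of the polling idiom the converted callers use against the renamed value (chan and cookie are assumed to come from an earlier dmaengine submission; the calls are the ones used in this commit's hunks):

        enum dma_status status;

        dma_async_issue_pending(chan);
        do {
                status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
        } while (status == DMA_IN_PROGRESS);
        if (status != DMA_COMPLETE)
                pr_err("DMA transfer failed: %d\n", status);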
@@ -979,10 +979,10 @@ static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
 {
         if (last_complete <= last_used) {
                 if ((cookie <= last_complete) || (cookie > last_used))
-                        return DMA_SUCCESS;
+                        return DMA_COMPLETE;
         } else {
                 if ((cookie <= last_complete) && (cookie > last_used))
-                        return DMA_SUCCESS;
+                        return DMA_COMPLETE;
         }
         return DMA_IN_PROGRESS;
 }
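
dma_async_is_complete() classifies a cookie against two watermarks: last_complete is the newest cookie known to have finished and last_used the newest one handed out, with the two branches covering wraparound of the cookie counter. For example, with last_complete = 10 and last_used = 12, cookie 9 is DMA_COMPLETE while cookies 11 and 12 are still DMA_IN_PROGRESS.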
@@ -1013,11 +1013,11 @@ static inline struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
 }
 static inline enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
 {
-        return DMA_SUCCESS;
+        return DMA_COMPLETE;
 }
 static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
 {
-        return DMA_SUCCESS;
+        return DMA_COMPLETE;
 }
 static inline void dma_issue_pending_all(void)
 {

@@ -1429,7 +1429,7 @@ static void tcp_service_net_dma(struct sock *sk, bool wait)
         do {
                 if (dma_async_is_tx_complete(tp->ucopy.dma_chan,
                                              last_issued, &done,
-                                             &used) == DMA_SUCCESS) {
+                                             &used) == DMA_COMPLETE) {
                         /* Safe to free early-copied skbs now */
                         __skb_queue_purge(&sk->sk_async_wait_queue);
                         break;

@@ -1437,7 +1437,7 @@ static void tcp_service_net_dma(struct sock *sk, bool wait)
                         struct sk_buff *skb;
                         while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
                                (dma_async_is_complete(skb->dma_cookie, done,
-                                                      used) == DMA_SUCCESS)) {
+                                                      used) == DMA_COMPLETE)) {
                                 __skb_dequeue(&sk->sk_async_wait_queue);
                                 kfree_skb(skb);
                         }
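
The tcp_service_net_dma() hunks show the consumer side of the same idiom: dma_async_is_tx_complete() refreshes the channel's done/used watermarks, and dma_async_is_complete() then classifies each queued skb's dma_cookie against them before the skb is freed.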