mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2025-01-09 06:33:34 +00:00
1788cf6a91
Switch from struct circ_buf to proper kfifo. kfifo provides much better API, esp. when wrap-around of the buffer needs to be taken into account. Look at pl011_dma_tx_refill() or cpm_uart_tx_pump() changes for example. Kfifo API can also fill in scatter-gather DMA structures, so it easier for that use case too. Look at lpuart_dma_tx() for example. Note that not all drivers can be converted to that (like atmel_serial), they handle DMA specially. Note that usb-serial uses kfifo for TX for ages. omap needed a bit more care as it needs to put a char into FIFO to start the DMA transfer when OMAP_DMA_TX_KICK is set. In that case, we have to do kfifo_dma_out_prepare twice: once to find out the tx_size (to find out if it is worths to do DMA at all -- size >= 4), the second time for the actual transfer. All traces of circ_buf are removed from serial_core.h (and its struct uart_state). Signed-off-by: Jiri Slaby (SUSE) <jirislaby@kernel.org> Cc: Al Cooper <alcooperx@gmail.com> Cc: Matthias Brugger <matthias.bgg@gmail.com> Cc: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com> Cc: Kumaravel Thiagarajan <kumaravel.thiagarajan@microchip.com> Cc: Tharun Kumar P <tharunkumar.pasumarthi@microchip.com> Cc: Russell King <linux@armlinux.org.uk> Cc: Vineet Gupta <vgupta@kernel.org> Cc: Richard Genoud <richard.genoud@gmail.com> Cc: Nicolas Ferre <nicolas.ferre@microchip.com> Cc: Alexandre Belloni <alexandre.belloni@bootlin.com> Cc: Claudiu Beznea <claudiu.beznea@tuxon.dev> Cc: Alexander Shiyan <shc_work@mail.ru> Cc: Baruch Siach <baruch@tkos.co.il> Cc: Maciej W. 
Rozycki <macro@orcam.me.uk> Cc: Shawn Guo <shawnguo@kernel.org> Cc: Sascha Hauer <s.hauer@pengutronix.de> Cc: Fabio Estevam <festevam@gmail.com> Cc: Neil Armstrong <neil.armstrong@linaro.org> Cc: Kevin Hilman <khilman@baylibre.com> Cc: Jerome Brunet <jbrunet@baylibre.com> Cc: Martin Blumenstingl <martin.blumenstingl@googlemail.com> Cc: Taichi Sugaya <sugaya.taichi@socionext.com> Cc: Takao Orito <orito.takao@socionext.com> Cc: Bjorn Andersson <andersson@kernel.org> Cc: Konrad Dybcio <konrad.dybcio@linaro.org> Cc: Pali Rohár <pali@kernel.org> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Nicholas Piggin <npiggin@gmail.com> Cc: Christophe Leroy <christophe.leroy@csgroup.eu> Cc: Aneesh Kumar K.V <aneesh.kumar@kernel.org> Cc: Naveen N. Rao <naveen.n.rao@linux.ibm.com> Cc: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org> Cc: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org> Cc: Alim Akhtar <alim.akhtar@samsung.com> Cc: Laxman Dewangan <ldewangan@nvidia.com> Cc: Thierry Reding <thierry.reding@gmail.com> Cc: Jonathan Hunter <jonathanh@nvidia.com> Cc: Orson Zhai <orsonzhai@gmail.com> Cc: Baolin Wang <baolin.wang@linux.alibaba.com> Cc: Chunyan Zhang <zhang.lyra@gmail.com> Cc: Patrice Chotard <patrice.chotard@foss.st.com> Cc: Maxime Coquelin <mcoquelin.stm32@gmail.com> Cc: Alexandre Torgue <alexandre.torgue@foss.st.com> Cc: David S. Miller <davem@davemloft.net> Cc: Hammer Hsieh <hammerh0314@gmail.com> Cc: Peter Korsgaard <jacmet@sunsite.dk> Cc: Timur Tabi <timur@kernel.org> Cc: Michal Simek <michal.simek@amd.com> Cc: Sumit Semwal <sumit.semwal@linaro.org> Cc: Christian König <christian.koenig@amd.com> Link: https://lore.kernel.org/r/20240405060826.2521-13-jirislaby@kernel.org Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
311 lines
7.4 KiB
C
311 lines
7.4 KiB
C
// SPDX-License-Identifier: GPL-2.0+
|
|
/*
|
|
* 8250_dma.c - DMA Engine API support for 8250.c
|
|
*
|
|
* Copyright (C) 2013 Intel Corporation
|
|
*/
|
|
#include <linux/tty.h>
|
|
#include <linux/tty_flip.h>
|
|
#include <linux/serial_reg.h>
|
|
#include <linux/dma-mapping.h>
|
|
|
|
#include "8250.h"
|
|
|
|
/*
 * DMA Tx completion callback (dmaengine callback context). Takes the port
 * lock itself, accounts the finished transfer, and immediately tries to
 * queue the next chunk of the xmit fifo.
 */
static void __dma_tx_complete(void *param)
{
	struct uart_8250_port *p = param;
	struct uart_8250_dma *dma = p->dma;
	struct tty_port *tport = &p->port.state->port;
	unsigned long flags;
	int ret;

	/* Hand the Tx buffer back to the CPU before reading xmit state */
	dma_sync_single_for_cpu(dma->txchan->device->dev, dma->tx_addr,
				UART_XMIT_SIZE, DMA_TO_DEVICE);

	uart_port_lock_irqsave(&p->port, &flags);

	dma->tx_running = 0;

	/* Consume the bytes the completed transfer sent from the fifo */
	uart_xmit_advance(&p->port, dma->tx_size);

	if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS)
		uart_write_wakeup(&p->port);

	/*
	 * Chain the next DMA transfer. If it fails (or there was nothing to
	 * send), re-enable THRI so transmission falls back to IRQ-driven PIO.
	 */
	ret = serial8250_tx_dma(p);
	if (ret || !dma->tx_running)
		serial8250_set_THRI(p);

	uart_port_unlock_irqrestore(&p->port, flags);
}
|
/*
 * Push whatever the finished (or paused) Rx DMA transfer delivered into the
 * tty flip buffer. Caller must hold the port lock; clears dma->rx_running
 * on success.
 */
static void __dma_rx_complete(struct uart_8250_port *p)
{
	struct uart_8250_dma *dma = p->dma;
	struct tty_port *tty_port = &p->port.state->port;
	struct dma_tx_state state;
	enum dma_status dma_status;
	int count;

	/*
	 * New DMA Rx can be started during the completion handler before it
	 * could acquire port's lock and it might still be ongoing. Don't do
	 * anything in such a case.
	 */
	dma_status = dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);
	if (dma_status == DMA_IN_PROGRESS)
		return;

	/* residue = bytes of the transfer the engine did not fill in */
	count = dma->rx_size - state.residue;

	tty_insert_flip_string(tty_port, dma->rx_buf, count);
	p->port.icount.rx += count;
	dma->rx_running = 0;

	tty_flip_buffer_push(tty_port);
}
|
static void dma_rx_complete(void *param)
|
|
{
|
|
struct uart_8250_port *p = param;
|
|
struct uart_8250_dma *dma = p->dma;
|
|
unsigned long flags;
|
|
|
|
uart_port_lock_irqsave(&p->port, &flags);
|
|
if (dma->rx_running)
|
|
__dma_rx_complete(p);
|
|
|
|
/*
|
|
* Cannot be combined with the previous check because __dma_rx_complete()
|
|
* changes dma->rx_running.
|
|
*/
|
|
if (!dma->rx_running && (serial_lsr_in(p) & UART_LSR_DR))
|
|
p->dma->rx_dma(p);
|
|
uart_port_unlock_irqrestore(&p->port, flags);
|
|
}
|
|
|
|
/*
 * Start (or let continue) DMA transmission of the xmit kfifo contents.
 *
 * Must be called with the port lock held. Returns 0 on success or when
 * there is nothing to do; a negative error code when no DMA descriptor
 * could be prepared — dma->tx_err is set in that case so the caller can
 * fall back to IRQ-driven PIO.
 */
int serial8250_tx_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma *dma = p->dma;
	struct tty_port *tport = &p->port.state->port;
	struct dma_async_tx_descriptor *desc;
	struct uart_port *up = &p->port;
	struct scatterlist sg;
	int ret;

	if (dma->tx_running) {
		/* Squeeze the high-priority x_char in between DMA data */
		if (up->x_char) {
			dmaengine_pause(dma->txchan);
			uart_xchar_out(up, UART_TX);
			dmaengine_resume(dma->txchan);
		}
		return 0;
	} else if (up->x_char) {
		uart_xchar_out(up, UART_TX);
	}

	if (uart_tx_stopped(&p->port) || kfifo_is_empty(&tport->xmit_fifo)) {
		/* We have been called from __dma_tx_complete() */
		return 0;
	}

	serial8250_do_prepare_tx_dma(p);

	sg_init_table(&sg, 1);
	/* kfifo can do more than one sg, we don't (quite yet) */
	ret = kfifo_dma_out_prepare_mapped(&tport->xmit_fifo, &sg, 1,
					   UART_XMIT_SIZE, dma->tx_addr);

	/* we already checked empty fifo above, so there should be something */
	if (WARN_ON_ONCE(ret != 1))
		return 0;

	dma->tx_size = sg_dma_len(&sg);

	desc = dmaengine_prep_slave_sg(dma->txchan, &sg, 1,
				       DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		ret = -EBUSY;
		goto err;
	}

	dma->tx_running = 1;
	desc->callback = __dma_tx_complete;
	desc->callback_param = p;

	dma->tx_cookie = dmaengine_submit(desc);

	/* Hand the Tx buffer over to the device before issuing */
	dma_sync_single_for_device(dma->txchan->device->dev, dma->tx_addr,
				   UART_XMIT_SIZE, DMA_TO_DEVICE);

	dma_async_issue_pending(dma->txchan);
	/* completion comes from DMA now, the THRI interrupt is not needed */
	serial8250_clear_THRI(p);
	dma->tx_err = 0;

	return 0;
err:
	dma->tx_err = 1;
	return ret;
}
|
int serial8250_rx_dma(struct uart_8250_port *p)
|
|
{
|
|
struct uart_8250_dma *dma = p->dma;
|
|
struct dma_async_tx_descriptor *desc;
|
|
|
|
if (dma->rx_running)
|
|
return 0;
|
|
|
|
serial8250_do_prepare_rx_dma(p);
|
|
|
|
desc = dmaengine_prep_slave_single(dma->rxchan, dma->rx_addr,
|
|
dma->rx_size, DMA_DEV_TO_MEM,
|
|
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
|
|
if (!desc)
|
|
return -EBUSY;
|
|
|
|
dma->rx_running = 1;
|
|
desc->callback = dma_rx_complete;
|
|
desc->callback_param = p;
|
|
|
|
dma->rx_cookie = dmaengine_submit(desc);
|
|
|
|
dma_async_issue_pending(dma->rxchan);
|
|
|
|
return 0;
|
|
}
|
|
|
|
void serial8250_rx_dma_flush(struct uart_8250_port *p)
|
|
{
|
|
struct uart_8250_dma *dma = p->dma;
|
|
|
|
if (dma->rx_running) {
|
|
dmaengine_pause(dma->rxchan);
|
|
__dma_rx_complete(p);
|
|
dmaengine_terminate_async(dma->rxchan);
|
|
}
|
|
}
|
|
EXPORT_SYMBOL_GPL(serial8250_rx_dma_flush);
|
|
|
|
/*
 * Acquire and configure the Rx and Tx DMA channels plus their buffers.
 *
 * Rx receives into a coherent buffer of dma->rx_size bytes (PAGE_SIZE by
 * default); Tx streams straight out of the port's xmit buffer through a
 * dma_map_single() streaming mapping. The device data-register addresses
 * default to port.mapbase unless rx_dma_addr/tx_dma_addr were pre-set.
 *
 * Returns 0 on success, -ENODEV when a channel is unavailable, -EINVAL
 * when the dmaengine driver lacks a required capability, -ENOMEM on
 * buffer allocation/mapping failure. All acquired resources are released
 * on the error paths.
 */
int serial8250_request_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma *dma = p->dma;
	phys_addr_t rx_dma_addr = dma->rx_dma_addr ?
				  dma->rx_dma_addr : p->port.mapbase;
	phys_addr_t tx_dma_addr = dma->tx_dma_addr ?
				  dma->tx_dma_addr : p->port.mapbase;
	dma_cap_mask_t mask;
	struct dma_slave_caps caps;
	int ret;

	/* Default slave configuration parameters */
	dma->rxconf.direction = DMA_DEV_TO_MEM;
	dma->rxconf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	dma->rxconf.src_addr = rx_dma_addr + UART_RX;

	dma->txconf.direction = DMA_MEM_TO_DEV;
	dma->txconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	dma->txconf.dst_addr = tx_dma_addr + UART_TX;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Get a channel for RX */
	dma->rxchan = dma_request_slave_channel_compat(mask,
						       dma->fn, dma->rx_param,
						       p->port.dev, "rx");
	if (!dma->rxchan)
		return -ENODEV;

	/* 8250 rx dma requires dmaengine driver to support pause/terminate */
	ret = dma_get_slave_caps(dma->rxchan, &caps);
	if (ret)
		goto release_rx;
	if (!caps.cmd_pause || !caps.cmd_terminate ||
	    caps.residue_granularity == DMA_RESIDUE_GRANULARITY_DESCRIPTOR) {
		ret = -EINVAL;
		goto release_rx;
	}

	dmaengine_slave_config(dma->rxchan, &dma->rxconf);

	/* Get a channel for TX */
	dma->txchan = dma_request_slave_channel_compat(mask,
						       dma->fn, dma->tx_param,
						       p->port.dev, "tx");
	if (!dma->txchan) {
		ret = -ENODEV;
		goto release_rx;
	}

	/* 8250 tx dma requires dmaengine driver to support terminate */
	ret = dma_get_slave_caps(dma->txchan, &caps);
	if (ret)
		goto err;
	if (!caps.cmd_terminate) {
		ret = -EINVAL;
		goto err;
	}

	dmaengine_slave_config(dma->txchan, &dma->txconf);

	/* RX buffer */
	if (!dma->rx_size)
		dma->rx_size = PAGE_SIZE;

	dma->rx_buf = dma_alloc_coherent(dma->rxchan->device->dev, dma->rx_size,
					 &dma->rx_addr, GFP_KERNEL);
	if (!dma->rx_buf) {
		ret = -ENOMEM;
		goto err;
	}

	/* TX buffer: map the port's xmit buffer for streaming DMA */
	dma->tx_addr = dma_map_single(dma->txchan->device->dev,
				      p->port.state->port.xmit_buf,
				      UART_XMIT_SIZE,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(dma->txchan->device->dev, dma->tx_addr)) {
		/* undo the Rx buffer allocation before unwinding channels */
		dma_free_coherent(dma->rxchan->device->dev, dma->rx_size,
				  dma->rx_buf, dma->rx_addr);
		ret = -ENOMEM;
		goto err;
	}

	dev_dbg_ratelimited(p->port.dev, "got both dma channels\n");

	return 0;
err:
	dma_release_channel(dma->txchan);
release_rx:
	dma_release_channel(dma->rxchan);
	return ret;
}
EXPORT_SYMBOL_GPL(serial8250_request_dma);
|
/*
 * Tear down everything serial8250_request_dma() set up: stop both
 * channels, free/unmap the buffers, and release the channels. Safe to
 * call when DMA was never requested (p->dma == NULL).
 */
void serial8250_release_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma *dma = p->dma;

	if (!dma)
		return;

	/* Release RX resources */
	dmaengine_terminate_sync(dma->rxchan);
	dma_free_coherent(dma->rxchan->device->dev, dma->rx_size, dma->rx_buf,
			  dma->rx_addr);
	dma_release_channel(dma->rxchan);
	dma->rxchan = NULL;

	/* Release TX resources */
	dmaengine_terminate_sync(dma->txchan);
	dma_unmap_single(dma->txchan->device->dev, dma->tx_addr,
			 UART_XMIT_SIZE, DMA_TO_DEVICE);
	dma_release_channel(dma->txchan);
	dma->txchan = NULL;
	dma->tx_running = 0;

	dev_dbg_ratelimited(p->port.dev, "dma channels released\n");
}
EXPORT_SYMBOL_GPL(serial8250_release_dma);