Revert "dmaengine: imx-sdma: alloclate bd memory from dma pool"
This reverts commit fe5b85c656. The SDMA engine needs the descriptors to
be contiguous in memory. As the dma pool API is only able to provide a
single descriptor per alloc invocation there is no guarantee that multiple
descriptors satisfy this requirement. Also the code in question is broken
as it only allocates memory for a single descriptor, without looking at the
number of descriptors required for the transfer, leading to out-of-bounds
accesses when the descriptors are written.
Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
Signed-off-by: Robin Gong <yibin.gong@nxp.com>
Cc: stable <stable@vger.kernel.org>
Signed-off-by: Vinod Koul <vkoul@kernel.org>
parent c06abca692
commit ebb853b1bd
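For context, the allocation scheme the revert restores can be sketched as below. This is an illustrative, simplified example, not the driver code: the "_stub" names are invented, the buffer descriptor layout is abbreviated, and a real device pointer is passed where the restored driver code still passes NULL. The point is that a single coherent allocation sized by num_bd keeps all buffer descriptors contiguous, which the SDMA engine requires, whereas dma_pool_alloc() returns one fixed-size object per call with no contiguity guarantee between calls.

/*
 * Illustrative sketch only -- not taken from imx-sdma.c. The "_stub"
 * types and functions are invented for this example; the real driver's
 * sdma_buffer_descriptor has a different, hardware-defined layout.
 */
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/types.h>

struct sdma_bd_stub {
	u32 mode;		/* count/command/flags word (simplified) */
	u32 buffer_addr;	/* DMA address of the data buffer */
};

struct sdma_desc_stub {
	unsigned int num_bd;		/* BDs needed for this transfer */
	struct sdma_bd_stub *bd;	/* CPU view of the BD ring */
	dma_addr_t bd_phys;		/* bus address walked by the engine */
};

static int stub_alloc_bd(struct device *dev, struct sdma_desc_stub *desc)
{
	size_t bd_size = desc->num_bd * sizeof(*desc->bd);

	/*
	 * One coherent allocation covers all num_bd descriptors, so
	 * BD i + 1 follows BD i in memory, as the engine expects. A
	 * dma_pool would hand out one descriptor-sized object per call,
	 * with no guarantee that successive objects are adjacent.
	 */
	desc->bd = dma_alloc_coherent(dev, bd_size, &desc->bd_phys, GFP_ATOMIC);
	if (!desc->bd)
		return -ENOMEM;

	return 0;
}

static void stub_free_bd(struct device *dev, struct sdma_desc_stub *desc)
{
	size_t bd_size = desc->num_bd * sizeof(*desc->bd);

	dma_free_coherent(dev, bd_size, desc->bd, desc->bd_phys);
}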
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -24,7 +24,6 @@
 #include <linux/spinlock.h>
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
-#include <linux/dmapool.h>
 #include <linux/firmware.h>
 #include <linux/slab.h>
 #include <linux/platform_device.h>
@@ -376,7 +375,6 @@ struct sdma_channel {
 	u32				shp_addr, per_addr;
 	enum dma_status			status;
 	struct imx_dma_data		data;
-	struct dma_pool			*bd_pool;
 };
 
 #define IMX_DMA_SG_LOOP		BIT(0)
@@ -1192,10 +1190,11 @@ static int sdma_request_channel0(struct sdma_engine *sdma)
 
 static int sdma_alloc_bd(struct sdma_desc *desc)
 {
+	u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
 	int ret = 0;
 
-	desc->bd = dma_pool_alloc(desc->sdmac->bd_pool, GFP_ATOMIC,
-				  &desc->bd_phys);
+	desc->bd = dma_zalloc_coherent(NULL, bd_size, &desc->bd_phys,
+					GFP_ATOMIC);
 	if (!desc->bd) {
 		ret = -ENOMEM;
 		goto out;
@@ -1206,7 +1205,9 @@ static int sdma_alloc_bd(struct sdma_desc *desc)
 
 static void sdma_free_bd(struct sdma_desc *desc)
 {
-	dma_pool_free(desc->sdmac->bd_pool, desc->bd, desc->bd_phys);
+	u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
+
+	dma_free_coherent(NULL, bd_size, desc->bd, desc->bd_phys);
 }
 
 static void sdma_desc_free(struct virt_dma_desc *vd)
@@ -1272,10 +1273,6 @@ static int sdma_alloc_chan_resources(struct dma_chan *chan)
 	if (ret)
 		goto disable_clk_ahb;
 
-	sdmac->bd_pool = dma_pool_create("bd_pool", chan->device->dev,
-				sizeof(struct sdma_buffer_descriptor),
-				32, 0);
-
 	return 0;
 
 disable_clk_ahb:
@@ -1304,9 +1301,6 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
 
 	clk_disable(sdma->clk_ipg);
 	clk_disable(sdma->clk_ahb);
-
-	dma_pool_destroy(sdmac->bd_pool);
-	sdmac->bd_pool = NULL;
 }
 
 static struct sdma_desc *sdma_transfer_init(struct sdma_channel *sdmac,
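To see why the reverted code led to out-of-bounds writes, consider how a prep path fills the ring: it writes one buffer descriptor per segment or period. The hypothetical fill loop below (again a sketch, reusing the invented stub types from the earlier example) makes this concrete; if the allocation had room for only a single descriptor, every iteration past i == 0 would write beyond the end of the allocation.

/* Hypothetical fill loop, simplified; reuses the stub types sketched above. */
static void stub_fill_bd_ring(struct sdma_desc_stub *desc,
			      u32 buf, u32 period_len)
{
	unsigned int i;

	for (i = 0; i < desc->num_bd; i++) {
		struct sdma_bd_stub *bd = &desc->bd[i];

		/* Each period gets its own BD; the write lands at desc->bd[i]. */
		bd->buffer_addr = buf + i * period_len;
		bd->mode = period_len;
	}
}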