mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
ALSA: memalloc: Use proper DMA mapping API for x86 S/G buffer allocations
The fallback S/G buffer allocation for x86 used the addresses deduced from the page allocations blindly. It broke the allocations on IOMMU and forced us to paper over it with a hackish DMA-ops check.

For cleaning up those messes, this patch switches to the proper DMA mapping API usage with the standard sg-table instead. With the sg-table in place, the address table is no longer needed; the piece that still has to be kept for freeing with the original allocation sizes is replaced with an array holding the number of pages in each chunk. The get_addr callback is changed to use the existing helper for non-contiguous buffers, which is also the reason the sg_table is placed at the beginning of struct snd_dma_sg_fallback. Finally, the hackish workaround that checks the DMA ops is dropped.

Link: https://patch.msgid.link/20240912155227.4078-3-tiwai@suse.de
Signed-off-by: Takashi Iwai <tiwai@suse.de>
parent c880a51466
commit 0b9f2bd00f
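The pattern the patch switches to is the standard one for discontiguous DMA buffers: collect the pages into a struct sg_table and let dma_map_sgtable() produce the device-visible addresses, going through the IOMMU when one is present. A minimal standalone sketch of that pattern, not the patch itself; my_dev and my_pages are placeholder names:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int map_pages_for_dma(struct device *my_dev, struct page **my_pages,
                             unsigned int count)
{
        struct sg_table sgt;
        int err;

        /* build an sg_table describing the (possibly discontiguous) pages */
        err = sg_alloc_table_from_pages(&sgt, my_pages, count, 0,
                                        (unsigned long)count << PAGE_SHIFT,
                                        GFP_KERNEL);
        if (err)
                return err;

        /* let the DMA mapping API fill in the device addresses;
         * this goes through the IOMMU when one is present */
        err = dma_map_sgtable(my_dev, &sgt, DMA_BIDIRECTIONAL, 0);
        if (err) {
                sg_free_table(&sgt);
                return err;
        }

        /* ... use sg_dma_address()/sg_dma_len() on the entries ... */

        dma_unmap_sgtable(my_dev, &sgt, DMA_BIDIRECTIONAL, 0);
        sg_free_table(&sgt);
        return 0;
}

Teardown mirrors the setup in reverse: dma_unmap_sgtable() first, then sg_free_table().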
--- a/sound/core/memalloc.c
+++ b/sound/core/memalloc.c
@@ -680,43 +680,43 @@ static const struct snd_malloc_ops snd_dma_noncontig_ops = {
 #ifdef CONFIG_SND_DMA_SGBUF
 /* Fallback SG-buffer allocations for x86 */
 struct snd_dma_sg_fallback {
+        struct sg_table sgt; /* used by get_addr - must be the first item */
         size_t count;
         struct page **pages;
-        /* DMA address array; the first page contains #pages in ~PAGE_MASK */
-        dma_addr_t *addrs;
+        unsigned int *npages;
 };

 static void __snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab,
                                        struct snd_dma_sg_fallback *sgbuf)
 {
+        bool wc = dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG;
         size_t i, size;

-        if (sgbuf->pages && sgbuf->addrs) {
+        if (sgbuf->pages && sgbuf->npages) {
                 i = 0;
                 while (i < sgbuf->count) {
-                        if (!sgbuf->pages[i] || !sgbuf->addrs[i])
-                                break;
-                        size = sgbuf->addrs[i] & ~PAGE_MASK;
-                        if (WARN_ON(!size))
+                        size = sgbuf->npages[i];
+                        if (!size)
                                 break;
                         do_free_pages(page_address(sgbuf->pages[i]),
-                                      size << PAGE_SHIFT, false);
+                                      size << PAGE_SHIFT, wc);
                         i += size;
                 }
         }
         kvfree(sgbuf->pages);
-        kvfree(sgbuf->addrs);
+        kvfree(sgbuf->npages);
         kfree(sgbuf);
 }

 /* fallback manual S/G buffer allocations */
 static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
 {
+        bool wc = dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG;
         struct snd_dma_sg_fallback *sgbuf;
         struct page **pagep, *curp;
-        size_t chunk, npages;
-        dma_addr_t *addrp;
+        size_t chunk;
         dma_addr_t addr;
+        unsigned int idx, npages;
         void *p;

         sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
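What the new npages[] bookkeeping means in practice: for each chunk handed back by do_alloc_pages(), the chunk's page count is stored at the index of its first page, and every other entry stays zero from kvcalloc(). A hypothetical layout for a 6-page buffer that ended up as one 4-page chunk plus one 2-page chunk (illustration only, not taken from the patch):

        pages[]  = { p0, p1, p2, p3, q0, q1 }   /* q0/q1 = second chunk */
        npages[] = {  4,  0,  0,  0,  2,  0 }

The free loop above then reads npages[0] == 4, releases the first chunk with a single do_free_pages() call, jumps to index 4, releases the 2-page chunk, and stops when i reaches count; a zero entry marks the point where a partially failed allocation ended.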
@@ -725,16 +725,16 @@ static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
         size = PAGE_ALIGN(size);
         sgbuf->count = size >> PAGE_SHIFT;
         sgbuf->pages = kvcalloc(sgbuf->count, sizeof(*sgbuf->pages), GFP_KERNEL);
-        sgbuf->addrs = kvcalloc(sgbuf->count, sizeof(*sgbuf->addrs), GFP_KERNEL);
-        if (!sgbuf->pages || !sgbuf->addrs)
+        sgbuf->npages = kvcalloc(sgbuf->count, sizeof(*sgbuf->npages), GFP_KERNEL);
+        if (!sgbuf->pages || !sgbuf->npages)
                 goto error;

         pagep = sgbuf->pages;
-        addrp = sgbuf->addrs;
-        chunk = (PAGE_SIZE - 1) << PAGE_SHIFT; /* to fit in low bits in addrs */
+        chunk = size;
+        idx = 0;
         while (size > 0) {
                 chunk = min(size, chunk);
-                p = do_alloc_pages(dmab->dev.dev, chunk, &addr, false);
+                p = do_alloc_pages(dmab->dev.dev, chunk, &addr, wc);
                 if (!p) {
                         if (chunk <= PAGE_SIZE)
                                 goto error;
@@ -746,27 +746,33 @@ static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
                 size -= chunk;
                 /* fill pages */
                 npages = chunk >> PAGE_SHIFT;
-                *addrp = npages; /* store in lower bits */
+                sgbuf->npages[idx] = npages;
+                idx += npages;
                 curp = virt_to_page(p);
-                while (npages--) {
+                while (npages--)
                         *pagep++ = curp++;
-                        *addrp++ |= addr;
-                        addr += PAGE_SIZE;
-                }
         }

+        if (sg_alloc_table_from_pages(&sgbuf->sgt, sgbuf->pages, sgbuf->count,
+                                      0, sgbuf->count << PAGE_SHIFT, GFP_KERNEL))
+                goto error;
+
+        if (dma_map_sgtable(dmab->dev.dev, &sgbuf->sgt, DMA_BIDIRECTIONAL, 0))
+                goto error_dma_map;
+
         p = vmap(sgbuf->pages, sgbuf->count, VM_MAP, PAGE_KERNEL);
         if (!p)
-                goto error;
-
-        if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
-                set_pages_array_wc(sgbuf->pages, sgbuf->count);
+                goto error_vmap;

         dmab->private_data = sgbuf;
         /* store the first page address for convenience */
-        dmab->addr = sgbuf->addrs[0] & PAGE_MASK;
+        dmab->addr = snd_sgbuf_get_addr(dmab, 0);
         return p;

+error_vmap:
+        dma_unmap_sgtable(dmab->dev.dev, &sgbuf->sgt, DMA_BIDIRECTIONAL, 0);
+error_dma_map:
+        sg_free_table(&sgbuf->sgt);
 error:
         __snd_dma_sg_fallback_free(dmab, sgbuf);
         return NULL;
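The allocation path now builds three layers in order (sg-table, DMA mapping, kernel vmap), and the new labels unwind them in reverse so each failure frees exactly what was already built. A generic, self-contained sketch of that goto-ladder idiom; prep_table(), map_table() and map_virt() are hypothetical stand-ins for sg_alloc_table_from_pages(), dma_map_sgtable() and vmap() in the patch:

#include <linux/errno.h>
#include <linux/types.h>

struct demo_state { bool have_table, have_mapping; };

static int prep_table(struct demo_state *s)   { s->have_table = true; return 0; }
static int map_table(struct demo_state *s)    { s->have_mapping = true; return 0; }
static int map_virt(struct demo_state *s)     { return -ENOMEM; /* pretend this fails */ }
static void unmap_table(struct demo_state *s) { s->have_mapping = false; }
static void free_table(struct demo_state *s)  { s->have_table = false; }

static int demo_build(struct demo_state *s)
{
        if (prep_table(s))              /* step 1 */
                goto error;
        if (map_table(s))               /* step 2, needs step 1 */
                goto error_free_table;
        if (map_virt(s))                /* step 3, needs step 2 */
                goto error_unmap_table;
        return 0;

error_unmap_table:
        unmap_table(s);                 /* undo step 2 */
error_free_table:
        free_table(s);                  /* undo step 1 */
error:
        return -ENOMEM;
}

In the patch the same shape appears as error_vmap/error_dma_map/error, with the final error: label falling through to __snd_dma_sg_fallback_free(), which tolerates partially filled state.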
@@ -776,21 +782,12 @@ static void snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab)
 {
         struct snd_dma_sg_fallback *sgbuf = dmab->private_data;

-        if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
-                set_pages_array_wb(sgbuf->pages, sgbuf->count);
         vunmap(dmab->area);
+        dma_unmap_sgtable(dmab->dev.dev, &sgbuf->sgt, DMA_BIDIRECTIONAL, 0);
+        sg_free_table(&sgbuf->sgt);
         __snd_dma_sg_fallback_free(dmab, dmab->private_data);
 }

-static dma_addr_t snd_dma_sg_fallback_get_addr(struct snd_dma_buffer *dmab,
-                                               size_t offset)
-{
-        struct snd_dma_sg_fallback *sgbuf = dmab->private_data;
-        size_t index = offset >> PAGE_SHIFT;
-
-        return (sgbuf->addrs[index] & PAGE_MASK) | (offset & ~PAGE_MASK);
-}
-
 static int snd_dma_sg_fallback_mmap(struct snd_dma_buffer *dmab,
                                     struct vm_area_struct *area)
 {
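With the open-coded addrs[] lookup removed, the offset-to-address translation is now served by the mapped sg-table. Conceptually that lookup walks the DMA entries until it reaches the one containing the offset; a hedged sketch of the idea, not a copy of snd_dma_noncontig_get_addr():

#include <linux/scatterlist.h>

static dma_addr_t sgt_offset_to_addr(struct sg_table *sgt, size_t offset)
{
        struct scatterlist *sg;
        unsigned int i;

        /* walk the DMA-mapped entries in order */
        for_each_sgtable_dma_sg(sgt, sg, i) {
                if (offset < sg_dma_len(sg))
                        return sg_dma_address(sg) + offset;     /* found it */
                offset -= sg_dma_len(sg);
        }
        return 0;       /* offset past the end of the buffer */
}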
@@ -816,10 +813,6 @@ static void *snd_dma_sg_alloc(struct snd_dma_buffer *dmab, size_t size)
                 return p;

         dmab->dev.type = type; /* restore the type */
-        /* if IOMMU is present but failed, give up */
-        if (get_dma_ops(dmab->dev.dev))
-                return NULL;
-        /* try fallback */
         return snd_dma_sg_fallback_alloc(dmab, size);
 }

@@ -827,7 +820,8 @@ static const struct snd_malloc_ops snd_dma_sg_ops = {
         .alloc = snd_dma_sg_alloc,
         .free = snd_dma_sg_fallback_free,
         .mmap = snd_dma_sg_fallback_mmap,
-        .get_addr = snd_dma_sg_fallback_get_addr,
+        /* reuse noncontig helper */
+        .get_addr = snd_dma_noncontig_get_addr,
         /* reuse vmalloc helpers */
         .get_page = snd_dma_vmalloc_get_page,
         .get_chunk_size = snd_dma_vmalloc_get_chunk_size,
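The callback table is what makes the switch transparent to callers: the generic accessors dispatch through struct snd_malloc_ops per buffer type. Roughly how that dispatch looks for the address lookup (simplified sketch, not the verbatim memalloc internals; snd_dma_get_ops() is the internal type-to-ops lookup):

#include <sound/memalloc.h>

dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab, size_t offset)
{
        const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

        if (ops && ops->get_addr)
                return ops->get_addr(dmab, offset);     /* e.g. snd_dma_noncontig_get_addr */
        return dmab->addr + offset;                     /* contiguous buffers */
}

That is also why the patch can simply set dmab->addr = snd_sgbuf_get_addr(dmab, 0): with .get_addr rewired to the noncontig helper, the first-page address comes straight from the mapped sg-table.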