arc: remove the partial DMA_ATTR_NON_CONSISTENT support
The arc DMA code supports DMA_ATTR_NON_CONSISTENT allocations, but does not
provide a cache_sync operation. This means any user of it will never be able
to actually transfer cache ownership and thus cause coherency bugs.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Evgeniy Paltsev <paltsev@synopsys.com>
Tested-by: Evgeniy Paltsev <paltsev@synopsys.com>
parent 34ab03160e
commit 80e61fcd23
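For background on the bug class being closed here: DMA_ATTR_NON_CONSISTENT hands cache maintenance to the driver, which is expected to call dma_cache_sync() to pass ownership of the buffer back and forth between CPU and device. The sketch below is not from this commit; it is a minimal illustration of that driver-side contract against the DMA API of this era (dma_alloc_attrs(), dma_cache_sync(), dma_free_attrs()), with a hypothetical driver function and buffer size. On arc there was no cache_sync backend, so the dma_cache_sync() calls do nothing on that platform and ownership is never actually transferred, which is the coherency problem the commit message describes.

#include <linux/dma-mapping.h>

/*
 * Illustrative only: the usage pattern DMA_ATTR_NON_CONSISTENT assumes.
 * my_dev_dma_example() and MY_BUF_SIZE are hypothetical, not kernel code.
 */
#define MY_BUF_SIZE	4096

static int my_dev_dma_example(struct device *dev)
{
	dma_addr_t dma_handle;
	void *buf;

	/* Memory may be cached; the driver owns all cache maintenance. */
	buf = dma_alloc_attrs(dev, MY_BUF_SIZE, &dma_handle, GFP_KERNEL,
			      DMA_ATTR_NON_CONSISTENT);
	if (!buf)
		return -ENOMEM;

	/* CPU fills the buffer here ... */

	/*
	 * Hand ownership to the device. Without an arch cache_sync hook
	 * (the gap on arc) this is a no-op, so dirty cache lines are never
	 * written back and the device can read stale data.
	 */
	dma_cache_sync(dev, buf, MY_BUF_SIZE, DMA_TO_DEVICE);

	/* ... device performs DMA on the buffer ... */

	/*
	 * Hand ownership back to the CPU. Without a real cache_sync the
	 * CPU may keep reading stale cached contents instead of what the
	 * device just wrote.
	 */
	dma_cache_sync(dev, buf, MY_BUF_SIZE, DMA_FROM_DEVICE);

	dma_free_attrs(dev, MY_BUF_SIZE, buf, dma_handle,
		       DMA_ATTR_NON_CONSISTENT);
	return 0;
}

Since no arc user could ever make this pattern work correctly, the commit drops the attribute handling and always returns an uncached ioremap_nocache() mapping, as the diff below shows.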
@@ -24,7 +24,6 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 	struct page *page;
 	phys_addr_t paddr;
 	void *kvaddr;
-	bool need_coh = !(attrs & DMA_ATTR_NON_CONSISTENT);
 
 	/*
 	 * __GFP_HIGHMEM flag is cleared by upper layer functions
@@ -46,15 +45,11 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 	 * A coherent buffer needs MMU mapping to enforce non-cachability.
 	 * kvaddr is kernel Virtual address (0x7000_0000 based).
 	 */
-	if (need_coh) {
-		kvaddr = ioremap_nocache(paddr, size);
-		if (kvaddr == NULL) {
-			__free_pages(page, order);
-			return NULL;
-		}
-	} else {
-		kvaddr = (void *)(u32)paddr;
-	}
+	kvaddr = ioremap_nocache(paddr, size);
+	if (kvaddr == NULL) {
+		__free_pages(page, order);
+		return NULL;
+	}
 
 	/*
 	 * Evict any existing L1 and/or L2 lines for the backing page
@@ -66,9 +61,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 	 * Currently flush_cache_vmap nukes the L1 cache completely which
 	 * will be optimized as a separate commit
 	 */
-	if (need_coh)
-		dma_cache_wback_inv(paddr, size);
-
+	dma_cache_wback_inv(paddr, size);
 	return kvaddr;
 }
 
@@ -78,9 +71,7 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
 	phys_addr_t paddr = dma_handle;
 	struct page *page = virt_to_page(paddr);
 
-	if (!(attrs & DMA_ATTR_NON_CONSISTENT))
-		iounmap((void __force __iomem *)vaddr);
-
+	iounmap((void __force __iomem *)vaddr);
 	__free_pages(page, get_order(size));
 }
 