mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
swiotlb: Refactor swiotlb_tbl_unmap_single
Add a new function, swiotlb_release_slots, to make the code reusable for
supporting different bounce buffer pools.

Signed-off-by: Claire Chang <tientzu@chromium.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Tested-by: Stefano Stabellini <sstabellini@kernel.org>
Tested-by: Will Deacon <will@kernel.org>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
parent 36f7b2f3ca
commit 7034787723
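To illustrate the reuse the commit message has in mind, a pool-specific free path could return pages straight to the slot allocator, skipping the DMA-direction bounce that swiotlb_tbl_unmap_single performs. The sketch below is illustrative only, modeled on the shape of the later restricted-DMA patches in this series; swiotlb_free() and the two-argument is_swiotlb_buffer() are assumptions, not part of this commit.

/*
 * Illustrative sketch only, not part of this commit: a hypothetical
 * pool free path that calls swiotlb_release_slots() directly. Unlike
 * swiotlb_tbl_unmap_single(), it needs no DMA-direction bounce, since
 * the caller is freeing an allocation rather than unmapping a transfer.
 */
bool swiotlb_free(struct device *dev, struct page *page, size_t size)
{
	phys_addr_t tlb_addr = page_to_phys(page);

	/* Only buffers that live inside a swiotlb pool can be released. */
	if (!is_swiotlb_buffer(dev, tlb_addr))
		return false;

	swiotlb_release_slots(dev, tlb_addr);
	return true;
}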
kernel/dma/swiotlb.c
@@ -555,27 +555,15 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
 	return tlb_addr;
 }
 
-/*
- * tlb_addr is the physical address of the bounce buffer to unmap.
- */
-void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
-			      size_t mapping_size, enum dma_data_direction dir,
-			      unsigned long attrs)
+static void swiotlb_release_slots(struct device *dev, phys_addr_t tlb_addr)
 {
-	struct io_tlb_mem *mem = hwdev->dma_io_tlb_mem;
+	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
 	unsigned long flags;
-	unsigned int offset = swiotlb_align_offset(hwdev, tlb_addr);
+	unsigned int offset = swiotlb_align_offset(dev, tlb_addr);
 	int index = (tlb_addr - offset - mem->start) >> IO_TLB_SHIFT;
 	int nslots = nr_slots(mem->slots[index].alloc_size + offset);
 	int count, i;
 
-	/*
-	 * First, sync the memory before unmapping the entry
-	 */
-	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
-	    (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
-		swiotlb_bounce(hwdev, tlb_addr, mapping_size, DMA_FROM_DEVICE);
-
 	/*
 	 * Return the buffer to the free list by setting the corresponding
 	 * entries to indicate the number of contiguous entries available.
@@ -610,6 +598,23 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
 	spin_unlock_irqrestore(&mem->lock, flags);
 }
 
+/*
+ * tlb_addr is the physical address of the bounce buffer to unmap.
+ */
+void swiotlb_tbl_unmap_single(struct device *dev, phys_addr_t tlb_addr,
+			      size_t mapping_size, enum dma_data_direction dir,
+			      unsigned long attrs)
+{
+	/*
+	 * First, sync the memory before unmapping the entry
+	 */
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+	    (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
+		swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_FROM_DEVICE);
+
+	swiotlb_release_slots(dev, tlb_addr);
+}
+
 void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr,
 				    size_t size, enum dma_data_direction dir)
 {
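For orientation, swiotlb_tbl_unmap_single() remains the entry point used by the regular DMA unmap path, which is why the CPU sync stays there rather than moving into swiotlb_release_slots(). A rough paraphrase of the dma-direct caller from kernel/dma/direct.h of this era follows; treat the exact signature and helpers as approximate rather than part of this commit.

/*
 * Paraphrased caller sketch, not part of this commit: the dma-direct
 * unmap path syncs the buffer for the CPU, then tears down the bounce
 * buffer if the address falls inside a swiotlb pool.
 */
static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t phys = dma_to_phys(dev, addr);

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		dma_direct_sync_single_for_cpu(dev, addr, size, dir);

	if (unlikely(is_swiotlb_buffer(dev, phys)))
		swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
}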