Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
[POWERPC] Workaround for iommu page alignment
Commit 5d2efba64b231a1733c4048d1708d77e07f26426 changed our iommu code
so that it always uses an iommu page size of 4kB. That means that with
our current code, drivers may do a dma_map_sg() of a 64kB page and
obtain a dma_addr_t that is only 4k aligned.

This works fine in most cases except for some infiniband HW it seems,
where they tell the HW about the page size and it ignores the low bits
of the DMA address.

This works around it by making our IOMMU code enforce a PAGE_SIZE
alignment for mappings of objects that are page aligned in the first
place and whose size is larger or equal to a page.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
parent 031f2dcd70
commit d262c32a4b
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -278,6 +278,7 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 	unsigned long flags;
 	struct scatterlist *s, *outs, *segstart;
 	int outcount, incount, i;
+	unsigned int align;
 	unsigned long handle;
 
 	BUG_ON(direction == DMA_NONE);
@@ -309,7 +310,12 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 		/* Allocate iommu entries for that segment */
 		vaddr = (unsigned long) sg_virt(s);
 		npages = iommu_num_pages(vaddr, slen);
-		entry = iommu_range_alloc(tbl, npages, &handle, mask >> IOMMU_PAGE_SHIFT, 0);
+		align = 0;
+		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && slen >= PAGE_SIZE &&
+		    (vaddr & ~PAGE_MASK) == 0)
+			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
+		entry = iommu_range_alloc(tbl, npages, &handle,
+					  mask >> IOMMU_PAGE_SHIFT, align);
 
 		DBG(" - vaddr: %lx, size: %lx\n", vaddr, slen);
 
@@ -572,7 +578,7 @@ dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
 {
 	dma_addr_t dma_handle = DMA_ERROR_CODE;
 	unsigned long uaddr;
-	unsigned int npages;
+	unsigned int npages, align;
 
 	BUG_ON(direction == DMA_NONE);
 
@@ -580,8 +586,13 @@ dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
 	npages = iommu_num_pages(uaddr, size);
 
 	if (tbl) {
+		align = 0;
+		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && size >= PAGE_SIZE &&
+		    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
+			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
+
 		dma_handle = iommu_alloc(tbl, vaddr, npages, direction,
-					 mask >> IOMMU_PAGE_SHIFT, 0);
+					 mask >> IOMMU_PAGE_SHIFT, align);
 		if (dma_handle == DMA_ERROR_CODE) {
 			if (printk_ratelimit()) {
 				printk(KERN_INFO "iommu_alloc failed, "