Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
dma-mapping updates for 5.10

Merge tag 'dma-mapping-5.10' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping updates from Christoph Hellwig:

 - rework the non-coherent DMA allocator
 - move private definitions out of <linux/dma-mapping.h>
 - lower CMA_ALIGNMENT (Paul Cercueil)
 - remove the omap1 dma address translation in favor of the common code
 - make dma-direct aware of multiple dma offset ranges (Jim Quinlan)
 - support per-node DMA CMA areas (Barry Song)
 - increase the default seg boundary limit (Nicolin Chen)
 - misc fixes (Robin Murphy, Thomas Tai, Xu Wang)
 - various cleanups

* tag 'dma-mapping-5.10' of git://git.infradead.org/users/hch/dma-mapping: (63 commits)
  ARM/ixp4xx: add a missing include of dma-map-ops.h
  dma-direct: simplify the DMA_ATTR_NO_KERNEL_MAPPING handling
  dma-direct: factor out a dma_direct_alloc_from_pool helper
  dma-direct: check for highmem pages in dma_direct_alloc_pages
  dma-mapping: merge <linux/dma-noncoherent.h> into <linux/dma-map-ops.h>
  dma-mapping: move large parts of <linux/dma-direct.h> to kernel/dma
  dma-mapping: move dma-debug.h to kernel/dma/
  dma-mapping: remove <asm/dma-contiguous.h>
  dma-mapping: merge <linux/dma-contiguous.h> into <linux/dma-map-ops.h>
  dma-contiguous: remove dma_contiguous_set_default
  dma-contiguous: remove dev_set_cma_area
  dma-contiguous: remove dma_declare_contiguous
  dma-mapping: split <linux/dma-mapping.h>
  cma: decrease CMA_ALIGNMENT lower limit to 2
  firewire-ohci: use dma_alloc_pages
  dma-iommu: implement ->alloc_noncoherent
  dma-mapping: add new {alloc,free}_noncoherent dma_map_ops methods
  dma-mapping: add a new dma_alloc_pages API
  dma-mapping: remove dma_cache_sync
  53c700: convert to dma_alloc_noncoherent
  ...
commit 5a32c3413d
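One item above, "make dma-direct aware of multiple dma offset ranges", shows up in the arch hunks below as calls to dma_direct_set_offset() replacing the old dev->dma_pfn_offset. A minimal, hypothetical sketch of that pattern follows — the notifier shape mirrors the keystone hunk, but every constant and name here is invented for illustration and is not part of this commit::

	#include <linux/device.h>
	#include <linux/dma-map-ops.h>
	#include <linux/notifier.h>
	#include <linux/sizes.h>

	/* Hypothetical platform constants, for illustration only. */
	#define EXAMPLE_CPU_PHYS_START	0x800000000ULL	/* RAM as the CPU sees it */
	#define EXAMPLE_BUS_DMA_START	0x80000000ULL	/* RAM as the device sees it */
	#define EXAMPLE_RAM_SIZE	SZ_2G

	static int example_dma_offset_notifier(struct notifier_block *nb,
					       unsigned long event, void *data)
	{
		struct device *dev = data;

		if (event != BUS_NOTIFY_ADD_DEVICE)
			return NOTIFY_DONE;

		/*
		 * Record one CPU-physical -> DMA address translation range for
		 * this device; dma-direct then consults dev->dma_range_map on
		 * each mapping instead of a single flat dma_pfn_offset.
		 */
		if (dma_direct_set_offset(dev, EXAMPLE_CPU_PHYS_START,
					  EXAMPLE_BUS_DMA_START, EXAMPLE_RAM_SIZE))
			dev_warn(dev, "failed to set up DMA offset\n");

		return NOTIFY_OK;
	}

	/* registered early via bus_register_notifier(&platform_bus_type, ...) */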
@@ -597,7 +597,18 @@
			placement constraint by the physical address range of
			memory allocations. A value of 0 disables CMA
			altogether. For more information, see
			include/linux/dma-contiguous.h
			kernel/dma/contiguous.c

	cma_pernuma=nn[MG]
			[ARM64,KNL]
			Sets the size of kernel per-numa memory area for
			contiguous memory allocations. A value of 0 disables
			per-numa CMA altogether. If this option is not
			specified, the default value is 0.
			With per-numa CMA enabled, DMA users on node nid will
			first try to allocate buffer from the pernuma area
			which is located in node nid; if the allocation fails,
			they will fall back to the global default memory area.

	cmo_free_hint=	[PPC] Format: { yes | no }
			Specify whether pages are marked as being inactive
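A purely illustrative usage of the new parameter documented above (the size is made up and not taken from this commit): an arm64 NUMA system that wants a 16 MB per-node CMA pool would boot with::

	cma_pernuma=16M

DMA users on each node then try their node-local area first and fall back to the global CMA area, as described above.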
@@ -516,48 +516,56 @@ routines, e.g.:::
	}


Part II - Advanced dma usage
----------------------------
Part II - Non-coherent DMA allocations
--------------------------------------

Warning: These pieces of the DMA API should not be used in the
majority of cases, since they cater for unlikely corner cases that
don't belong in usual drivers.
These APIs allow allocating pages in the kernel direct mapping that are
guaranteed to be DMA addressable.  This means that, unlike dma_alloc_coherent,
virt_to_page can be called on the resulting address, and the resulting
struct page can be used for everything a struct page is suitable for.

If you don't understand how cache line coherency works between a
processor and an I/O device, you should not be using this part of the
API at all.
If you don't understand how cache line coherency works between a processor and
an I/O device, you should not be using this part of the API.

::

	void *
	dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
			gfp_t flag, unsigned long attrs)
	dma_alloc_noncoherent(struct device *dev, size_t size,
			dma_addr_t *dma_handle, enum dma_data_direction dir,
			gfp_t gfp)

Identical to dma_alloc_coherent() except that when the
DMA_ATTR_NON_CONSISTENT flag is passed in the attrs argument, the
platform will choose to return either consistent or non-consistent memory
as it sees fit.  By using this API, you are guaranteeing to the platform
that you have all the correct and necessary sync points for this memory
in the driver should it choose to return non-consistent memory.
This routine allocates a region of <size> bytes of consistent memory.  It
returns a pointer to the allocated region (in the processor's virtual address
space) or NULL if the allocation failed.  The returned memory may or may not
be in the kernel's direct mapping.  Drivers must not call virt_to_page on
the returned memory region.

Note: where the platform can return consistent memory, it will
guarantee that the sync points become nops.
It also returns a <dma_handle> which may be cast to an unsigned integer the
same width as the bus and given to the device as the DMA address base of
the region.

Warning: Handling non-consistent memory is a real pain.  You should
only use this API if you positively know your driver will be
required to work on one of the rare (usually non-PCI) architectures
that simply cannot make consistent memory.
The dir parameter specifies whether data is read and/or written by the device,
see dma_map_single() for details.

The gfp parameter allows the caller to specify the ``GFP_`` flags (see
kmalloc()) for the allocation, but rejects flags used to specify a memory
zone such as GFP_DMA or GFP_HIGHMEM.

Before giving the memory to the device, dma_sync_single_for_device() needs
to be called, and before reading memory written by the device,
dma_sync_single_for_cpu(), just like for streaming DMA mappings that are
reused.

::

	void
	dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		       dma_addr_t dma_handle, unsigned long attrs)
	dma_free_noncoherent(struct device *dev, size_t size, void *cpu_addr,
			dma_addr_t dma_handle, enum dma_data_direction dir)

Free memory allocated by dma_alloc_attrs().  All common
parameters must be identical to those otherwise passed to dma_free_coherent,
and the attrs argument must be identical to the attrs passed to
dma_alloc_attrs().
Free a region of memory previously allocated using dma_alloc_noncoherent().
dev, size, dma_handle and dir must all be the same as those passed into
dma_alloc_noncoherent().  cpu_addr must be the virtual address returned by
dma_alloc_noncoherent().

::
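Putting the allocate/sync/free sequence described above together, a minimal, hypothetical driver snippet might look like the following (the buffer size, direction choices and missing error handling are illustrative only; none of this code is part of the patch)::

	#include <linux/device.h>
	#include <linux/dma-mapping.h>
	#include <linux/gfp.h>
	#include <linux/sizes.h>
	#include <linux/string.h>

	#define EXAMPLE_BUF_SIZE	SZ_64K	/* made-up buffer size */

	static int example_do_transfer(struct device *dev)
	{
		dma_addr_t dma_handle;
		void *buf;

		/* Non-coherent allocation: the CPU mapping may be cached. */
		buf = dma_alloc_noncoherent(dev, EXAMPLE_BUF_SIZE, &dma_handle,
					    DMA_BIDIRECTIONAL, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		memset(buf, 0, EXAMPLE_BUF_SIZE);	/* fill from the CPU */

		/* Hand ownership to the device before it touches the buffer. */
		dma_sync_single_for_device(dev, dma_handle, EXAMPLE_BUF_SIZE,
					   DMA_TO_DEVICE);

		/* ... device DMA to/from dma_handle happens here ... */

		/* Take ownership back before the CPU reads what the device wrote. */
		dma_sync_single_for_cpu(dev, dma_handle, EXAMPLE_BUF_SIZE,
					DMA_FROM_DEVICE);

		dma_free_noncoherent(dev, EXAMPLE_BUF_SIZE, buf, dma_handle,
				     DMA_BIDIRECTIONAL);
		return 0;
	}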
@@ -575,41 +583,6 @@ memory or doing partial flushes.
into the width returned by this call. It will also always be a power
of two for easy alignment.

::

	void
	dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		       enum dma_data_direction direction)

Do a partial sync of memory that was allocated by dma_alloc_attrs() with
the DMA_ATTR_NON_CONSISTENT flag starting at virtual address vaddr and
continuing on for size.  Again, you *must* observe the cache line
boundaries when doing this.

::

	int
	dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				    dma_addr_t device_addr, size_t size);

Declare a region of memory to be handed out by dma_alloc_coherent() when
it's asked for coherent memory for this device.

phys_addr is the CPU physical address to which the memory is currently
assigned (this will be ioremapped so the CPU can access the region).

device_addr is the DMA address the device needs to be programmed
with to actually address this memory (this will be handed out as the
dma_addr_t in dma_alloc_coherent()).

size is the size of the area (must be a multiple of PAGE_SIZE).

As a simplification for the platforms, only *one* such region of
memory may be declared per device.

For reasons of efficiency, most platforms choose to track the declared
region only at the granularity of a page.  For smaller allocations,
you should use the dma_pool() API.

Part III - Debug drivers use of the DMA-API
-------------------------------------------
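Since the text above spells out the dma_declare_coherent_memory() contract, here is a small, hypothetical platform-code sketch of it (the SRAM addresses and size are invented, and the <linux/dma-map-ops.h> header reflects where the declaration lives after this series; nothing here is taken from the patch itself)::

	#include <linux/device.h>
	#include <linux/dma-map-ops.h>
	#include <linux/sizes.h>

	/* Hypothetical on-chip SRAM the device can reach; illustration only. */
	#define EXAMPLE_SRAM_PHYS	0x10000000	/* CPU physical address */
	#define EXAMPLE_SRAM_BUS	0x00000000	/* address the device uses */
	#define EXAMPLE_SRAM_SIZE	SZ_64K		/* multiple of PAGE_SIZE */

	static int example_probe(struct device *dev)
	{
		int ret;

		/*
		 * Route this device's dma_alloc_coherent() allocations to the
		 * dedicated SRAM region instead of system memory.
		 */
		ret = dma_declare_coherent_memory(dev, EXAMPLE_SRAM_PHYS,
						  EXAMPLE_SRAM_BUS,
						  EXAMPLE_SRAM_SIZE);
		if (ret)
			dev_warn(dev, "no dedicated coherent memory region\n");

		return 0;
	}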
@@ -25,14 +25,6 @@ Since it is optional for platforms to implement DMA_ATTR_WRITE_COMBINE,
those that do not will simply ignore the attribute and exhibit default
behavior.

DMA_ATTR_NON_CONSISTENT
-----------------------

DMA_ATTR_NON_CONSISTENT lets the platform choose to return either
consistent or non-consistent memory as it sees fit.  By using this API,
you are guaranteeing to the platform that you have all the correct and
necessary sync points for this memory in the driver.

DMA_ATTR_NO_KERNEL_MAPPING
--------------------------
@@ -5216,7 +5216,7 @@ T:	git git://git.infradead.org/users/hch/dma-mapping.git
F:	include/asm-generic/dma-mapping.h
F:	include/linux/dma-direct.h
F:	include/linux/dma-mapping.h
F:	include/linux/dma-noncoherent.h
F:	include/linux/dma-map-ops.h
F:	kernel/dma/

DMA-BUF HEAPS FRAMEWORK
@@ -11,7 +11,7 @@
#include <linux/export.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/dma-mapping.h>
#include <linux/dma-map-ops.h>
#include <linux/iommu-helper.h>

#include <asm/io.h>
@@ -141,12 +141,7 @@ iommu_arena_find_pages(struct device *dev, struct pci_iommu_arena *arena,
	unsigned long boundary_size;

	base = arena->dma_base >> PAGE_SHIFT;
	if (dev) {
		boundary_size = dma_get_seg_boundary(dev) + 1;
		boundary_size >>= PAGE_SHIFT;
	} else {
		boundary_size = 1UL << (32 - PAGE_SHIFT);
	}
	boundary_size = dma_get_seg_boundary_nr_pages(dev, PAGE_SHIFT);

	/* Search forward for the first mask-aligned sequence of N free ptes */
	ptes = arena->ptes;
@@ -957,5 +952,7 @@ const struct dma_map_ops alpha_pci_ops = {
	.dma_supported		= alpha_pci_supported,
	.mmap			= dma_common_mmap,
	.get_sgtable		= dma_common_get_sgtable,
	.alloc_pages		= dma_common_alloc_pages,
	.free_pages		= dma_common_free_pages,
};
EXPORT_SYMBOL(alpha_pci_ops);
@@ -3,7 +3,7 @@
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/dma-noncoherent.h>
#include <linux/dma-map-ops.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
@ -24,7 +24,8 @@
|
||||
#include <linux/slab.h>
|
||||
#include <linux/page-flags.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/dma-direct.h>
|
||||
#include <linux/dma-map-ops.h>
|
||||
#include <linux/dmapool.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/scatterlist.h>
|
||||
|
@ -1,15 +0,0 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
#ifndef ASMARM_DMA_CONTIGUOUS_H
|
||||
#define ASMARM_DMA_CONTIGUOUS_H
|
||||
|
||||
#ifdef __KERNEL__
|
||||
#ifdef CONFIG_DMA_CMA
|
||||
|
||||
#include <linux/types.h>
|
||||
|
||||
void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size);
|
||||
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#endif
|
@ -2,13 +2,44 @@
|
||||
#ifndef ASM_ARM_DMA_DIRECT_H
|
||||
#define ASM_ARM_DMA_DIRECT_H 1
|
||||
|
||||
static inline dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
|
||||
#include <asm/memory.h>
|
||||
|
||||
/*
|
||||
* dma_to_pfn/pfn_to_dma/virt_to_dma are architecture private
|
||||
* functions used internally by the DMA-mapping API to provide DMA
|
||||
* addresses. They must not be used by drivers.
|
||||
*/
|
||||
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
|
||||
{
|
||||
if (dev && dev->dma_range_map)
|
||||
pfn = PFN_DOWN(translate_phys_to_dma(dev, PFN_PHYS(pfn)));
|
||||
return (dma_addr_t)__pfn_to_bus(pfn);
|
||||
}
|
||||
|
||||
static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
|
||||
{
|
||||
unsigned long pfn = __bus_to_pfn(addr);
|
||||
|
||||
if (dev && dev->dma_range_map)
|
||||
pfn = PFN_DOWN(translate_dma_to_phys(dev, PFN_PHYS(pfn)));
|
||||
return pfn;
|
||||
}
|
||||
|
||||
static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
|
||||
{
|
||||
if (dev)
|
||||
return pfn_to_dma(dev, virt_to_pfn(addr));
|
||||
|
||||
return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
|
||||
}
|
||||
|
||||
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
|
||||
{
|
||||
unsigned int offset = paddr & ~PAGE_MASK;
|
||||
return pfn_to_dma(dev, __phys_to_pfn(paddr)) + offset;
|
||||
}
|
||||
|
||||
static inline phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dev_addr)
|
||||
static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
|
||||
{
|
||||
unsigned int offset = dev_addr & ~PAGE_MASK;
|
||||
return __pfn_to_phys(dma_to_pfn(dev, dev_addr)) + offset;
|
||||
|
@ -6,7 +6,6 @@
|
||||
|
||||
#include <linux/mm_types.h>
|
||||
#include <linux/scatterlist.h>
|
||||
#include <linux/dma-debug.h>
|
||||
#include <linux/kref.h>
|
||||
|
||||
struct dma_iommu_mapping {
|
||||
|
@ -6,9 +6,6 @@
|
||||
|
||||
#include <linux/mm_types.h>
|
||||
#include <linux/scatterlist.h>
|
||||
#include <linux/dma-debug.h>
|
||||
|
||||
#include <asm/memory.h>
|
||||
|
||||
#include <xen/xen.h>
|
||||
#include <asm/xen/hypervisor.h>
|
||||
@ -23,74 +20,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
#ifdef __arch_page_to_dma
|
||||
#error Please update to __arch_pfn_to_dma
|
||||
#endif
|
||||
|
||||
/*
|
||||
* dma_to_pfn/pfn_to_dma/dma_to_virt/virt_to_dma are architecture private
|
||||
* functions used internally by the DMA-mapping API to provide DMA
|
||||
* addresses. They must not be used by drivers.
|
||||
*/
|
||||
#ifndef __arch_pfn_to_dma
|
||||
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
|
||||
{
|
||||
if (dev)
|
||||
pfn -= dev->dma_pfn_offset;
|
||||
return (dma_addr_t)__pfn_to_bus(pfn);
|
||||
}
|
||||
|
||||
static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
|
||||
{
|
||||
unsigned long pfn = __bus_to_pfn(addr);
|
||||
|
||||
if (dev)
|
||||
pfn += dev->dma_pfn_offset;
|
||||
|
||||
return pfn;
|
||||
}
|
||||
|
||||
static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
|
||||
{
|
||||
if (dev) {
|
||||
unsigned long pfn = dma_to_pfn(dev, addr);
|
||||
|
||||
return phys_to_virt(__pfn_to_phys(pfn));
|
||||
}
|
||||
|
||||
return (void *)__bus_to_virt((unsigned long)addr);
|
||||
}
|
||||
|
||||
static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
|
||||
{
|
||||
if (dev)
|
||||
return pfn_to_dma(dev, virt_to_pfn(addr));
|
||||
|
||||
return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
|
||||
}
|
||||
|
||||
#else
|
||||
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
|
||||
{
|
||||
return __arch_pfn_to_dma(dev, pfn);
|
||||
}
|
||||
|
||||
static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
|
||||
{
|
||||
return __arch_dma_to_pfn(dev, addr);
|
||||
}
|
||||
|
||||
static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
|
||||
{
|
||||
return __arch_dma_to_virt(dev, addr);
|
||||
}
|
||||
|
||||
static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
|
||||
{
|
||||
return __arch_virt_to_dma(dev, addr);
|
||||
}
|
||||
#endif
|
||||
|
||||
/**
|
||||
* arm_dma_alloc - allocate consistent memory for DMA
|
||||
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
|
||||
|
@ -10,7 +10,7 @@
|
||||
#include <linux/clk-provider.h>
|
||||
#include <linux/clk.h>
|
||||
#include <linux/clkdev.h>
|
||||
#include <linux/dma-contiguous.h>
|
||||
#include <linux/dma-map-ops.h>
|
||||
#include <linux/dmaengine.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/io.h>
|
||||
@ -884,6 +884,7 @@ early_param("rproc_mem", early_rproc_mem);
|
||||
|
||||
void __init da8xx_rproc_reserve_cma(void)
|
||||
{
|
||||
struct cma *cma;
|
||||
int ret;
|
||||
|
||||
if (!rproc_base || !rproc_size) {
|
||||
@ -897,13 +898,16 @@ void __init da8xx_rproc_reserve_cma(void)
|
||||
pr_info("%s: reserving 0x%lx @ 0x%lx...\n",
|
||||
__func__, rproc_size, (unsigned long)rproc_base);
|
||||
|
||||
ret = dma_declare_contiguous(&da8xx_dsp.dev, rproc_size, rproc_base, 0);
|
||||
if (ret)
|
||||
pr_err("%s: dma_declare_contiguous failed %d\n", __func__, ret);
|
||||
else
|
||||
rproc_mem_inited = true;
|
||||
ret = dma_contiguous_reserve_area(rproc_size, rproc_base, 0, &cma,
|
||||
true);
|
||||
if (ret) {
|
||||
pr_err("%s: dma_contiguous_reserve_area failed %d\n",
|
||||
__func__, ret);
|
||||
return;
|
||||
}
|
||||
da8xx_dsp.dev.cma_area = cma;
|
||||
rproc_mem_inited = true;
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
void __init da8xx_rproc_reserve_cma(void)
|
||||
|
@ -5,7 +5,7 @@
|
||||
#include <linux/clk.h>
|
||||
#include <linux/clkdev.h>
|
||||
#include <linux/clocksource.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/dma-map-ops.h>
|
||||
#include <linux/input.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/irqchip.h>
|
||||
|
@ -16,7 +16,7 @@
|
||||
#include <linux/input.h>
|
||||
#include <linux/gpio.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/dma-map-ops.h>
|
||||
#include <linux/leds.h>
|
||||
#include <linux/platform_data/asoc-mx27vis.h>
|
||||
#include <sound/tlv320aic32x4.h>
|
||||
|
@ -4,7 +4,7 @@
|
||||
*/
|
||||
|
||||
#include <linux/delay.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/dma-map-ops.h>
|
||||
#include <linux/gfp.h>
|
||||
#include <linux/gpio.h>
|
||||
#include <linux/init.h>
|
||||
|
@ -29,6 +29,7 @@
|
||||
#include <linux/sched_clock.h>
|
||||
#include <linux/irqchip/irq-ixp4xx.h>
|
||||
#include <linux/platform_data/timer-ixp4xx.h>
|
||||
#include <linux/dma-map-ops.h>
|
||||
#include <mach/udc.h>
|
||||
#include <mach/hardware.h>
|
||||
#include <mach/io.h>
|
||||
|
@ -8,6 +8,7 @@
|
||||
*/
|
||||
#include <linux/io.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/of_platform.h>
|
||||
#include <linux/of_address.h>
|
||||
@ -24,8 +25,7 @@
|
||||
|
||||
#include "keystone.h"
|
||||
|
||||
static unsigned long keystone_dma_pfn_offset __read_mostly;
|
||||
|
||||
#ifdef CONFIG_ARM_LPAE
|
||||
static int keystone_platform_notifier(struct notifier_block *nb,
|
||||
unsigned long event, void *data)
|
||||
{
|
||||
@ -38,9 +38,12 @@ static int keystone_platform_notifier(struct notifier_block *nb,
|
||||
return NOTIFY_BAD;
|
||||
|
||||
if (!dev->of_node) {
|
||||
dev->dma_pfn_offset = keystone_dma_pfn_offset;
|
||||
dev_err(dev, "set dma_pfn_offset%08lx\n",
|
||||
dev->dma_pfn_offset);
|
||||
int ret = dma_direct_set_offset(dev, KEYSTONE_HIGH_PHYS_START,
|
||||
KEYSTONE_LOW_PHYS_START,
|
||||
KEYSTONE_HIGH_PHYS_SIZE);
|
||||
dev_err(dev, "set dma_offset%08llx%s\n",
|
||||
KEYSTONE_HIGH_PHYS_START - KEYSTONE_LOW_PHYS_START,
|
||||
ret ? " failed" : "");
|
||||
}
|
||||
return NOTIFY_OK;
|
||||
}
|
||||
@ -48,14 +51,14 @@ static int keystone_platform_notifier(struct notifier_block *nb,
|
||||
static struct notifier_block platform_nb = {
|
||||
.notifier_call = keystone_platform_notifier,
|
||||
};
|
||||
#endif /* CONFIG_ARM_LPAE */
|
||||
|
||||
static void __init keystone_init(void)
|
||||
{
|
||||
if (PHYS_OFFSET >= KEYSTONE_HIGH_PHYS_START) {
|
||||
keystone_dma_pfn_offset = PFN_DOWN(KEYSTONE_HIGH_PHYS_START -
|
||||
KEYSTONE_LOW_PHYS_START);
|
||||
#ifdef CONFIG_ARM_LPAE
|
||||
if (PHYS_OFFSET >= KEYSTONE_HIGH_PHYS_START)
|
||||
bus_register_notifier(&platform_bus_type, &platform_nb);
|
||||
}
|
||||
#endif
|
||||
keystone_pm_runtime_init();
|
||||
}
|
||||
|
||||
|
@ -25,7 +25,7 @@
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/dma-map-ops.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/mbus.h>
|
||||
|
@ -14,42 +14,11 @@
|
||||
* OMAP-1510 bus address is translated into a Local Bus address if the
|
||||
* OMAP bus type is lbus. We do the address translation based on the
|
||||
* device overriding the defaults used in the dma-mapping API.
|
||||
* Note that the is_lbus_device() test is not very efficient on 1510
|
||||
* because of the strncmp().
|
||||
*/
|
||||
#if defined(CONFIG_ARCH_OMAP15XX) && !defined(__ASSEMBLER__)
|
||||
|
||||
/*
|
||||
* OMAP-1510 Local Bus address offset
|
||||
*/
|
||||
#define OMAP1510_LB_OFFSET UL(0x30000000)
|
||||
|
||||
#define virt_to_lbus(x) ((x) - PAGE_OFFSET + OMAP1510_LB_OFFSET)
|
||||
#define lbus_to_virt(x) ((x) - OMAP1510_LB_OFFSET + PAGE_OFFSET)
|
||||
#define is_lbus_device(dev) (cpu_is_omap15xx() && dev && (strncmp(dev_name(dev), "ohci", 4) == 0))
|
||||
|
||||
#define __arch_pfn_to_dma(dev, pfn) \
|
||||
({ dma_addr_t __dma = __pfn_to_phys(pfn); \
|
||||
if (is_lbus_device(dev)) \
|
||||
__dma = __dma - PHYS_OFFSET + OMAP1510_LB_OFFSET; \
|
||||
__dma; })
|
||||
|
||||
#define __arch_dma_to_pfn(dev, addr) \
|
||||
({ dma_addr_t __dma = addr; \
|
||||
if (is_lbus_device(dev)) \
|
||||
__dma += PHYS_OFFSET - OMAP1510_LB_OFFSET; \
|
||||
__phys_to_pfn(__dma); \
|
||||
})
|
||||
|
||||
#define __arch_dma_to_virt(dev, addr) ({ (void *) (is_lbus_device(dev) ? \
|
||||
lbus_to_virt(addr) : \
|
||||
__phys_to_virt(addr)); })
|
||||
|
||||
#define __arch_virt_to_dma(dev, addr) ({ unsigned long __addr = (unsigned long)(addr); \
|
||||
(dma_addr_t) (is_lbus_device(dev) ? \
|
||||
virt_to_lbus(__addr) : \
|
||||
__virt_to_phys(__addr)); })
|
||||
|
||||
#endif /* CONFIG_ARCH_OMAP15XX */
|
||||
|
||||
#endif
|
||||
|
@ -9,6 +9,7 @@
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/io.h>
|
||||
|
||||
#include <asm/irq.h>
|
||||
@ -542,6 +543,25 @@ static u32 __init omap1_usb2_init(unsigned nwires, unsigned alt_pingroup)
|
||||
/* ULPD_APLL_CTRL */
|
||||
#define APLL_NDPLL_SWITCH (1 << 0)
|
||||
|
||||
static int omap_1510_usb_ohci_notifier(struct notifier_block *nb,
|
||||
unsigned long event, void *data)
|
||||
{
|
||||
struct device *dev = data;
|
||||
|
||||
if (event != BUS_NOTIFY_ADD_DEVICE)
|
||||
return NOTIFY_DONE;
|
||||
|
||||
if (strncmp(dev_name(dev), "ohci", 4) == 0 &&
|
||||
dma_direct_set_offset(dev, PHYS_OFFSET, OMAP1510_LB_OFFSET,
|
||||
(u64)-1))
|
||||
WARN_ONCE(1, "failed to set DMA offset\n");
|
||||
return NOTIFY_OK;
|
||||
}
|
||||
|
||||
static struct notifier_block omap_1510_usb_ohci_nb = {
|
||||
.notifier_call = omap_1510_usb_ohci_notifier,
|
||||
};
|
||||
|
||||
static void __init omap_1510_usb_init(struct omap_usb_config *config)
|
||||
{
|
||||
unsigned int val;
|
||||
@ -600,6 +620,8 @@ static void __init omap_1510_usb_init(struct omap_usb_config *config)
|
||||
if (config->register_host) {
|
||||
int status;
|
||||
|
||||
bus_register_notifier(&platform_bus_type,
|
||||
&omap_1510_usb_ohci_nb);
|
||||
ohci_device.dev.platform_data = config;
|
||||
status = platform_device_register(&ohci_device);
|
||||
if (status)
|
||||
|
@ -9,7 +9,7 @@
|
||||
|
||||
#include <linux/clocksource.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/dma-contiguous.h>
|
||||
#include <linux/dma-map-ops.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/memblock.h>
|
||||
|
@ -8,6 +8,7 @@
|
||||
#include <linux/export.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/dma-direct.h>
|
||||
#include <linux/dma-map-ops.h>
|
||||
#include <linux/scatterlist.h>
|
||||
|
||||
#include <asm/cachetype.h>
|
||||
@ -176,6 +177,8 @@ static void arm_nommu_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist
|
||||
const struct dma_map_ops arm_nommu_dma_ops = {
|
||||
.alloc = arm_nommu_dma_alloc,
|
||||
.free = arm_nommu_dma_free,
|
||||
.alloc_pages = dma_direct_alloc_pages,
|
||||
.free_pages = dma_direct_free_pages,
|
||||
.mmap = arm_nommu_dma_mmap,
|
||||
.map_page = arm_nommu_dma_map_page,
|
||||
.unmap_page = arm_nommu_dma_unmap_page,
|
||||
|
@ -15,9 +15,7 @@
|
||||
#include <linux/init.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/dma-direct.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/dma-noncoherent.h>
|
||||
#include <linux/dma-contiguous.h>
|
||||
#include <linux/dma-map-ops.h>
|
||||
#include <linux/highmem.h>
|
||||
#include <linux/memblock.h>
|
||||
#include <linux/slab.h>
|
||||
@ -35,7 +33,6 @@
|
||||
#include <asm/dma-iommu.h>
|
||||
#include <asm/mach/map.h>
|
||||
#include <asm/system_info.h>
|
||||
#include <asm/dma-contiguous.h>
|
||||
#include <xen/swiotlb-xen.h>
|
||||
|
||||
#include "dma.h"
|
||||
@ -199,6 +196,8 @@ static int arm_dma_supported(struct device *dev, u64 mask)
|
||||
const struct dma_map_ops arm_dma_ops = {
|
||||
.alloc = arm_dma_alloc,
|
||||
.free = arm_dma_free,
|
||||
.alloc_pages = dma_direct_alloc_pages,
|
||||
.free_pages = dma_direct_free_pages,
|
||||
.mmap = arm_dma_mmap,
|
||||
.get_sgtable = arm_dma_get_sgtable,
|
||||
.map_page = arm_dma_map_page,
|
||||
@ -226,6 +225,8 @@ static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
|
||||
const struct dma_map_ops arm_coherent_dma_ops = {
|
||||
.alloc = arm_coherent_dma_alloc,
|
||||
.free = arm_coherent_dma_free,
|
||||
.alloc_pages = dma_direct_alloc_pages,
|
||||
.free_pages = dma_direct_free_pages,
|
||||
.mmap = arm_coherent_dma_mmap,
|
||||
.get_sgtable = arm_dma_get_sgtable,
|
||||
.map_page = arm_coherent_dma_map_page,
|
||||
|
@ -18,7 +18,7 @@
|
||||
#include <linux/highmem.h>
|
||||
#include <linux/gfp.h>
|
||||
#include <linux/memblock.h>
|
||||
#include <linux/dma-contiguous.h>
|
||||
#include <linux/dma-map-ops.h>
|
||||
#include <linux/sizes.h>
|
||||
#include <linux/stop_machine.h>
|
||||
#include <linux/swiotlb.h>
|
||||
|
@ -1,7 +1,7 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
#include <linux/cpu.h>
|
||||
#include <linux/dma-direct.h>
|
||||
#include <linux/dma-noncoherent.h>
|
||||
#include <linux/dma-map-ops.h>
|
||||
#include <linux/gfp.h>
|
||||
#include <linux/highmem.h>
|
||||
#include <linux/export.h>
|
||||
|
@ -6,7 +6,7 @@
|
||||
|
||||
#include <linux/gfp.h>
|
||||
#include <linux/cache.h>
|
||||
#include <linux/dma-noncoherent.h>
|
||||
#include <linux/dma-map-ops.h>
|
||||
#include <linux/dma-iommu.h>
|
||||
#include <xen/xen.h>
|
||||
#include <xen/swiotlb-xen.h>
|
||||
|
@ -21,8 +21,7 @@
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_fdt.h>
|
||||
#include <linux/dma-direct.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/dma-contiguous.h>
|
||||
#include <linux/dma-map-ops.h>
|
||||
#include <linux/efi.h>
|
||||
#include <linux/swiotlb.h>
|
||||
#include <linux/vmalloc.h>
|
||||
@ -429,6 +428,8 @@ void __init bootmem_init(void)
|
||||
arm64_hugetlb_cma_reserve();
|
||||
#endif
|
||||
|
||||
dma_pernuma_cma_reserve();
|
||||
|
||||
/*
|
||||
* sparse_init() tries to allocate memory from memblock, so must be
|
||||
* done after the fixed reservations
|
||||
|
@ -15,7 +15,7 @@
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/dma-noncoherent.h>
|
||||
#include <linux/dma-map-ops.h>
|
||||
#include <linux/memblock.h>
|
||||
|
||||
#include <asm/cacheflush.h>
|
||||
|
@ -7,7 +7,7 @@
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_fdt.h>
|
||||
#include <linux/start_kernel.h>
|
||||
#include <linux/dma-contiguous.h>
|
||||
#include <linux/dma-map-ops.h>
|
||||
#include <linux/screen_info.h>
|
||||
#include <asm/sections.h>
|
||||
#include <asm/mmu_context.h>
|
||||
|
@ -2,9 +2,7 @@
|
||||
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
|
||||
|
||||
#include <linux/cache.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/dma-contiguous.h>
|
||||
#include <linux/dma-noncoherent.h>
|
||||
#include <linux/dma-map-ops.h>
|
||||
#include <linux/genalloc.h>
|
||||
#include <linux/highmem.h>
|
||||
#include <linux/io.h>
|
||||
|
@ -5,7 +5,7 @@
|
||||
* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
|
||||
*/
|
||||
|
||||
#include <linux/dma-noncoherent.h>
|
||||
#include <linux/dma-map-ops.h>
|
||||
#include <linux/memblock.h>
|
||||
#include <linux/genalloc.h>
|
||||
#include <linux/module.h>
|
||||
|
@ -8,6 +8,7 @@ menu "Processor type and features"
|
||||
|
||||
config IA64
|
||||
bool
|
||||
select ARCH_HAS_DMA_MARK_CLEAN
|
||||
select ARCH_MIGHT_HAVE_PC_PARPORT
|
||||
select ARCH_MIGHT_HAVE_PC_SERIO
|
||||
select ACPI
|
||||
@ -32,8 +33,6 @@ config IA64
|
||||
select TTY
|
||||
select HAVE_ARCH_TRACEHOOK
|
||||
select HAVE_VIRT_CPU_ACCOUNTING
|
||||
select DMA_NONCOHERENT_MMAP
|
||||
select ARCH_HAS_SYNC_DMA_FOR_CPU
|
||||
select VIRT_TO_BUS
|
||||
select GENERIC_IRQ_PROBE
|
||||
select GENERIC_PENDING_IRQ if SMP
|
||||
|
@ -33,7 +33,7 @@
|
||||
#include <linux/bitops.h> /* hweight64() */
|
||||
#include <linux/crash_dump.h>
|
||||
#include <linux/iommu-helper.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/dma-map-ops.h>
|
||||
#include <linux/prefetch.h>
|
||||
#include <linux/swiotlb.h>
|
||||
|
||||
@ -485,8 +485,7 @@ sba_search_bitmap(struct ioc *ioc, struct device *dev,
|
||||
ASSERT(((unsigned long) ioc->res_hint & (sizeof(unsigned long) - 1UL)) == 0);
|
||||
ASSERT(res_ptr < res_end);
|
||||
|
||||
boundary_size = (unsigned long long)dma_get_seg_boundary(dev) + 1;
|
||||
boundary_size = ALIGN(boundary_size, 1ULL << iovp_shift) >> iovp_shift;
|
||||
boundary_size = dma_get_seg_boundary_nr_pages(dev, iovp_shift);
|
||||
|
||||
BUG_ON(ioc->ibase & ~iovp_mask);
|
||||
shift = ioc->ibase >> iovp_shift;
|
||||
@ -2071,6 +2070,8 @@ static const struct dma_map_ops sba_dma_ops = {
|
||||
.dma_supported = sba_dma_supported,
|
||||
.mmap = dma_common_mmap,
|
||||
.get_sgtable = dma_common_get_sgtable,
|
||||
.alloc_pages = dma_common_alloc_pages,
|
||||
.free_pages = dma_common_free_pages,
|
||||
};
|
||||
|
||||
static int __init
|
||||
|
@ -1,5 +1,5 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
#include <linux/dma-direct.h>
|
||||
#include <linux/dma-map-ops.h>
|
||||
#include <linux/export.h>
|
||||
|
||||
/* Set this to 1 if there is a HW IOMMU in the system */
|
||||
@ -7,15 +7,3 @@ int iommu_detected __read_mostly;
|
||||
|
||||
const struct dma_map_ops *dma_ops;
|
||||
EXPORT_SYMBOL(dma_ops);
|
||||
|
||||
void *arch_dma_alloc(struct device *dev, size_t size,
|
||||
dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
|
||||
{
|
||||
return dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
|
||||
}
|
||||
|
||||
void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
|
||||
dma_addr_t dma_addr, unsigned long attrs)
|
||||
{
|
||||
dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);
|
||||
}
|
||||
|
@ -8,7 +8,7 @@
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/init.h>
|
||||
|
||||
#include <linux/dma-noncoherent.h>
|
||||
#include <linux/dma-map-ops.h>
|
||||
#include <linux/dmar.h>
|
||||
#include <linux/efi.h>
|
||||
#include <linux/elf.h>
|
||||
@ -73,8 +73,7 @@ __ia64_sync_icache_dcache (pte_t pte)
|
||||
* DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
|
||||
* flush them when they get mapped into an executable vm-area.
|
||||
*/
|
||||
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
|
||||
enum dma_data_direction dir)
|
||||
void arch_dma_mark_clean(phys_addr_t paddr, size_t size)
|
||||
{
|
||||
unsigned long pfn = PHYS_PFN(paddr);
|
||||
|
||||
|
@ -6,7 +6,7 @@
|
||||
|
||||
#undef DEBUG
|
||||
|
||||
#include <linux/dma-noncoherent.h>
|
||||
#include <linux/dma-map-ops.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/platform_device.h>
|
||||
|
@ -8,9 +8,8 @@
|
||||
*/
|
||||
|
||||
#include <linux/device.h>
|
||||
#include <linux/dma-noncoherent.h>
|
||||
#include <linux/dma-map-ops.h>
|
||||
#include <linux/gfp.h>
|
||||
#include <linux/dma-debug.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/bug.h>
|
||||
#include <asm/cacheflush.h>
|
||||
|
@ -11,7 +11,7 @@
|
||||
#include <linux/types.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/dma-noncoherent.h>
|
||||
#include <linux/dma-map-ops.h>
|
||||
#include <asm/cpuinfo.h>
|
||||
#include <asm/cacheflush.h>
|
||||
|
||||
|
@ -7,7 +7,7 @@
|
||||
* for more details.
|
||||
*/
|
||||
|
||||
#include <linux/dma-contiguous.h>
|
||||
#include <linux/dma-map-ops.h>
|
||||
#include <linux/memblock.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/kernel.h>
|
||||
|
@ -1136,7 +1136,6 @@ config DMA_NONCOHERENT
|
||||
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
|
||||
select ARCH_HAS_DMA_SET_UNCACHED
|
||||
select DMA_NONCOHERENT_MMAP
|
||||
select DMA_NONCOHERENT_CACHE_SYNC
|
||||
select NEED_DMA_MAP_STATE
|
||||
|
||||
config SYS_HAS_EARLY_PRINTK
|
||||
|
@ -40,7 +40,7 @@ static struct bmips_dma_range *bmips_dma_ranges;
|
||||
|
||||
#define FLUSH_RAC 0x100
|
||||
|
||||
dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t pa)
|
||||
dma_addr_t phys_to_dma(struct device *dev, phys_addr_t pa)
|
||||
{
|
||||
struct bmips_dma_range *r;
|
||||
|
||||
@ -52,7 +52,7 @@ dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t pa)
|
||||
return pa;
|
||||
}
|
||||
|
||||
phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr)
|
||||
phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dma_addr)
|
||||
{
|
||||
struct bmips_dma_range *r;
|
||||
|
||||
|
@ -168,7 +168,7 @@ void __init octeon_pci_dma_init(void)
|
||||
}
|
||||
#endif /* CONFIG_PCI */
|
||||
|
||||
dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
|
||||
dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
|
||||
{
|
||||
#ifdef CONFIG_PCI
|
||||
if (dev && dev_is_pci(dev))
|
||||
@ -177,7 +177,7 @@ dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
|
||||
return paddr;
|
||||
}
|
||||
|
||||
phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr)
|
||||
phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
|
||||
{
|
||||
#ifdef CONFIG_PCI
|
||||
if (dev && dev_is_pci(dev))
|
||||
|
@ -2,7 +2,7 @@
|
||||
#ifndef _MIPS_DMA_DIRECT_H
|
||||
#define _MIPS_DMA_DIRECT_H 1
|
||||
|
||||
dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr);
|
||||
phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr);
|
||||
dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
|
||||
phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
|
||||
|
||||
#endif /* _MIPS_DMA_DIRECT_H */
|
||||
|
@ -10,8 +10,6 @@
|
||||
*/
|
||||
extern unsigned long vdma_alloc(unsigned long paddr, unsigned long size);
|
||||
extern int vdma_free(unsigned long laddr);
|
||||
extern int vdma_remap(unsigned long laddr, unsigned long paddr,
|
||||
unsigned long size);
|
||||
extern unsigned long vdma_phys2log(unsigned long paddr);
|
||||
extern unsigned long vdma_log2phys(unsigned long laddr);
|
||||
extern void vdma_stats(void); /* for debugging only */
|
||||
|
@ -16,8 +16,7 @@
|
||||
#include <linux/memblock.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/gfp.h>
|
||||
#include <linux/dma-direct.h>
|
||||
#include <linux/dma-noncoherent.h>
|
||||
#include <linux/dma-map-ops.h>
|
||||
#include <asm/mipsregs.h>
|
||||
#include <asm/jazz.h>
|
||||
#include <asm/io.h>
|
||||
@ -209,76 +208,6 @@ int vdma_free(unsigned long laddr)
|
||||
|
||||
EXPORT_SYMBOL(vdma_free);
|
||||
|
||||
/*
|
||||
* Map certain page(s) to another physical address.
|
||||
* Caller must have allocated the page(s) before.
|
||||
*/
|
||||
int vdma_remap(unsigned long laddr, unsigned long paddr, unsigned long size)
|
||||
{
|
||||
int first, pages;
|
||||
|
||||
if (laddr > 0xffffff) {
|
||||
if (vdma_debug)
|
||||
printk
|
||||
("vdma_map: Invalid logical address: %08lx\n",
|
||||
laddr);
|
||||
return -EINVAL; /* invalid logical address */
|
||||
}
|
||||
if (paddr > 0x1fffffff) {
|
||||
if (vdma_debug)
|
||||
printk
|
||||
("vdma_map: Invalid physical address: %08lx\n",
|
||||
paddr);
|
||||
return -EINVAL; /* invalid physical address */
|
||||
}
|
||||
|
||||
pages = (((paddr & (VDMA_PAGESIZE - 1)) + size) >> 12) + 1;
|
||||
first = laddr >> 12;
|
||||
if (vdma_debug)
|
||||
printk("vdma_remap: first=%x, pages=%x\n", first, pages);
|
||||
if (first + pages > VDMA_PGTBL_ENTRIES) {
|
||||
if (vdma_debug)
|
||||
printk("vdma_alloc: Invalid size: %08lx\n", size);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
paddr &= ~(VDMA_PAGESIZE - 1);
|
||||
while (pages > 0 && first < VDMA_PGTBL_ENTRIES) {
|
||||
if (pgtbl[first].owner != laddr) {
|
||||
if (vdma_debug)
|
||||
printk("Trying to remap other's pages.\n");
|
||||
return -EPERM; /* not owner */
|
||||
}
|
||||
pgtbl[first].frame = paddr;
|
||||
paddr += VDMA_PAGESIZE;
|
||||
first++;
|
||||
pages--;
|
||||
}
|
||||
|
||||
/*
|
||||
* Update translation table
|
||||
*/
|
||||
r4030_write_reg32(JAZZ_R4030_TRSTBL_INV, 0);
|
||||
|
||||
if (vdma_debug > 2) {
|
||||
int i;
|
||||
pages = (((paddr & (VDMA_PAGESIZE - 1)) + size) >> 12) + 1;
|
||||
first = laddr >> 12;
|
||||
printk("LADDR: ");
|
||||
for (i = first; i < first + pages; i++)
|
||||
printk("%08x ", i << 12);
|
||||
printk("\nPADDR: ");
|
||||
for (i = first; i < first + pages; i++)
|
||||
printk("%08x ", pgtbl[i].frame);
|
||||
printk("\nOWNER: ");
|
||||
for (i = first; i < first + pages; i++)
|
||||
printk("%08x ", pgtbl[i].owner);
|
||||
printk("\n");
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Translate a physical address to a logical address.
|
||||
* This will return the logical address of the first
|
||||
@ -562,26 +491,34 @@ int vdma_get_enable(int channel)
|
||||
static void *jazz_dma_alloc(struct device *dev, size_t size,
|
||||
dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
|
||||
{
|
||||
struct page *page;
|
||||
void *ret;
|
||||
|
||||
ret = dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
|
||||
if (!ret)
|
||||
return NULL;
|
||||
if (attrs & DMA_ATTR_NO_WARN)
|
||||
gfp |= __GFP_NOWARN;
|
||||
|
||||
size = PAGE_ALIGN(size);
|
||||
page = alloc_pages(gfp, get_order(size));
|
||||
if (!page)
|
||||
return NULL;
|
||||
ret = page_address(page);
|
||||
memset(ret, 0, size);
|
||||
*dma_handle = vdma_alloc(virt_to_phys(ret), size);
|
||||
if (*dma_handle == DMA_MAPPING_ERROR) {
|
||||
dma_direct_free_pages(dev, size, ret, *dma_handle, attrs);
|
||||
return NULL;
|
||||
}
|
||||
if (*dma_handle == DMA_MAPPING_ERROR)
|
||||
goto out_free_pages;
|
||||
arch_dma_prep_coherent(page, size);
|
||||
return (void *)(UNCAC_BASE + __pa(ret));
|
||||
|
||||
return ret;
|
||||
out_free_pages:
|
||||
__free_pages(page, get_order(size));
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void jazz_dma_free(struct device *dev, size_t size, void *vaddr,
|
||||
dma_addr_t dma_handle, unsigned long attrs)
|
||||
{
|
||||
vdma_free(dma_handle);
|
||||
dma_direct_free_pages(dev, size, vaddr, dma_handle, attrs);
|
||||
__free_pages(virt_to_page(vaddr), get_order(size));
|
||||
}
|
||||
|
||||
static dma_addr_t jazz_dma_map_page(struct device *dev, struct page *page,
|
||||
@ -678,9 +615,9 @@ const struct dma_map_ops jazz_dma_ops = {
|
||||
.sync_single_for_device = jazz_dma_sync_single_for_device,
|
||||
.sync_sg_for_cpu = jazz_dma_sync_sg_for_cpu,
|
||||
.sync_sg_for_device = jazz_dma_sync_sg_for_device,
|
||||
.dma_supported = dma_direct_supported,
|
||||
.cache_sync = arch_dma_cache_sync,
|
||||
.mmap = dma_common_mmap,
|
||||
.get_sgtable = dma_common_get_sgtable,
|
||||
.alloc_pages = dma_common_alloc_pages,
|
||||
.free_pages = dma_common_free_pages,
|
||||
};
|
||||
EXPORT_SYMBOL(jazz_dma_ops);
|
||||
|
@ -24,7 +24,7 @@
|
||||
#include <linux/kexec.h>
|
||||
#include <linux/sizes.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/dma-contiguous.h>
|
||||
#include <linux/dma-map-ops.h>
|
||||
#include <linux/decompress/generic.h>
|
||||
#include <linux/of_fdt.h>
|
||||
#include <linux/of_reserved_mem.h>
|
||||
|
@ -1,12 +1,12 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
#include <linux/dma-direct.h>
|
||||
|
||||
dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
|
||||
dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
|
||||
{
|
||||
return paddr | 0x80000000;
|
||||
}
|
||||
|
||||
phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr)
|
||||
phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dma_addr)
|
||||
{
|
||||
return dma_addr & 0x7fffffff;
|
||||
}
|
||||
|
@ -1,12 +1,12 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
#include <linux/dma-direct.h>
|
||||
|
||||
dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
|
||||
dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
|
||||
{
|
||||
return paddr | 0x80000000;
|
||||
}
|
||||
|
||||
phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr)
|
||||
phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dma_addr)
|
||||
{
|
||||
if (dma_addr > 0x8fffffff)
|
||||
return dma_addr;
|
||||
|
@ -4,7 +4,7 @@
|
||||
#include <linux/swiotlb.h>
|
||||
#include <boot_param.h>
|
||||
|
||||
dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
|
||||
dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
|
||||
{
|
||||
/* We extract 2bit node id (bit 44~47, only bit 44~45 used now) from
|
||||
* Loongson-3's 48bit address space and embed it into 40bit */
|
||||
@ -13,7 +13,7 @@ dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
|
||||
return ((nid << 44) ^ paddr) | (nid << node_id_offset);
|
||||
}
|
||||
|
||||
phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr)
|
||||
phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
|
||||
{
|
||||
/* We extract 2bit node id (bit 44~47, only bit 44~45 used now) from
|
||||
* Loongson-3's 48bit address space and embed it into 40bit */
|
||||
|
@ -5,8 +5,7 @@
|
||||
* swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
|
||||
*/
|
||||
#include <linux/dma-direct.h>
|
||||
#include <linux/dma-noncoherent.h>
|
||||
#include <linux/dma-contiguous.h>
|
||||
#include <linux/dma-map-ops.h>
|
||||
#include <linux/highmem.h>
|
||||
|
||||
#include <asm/cache.h>
|
||||
@ -55,22 +54,34 @@ void *arch_dma_set_uncached(void *addr, size_t size)
|
||||
return (void *)(__pa(addr) + UNCAC_BASE);
|
||||
}
|
||||
|
||||
static inline void dma_sync_virt(void *addr, size_t size,
|
||||
static inline void dma_sync_virt_for_device(void *addr, size_t size,
|
||||
enum dma_data_direction dir)
|
||||
{
|
||||
switch (dir) {
|
||||
case DMA_TO_DEVICE:
|
||||
dma_cache_wback((unsigned long)addr, size);
|
||||
break;
|
||||
|
||||
case DMA_FROM_DEVICE:
|
||||
dma_cache_inv((unsigned long)addr, size);
|
||||
break;
|
||||
|
||||
case DMA_BIDIRECTIONAL:
|
||||
dma_cache_wback_inv((unsigned long)addr, size);
|
||||
break;
|
||||
default:
|
||||
BUG();
|
||||
}
|
||||
}
|
||||
|
||||
static inline void dma_sync_virt_for_cpu(void *addr, size_t size,
|
||||
enum dma_data_direction dir)
|
||||
{
|
||||
switch (dir) {
|
||||
case DMA_TO_DEVICE:
|
||||
break;
|
||||
case DMA_FROM_DEVICE:
|
||||
case DMA_BIDIRECTIONAL:
|
||||
dma_cache_inv((unsigned long)addr, size);
|
||||
break;
|
||||
default:
|
||||
BUG();
|
||||
}
|
||||
@ -82,7 +93,7 @@ static inline void dma_sync_virt(void *addr, size_t size,
|
||||
* configured then the bulk of this loop gets optimized out.
|
||||
*/
|
||||
static inline void dma_sync_phys(phys_addr_t paddr, size_t size,
|
||||
enum dma_data_direction dir)
|
||||
enum dma_data_direction dir, bool for_device)
|
||||
{
|
||||
struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
|
||||
unsigned long offset = paddr & ~PAGE_MASK;
|
||||
@ -90,18 +101,20 @@ static inline void dma_sync_phys(phys_addr_t paddr, size_t size,
|
||||
|
||||
do {
|
||||
size_t len = left;
|
||||
void *addr;
|
||||
|
||||
if (PageHighMem(page)) {
|
||||
void *addr;
|
||||
|
||||
if (offset + len > PAGE_SIZE)
|
||||
len = PAGE_SIZE - offset;
|
||||
}
|
||||
|
||||
addr = kmap_atomic(page);
|
||||
if (for_device)
|
||||
dma_sync_virt_for_device(addr + offset, len, dir);
|
||||
else
|
||||
dma_sync_virt_for_cpu(addr + offset, len, dir);
|
||||
kunmap_atomic(addr);
|
||||
|
||||
addr = kmap_atomic(page);
|
||||
dma_sync_virt(addr + offset, len, dir);
|
||||
kunmap_atomic(addr);
|
||||
} else
|
||||
dma_sync_virt(page_address(page) + offset, size, dir);
|
||||
offset = 0;
|
||||
page++;
|
||||
left -= len;
|
||||
@ -111,7 +124,7 @@ static inline void dma_sync_phys(phys_addr_t paddr, size_t size,
|
||||
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
|
||||
enum dma_data_direction dir)
|
||||
{
|
||||
dma_sync_phys(paddr, size, dir);
|
||||
dma_sync_phys(paddr, size, dir, true);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
|
||||
@ -119,18 +132,10 @@ void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
|
||||
enum dma_data_direction dir)
|
||||
{
|
||||
if (cpu_needs_post_dma_flush())
|
||||
dma_sync_phys(paddr, size, dir);
|
||||
dma_sync_phys(paddr, size, dir, false);
|
||||
}
|
||||
#endif
|
||||
|
||||
void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
|
||||
enum dma_data_direction direction)
|
||||
{
|
||||
BUG_ON(direction == DMA_NONE);
|
||||
|
||||
dma_sync_virt(vaddr, size, direction);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_DMA_PERDEV_COHERENT
|
||||
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
|
||||
const struct iommu_ops *iommu, bool coherent)
|
||||
|
@ -170,12 +170,12 @@ static inline dma_addr_t ar2315_dev_offset(struct device *dev)
|
||||
return 0;
|
||||
}
|
||||
|
||||
dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
|
||||
dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
|
||||
{
|
||||
return paddr + ar2315_dev_offset(dev);
|
||||
}
|
||||
|
||||
phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr)
|
||||
phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dma_addr)
|
||||
{
|
||||
return dma_addr - ar2315_dev_offset(dev);
|
||||
}
|
||||
|
@ -25,7 +25,7 @@
|
||||
/*
|
||||
* Common phys<->dma mapping for platforms using pci xtalk bridge
|
||||
*/
|
||||
dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
|
||||
dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
|
||||
{
|
||||
struct pci_dev *pdev = to_pci_dev(dev);
|
||||
struct bridge_controller *bc = BRIDGE_CONTROLLER(pdev->bus);
|
||||
@ -33,7 +33,7 @@ dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
|
||||
return bc->baddr + paddr;
|
||||
}
|
||||
|
||||
phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr)
|
||||
phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dma_addr)
|
||||
{
|
||||
return dma_addr & ~(0xffUL << 56);
|
||||
}
|
||||
|
@ -18,7 +18,7 @@
|
||||
|
||||
#define RAM_OFFSET_MASK 0x3fffffffUL
|
||||
|
||||
dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
|
||||
dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
|
||||
{
|
||||
dma_addr_t dma_addr = paddr & RAM_OFFSET_MASK;
|
||||
|
||||
@ -27,7 +27,7 @@ dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
|
||||
return dma_addr;
|
||||
}
|
||||
|
||||
phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr)
|
||||
phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dma_addr)
|
||||
{
|
||||
phys_addr_t paddr = dma_addr & RAM_OFFSET_MASK;
|
||||
|
||||
|
@ -3,7 +3,7 @@
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/dma-noncoherent.h>
|
||||
#include <linux/dma-map-ops.h>
|
||||
#include <linux/cache.h>
|
||||
#include <linux/highmem.h>
|
||||
#include <asm/cacheflush.h>
|
||||
|
@ -13,7 +13,7 @@
|
||||
* DMA mapping callbacks...
|
||||
*/
|
||||
|
||||
#include <linux/dma-noncoherent.h>
|
||||
#include <linux/dma-map-ops.h>
|
||||
#include <linux/pagewalk.h>
|
||||
|
||||
#include <asm/cpuinfo.h>
|
||||
|
@ -195,7 +195,6 @@ config PA11
|
||||
depends on PA7000 || PA7100LC || PA7200 || PA7300LC
|
||||
select ARCH_HAS_SYNC_DMA_FOR_CPU
|
||||
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
|
||||
select DMA_NONCOHERENT_CACHE_SYNC
|
||||
|
||||
config PREFETCH
|
||||
def_bool y
|
||||
|
@ -30,6 +30,7 @@
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/dma-map-ops.h>
|
||||
#include <asm/hardware.h>
|
||||
#include <asm/io.h>
|
||||
#include <asm/pdc.h>
|
||||
|
@ -26,7 +26,7 @@
|
||||
#include <linux/string.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/dma-direct.h>
|
||||
#include <linux/dma-noncoherent.h>
|
||||
#include <linux/dma-map-ops.h>
|
||||
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/dma.h> /* for DMA_CHUNK_SIZE */
|
||||
@ -454,9 +454,3 @@ void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
|
||||
{
|
||||
flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size);
|
||||
}
|
||||
|
||||
void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
|
||||
enum dma_data_direction direction)
|
||||
{
|
||||
flush_kernel_dcache_range((unsigned long)vaddr, size);
|
||||
}
|
||||
|
@ -2,12 +2,12 @@
|
||||
#ifndef ASM_POWERPC_DMA_DIRECT_H
|
||||
#define ASM_POWERPC_DMA_DIRECT_H 1
|
||||
|
||||
static inline dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
|
||||
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
|
||||
{
|
||||
return paddr + dev->archdata.dma_offset;
|
||||
}
|
||||
|
||||
static inline phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr)
|
||||
static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
|
||||
{
|
||||
return daddr - dev->archdata.dma_offset;
|
||||
}
|
||||
|
@ -12,7 +12,7 @@
|
||||
#include <linux/compiler.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/dma-map-ops.h>
|
||||
#include <linux/bitops.h>
|
||||
#include <asm/machdep.h>
|
||||
#include <asm/types.h>
|
||||
|
@ -9,7 +9,7 @@
|
||||
#include <linux/types.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/dma-map-ops.h>
|
||||
#include <linux/scatterlist.h>
|
||||
|
||||
#include <asm/machdep.h>
|
||||
|
@ -138,4 +138,6 @@ const struct dma_map_ops dma_iommu_ops = {
|
||||
.get_required_mask = dma_iommu_get_required_mask,
|
||||
.mmap = dma_common_mmap,
|
||||
.get_sgtable = dma_common_get_sgtable,
|
||||
.alloc_pages = dma_common_alloc_pages,
|
||||
.free_pages = dma_common_free_pages,
|
||||
};
|
||||
|
@ -172,7 +172,6 @@ static unsigned long iommu_range_alloc(struct device *dev,
|
||||
int largealloc = npages > 15;
|
||||
int pass = 0;
|
||||
unsigned long align_mask;
|
||||
unsigned long boundary_size;
|
||||
unsigned long flags;
|
||||
unsigned int pool_nr;
|
||||
struct iommu_pool *pool;
|
||||
@ -236,15 +235,9 @@ static unsigned long iommu_range_alloc(struct device *dev,
|
||||
}
|
||||
}
|
||||
|
||||
if (dev)
|
||||
boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
|
||||
1 << tbl->it_page_shift);
|
||||
else
|
||||
boundary_size = ALIGN(1UL << 32, 1 << tbl->it_page_shift);
|
||||
/* 4GB boundary for iseries_hv_alloc and iseries_hv_map */
|
||||
|
||||
n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset,
|
||||
boundary_size >> tbl->it_page_shift, align_mask);
|
||||
dma_get_seg_boundary_nr_pages(dev, tbl->it_page_shift),
|
||||
align_mask);
|
||||
if (n == -1) {
|
||||
if (likely(pass == 0)) {
|
||||
/* First try the pool from the start */
|
||||
|
@ -11,7 +11,7 @@
|
||||
#include <linux/types.h>
|
||||
#include <linux/highmem.h>
|
||||
#include <linux/dma-direct.h>
|
||||
#include <linux/dma-noncoherent.h>
|
||||
#include <linux/dma-map-ops.h>
|
||||
|
||||
#include <asm/tlbflush.h>
|
||||
#include <asm/dma.h>
|
||||
|
@ -9,7 +9,7 @@
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/dma-map-ops.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
@ -696,6 +696,8 @@ static const struct dma_map_ops ps3_sb_dma_ops = {
|
||||
.unmap_page = ps3_unmap_page,
|
||||
.mmap = dma_common_mmap,
|
||||
.get_sgtable = dma_common_get_sgtable,
|
||||
.alloc_pages = dma_common_alloc_pages,
|
||||
.free_pages = dma_common_free_pages,
|
||||
};
|
||||
|
||||
static const struct dma_map_ops ps3_ioc0_dma_ops = {
|
||||
@ -708,6 +710,8 @@ static const struct dma_map_ops ps3_ioc0_dma_ops = {
|
||||
.unmap_page = ps3_unmap_page,
|
||||
.mmap = dma_common_mmap,
|
||||
.get_sgtable = dma_common_get_sgtable,
|
||||
.alloc_pages = dma_common_alloc_pages,
|
||||
.free_pages = dma_common_free_pages,
|
||||
};
|
||||
|
||||
/**
|
||||
|
@ -40,7 +40,7 @@
|
||||
#include <linux/export.h>
|
||||
#include <linux/console.h>
|
||||
#include <linux/kobject.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/dma-map-ops.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/slab.h>
|
||||
|
@ -20,7 +20,7 @@
|
||||
#include <linux/console.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/dma-map-ops.h>
|
||||
#include <linux/kobject.h>
|
||||
|
||||
#include <asm/iommu.h>
|
||||
@ -608,6 +608,8 @@ static const struct dma_map_ops vio_dma_mapping_ops = {
|
||||
.get_required_mask = dma_iommu_get_required_mask,
|
||||
.mmap = dma_common_mmap,
|
||||
.get_sgtable = dma_common_get_sgtable,
|
||||
.alloc_pages = dma_common_alloc_pages,
|
||||
.free_pages = dma_common_free_pages,
|
||||
};
|
||||
|
||||
/**
|
||||
|
@ -37,7 +37,7 @@
|
||||
#include <linux/root_dev.h>
|
||||
#include <linux/console.h>
|
||||
#include <linux/kernel_stat.h>
|
||||
#include <linux/dma-contiguous.h>
|
||||
#include <linux/dma-map-ops.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/notifier.h>
|
||||
#include <linux/pfn.h>
|
||||
|
@@ -10,7 +10,7 @@
 #include <linux/slab.h>
 #include <linux/export.h>
 #include <linux/iommu-helper.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-map-ops.h>
 #include <linux/vmalloc.h>
 #include <linux/pci.h>
 #include <asm/pci_dma.h>
@@ -261,13 +261,11 @@ static unsigned long __dma_alloc_iommu(struct device *dev,
 				       unsigned long start, int size)
 {
 	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
-	unsigned long boundary_size;
 
-	boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
-			      PAGE_SIZE) >> PAGE_SHIFT;
 	return iommu_area_alloc(zdev->iommu_bitmap, zdev->iommu_pages,
 				start, size, zdev->start_dma >> PAGE_SHIFT,
-				boundary_size, 0);
+				dma_get_seg_boundary_nr_pages(dev, PAGE_SHIFT),
+				0);
 }
 
 static dma_addr_t dma_alloc_address(struct device *dev, int size)
@@ -670,6 +668,8 @@ const struct dma_map_ops s390_pci_dma_ops = {
 	.unmap_page = s390_dma_unmap_pages,
 	.mmap = dma_common_mmap,
 	.get_sgtable = dma_common_get_sgtable,
+	.alloc_pages = dma_common_alloc_pages,
+	.free_pages = dma_common_free_pages,
 	/* dma_supported is unconditionally true without a callback */
 };
 EXPORT_SYMBOL_GPL(s390_pci_dma_ops);
@@ -13,6 +13,7 @@
 
 #include <cpu/sh7723.h>
 
+#include <linux/dma-map-ops.h>
 #include <linux/clkdev.h>
 #include <linux/delay.h>
 #include <linux/device.h>
@@ -36,6 +36,7 @@
 #include <linux/usb/r8a66597.h>
 #include <linux/usb/renesas_usbhs.h>
 #include <linux/videodev2.h>
+#include <linux/dma-map-ops.h>
 
 #include <media/drv-intf/renesas-ceu.h>
 #include <media/i2c/mt9t112.h>
@@ -14,7 +14,6 @@
 
 #include <linux/clkdev.h>
 #include <linux/delay.h>
-#include <linux/dma-mapping.h>
 #include <linux/gpio.h>
 #include <linux/gpio/machine.h>
 #include <linux/i2c.h>
@@ -33,6 +32,7 @@
 #include <linux/sh_intc.h>
 #include <linux/usb/r8a66597.h>
 #include <linux/videodev2.h>
+#include <linux/dma-map-ops.h>
 
 #include <mach/kfr2r09.h>
 
@@ -5,7 +5,7 @@
  * Copyright (C) 2008 Magnus Damm
  */
 #include <linux/clkdev.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-map-ops.h>
 #include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/interrupt.h>
@@ -32,6 +32,7 @@
 #include <linux/smc91x.h>
 #include <linux/usb/r8a66597.h>
 #include <linux/videodev2.h>
+#include <linux/dma-map-ops.h>
 
 #include <mach-se/mach/se7724.h>
 #include <media/drv-intf/renesas-ceu.h>
@@ -19,7 +19,7 @@
 #include <linux/init.h>
 #include <linux/irq.h>
 #include <linux/pci.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-map-ops.h>
 
 #include <asm/io.h>
 #include <asm/irq.h>
@@ -13,7 +13,6 @@
 #include <linux/pci.h>
 #include <linux/init.h>
 #include <linux/types.h>
-#include <linux/dma-debug.h>
 #include <linux/io.h>
 #include <linux/mutex.h>
 #include <linux/spinlock.h>
@@ -12,6 +12,7 @@
 #include <linux/io.h>
 #include <linux/async.h>
 #include <linux/delay.h>
+#include <linux/dma-mapping.h>
 #include <linux/slab.h>
 #include <linux/clk.h>
 #include <linux/sh_clk.h>
@@ -31,6 +32,8 @@ struct sh7786_pcie_port {
 static struct sh7786_pcie_port *sh7786_pcie_ports;
 static unsigned int nr_ports;
 static unsigned long dma_pfn_offset;
+size_t memsize;
+u64 memstart;
 
 static struct sh7786_pcie_hwops {
 	int (*core_init)(void);
@@ -301,7 +304,6 @@ static int __init pcie_init(struct sh7786_pcie_port *port)
 	struct pci_channel *chan = port->hose;
 	unsigned int data;
 	phys_addr_t memstart, memend;
-	size_t memsize;
 	int ret, i, win;
 
 	/* Begin initialization */
@@ -368,8 +370,6 @@ static int __init pcie_init(struct sh7786_pcie_port *port)
 	memstart = ALIGN_DOWN(memstart, memsize);
 	memsize = roundup_pow_of_two(memend - memstart);
 
-	dma_pfn_offset = memstart >> PAGE_SHIFT;
-
 	/*
 	 * If there's more than 512MB of memory, we need to roll over to
 	 * LAR1/LAMR1.
@@ -487,7 +487,8 @@ int pcibios_map_platform_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
 
 void pcibios_bus_add_device(struct pci_dev *pdev)
 {
-	pdev->dev.dma_pfn_offset = dma_pfn_offset;
+	dma_direct_set_offset(&pdev->dev, __pa(memory_start),
+			      __pa(memory_start) - memstart, memsize);
 }
 
 static int __init sh7786_pcie_core_init(void)
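The sh7786 conversion above (and the sta2x11 and ACPI IORT hunks further down) replaces the old per-device dma_pfn_offset with dma_direct_set_offset(), which records a CPU/DMA address offset range on the device. A hypothetical sketch of how a platform might use it, assuming the 5.10 prototype from <linux/dma-map-ops.h>; the addresses and the function name are invented for illustration:

#include <linux/dma-map-ops.h>
#include <linux/sizes.h>

/*
 * Hypothetical example: the device sees system RAM at bus address
 * 0x40000000 while the CPU sees it at 0x80000000; publish a 512 MiB
 * translation window so dma-direct can convert addresses.
 */
static int example_setup_dma_offset(struct device *dev)
{
	return dma_direct_set_offset(dev, 0x80000000 /* cpu_start */,
				     0x40000000 /* dma_start */, SZ_512M);
}

The helper allocates dev->dma_range_map and can therefore fail with -ENOMEM; the sta2x11 and IORT call sites below check the return value for exactly that reason.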
@@ -3,7 +3,7 @@
  * Copyright (C) 2004 - 2007 Paul Mundt
  */
 #include <linux/mm.h>
-#include <linux/dma-noncoherent.h>
+#include <linux/dma-map-ops.h>
 #include <asm/cacheflush.h>
 #include <asm/addrspace.h>
@@ -166,13 +166,6 @@ unsigned long iommu_tbl_range_alloc(struct device *dev,
 		}
 	}
 
-	if (dev)
-		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
-				      1 << iommu->table_shift);
-	else
-		boundary_size = ALIGN(1ULL << 32, 1 << iommu->table_shift);
-
-	boundary_size = boundary_size >> iommu->table_shift;
 	/*
 	 * if the skip_span_boundary_check had been set during init, we set
 	 * things up so that iommu_is_span_boundary() merely checks if the
@@ -181,6 +174,9 @@ unsigned long iommu_tbl_range_alloc(struct device *dev,
 	if ((iommu->flags & IOMMU_NO_SPAN_BOUND) != 0) {
 		shift = 0;
 		boundary_size = iommu->poolsize * iommu->nr_pools;
+	} else {
+		boundary_size = dma_get_seg_boundary_nr_pages(dev,
+				iommu->table_shift);
 	}
 	n = iommu_area_alloc(iommu->map, limit, start, npages, shift,
 		     boundary_size, align_mask);
@@ -10,7 +10,7 @@
 #include <linux/slab.h>
 #include <linux/delay.h>
 #include <linux/device.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-map-ops.h>
 #include <linux/errno.h>
 #include <linux/iommu-helper.h>
 #include <linux/bitmap.h>
@@ -472,8 +472,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
 	outs->dma_length = 0;
 
 	max_seg_size = dma_get_max_seg_size(dev);
-	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
-				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
+	seg_boundary_size = dma_get_seg_boundary_nr_pages(dev, IO_PAGE_SHIFT);
 	base_shift = iommu->tbl.table_map_base >> IO_PAGE_SHIFT;
 	for_each_sg(sglist, s, nelems, i) {
 		unsigned long paddr, npages, entry, out_entry = 0, slen;
@@ -38,7 +38,7 @@
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/scatterlist.h>
-#include <linux/dma-noncoherent.h>
+#include <linux/dma-map-ops.h>
 #include <linux/of_device.h>
 
 #include <asm/io.h>
@@ -16,6 +16,7 @@
 #include <linux/export.h>
 #include <linux/log2.h>
 #include <linux/of_device.h>
+#include <linux/dma-map-ops.h>
 #include <asm/iommu-common.h>
 
 #include <asm/iommu.h>
@@ -508,8 +509,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 	iommu_batch_start(dev, prot, ~0UL);
 
 	max_seg_size = dma_get_max_seg_size(dev);
-	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
-				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
+	seg_boundary_size = dma_get_seg_boundary_nr_pages(dev, IO_PAGE_SHIFT);
 
 	mask = *dev->dma_mask;
 	if (!iommu_use_atu(iommu, mask))
@@ -11,7 +11,7 @@
 #include <linux/spinlock.h>
 #include <linux/mm.h>
 #include <linux/bitops.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-map-ops.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
 
@@ -12,7 +12,7 @@
 #include <linux/init.h>
 #include <linux/mm.h>
 #include <linux/slab.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-map-ops.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
 
@@ -8,10 +8,8 @@
  */
 
 #include <linux/scatterlist.h>
-#include <linux/dma-debug.h>
 #include <asm/io.h>
 #include <asm/swiotlb.h>
-#include <linux/dma-contiguous.h>
 
 extern int iommu_merge;
 extern int panic_on_overflow;
@@ -32,6 +32,7 @@
 #include <linux/gfp.h>
 #include <linux/atomic.h>
 #include <linux/dma-direct.h>
+#include <linux/dma-map-ops.h>
 #include <asm/mtrr.h>
 #include <asm/proto.h>
 #include <asm/iommu.h>
@@ -96,8 +97,7 @@ static unsigned long alloc_iommu(struct device *dev, int size,
 
 	base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
 			   PAGE_SIZE) >> PAGE_SHIFT;
-	boundary_size = ALIGN((u64)dma_get_seg_boundary(dev) + 1,
-			      PAGE_SIZE) >> PAGE_SHIFT;
+	boundary_size = dma_get_seg_boundary_nr_pages(dev, PAGE_SHIFT);
 
 	spin_lock_irqsave(&iommu_bitmap_lock, flags);
 	offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
@@ -468,7 +468,7 @@ gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
 {
 	void *vaddr;
 
-	vaddr = dma_direct_alloc_pages(dev, size, dma_addr, flag, attrs);
+	vaddr = dma_direct_alloc(dev, size, dma_addr, flag, attrs);
 	if (!vaddr ||
 	    !force_iommu || dev->coherent_dma_mask <= DMA_BIT_MASK(24))
 		return vaddr;
@@ -480,7 +480,7 @@ gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
 		goto out_free;
 	return vaddr;
 out_free:
-	dma_direct_free_pages(dev, size, vaddr, *dma_addr, attrs);
+	dma_direct_free(dev, size, vaddr, *dma_addr, attrs);
 	return NULL;
 }
 
@@ -490,7 +490,7 @@ gart_free_coherent(struct device *dev, size_t size, void *vaddr,
 			   dma_addr_t dma_addr, unsigned long attrs)
 {
 	gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, 0);
-	dma_direct_free_pages(dev, size, vaddr, dma_addr, attrs);
+	dma_direct_free(dev, size, vaddr, dma_addr, attrs);
 }
 
 static int no_agp;
@@ -678,6 +678,8 @@ static const struct dma_map_ops gart_dma_ops = {
 	.get_sgtable = dma_common_get_sgtable,
 	.dma_supported = dma_direct_supported,
 	.get_required_mask = dma_direct_get_required_mask,
+	.alloc_pages = dma_direct_alloc_pages,
+	.free_pages = dma_direct_free_pages,
 };
 
 static void gart_iommu_shutdown(void)
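Two related renames meet in the GART hunks above: the old coherent allocation helpers dma_direct_alloc_pages()/dma_direct_free_pages() become dma_direct_alloc()/dma_direct_free(), and the _pages names are reused as the dma-direct backend of the new struct-page based allocator that drivers reach through dma_alloc_pages(). A sketch of the driver-facing prototypes, assuming the 5.10 declarations in <linux/dma-mapping.h>:

struct page *dma_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
void dma_free_pages(struct device *dev, size_t size, struct page *page,
		dma_addr_t dma_handle, enum dma_data_direction dir);

Unlike dma_alloc_coherent(), the returned pages may be CPU-cached, so the caller is expected to hand ownership back and forth with dma_sync_single_for_cpu()/dma_sync_single_for_device(), as the firewire-ohci hunk further down does.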
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
+#include <linux/dma-map-ops.h>
 #include <linux/dma-direct.h>
-#include <linux/dma-debug.h>
 #include <linux/iommu.h>
 #include <linux/dmar.h>
 #include <linux/export.h>
@@ -7,6 +7,7 @@
  */
 #include <linux/console.h>
 #include <linux/crash_dump.h>
+#include <linux/dma-map-ops.h>
 #include <linux/dmi.h>
 #include <linux/efi.h>
 #include <linux/init_ohci1394_dma.h>
@@ -20,6 +21,7 @@
 #include <linux/tboot.h>
 #include <linux/usb/xhci-dbgp.h>
 #include <linux/static_call.h>
+#include <linux/swiotlb.h>
 
 #include <uapi/linux/mount.h>
 
@@ -132,7 +132,7 @@ static void sta2x11_map_ep(struct pci_dev *pdev)
 	struct sta2x11_instance *instance = sta2x11_pdev_to_instance(pdev);
 	struct device *dev = &pdev->dev;
 	u32 amba_base, max_amba_addr;
-	int i;
+	int i, ret;
 
 	if (!instance)
 		return;
@@ -140,7 +140,9 @@ static void sta2x11_map_ep(struct pci_dev *pdev)
 	pci_read_config_dword(pdev, AHB_BASE(0), &amba_base);
 	max_amba_addr = amba_base + STA2X11_AMBA_SIZE - 1;
 
-	dev->dma_pfn_offset = PFN_DOWN(-amba_base);
+	ret = dma_direct_set_offset(dev, 0, amba_base, STA2X11_AMBA_SIZE);
+	if (ret)
+		dev_err(dev, "sta2x11: could not set DMA offset\n");
 
 	dev->bus_dma_limit = max_amba_addr;
 	pci_set_consistent_dma_mask(pdev, max_amba_addr);
@@ -2,7 +2,7 @@
 
 /* Glue code to lib/swiotlb-xen.c */
 
-#include <linux/dma-mapping.h>
+#include <linux/dma-map-ops.h>
 #include <linux/pci.h>
 #include <xen/swiotlb-xen.h>
 
@@ -11,8 +11,7 @@
  * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
  */
 
-#include <linux/dma-contiguous.h>
-#include <linux/dma-noncoherent.h>
+#include <linux/dma-map-ops.h>
 #include <linux/dma-direct.h>
 #include <linux/gfp.h>
 #include <linux/highmem.h>
@@ -26,7 +26,7 @@
 #include <linux/nodemask.h>
 #include <linux/mm.h>
 #include <linux/of_fdt.h>
-#include <linux/dma-contiguous.h>
+#include <linux/dma-map-ops.h>
 
 #include <asm/bootparam.h>
 #include <asm/page.h>
@@ -18,6 +18,7 @@
 #include <linux/pci.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
+#include <linux/dma-map-ops.h>
 
 #define IORT_TYPE_MASK(type) (1 << (type))
 #define IORT_MSI_TYPE (1 << ACPI_IORT_NODE_ITS_GROUP)
@@ -1178,8 +1179,9 @@ void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
 	*dma_addr = dmaaddr;
 	*dma_size = size;
 
-	dev->dma_pfn_offset = PFN_DOWN(offset);
-	dev_dbg(dev, "dma_pfn_offset(%#08llx)\n", offset);
+	ret = dma_direct_set_offset(dev, dmaaddr + offset, dmaaddr, size);
+
+	dev_dbg(dev, "dma_offset(%#08llx)%s\n", offset, ret ? " failed!" : "");
 }
 
 static void __init acpi_iort_register_irq(int hwirq, const char *name,
@@ -13,7 +13,7 @@
 #include <linux/kthread.h>
 #include <linux/dmi.h>
 #include <linux/nls.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-map-ops.h>
 #include <linux/platform_data/x86/apple.h>
 #include <linux/pgtable.h>
 
@@ -1798,6 +1798,8 @@ static void device_release(struct kobject *kobj)
 	 */
 	devres_release_all(dev);
 
+	kfree(dev->dma_range_map);
+
 	if (dev->release)
 		dev->release(dev);
 	else if (dev->type && dev->type->release)
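The dev->dma_range_map freed here on device release is the per-device array of DMA offset ranges introduced by this series; it is allocated by dma_direct_set_offset() or built from a devicetree dma-ranges property. A sketch of the entry type, assuming the 5.10 definition in <linux/dma-map-ops.h>:

struct bus_dma_region {
	phys_addr_t	cpu_start;
	dma_addr_t	dma_start;
	u64		size;
	u64		offset;		/* cached cpu_start - dma_start delta */
};

The array is terminated by an entry with size == 0, which is why the core can release it with a plain kfree() without tracking its length.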
@@ -19,7 +19,7 @@
 #include <linux/debugfs.h>
 #include <linux/device.h>
 #include <linux/delay.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-map-ops.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/kthread.h>
@@ -10,7 +10,7 @@
 #include <linux/device.h>
 #include <linux/dma-buf.h>
 #include <linux/dma-heap.h>
-#include <linux/dma-contiguous.h>
+#include <linux/dma-map-ops.h>
 #include <linux/err.h>
 #include <linux/errno.h>
 #include <linux/highmem.h>
@@ -674,17 +674,16 @@ static void ar_context_link_page(struct ar_context *ctx, unsigned int index)
 
 static void ar_context_release(struct ar_context *ctx)
 {
+	struct device *dev = ctx->ohci->card.device;
 	unsigned int i;
 
 	vunmap(ctx->buffer);
 
-	for (i = 0; i < AR_BUFFERS; i++)
-		if (ctx->pages[i]) {
-			dma_unmap_page(ctx->ohci->card.device,
-				       ar_buffer_bus(ctx, i),
-				       PAGE_SIZE, DMA_FROM_DEVICE);
-			__free_page(ctx->pages[i]);
-		}
+	for (i = 0; i < AR_BUFFERS; i++) {
+		if (ctx->pages[i])
+			dma_free_pages(dev, PAGE_SIZE, ctx->pages[i],
+				       ar_buffer_bus(ctx, i), DMA_FROM_DEVICE);
+	}
 }
 
 static void ar_context_abort(struct ar_context *ctx, const char *error_msg)
@@ -970,6 +969,7 @@ static void ar_context_tasklet(unsigned long data)
 static int ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci,
 			   unsigned int descriptors_offset, u32 regs)
 {
+	struct device *dev = ohci->card.device;
 	unsigned int i;
 	dma_addr_t dma_addr;
 	struct page *pages[AR_BUFFERS + AR_WRAPAROUND_PAGES];
@@ -980,17 +980,13 @@ static int ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci,
 	tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx);
 
 	for (i = 0; i < AR_BUFFERS; i++) {
-		ctx->pages[i] = alloc_page(GFP_KERNEL | GFP_DMA32);
+		ctx->pages[i] = dma_alloc_pages(dev, PAGE_SIZE, &dma_addr,
+						DMA_FROM_DEVICE, GFP_KERNEL);
 		if (!ctx->pages[i])
 			goto out_of_memory;
-		dma_addr = dma_map_page(ohci->card.device, ctx->pages[i],
-					0, PAGE_SIZE, DMA_FROM_DEVICE);
-		if (dma_mapping_error(ohci->card.device, dma_addr)) {
-			__free_page(ctx->pages[i]);
-			ctx->pages[i] = NULL;
-			goto out_of_memory;
-		}
 		set_page_private(ctx->pages[i], dma_addr);
+		dma_sync_single_for_device(dev, dma_addr, PAGE_SIZE,
+					   DMA_FROM_DEVICE);
 	}
 
 	for (i = 0; i < AR_BUFFERS; i++)
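The firewire-ohci conversion above is an early user of the new page-level allocator: the page returned by dma_alloc_pages() is already mapped for the requested direction, so the alloc_page()/dma_map_page()/dma_mapping_error() dance disappears and only ownership transfers remain. A hypothetical helper showing the same pattern in isolation (the function names are invented for illustration):

#include <linux/dma-mapping.h>
#include <linux/mm.h>

static struct page *example_alloc_rx_page(struct device *dev, dma_addr_t *bus)
{
	struct page *page;

	page = dma_alloc_pages(dev, PAGE_SIZE, bus, DMA_FROM_DEVICE, GFP_KERNEL);
	if (!page)
		return NULL;

	/* Hand the buffer to the device before programming it for DMA. */
	dma_sync_single_for_device(dev, *bus, PAGE_SIZE, DMA_FROM_DEVICE);
	return page;
}

static void example_free_rx_page(struct device *dev, struct page *page,
				 dma_addr_t bus)
{
	dma_free_pages(dev, PAGE_SIZE, page, bus, DMA_FROM_DEVICE);
}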
@@ -5,7 +5,7 @@
 // Author: Andrzej Hajda <a.hajda@samsung.com>
 
 #include <linux/dma-iommu.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-map-ops.h>
 #include <linux/iommu.h>
 #include <linux/platform_device.h>
 
@@ -42,8 +42,6 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem, bool kvmap)
 	if (exynos_gem->flags & EXYNOS_BO_WC ||
 	    !(exynos_gem->flags & EXYNOS_BO_CACHABLE))
 		attr |= DMA_ATTR_WRITE_COMBINE;
-	else
-		attr |= DMA_ATTR_NON_CONSISTENT;
 
 	/* FBDev emulation requires kernel mapping */
 	if (!kvmap)
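The exynos hunk reflects the removal of DMA_ATTR_NON_CONSISTENT in this series: the attribute no longer exists, and drivers that want CPU-cached, explicitly synced memory are expected to use the new non-coherent allocator instead. A sketch of those entry points, assuming the 5.10 declarations in <linux/dma-mapping.h>:

void *dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, enum dma_data_direction dir);

As with dma_alloc_pages(), the caller owns cache maintenance through the dma_sync_single_*() helpers; the 53c700 conversion in this same series is one of the first users.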
Some files were not shown because too many files have changed in this diff.