mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
add devm_memremap_pages
This behaves like devm_memremap except that it ensures we have page
structures available that can back the region.

Signed-off-by: Christoph Hellwig <hch@lst.de>
[djbw: catch attempts to remap RAM, drop flags]
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent 033fbae988
commit 41e94a8513
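
For orientation before the diff: a minimal caller sketch (hypothetical driver code, not part of this commit; the probe function and resource index are assumptions) showing how the new interface is meant to be used. Like devm_memremap(), devm_memremap_pages() returns an ERR_PTR-encoded error on failure, and the devres machinery handles teardown when the device goes away.

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

/* Hypothetical caller, not part of this commit. */
static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void *addr;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	/* Map the region with struct pages backing it; devres tears it down. */
	addr = devm_memremap_pages(&pdev->dev, res);
	if (IS_ERR(addr))
		return PTR_ERR(addr);

	return 0;
}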
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -20,10 +20,13 @@
 
 #include <linux/types.h>
 #include <linux/init.h>
+#include <linux/bug.h>
+#include <linux/err.h>
 #include <asm/io.h>
 #include <asm/page.h>
 
 struct device;
+struct resource;
 
 __visible void __iowrite32_copy(void __iomem *to, const void *from, size_t count);
 void __iowrite64_copy(void __iomem *to, const void *from, size_t count);
@@ -84,6 +87,23 @@ void *devm_memremap(struct device *dev, resource_size_t offset,
 		size_t size, unsigned long flags);
 void devm_memunmap(struct device *dev, void *addr);
 
+void *__devm_memremap_pages(struct device *dev, struct resource *res);
+
+#ifdef CONFIG_ZONE_DEVICE
+void *devm_memremap_pages(struct device *dev, struct resource *res);
+#else
+static inline void *devm_memremap_pages(struct device *dev, struct resource *res)
+{
+	/*
+	 * Fail attempts to call devm_memremap_pages() without
+	 * ZONE_DEVICE support enabled, this requires callers to fall
+	 * back to plain devm_memremap() based on config
+	 */
+	WARN_ON_ONCE(1);
+	return ERR_PTR(-ENXIO);
+}
+#endif
+
 /*
  * Some systems do not have legacy ISA devices.
  * /dev/port is not a valid interface on these systems.
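
The !CONFIG_ZONE_DEVICE stub above warns once and returns ERR_PTR(-ENXIO); per its comment, callers are expected to fall back to plain devm_memremap() based on config when page structures are not required. A sketch of that fallback pattern (hypothetical helper; the MEMREMAP_WB flag choice is an assumption, not taken from this commit):

#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/kconfig.h>

/* Hypothetical config-based fallback; not part of this commit. */
static void *example_map(struct device *dev, struct resource *res)
{
	if (IS_ENABLED(CONFIG_ZONE_DEVICE))
		return devm_memremap_pages(dev, res);

	/* No ZONE_DEVICE: settle for a mapping without struct pages. */
	return devm_memremap(dev, res->start, resource_size(res),
			MEMREMAP_WB);
}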
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -14,6 +14,7 @@
 #include <linux/types.h>
 #include <linux/io.h>
 #include <linux/mm.h>
+#include <linux/memory_hotplug.h>
 
 #ifndef ioremap_cache
 /* temporary while we convert existing ioremap_cache users to memremap */
@@ -135,3 +136,55 @@ void devm_memunmap(struct device *dev, void *addr)
 	memunmap(addr);
 }
 EXPORT_SYMBOL(devm_memunmap);
+
+#ifdef CONFIG_ZONE_DEVICE
+struct page_map {
+	struct resource res;
+};
+
+static void devm_memremap_pages_release(struct device *dev, void *res)
+{
+	struct page_map *page_map = res;
+
+	/* pages are dead and unused, undo the arch mapping */
+	arch_remove_memory(page_map->res.start, resource_size(&page_map->res));
+}
+
+void *devm_memremap_pages(struct device *dev, struct resource *res)
+{
+	int is_ram = region_intersects(res->start, resource_size(res),
+			"System RAM");
+	struct page_map *page_map;
+	int error, nid;
+
+	if (is_ram == REGION_MIXED) {
+		WARN_ONCE(1, "%s attempted on mixed region %pr\n",
+				__func__, res);
+		return ERR_PTR(-ENXIO);
+	}
+
+	if (is_ram == REGION_INTERSECTS)
+		return __va(res->start);
+
+	page_map = devres_alloc(devm_memremap_pages_release,
+			sizeof(*page_map), GFP_KERNEL);
+	if (!page_map)
+		return ERR_PTR(-ENOMEM);
+
+	memcpy(&page_map->res, res, sizeof(*res));
+
+	nid = dev_to_node(dev);
+	if (nid < 0)
+		nid = 0;
+
+	error = arch_add_memory(nid, res->start, resource_size(res), true);
+	if (error) {
+		devres_free(page_map);
+		return ERR_PTR(error);
+	}
+
+	devres_add(dev, page_map);
+	return __va(res->start);
+}
+EXPORT_SYMBOL(devm_memremap_pages);
+#endif /* CONFIG_ZONE_DEVICE */
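
What the implementation buys a caller, in one small sketch (hypothetical code, assuming a successful devm_memremap_pages() call): because arch_add_memory() placed the range in the linear map with struct pages behind it, the returned address can be translated back to page structures, which a plain memremap() mapping does not guarantee.

#include <linux/ioport.h>
#include <linux/mm.h>
#include <linux/printk.h>

/* Hypothetical follow-up to a successful devm_memremap_pages() call. */
static void example_inspect(void *addr, struct resource *res)
{
	/* Valid struct pages now back the linear-mapped region. */
	struct page *first = virt_to_page(addr);
	unsigned long nr_pages = resource_size(res) >> PAGE_SHIFT;

	pr_info("mapped %lu pages starting at pfn %#lx\n",
			nr_pages, page_to_pfn(first));
}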