Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
zsmalloc: replace kmap_atomic with kmap_local_page
The use of kmap_atomic()/kunmap_atomic() is deprecated.  Replace them
with kmap_local_page()/kunmap_local() all over the place.  Also fix the
missing SPDX license header.  Both issues were reported by checkpatch:

WARNING: Missing or malformed SPDX-License-Identifier tag in line 1

WARNING: Deprecated use of 'kmap_atomic', prefer 'kmap_local_page' instead
+		vaddr = kmap_atomic(page);

Link: https://lkml.kernel.org/r/20241001175358.12970-1-quic_pintu@quicinc.com
Signed-off-by: Pintu Kumar <quic_pintu@quicinc.com>
Cc: Joe Perches <joe@perches.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Pintu Agarwal <pintu.ping@gmail.com>
Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Shuah Khan <skhan@linuxfoundation.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent b7fc16a16b
commit 91d0ec8347
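The pattern applied throughout the diff below is a one-for-one
substitution: kmap_atomic() disables page faults and preemption for the
duration of the mapping, while kmap_local_page() creates a CPU-local
mapping that remains valid across preemption, so call sites that do not
depend on those side effects convert by a simple rename.  A minimal
sketch of the before/after shape, using a hypothetical helper that is
not part of this patch:

#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical helper, only to illustrate the conversion pattern. */
static void read_object(struct page *page, void *buf, size_t len)
{
	void *vaddr;

	/* Before: vaddr = kmap_atomic(page);  (faults/preemption disabled) */
	vaddr = kmap_local_page(page);	/* CPU-local, preemptible mapping */
	memcpy(buf, vaddr, len);
	/* Before: kunmap_atomic(vaddr); */
	kunmap_local(vaddr);
}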
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1,3 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
 /*
  * zsmalloc memory allocator
  *
@@ -898,7 +900,7 @@ static void init_zspage(struct size_class *class, struct zspage *zspage)
 
 		set_first_obj_offset(page, off);
 
-		vaddr = kmap_atomic(page);
+		vaddr = kmap_local_page(page);
 		link = (struct link_free *)vaddr + off / sizeof(*link);
 
 		while ((off += class->size) < PAGE_SIZE) {
@@ -921,7 +923,7 @@ static void init_zspage(struct size_class *class, struct zspage *zspage)
 		 */
 		link->next = -1UL << OBJ_TAG_BITS;
 	}
-	kunmap_atomic(vaddr);
+	kunmap_local(vaddr);
 	page = next_page;
 	off %= PAGE_SIZE;
 }
@@ -1059,12 +1061,12 @@ static void *__zs_map_object(struct mapping_area *area,
 	sizes[1] = size - sizes[0];
 
 	/* copy object to per-cpu buffer */
-	addr = kmap_atomic(pages[0]);
+	addr = kmap_local_page(pages[0]);
 	memcpy(buf, addr + off, sizes[0]);
-	kunmap_atomic(addr);
-	addr = kmap_atomic(pages[1]);
+	kunmap_local(addr);
+	addr = kmap_local_page(pages[1]);
 	memcpy(buf + sizes[0], addr, sizes[1]);
-	kunmap_atomic(addr);
+	kunmap_local(addr);
 out:
 	return area->vm_buf;
 }
@@ -1089,12 +1091,12 @@ static void __zs_unmap_object(struct mapping_area *area,
 	sizes[1] = size - sizes[0];
 
 	/* copy per-cpu buffer to object */
-	addr = kmap_atomic(pages[0]);
+	addr = kmap_local_page(pages[0]);
 	memcpy(addr + off, buf, sizes[0]);
-	kunmap_atomic(addr);
-	addr = kmap_atomic(pages[1]);
+	kunmap_local(addr);
+	addr = kmap_local_page(pages[1]);
 	memcpy(addr, buf + sizes[0], sizes[1]);
-	kunmap_atomic(addr);
+	kunmap_local(addr);
 
 out:
 	/* enable page faults to match kunmap_atomic() return conditions */
@@ -1223,7 +1225,7 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
 	area->vm_mm = mm;
 	if (off + class->size <= PAGE_SIZE) {
 		/* this object is contained entirely within a page */
-		area->vm_addr = kmap_atomic(page);
+		area->vm_addr = kmap_local_page(page);
 		ret = area->vm_addr + off;
 		goto out;
 	}
@@ -1260,7 +1262,7 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
 
 	area = this_cpu_ptr(&zs_map_area);
 	if (off + class->size <= PAGE_SIZE)
-		kunmap_atomic(area->vm_addr);
+		kunmap_local(area->vm_addr);
 	else {
 		struct page *pages[2];
 
@@ -1318,7 +1320,7 @@ static unsigned long obj_malloc(struct zs_pool *pool,
 	for (i = 0; i < nr_page; i++)
 		m_page = get_next_page(m_page);
 
-	vaddr = kmap_atomic(m_page);
+	vaddr = kmap_local_page(m_page);
 	link = (struct link_free *)vaddr + m_offset / sizeof(*link);
 	set_freeobj(zspage, link->next >> OBJ_TAG_BITS);
 	if (likely(!ZsHugePage(zspage)))
@@ -1328,7 +1330,7 @@ static unsigned long obj_malloc(struct zs_pool *pool,
 		/* record handle to page->index */
 		zspage->first_page->index = handle | OBJ_ALLOCATED_TAG;
 
-	kunmap_atomic(vaddr);
+	kunmap_local(vaddr);
 	mod_zspage_inuse(zspage, 1);
 
 	obj = location_to_obj(m_page, obj);
@@ -1419,7 +1421,7 @@ static void obj_free(int class_size, unsigned long obj)
 	f_offset = offset_in_page(class_size * f_objidx);
 	zspage = get_zspage(f_page);
 
-	vaddr = kmap_atomic(f_page);
+	vaddr = kmap_local_page(f_page);
 	link = (struct link_free *)(vaddr + f_offset);
 
 	/* Insert this object in containing zspage's freelist */
@@ -1429,7 +1431,7 @@ static void obj_free(int class_size, unsigned long obj)
 		f_page->index = 0;
 	set_freeobj(zspage, f_objidx);
 
-	kunmap_atomic(vaddr);
+	kunmap_local(vaddr);
 	mod_zspage_inuse(zspage, -1);
 }
 
@@ -1492,8 +1494,8 @@ static void zs_object_copy(struct size_class *class, unsigned long dst,
 	if (d_off + class->size > PAGE_SIZE)
 		d_size = PAGE_SIZE - d_off;
 
-	s_addr = kmap_atomic(s_page);
-	d_addr = kmap_atomic(d_page);
+	s_addr = kmap_local_page(s_page);
+	d_addr = kmap_local_page(d_page);
 
 	while (1) {
 		size = min(s_size, d_size);
@@ -1516,26 +1518,26 @@ static void zs_object_copy(struct size_class *class, unsigned long dst,
 		 * Documentation/mm/highmem.rst.
 		 */
 		if (s_off >= PAGE_SIZE) {
-			kunmap_atomic(d_addr);
-			kunmap_atomic(s_addr);
+			kunmap_local(d_addr);
+			kunmap_local(s_addr);
 			s_page = get_next_page(s_page);
-			s_addr = kmap_atomic(s_page);
-			d_addr = kmap_atomic(d_page);
+			s_addr = kmap_local_page(s_page);
+			d_addr = kmap_local_page(d_page);
 			s_size = class->size - written;
 			s_off = 0;
 		}
 
 		if (d_off >= PAGE_SIZE) {
-			kunmap_atomic(d_addr);
+			kunmap_local(d_addr);
 			d_page = get_next_page(d_page);
-			d_addr = kmap_atomic(d_page);
+			d_addr = kmap_local_page(d_page);
 			d_size = class->size - written;
 			d_off = 0;
 		}
 	}
 
-	kunmap_atomic(d_addr);
-	kunmap_atomic(s_addr);
+	kunmap_local(d_addr);
+	kunmap_local(s_addr);
 }
 
 /*
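The hunk above keeps the unmap ordering that Documentation/mm/highmem.rst
requires: local mappings are stack-based, so nested mappings must be
released in reverse order of acquisition, which is why d_addr (mapped
last) is always unmapped before s_addr.  A hypothetical two-page copy
illustrating the rule, not code from this patch:

#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical helper, only to illustrate LIFO unmap ordering. */
static void copy_whole_page(struct page *d_page, struct page *s_page)
{
	void *s_addr = kmap_local_page(s_page);	/* mapped first */
	void *d_addr = kmap_local_page(d_page);	/* mapped second */

	memcpy(d_addr, s_addr, PAGE_SIZE);

	kunmap_local(d_addr);	/* released first (last in, first out) */
	kunmap_local(s_addr);	/* released last */
}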
@@ -1548,7 +1550,7 @@ static unsigned long find_alloced_obj(struct size_class *class,
 	unsigned int offset;
 	int index = *obj_idx;
 	unsigned long handle = 0;
-	void *addr = kmap_atomic(page);
+	void *addr = kmap_local_page(page);
 
 	offset = get_first_obj_offset(page);
 	offset += class->size * index;
@@ -1561,7 +1563,7 @@ static unsigned long find_alloced_obj(struct size_class *class,
 		index++;
 	}
 
-	kunmap_atomic(addr);
+	kunmap_local(addr);
 
 	*obj_idx = index;
 
@@ -1798,14 +1800,14 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
 	migrate_write_lock(zspage);
 
 	offset = get_first_obj_offset(page);
-	s_addr = kmap_atomic(page);
+	s_addr = kmap_local_page(page);
 
 	/*
 	 * Here, any user cannot access all objects in the zspage so let's move.
 	 */
-	d_addr = kmap_atomic(newpage);
+	d_addr = kmap_local_page(newpage);
 	copy_page(d_addr, s_addr);
-	kunmap_atomic(d_addr);
+	kunmap_local(d_addr);
 
 	for (addr = s_addr + offset; addr < s_addr + PAGE_SIZE;
 					addr += class->size) {
@@ -1818,7 +1820,7 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
 			record_obj(handle, new_obj);
 		}
 	}
-	kunmap_atomic(s_addr);
+	kunmap_local(s_addr);
 
 	replace_sub_page(class, zspage, newpage, page);
 	/*