shmem: convert shmem_replace_page() to use folios throughout
Introduce folio_set_swap_entry() to abstract how both folio->private and
swp_entry_t work.  Use swap_address_space() directly instead of
indirecting through folio_mapping().  Include an assertion that the old
folio is not large as we only allocate a single-page folio to replace it.
Use folio_put_refs() instead of calling folio_put() twice.

Link: https://lkml.kernel.org/r/20220902194653.1739778-9-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 907ea17eb2
parent 4cd400fd1f
include/linux/swap.h

@@ -355,6 +355,11 @@ static inline swp_entry_t folio_swap_entry(struct folio *folio)
 	return entry;
 }
 
+static inline void folio_set_swap_entry(struct folio *folio, swp_entry_t entry)
+{
+	folio->private = (void *)entry.val;
+}
+
 /* linux/mm/workingset.c */
 void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages);
 void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg);
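The setter added above mirrors the existing folio_swap_entry() getter. A
minimal sketch of the pair in use; transfer_swap_entry() is a hypothetical
caller for illustration, not part of this patch:

/* Hypothetical helper: carry a swap entry from one folio to another.
 * Assumes <linux/swap.h> with the getter/setter pair shown above. */
static inline void transfer_swap_entry(struct folio *dst, struct folio *src)
{
	swp_entry_t entry = folio_swap_entry(src);	/* reads src->private */

	folio_set_swap_entry(dst, entry);	/* stores entry.val in dst->private */
}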
mm/shmem.c (67 changed lines)
@@ -1560,12 +1560,6 @@ static struct folio *shmem_alloc_folio(gfp_t gfp,
 	return folio;
 }
 
-static struct page *shmem_alloc_page(gfp_t gfp,
-			struct shmem_inode_info *info, pgoff_t index)
-{
-	return &shmem_alloc_folio(gfp, info, index)->page;
-}
-
 static struct folio *shmem_alloc_and_acct_folio(gfp_t gfp, struct inode *inode,
 		pgoff_t index, bool huge)
 {
@@ -1617,51 +1611,49 @@ static bool shmem_should_replace_folio(struct folio *folio, gfp_t gfp)
 static int shmem_replace_page(struct page **pagep, gfp_t gfp,
 				struct shmem_inode_info *info, pgoff_t index)
 {
-	struct page *oldpage, *newpage;
 	struct folio *old, *new;
 	struct address_space *swap_mapping;
 	swp_entry_t entry;
 	pgoff_t swap_index;
 	int error;
 
-	oldpage = *pagep;
-	entry.val = page_private(oldpage);
+	old = page_folio(*pagep);
+	entry = folio_swap_entry(old);
 	swap_index = swp_offset(entry);
-	swap_mapping = page_mapping(oldpage);
+	swap_mapping = swap_address_space(entry);
 
 	/*
 	 * We have arrived here because our zones are constrained, so don't
 	 * limit chance of success by further cpuset and node constraints.
 	 */
 	gfp &= ~GFP_CONSTRAINT_MASK;
-	newpage = shmem_alloc_page(gfp, info, index);
-	if (!newpage)
+	VM_BUG_ON_FOLIO(folio_test_large(old), old);
+	new = shmem_alloc_folio(gfp, info, index);
+	if (!new)
 		return -ENOMEM;
 
-	get_page(newpage);
-	copy_highpage(newpage, oldpage);
-	flush_dcache_page(newpage);
+	folio_get(new);
+	folio_copy(new, old);
+	flush_dcache_folio(new);
 
-	__SetPageLocked(newpage);
-	__SetPageSwapBacked(newpage);
-	SetPageUptodate(newpage);
-	set_page_private(newpage, entry.val);
-	SetPageSwapCache(newpage);
+	__folio_set_locked(new);
+	__folio_set_swapbacked(new);
+	folio_mark_uptodate(new);
+	folio_set_swap_entry(new, entry);
+	folio_set_swapcache(new);
 
 	/*
 	 * Our caller will very soon move newpage out of swapcache, but it's
 	 * a nice clean interface for us to replace oldpage by newpage there.
 	 */
 	xa_lock_irq(&swap_mapping->i_pages);
-	error = shmem_replace_entry(swap_mapping, swap_index, oldpage, newpage);
+	error = shmem_replace_entry(swap_mapping, swap_index, old, new);
 	if (!error) {
-		old = page_folio(oldpage);
-		new = page_folio(newpage);
 		mem_cgroup_migrate(old, new);
-		__inc_lruvec_page_state(newpage, NR_FILE_PAGES);
-		__inc_lruvec_page_state(newpage, NR_SHMEM);
-		__dec_lruvec_page_state(oldpage, NR_FILE_PAGES);
-		__dec_lruvec_page_state(oldpage, NR_SHMEM);
+		__lruvec_stat_mod_folio(new, NR_FILE_PAGES, 1);
+		__lruvec_stat_mod_folio(new, NR_SHMEM, 1);
+		__lruvec_stat_mod_folio(old, NR_FILE_PAGES, -1);
+		__lruvec_stat_mod_folio(old, NR_SHMEM, -1);
 	}
 	xa_unlock_irq(&swap_mapping->i_pages);
 
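A note on the swap_mapping change in the hunk above: folio_mapping() on a
swapcache folio must first test folio_test_swapcache() before deriving the
swapper address space from the folio's swap entry; since this function
already holds the entry, swap_address_space() gets there directly. A sketch
of what the lookup amounts to — swapper_spaces, swp_type(), swp_offset()
and SWAP_ADDRESS_SPACE_SHIFT are real kernel symbols, but the expansion is
paraphrased rather than quoted:

/* The swapcache is sharded into many small address_spaces, indexed by
 * swap type and by the high bits of the swap offset, to spread lock
 * contention across the xarrays. Roughly: */
swap_mapping = &swapper_spaces[swp_type(entry)]
			      [swp_offset(entry) >> SWAP_ADDRESS_SPACE_SHIFT];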
@@ -1671,18 +1663,17 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
 		 * both PageSwapCache and page_private after getting page lock;
 		 * but be defensive.  Reverse old to newpage for clear and free.
 		 */
-		oldpage = newpage;
+		old = new;
 	} else {
-		lru_cache_add(newpage);
-		*pagep = newpage;
+		folio_add_lru(new);
+		*pagep = &new->page;
 	}
 
-	ClearPageSwapCache(oldpage);
-	set_page_private(oldpage, 0);
+	folio_clear_swapcache(old);
+	old->private = NULL;
 
-	unlock_page(oldpage);
-	put_page(oldpage);
-	put_page(oldpage);
+	folio_unlock(old);
+	folio_put_refs(old, 2);
 	return error;
 }
 
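The folio_put_refs(old, 2) above drops both remaining references — the
swapcache's and this function's own — in a single step, where the old code
paid two separate atomic decrements. Roughly what the helper does,
paraphrased from include/linux/mm.h as a sketch rather than the
authoritative definition:

static inline void folio_put_refs(struct folio *folio, int refs)
{
	if (folio_ref_sub_and_test(folio, refs))	/* one atomic subtraction */
		__folio_put(folio);			/* free if the count hit zero */
}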
@@ -2383,6 +2374,12 @@ static struct inode *shmem_get_inode(struct super_block *sb, struct inode *dir,
 }
 
 #ifdef CONFIG_USERFAULTFD
+static struct page *shmem_alloc_page(gfp_t gfp,
+			struct shmem_inode_info *info, pgoff_t index)
+{
+	return &shmem_alloc_folio(gfp, info, index)->page;
+}
+
 int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
 			   pmd_t *dst_pmd,
 			   struct vm_area_struct *dst_vma,