mm/migrate: Add filemap_migrate_folio()
There is nothing iomap-specific about iomap_migratepage(), and it fits a
pattern used by several other filesystems, so move it to mm/migrate.c,
convert it to be filemap_migrate_folio() and convert the iomap filesystems
to use it.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
commit 2ec810d596
parent 541846502f
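For a filesystem that keeps all of its per-folio state in folio->private, the conversion amounts to swapping one address_space_operations entry, as the gfs2, xfs and zonefs hunks below show. The following is only an illustrative sketch (example_aops is hypothetical, not from the patch); the surrounding entries mirror the converted filesystems:

/*
 * Minimal sketch of the conversion pattern applied in this commit.
 * "example_aops" is hypothetical; the other entries are the common
 * iomap callbacks seen in the converted filesystems below.
 */
static const struct address_space_operations example_aops = {
	.invalidate_folio	= iomap_invalidate_folio,
	.direct_IO		= noop_direct_IO,
	/* was: .migratepage = iomap_migrate_page, */
	.migrate_folio		= filemap_migrate_folio,
	.is_partially_uptodate	= iomap_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};

Note that because the pagemap.h hunk below also adds a "#define filemap_migrate_folio NULL" fallback, this initializer still builds with CONFIG_MIGRATION disabled.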
fs/gfs2/aops.c
@@ -774,7 +774,7 @@ static const struct address_space_operations gfs2_aops = {
 	.invalidate_folio = iomap_invalidate_folio,
 	.bmap = gfs2_bmap,
 	.direct_IO = noop_direct_IO,
-	.migratepage = iomap_migrate_page,
+	.migrate_folio = filemap_migrate_folio,
 	.is_partially_uptodate = iomap_is_partially_uptodate,
 	.error_remove_page = generic_error_remove_page,
 };
fs/iomap/buffered-io.c
@@ -489,31 +489,6 @@ void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len)
 }
 EXPORT_SYMBOL_GPL(iomap_invalidate_folio);
 
-#ifdef CONFIG_MIGRATION
-int
-iomap_migrate_page(struct address_space *mapping, struct page *newpage,
-		struct page *page, enum migrate_mode mode)
-{
-	struct folio *folio = page_folio(page);
-	struct folio *newfolio = page_folio(newpage);
-	int ret;
-
-	ret = folio_migrate_mapping(mapping, newfolio, folio, 0);
-	if (ret != MIGRATEPAGE_SUCCESS)
-		return ret;
-
-	if (folio_test_private(folio))
-		folio_attach_private(newfolio, folio_detach_private(folio));
-
-	if (mode != MIGRATE_SYNC_NO_COPY)
-		folio_migrate_copy(newfolio, folio);
-	else
-		folio_migrate_flags(newfolio, folio);
-	return MIGRATEPAGE_SUCCESS;
-}
-EXPORT_SYMBOL_GPL(iomap_migrate_page);
-#endif /* CONFIG_MIGRATION */
-
 static void
 iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
 {
fs/xfs/xfs_aops.c
@@ -570,7 +570,7 @@ const struct address_space_operations xfs_address_space_operations = {
 	.invalidate_folio = iomap_invalidate_folio,
 	.bmap = xfs_vm_bmap,
 	.direct_IO = noop_direct_IO,
-	.migratepage = iomap_migrate_page,
+	.migrate_folio = filemap_migrate_folio,
 	.is_partially_uptodate = iomap_is_partially_uptodate,
 	.error_remove_page = generic_error_remove_page,
 	.swap_activate = xfs_iomap_swapfile_activate,
fs/zonefs/super.c
@@ -271,7 +271,7 @@ static const struct address_space_operations zonefs_file_aops = {
 	.dirty_folio = filemap_dirty_folio,
 	.release_folio = iomap_release_folio,
 	.invalidate_folio = iomap_invalidate_folio,
-	.migratepage = iomap_migrate_page,
+	.migrate_folio = filemap_migrate_folio,
 	.is_partially_uptodate = iomap_is_partially_uptodate,
 	.error_remove_page = generic_error_remove_page,
 	.direct_IO = noop_direct_IO,
include/linux/iomap.h
@@ -231,12 +231,6 @@ void iomap_readahead(struct readahead_control *, const struct iomap_ops *ops);
 bool iomap_is_partially_uptodate(struct folio *, size_t from, size_t count);
 bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags);
 void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len);
-#ifdef CONFIG_MIGRATION
-int iomap_migrate_page(struct address_space *mapping, struct page *newpage,
-		struct page *page, enum migrate_mode mode);
-#else
-#define iomap_migrate_page NULL
-#endif
 int iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
 		const struct iomap_ops *ops);
 int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len,
include/linux/pagemap.h
@@ -1078,6 +1078,12 @@ static inline int __must_check write_one_page(struct page *page)
 int __set_page_dirty_nobuffers(struct page *page);
 bool noop_dirty_folio(struct address_space *mapping, struct folio *folio);
 
+#ifdef CONFIG_MIGRATION
+int filemap_migrate_folio(struct address_space *mapping, struct folio *dst,
+		struct folio *src, enum migrate_mode mode);
+#else
+#define filemap_migrate_folio NULL
+#endif
 void page_endio(struct page *page, bool is_write, int err);
 
 void folio_end_private_2(struct folio *folio);
 mm/migrate.c | 20 ++++++++++++++++++++
mm/migrate.c
@@ -784,6 +784,26 @@ int buffer_migrate_folio_norefs(struct address_space *mapping,
 }
 #endif
 
+int filemap_migrate_folio(struct address_space *mapping,
+		struct folio *dst, struct folio *src, enum migrate_mode mode)
+{
+	int ret;
+
+	ret = folio_migrate_mapping(mapping, dst, src, 0);
+	if (ret != MIGRATEPAGE_SUCCESS)
+		return ret;
+
+	if (folio_get_private(src))
+		folio_attach_private(dst, folio_detach_private(src));
+
+	if (mode != MIGRATE_SYNC_NO_COPY)
+		folio_migrate_copy(dst, src);
+	else
+		folio_migrate_flags(dst, src);
+	return MIGRATEPAGE_SUCCESS;
+}
+EXPORT_SYMBOL_GPL(filemap_migrate_folio);
+
 /*
  * Writeback a folio to clean the dirty state
  */
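The new helper can also serve as a building block: a filesystem that needs extra work around migration could delegate the common mapping/private/copy steps to filemap_migrate_folio() and add its own bookkeeping afterwards. This is a hedged sketch under that assumption, not code from the patch; example_migrate_folio() and example_note_migrated() are hypothetical:

#include <linux/migrate.h>
#include <linux/pagemap.h>

/* Hypothetical per-filesystem bookkeeping; stands in for whatever state a
 * real filesystem would need to update for the newly migrated folio. */
static void example_note_migrated(struct folio *dst)
{
}

static int example_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	int ret;

	/* Common part: move the mapping entries, transfer folio->private,
	 * and copy data/flags, exactly as filemap_migrate_folio() above. */
	ret = filemap_migrate_folio(mapping, dst, src, mode);
	if (ret != MIGRATEPAGE_SUCCESS)
		return ret;

	/* Filesystem-specific follow-up work. */
	example_note_migrated(dst);
	return MIGRATEPAGE_SUCCESS;
}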