gfs2: convert gfs2_write_jdata_page() to gfs2_write_jdata_folio()

Add support for large folios and remove some accesses to page->mapping and
page->index.

Link: https://lkml.kernel.org/r/20230612210141.730128-4-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Tested-by: Bob Peterson <rpeterso@redhat.com>
Reviewed-by: Bob Peterson <rpeterso@redhat.com>
Reviewed-by: Andreas Gruenbacher <agruenba@redhat.com>
Cc: Hannes Reinecke <hare@suse.com>
Cc: Luis Chamberlain <mcgrof@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

@@ -82,33 +82,33 @@ static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
 }
 
 /**
- * gfs2_write_jdata_page - gfs2 jdata-specific version of block_write_full_page
- * @page: The page to write
+ * gfs2_write_jdata_folio - gfs2 jdata-specific version of block_write_full_page
+ * @folio: The folio to write
  * @wbc: The writeback control
  *
  * This is the same as calling block_write_full_page, but it also
  * writes pages outside of i_size
  */
-static int gfs2_write_jdata_page(struct page *page,
+static int gfs2_write_jdata_folio(struct folio *folio,
 				 struct writeback_control *wbc)
 {
-	struct inode * const inode = page->mapping->host;
+	struct inode * const inode = folio->mapping->host;
 	loff_t i_size = i_size_read(inode);
-	const pgoff_t end_index = i_size >> PAGE_SHIFT;
-	unsigned offset;
 
 	/*
-	 * The page straddles i_size.  It must be zeroed out on each and every
+	 * The folio straddles i_size.  It must be zeroed out on each and every
 	 * writepage invocation because it may be mmapped.  "A file is mapped
 	 * in multiples of the page size.  For a file that is not a multiple of
-	 * the  page size, the remaining memory is zeroed when mapped, and
+	 * the page size, the remaining memory is zeroed when mapped, and
 	 * writes to that region are not written out to the file."
 	 */
-	offset = i_size & (PAGE_SIZE - 1);
-	if (page->index == end_index && offset)
-		zero_user_segment(page, offset, PAGE_SIZE);
+	if (folio_pos(folio) < i_size &&
+	    i_size < folio_pos(folio) + folio_size(folio))
+		folio_zero_segment(folio, offset_in_folio(folio, i_size),
+				folio_size(folio));
 
-	return __block_write_full_page(inode, page, gfs2_get_block_noalloc, wbc,
+	return __block_write_full_page(inode, &folio->page,
+				gfs2_get_block_noalloc, wbc,
 			       end_buffer_async_write);
 }
 
@@ -137,7 +137,7 @@ static int __gfs2_jdata_write_folio(struct folio *folio,
 		}
 		gfs2_trans_add_databufs(ip, folio, 0, folio_size(folio));
 	}
-	return gfs2_write_jdata_page(&folio->page, wbc);
+	return gfs2_write_jdata_folio(folio, wbc);
 }
 
 /**
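
Note on the straddle check: the old code could only ask whether a single page's index equalled the last page of the file, while the new code compares i_size directly against the folio's byte range, which works for folios of any order. Below is a minimal userspace sketch of that arithmetic, not kernel code: fpos and fsize are illustrative stand-ins for folio_pos(folio) and folio_size(folio), and the helper names are hypothetical.

/* Userspace model of the EOF-straddle tests; names and values are
 * illustrative assumptions, not the kernel API.
 */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL

/* New-style check: valid for a folio of any size. */
static bool folio_straddles_eof(unsigned long long fpos,
				unsigned long long fsize,
				unsigned long long i_size)
{
	return fpos < i_size && i_size < fpos + fsize;
}

/* Old-style check: only describes one PAGE_SIZE page at a time. */
static bool page_straddles_eof(unsigned long long index,
			       unsigned long long i_size)
{
	unsigned long long end_index = i_size / PAGE_SIZE;
	unsigned long long offset = i_size % PAGE_SIZE;

	return index == end_index && offset != 0;
}

int main(void)
{
	unsigned long long i_size = 5000;	/* file ends partway into page 1 */
	unsigned long long fpos = 0;		/* folio starts at offset 0 */
	unsigned long long fsize = 4 * PAGE_SIZE; /* a 16 KiB large folio */

	printf("folio check: %d\n", folio_straddles_eof(fpos, fsize, i_size));
	/* The page test must be asked per subpage; only index 1 reports the
	 * straddle, and it says nothing about the folio as a whole. */
	printf("page check (index 1): %d\n", page_straddles_eof(1, i_size));
	return 0;
}

With a 16 KiB folio and i_size = 5000, the folio check reports the straddle from the folio's byte range alone; zeroing then starts at offset_in_folio(folio, i_size) and runs to the end of the folio, which is what the patch's folio_zero_segment() call expresses.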