mm: convert page_to_pgoff() to page_pgoff()

Patch series "page->index removals in mm", v2.

As part of shrinking struct page, we need to stop using page->index.  This
patchset gets rid of most of the remaining references to page->index in
mm, as well as increasing the number of functions which take a const
folio/page pointer.  It shrinks the text segment of mm by a few hundred
bytes in my test config, probably mostly from removing calls to
compound_head() in page_to_pgoff().
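In the old page_to_pgoff(), every call site inlined a head-page lookup before it
could read ->index.  A simplified sketch of that check (illustrative only; the
in-tree compound_head() also has a hugetlb vmemmap-optimisation special case):

    static inline struct page *compound_head_sketch(struct page *page)
    {
            unsigned long head = READ_ONCE(page->compound_head);

            /* Tail pages store the head page pointer with bit 0 set. */
            if (unlikely(head & 1))
                    return (struct page *)(head - 1);
            return page;
    }

Passing the folio in directly lets callers skip that per-call-site check, which
is where the text-segment saving most likely comes from.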


This patch (of 7):

Change the function signature to pass in the folio as all three callers
have it.  This removes a reference to page->index, which we're trying to
get rid of.  And add kernel-doc.
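For a caller that already has the folio, the conversion is mechanical; a minimal
before/after sketch (variable names are illustrative, not taken from the patch):

    /* Before: the helper had to find the head page itself. */
    pgoff = page_to_pgoff(page);

    /* After: the folio the caller already holds is passed in,
     * so no compound_head() lookup is needed inside the helper.
     */
    pgoff = page_pgoff(folio, page);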

Link: https://lkml.kernel.org/r/20241005200121.3231142-1-willy@infradead.org
Link: https://lkml.kernel.org/r/20241005200121.3231142-2-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

@@ -1895,7 +1895,7 @@ static inline unsigned long page_to_section(const struct page *page)
  *
  * Return: The Page Frame Number of the first page in the folio.
  */
-static inline unsigned long folio_pfn(struct folio *folio)
+static inline unsigned long folio_pfn(const struct folio *folio)
 {
 	return page_to_pfn(&folio->page);
 }


@@ -1011,22 +1011,25 @@ static inline struct folio *read_mapping_folio(struct address_space *mapping,
 	return read_cache_folio(mapping, index, NULL, file);
 }
 
-/*
- * Get the offset in PAGE_SIZE (even for hugetlb pages).
+/**
+ * page_pgoff - Calculate the logical page offset of this page.
+ * @folio: The folio containing this page.
+ * @page: The page which we need the offset of.
+ *
+ * For file pages, this is the offset from the beginning of the file
+ * in units of PAGE_SIZE. For anonymous pages, this is the offset from
+ * the beginning of the anon_vma in units of PAGE_SIZE. This will
+ * return nonsense for KSM pages.
+ *
+ * Context: Caller must have a reference on the folio or otherwise
+ * prevent it from being split or freed.
+ *
+ * Return: The offset in units of PAGE_SIZE.
  */
-static inline pgoff_t page_to_pgoff(struct page *page)
+static inline pgoff_t page_pgoff(const struct folio *folio,
+		const struct page *page)
 {
-	struct page *head;
-
-	if (likely(!PageTransTail(page)))
-		return page->index;
-
-	head = compound_head(page);
-	/*
-	 * We don't initialize ->index for tail pages: calculate based on
-	 * head page
-	 */
-	return head->index + page - head;
+	return folio->index + folio_page_idx(folio, page);
 }
 
 /*
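For reference, with a contiguous memmap folio_page_idx() expands to
(page - &folio->page), so the new helper is roughly equivalent to the sketch
below; under CONFIG_SPARSEMEM_VMEMMAP it uses pfn arithmetic instead.

    /* Simplified expansion, not the in-tree definition. */
    pgoff_t off = folio->index + (page - &folio->page);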


@@ -617,7 +617,7 @@ static void collect_procs_anon(struct folio *folio, struct page *page,
 	if (av == NULL)	/* Not actually mapped anymore */
 		return;
 
-	pgoff = page_to_pgoff(page);
+	pgoff = page_pgoff(folio, page);
 	rcu_read_lock();
 	for_each_process(tsk) {
 		struct vm_area_struct *vma;
@@ -653,7 +653,7 @@ static void collect_procs_file(struct folio *folio, struct page *page,
 
 	i_mmap_lock_read(mapping);
 	rcu_read_lock();
-	pgoff = page_to_pgoff(page);
+	pgoff = page_pgoff(folio, page);
 	for_each_process(tsk) {
 		struct task_struct *t = task_early_kill(tsk, force_early);
 		unsigned long addr;


@@ -1276,7 +1276,7 @@ static void __page_check_anon_rmap(struct folio *folio, struct page *page,
 	 */
 	VM_BUG_ON_FOLIO(folio_anon_vma(folio)->root != vma->anon_vma->root,
 			folio);
-	VM_BUG_ON_PAGE(page_to_pgoff(page) != linear_page_index(vma, address),
+	VM_BUG_ON_PAGE(page_pgoff(folio, page) != linear_page_index(vma, address),
 		       page);
 }