Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git (synced 2025-01-04 04:02:26 +00:00)
mm/khugepaged: convert is_refcount_suitable() to use folios
Both callers of is_refcount_suitable() have been converted to use folios,
so convert it to take in a folio. Both callers only operate on head pages
of folios, so the mapcount/refcount conversions here are trivial.

Removes 3 calls to compound_head() and 315 bytes of kernel text.

Link: https://lkml.kernel.org/r/20231020183331.10770-4-vishal.moola@gmail.com
Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Yang Shi <shy828301@gmail.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit dbf85c21e4
parent 5c07ebb372
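Before the diff, a minimal sketch of where the savings come from. This is a simplified illustration, not the kernel's exact definitions (the _sketch names are mine, and the bodies are condensed from the real mm helpers): the page-based helper must first resolve a possibly-tail page to its head page, while a struct folio is never a tail page, so the folio helper reads the refcount directly.

/*
 * Simplified sketch, not the kernel's exact definitions: how the
 * page-based and folio-based refcount reads differ. Names with the
 * _sketch suffix are illustrative only.
 */
static inline int page_count_sketch(struct page *page)
{
        /* a tail page must first be folded back to its head page */
        return atomic_read(&compound_head(page)->_refcount);
}

static inline int folio_ref_count_sketch(struct folio *folio)
{
        /* a folio is always a head page, so no compound_head() lookup */
        return atomic_read(&folio->page._refcount);
}

The same reasoning applies to PageSwapCache() versus folio_test_swapcache() and to total_mapcount() versus folio_mapcount(), which is where the commit message's three dropped compound_head() calls come from.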
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -524,15 +524,15 @@ static void release_pte_pages(pte_t *pte, pte_t *_pte,
 	}
 }
 
-static bool is_refcount_suitable(struct page *page)
+static bool is_refcount_suitable(struct folio *folio)
 {
 	int expected_refcount;
 
-	expected_refcount = total_mapcount(page);
-	if (PageSwapCache(page))
-		expected_refcount += compound_nr(page);
+	expected_refcount = folio_mapcount(folio);
+	if (folio_test_swapcache(folio))
+		expected_refcount += folio_nr_pages(folio);
 
-	return page_count(page) == expected_refcount;
+	return folio_ref_count(folio) == expected_refcount;
 }
 
 static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
@@ -625,7 +625,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
 		 * but not from this process. The other process cannot write to
 		 * the page, only trigger CoW.
 		 */
-		if (!is_refcount_suitable(&folio->page)) {
+		if (!is_refcount_suitable(folio)) {
 			folio_unlock(folio);
 			result = SCAN_PAGE_COUNT;
 			goto out;
@@ -1371,7 +1371,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
 		 * has excessive GUP pins (i.e. 512). Anyway the same check
 		 * will be done again later the risk seems low.
 		 */
-		if (!is_refcount_suitable(&folio->page)) {
+		if (!is_refcount_suitable(folio)) {
 			result = SCAN_PAGE_COUNT;
 			goto out_unmap;
 		}
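To make the suitability check concrete, a hypothetical walk-through with assumed values (not taken from the patch): for an order-9 (512-page) folio PTE-mapped exactly once in a single process, folio_mapcount(folio) is 512. If the folio also sits in the swap cache, which holds a reference per subpage, expected_refcount rises to 512 + 512 = 1024. Collapse proceeds only when folio_ref_count(folio) matches that total exactly; any surplus reference, such as a GUP pin, trips the check and the scan exits with SCAN_PAGE_COUNT.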