mm: compaction: have compaction_suitable() return bool

Since it only returns COMPACT_CONTINUE or COMPACT_SKIPPED now, a bool
return value simplifies the callsites.
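
For illustration only (not part of the patch), a typical caller such as
kcompactd_node_suitable() in the diff below goes from spelling out the enum
comparison to testing a plain predicate:

	/* before: caller must compare against COMPACT_CONTINUE */
	if (compaction_suitable(zone, pgdat->kcompactd_max_order,
				highest_zoneidx) == COMPACT_CONTINUE)
		return true;

	/* after: the bool return reads as a simple yes/no check */
	if (compaction_suitable(zone, pgdat->kcompactd_max_order,
				highest_zoneidx))
		return true;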

Link: https://lkml.kernel.org/r/20230602151204.GD161817@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Suggested-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 3cf0493752 (parent 1c9568e806)
Author:    Johannes Weiner <hannes@cmpxchg.org>, 2023-06-02 11:12:04 -04:00
Committer: Andrew Morton <akpm@linux-foundation.org>

3 files changed, 36 insertions(+), 40 deletions(-)

include/linux/compaction.h

@@ -89,7 +89,7 @@ extern enum compact_result try_to_compact_pages(gfp_t gfp_mask,
 		const struct alloc_context *ac, enum compact_priority prio,
 		struct page **page);
 extern void reset_isolation_suitable(pg_data_t *pgdat);
-extern enum compact_result compaction_suitable(struct zone *zone, int order,
+extern bool compaction_suitable(struct zone *zone, int order,
 		int highest_zoneidx);
 
 extern void compaction_defer_reset(struct zone *zone, int order,
@@ -107,10 +107,10 @@ static inline void reset_isolation_suitable(pg_data_t *pgdat)
 {
 }
 
-static inline enum compact_result compaction_suitable(struct zone *zone, int order,
+static inline bool compaction_suitable(struct zone *zone, int order,
 		int highest_zoneidx)
 {
-	return COMPACT_SKIPPED;
+	return false;
 }
 
 static inline void kcompactd_run(int nid)

mm/compaction.c

@@ -2193,9 +2193,9 @@ static enum compact_result compact_finished(struct compact_control *cc)
 	return ret;
 }
 
-static enum compact_result __compaction_suitable(struct zone *zone, int order,
-					int highest_zoneidx,
-					unsigned long wmark_target)
+static bool __compaction_suitable(struct zone *zone, int order,
+				  int highest_zoneidx,
+				  unsigned long wmark_target)
 {
 	unsigned long watermark;
 	/*
@@ -2215,27 +2215,20 @@ static enum compact_result __compaction_suitable(struct zone *zone, int order,
 	watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ?
 				low_wmark_pages(zone) : min_wmark_pages(zone);
 	watermark += compact_gap(order);
-	if (!__zone_watermark_ok(zone, 0, watermark, highest_zoneidx,
-				 ALLOC_CMA, wmark_target))
-		return COMPACT_SKIPPED;
-
-	return COMPACT_CONTINUE;
+	return __zone_watermark_ok(zone, 0, watermark, highest_zoneidx,
+				   ALLOC_CMA, wmark_target);
 }
 
 /*
  * compaction_suitable: Is this suitable to run compaction on this zone now?
- * Returns
- *   COMPACT_SKIPPED  - If there are too few free pages for compaction
- *   COMPACT_CONTINUE - If compaction should run now
  */
-enum compact_result compaction_suitable(struct zone *zone, int order,
-					int highest_zoneidx)
+bool compaction_suitable(struct zone *zone, int order, int highest_zoneidx)
 {
-	enum compact_result ret;
-	int fragindex;
+	enum compact_result compact_result;
+	bool suitable;
 
-	ret = __compaction_suitable(zone, order, highest_zoneidx,
-				    zone_page_state(zone, NR_FREE_PAGES));
+	suitable = __compaction_suitable(zone, order, highest_zoneidx,
+					 zone_page_state(zone, NR_FREE_PAGES));
 	/*
 	 * fragmentation index determines if allocation failures are due to
 	 * low memory or external fragmentation
@@ -2252,17 +2245,24 @@ enum compact_result compaction_suitable(struct zone *zone, int order,
 	 * excessive compaction for costly orders, but it should not be at the
 	 * expense of system stability.
 	 */
-	if (ret == COMPACT_CONTINUE && (order > PAGE_ALLOC_COSTLY_ORDER)) {
-		fragindex = fragmentation_index(zone, order);
-		if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
-			ret = COMPACT_NOT_SUITABLE_ZONE;
+	if (suitable) {
+		compact_result = COMPACT_CONTINUE;
+		if (order > PAGE_ALLOC_COSTLY_ORDER) {
+			int fragindex = fragmentation_index(zone, order);
+
+			if (fragindex >= 0 &&
+			    fragindex <= sysctl_extfrag_threshold) {
+				suitable = false;
+				compact_result = COMPACT_NOT_SUITABLE_ZONE;
+			}
+		}
+	} else {
+		compact_result = COMPACT_SKIPPED;
 	}
 
-	trace_mm_compaction_suitable(zone, order, ret);
-	if (ret == COMPACT_NOT_SUITABLE_ZONE)
-		ret = COMPACT_SKIPPED;
+	trace_mm_compaction_suitable(zone, order, compact_result);
 
-	return ret;
+	return suitable;
 }
 
 bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
@@ -2288,7 +2288,7 @@ bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
 		available = zone_reclaimable_pages(zone) / order;
 		available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
 		if (__compaction_suitable(zone, order, ac->highest_zoneidx,
-					  available) == COMPACT_CONTINUE)
+					  available))
 			return true;
 	}
 
@@ -2329,11 +2329,10 @@ compact_zone(struct compact_control *cc, struct capture_control *capc)
 				      cc->highest_zoneidx, cc->alloc_flags))
 			return COMPACT_SUCCESS;
 
-		ret = compaction_suitable(cc->zone, cc->order,
-					  cc->highest_zoneidx);
 		/* Compaction is likely to fail */
-		if (ret == COMPACT_SKIPPED)
-			return ret;
+		if (!compaction_suitable(cc->zone, cc->order,
+					 cc->highest_zoneidx))
+			return COMPACT_SKIPPED;
 	}
 
 	/*
@@ -2845,7 +2844,7 @@ static bool kcompactd_node_suitable(pg_data_t *pgdat)
 			continue;
 
 		if (compaction_suitable(zone, pgdat->kcompactd_max_order,
-					highest_zoneidx) == COMPACT_CONTINUE)
+					highest_zoneidx))
 			return true;
 	}
 
@@ -2887,8 +2886,7 @@ static void kcompactd_do_work(pg_data_t *pgdat)
 					min_wmark_pages(zone), zoneid, 0))
 			continue;
 
-		if (compaction_suitable(zone, cc.order,
-					zoneid) != COMPACT_CONTINUE)
+		if (!compaction_suitable(zone, cc.order, zoneid))
 			continue;
 
 		if (kthread_should_stop())

mm/vmscan.c

@@ -6404,8 +6404,7 @@ static inline bool should_continue_reclaim(struct pglist_data *pgdat,
 				      sc->reclaim_idx, 0))
 			return false;
 
-		if (compaction_suitable(zone, sc->order,
-					sc->reclaim_idx) == COMPACT_CONTINUE)
+		if (compaction_suitable(zone, sc->order, sc->reclaim_idx))
 			return false;
 	}
 
@@ -6601,8 +6600,7 @@ static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
 		return true;
 
 	/* Compaction cannot yet proceed. Do reclaim. */
-	if (compaction_suitable(zone, sc->order,
-				sc->reclaim_idx) == COMPACT_SKIPPED)
+	if (!compaction_suitable(zone, sc->order, sc->reclaim_idx))
 		return false;
 
 	/*