mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-12-29 09:16:33 +00:00
vmscan: factor out kswapd sleeping logic from kswapd()
Currently, kswapd() has deep nesting and is slightly hard to read. Clean this up. Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com> Cc: Mel Gorman <mel@csn.ul.ie> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
c3f0da6315
commit
f0bc0a60b1
92
mm/vmscan.c
92
mm/vmscan.c
@@ -2371,6 +2371,50 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
|
||||
return sc.nr_reclaimed;
|
||||
}
|
||||
|
||||
/*
 * kswapd_try_to_sleep - put kswapd to sleep when there is no reclaim work.
 * @pgdat: node whose kswapd is going idle
 * @order: allocation order kswapd was last woken for
 *
 * Factored out of kswapd() to flatten its nesting.  First naps for a short
 * interval (HZ/10); if the nap was not cut short prematurely, goes fully to
 * sleep until explicitly woken.  Otherwise counts which watermark was hit
 * quickly.  Returns immediately when freezing or when the thread should stop.
 */
static void kswapd_try_to_sleep(pg_data_t *pgdat, int order)
{
	long remaining = 0;
	DEFINE_WAIT(wait);

	/* Do not sleep here if we are being frozen or asked to exit. */
	if (freezing(current) || kthread_should_stop())
		return;

	prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);

	/* Try to sleep for a short interval */
	if (!sleeping_prematurely(pgdat, order, remaining)) {
		remaining = schedule_timeout(HZ/10);
		finish_wait(&pgdat->kswapd_wait, &wait);
		prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
	}

	/*
	 * After a short sleep, check if it was a premature sleep. If not, then
	 * go fully to sleep until explicitly woken up.
	 */
	if (!sleeping_prematurely(pgdat, order, remaining)) {
		trace_mm_vmscan_kswapd_sleep(pgdat->node_id);

		/*
		 * vmstat counters are not perfectly accurate and the estimated
		 * value for counters such as NR_FREE_PAGES can deviate from the
		 * true value by nr_online_cpus * threshold. To avoid the zone
		 * watermarks being breached while under pressure, we reduce the
		 * per-cpu vmstat threshold while kswapd is awake and restore
		 * them before going back to sleep.
		 */
		set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
		schedule();
		set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
	} else {
		/* Woken early: record which watermark was hit quickly. */
		if (remaining)
			count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
		else
			count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
	}
	finish_wait(&pgdat->kswapd_wait, &wait);
}
|
||||
|
||||
/*
|
||||
* The background pageout daemon, started as a kernel thread
|
||||
* from the init process.
|
||||
@@ -2389,7 +2433,7 @@ static int kswapd(void *p)
|
||||
unsigned long order;
|
||||
pg_data_t *pgdat = (pg_data_t*)p;
|
||||
struct task_struct *tsk = current;
|
||||
DEFINE_WAIT(wait);
|
||||
|
||||
struct reclaim_state reclaim_state = {
|
||||
.reclaimed_slab = 0,
|
||||
};
|
||||
@@ -2421,7 +2465,6 @@ static int kswapd(void *p)
|
||||
unsigned long new_order;
|
||||
int ret;
|
||||
|
||||
prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
|
||||
new_order = pgdat->kswapd_max_order;
|
||||
pgdat->kswapd_max_order = 0;
|
||||
if (order < new_order) {
|
||||
@@ -2431,52 +2474,9 @@ static int kswapd(void *p)
|
||||
*/
|
||||
order = new_order;
|
||||
} else {
|
||||
if (!freezing(current) && !kthread_should_stop()) {
|
||||
long remaining = 0;
|
||||
|
||||
/* Try to sleep for a short interval */
|
||||
if (!sleeping_prematurely(pgdat, order, remaining)) {
|
||||
remaining = schedule_timeout(HZ/10);
|
||||
finish_wait(&pgdat->kswapd_wait, &wait);
|
||||
prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
|
||||
}
|
||||
|
||||
/*
|
||||
* After a short sleep, check if it was a
|
||||
* premature sleep. If not, then go fully
|
||||
* to sleep until explicitly woken up
|
||||
*/
|
||||
if (!sleeping_prematurely(pgdat, order, remaining)) {
|
||||
trace_mm_vmscan_kswapd_sleep(pgdat->node_id);
|
||||
|
||||
/*
|
||||
* vmstat counters are not perfectly
|
||||
* accurate and the estimated value
|
||||
* for counters such as NR_FREE_PAGES
|
||||
* can deviate from the true value by
|
||||
* nr_online_cpus * threshold. To
|
||||
* avoid the zone watermarks being
|
||||
* breached while under pressure, we
|
||||
* reduce the per-cpu vmstat threshold
|
||||
* while kswapd is awake and restore
|
||||
* them before going back to sleep.
|
||||
*/
|
||||
set_pgdat_percpu_threshold(pgdat,
|
||||
calculate_normal_threshold);
|
||||
schedule();
|
||||
set_pgdat_percpu_threshold(pgdat,
|
||||
calculate_pressure_threshold);
|
||||
} else {
|
||||
if (remaining)
|
||||
count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
|
||||
else
|
||||
count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
|
||||
}
|
||||
}
|
||||
|
||||
kswapd_try_to_sleep(pgdat, order);
|
||||
order = pgdat->kswapd_max_order;
|
||||
}
|
||||
finish_wait(&pgdat->kswapd_wait, &wait);
|
||||
|
||||
ret = try_to_freeze();
|
||||
if (kthread_should_stop())
|
||||
|
Loading…
Reference in New Issue
Block a user