Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-01-11 15:49:56 +00:00)
mm/vmscan: update stale comments
Update some comments that became stale since the transition from per-zone to per-node reclaim.

Link: http://lkml.kernel.org/r/20180315164553.17856-2-aryabinin@virtuozzo.com
Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Tejun Heo <tj@kernel.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
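For context on the rename these comments track: with per-node reclaim, congestion/writeback state moved from per-zone flags (e.g. ZONE_WRITEBACK on struct zone) to per-node flags (e.g. PGDAT_WRITEBACK on struct pglist_data). Below is a minimal, self-contained userspace sketch of that idea only; the struct layout and helper names are illustrative stand-ins, not the kernel's actual code.

    #include <stdio.h>

    /*
     * Illustrative sketch only: models reclaim congestion state living on
     * the per-node structure (as the kernel's pglist_data->flags does)
     * rather than on each zone. The flag names mirror real kernel flags,
     * but the struct and helpers are simplified stand-ins.
     */
    enum pgdat_flags_sketch {
            PGDAT_CONGESTED,   /* scanned dirty pages were backed by a congested BDI */
            PGDAT_WRITEBACK,   /* reclaim keeps meeting pages under writeback */
    };

    struct pgdat_sketch {
            unsigned long flags;   /* one bit per flag above */
    };

    static void pgdat_set_flag(struct pgdat_sketch *pgdat, enum pgdat_flags_sketch f)
    {
            pgdat->flags |= 1UL << f;
    }

    static int pgdat_test_flag(const struct pgdat_sketch *pgdat, enum pgdat_flags_sketch f)
    {
            return !!(pgdat->flags & (1UL << f));
    }

    int main(void)
    {
            struct pgdat_sketch node = { .flags = 0 };

            /* kswapd would set this after meeting too many writeback pages... */
            pgdat_set_flag(&node, PGDAT_WRITEBACK);

            /* ...and later reclaim decisions key off the node-wide flag. */
            printf("PGDAT_WRITEBACK set: %d\n",
                   pgdat_test_flag(&node, PGDAT_WRITEBACK));
            return 0;
    }

The point of the sketch is only the data-structure move: a single flags word per node, not per zone, which is why the comments below now say "node" where they used to say "zone".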
This commit is contained in:
parent d79f7aa496
commit 894befec4d
 mm/vmscan.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -926,7 +926,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			(PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
 
 		/*
-		 * The number of dirty pages determines if a zone is marked
+		 * The number of dirty pages determines if a node is marked
 		 * reclaim_congested which affects wait_iff_congested. kswapd
 		 * will stall and start writing pages if the tail of the LRU
 		 * is all dirty unqueued pages.
@@ -1764,7 +1764,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	 * as there is no guarantee the dirtying process is throttled in the
 	 * same way balance_dirty_pages() manages.
 	 *
-	 * Once a zone is flagged ZONE_WRITEBACK, kswapd will count the number
+	 * Once a node is flagged PGDAT_WRITEBACK, kswapd will count the number
 	 * of pages under pages flagged for immediate reclaim and stall if any
 	 * are encountered in the nr_immediate check below.
 	 */
@@ -1791,7 +1791,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	 */
 	if (sane_reclaim(sc)) {
 		/*
-		 * Tag a zone as congested if all the dirty pages scanned were
+		 * Tag a node as congested if all the dirty pages scanned were
 		 * backed by a congested BDI and wait_iff_congested will stall.
 		 */
 		if (stat.nr_dirty && stat.nr_dirty == stat.nr_congested)
@@ -1812,7 +1812,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	}
 
 	/*
-	 * Stall direct reclaim for IO completions if underlying BDIs or zone
+	 * Stall direct reclaim for IO completions if underlying BDIs and node
 	 * is congested. Allow kswapd to continue until it starts encountering
 	 * unqueued dirty pages or cycling through the LRU too quickly.
 	 */
@@ -3808,7 +3808,7 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in
 
 	if (node_pagecache_reclaimable(pgdat) > pgdat->min_unmapped_pages) {
 		/*
-		 * Free memory by calling shrink zone with increasing
+		 * Free memory by calling shrink node with increasing
 		 * priorities until we have enough memory freed.
 		 */
 		do {
|