Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
zram: remove UNDER_WB and simplify writeback
We now have only one active post-processing operation at any time, so we no longer have the race conditions that we had before. If a slot selected for post-processing gets freed, or freed and reallocated, it loses its PP_SLOT flag and there is no way for such a slot to gain the PP_SLOT flag again until the current post-processing terminates.

Link: https://lkml.kernel.org/r/20240917021020.883356-8-senozhatsky@chromium.org
Signed-off-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Minchan Kim <minchan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 1a1d0f8992
commit 5e99893444
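The reasoning in the commit message can be illustrated with a small model. Below is a minimal userspace sketch, not kernel code: the flag names mirror zram's, but the helpers (pp_select(), slot_free(), slot_realloc(), pp_writeback_one()) and the single-threaded setup are illustrative assumptions only. It shows why re-checking ZRAM_PP_SLOT is enough once freeing a slot is known to drop the flag and nothing can set it again until the current post-processing run ends.

/*
 * Minimal sketch of the PP_SLOT invariant described above; not the
 * kernel implementation. Flag names mirror zram's, helper names are
 * illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>

enum { FLAG_ALLOCATED = 1 << 0, FLAG_PP_SLOT = 1 << 1, FLAG_WB = 1 << 2 };

struct slot { unsigned int flags; };

/* Selection for post-processing: only one PP operation runs at a time. */
static void pp_select(struct slot *s)
{
	if (s->flags & FLAG_ALLOCATED)
		s->flags |= FLAG_PP_SLOT;
}

/* Freeing a slot drops PP_SLOT; nothing re-sets it until PP finishes. */
static void slot_free(struct slot *s)
{
	s->flags = 0;
}

/* Reallocation after a free starts from a clean flags word. */
static void slot_realloc(struct slot *s)
{
	s->flags = FLAG_ALLOCATED;
}

/* Writeback step: re-checking PP_SLOT alone is sufficient, no UNDER_WB. */
static bool pp_writeback_one(struct slot *s)
{
	if (!(s->flags & FLAG_PP_SLOT))
		return false;	/* slot changed under us, skip it */
	s->flags &= ~FLAG_PP_SLOT;
	s->flags |= FLAG_WB;
	return true;
}

int main(void)
{
	struct slot a = { .flags = FLAG_ALLOCATED };
	struct slot b = { .flags = FLAG_ALLOCATED };

	pp_select(&a);
	pp_select(&b);

	/* Slot "b" is freed and reallocated before writeback reaches it. */
	slot_free(&b);
	slot_realloc(&b);

	printf("a written back: %d\n", pp_writeback_one(&a));	/* prints 1 */
	printf("b written back: %d\n", pp_writeback_one(&b));	/* prints 0 */
	return 0;
}

Built with a plain cc invocation, the second attempt prints 0 because slot "b" lost its PP_SLOT flag when it was freed and reallocated, which is exactly the case the removed UNDER_WB/IDLE dance used to guard against.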
drivers/block/zram/zram_drv.c

@@ -390,10 +390,7 @@ static void mark_idle(struct zram *zram, ktime_t cutoff)
 
 	for (index = 0; index < nr_pages; index++) {
 		/*
-		 * Do not mark ZRAM_UNDER_WB slot as ZRAM_IDLE to close race.
-		 * See the comment in writeback_store.
-		 *
-		 * Also do not mark ZRAM_SAME slots as ZRAM_IDLE, because no
+		 * Do not mark ZRAM_SAME slots as ZRAM_IDLE, because no
 		 * post-processing (recompress, writeback) happens to the
 		 * ZRAM_SAME slot.
 		 *
@@ -402,7 +399,6 @@ static void mark_idle(struct zram *zram, ktime_t cutoff)
 		zram_slot_lock(zram, index);
 		if (!zram_allocated(zram, index) ||
 		    zram_test_flag(zram, index, ZRAM_WB) ||
-		    zram_test_flag(zram, index, ZRAM_UNDER_WB) ||
 		    zram_test_flag(zram, index, ZRAM_SAME)) {
 			zram_slot_unlock(zram, index);
 			continue;
@@ -821,22 +817,17 @@ static ssize_t writeback_store(struct device *dev,
 
 		index = pps->index;
 		zram_slot_lock(zram, index);
+		/*
+		 * scan_slots() sets ZRAM_PP_SLOT and relases slot lock, so
+		 * slots can change in the meantime. If slots are accessed or
+		 * freed they lose ZRAM_PP_SLOT flag and hence we don't
+		 * post-process them.
+		 */
 		if (!zram_test_flag(zram, index, ZRAM_PP_SLOT))
 			goto next;
-		/*
-		 * Clearing ZRAM_UNDER_WB is duty of caller.
-		 * IOW, zram_free_page never clear it.
-		 */
-		zram_set_flag(zram, index, ZRAM_UNDER_WB);
-		/* Need for hugepage writeback racing */
-		zram_set_flag(zram, index, ZRAM_IDLE);
 		zram_slot_unlock(zram, index);
-		if (zram_read_page(zram, page, index, NULL)) {
-			zram_slot_lock(zram, index);
-			zram_clear_flag(zram, index, ZRAM_UNDER_WB);
-			zram_clear_flag(zram, index, ZRAM_IDLE);
-			zram_slot_unlock(zram, index);
+
+		if (zram_read_page(zram, page, index, NULL)) {
 			release_pp_slot(zram, pps);
 			continue;
 		}
@@ -852,11 +843,6 @@ static ssize_t writeback_store(struct device *dev,
 		 */
 		err = submit_bio_wait(&bio);
 		if (err) {
-			zram_slot_lock(zram, index);
-			zram_clear_flag(zram, index, ZRAM_UNDER_WB);
-			zram_clear_flag(zram, index, ZRAM_IDLE);
-			zram_slot_unlock(zram, index);
-
 			release_pp_slot(zram, pps);
 			/*
 			 * BIO errors are not fatal, we continue and simply
@@ -871,25 +857,19 @@ static ssize_t writeback_store(struct device *dev,
 		}
 
 		atomic64_inc(&zram->stats.bd_writes);
-		/*
-		 * We released zram_slot_lock so need to check if the slot was
-		 * changed. If there is freeing for the slot, we can catch it
-		 * easily by zram_allocated.
-		 * A subtle case is the slot is freed/reallocated/marked as
-		 * ZRAM_IDLE again. To close the race, idle_store doesn't
-		 * mark ZRAM_IDLE once it found the slot was ZRAM_UNDER_WB.
-		 * Thus, we could close the race by checking ZRAM_IDLE bit.
-		 */
 		zram_slot_lock(zram, index);
-		if (!zram_allocated(zram, index) ||
-		    !zram_test_flag(zram, index, ZRAM_IDLE)) {
-			zram_clear_flag(zram, index, ZRAM_UNDER_WB);
-			zram_clear_flag(zram, index, ZRAM_IDLE);
+		/*
+		 * Same as above, we release slot lock during writeback so
+		 * slot can change under us: slot_free() or slot_free() and
+		 * reallocation (zram_write_page()). In both cases slot loses
+		 * ZRAM_PP_SLOT flag. No concurrent post-processing can set
+		 * ZRAM_PP_SLOT on such slots until current post-processing
+		 * finishes.
+		 */
+		if (!zram_test_flag(zram, index, ZRAM_PP_SLOT))
 			goto next;
-		}
 
 		zram_free_page(zram, index);
-		zram_clear_flag(zram, index, ZRAM_UNDER_WB);
 		zram_set_flag(zram, index, ZRAM_WB);
 		zram_set_element(zram, index, blk_idx);
 		blk_idx = 0;
@@ -1538,7 +1518,6 @@ static void zram_free_page(struct zram *zram, size_t index)
 	atomic64_dec(&zram->stats.pages_stored);
 	zram_set_handle(zram, index, 0);
 	zram_set_obj_size(zram, index, 0);
-	WARN_ON_ONCE(zram->table[index].flags & ~(1UL << ZRAM_UNDER_WB));
 }
 
 /*
drivers/block/zram/zram_drv.h

@@ -47,7 +47,6 @@
 enum zram_pageflags {
 	ZRAM_SAME = ZRAM_FLAG_SHIFT,	/* Page consists the same element */
 	ZRAM_WB,	/* page is stored on backing_device */
-	ZRAM_UNDER_WB,	/* page is under writeback */
 	ZRAM_PP_SLOT,	/* Selected for post-processing */
 	ZRAM_HUGE,	/* Incompressible page */
 	ZRAM_IDLE,	/* not accessed page since last idle marking */