zram: permit only one post-processing operation at a time

Both recompress and writeback will soon unlock slots during processing,
which makes things too complex wrt possible race-conditions.  We still
want to clear PP_SLOT in slot_free, because this is how we figure out that
a slot that was selected for post-processing has been released under us;
when we start post-processing we check whether the slot still has PP_SLOT
set.  At the same time, theoretically, we can have something like this:

CPU0			    CPU1

recompress
scan slots
set PP_SLOT
unlock slot
			slot_free
			clear PP_SLOT

			allocate PP_SLOT
			writeback
			scan slots
			set PP_SLOT
			unlock slot
select PP-slot
test PP_SLOT

So recompress will not detect that slot has been re-used and re-selected
for concurrent writeback post-processing.

Make sure that we only permit one post-processing operation at a time.
Now that recompress and writeback post-processing don't race against each
other, we only need to handle slot re-use (slot_free and write), which is
handled individually by each pp operation.

Having recompress and writeback competing for the same slots is not
exactly good anyway (can't imagine anyone doing that).

Link: https://lkml.kernel.org/r/20240917021020.883356-3-senozhatsky@chromium.org
Signed-off-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Minchan Kim <minchan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
Sergey Senozhatsky 2024-09-17 11:09:07 +09:00 committed by Andrew Morton
parent bf779fb9af
commit 58652f2b6d
3 changed files with 19 additions and 0 deletions

View File

@ -47,6 +47,8 @@ The list of possible return codes:
-ENOMEM zram was not able to allocate enough memory to fulfil your
needs.
-EINVAL invalid input has been provided.
-EAGAIN re-try operation later (e.g. when attempting to run recompress
and writeback simultaneously).
======== =============================================================
If you use 'echo', the returned value is set by the 'echo' utility,

View File

@ -627,6 +627,12 @@ static ssize_t writeback_store(struct device *dev,
goto release_init_lock;
}
/* Do not permit concurrent post-processing actions. */
if (atomic_xchg(&zram->pp_in_progress, 1)) {
up_read(&zram->init_lock);
return -EAGAIN;
}
if (!zram->backing_dev) {
ret = -ENODEV;
goto release_init_lock;
@ -753,6 +759,7 @@ static ssize_t writeback_store(struct device *dev,
free_block_bdev(zram, blk_idx);
__free_page(page);
release_init_lock:
atomic_set(&zram->pp_in_progress, 0);
up_read(&zram->init_lock);
return ret;
@ -1883,6 +1890,12 @@ static ssize_t recompress_store(struct device *dev,
goto release_init_lock;
}
/* Do not permit concurrent post-processing actions. */
if (atomic_xchg(&zram->pp_in_progress, 1)) {
up_read(&zram->init_lock);
return -EAGAIN;
}
if (algo) {
bool found = false;
@ -1950,6 +1963,7 @@ static ssize_t recompress_store(struct device *dev,
__free_page(page);
release_init_lock:
atomic_set(&zram->pp_in_progress, 0);
up_read(&zram->init_lock);
return ret;
}
@ -2146,6 +2160,7 @@ static void zram_reset_device(struct zram *zram)
zram->disksize = 0;
zram_destroy_comps(zram);
memset(&zram->stats, 0, sizeof(zram->stats));
atomic_set(&zram->pp_in_progress, 0);
reset_bdev(zram);
comp_algorithm_set(zram, ZRAM_PRIMARY_COMP, default_compressor);
@ -2383,6 +2398,7 @@ static int zram_add(void)
zram->disk->fops = &zram_devops;
zram->disk->private_data = zram;
snprintf(zram->disk->disk_name, 16, "zram%d", device_id);
atomic_set(&zram->pp_in_progress, 0);
/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize */
set_capacity(zram->disk, 0);

View File

@ -140,5 +140,6 @@ struct zram {
#ifdef CONFIG_ZRAM_MEMORY_TRACKING
struct dentry *debugfs_dir;
#endif
atomic_t pp_in_progress;
};
#endif