PM: hibernate: Don't ignore return from set_memory_ro()

set_memory_ro() and set_memory_rw() can fail, leaving memory
unprotected.

Take the returned value into account and abort in case of
failure.

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Reviewed-by: Kees Cook <keescook@chromium.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Author:       Christophe Leroy, 2024-02-18 09:40:58 +01:00
Committed by: Rafael J. Wysocki
Commit:       f4311756a8 (parent 3fec6e5961)
4 changed files, 24 additions, 15 deletions
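
The failure mode being fixed can be sketched as follows; this is an illustration, not code from the patch, and mark_restore_page_ro() is a hypothetical helper. set_memory_ro() returns an int (it can fail, for instance when changing permissions requires splitting a large mapping and that fails), so a caller that drops the return value may carry on as if the page were read-only when it is not:

	/*
	 * Hypothetical sketch only: the point is that set_memory_ro()
	 * reports failure through its return value, which must be checked.
	 */
	#include <linux/set_memory.h>

	static int mark_restore_page_ro(void *page_address)
	{
		int err = set_memory_ro((unsigned long)page_address, 1);

		if (err)		/* e.g. -ENOMEM: the page may still be writable */
			return err;	/* abort instead of assuming protection is in place */

		return 0;
	}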

kernel/power/power.h

@@ -153,7 +153,7 @@ extern unsigned int snapshot_additional_pages(struct zone *zone);
 extern unsigned long snapshot_get_image_size(void);
 extern int snapshot_read_next(struct snapshot_handle *handle);
 extern int snapshot_write_next(struct snapshot_handle *handle);
-extern void snapshot_write_finalize(struct snapshot_handle *handle);
+int snapshot_write_finalize(struct snapshot_handle *handle);
 extern int snapshot_image_loaded(struct snapshot_handle *handle);
 
 extern bool hibernate_acquire(void);

kernel/power/snapshot.c

@@ -58,22 +58,24 @@ static inline void hibernate_restore_protection_end(void)
 	hibernate_restore_protection_active = false;
 }
 
-static inline void hibernate_restore_protect_page(void *page_address)
+static inline int __must_check hibernate_restore_protect_page(void *page_address)
 {
 	if (hibernate_restore_protection_active)
-		set_memory_ro((unsigned long)page_address, 1);
+		return set_memory_ro((unsigned long)page_address, 1);
+	return 0;
 }
 
-static inline void hibernate_restore_unprotect_page(void *page_address)
+static inline int hibernate_restore_unprotect_page(void *page_address)
 {
 	if (hibernate_restore_protection_active)
-		set_memory_rw((unsigned long)page_address, 1);
+		return set_memory_rw((unsigned long)page_address, 1);
+	return 0;
 }
 
 #else
 static inline void hibernate_restore_protection_begin(void) {}
 static inline void hibernate_restore_protection_end(void) {}
-static inline void hibernate_restore_protect_page(void *page_address) {}
-static inline void hibernate_restore_unprotect_page(void *page_address) {}
+static inline int __must_check hibernate_restore_protect_page(void *page_address) {return 0; }
+static inline int hibernate_restore_unprotect_page(void *page_address) {return 0; }
 
 #endif /* CONFIG_STRICT_KERNEL_RWX && CONFIG_ARCH_HAS_SET_MEMORY */
@@ -2832,7 +2834,9 @@ next:
 		}
 	} else {
 		copy_last_highmem_page();
-		hibernate_restore_protect_page(handle->buffer);
+		error = hibernate_restore_protect_page(handle->buffer);
+		if (error)
+			return error;
 		handle->buffer = get_buffer(&orig_bm, &ca);
 		if (IS_ERR(handle->buffer))
 			return PTR_ERR(handle->buffer);
@@ -2858,15 +2862,18 @@ next:
  * stored in highmem. Additionally, it recycles bitmap memory that's not
  * necessary any more.
  */
-void snapshot_write_finalize(struct snapshot_handle *handle)
+int snapshot_write_finalize(struct snapshot_handle *handle)
 {
+	int error;
+
 	copy_last_highmem_page();
-	hibernate_restore_protect_page(handle->buffer);
+	error = hibernate_restore_protect_page(handle->buffer);
 	/* Do that only if we have loaded the image entirely */
 	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages + nr_zero_pages) {
 		memory_bm_recycle(&orig_bm);
 		free_highmem_data();
 	}
+	return error;
 }
 
 int snapshot_image_loaded(struct snapshot_handle *handle)
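
Note the __must_check annotation added to hibernate_restore_protect_page() above: it makes the compiler emit a -Wunused-result warning if a future caller goes back to discarding the return value. A stand-alone illustration with invented function names; only the __must_check attribute itself is the kernel's:

	/* Illustration of __must_check; both functions here are hypothetical. */
	#include <linux/compiler.h>

	static inline int __must_check protect_page_sketch(unsigned long addr)
	{
		return 0;	/* stands in for a set_memory_ro()-style call */
	}

	static void restore_sketch(unsigned long addr)
	{
		protect_page_sketch(addr);	/* warning: ignoring return value */

		if (protect_page_sketch(addr))	/* checked use compiles cleanly */
			return;
	}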

kernel/power/swap.c

@@ -1134,8 +1134,8 @@ static int load_image(struct swap_map_handle *handle,
 		ret = err2;
 	if (!ret) {
 		pr_info("Image loading done\n");
-		snapshot_write_finalize(snapshot);
-		if (!snapshot_image_loaded(snapshot))
+		ret = snapshot_write_finalize(snapshot);
+		if (!ret && !snapshot_image_loaded(snapshot))
 			ret = -ENODATA;
 	}
 	swsusp_show_speed(start, stop, nr_to_read, "Read");
@@ -1486,8 +1486,8 @@ out_finish:
 	stop = ktime_get();
 	if (!ret) {
 		pr_info("Image loading done\n");
-		snapshot_write_finalize(snapshot);
-		if (!snapshot_image_loaded(snapshot))
+		ret = snapshot_write_finalize(snapshot);
+		if (!ret && !snapshot_image_loaded(snapshot))
 			ret = -ENODATA;
 		if (!ret) {
 			if (swsusp_header->flags & SF_CRC32_MODE) {

kernel/power/user.c

@@ -317,7 +317,9 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
 		break;
 
 	case SNAPSHOT_ATOMIC_RESTORE:
-		snapshot_write_finalize(&data->handle);
+		error = snapshot_write_finalize(&data->handle);
+		if (error)
+			break;
 		if (data->mode != O_WRONLY || !data->frozen ||
 		    !snapshot_image_loaded(&data->handle)) {
 			error = -EPERM;
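
Taken together, a failure from set_memory_ro() now propagates up to the SNAPSHOT_ATOMIC_RESTORE ioctl instead of being silently dropped. A condensed, purely illustrative view of that chain (bodies simplified, *_sketch names are invented; only the kernel functions named in the comments are real):

	/* Condensed sketch of the post-patch error path; not literal kernel code. */
	#include <linux/set_memory.h>

	static int protect_page_sketch(void *page_address)
	{
		return set_memory_ro((unsigned long)page_address, 1);	/* may fail */
	}

	static int write_finalize_sketch(void *buffer)
	{
		return protect_page_sketch(buffer);	/* what snapshot_write_finalize() now returns */
	}

	static long atomic_restore_sketch(void *buffer)
	{
		int error = write_finalize_sketch(buffer);

		if (error)
			return error;	/* abort the restore rather than run on writable pages */
		return 0;		/* otherwise continue towards hibernation_restore() */
	}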