mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
synced 2024-12-28 16:52:18 +00:00
f0d3302073
kvmalloc() doesn't support allocations > INT_MAX, but vmalloc() does - the limit should be lifted, but we can work around this for now.

A user with a 75 TB filesystem reported the following journal replay error:
https://github.com/koverstreet/bcachefs/issues/769

In journal replay we have to sort and dedup all the keys from the journal, which means we need a large contiguous allocation. Given that the user has 128GB of ram, the 2GB limit on allocation size has become far too small.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
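Stripped of the darray bookkeeping, the workaround described above boils down to a simple size check: kvmalloc() refuses requests larger than INT_MAX, so bigger requests go straight to vmalloc(). The sketch below is for illustration only and is not part of the commit; the helper name alloc_possibly_huge() is hypothetical.

#include <linux/slab.h>
#include <linux/vmalloc.h>

/*
 * Sketch only: kvmalloc() returns NULL for sizes above INT_MAX, so for
 * larger requests fall back to vmalloc().  kvfree() can release memory
 * obtained from either allocator.  (Helper name is hypothetical.)
 */
static void *alloc_possibly_huge(size_t bytes, gfp_t gfp)
{
	return bytes < INT_MAX
		? kvmalloc(bytes, gfp)
		: vmalloc(bytes);
}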
39 lines
934 B
C
// SPDX-License-Identifier: GPL-2.0

#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "darray.h"

int __bch2_darray_resize_noprof(darray_char *d, size_t element_size, size_t new_size, gfp_t gfp)
{
	if (new_size > d->size) {
		new_size = roundup_pow_of_two(new_size);

		/*
		 * This is a workaround: kvmalloc() doesn't support > INT_MAX
		 * allocations, but vmalloc() does.
		 * The limit needs to be lifted from kvmalloc, and when it does
		 * we'll go back to just using that.
		 */
		size_t bytes;
		if (unlikely(check_mul_overflow(new_size, element_size, &bytes)))
			return -ENOMEM;

		void *data = likely(bytes < INT_MAX)
			? kvmalloc_noprof(bytes, gfp)
			: vmalloc_noprof(bytes);
		if (!data)
			return -ENOMEM;

		if (d->size)
			memcpy(data, d->data, d->size * element_size);
		if (d->data != d->preallocated)
			kvfree(d->data);
		d->data = data;
		d->size = new_size;
	}

	return 0;
}
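For context, a minimal usage sketch, assuming the darray_push()/darray_for_each()/darray_exit() helpers declared in darray.h (exact macro details may differ): pushing past the current capacity routes through __bch2_darray_resize_noprof() above, which picks kvmalloc() or vmalloc() depending on the resulting buffer size. The function collect_values() is an illustrative name, not code from this commit.

#include <linux/printk.h>
#include <linux/types.h>

#include "darray.h"

/* Sketch: collect u64 values in a growable darray (names are illustrative). */
static int collect_values(u64 nr)
{
	DARRAY(u64) vals = {};
	int ret = 0;

	for (u64 i = 0; i < nr; i++) {
		/* grows the backing buffer as needed, via the resize path above */
		ret = darray_push(&vals, i);
		if (ret)
			goto out;
	}

	darray_for_each(vals, v)
		pr_info("%llu\n", *v);
out:
	darray_exit(&vals);
	return ret;
}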