mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
Fix kmalloc slab creation sequence
This patch restores the slab creation sequence that was broken by commit
4066c33d03 and also reverts the portions that introduced the
KMALLOC_LOOP_XXX macros. Those can never really work since the slab
creation is much more complex than just going from a minimum to a
maximum number.

The latest upstream kernel boots cleanly on my machine with a 64 bit x86
configuration under KVM using either SLAB or SLUB.

Fixes: 4066c33d03 ("support the slub_debug boot option")
Reported-by: Theodore Ts'o <tytso@mit.edu>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit a9730fca99
parent 88793e5c77
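The fix hinges on the fixed index layout of the kmalloc array: index 1 holds the 96-byte cache, index 2 holds the 192-byte cache, and every other index n holds the 2^n-byte cache, so a loop that simply walks indexes from a minimum to a maximum does not visit the caches in ascending size order. The user-space sketch below only illustrates that mapping; SKETCH_KMALLOC_SHIFT_HIGH and sketch_index_to_size() are made-up names for the illustration, not kernel symbols.

#include <stdio.h>

#define SKETCH_KMALLOC_SHIFT_HIGH 13	/* assumed upper index, illustration only */

/* Mirrors the documented kmalloc index layout: 1 -> 96, 2 -> 192, n -> 2^n. */
static unsigned int sketch_index_to_size(int idx)
{
	if (idx == 1)
		return 96;
	if (idx == 2)
		return 192;
	return 1u << idx;
}

int main(void)
{
	int i;

	for (i = 1; i <= SKETCH_KMALLOC_SHIFT_HIGH; i++)
		printf("kmalloc_caches[%d] -> kmalloc-%u\n",
		       i, sketch_index_to_size(i));
	return 0;
}

Its output makes the ordering problem visible: kmalloc-96 and kmalloc-192 sit at indexes 1 and 2, below the indexes of much smaller power-of-two caches.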
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -153,30 +153,8 @@ size_t ksize(const void *);
 #define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
 #define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
 #define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
-/*
- * The KMALLOC_LOOP_LOW is the definition for the for loop index start number
- * to create the kmalloc_caches object in create_kmalloc_caches(). The first
- * and the second are 96 and 192. You can see that in the kmalloc_index(), if
- * the KMALLOC_MIN_SIZE <= 32, then return 1 (96). If KMALLOC_MIN_SIZE <= 64,
- * then return 2 (192). If the KMALLOC_MIN_SIZE is bigger than 64, we don't
- * need to initialize 96 and 192. Go directly to start the KMALLOC_SHIFT_LOW.
- */
-#if KMALLOC_MIN_SIZE <= 32
-#define KMALLOC_LOOP_LOW 1
-#elif KMALLOC_MIN_SIZE <= 64
-#define KMALLOC_LOOP_LOW 2
-#else
-#define KMALLOC_LOOP_LOW KMALLOC_SHIFT_LOW
-#endif
-
 #else
 #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
-/*
- * The KMALLOC_MIN_SIZE of slub/slab/slob is 2^3/2^5/2^3. So, even slab is used.
- * The KMALLOC_MIN_SIZE <= 32. The kmalloc-96 and kmalloc-192 should also be
- * initialized.
- */
-#define KMALLOC_LOOP_LOW 1
 #endif
 
 /*
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -855,6 +855,12 @@ void __init setup_kmalloc_cache_index_table(void)
 	}
 }
 
+static void new_kmalloc_cache(int idx, unsigned long flags)
+{
+	kmalloc_caches[idx] = create_kmalloc_cache(kmalloc_info[idx].name,
+					kmalloc_info[idx].size, flags);
+}
+
 /*
  * Create the kmalloc array. Some of the regular kmalloc arrays
  * may already have been created because they were needed to
@@ -864,25 +870,19 @@ void __init create_kmalloc_caches(unsigned long flags)
 {
 	int i;
 
-	for (i = KMALLOC_LOOP_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
-		if (!kmalloc_caches[i]) {
-			kmalloc_caches[i] = create_kmalloc_cache(
-						kmalloc_info[i].name,
-						kmalloc_info[i].size,
-						flags);
-		}
+	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
+		if (!kmalloc_caches[i])
+			new_kmalloc_cache(i, flags);
 
 		/*
-		 * "i == 2" is the "kmalloc-192" case which is the last special
-		 * case for initialization and it's the point to jump to
-		 * allocate the minimize size of the object. In slab allocator,
-		 * the KMALLOC_SHIFT_LOW = 5. So, it needs to skip 2^3 and 2^4
-		 * and go straight to allocate 2^5. If the ARCH_DMA_MINALIGN is
-		 * defined, it may be larger than 2^5 and here is also the
-		 * trick to skip the empty gap.
+		 * Caches that are not of the two-to-the-power-of size.
+		 * These have to be created immediately after the
+		 * earlier power of two caches
 		 */
-		if (i == 2)
-			i = (KMALLOC_SHIFT_LOW - 1);
+		if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
+			new_kmalloc_cache(1, flags);
+		if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
+			new_kmalloc_cache(2, flags);
 	}
 
 	/* Kmalloc array is now usable */
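As a concrete illustration of the restored ordering, here is a small user-space model of the loop in the hunk above. SHIFT_LOW, SHIFT_HIGH, MIN_SIZE, created[] and model_new_cache() are stand-ins chosen for the sketch, not the kernel's own symbols, and nothing here creates real slab caches; the point is only the order in which sizes are emitted.

#include <stdio.h>

#define SHIFT_LOW	3		/* assumed SLUB-like minimum: 2^3 = 8 bytes */
#define SHIFT_HIGH	13		/* assumed maximum: 2^13 = 8192 bytes */
#define MIN_SIZE	(1 << SHIFT_LOW)

static int created[SHIFT_HIGH + 1];	/* models kmalloc_caches[]: nonzero = cache exists */

static void model_new_cache(int idx, unsigned int size)
{
	created[idx] = 1;
	printf("create kmalloc-%u (index %d)\n", size, idx);
}

int main(void)
{
	int i;

	for (i = SHIFT_LOW; i <= SHIFT_HIGH; i++) {
		if (!created[i])
			model_new_cache(i, 1u << i);

		/* Non-power-of-two caches, slotted in right after 64 and 128. */
		if (MIN_SIZE <= 32 && !created[1] && i == 6)
			model_new_cache(1, 96);
		if (MIN_SIZE <= 64 && !created[2] && i == 7)
			model_new_cache(2, 192);
	}
	return 0;
}

With these assumed constants the model prints 8, 16, 32, 64, 96, 128, 192, 256, ... in ascending size order; raising SHIFT_LOW to 5 (a 32-byte minimum) still yields 32, 64, 96, 128, 192, ... because the 96- and 192-byte entries are keyed to the 64- and 128-byte iterations rather than to the loop's starting index.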