Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git (synced 2025-01-07 13:43:51 +00:00)
mm/vmalloc: invoke classify_va_fit_type() in adjust_va_to_fit_type()
Patch series "Cleanup patches of vmalloc", v2.

Some cleanup patches found when reading the vmalloc code.

This patch (of 4):

adjust_va_to_fit_type() checks all values of the passed-in fit type, including NOTHING_FIT in the else branch. However, every call site already checks for NOTHING_FIT right after classify_va_fit_type() and before adjust_va_to_fit_type() is called, so the check inside adjust_va_to_fit_type() is redundant.

In fact, the two functions are tightly coupled, since classify_va_fit_type() does the preparation work for adjust_va_to_fit_type(). Moving the invocation of classify_va_fit_type() inside adjust_va_to_fit_type() therefore simplifies the code logic and makes the redundant NOTHING_FIT check go away.

Link: https://lkml.kernel.org/r/20220607105958.382076-1-bhe@redhat.com
Link: https://lkml.kernel.org/r/20220607105958.382076-2-bhe@redhat.com
Signed-off-by: Baoquan He <bhe@redhat.com>
Suggested-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Reviewed-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Cc: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
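For illustration, here is the call-site change condensed from the __alloc_vmap_area() hunk of the diff below; everything shown comes from mm/vmalloc.c, with the surrounding context trimmed:

	/* Before: classify first, pre-check NOTHING_FIT, then pass the type along. */
	type = classify_va_fit_type(va, nva_start_addr, size);
	if (WARN_ON_ONCE(type == NOTHING_FIT))
		return vend;

	ret = adjust_va_to_fit_type(va, nva_start_addr, size, type);
	if (ret)
		return vend;

	/* After: adjust_va_to_fit_type() classifies internally and returns non-zero when nothing fits. */
	ret = adjust_va_to_fit_type(va, nva_start_addr, size);
	if (WARN_ON_ONCE(ret))
		return vend;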
This commit is contained in:
parent bcc728eb4f
commit 1b23ff80b3

Changed file: mm/vmalloc.c (23 lines changed)
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1335,10 +1335,10 @@ classify_va_fit_type(struct vmap_area *va,
 
 static __always_inline int
 adjust_va_to_fit_type(struct vmap_area *va,
-	unsigned long nva_start_addr, unsigned long size,
-	enum fit_type type)
+	unsigned long nva_start_addr, unsigned long size)
 {
 	struct vmap_area *lva = NULL;
+	enum fit_type type = classify_va_fit_type(va, nva_start_addr, size);
 
 	if (type == FL_FIT_TYPE) {
 		/*
@@ -1444,7 +1444,6 @@ __alloc_vmap_area(unsigned long size, unsigned long align,
 	bool adjust_search_size = true;
 	unsigned long nva_start_addr;
 	struct vmap_area *va;
-	enum fit_type type;
 	int ret;
 
 	/*
@@ -1472,14 +1471,9 @@ __alloc_vmap_area(unsigned long size, unsigned long align,
 	if (nva_start_addr + size > vend)
 		return vend;
 
-	/* Classify what we have found. */
-	type = classify_va_fit_type(va, nva_start_addr, size);
-	if (WARN_ON_ONCE(type == NOTHING_FIT))
-		return vend;
-
 	/* Update the free vmap_area. */
-	ret = adjust_va_to_fit_type(va, nva_start_addr, size, type);
-	if (ret)
+	ret = adjust_va_to_fit_type(va, nva_start_addr, size);
+	if (WARN_ON_ONCE(ret))
 		return vend;
 
 #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
@@ -3735,7 +3729,6 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
 	int area, area2, last_area, term_area;
 	unsigned long base, start, size, end, last_end, orig_start, orig_end;
 	bool purged = false;
-	enum fit_type type;
 
 	/* verify parameters and allocate data structures */
 	BUG_ON(offset_in_page(align) || !is_power_of_2(align));
@@ -3846,15 +3839,11 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
 			/* It is a BUG(), but trigger recovery instead. */
 			goto recovery;
 
-		type = classify_va_fit_type(va, start, size);
-		if (WARN_ON_ONCE(type == NOTHING_FIT))
+		ret = adjust_va_to_fit_type(va, start, size);
+		if (WARN_ON_ONCE(unlikely(ret)))
 			/* It is a BUG(), but trigger recovery instead. */
 			goto recovery;
 
-		ret = adjust_va_to_fit_type(va, start, size, type);
-		if (unlikely(ret))
-			goto recovery;
-
 		/* Allocated area. */
 		va = vas[area];
 		va->va_start = start;