Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git (synced 2025-01-16 21:35:07 +00:00)
mm/mempolicy: do not duplicate policy if it is not applicable for set_mempolicy_home_node
set_mempolicy_home_node tries to duplicate a memory policy before checking whether it is applicable for the operation. There is no real reason for doing that, and it might actually be a pointless memory allocation and deallocation exercise for MPOL_INTERLEAVE.

Not a big problem, but we can do better. Simply check the policy before acting on it.

Link: https://lkml.kernel.org/r/20221216194537.238047-2-mathieu.desnoyers@efficios.com
Signed-off-by: Michal Hocko <mhocko@suse.com>
Reviewed-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Feng Tang <feng.tang@intel.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Randy Dunlap <rdunlap@infradead.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Huang Ying <ying.huang@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent a5fd8390d2
commit e976936cfc
mm/mempolicy.c
@@ -1489,7 +1489,7 @@ SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, le
 {
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
-	struct mempolicy *new;
+	struct mempolicy *new, *old;
 	unsigned long vmstart;
 	unsigned long vmend;
 	unsigned long end;
@@ -1521,31 +1521,27 @@ SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, le
 		return 0;
 	mmap_write_lock(mm);
 	for_each_vma_range(vmi, vma, end) {
-		vmstart = max(start, vma->vm_start);
-		vmend = min(end, vma->vm_end);
-		new = mpol_dup(vma_policy(vma));
-		if (IS_ERR(new)) {
-			err = PTR_ERR(new);
-			break;
-		}
-		/*
-		 * Only update home node if there is an existing vma policy
-		 */
-		if (!new)
-			continue;
-
 		/*
 		 * If any vma in the range got policy other than MPOL_BIND
 		 * or MPOL_PREFERRED_MANY we return error. We don't reset
 		 * the home node for vmas we already updated before.
 		 */
-		if (new->mode != MPOL_BIND && new->mode != MPOL_PREFERRED_MANY) {
-			mpol_put(new);
+		old = vma_policy(vma);
+		if (!old)
+			continue;
+		if (old->mode != MPOL_BIND && old->mode != MPOL_PREFERRED_MANY) {
 			err = -EOPNOTSUPP;
 			break;
 		}
+		new = mpol_dup(old);
+		if (IS_ERR(new)) {
+			err = PTR_ERR(new);
+			break;
+		}
 
 		new->home_node = home_node;
+		vmstart = max(start, vma->vm_start);
+		vmend = min(end, vma->vm_end);
 		err = mbind_range(mm, vmstart, vmend, new);
 		mpol_put(new);
 		if (err)
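For context, here is a minimal userspace sketch of how the syscall touched by this patch is typically driven. It is not part of the commit: it assumes a NUMA machine with nodes 0 and 1, libnuma's <numaif.h> for mbind(), and a libc without a dedicated wrapper, so it goes through raw syscall(2) with the usual syscall number 450 (verify against your arch headers). The range must already carry an MPOL_BIND or MPOL_PREFERRED_MANY vma policy; otherwise, per the loop above, the vma is skipped (no policy) or the call fails with EOPNOTSUPP.

/*
 * Illustrative only -- not part of the commit.  Establish an MPOL_BIND
 * vma policy with mbind() first, then pick a home node for the range
 * with set_mempolicy_home_node().  Link with -lnuma.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <numaif.h>			/* mbind(), MPOL_BIND */

#ifndef __NR_set_mempolicy_home_node
#define __NR_set_mempolicy_home_node 450	/* assumption: check your arch headers */
#endif

int main(void)
{
	size_t len = 4UL << 20;		/* 4 MiB anonymous range */
	void *addr = mmap(NULL, len, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (addr == MAP_FAILED)
		return 1;

	/*
	 * Bind the range to nodes 0 and 1.  Without an existing
	 * MPOL_BIND/MPOL_PREFERRED_MANY vma policy, the home-node call
	 * below either skips the vma or fails with EOPNOTSUPP.
	 */
	unsigned long nodemask = (1UL << 0) | (1UL << 1);
	if (mbind(addr, len, MPOL_BIND, &nodemask, sizeof(nodemask) * 8, 0)) {
		perror("mbind");
		return 1;
	}

	/* Prefer node 0 as the home node for allocations in the range. */
	if (syscall(__NR_set_mempolicy_home_node, (unsigned long)addr, len,
		    0UL /* home_node */, 0UL /* flags, must be 0 */)) {
		perror("set_mempolicy_home_node");
		return 1;
	}

	memset(addr, 0, len);		/* fault pages in under the policy */
	munmap(addr, len);
	return 0;
}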