Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-01-04 04:04:19 +00:00)
mm/mmu_notifier: init notifier if necessary
While registering an MMU notifier, a new mmu_notifier_mm instance is allocated and then freed again if the current mm_struct's mmu_notifier_mm has already been initialized. That causes needless overhead. This patch eliminates it by allocating the structure only when the mm has no notifier state yet, i.e. inside the !mm_has_notifiers(mm) check.

Signed-off-by: Gavin Shan <shangw@linux.vnet.ibm.com>
Signed-off-by: Wanpeng Li <liwanp@linux.vnet.ibm.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Avi Kivity <avi@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Cc: Sagi Grimberg <sagig@mellanox.co.il>
Cc: Haggai Eran <haggaie@mellanox.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit e0f3c3f78d
parent 21a92735f6
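The core of the change is that the allocation moves inside the !mm_has_notifiers(mm) check, so re-registration on an mm that already has notifier state never touches the allocator. Before the diff, here is a minimal user-space sketch of that pattern; fake_mm, fake_notifier_mm and the register_* helpers are illustrative stand-ins, not the kernel's types or API:

/*
 * Stand-alone sketch of the lazy-init pattern this patch adopts.
 * All names (fake_mm, fake_notifier_mm, register_*) are stand-ins.
 */
#include <stdio.h>
#include <stdlib.h>

struct fake_notifier_mm {
	int placeholder;			/* stands in for the hlist head + spinlock */
};

struct fake_mm {
	struct fake_notifier_mm *notifier_mm;	/* NULL until first registration */
};

/* Old scheme: allocate up front, free again when the mm was already set up. */
static int register_eager(struct fake_mm *mm)
{
	struct fake_notifier_mm *nm = malloc(sizeof(*nm));

	if (!nm)
		return -1;
	if (!mm->notifier_mm) {
		mm->notifier_mm = nm;
		nm = NULL;
	}
	free(nm);	/* wasted malloc()/free() on every re-registration */
	return 0;
}

/* New scheme: allocate only when the mm has no notifier state yet. */
static int register_lazy(struct fake_mm *mm)
{
	if (!mm->notifier_mm) {
		mm->notifier_mm = malloc(sizeof(*mm->notifier_mm));
		if (!mm->notifier_mm)
			return -1;
	}
	return 0;	/* later registrations skip the allocator entirely */
}

int main(void)
{
	struct fake_mm mm = { .notifier_mm = NULL };

	register_eager(&mm);	/* first call: allocates and keeps the state */
	register_eager(&mm);	/* second call: allocates, then frees right away */
	register_lazy(&mm);	/* reuses the existing state, no allocation */
	printf("notifier state at %p\n", (void *)mm.notifier_mm);
	free(mm.notifier_mm);
	return 0;
}

In the diff below the same move happens inside do_mmu_notifier_register(): the check and the assignment to mm->mmu_notifier_mm both run under mm_take_all_locks(), so the unconditional kmalloc()/kfree() pair and the "set to NULL if consumed" dance become unnecessary.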
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -207,22 +207,23 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
 	 */
 	BUG_ON(!srcu.per_cpu_ref);
 
-	ret = -ENOMEM;
-	mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
-	if (unlikely(!mmu_notifier_mm))
-		goto out;
-
 	if (take_mmap_sem)
 		down_write(&mm->mmap_sem);
 	ret = mm_take_all_locks(mm);
 	if (unlikely(ret))
-		goto out_cleanup;
+		goto out;
 
 	if (!mm_has_notifiers(mm)) {
+		mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm),
+					  GFP_KERNEL);
+		if (unlikely(!mmu_notifier_mm)) {
+			ret = -ENOMEM;
+			goto out_of_mem;
+		}
 		INIT_HLIST_HEAD(&mmu_notifier_mm->list);
 		spin_lock_init(&mmu_notifier_mm->lock);
+
 		mm->mmu_notifier_mm = mmu_notifier_mm;
-		mmu_notifier_mm = NULL;
 	}
 	atomic_inc(&mm->mm_count);
 
@@ -238,13 +239,12 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
 	hlist_add_head(&mn->hlist, &mm->mmu_notifier_mm->list);
 	spin_unlock(&mm->mmu_notifier_mm->lock);
 
+out_of_mem:
 	mm_drop_all_locks(mm);
-out_cleanup:
+out:
 	if (take_mmap_sem)
 		up_write(&mm->mmap_sem);
-	/* kfree() does nothing if mmu_notifier_mm is NULL */
-	kfree(mmu_notifier_mm);
-out:
+
 	BUG_ON(atomic_read(&mm->mm_users) <= 0);
 	return ret;
 }
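For readers following the relabeled error paths: after the patch, an allocation failure inside the locked region exits through out_of_mem (which drops the per-VMA locks) and falls through to out (which releases mmap_sem), while a mm_take_all_locks() failure goes straight to out. A rough user-space sketch of that unwind order, with printf stand-ins rather than the real kernel primitives, and registered_state standing in for mm->mmu_notifier_mm:

/*
 * Sketch of the post-patch unwind order in do_mmu_notifier_register().
 * The helpers below are stand-ins, not kernel functions.
 */
#include <stdio.h>
#include <stdlib.h>

static void *registered_state;	/* stands in for mm->mmu_notifier_mm */

static int take_all_locks(void)  { puts("mm_take_all_locks"); return 0; }
static void drop_all_locks(void) { puts("mm_drop_all_locks"); }
static void sem_down_write(void) { puts("down_write(mmap_sem)"); }
static void sem_up_write(void)   { puts("up_write(mmap_sem)"); }

static int do_register(int simulate_oom)
{
	int ret;

	sem_down_write();
	ret = take_all_locks();
	if (ret)
		goto out;		/* the heavyweight locks were never taken */

	if (!registered_state) {
		registered_state = simulate_oom ? NULL : malloc(64);
		if (!registered_state) {
			ret = -1;
			goto out_of_mem;	/* must still drop the locks */
		}
	}
	puts("registered");

out_of_mem:
	drop_all_locks();
out:
	sem_up_write();
	return ret;
}

int main(void)
{
	do_register(1);		/* simulated allocation failure */
	do_register(0);		/* success; the state is kept, no kfree() needed */
	free(registered_state);
	return 0;
}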