Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
mm/mmu_notifiers: hoist do_mmu_notifier_register down_write to the caller
This simplifies the code to not have so many one line functions and extra
logic. __mmu_notifier_register() simply becomes the entry point to register
the notifier, and the other one calls it under lock. Also add a lockdep_assert
to check that the callers are holding the lock as expected.

Link: https://lore.kernel.org/r/20190806231548.25242-2-jgg@ziepe.ca
Suggested-by: Christoph Hellwig <hch@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Ralph Campbell <rcampbell@nvidia.com>
Tested-by: Ralph Campbell <rcampbell@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 9c240a7bb3
commit 56c57103db
@@ -236,22 +236,22 @@ void __mmu_notifier_invalidate_range(struct mm_struct *mm,
 }
 EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range);
 
-static int do_mmu_notifier_register(struct mmu_notifier *mn,
-				    struct mm_struct *mm,
-				    int take_mmap_sem)
+/*
+ * Same as mmu_notifier_register but here the caller must hold the
+ * mmap_sem in write mode.
+ */
+int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
 {
 	struct mmu_notifier_mm *mmu_notifier_mm;
 	int ret;
 
+	lockdep_assert_held_write(&mm->mmap_sem);
 	BUG_ON(atomic_read(&mm->mm_users) <= 0);
 
-	ret = -ENOMEM;
 	mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
 	if (unlikely(!mmu_notifier_mm))
-		goto out;
+		return -ENOMEM;
 
-	if (take_mmap_sem)
-		down_write(&mm->mmap_sem);
 	ret = mm_take_all_locks(mm);
 	if (unlikely(ret))
 		goto out_clean;
@@ -279,13 +279,11 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
 
 	mm_drop_all_locks(mm);
 out_clean:
-	if (take_mmap_sem)
-		up_write(&mm->mmap_sem);
 	kfree(mmu_notifier_mm);
-out:
 	BUG_ON(atomic_read(&mm->mm_users) <= 0);
 	return ret;
 }
+EXPORT_SYMBOL_GPL(__mmu_notifier_register);
 
 /*
  * Must not hold mmap_sem nor any other VM related lock when calling
@@ -302,20 +300,15 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
  */
 int mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
 {
-	return do_mmu_notifier_register(mn, mm, 1);
+	int ret;
+
+	down_write(&mm->mmap_sem);
+	ret = __mmu_notifier_register(mn, mm);
+	up_write(&mm->mmap_sem);
+	return ret;
 }
 EXPORT_SYMBOL_GPL(mmu_notifier_register);
 
-/*
- * Same as mmu_notifier_register but here the caller must hold the
- * mmap_sem in write mode.
- */
-int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
-{
-	return do_mmu_notifier_register(mn, mm, 0);
-}
-EXPORT_SYMBOL_GPL(__mmu_notifier_register);
-
 /* this is called after the last mmu_notifier_unregister() returned */
 void __mmu_notifier_mm_destroy(struct mm_struct *mm)
 {
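As an aside, the shape of this change is the common "caller holds the lock"
convention: the __locked entry point only asserts that the lock is held
(lockdep_assert_held_write() here), while the plain entry point is a thin
wrapper that takes and drops the lock around it. The snippet below is a
minimal userspace sketch of that convention, not kernel code; the names
(notifier_register, __notifier_register_locked, map_lock) and the
map_lock_held flag are invented for the example and merely stand in for
mmap_sem and lockdep.

/*
 * Minimal userspace sketch of the pattern in this patch: the __locked
 * variant asserts that the caller already holds the lock, the plain
 * variant is a thin wrapper that takes the lock itself.  All names here
 * are invented for the example; build with: cc -Wall -pthread sketch.c
 */
#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;
static int map_lock_held;	/* crude stand-in for lockdep's "held" state */

/* Caller must hold map_lock (compare __mmu_notifier_register()). */
static int __notifier_register_locked(const char *name)
{
	assert(map_lock_held);	/* compare lockdep_assert_held_write() */
	printf("registered %s\n", name);
	return 0;
}

/* Convenience wrapper that takes the lock (compare mmu_notifier_register()). */
static int notifier_register(const char *name)
{
	int ret;

	pthread_mutex_lock(&map_lock);
	map_lock_held = 1;
	ret = __notifier_register_locked(name);
	map_lock_held = 0;
	pthread_mutex_unlock(&map_lock);
	return ret;
}

int main(void)
{
	return notifier_register("example");
}

The payoff, as in the patch, is that a caller that already holds the lock can
call the __locked entry point directly instead of passing a take-the-lock flag
down into the implementation.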