mm: pgtable: remove pte_offset_map_nolock()
Now that no users are using pte_offset_map_nolock(), remove it.

Link: https://lkml.kernel.org/r/d04f9bbbcde048fb6ffa6f2bdbc6f9b22d5286f9.1727332572.git.zhengqi.arch@bytedance.com
Signed-off-by: Qi Zheng <zhengqi.arch@bytedance.com>
Reviewed-by: Muchun Song <muchun.song@linux.dev>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mike Rapoport (Microsoft) <rppt@kernel.org>
Cc: Peter Xu <peterx@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 2441774f2d
commit 583e66debd
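The remaining helpers, visible in the hunks below, take the same arguments as the removed one, so a former pte_offset_map_nolock() caller picks the read-only or read-write variant explicitly. A minimal before/after sketch; the walk_one_pte() wrapper and its error handling are invented for illustration:

#include <linux/mm.h>

/* Hypothetical caller, for illustration only. */
static int walk_one_pte(struct mm_struct *mm, pmd_t *pmd, unsigned long addr)
{
	spinlock_t *ptl;
	pte_t *pte;

	/* Before this commit: map the PTE and learn (but not take) its lock. */
	/* pte = pte_offset_map_nolock(mm, pmd, addr, &ptl); */

	/* After: the read-only variant has the same signature. */
	pte = pte_offset_map_ro_nolock(mm, pmd, addr, &ptl);
	if (!pte)
		return -EAGAIN;		/* no PTE table at this address */

	/* ... read the PTE, taking *ptl first if the result must be stable ... */

	pte_unmap(pte);
	return 0;
}

A caller that intends to modify PTEs would use pte_offset_map_rw_nolock() instead, per the prototypes in the second hunk below.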
@@ -16,9 +16,6 @@ There are helpers to lock/unlock a table and other accessor functions:
  - pte_offset_map_lock()
 	maps PTE and takes PTE table lock, returns pointer to PTE with
 	pointer to its PTE table lock, or returns NULL if no PTE table;
- - pte_offset_map_nolock()
-	maps PTE, returns pointer to PTE with pointer to its PTE table
-	lock (not taken), or returns NULL if no PTE table;
  - pte_offset_map_ro_nolock()
 	maps PTE, returns pointer to PTE with pointer to its PTE table
 	lock (not taken), or returns NULL if no PTE table;
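Of the helpers still documented in this list, pte_offset_map_lock() is the one that both maps the PTE and takes its lock. A sketch of the usual pairing with pte_unmap_unlock(); the read_pte_locked() wrapper and its return convention are invented for illustration:

#include <linux/mm.h>

/* Illustration only: read one PTE under its split page table lock. */
static int read_pte_locked(struct mm_struct *mm, pmd_t *pmd,
			   unsigned long addr, pte_t *out)
{
	spinlock_t *ptl;
	pte_t *pte;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	if (!pte)
		return -ENOENT;		/* no PTE table here */

	*out = ptep_get(pte);		/* read while the PTE table lock is held */
	pte_unmap_unlock(pte, ptl);
	return 0;
}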
@@ -3015,8 +3015,6 @@ static inline pte_t *pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd,
 	return pte;
 }
 
-pte_t *pte_offset_map_nolock(struct mm_struct *mm, pmd_t *pmd,
-			unsigned long addr, spinlock_t **ptlp);
 pte_t *pte_offset_map_ro_nolock(struct mm_struct *mm, pmd_t *pmd,
 			unsigned long addr, spinlock_t **ptlp);
 pte_t *pte_offset_map_rw_nolock(struct mm_struct *mm, pmd_t *pmd,
@@ -305,18 +305,6 @@ pte_t *__pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp)
 	return NULL;
 }
 
-pte_t *pte_offset_map_nolock(struct mm_struct *mm, pmd_t *pmd,
-			     unsigned long addr, spinlock_t **ptlp)
-{
-	pmd_t pmdval;
-	pte_t *pte;
-
-	pte = __pte_offset_map(pmd, addr, &pmdval);
-	if (likely(pte))
-		*ptlp = pte_lockptr(mm, &pmdval);
-	return pte;
-}
-
 pte_t *pte_offset_map_ro_nolock(struct mm_struct *mm, pmd_t *pmd,
 				unsigned long addr, spinlock_t **ptlp)
 {
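The detail worth noting in the removed body is that the lock pointer is derived from pmdval, the value __pte_offset_map() validated, rather than from a fresh read of *pmd; the comment removed in the next hunk spells out why. A hedged restatement, with the wrapper name invented:

#include <linux/mm.h>

/*
 * Illustration only: why the lock pointer comes from the validated pmd
 * snapshot and not from a later re-read of *pmd.
 */
static pte_t *map_pte_lockptr_sketch(struct mm_struct *mm, pmd_t *pmd,
				     unsigned long addr, spinlock_t **ptlp)
{
	pmd_t pmdval;
	pte_t *pte;

	pte = __pte_offset_map(pmd, addr, &pmdval);
	if (likely(pte))
		/* Snapshot matches the page table that 'pte' points into. */
		*ptlp = pte_lockptr(mm, &pmdval);
	/*
	 * Calling pte_lockptr(mm, pmd) later instead could act on a changed
	 * *pmd and hand back the lock of a different page table.
	 */
	return pte;
}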
@@ -372,15 +360,6 @@ pte_t *pte_offset_map_rw_nolock(struct mm_struct *mm, pmd_t *pmd,
  * and disconnected table. Until pte_unmap(pte) unmaps and rcu_read_unlock()s
  * afterwards.
  *
- * pte_offset_map_nolock(mm, pmd, addr, ptlp), above, is like pte_offset_map();
- * but when successful, it also outputs a pointer to the spinlock in ptlp - as
- * pte_offset_map_lock() does, but in this case without locking it. This helps
- * the caller to avoid a later pte_lockptr(mm, *pmd), which might by that time
- * act on a changed *pmd: pte_offset_map_nolock() provides the correct spinlock
- * pointer for the page table that it returns. In principle, the caller should
- * recheck *pmd once the lock is taken; in practice, no callsite needs that -
- * either the mmap_lock for write, or pte_same() check on contents, is enough.
- *
  * pte_offset_map_ro_nolock(mm, pmd, addr, ptlp), above, is like pte_offset_map();
  * but when successful, it also outputs a pointer to the spinlock in ptlp - as
  * pte_offset_map_lock() does, but in this case without locking it. This helps
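The locking pattern this comment describes for the remaining _nolock helpers can be sketched as follows; the pte_still_maps() wrapper and its return convention are invented, but the helper calls are the ones named above:

#include <linux/mm.h>

/*
 * Illustration only: map first, take the returned lock later, and revalidate
 * the entry with pte_same() before trusting it.
 */
static bool pte_still_maps(struct mm_struct *mm, pmd_t *pmd,
			   unsigned long addr, pte_t expected)
{
	spinlock_t *ptl;
	pte_t *pte;
	bool same;

	pte = pte_offset_map_ro_nolock(mm, pmd, addr, &ptl);
	if (!pte)
		return false;		/* no PTE table */

	spin_lock(ptl);
	same = pte_same(ptep_get(pte), expected);	/* recheck under the lock */
	spin_unlock(ptl);

	pte_unmap(pte);
	return same;
}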