mm: convert a few VM_BUG_ON callers to VM_BUG_ON_VMA
Trivially convert a few VM_BUG_ON calls to VM_BUG_ON_VMA to extract more
information when they trigger.

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
Reviewed-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Konstantin Khlebnikov <khlebnikov@openvz.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Hugh Dickins <hughd@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Michel Lespinasse <walken@google.com>
Cc: Minchan Kim <minchan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent: fa3759ccd5
Commit: 81d1b09c6b
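For context, VM_BUG_ON_VMA() is the vma-aware counterpart of VM_BUG_ON(): when the asserted condition trips, it dumps the offending vm_area_struct before hitting BUG(), which is the "more information" the changelog refers to. Below is a minimal sketch of the idea, assuming CONFIG_DEBUG_VM is enabled and using the dump_vma() helper added by the parent commit; the real macro lives in include/linux/mmdebug.h and may differ in detail.

/* Sketch only -- see include/linux/mmdebug.h for the actual definition. */
#ifdef CONFIG_DEBUG_VM
#define VM_BUG_ON_VMA(cond, vma)					\
	do {								\
		if (unlikely(cond)) {					\
			dump_vma(vma);	/* dump vm_start, vm_end, flags, ... */ \
			BUG();						\
		}							\
	} while (0)
#else
#define VM_BUG_ON_VMA(cond, vma)	VM_BUG_ON(cond)
#endif

A converted assertion such as VM_BUG_ON_VMA(!vma->anon_vma, vma) therefore still panics on failure, but the oops output now includes the vma's range and flags rather than only the file and line of the BUG.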
@@ -132,7 +132,7 @@ extern int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
 static inline int pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
 		spinlock_t **ptl)
 {
-	VM_BUG_ON(!rwsem_is_locked(&vma->vm_mm->mmap_sem));
+	VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
 	if (pmd_trans_huge(*pmd))
 		return __pmd_trans_huge_lock(pmd, vma, ptl);
 	else
@@ -150,7 +150,7 @@ int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
 static inline void anon_vma_merge(struct vm_area_struct *vma,
 		struct vm_area_struct *next)
 {
-	VM_BUG_ON(vma->anon_vma != next->anon_vma);
+	VM_BUG_ON_VMA(vma->anon_vma != next->anon_vma, vma);
 	unlink_anon_vmas(next);
 }
 
@@ -1096,7 +1096,7 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	unsigned long mmun_end;	/* For mmu_notifiers */
 
 	ptl = pmd_lockptr(mm, pmd);
-	VM_BUG_ON(!vma->anon_vma);
+	VM_BUG_ON_VMA(!vma->anon_vma, vma);
 	haddr = address & HPAGE_PMD_MASK;
 	if (is_huge_zero_pmd(orig_pmd))
 		goto alloc;
@@ -2083,7 +2083,7 @@ int khugepaged_enter_vma_merge(struct vm_area_struct *vma)
 	if (vma->vm_ops)
 		/* khugepaged not yet working on file or special mappings */
 		return 0;
-	VM_BUG_ON(vma->vm_flags & VM_NO_THP);
+	VM_BUG_ON_VMA(vma->vm_flags & VM_NO_THP, vma);
 	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
 	hend = vma->vm_end & HPAGE_PMD_MASK;
 	if (hstart < hend)
@@ -2406,7 +2406,7 @@ static bool hugepage_vma_check(struct vm_area_struct *vma)
 		return false;
 	if (is_vma_temporary_stack(vma))
 		return false;
-	VM_BUG_ON(vma->vm_flags & VM_NO_THP);
+	VM_BUG_ON_VMA(vma->vm_flags & VM_NO_THP, vma);
 	return true;
 }
 
mm/hugetlb.c (14 changed lines)
@@ -434,7 +434,7 @@ static inline struct resv_map *inode_resv_map(struct inode *inode)
 
 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
 {
-	VM_BUG_ON(!is_vm_hugetlb_page(vma));
+	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
 	if (vma->vm_flags & VM_MAYSHARE) {
 		struct address_space *mapping = vma->vm_file->f_mapping;
 		struct inode *inode = mapping->host;
@@ -449,8 +449,8 @@ static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
 
 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
 {
-	VM_BUG_ON(!is_vm_hugetlb_page(vma));
-	VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
+	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
+	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
 
 	set_vma_private_data(vma, (get_vma_private_data(vma) &
 				HPAGE_RESV_MASK) | (unsigned long)map);
@@ -458,15 +458,15 @@ static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
 
 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
 {
-	VM_BUG_ON(!is_vm_hugetlb_page(vma));
-	VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
+	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
+	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
 
 	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
 }
 
 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
 {
-	VM_BUG_ON(!is_vm_hugetlb_page(vma));
+	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
 
 	return (get_vma_private_data(vma) & flag) != 0;
 }
@@ -474,7 +474,7 @@ static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
 /* Reset counters to 0 and clear all HPAGE_RESV_* flags */
 void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
 {
-	VM_BUG_ON(!is_vm_hugetlb_page(vma));
+	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
 	if (!(vma->vm_flags & VM_MAYSHARE))
 		vma->vm_private_data = (void *)0;
 }
@@ -34,7 +34,7 @@ void vma_interval_tree_insert_after(struct vm_area_struct *node,
 	struct vm_area_struct *parent;
 	unsigned long last = vma_last_pgoff(node);
 
-	VM_BUG_ON(vma_start_pgoff(node) != vma_start_pgoff(prev));
+	VM_BUG_ON_VMA(vma_start_pgoff(node) != vma_start_pgoff(prev), node);
 
 	if (!prev->shared.linear.rb.rb_right) {
 		parent = prev;
@@ -233,8 +233,8 @@ long __mlock_vma_pages_range(struct vm_area_struct *vma,
 
 	VM_BUG_ON(start & ~PAGE_MASK);
 	VM_BUG_ON(end & ~PAGE_MASK);
-	VM_BUG_ON(start < vma->vm_start);
-	VM_BUG_ON(end > vma->vm_end);
+	VM_BUG_ON_VMA(start < vma->vm_start, vma);
+	VM_BUG_ON_VMA(end > vma->vm_end, vma);
 	VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
 
 	gup_flags = FOLL_TOUCH | FOLL_MLOCK;
@@ -786,8 +786,8 @@ again: remove_next = 1 + (end > next->vm_end);
 	if (!anon_vma && adjust_next)
 		anon_vma = next->anon_vma;
 	if (anon_vma) {
-		VM_BUG_ON(adjust_next && next->anon_vma &&
-			  anon_vma != next->anon_vma);
+		VM_BUG_ON_VMA(adjust_next && next->anon_vma &&
+			  anon_vma != next->anon_vma, next);
 		anon_vma_lock_write(anon_vma);
 		anon_vma_interval_tree_pre_update_vma(vma);
 		if (adjust_next)
@@ -2848,7 +2848,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
 			 * safe. It is only safe to keep the vm_pgoff
 			 * linear if there are no pages mapped yet.
 			 */
-			VM_BUG_ON(faulted_in_anon_vma);
+			VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma);
 			*vmap = vma = new_vma;
 		}
 		*need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
@@ -195,7 +195,8 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 		if (pmd_trans_huge(*old_pmd)) {
 			int err = 0;
 			if (extent == HPAGE_PMD_SIZE) {
-				VM_BUG_ON(vma->vm_file || !vma->anon_vma);
+				VM_BUG_ON_VMA(vma->vm_file || !vma->anon_vma,
+					      vma);
 				/* See comment in move_ptes() */
 				if (need_rmap_locks)
 					anon_vma_lock_write(vma->anon_vma);
@@ -527,7 +527,7 @@ vma_address(struct page *page, struct vm_area_struct *vma)
 	unsigned long address = __vma_address(page, vma);
 
 	/* page should be within @vma mapping range */
-	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
+	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
 
 	return address;
 }
@@ -897,7 +897,7 @@ void page_move_anon_rmap(struct page *page,
 	struct anon_vma *anon_vma = vma->anon_vma;
 
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
-	VM_BUG_ON(!anon_vma);
+	VM_BUG_ON_VMA(!anon_vma, vma);
 	VM_BUG_ON_PAGE(page->index != linear_page_index(vma, address), page);
 
 	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
@@ -1024,7 +1024,7 @@ void do_page_add_anon_rmap(struct page *page,
 void page_add_new_anon_rmap(struct page *page,
 	struct vm_area_struct *vma, unsigned long address)
 {
-	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
+	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
 	SetPageSwapBacked(page);
 	atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */
 	if (PageTransHuge(page))
@@ -1670,7 +1670,7 @@ static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
 	 * structure at mapping cannot be freed and reused yet,
 	 * so we can safely take mapping->i_mmap_mutex.
 	 */
-	VM_BUG_ON(!PageLocked(page));
+	VM_BUG_ON_PAGE(!PageLocked(page), page);
 
 	if (!mapping)
 		return ret;