Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git, synced 2025-01-12 00:00:00 +00:00
mm: prevent mmap_cache race in find_vma()
find_vma() can be called by multiple threads with read lock
held on mm->mmap_sem and any of them can update mm->mmap_cache.
Prevent compiler from re-fetching mm->mmap_cache, because other
readers could update it in the meantime:

               thread 1                             thread 2
                                        |
  find_vma()                            |  find_vma()
    struct vm_area_struct *vma = NULL;  |
    vma = mm->mmap_cache;               |
    if (!(vma && vma->vm_end > addr     |
        && vma->vm_start <= addr)) {    |
                                        |    mm->mmap_cache = vma;
    return vma;                         |
     ^^ compiler may optimize this      |
        local variable out and re-read  |
        mm->mmap_cache                  |

This issue can be reproduced with gcc-4.8.0-1 on s390x by running
mallocstress testcase from LTP, which triggers:

  kernel BUG at mm/rmap.c:1088!

  Call Trace:
   ([<000003d100c57000>] 0x3d100c57000)
    [<000000000023a1c0>] do_wp_page+0x2fc/0xa88
    [<000000000023baae>] handle_pte_fault+0x41a/0xac8
    [<000000000023d832>] handle_mm_fault+0x17a/0x268
    [<000000000060507a>] do_protection_exception+0x1e2/0x394
    [<0000000000603a04>] pgm_check_handler+0x138/0x13c
    [<000003fffcf1f07a>] 0x3fffcf1f07a
  Last Breaking-Event-Address:
    [<000000000024755e>] page_add_new_anon_rmap+0xc2/0x168

Thanks to Jakub Jelinek for his insight on gcc and helping to track
this down.

Signed-off-by: Jan Stancek <jstancek@redhat.com>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: stable@vger.kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
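To make the hazard concrete, here is a minimal user-space sketch of the same pattern; it is not part of the commit, and the names struct vm_area, cache, lookup_racy() and lookup_once() are illustrative stand-ins for struct vm_area_struct and mm->mmap_cache. Without a volatile access the compiler is free to drop the local and re-read the shared cache pointer after the range check, so the pointer it returns may never have been validated; forcing a single load (what ACCESS_ONCE() does) makes the check and the return use the same snapshot.

/* Illustrative stand-ins for struct vm_area_struct and mm->mmap_cache. */
struct vm_area {
	unsigned long vm_start;
	unsigned long vm_end;
};

static struct vm_area *cache;	/* shared; other readers may update it */

/*
 * Broken pattern: the compiler may optimize 'vma' out and reload
 * 'cache' after the check, returning a pointer it never validated.
 */
struct vm_area *lookup_racy(unsigned long addr)
{
	struct vm_area *vma = cache;

	if (vma && vma->vm_start <= addr && vma->vm_end > addr)
		return vma;
	return NULL;
}

/*
 * Fixed pattern: the volatile cast forces exactly one load, so the
 * range check and the returned value refer to the same snapshot.
 */
struct vm_area *lookup_once(unsigned long addr)
{
	struct vm_area *vma = *(struct vm_area * volatile *)&cache;

	if (vma && vma->vm_start <= addr && vma->vm_end > addr)
		return vma;
	return NULL;
}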
This commit is contained in:
parent 22d1e6f4c5
commit b6a9b7f6b1
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1940,7 +1940,7 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
 
 	/* Check the cache first. */
 	/* (Cache hit rate is typically around 35%.) */
-	vma = mm->mmap_cache;
+	vma = ACCESS_ONCE(mm->mmap_cache);
 	if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
 		struct rb_node *rb_node;
 
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -821,7 +821,7 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
 	struct vm_area_struct *vma;
 
 	/* check the cache first */
-	vma = mm->mmap_cache;
+	vma = ACCESS_ONCE(mm->mmap_cache);
 	if (vma && vma->vm_start <= addr && vma->vm_end > addr)
 		return vma;
 
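For reference, ACCESS_ONCE() in include/linux/compiler.h of kernels of this era boils down to a volatile cast, which is what forbids the re-fetch:

/* include/linux/compiler.h (pre READ_ONCE/WRITE_ONCE kernels) */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

Marking the access volatile obliges the compiler to emit exactly one load of mm->mmap_cache into the local vma and to reuse that value for both the range check and the return, rather than re-reading a cache that another reader may have updated in between.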