Revert "uprobes: use vm_special_mapping close() functionality"
This reverts commit 08e28de116.
A malicious application can munmap() its "[uprobes]" vma; in that case
xol_mapping.close == uprobe_clear_state() will free memory that can still
be used by another thread, or by the same thread when it hits the uprobe
breakpoint afterwards (an illustrative userspace sketch follows the tags
below).
Link: https://lkml.kernel.org/r/20240911131320.GA3448@redhat.com
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Andrii Nakryiko <andrii@kernel.org>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Ian Rogers <irogers@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Sven Schnelle <svens@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
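
For illustration only (not part of the original commit): a minimal userspace
sketch of the scenario described above. It assumes a uprobe has already been
installed and hit in this process (for example via perf probe), so the kernel
has created the "[uprobes]" XOL vma; the program then locates that vma in
/proc/self/maps and munmap()s it. With the reverted change applied, such a
munmap() would run uprobe_clear_state() via xol_mapping.close and free the
xol_area while the rest of the process could still use it.

/* Hypothetical sketch; it only locates and unmaps the "[uprobes]" vma. */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
        FILE *maps = fopen("/proc/self/maps", "r");
        char line[512];

        if (!maps)
                return 1;

        while (fgets(line, sizeof(line), maps)) {
                unsigned long start, end;

                /* Look for the XOL area the kernel names "[uprobes]". */
                if (!strstr(line, "[uprobes]"))
                        continue;
                if (sscanf(line, "%lx-%lx", &start, &end) != 2)
                        break;
                /*
                 * With the reverted commit, this munmap() invoked
                 * xol_mapping.close == uprobe_clear_state(), freeing the
                 * xol_area even though other threads (or a later uprobe
                 * hit in this thread) could still reference it.
                 */
                munmap((void *)start, end - start);
                break;
        }
        fclose(maps);
        return 0;
}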
commit ed8d5b0ce1
parent 242d12c981
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -126,6 +126,7 @@ extern int uprobe_pre_sstep_notifier(struct pt_regs *regs);
 extern void uprobe_notify_resume(struct pt_regs *regs);
 extern bool uprobe_deny_signal(void);
 extern bool arch_uprobe_skip_sstep(struct arch_uprobe *aup, struct pt_regs *regs);
+extern void uprobe_clear_state(struct mm_struct *mm);
 extern int arch_uprobe_analyze_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long addr);
 extern int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs);
 extern int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs);
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -1482,22 +1482,6 @@ void * __weak arch_uprobe_trampoline(unsigned long *psize)
 	return &insn;
 }
 
-/*
- * uprobe_clear_state - Free the area allocated for slots.
- */
-static void uprobe_clear_state(const struct vm_special_mapping *sm, struct vm_area_struct *vma)
-{
-	struct xol_area *area = container_of(vma->vm_private_data, struct xol_area, xol_mapping);
-
-	mutex_lock(&delayed_uprobe_lock);
-	delayed_uprobe_remove(NULL, vma->vm_mm);
-	mutex_unlock(&delayed_uprobe_lock);
-
-	put_page(area->pages[0]);
-	kfree(area->bitmap);
-	kfree(area);
-}
-
 static struct xol_area *__create_xol_area(unsigned long vaddr)
 {
 	struct mm_struct *mm = current->mm;
@@ -1516,7 +1500,6 @@ static struct xol_area *__create_xol_area(unsigned long vaddr)
 
 	area->xol_mapping.name = "[uprobes]";
 	area->xol_mapping.fault = NULL;
-	area->xol_mapping.close = uprobe_clear_state;
 	area->xol_mapping.pages = area->pages;
 	area->pages[0] = alloc_page(GFP_HIGHUSER);
 	if (!area->pages[0])
@@ -1562,6 +1545,25 @@ static struct xol_area *get_xol_area(void)
 	return area;
 }
 
+/*
+ * uprobe_clear_state - Free the area allocated for slots.
+ */
+void uprobe_clear_state(struct mm_struct *mm)
+{
+	struct xol_area *area = mm->uprobes_state.xol_area;
+
+	mutex_lock(&delayed_uprobe_lock);
+	delayed_uprobe_remove(NULL, mm);
+	mutex_unlock(&delayed_uprobe_lock);
+
+	if (!area)
+		return;
+
+	put_page(area->pages[0]);
+	kfree(area->bitmap);
+	kfree(area);
+}
+
 void uprobe_start_dup_mmap(void)
 {
 	percpu_down_read(&dup_mmap_sem);
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1338,6 +1338,7 @@ static inline void __mmput(struct mm_struct *mm)
 {
 	VM_BUG_ON(atomic_read(&mm->mm_users));
 
+	uprobe_clear_state(mm);
 	exit_aio(mm);
 	ksm_exit(mm);
 	khugepaged_exit(mm);	/* must run before exit_mmap */