mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
Fix race in process_vm_rw_core
This fixes the race in process_vm_rw_core found by Oleg (see
http://article.gmane.org/gmane.linux.kernel/1235667/ for details). This
has been updated since I last sent it, as the creation of the new
mm_access() function did almost exactly the same thing as parts of the
previous version of this patch.

In order to use mm_access() even when /proc isn't enabled, we move it
to kernel/fork.c, where the other related process mm access functions
already are.

Signed-off-by: Chris Yeoh <yeohc@au1.ibm.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 24b36da33c
commit 8cdb878dcb
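For context, here is a minimal sketch (not part of the patch) of the caller pattern that mm_access() enables once it lives in kernel/fork.c, modelled on the process_vm_rw_core() change in the last hunk below; the helper name access_target_mm() is hypothetical and used only for illustration.

/*
 * Illustrative sketch only, not part of the patch: how a caller is
 * expected to use mm_access(), following the process_vm_rw_core()
 * hunk below. access_target_mm() is a hypothetical helper name.
 */
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/ptrace.h>
#include <linux/sched.h>

static int access_target_mm(struct task_struct *task)
{
	struct mm_struct *mm;
	int rc = 0;

	/*
	 * mm_access() takes cred_guard_mutex, runs the ptrace_may_access()
	 * check, and returns the target mm with a reference held, an
	 * ERR_PTR() on failure, or NULL if the task has no mm (e.g. a
	 * kernel thread or an exiting task).
	 */
	mm = mm_access(task, PTRACE_MODE_ATTACH);
	if (!mm || IS_ERR(mm))
		return IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;

	/* ... safely operate on the target task's address space here ... */

	mmput(mm);	/* drop the reference taken by mm_access() */
	return rc;
}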
fs/proc/base.c
@@ -198,26 +198,6 @@ static int proc_root_link(struct dentry *dentry, struct path *path)
 	return result;
 }
 
-static struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
-{
-	struct mm_struct *mm;
-	int err;
-
-	err = mutex_lock_killable(&task->signal->cred_guard_mutex);
-	if (err)
-		return ERR_PTR(err);
-
-	mm = get_task_mm(task);
-	if (mm && mm != current->mm &&
-			!ptrace_may_access(task, mode)) {
-		mmput(mm);
-		mm = ERR_PTR(-EACCES);
-	}
-	mutex_unlock(&task->signal->cred_guard_mutex);
-
-	return mm;
-}
-
 struct mm_struct *mm_for_maps(struct task_struct *task)
 {
 	return mm_access(task, PTRACE_MODE_READ);
include/linux/sched.h
@@ -2259,6 +2259,12 @@ static inline void mmdrop(struct mm_struct * mm)
 extern void mmput(struct mm_struct *);
 /* Grab a reference to a task's mm, if it is not already going away */
 extern struct mm_struct *get_task_mm(struct task_struct *task);
+/*
+ * Grab a reference to a task's mm, if it is not already going away
+ * and ptrace_may_access with the mode parameter passed to it
+ * succeeds.
+ */
+extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
 /* Remove the current tasks stale references to the old mm_struct */
 extern void mm_release(struct task_struct *, struct mm_struct *);
 /* Allocate a new mm structure and copy contents from tsk->mm */
kernel/fork.c
@@ -647,6 +647,26 @@ struct mm_struct *get_task_mm(struct task_struct *task)
 }
 EXPORT_SYMBOL_GPL(get_task_mm);
 
+struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
+{
+	struct mm_struct *mm;
+	int err;
+
+	err = mutex_lock_killable(&task->signal->cred_guard_mutex);
+	if (err)
+		return ERR_PTR(err);
+
+	mm = get_task_mm(task);
+	if (mm && mm != current->mm &&
+			!ptrace_may_access(task, mode)) {
+		mmput(mm);
+		mm = ERR_PTR(-EACCES);
+	}
+	mutex_unlock(&task->signal->cred_guard_mutex);
+
+	return mm;
+}
+
 /* Please note the differences between mmput and mm_release.
  * mmput is called whenever we stop holding onto a mm_struct,
  * error success whatever.
mm/process_vm_access.c
@@ -298,22 +298,17 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
 		goto free_proc_pages;
 	}
 
-	task_lock(task);
-	if (__ptrace_may_access(task, PTRACE_MODE_ATTACH)) {
-		task_unlock(task);
-		rc = -EPERM;
+	mm = mm_access(task, PTRACE_MODE_ATTACH);
+	if (!mm || IS_ERR(mm)) {
+		rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
+		/*
+		 * Explicitly map EACCES to EPERM, as EPERM is a more
+		 * appropriate error code for process_vm_readv/writev.
+		 */
+		if (rc == -EACCES)
+			rc = -EPERM;
 		goto put_task_struct;
 	}
-	mm = task->mm;
-
-	if (!mm || (task->flags & PF_KTHREAD)) {
-		task_unlock(task);
-		rc = -EINVAL;
-		goto put_task_struct;
-	}
-
-	atomic_inc(&mm->mm_users);
-	task_unlock(task);
 
 	for (i = 0; i < riovcnt && iov_l_curr_idx < liovcnt; i++) {
 		rc = process_vm_rw_single_vec(