mempolicy: fix a race in shared_policy_replace()
commit b22d127a39 upstream.
shared_policy_replace()'s use of sp_alloc() is unsafe: 1) an sp_node cannot
be dereferenced unless sp->lock is held, and 2) another thread can modify the
sp_node between the spin_unlock taken to allocate a new sp_node and the next
spin_lock. The bug was introduced before 2.6.12-rc2.
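In condensed form, the old flow dropped the lock around the allocation and
retried, which exhibits both problems (this is a simplified sketch of the
removed code, not the full function; see the diff below):

	restart:
		spin_lock(&sp->lock);
		n = sp_lookup(sp, start, end);	/* n is only stable while sp->lock is held */
		...
		spin_unlock(&sp->lock);		/* dropped so the allocation can sleep */
		new2 = sp_alloc(end, n->end, n->policy);	/* dereferences n unlocked: problem 1 */
		if (!new2)
			return -ENOMEM;
		goto restart;			/* tree may have changed meanwhile: problem 2 */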
Kosaki's original patch for this problem was to allocate an sp_node and
policy within shared_policy_replace() and initialise them when the lock was
reacquired. I was not keen on this approach because it partially
duplicates sp_alloc(). As the paths where sp->lock is taken are not that
performance critical, this patch converts sp->lock to sp->mutex so it can
sleep when calling sp_alloc().
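With the mutex held across the allocation, sp_alloc() can sleep without the
lookup result going stale; a condensed sketch of the new flow (the complete
change is in the diff below):

		mutex_lock(&sp->mutex);
		n = sp_lookup(sp, start, end);
		...
		new2 = sp_alloc(end, n->end, n->policy);	/* sleeping is safe under a mutex */
		if (!new2) {
			ret = -ENOMEM;
			goto out;
		}
		...
	out:
		mutex_unlock(&sp->mutex);
		return ret;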
[kosaki.motohiro@jp.fujitsu.com: Original patch]
Signed-off-by: Mel Gorman <mgorman@suse.de>
Acked-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Reviewed-by: Christoph Lameter <cl@linux.com>
Cc: Josh Boyer <jwboyer@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
commit c8d141b1aa
parent acc12b1b27
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -180,7 +180,7 @@ struct sp_node {
 
 struct shared_policy {
 	struct rb_root root;
-	spinlock_t lock;
+	struct mutex mutex;
 };
 
 void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1835,7 +1835,7 @@ int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
  */
 
 /* lookup first element intersecting start-end */
-/* Caller holds sp->lock */
+/* Caller holds sp->mutex */
 static struct sp_node *
 sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
 {
@@ -1899,13 +1899,13 @@ mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
 
 	if (!sp->root.rb_node)
 		return NULL;
-	spin_lock(&sp->lock);
+	mutex_lock(&sp->mutex);
 	sn = sp_lookup(sp, idx, idx+1);
 	if (sn) {
 		mpol_get(sn->policy);
 		pol = sn->policy;
 	}
-	spin_unlock(&sp->lock);
+	mutex_unlock(&sp->mutex);
 	return pol;
 }
 
@@ -1936,10 +1936,10 @@ static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
 static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
 				 unsigned long end, struct sp_node *new)
 {
-	struct sp_node *n, *new2 = NULL;
+	struct sp_node *n;
+	int ret = 0;
 
-restart:
-	spin_lock(&sp->lock);
+	mutex_lock(&sp->mutex);
 	n = sp_lookup(sp, start, end);
 	/* Take care of old policies in the same range. */
 	while (n && n->start < end) {
@@ -1952,16 +1952,14 @@ static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
 		} else {
 			/* Old policy spanning whole new range. */
 			if (n->end > end) {
+				struct sp_node *new2;
+				new2 = sp_alloc(end, n->end, n->policy);
 				if (!new2) {
-					spin_unlock(&sp->lock);
-					new2 = sp_alloc(end, n->end, n->policy);
-					if (!new2)
-						return -ENOMEM;
-					goto restart;
+					ret = -ENOMEM;
+					goto out;
 				}
 				n->end = start;
 				sp_insert(sp, new2);
-				new2 = NULL;
 				break;
 			} else
 				n->end = start;
@@ -1972,12 +1970,9 @@ static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
 	}
 	if (new)
 		sp_insert(sp, new);
-	spin_unlock(&sp->lock);
-	if (new2) {
-		mpol_put(new2->policy);
-		kmem_cache_free(sn_cache, new2);
-	}
-	return 0;
+out:
+	mutex_unlock(&sp->mutex);
+	return ret;
 }
 
 /**
@@ -1995,7 +1990,7 @@ void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
 	int ret;
 
 	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
-	spin_lock_init(&sp->lock);
+	mutex_init(&sp->mutex);
 
 	if (mpol) {
 		struct vm_area_struct pvma;
@@ -2063,7 +2058,7 @@ void mpol_free_shared_policy(struct shared_policy *p)
 
 	if (!p->root.rb_node)
 		return;
-	spin_lock(&p->lock);
+	mutex_lock(&p->mutex);
 	next = rb_first(&p->root);
 	while (next) {
 		n = rb_entry(next, struct sp_node, nd);
@@ -2072,7 +2067,7 @@ void mpol_free_shared_policy(struct shared_policy *p)
 		mpol_put(n->policy);
 		kmem_cache_free(sn_cache, n);
 	}
-	spin_unlock(&p->lock);
+	mutex_unlock(&p->mutex);
 }
 
 /* assumes fs == KERNEL_DS */