debugobjects: fix lockdep warning
Daniel J. Blueman reported:

> =======================================================
> [ INFO: possible circular locking dependency detected ]
> 2.6.27-rc4-224c #1
> -------------------------------------------------------
> hald/4680 is trying to acquire lock:
>  (&n->list_lock){++..}, at: [<ffffffff802bfa26>] add_partial+0x26/0x80
>
> but task is already holding lock:
>  (&obj_hash[i].lock){++..}, at: [<ffffffff8041cfdc>]
> debug_object_free+0x5c/0x120

We fix it by moving the actual freeing outside the lock; the lock now
only protects the list.

The pool lock is also promoted to irq-safe (suggested by Dan). This is
necessary because free_pool is now called outside the irq-disabled
region, so we need to protect against an interrupt handler which calls
debug_object_init().

[tglx@linutronix.de: added hlist_move_list helper to avoid looping
 through the list twice]

Reported-by: Daniel J Blueman <daniel.blueman@gmail.com>
Signed-off-by: Vegard Nossum <vegard.nossum@gmail.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
commit 673d62cc5e
parent bef69ea0dc
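The deadlock pattern and its fix are easiest to see in isolation. Below is a
minimal userspace sketch of the approach the patch adopts (plain C with
pthreads; the struct and function names are illustrative, not from the
kernel): hold the lock only long enough to splice the whole chain onto a
private list head, then free the entries with no lock held, so the
allocator's internal locks can never nest inside the bucket lock.

	/* Userspace sketch only -- not kernel code. */
	#include <pthread.h>
	#include <stdlib.h>

	struct obj {
		struct obj *next;
		int payload;
	};

	struct bucket {
		pthread_mutex_t lock;
		struct obj *head;
	};

	static void bucket_free_all(struct bucket *b)
	{
		struct obj *freelist, *obj;

		/* Splice the bucket's list onto a local head: the
		 * analogue of hlist_move_list(). Only this step runs
		 * under the lock. */
		pthread_mutex_lock(&b->lock);
		freelist = b->head;	/* take over the whole chain */
		b->head = NULL;
		pthread_mutex_unlock(&b->lock);

		/* Now free them -- no lock held, so the allocator's own
		 * locks can never nest inside b->lock. */
		while ((obj = freelist)) {
			freelist = obj->next;
			free(obj);
		}
	}

	int main(void)
	{
		struct bucket b = { PTHREAD_MUTEX_INITIALIZER, NULL };

		for (int i = 0; i < 3; i++) {
			struct obj *o = malloc(sizeof(*o));
			o->payload = i;
			o->next = b.head;
			b.head = o;
		}
		bucket_free_all(&b);
		return 0;
	}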
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -619,6 +619,19 @@ static inline void hlist_add_after(struct hlist_node *n,
 	next->next->pprev = &next->next;
 }
 
+/*
+ * Move a list from one list head to another. Fixup the pprev
+ * reference of the first entry if it exists.
+ */
+static inline void hlist_move_list(struct hlist_head *old,
+				   struct hlist_head *new)
+{
+	new->first = old->first;
+	if (new->first)
+		new->first->pprev = &new->first;
+	old->first = NULL;
+}
+
 #define hlist_entry(ptr, type, member) container_of(ptr,type,member)
 
 #define hlist_for_each(pos, head) \
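For reference, a hedged usage sketch of the new helper (list names are
illustrative, and the fragment assumes the kernel's list.h context):

	HLIST_HEAD(src);
	HLIST_HEAD(dst);

	/* ... populate src with hlist_add_head() ... */

	hlist_move_list(&src, &dst);
	/*
	 * dst now owns the entire chain and src is empty
	 * (src.first == NULL). The helper also fixes up the first
	 * entry's pprev to point at dst.first -- the step a bare
	 * "dst.first = src.first" assignment would miss -- which is
	 * what lets the callers below splice a bucket onto a local
	 * freelist in one pass instead of walking it twice.
	 */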
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -112,6 +112,7 @@ static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
 
 /*
  * Allocate a new object. If the pool is empty, switch off the debugger.
+ * Must be called with interrupts disabled.
  */
 static struct debug_obj *
 alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
@@ -148,17 +149,18 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
 static void free_object(struct debug_obj *obj)
 {
 	unsigned long idx = (unsigned long)(obj - obj_static_pool);
+	unsigned long flags;
 
 	if (obj_pool_free < ODEBUG_POOL_SIZE || idx < ODEBUG_POOL_SIZE) {
-		spin_lock(&pool_lock);
+		spin_lock_irqsave(&pool_lock, flags);
 		hlist_add_head(&obj->node, &obj_pool);
 		obj_pool_free++;
 		obj_pool_used--;
-		spin_unlock(&pool_lock);
+		spin_unlock_irqrestore(&pool_lock, flags);
 	} else {
-		spin_lock(&pool_lock);
+		spin_lock_irqsave(&pool_lock, flags);
 		obj_pool_used--;
-		spin_unlock(&pool_lock);
+		spin_unlock_irqrestore(&pool_lock, flags);
 		kmem_cache_free(obj_cache, obj);
 	}
 }
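Why the promotion to the irqsave variants matters: free_object() can now run
with interrupts enabled, since the caller no longer holds the irq-disabling
bucket lock around it. A plain spin_lock(&pool_lock) could then be
interrupted on the same CPU by a handler calling debug_object_init(), which
also takes pool_lock. A sketch of the bad interleaving (illustrative, not
from the patch):

	/*
	 * CPU0, process context (IRQs on)    CPU0, interrupt handler
	 * --------------------------------   ------------------------------
	 * free_object()
	 *   spin_lock(&pool_lock);  <-- holds pool_lock, IRQs still on
	 *                                    debug_object_init()
	 *                                      spin_lock(&pool_lock);
	 *                                      <-- spins forever: the holder
	 *                                      is the interrupted task on
	 *                                      this very CPU
	 */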
@@ -171,6 +173,7 @@ static void debug_objects_oom(void)
 {
 	struct debug_bucket *db = obj_hash;
 	struct hlist_node *node, *tmp;
+	HLIST_HEAD(freelist);
 	struct debug_obj *obj;
 	unsigned long flags;
 	int i;
@@ -179,11 +182,14 @@ static void debug_objects_oom(void)
 
 	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
 		spin_lock_irqsave(&db->lock, flags);
-		hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
+		hlist_move_list(&db->list, &freelist);
+		spin_unlock_irqrestore(&db->lock, flags);
+
+		/* Now free them */
+		hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
 			hlist_del(&obj->node);
 			free_object(obj);
 		}
-		spin_unlock_irqrestore(&db->lock, flags);
 	}
 }
 
@@ -498,8 +504,9 @@ void debug_object_free(void *addr, struct debug_obj_descr *descr)
 		return;
 	default:
 		hlist_del(&obj->node);
+		spin_unlock_irqrestore(&db->lock, flags);
 		free_object(obj);
-		break;
+		return;
 	}
 out_unlock:
 	spin_unlock_irqrestore(&db->lock, flags);
@@ -510,6 +517,7 @@ static void __debug_check_no_obj_freed(const void *address, unsigned long size)
 {
 	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
 	struct hlist_node *node, *tmp;
+	HLIST_HEAD(freelist);
 	struct debug_obj_descr *descr;
 	enum debug_obj_state state;
 	struct debug_bucket *db;
@@ -545,11 +553,18 @@ static void __debug_check_no_obj_freed(const void *address, unsigned long size)
 			goto repeat;
 		default:
 			hlist_del(&obj->node);
-			free_object(obj);
+			hlist_add_head(&obj->node, &freelist);
 			break;
 		}
 	}
 	spin_unlock_irqrestore(&db->lock, flags);
+
+	/* Now free them */
+	hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
+		hlist_del(&obj->node);
+		free_object(obj);
+	}
+
 	if (cnt > debug_objects_maxchain)
 		debug_objects_maxchain = cnt;
 }