Merge tag 'core-debugobjects-2023-05-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull debugobjects fixes from Thomas Gleixner:

 "Two fixes for debugobjects:

   - Prevent the allocation path from waking up kswapd. That's a
     long-standing issue caused by the GFP_ATOMIC allocation flag. As
     debug objects can be invoked from pretty much any context, waking
     kswapd can end up in arbitrary lock chains versus the waitqueue
     lock.

   - Correct the explicit lockdep wait-type violation in
     debug_object_fill_pool()"

* tag 'core-debugobjects-2023-05-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  debugobjects: Don't wake up kswapd from fill_pool()
  debugobjects,locking: Annotate debug_object_fill_pool() wait type violation
commit d8f14b84fe
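For context on the first fix: as of this kernel, GFP_ATOMIC is defined in
include/linux/gfp_types.h as (__GFP_HIGH | __GFP_KSWAPD_RECLAIM), so every
atomic allocation implicitly wakes kswapd when watermarks are low. Dropping
down to plain __GFP_HIGH keeps the high-priority access to reserves but
removes the wakeup; __GFP_NORETRY is dropped as well, since it only affects
direct reclaim, which GFP_ATOMIC never enters. A minimal userspace sketch of
the flag arithmetic follows; the bit values here are illustrative, not the
kernel's real ___GFP_* values:

#include <stdio.h>

/* Illustrative bit values; see include/linux/gfp_types.h for the real ones. */
#define __GFP_HIGH		(1u << 0)	/* may dip into memory reserves */
#define __GFP_KSWAPD_RECLAIM	(1u << 1)	/* wake kswapd when watermarks are low */
#define __GFP_NOWARN		(1u << 2)	/* suppress allocation-failure warnings */
#define __GFP_NORETRY		(1u << 3)	/* give up early in direct reclaim */
#define GFP_ATOMIC		(__GFP_HIGH | __GFP_KSWAPD_RECLAIM)

int main(void)
{
	unsigned int old_gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN; /* before */
	unsigned int new_gfp = __GFP_HIGH | __GFP_NOWARN;                 /* after  */

	/* New mask == old mask minus the kswapd wakeup and the (no-op) NORETRY bit. */
	printf("%s\n",
	       (old_gfp & ~(__GFP_KSWAPD_RECLAIM | __GFP_NORETRY)) == new_gfp
	       ? "only kswapd wakeup (and no-op NORETRY) removed"
	       : "masks differ elsewhere");
	return 0;
}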
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -344,6 +344,16 @@ extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
 #define lockdep_repin_lock(l,c)	lock_repin_lock(&(l)->dep_map, (c))
 #define lockdep_unpin_lock(l,c)	lock_unpin_lock(&(l)->dep_map, (c))
 
+/*
+ * Must use lock_map_aquire_try() with override maps to avoid
+ * lockdep thinking they participate in the block chain.
+ */
+#define DEFINE_WAIT_OVERRIDE_MAP(_name, _wait_type)	\
+	struct lockdep_map _name = {			\
+		.name = #_name "-wait-type-override",	\
+		.wait_type_inner = _wait_type,		\
+		.lock_type = LD_LOCK_WAIT_OVERRIDE, }
+
 #else /* !CONFIG_LOCKDEP */
 
 static inline void lockdep_init_task(struct task_struct *task)
@@ -432,6 +442,9 @@ extern int lockdep_is_held(const void *);
 #define lockdep_repin_lock(l, c)	do { (void)(l); (void)(c); } while (0)
 #define lockdep_unpin_lock(l, c)	do { (void)(l); (void)(c); } while (0)
 
+#define DEFINE_WAIT_OVERRIDE_MAP(_name, _wait_type)	\
+	struct lockdep_map __maybe_unused _name = {}
+
 #endif /* !LOCKDEP */
 
 enum xhlock_context_t {
@@ -556,6 +569,7 @@ do { \
 #define rwsem_release(l, i)		lock_release(l, i)
 
 #define lock_map_acquire(l)		lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
+#define lock_map_acquire_try(l)		lock_acquire_exclusive(l, 0, 1, NULL, _THIS_IP_)
 #define lock_map_acquire_read(l)	lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
 #define lock_map_acquire_tryread(l)	lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
 #define lock_map_release(l)		lock_release(l, _THIS_IP_)
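The new machinery is used like this (a kernel-context sketch, not standalone
code; the function and map names here are invented for illustration): wrap a
region whose inner locks should be judged at a different wait-type, and
acquire the map with the _try variant so it never participates in lockdep's
dependency chains.

/* Hypothetical example of the override-map pattern (names invented here).
 * The map is static so the annotation has a single lock class, and the
 * _try acquire keeps it out of lockdep's block-chain tracking.
 */
static void refill_something(void)
{
	static DEFINE_WAIT_OVERRIDE_MAP(refill_map, LD_WAIT_SLEEP);

	lock_map_acquire_try(&refill_map);
	/* ... code whose locks are now checked against LD_WAIT_SLEEP ... */
	lock_map_release(&refill_map);
}

The debugobjects hunk at the end of this diff uses exactly this pattern.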
--- a/include/linux/lockdep_types.h
+++ b/include/linux/lockdep_types.h
@@ -33,6 +33,7 @@ enum lockdep_wait_type {
 enum lockdep_lock_type {
 	LD_LOCK_NORMAL = 0,	/* normal, catch all */
 	LD_LOCK_PERCPU,		/* percpu */
+	LD_LOCK_WAIT_OVERRIDE,	/* annotation */
 	LD_LOCK_MAX,
 };
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2263,6 +2263,9 @@ static inline bool usage_match(struct lock_list *entry, void *mask)
 
 static inline bool usage_skip(struct lock_list *entry, void *mask)
 {
+	if (entry->class->lock_type == LD_LOCK_NORMAL)
+		return false;
+
 	/*
 	 * Skip local_lock() for irq inversion detection.
 	 *
@@ -2289,14 +2292,16 @@ static inline bool usage_skip(struct lock_list *entry, void *mask)
 	 * As a result, we will skip local_lock(), when we search for irq
 	 * inversion bugs.
 	 */
-	if (entry->class->lock_type == LD_LOCK_PERCPU) {
-		if (DEBUG_LOCKS_WARN_ON(entry->class->wait_type_inner < LD_WAIT_CONFIG))
-			return false;
+	if (entry->class->lock_type == LD_LOCK_PERCPU &&
+	    DEBUG_LOCKS_WARN_ON(entry->class->wait_type_inner < LD_WAIT_CONFIG))
+		return false;
 
-		return true;
-	}
+	/*
+	 * Skip WAIT_OVERRIDE for irq inversion detection -- it's not actually
+	 * a lock and only used to override the wait_type.
+	 */
 
-	return false;
+	return true;
 }
 
 /*
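For readability, here is the resulting usage_skip() with the two hunks above
applied (comments elided): normal locks are never skipped, percpu locks are
skipped after a sanity check on their inner wait-type, and the new
WAIT_OVERRIDE pseudo-locks are skipped as well, since they are not real locks.

/* Reconstructed net result of the two hunks above. */
static inline bool usage_skip(struct lock_list *entry, void *mask)
{
	if (entry->class->lock_type == LD_LOCK_NORMAL)
		return false;

	if (entry->class->lock_type == LD_LOCK_PERCPU &&
	    DEBUG_LOCKS_WARN_ON(entry->class->wait_type_inner < LD_WAIT_CONFIG))
		return false;

	/* Both LD_LOCK_PERCPU and LD_LOCK_WAIT_OVERRIDE end up skipped. */
	return true;
}

The next two hunks, also in kernel/locking/lockdep.c, teach
check_wait_context() about the override.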
@@ -4768,7 +4773,8 @@ static int check_wait_context(struct task_struct *curr, struct held_lock *next)
 
 	for (; depth < curr->lockdep_depth; depth++) {
 		struct held_lock *prev = curr->held_locks + depth;
-		u8 prev_inner = hlock_class(prev)->wait_type_inner;
+		struct lock_class *class = hlock_class(prev);
+		u8 prev_inner = class->wait_type_inner;
 
 		if (prev_inner) {
 			/*
@@ -4778,6 +4784,14 @@ static int check_wait_context(struct task_struct *curr, struct held_lock *next)
 			 * Also due to trylocks.
 			 */
 			curr_inner = min(curr_inner, prev_inner);
+
+			/*
+			 * Allow override for annotations -- this is typically
+			 * only valid/needed for code that only exists when
+			 * CONFIG_PREEMPT_RT=n.
+			 */
+			if (unlikely(class->lock_type == LD_LOCK_WAIT_OVERRIDE))
+				curr_inner = prev_inner;
 		}
 	}
 
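Net effect on check_wait_context()'s scan of held locks: normally curr_inner
can only be lowered by the min(), so once a raw spinlock is held every later
acquisition must be at least as atomic; an override map instead resets
curr_inner to the annotation's own wait-type, loosening the check for the
annotated region. The reconstructed loop after the hunks above:

	/* Reconstructed inner loop of check_wait_context() after the change. */
	for (; depth < curr->lockdep_depth; depth++) {
		struct held_lock *prev = curr->held_locks + depth;
		struct lock_class *class = hlock_class(prev);
		u8 prev_inner = class->wait_type_inner;

		if (prev_inner) {
			curr_inner = min(curr_inner, prev_inner);

			/* An override annotation wins over the min() above. */
			if (unlikely(class->lock_type == LD_LOCK_WAIT_OVERRIDE))
				curr_inner = prev_inner;
		}
	}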
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -126,7 +126,7 @@ static const char *obj_states[ODEBUG_STATE_MAX] = {
 
 static void fill_pool(void)
 {
-	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
+	gfp_t gfp = __GFP_HIGH | __GFP_NOWARN;
 	struct debug_obj *obj;
 	unsigned long flags;
 
@@ -591,10 +591,21 @@ static void debug_objects_fill_pool(void)
 {
 	/*
 	 * On RT enabled kernels the pool refill must happen in preemptible
-	 * context:
+	 * context -- for !RT kernels we rely on the fact that spinlock_t and
+	 * raw_spinlock_t are basically the same type and this lock-type
+	 * inversion works just fine.
 	 */
-	if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible())
+	if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) {
+		/*
+		 * Annotate away the spinlock_t inside raw_spinlock_t warning
+		 * by temporarily raising the wait-type to WAIT_SLEEP, matching
+		 * the preemptible() condition above.
+		 */
+		static DEFINE_WAIT_OVERRIDE_MAP(fill_pool_map, LD_WAIT_SLEEP);
+		lock_map_acquire_try(&fill_pool_map);
 		fill_pool();
+		lock_map_release(&fill_pool_map);
+	}
 }
 
 static void
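For context, debug_objects_fill_pool() is called at the start of the
debug_object_*() entry points, before the raw hash-bucket lock is taken.
A sketch of the caller shape around the time of this merge (details may
differ in other kernel versions):

/* Caller shape around this merge (lib/debugobjects.c); may differ across
 * versions. The pool refill, with its wait-type override, runs before
 * the raw db->lock is acquired deeper in __debug_object_init().
 */
void debug_object_init(void *addr, const struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	debug_objects_fill_pool();
	__debug_object_init(addr, descr, 0);
}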