lockdep: move held_lock to lockdep_types.h
held_lock is embedded in task_struct, and we don't want sched.h pulling
in all of lockdep.h.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Acked-by: Waiman Long <longman@redhat.com>
commit 99bac36667 (parent e034d49eb0)
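The rationale is the standard header-layering pattern: task_struct embeds
held_lock by value, so the header defining task_struct needs held_lock's
complete definition, and that definition must therefore live in a cheap
types-only header rather than in the full lockdep.h API header. A minimal
sketch of the pattern, using hypothetical names (foo.h/foo_types.h standing
in for lockdep.h/lockdep_types.h):

/* foo_types.h - bare type definitions, cheap for any header to include */
#ifndef FOO_TYPES_H
#define FOO_TYPES_H

struct foo_lock {
	unsigned long	acquire_ip;
};

#endif /* FOO_TYPES_H */

/* foo.h - the full API header; may pull in many other headers */
#ifndef FOO_H
#define FOO_H
#include "foo_types.h"

void foo_acquire(struct foo_lock *lock);	/* prototypes only need the type */

#endif /* FOO_H */

/* sched-like header: embedding by value requires the complete type,
 * but only from foo_types.h - never the whole foo.h */
#include "foo_types.h"

struct task_like {
	struct foo_lock	held_locks[8];	/* sizeof must be known here */
};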
@@ -82,63 +82,6 @@ struct lock_chain {
 	u64				chain_key;
 };
 
-#define MAX_LOCKDEP_KEYS_BITS		13
-#define MAX_LOCKDEP_KEYS		(1UL << MAX_LOCKDEP_KEYS_BITS)
-#define INITIAL_CHAIN_KEY		-1
-
-struct held_lock {
-	/*
-	 * One-way hash of the dependency chain up to this point. We
-	 * hash the hashes step by step as the dependency chain grows.
-	 *
-	 * We use it for dependency-caching and we skip detection
-	 * passes and dependency-updates if there is a cache-hit, so
-	 * it is absolutely critical for 100% coverage of the validator
-	 * to have a unique key value for every unique dependency path
-	 * that can occur in the system, to make a unique hash value
-	 * as likely as possible - hence the 64-bit width.
-	 *
-	 * The task struct holds the current hash value (initialized
-	 * with zero), here we store the previous hash value:
-	 */
-	u64				prev_chain_key;
-	unsigned long			acquire_ip;
-	struct lockdep_map		*instance;
-	struct lockdep_map		*nest_lock;
-#ifdef CONFIG_LOCK_STAT
-	u64				waittime_stamp;
-	u64				holdtime_stamp;
-#endif
-	/*
-	 * class_idx is zero-indexed; it points to the element in
-	 * lock_classes this held lock instance belongs to. class_idx is in
-	 * the range from 0 to (MAX_LOCKDEP_KEYS-1) inclusive.
-	 */
-	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
-	/*
-	 * The lock-stack is unified in that the lock chains of interrupt
-	 * contexts nest on top of process context chains, but we 'separate'
-	 * the hashes by starting with 0 if we cross into an interrupt
-	 * context, and we also do not add cross-context lock
-	 * dependencies - the lock usage graph walking covers that area
-	 * anyway, and we'd just unnecessarily increase the number of
-	 * dependencies otherwise. [Note: hardirq and softirq contexts
-	 * are separated from each other too.]
-	 *
-	 * The following field is used to detect when we cross into an
-	 * interrupt context:
-	 */
-	unsigned int			irq_context:2; /* bit 0 - soft, bit 1 - hard */
-	unsigned int			trylock:1;			/* 16 bits */
-
-	unsigned int			read:2;		/* see lock_acquire() comment */
-	unsigned int			check:1;	/* see lock_acquire() comment */
-	unsigned int			hardirqs_off:1;
-	unsigned int			sync:1;
-	unsigned int			references:11;			/* 32 bits */
-	unsigned int			pin_count;
-};
-
 /*
  * Initialization, self-test and debugging-output methods:
  */
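The prev_chain_key comment above describes an incremental "hash of hashes":
each acquisition folds the new lock's class into a running 64-bit key, and
each held_lock remembers the key as it was just before that acquisition. A
rough sketch of the scheme (illustrative only; mix64 is a made-up mixer, not
the kernel's actual chain-key function):

#include <stdint.h>

/* Hypothetical stand-in for the kernel's chain-key mixer. */
static uint64_t mix64(uint64_t key, uint32_t class_idx)
{
	key ^= class_idx;
	key *= 0x9e3779b97f4a7c15ULL;	/* 64-bit golden-ratio constant */
	return key ^ (key >> 32);
}

/* On acquire: save the running key (-> prev_chain_key), then extend it.
 * Identical lock sequences yield identical keys, which is what makes
 * the key usable as a cache tag for already-validated chains. */
static uint64_t chain_key_acquire(uint64_t *curr_chain_key, uint32_t class_idx)
{
	uint64_t prev = *curr_chain_key;

	*curr_chain_key = mix64(prev, class_idx);
	return prev;	/* stored in the new held_lock */
}

On release, restoring the running key from the saved previous value rolls the
hash back, which is why held_lock stores the previous rather than the current
value.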
@@ -198,6 +198,63 @@ struct lockdep_map {
 
 struct pin_cookie { unsigned int val; };
 
+#define MAX_LOCKDEP_KEYS_BITS		13
+#define MAX_LOCKDEP_KEYS		(1UL << MAX_LOCKDEP_KEYS_BITS)
+#define INITIAL_CHAIN_KEY		-1
+
+struct held_lock {
+	/*
+	 * One-way hash of the dependency chain up to this point. We
+	 * hash the hashes step by step as the dependency chain grows.
+	 *
+	 * We use it for dependency-caching and we skip detection
+	 * passes and dependency-updates if there is a cache-hit, so
+	 * it is absolutely critical for 100% coverage of the validator
+	 * to have a unique key value for every unique dependency path
+	 * that can occur in the system, to make a unique hash value
+	 * as likely as possible - hence the 64-bit width.
+	 *
+	 * The task struct holds the current hash value (initialized
+	 * with zero), here we store the previous hash value:
+	 */
+	u64				prev_chain_key;
+	unsigned long			acquire_ip;
+	struct lockdep_map		*instance;
+	struct lockdep_map		*nest_lock;
+#ifdef CONFIG_LOCK_STAT
+	u64				waittime_stamp;
+	u64				holdtime_stamp;
+#endif
+	/*
+	 * class_idx is zero-indexed; it points to the element in
+	 * lock_classes this held lock instance belongs to. class_idx is in
+	 * the range from 0 to (MAX_LOCKDEP_KEYS-1) inclusive.
+	 */
+	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
+	/*
+	 * The lock-stack is unified in that the lock chains of interrupt
+	 * contexts nest on top of process context chains, but we 'separate'
+	 * the hashes by starting with 0 if we cross into an interrupt
+	 * context, and we also do not add cross-context lock
+	 * dependencies - the lock usage graph walking covers that area
+	 * anyway, and we'd just unnecessarily increase the number of
+	 * dependencies otherwise. [Note: hardirq and softirq contexts
+	 * are separated from each other too.]
+	 *
+	 * The following field is used to detect when we cross into an
+	 * interrupt context:
+	 */
+	unsigned int			irq_context:2; /* bit 0 - soft, bit 1 - hard */
+	unsigned int			trylock:1;			/* 16 bits */
+
+	unsigned int			read:2;		/* see lock_acquire() comment */
+	unsigned int			check:1;	/* see lock_acquire() comment */
+	unsigned int			hardirqs_off:1;
+	unsigned int			sync:1;
+	unsigned int			references:11;			/* 32 bits */
+	unsigned int			pin_count;
+};
+
 #else /* !CONFIG_LOCKDEP */
 
 /*
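The trailing "16 bits" and "32 bits" comments in the struct are a running
tally of the bit-field widths: class_idx (13) + irq_context (2) + trylock (1)
= 16, and adding read (2) + check (1) + hardirqs_off (1) + sync (1) +
references (11) lands exactly on 32, so the flags pack into a single 32-bit
word alongside class_idx. The same arithmetic as a standalone compile-time
check (a sketch, not kernel code):

#include <assert.h>

/* Widths of the held_lock bit-fields, in declaration order. */
enum {
	BITS_CLASS_IDX		= 13,
	BITS_IRQ_CONTEXT	= 2,
	BITS_TRYLOCK		= 1,	/* running total: 16 bits */
	BITS_READ		= 2,
	BITS_CHECK		= 1,
	BITS_HARDIRQS_OFF	= 1,
	BITS_SYNC		= 1,
	BITS_REFERENCES		= 11	/* running total: 32 bits */
};

static_assert(BITS_CLASS_IDX + BITS_IRQ_CONTEXT + BITS_TRYLOCK == 16,
	      "first half of the bit-field word");
static_assert(BITS_CLASS_IDX + BITS_IRQ_CONTEXT + BITS_TRYLOCK +
	      BITS_READ + BITS_CHECK + BITS_HARDIRQS_OFF +
	      BITS_SYNC + BITS_REFERENCES == 32,
	      "bit-fields fill exactly one 32-bit word");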