sched.h: Move (spin|rwlock)_needbreak() to spinlock.h

This lets sched.h drop its dependency on spinlock.h.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Author: Kent Overstreet <kent.overstreet@linux.dev>
Date:   2023-12-11 14:05:04 -05:00
parent d7a73e3f08
commit d1d71b30e1
2 changed files with 31 additions and 31 deletions

include/linux/sched.h

@@ -2227,37 +2227,6 @@ static inline bool preempt_model_preemptible(void)
 	return preempt_model_full() || preempt_model_rt();
 }
 
-/*
- * Does a critical section need to be broken due to another
- * task waiting?: (technically does not depend on CONFIG_PREEMPTION,
- * but a general need for low latency)
- */
-static inline int spin_needbreak(spinlock_t *lock)
-{
-#ifdef CONFIG_PREEMPTION
-	return spin_is_contended(lock);
-#else
-	return 0;
-#endif
-}
-
-/*
- * Check if a rwlock is contended.
- * Returns non-zero if there is another task waiting on the rwlock.
- * Returns zero if the lock is not contended or the system / underlying
- * rwlock implementation does not support contention detection.
- * Technically does not depend on CONFIG_PREEMPTION, but a general need
- * for low latency.
- */
-static inline int rwlock_needbreak(rwlock_t *lock)
-{
-#ifdef CONFIG_PREEMPTION
-	return rwlock_is_contended(lock);
-#else
-	return 0;
-#endif
-}
-
 static __always_inline bool need_resched(void)
 {
 	return unlikely(tif_need_resched());

include/linux/spinlock.h

@@ -449,6 +449,37 @@ static __always_inline int spin_is_contended(spinlock_t *lock)
 	return raw_spin_is_contended(&lock->rlock);
 }
 
+/*
+ * Does a critical section need to be broken due to another
+ * task waiting?: (technically does not depend on CONFIG_PREEMPTION,
+ * but a general need for low latency)
+ */
+static inline int spin_needbreak(spinlock_t *lock)
+{
+#ifdef CONFIG_PREEMPTION
+	return spin_is_contended(lock);
+#else
+	return 0;
+#endif
+}
+
+/*
+ * Check if a rwlock is contended.
+ * Returns non-zero if there is another task waiting on the rwlock.
+ * Returns zero if the lock is not contended or the system / underlying
+ * rwlock implementation does not support contention detection.
+ * Technically does not depend on CONFIG_PREEMPTION, but a general need
+ * for low latency.
+ */
+static inline int rwlock_needbreak(rwlock_t *lock)
+{
+#ifdef CONFIG_PREEMPTION
+	return rwlock_is_contended(lock);
+#else
+	return 0;
+#endif
+}
+
 #define assert_spin_locked(lock)	assert_raw_spin_locked(&(lock)->rlock)
 
 #else /* !CONFIG_PREEMPT_RT */
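
For readers unfamiliar with these helpers (an editorial aside, not part of the commit): spin_needbreak() lets a long-running critical section voluntarily release a contended lock so a spinning waiter can make progress; it returns non-zero only when CONFIG_PREEMPTION is set and the lock is contended. A minimal sketch of that lock-break pattern, with hypothetical names (demo_lock, demo_items, struct demo_item, demo_process_one):

#include <linux/list.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);	/* hypothetical lock */
static LIST_HEAD(demo_items);		/* hypothetical work queue */

struct demo_item {
	struct list_head list;
};

static void demo_process_one(struct demo_item *item)
{
	/* hypothetical per-item work, done while holding demo_lock */
}

static void demo_drain(void)
{
	spin_lock(&demo_lock);
	while (!list_empty(&demo_items)) {
		struct demo_item *item =
			list_first_entry(&demo_items, struct demo_item, list);

		/* Pop the item first, so dropping the lock below is safe. */
		list_del(&item->list);
		demo_process_one(item);

		/*
		 * Break the critical section if another task is spinning
		 * on demo_lock; with !CONFIG_PREEMPTION spin_needbreak()
		 * is always 0 and this branch compiles away.
		 */
		if (spin_needbreak(&demo_lock)) {
			spin_unlock(&demo_lock);
			spin_lock(&demo_lock);
		}
	}
	spin_unlock(&demo_lock);
}

rwlock_needbreak() supports the same pattern for read-write locks. After this commit, such a caller needs only <linux/spinlock.h> for these helpers rather than all of <linux/sched.h>.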