mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-01-01 10:43:43 +00:00
- Lock the proper critical section when dealing with perf event context
-----BEGIN PGP SIGNATURE----- iQIzBAABCgAdFiEEzv7L6UO9uDPlPSfHEsHwGGHeVUoFAmPfdlAACgkQEsHwGGHe VUrG1BAAvbH5AHHgjiF2WkfPpJ7v4/GhYerks/YTq3uKgnAtCOnsDBS18oRVj63A iDy6VzZUOQ3NcarJoz+eGLjSnLQ4xZY9qm42uHVGKol1Nz9Weu2loIOOSUsINe7S 6qNE6HASM4GHUGJ1uuMxnOt0I0o8d01Eo9ZPd6ieAsmsGc4GLNOgC+h8eKDDlvOz gSTWzQUF29DSIY2JVyZ9lc5pIZ6E+gHnjIUjPdwAbYSgMpjGekNFn/OTkB4ly5G4 ehoXUudTHG/fXQ0fKXmQt4aGbJaplVxf86f/9hpuCaHP8/48Zq/eNf5udNrlhzVU HAkpZcWomtGIeu+y5dyXsh1jm3tQOc5MCSV/LI7+pVl/5jMMn48lyL7HT8K2gJzd XNFrO1KxE0Sk3d1CZKgBXjLSaV5ey8uphlpAEQpbv7zbEYlInpo+SGvUmapCkyYp JFNDK7cCmP1vSaS4DkYbK3YxiGfWgbN/o7tRAFO8yHRl/yjsjNqz0BESpM8AsDz6 UbrluPbjfbkV4HYXEXHlKg+qfgUX4qaTHNNk1m2JUVkRvVgwF5aFEBrZ6IVtNT9S 8KXrOfjXruRSWtcJP9pIeMN/d4Uq7ldkcRHu/yyHHTJqifYk8z8jT/kGs2AQqecO Thh7Iruu3b6HUz2nLRmdeBIRsZn6oAqI+vNLs42l7og2BjQ4QU0= =Yway -----END PGP SIGNATURE----- Merge tag 'perf_urgent_for_v6.2_rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip Pull perf fix from Borislav Petkov: - Lock the proper critical section when dealing with perf event context * tag 'perf_urgent_for_v6.2_rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: perf: Fix perf_event_pmu_context serialization
This commit is contained in:
commit
de506eec89
@ -476,6 +476,15 @@ extern int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
|
||||
#define atomic_dec_and_lock_irqsave(atomic, lock, flags) \
|
||||
__cond_lock(lock, _atomic_dec_and_lock_irqsave(atomic, lock, &(flags)))
|
||||
|
||||
extern int _atomic_dec_and_raw_lock(atomic_t *atomic, raw_spinlock_t *lock);
|
||||
#define atomic_dec_and_raw_lock(atomic, lock) \
|
||||
__cond_lock(lock, _atomic_dec_and_raw_lock(atomic, lock))
|
||||
|
||||
extern int _atomic_dec_and_raw_lock_irqsave(atomic_t *atomic, raw_spinlock_t *lock,
|
||||
unsigned long *flags);
|
||||
#define atomic_dec_and_raw_lock_irqsave(atomic, lock, flags) \
|
||||
__cond_lock(lock, _atomic_dec_and_raw_lock_irqsave(atomic, lock, &(flags)))
|
||||
|
||||
int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
|
||||
size_t max_size, unsigned int cpu_mult,
|
||||
gfp_t gfp, const char *name,
|
||||
|
@ -4813,19 +4813,17 @@ find_get_pmu_context(struct pmu *pmu, struct perf_event_context *ctx,
|
||||
|
||||
cpc = per_cpu_ptr(pmu->cpu_pmu_context, event->cpu);
|
||||
epc = &cpc->epc;
|
||||
|
||||
raw_spin_lock_irq(&ctx->lock);
|
||||
if (!epc->ctx) {
|
||||
atomic_set(&epc->refcount, 1);
|
||||
epc->embedded = 1;
|
||||
raw_spin_lock_irq(&ctx->lock);
|
||||
list_add(&epc->pmu_ctx_entry, &ctx->pmu_ctx_list);
|
||||
epc->ctx = ctx;
|
||||
raw_spin_unlock_irq(&ctx->lock);
|
||||
} else {
|
||||
WARN_ON_ONCE(epc->ctx != ctx);
|
||||
atomic_inc(&epc->refcount);
|
||||
}
|
||||
|
||||
raw_spin_unlock_irq(&ctx->lock);
|
||||
return epc;
|
||||
}
|
||||
|
||||
@ -4896,33 +4894,30 @@ static void free_epc_rcu(struct rcu_head *head)
|
||||
|
||||
static void put_pmu_ctx(struct perf_event_pmu_context *epc)
|
||||
{
|
||||
struct perf_event_context *ctx = epc->ctx;
|
||||
unsigned long flags;
|
||||
|
||||
if (!atomic_dec_and_test(&epc->refcount))
|
||||
/*
|
||||
* XXX
|
||||
*
|
||||
* lockdep_assert_held(&ctx->mutex);
|
||||
*
|
||||
* can't because of the call-site in _free_event()/put_event()
|
||||
* which isn't always called under ctx->mutex.
|
||||
*/
|
||||
if (!atomic_dec_and_raw_lock_irqsave(&epc->refcount, &ctx->lock, flags))
|
||||
return;
|
||||
|
||||
if (epc->ctx) {
|
||||
struct perf_event_context *ctx = epc->ctx;
|
||||
WARN_ON_ONCE(list_empty(&epc->pmu_ctx_entry));
|
||||
|
||||
/*
|
||||
* XXX
|
||||
*
|
||||
* lockdep_assert_held(&ctx->mutex);
|
||||
*
|
||||
* can't because of the call-site in _free_event()/put_event()
|
||||
* which isn't always called under ctx->mutex.
|
||||
*/
|
||||
|
||||
WARN_ON_ONCE(list_empty(&epc->pmu_ctx_entry));
|
||||
raw_spin_lock_irqsave(&ctx->lock, flags);
|
||||
list_del_init(&epc->pmu_ctx_entry);
|
||||
epc->ctx = NULL;
|
||||
raw_spin_unlock_irqrestore(&ctx->lock, flags);
|
||||
}
|
||||
list_del_init(&epc->pmu_ctx_entry);
|
||||
epc->ctx = NULL;
|
||||
|
||||
WARN_ON_ONCE(!list_empty(&epc->pinned_active));
|
||||
WARN_ON_ONCE(!list_empty(&epc->flexible_active));
|
||||
|
||||
raw_spin_unlock_irqrestore(&ctx->lock, flags);
|
||||
|
||||
if (epc->embedded)
|
||||
return;
|
||||
|
||||
|
@ -49,3 +49,34 @@ int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(_atomic_dec_and_lock_irqsave);
|
||||
|
||||
/*
 * Decrement @atomic; if it reaches zero, acquire @lock first so that the
 * "hit zero" transition is serialized against the lock.
 *
 * Returns 1 with @lock HELD when the count dropped to zero (caller must
 * unlock), 0 with @lock NOT held otherwise.
 */
int _atomic_dec_and_raw_lock(atomic_t *atomic, raw_spinlock_t *lock)
{
	/*
	 * Fast path: decrement without the lock as long as the counter
	 * cannot reach zero (i.e. it is not currently 1).
	 */
	if (atomic_add_unless(atomic, -1, 1))
		return 0;

	/*
	 * Slow path: the counter may drop to zero — take the lock and
	 * re-check under it, since another CPU may have raced us here.
	 */
	raw_spin_lock(lock);
	if (atomic_dec_and_test(atomic))
		return 1;	/* dropped to zero: return locked */
	raw_spin_unlock(lock);
	return 0;
}
EXPORT_SYMBOL(_atomic_dec_and_raw_lock);
|
||||
|
||||
/*
 * IRQ-saving variant of _atomic_dec_and_raw_lock().
 *
 * Decrement @atomic; if it reaches zero, acquire @lock with interrupts
 * disabled (state saved in *@flags) before the final decrement.
 *
 * Returns 1 with @lock HELD and IRQs saved in *@flags when the count
 * dropped to zero (caller must raw_spin_unlock_irqrestore()), 0 with
 * @lock NOT held otherwise.
 */
int _atomic_dec_and_raw_lock_irqsave(atomic_t *atomic, raw_spinlock_t *lock,
				     unsigned long *flags)
{
	/*
	 * Fast path: decrement without the lock as long as the counter
	 * cannot reach zero (i.e. it is not currently 1).
	 */
	if (atomic_add_unless(atomic, -1, 1))
		return 0;

	/*
	 * Slow path: the counter may drop to zero — take the lock with
	 * IRQs disabled and re-check under it.
	 */
	raw_spin_lock_irqsave(lock, *flags);
	if (atomic_dec_and_test(atomic))
		return 1;	/* dropped to zero: return locked, IRQs off */
	raw_spin_unlock_irqrestore(lock, *flags);
	return 0;
}
EXPORT_SYMBOL(_atomic_dec_and_raw_lock_irqsave);
|
||||
|
Loading…
Reference in New Issue
Block a user