Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
perfcounters: remove warnings

Impact: remove debug checks

Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit 8fb9331391
parent 94c46572a6
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -64,7 +64,6 @@ x86_perf_counter_update(struct perf_counter *counter,
 {
 	u64 prev_raw_count, new_raw_count, delta;
 
-	WARN_ON_ONCE(counter->state != PERF_COUNTER_STATE_ACTIVE);
 	/*
 	 * Careful: an NMI might modify the previous counter value.
 	 *
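Every check removed by this commit is a WARN_ON_ONCE() instance. WARN_ON_ONCE() evaluates its condition, prints a one-time warning with a stack trace the first time the condition is true at that call site, and yields the condition's truth value. A simplified user-space model of the once-only mechanism (illustrative only; the real macro lives in include/asm-generic/bug.h and also marks the branch unlikely):

#include <stdio.h>

/* Minimal model of WARN_ON_ONCE(): per-callsite static state makes
 * the warning fire at most once, and the condition's value is
 * returned so it can still be tested in an if ().  Uses the GCC
 * statement-expression extension, as the kernel does. */
#define MY_WARN_ON_ONCE(cond) ({				\
	static int __warned;					\
	int __ret = !!(cond);					\
	if (__ret && !__warned) {				\
		__warned = 1;					\
		fprintf(stderr, "warning at %s:%d: %s\n",	\
			__FILE__, __LINE__, #cond);		\
	}							\
	__ret;							\
})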
@@ -89,7 +88,6 @@ again:
 	 * of the count, so we do that by clipping the delta to 32 bits:
 	 */
 	delta = (u64)(u32)((s32)new_raw_count - (s32)prev_raw_count);
-	WARN_ON_ONCE((int)delta < 0);
 
 	atomic64_add(delta, &counter->count);
 	atomic64_sub(delta, &hwc->period_left);
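The warning removed here was a debug aid rather than an invariant: delta is computed modulo 2^32, so a legitimate delta of 2^31 or more looks negative once cast to int, even though the wraparound arithmetic is correct. A small user-space sketch of the same clipping arithmetic (hypothetical values, not kernel code; assumes two's-complement narrowing, as the kernel does):

#include <stdint.h>
#include <stdio.h>

/* Same trick as x86_perf_counter_update(): only the low 32 bits of
 * the hardware counter are meaningful, so the delta is computed
 * with 32-bit wraparound semantics and then widened to 64 bits. */
static uint64_t clipped_delta(uint64_t prev_raw, uint64_t new_raw)
{
	return (uint64_t)(uint32_t)((int32_t)new_raw - (int32_t)prev_raw);
}

int main(void)
{
	/* Counter wrapped past zero: still yields the forward distance. */
	printf("%llu\n", (unsigned long long)clipped_delta(0xfffffff0u, 0x10u));	/* 32 */

	/* Valid delta >= 2^31: (int)delta < 0 would have warned here. */
	printf("%llu\n", (unsigned long long)clipped_delta(0, 0x80000000u));	/* 2147483648 */
	return 0;
}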
@@ -193,7 +191,6 @@ __x86_perf_counter_disable(struct perf_counter *counter,
 	int err;
 
 	err = wrmsr_safe(hwc->config_base + idx, hwc->config, 0);
-	WARN_ON_ONCE(err);
 }
 
 static DEFINE_PER_CPU(u64, prev_left[MAX_HW_COUNTERS]);
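wrmsr_safe() catches a faulting MSR write and reports it through its return value instead of oopsing; after this change err is still assigned but never inspected. If a caller wanted to act on the failure rather than warn, one option would look roughly like this (hypothetical helper, not part of this commit):

/* Hypothetical wrapper: keep the checked write, but let the caller
 * decide how to handle a failed disable instead of warning here. */
static int counter_disable_checked(struct hw_perf_counter *hwc, int idx)
{
	int err;

	err = wrmsr_safe(hwc->config_base + idx, hwc->config, 0);
	if (err)
		pr_debug("perf: disabling counter %d failed\n", idx);
	return err;
}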
@@ -209,8 +206,6 @@ __hw_perf_counter_set_period(struct perf_counter *counter,
 	s32 left = atomic64_read(&hwc->period_left);
 	s32 period = hwc->irq_period;
 
-	WARN_ON_ONCE(period <= 0);
-
 	/*
 	 * If we are way outside a reasoable range then just skip forward:
 	 */
@@ -224,8 +219,6 @@ __hw_perf_counter_set_period(struct perf_counter *counter,
 		atomic64_set(&hwc->period_left, left);
 	}
 
-	WARN_ON_ONCE(left <= 0);
-
 	per_cpu(prev_left[idx], smp_processor_id()) = left;
 
 	/*
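__hw_perf_counter_set_period() computes the next reload value from period_left: the counter is programmed to overflow after left more events, and prev_left caches that value per CPU so the overflow path can reconstruct the count. A rough user-space sketch of the skip-forward logic the surrounding code implements (names mirror the kernel code, but this is an illustration under my reading of the two hunks above, not the kernel function):

#include <stdint.h>

/* Sketch: period_left counts down to the next sampling interrupt.
 * If the counter overshot (left went non-positive), skip forward
 * so the next reload is a positive event count again. */
static int32_t next_reload(int32_t left, int32_t period)
{
	if (left <= 0) {
		left += period;		/* advance by one period */
		if (left <= 0)
			left = period;	/* way outside range: restart */
	}
	return left;
}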
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -218,8 +218,6 @@ struct perf_cpu_context {
 extern int perf_max_counters;
 
 #ifdef CONFIG_PERF_COUNTERS
-extern void
-perf_counter_show(struct perf_counter *counter, char *str, int trace);
 extern const struct hw_perf_counter_ops *
 hw_perf_counter_init(struct perf_counter *counter);
 
@@ -237,8 +235,6 @@ extern int perf_counter_task_enable(void);
 
 #else
 static inline void
-perf_counter_show(struct perf_counter *counter, char *str, int trace) { }
-static inline void
 perf_counter_task_sched_in(struct task_struct *task, int cpu) { }
 static inline void
 perf_counter_task_sched_out(struct task_struct *task, int cpu) { }
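The header keeps each CONFIG_PERF_COUNTERS API in two pieces, a real declaration and an empty static-inline stub, so callers compile unchanged whether or not the feature is built in; retiring perf_counter_show() therefore means deleting both halves, as the two hunks above do. The general pattern, shown with a hypothetical CONFIG_FOO feature:

/* Illustrative only: CONFIG_FOO, struct foo and foo_show() are
 * hypothetical stand-ins for the pattern used in this header. */
struct foo;

#ifdef CONFIG_FOO
extern void foo_show(struct foo *f, char *str, int trace);
#else
static inline void
foo_show(struct foo *f, char *str, int trace) { }
#endif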
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -861,8 +861,6 @@ static void task_clock_perf_counter_update(struct perf_counter *counter)
 	atomic64_set(&counter->hw.prev_count, now);
 
 	delta = now - prev;
-	if (WARN_ON_ONCE(delta < 0))
-		delta = 0;
 
 	atomic64_add(delta, &counter->count);
 }
@@ -906,8 +904,6 @@ static void page_faults_perf_counter_update(struct perf_counter *counter)
 	atomic64_set(&counter->hw.prev_count, now);
 
 	delta = now - prev;
-	if (WARN_ON_ONCE(delta < 0))
-		delta = 0;
 
 	atomic64_add(delta, &counter->count);
 }
@@ -954,8 +950,6 @@ static void context_switches_perf_counter_update(struct perf_counter *counter)
 	atomic64_set(&counter->hw.prev_count, now);
 
 	delta = now - prev;
-	if (WARN_ON_ONCE(delta < 0))
-		delta = 0;
 
 	atomic64_add(delta, &counter->count);
 }
@@ -1000,8 +994,6 @@ static void cpu_migrations_perf_counter_update(struct perf_counter *counter)
 	atomic64_set(&counter->hw.prev_count, now);
 
 	delta = now - prev;
-	if (WARN_ON_ONCE(delta < 0))
-		delta = 0;
 
 	atomic64_add(delta, &counter->count);
 }
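The last four hunks make the same change in four software counters (task clock, page faults, context switches, CPU migrations): each update snapshots a monotonically increasing value, stores it as the new prev_count, accumulates the difference, and no longer clamps a negative delta to zero. A user-space sketch of the shared read-and-accumulate pattern using C11 atomics (the kernel uses atomic64_t; this models the pattern, it is not the kernel code):

#include <stdatomic.h>
#include <stdint.h>

struct sw_counter {
	_Atomic int64_t prev_count;	/* last snapshot */
	_Atomic int64_t count;		/* accumulated total */
};

/* One update step: swap in the new snapshot and add the delta.
 * Without the removed clamp, a source that ever ran backwards
 * would now subtract from the accumulated count. */
static void sw_counter_update(struct sw_counter *c, int64_t now)
{
	int64_t prev = atomic_exchange(&c->prev_count, now);

	atomic_fetch_add(&c->count, now - prev);
}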