Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull leftover perf fixes from Ingo Molnar:
 "Two perf fixes left over from the previous cycle"

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf session: Do not fail on processing out of order event
  x86/asm/traps: Disable tracing and kprobes in fixup_bad_iret and sync_regs
commit bee2782f30
arch/x86/kernel/traps.c
@@ -387,7 +387,7 @@ NOKPROBE_SYMBOL(do_int3);
  * for scheduling or signal handling. The actual stack switch is done in
  * entry.S
  */
-asmlinkage __visible struct pt_regs *sync_regs(struct pt_regs *eregs)
+asmlinkage __visible notrace struct pt_regs *sync_regs(struct pt_regs *eregs)
 {
         struct pt_regs *regs = eregs;
         /* Did already sync */
@@ -413,7 +413,7 @@ struct bad_iret_stack {
         struct pt_regs regs;
 };

-asmlinkage __visible
+asmlinkage __visible notrace
 struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
 {
         /*
@@ -436,6 +436,7 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
         BUG_ON(!user_mode_vm(&new_stack->regs));
         return new_stack;
 }
+NOKPROBE_SYMBOL(fixup_bad_iret);
 #endif

 /*
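For context on the x86 change above: sync_regs() and fixup_bad_iret() run on the exception-entry path before the kernel has switched to a trustworthy stack, so letting the function tracer or a kprobe fire inside them can crash the machine. Marking them notrace keeps ftrace out, and NOKPROBE_SYMBOL() puts fixup_bad_iret() on the kprobes blacklist so no breakpoint can be planted there. The user-space sketch below is illustrative only (fixup_demo is a made-up name): it shows the compiler attribute that the kernel's notrace macro typically expands to.

/*
 * Illustrative sketch, not kernel code.  With -finstrument-functions
 * the compiler emits a hook at function entry and exit; the
 * no_instrument_function attribute (what the kernel's notrace macro
 * typically expands to) suppresses that instrumentation for functions
 * that must never be traced.
 */
#define notrace __attribute__((no_instrument_function))

/* Hypothetical early-entry helper that must not be instrumented. */
static notrace int fixup_demo(int v)
{
        return v + 1;
}

int main(void)
{
        return fixup_demo(41) == 42 ? 0 : 1;
}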
tools/perf/util/event.h
@@ -242,6 +242,7 @@ struct events_stats {
         u32 nr_invalid_chains;
         u32 nr_unknown_id;
         u32 nr_unprocessable_samples;
+        u32 nr_unordered_events;
 };

 struct attr_event {
tools/perf/util/session.c
@@ -533,15 +533,11 @@ int perf_session_queue_event(struct perf_session *s, union perf_event *event,
                 return -ETIME;

         if (timestamp < oe->last_flush) {
-                WARN_ONCE(1, "Timestamp below last timeslice flush\n");
-
-                pr_oe_time(timestamp, "out of order event");
+                pr_oe_time(timestamp, "out of order event\n");
                 pr_oe_time(oe->last_flush, "last flush, last_flush_type %d\n",
                            oe->last_flush_type);

-                /* We could get out of order messages after forced flush. */
-                if (oe->last_flush_type != OE_FLUSH__HALF)
-                        return -EINVAL;
+                s->stats.nr_unordered_events++;
         }

         new = ordered_events__new(oe, timestamp, event);
@@ -1118,6 +1114,9 @@ static void perf_session__warn_about_errors(const struct perf_session *session,
                             "Do you have a KVM guest running and not using 'perf kvm'?\n",
                             session->stats.nr_unprocessable_samples);
         }
+
+        if (session->stats.nr_unordered_events != 0)
+                ui__warning("%u out of order events recorded.\n", session->stats.nr_unordered_events);
 }

 volatile int session_done;
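The perf side of the pull turns an out-of-order timestamp from a hard failure into a counted, non-fatal condition: the event is still queued, nr_unordered_events is incremented, and a single summary warning is printed when the session finishes, instead of aborting with -EINVAL. Below is a standalone sketch of that "count now, warn once at the end" pattern; the names (demo_stats, queue_event) are hypothetical and not the real perf data structures.

#include <stdio.h>

/* Hypothetical stand-in for the perf events_stats counter. */
struct demo_stats {
        unsigned int nr_unordered;
};

/* Accept an event even if its timestamp went backwards; just count it. */
static void queue_event(struct demo_stats *stats,
                        unsigned long long ts, unsigned long long last_flush)
{
        if (ts < last_flush)
                stats->nr_unordered++;   /* previously this was a hard error */
        /* ...the event would still be queued for processing here... */
}

int main(void)
{
        struct demo_stats stats = { 0 };

        queue_event(&stats, 100, 50);    /* in order */
        queue_event(&stats, 40, 50);     /* out of order: counted, not rejected */

        if (stats.nr_unordered != 0)
                fprintf(stderr, "%u out of order events recorded.\n",
                        stats.nr_unordered);
        return 0;
}

Out-of-order events can legitimately show up after a forced flush (as the removed comment noted), so counting and reporting them preserves the diagnostic without throwing away the rest of the session.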