Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git (synced 2024-12-29 09:16:33 +00:00)
commit c25ca0c2e4

Merge tag 'timers_urgent_for_v6.13_rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull timer fix from Borislav Petkov:

 - Handle the case where clocksources with small counter width can, in
   conjunction with overly long idle sleeps, falsely trigger the
   negative motion detection of clocksources

* tag 'timers_urgent_for_v6.13_rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  clocksource: Make negative motion detection more robust
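Before the diff, a quick illustration of the new threshold arithmetic. The following standalone user-space C sketch is not part of the patch; it assumes a hypothetical clocksource with a 24-bit counter purely for illustration, and shows that (mask >> 1) + (mask >> 2) + (mask >> 3) works out to 0.875 of the counter width, comfortably above the old ~mask/2 cutoff that overly long idle sleeps could exceed:

/*
 * Standalone sketch, not kernel code: illustrates the arithmetic behind
 * the new negative-motion threshold. The 24-bit counter width is an
 * assumed example of a "small" clocksource, chosen only for illustration.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t mask = (1ULL << 24) - 1;	/* assumed 24-bit counter */

	/* Old implicit limit: a delta with the top bit of the masked range
	 * set was treated as negative motion, i.e. roughly mask/2. */
	uint64_t old_limit = mask >> 1;

	/* New explicit limit, computed exactly like cs->max_raw_delta in
	 * the patch: 0.5 + 0.25 + 0.125 = 0.875 of the counter width. */
	uint64_t max_raw_delta = (mask >> 1) + (mask >> 2) + (mask >> 3);

	printf("mask          = %llu cycles\n", (unsigned long long)mask);
	printf("old limit     = %llu cycles (~mask/2)\n", (unsigned long long)old_limit);
	printf("max_raw_delta = %llu cycles (0.875 * mask)\n", (unsigned long long)max_raw_delta);

	/* An idle sleep that advances the counter just past mask/2 was
	 * falsely reported as a zero delta by the old check but passes
	 * the new one. */
	uint64_t delta = old_limit + 1;
	printf("delta %llu -> old check: %llu, new check: %llu\n",
	       (unsigned long long)delta,
	       (unsigned long long)((delta & ~(mask >> 1)) ? 0 : delta),
	       (unsigned long long)(delta > max_raw_delta ? 0 : delta));
	return 0;
}

Any counter width can be substituted for the 24-bit example; the point is only that deltas slightly past mask/2 now count as forward motion instead of being clamped to zero.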
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
@@ -49,6 +49,7 @@ struct module;
  * @archdata:		Optional arch-specific data
  * @max_cycles:		Maximum safe cycle value which won't overflow on
  *			multiplication
+ * @max_raw_delta:	Maximum safe delta value for negative motion detection
  * @name:		Pointer to clocksource name
  * @list:		List head for registration (internal)
  * @freq_khz:		Clocksource frequency in khz.
@@ -109,6 +110,7 @@ struct clocksource {
 	struct arch_clocksource_data archdata;
 #endif
 	u64			max_cycles;
+	u64			max_raw_delta;
 	const char		*name;
 	struct list_head	list;
 	u32			freq_khz;
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
@@ -24,7 +24,7 @@ static void clocksource_enqueue(struct clocksource *cs);
 
 static noinline u64 cycles_to_nsec_safe(struct clocksource *cs, u64 start, u64 end)
 {
-	u64 delta = clocksource_delta(end, start, cs->mask);
+	u64 delta = clocksource_delta(end, start, cs->mask, cs->max_raw_delta);
 
 	if (likely(delta < cs->max_cycles))
 		return clocksource_cyc2ns(delta, cs->mult, cs->shift);
@@ -993,6 +993,15 @@ static inline void clocksource_update_max_deferment(struct clocksource *cs)
 	cs->max_idle_ns = clocks_calc_max_nsecs(cs->mult, cs->shift,
 						cs->maxadj, cs->mask,
 						&cs->max_cycles);
+
+	/*
+	 * Threshold for detecting negative motion in clocksource_delta().
+	 *
+	 * Allow for 0.875 of the counter width so that overly long idle
+	 * sleeps, which go slightly over mask/2, do not trigger the
+	 * negative motion detection.
+	 */
+	cs->max_raw_delta = (cs->mask >> 1) + (cs->mask >> 2) + (cs->mask >> 3);
 }
 
 static struct clocksource *clocksource_find_best(bool oneshot, bool skipcur)
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
@@ -755,7 +755,8 @@ static void timekeeping_forward_now(struct timekeeper *tk)
 	u64 cycle_now, delta;
 
 	cycle_now = tk_clock_read(&tk->tkr_mono);
-	delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
+	delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask,
+				  tk->tkr_mono.clock->max_raw_delta);
 	tk->tkr_mono.cycle_last = cycle_now;
 	tk->tkr_raw.cycle_last = cycle_now;
 
@@ -2230,7 +2231,8 @@ static bool timekeeping_advance(enum timekeeping_adv_mode mode)
 		return false;
 
 	offset = clocksource_delta(tk_clock_read(&tk->tkr_mono),
-				   tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
+				   tk->tkr_mono.cycle_last, tk->tkr_mono.mask,
+				   tk->tkr_mono.clock->max_raw_delta);
 
 	/* Check if there's really nothing to do */
 	if (offset < real_tk->cycle_interval && mode == TK_ADV_TICK)
diff --git a/kernel/time/timekeeping_internal.h b/kernel/time/timekeeping_internal.h
@@ -30,15 +30,15 @@ static inline void timekeeping_inc_mg_floor_swaps(void)
 
 #endif
 
-static inline u64 clocksource_delta(u64 now, u64 last, u64 mask)
+static inline u64 clocksource_delta(u64 now, u64 last, u64 mask, u64 max_delta)
 {
 	u64 ret = (now - last) & mask;
 
 	/*
-	 * Prevent time going backwards by checking the MSB of mask in
-	 * the result. If set, return 0.
+	 * Prevent time going backwards by checking the result against
+	 * @max_delta. If greater, return 0.
 	 */
-	return ret & ~(mask >> 1) ? 0 : ret;
+	return ret > max_delta ? 0 : ret;
 }
 
 /* Semi public for serialization of non timekeeper VDSO updates. */
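For completeness, a minimal user-space sketch (again assuming the same hypothetical 24-bit counter, and not taken from the kernel tree) that copies the reworked clocksource_delta() to show that genuine negative motion still collapses to 0 while a legitimate delta just past mask/2 survives:

/*
 * Standalone sketch, not kernel code: user-space copy of the reworked
 * clocksource_delta() from the patch above. The 24-bit mask is an
 * illustrative assumption.
 */
#include <stdio.h>
#include <stdint.h>

static inline uint64_t clocksource_delta(uint64_t now, uint64_t last,
					  uint64_t mask, uint64_t max_delta)
{
	uint64_t ret = (now - last) & mask;

	/* Deltas above max_delta are treated as the counter having moved
	 * backwards and are reported as 0, exactly as in the patch. */
	return ret > max_delta ? 0 : ret;
}

int main(void)
{
	uint64_t mask = (1ULL << 24) - 1;
	uint64_t max_raw_delta = (mask >> 1) + (mask >> 2) + (mask >> 3);

	/* Counter read slightly behind the last recorded value: the masked
	 * subtraction wraps to a huge delta, which the threshold rejects. */
	printf("%llu\n", (unsigned long long)
	       clocksource_delta(100, 200, mask, max_raw_delta));	/* -> 0 */

	/* A long but legitimate idle period just past mask/2 is accepted. */
	printf("%llu\n", (unsigned long long)
	       clocksource_delta((mask >> 1) + 500, 0, mask, max_raw_delta));
	return 0;
}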