timekeeping, clocksource: Fix various typos in comments
Fix ~56 single-word typos in timekeeping & clocksource code comments.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: John Stultz <john.stultz@linaro.org>
Cc: Stephen Boyd <sboyd@kernel.org>
Cc: Daniel Lezcano <daniel.lezcano@linaro.org>
Cc: linux-kernel@vger.kernel.org
parent 44511ab344
commit 4bf07f6562
@@ -18,7 +18,7 @@
 
 #define RATE_32K 32768
 
-#define TIMER_MODE_CONTINOUS 0x1
+#define TIMER_MODE_CONTINUOUS 0x1
 #define TIMER_DOWNCOUNT_VAL 0xffffffff
 
 #define PRCMU_TIMER_REF 0
@@ -55,13 +55,13 @@ static int __init clksrc_dbx500_prcmu_init(struct device_node *node)
 
 /*
 * The A9 sub system expects the timer to be configured as
-* a continous looping timer.
+* a continuous looping timer.
 * The PRCMU should configure it but if it for some reason
 * don't we do it here.
 */
 if (readl(clksrc_dbx500_timer_base + PRCMU_TIMER_MODE) !=
-TIMER_MODE_CONTINOUS) {
-writel(TIMER_MODE_CONTINOUS,
+TIMER_MODE_CONTINUOUS) {
+writel(TIMER_MODE_CONTINUOUS,
 clksrc_dbx500_timer_base + PRCMU_TIMER_MODE);
 writel(TIMER_DOWNCOUNT_VAL,
 clksrc_dbx500_timer_base + PRCMU_TIMER_REF);
@@ -38,7 +38,7 @@ static int __init timer_get_base_and_rate(struct device_node *np,
 }
 
 /*
-* Not all implementations use a periphal clock, so don't panic
+* Not all implementations use a peripheral clock, so don't panic
 * if it's not present
 */
 pclk = of_clk_get_by_name(np, "pclk");
@@ -457,7 +457,7 @@ void __init hv_init_clocksource(void)
 {
 /*
 * Try to set up the TSC page clocksource. If it succeeds, we're
-* done. Otherwise, set up the MSR clocksoruce. At least one of
+* done. Otherwise, set up the MSR clocksource. At least one of
 * these will always be available except on very old versions of
 * Hyper-V on x86. In that case we won't have a Hyper-V
 * clocksource, but Linux will still run with a clocksource based
@@ -455,9 +455,9 @@ static int __init tcb_clksrc_init(struct device_node *node)
 tcaddr = tc.regs;
 
 if (bits == 32) {
-/* use apropriate function to read 32 bit counter */
+/* use appropriate function to read 32 bit counter */
 clksrc.read = tc_get_cycles32;
-/* setup ony channel 0 */
+/* setup only channel 0 */
 tcb_setup_single_chan(&tc, best_divisor_idx);
 tc_sched_clock = tc_sched_clock_read32;
 tc_delay_timer.read_current_timer = tc_delay_timer_read32;
@@ -116,7 +116,7 @@ static int ftm_set_next_event(unsigned long delta,
 * to the MOD register latches the value into a buffer. The MOD
 * register is updated with the value of its write buffer with
 * the following scenario:
-* a, the counter source clock is diabled.
+* a, the counter source clock is disabled.
 */
 ftm_counter_disable(priv->clkevt_base);
 
@@ -237,7 +237,7 @@ static void __init mchp_pit64b_pres_compute(u32 *pres, u32 clk_rate,
 break;
 }
 
-/* Use the bigest prescaler if we didn't match one. */
+/* Use the biggest prescaler if we didn't match one. */
 if (*pres == MCHP_PIT64B_PRES_MAX)
 *pres = MCHP_PIT64B_PRES_MAX - 1;
 }
@@ -211,10 +211,10 @@ int __init timer_of_init(struct device_node *np, struct timer_of *to)
 }
 
 /**
-* timer_of_cleanup - release timer_of ressources
+* timer_of_cleanup - release timer_of resources
 * @to: timer_of structure
 *
-* Release the ressources that has been used in timer_of_init().
+* Release the resources that has been used in timer_of_init().
 * This function should be called in init error cases
 */
 void __init timer_of_cleanup(struct timer_of *to)
@@ -589,7 +589,7 @@ static int __init dmtimer_clockevent_init(struct device_node *np)
 "always-on " : "", t->rate, np->parent);
 
 clockevents_config_and_register(dev, t->rate,
-3, /* Timer internal resynch latency */
+3, /* Timer internal resync latency */
 0xffffffff);
 
 if (of_machine_is_compatible("ti,am33xx") ||
@@ -136,7 +136,7 @@ static int __init pit_clockevent_init(unsigned long rate, int irq)
 /*
 * The value for the LDVAL register trigger is calculated as:
 * LDVAL trigger = (period / clock period) - 1
-* The pit is a 32-bit down count timer, when the conter value
+* The pit is a 32-bit down count timer, when the counter value
 * reaches 0, it will generate an interrupt, thus the minimal
 * LDVAL trigger value is 1. And then the min_delta is
 * minimal LDVAL trigger value + 1, and the max_delta is full 32-bit.
@@ -70,7 +70,7 @@ struct module;
 * @mark_unstable: Optional function to inform the clocksource driver that
 * the watchdog marked the clocksource unstable
 * @tick_stable: Optional function called periodically from the watchdog
-* code to provide stable syncrhonization points
+* code to provide stable synchronization points
 * @wd_list: List head to enqueue into the watchdog list (internal)
 * @cs_last: Last clocksource value for clocksource watchdog
 * @wd_last: Last watchdog value corresponding to @cs_last
@@ -133,7 +133,7 @@
 
 /*
 * kernel variables
-* Note: maximum error = NTP synch distance = dispersion + delay / 2;
+* Note: maximum error = NTP sync distance = dispersion + delay / 2;
 * estimated error = NTP dispersion.
 */
 extern unsigned long tick_usec; /* USER_HZ period (usec) */
@@ -2,13 +2,13 @@
 /*
 * Alarmtimer interface
 *
-* This interface provides a timer which is similarto hrtimers,
+* This interface provides a timer which is similar to hrtimers,
 * but triggers a RTC alarm if the box is suspend.
 *
 * This interface is influenced by the Android RTC Alarm timer
 * interface.
 *
-* Copyright (C) 2010 IBM Corperation
+* Copyright (C) 2010 IBM Corporation
 *
 * Author: John Stultz <john.stultz@linaro.org>
 */
@@ -811,7 +811,7 @@ static long __sched alarm_timer_nsleep_restart(struct restart_block *restart)
 /**
 * alarm_timer_nsleep - alarmtimer nanosleep
 * @which_clock: clockid
-* @flags: determins abstime or relative
+* @flags: determines abstime or relative
 * @tsreq: requested sleep time (abs or rel)
 *
 * Handles clock_nanosleep calls against _ALARM clockids
@@ -38,7 +38,7 @@
 * calculated mult and shift factors. This guarantees that no 64bit
 * overflow happens when the input value of the conversion is
 * multiplied with the calculated mult factor. Larger ranges may
-* reduce the conversion accuracy by chosing smaller mult and shift
+* reduce the conversion accuracy by choosing smaller mult and shift
 * factors.
 */
 void
@@ -518,7 +518,7 @@ static void clocksource_suspend_select(bool fallback)
 * the suspend time when resuming system.
 *
 * This function is called late in the suspend process from timekeeping_suspend(),
-* that means processes are freezed, non-boot cpus and interrupts are disabled
+* that means processes are frozen, non-boot cpus and interrupts are disabled
 * now. It is therefore possible to start the suspend timer without taking the
 * clocksource mutex.
 */
@@ -683,7 +683,7 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
 * T1 is removed, so this code is called and would reprogram
 * the hardware to 5s from now. Any hrtimer_start after that
 * will not reprogram the hardware due to hang_detected being
-* set. So we'd effectivly block all timers until the T2 event
+* set. So we'd effectively block all timers until the T2 event
 * fires.
 */
 if (!__hrtimer_hres_active(cpu_base) || cpu_base->hang_detected)
@@ -1019,7 +1019,7 @@ static void __remove_hrtimer(struct hrtimer *timer,
 * cpu_base->next_timer. This happens when we remove the first
 * timer on a remote cpu. No harm as we never dereference
 * cpu_base->next_timer. So the worst thing what can happen is
-* an superflous call to hrtimer_force_reprogram() on the
+* an superfluous call to hrtimer_force_reprogram() on the
 * remote cpu later on if the same timer gets enqueued again.
 */
 if (reprogram && timer == cpu_base->next_timer)
@@ -1212,7 +1212,7 @@ static void hrtimer_cpu_base_unlock_expiry(struct hrtimer_cpu_base *base)
 * The counterpart to hrtimer_cancel_wait_running().
 *
 * If there is a waiter for cpu_base->expiry_lock, then it was waiting for
-* the timer callback to finish. Drop expiry_lock and reaquire it. That
+* the timer callback to finish. Drop expiry_lock and reacquire it. That
 * allows the waiter to acquire the lock and make progress.
 */
 static void hrtimer_sync_wait_running(struct hrtimer_cpu_base *cpu_base,
@@ -1398,7 +1398,7 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
 int base;
 
 /*
-* On PREEMPT_RT enabled kernels hrtimers which are not explicitely
+* On PREEMPT_RT enabled kernels hrtimers which are not explicitly
 * marked for hard interrupt expiry mode are moved into soft
 * interrupt context for latency reasons and because the callbacks
 * can invoke functions which might sleep on RT, e.g. spin_lock().
@@ -1430,7 +1430,7 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
 * hrtimer_init - initialize a timer to the given clock
 * @timer: the timer to be initialized
 * @clock_id: the clock to be used
-* @mode: The modes which are relevant for intitialization:
+* @mode: The modes which are relevant for initialization:
 * HRTIMER_MODE_ABS, HRTIMER_MODE_REL, HRTIMER_MODE_ABS_SOFT,
 * HRTIMER_MODE_REL_SOFT
 *
@@ -1487,7 +1487,7 @@ EXPORT_SYMBOL_GPL(hrtimer_active);
 * insufficient for that.
 *
 * The sequence numbers are required because otherwise we could still observe
-* a false negative if the read side got smeared over multiple consequtive
+* a false negative if the read side got smeared over multiple consecutive
 * __run_hrtimer() invocations.
 */
 
@@ -1588,7 +1588,7 @@ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now,
 * minimizing wakeups, not running timers at the
 * earliest interrupt after their soft expiration.
 * This allows us to avoid using a Priority Search
-* Tree, which can answer a stabbing querry for
+* Tree, which can answer a stabbing query for
 * overlapping intervals and instead use the simple
 * BST we already have.
 * We don't add extra wakeups by delaying timers that
@@ -1822,7 +1822,7 @@ static void __hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
 clockid_t clock_id, enum hrtimer_mode mode)
 {
 /*
-* On PREEMPT_RT enabled kernels hrtimers which are not explicitely
+* On PREEMPT_RT enabled kernels hrtimers which are not explicitly
 * marked for hard interrupt expiry mode are moved into soft
 * interrupt context either for latency reasons or because the
 * hrtimer callback takes regular spinlocks or invokes other
@@ -1835,7 +1835,7 @@ static void __hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
 * the same CPU. That causes a latency spike due to the wakeup of
 * a gazillion threads.
 *
-* OTOH, priviledged real-time user space applications rely on the
+* OTOH, privileged real-time user space applications rely on the
 * low latency of hard interrupt wakeups. If the current task is in
 * a real-time scheduling class, mark the mode for hard interrupt
 * expiry.
@@ -44,7 +44,7 @@ static u64 jiffies_read(struct clocksource *cs)
 * the timer interrupt frequency HZ and it suffers
 * inaccuracies caused by missed or lost timer
 * interrupts and the inability for the timer
-* interrupt hardware to accuratly tick at the
+* interrupt hardware to accurately tick at the
 * requested HZ value. It is also not recommended
 * for "tick-less" systems.
 */
@@ -544,7 +544,7 @@ static inline bool rtc_tv_nsec_ok(unsigned long set_offset_nsec,
 struct timespec64 *to_set,
 const struct timespec64 *now)
 {
-/* Allowed error in tv_nsec, arbitarily set to 5 jiffies in ns. */
+/* Allowed error in tv_nsec, arbitrarily set to 5 jiffies in ns. */
 const unsigned long TIME_SET_NSEC_FUZZ = TICK_NSEC * 5;
 struct timespec64 delay = {.tv_sec = -1,
 .tv_nsec = set_offset_nsec};
@@ -279,7 +279,7 @@ void thread_group_sample_cputime(struct task_struct *tsk, u64 *samples)
 * @tsk: Task for which cputime needs to be started
 * @samples: Storage for time samples
 *
-* The thread group cputime accouting is avoided when there are no posix
+* The thread group cputime accounting is avoided when there are no posix
 * CPU timers armed. Before starting a timer it's required to check whether
 * the time accounting is active. If not, a full update of the atomic
 * accounting store needs to be done and the accounting enabled.
@@ -390,7 +390,7 @@ static int posix_cpu_timer_create(struct k_itimer *new_timer)
 /*
 * If posix timer expiry is handled in task work context then
 * timer::it_lock can be taken without disabling interrupts as all
-* other locking happens in task context. This requires a seperate
+* other locking happens in task context. This requires a separate
 * lock class key otherwise regular posix timer expiry would record
 * the lock class being taken in interrupt context and generate a
 * false positive warning.
@@ -1216,7 +1216,7 @@ static void handle_posix_cpu_timers(struct task_struct *tsk)
 check_process_timers(tsk, &firing);
 
 /*
-* The above timer checks have updated the exipry cache and
+* The above timer checks have updated the expiry cache and
 * because nothing can have queued or modified timers after
 * sighand lock was taken above it is guaranteed to be
 * consistent. So the next timer interrupt fastpath check
@@ -53,7 +53,7 @@ static int bc_set_next(ktime_t expires, struct clock_event_device *bc)
 * reasons.
 *
 * Each caller tries to arm the hrtimer on its own CPU, but if the
-* hrtimer callbback function is currently running, then
+* hrtimer callback function is currently running, then
 * hrtimer_start() cannot move it and the timer stays on the CPU on
 * which it is assigned at the moment.
 *
@@ -157,7 +157,7 @@ static void tick_device_setup_broadcast_func(struct clock_event_device *dev)
 }
 
 /*
-* Check, if the device is disfunctional and a place holder, which
+* Check, if the device is dysfunctional and a placeholder, which
 * needs to be handled by the broadcast device.
 */
 int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
@@ -391,7 +391,7 @@ void tick_broadcast_control(enum tick_broadcast_mode mode)
 * - the broadcast device exists
 * - the broadcast device is not a hrtimer based one
 * - the broadcast device is in periodic mode to
-* avoid a hickup during switch to oneshot mode
+* avoid a hiccup during switch to oneshot mode
 */
 if (bc && !(bc->features & CLOCK_EVT_FEAT_HRTIMER) &&
 tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
@@ -45,7 +45,7 @@ int tick_program_event(ktime_t expires, int force)
 }
 
 /**
-* tick_resume_onshot - resume oneshot mode
+* tick_resume_oneshot - resume oneshot mode
 */
 void tick_resume_oneshot(void)
 {
@@ -751,7 +751,7 @@ static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
 * Aside of that check whether the local timer softirq is
 * pending. If so its a bad idea to call get_next_timer_interrupt()
 * because there is an already expired timer, so it will request
-* immeditate expiry, which rearms the hardware timer with a
+* immediate expiry, which rearms the hardware timer with a
 * minimal delta which brings us back to this place
 * immediately. Lather, rinse and repeat...
 */
@@ -29,7 +29,7 @@ enum tick_nohz_mode {
 * @inidle: Indicator that the CPU is in the tick idle mode
 * @tick_stopped: Indicator that the idle tick has been stopped
 * @idle_active: Indicator that the CPU is actively in the tick idle mode;
-* it is resetted during irq handling phases.
+* it is reset during irq handling phases.
 * @do_timer_lst: CPU was the last one doing do_timer before going idle
 * @got_idle_tick: Tick timer function has run with @inidle set
 * @last_tick: Store the last tick expiry time when the tick
@@ -571,7 +571,7 @@ EXPORT_SYMBOL(__usecs_to_jiffies);
 /*
 * The TICK_NSEC - 1 rounds up the value to the next resolution. Note
 * that a remainder subtract here would not do the right thing as the
-* resolution values don't fall on second boundries. I.e. the line:
+* resolution values don't fall on second boundaries. I.e. the line:
 * nsec -= nsec % TICK_NSEC; is NOT a correct resolution rounding.
 * Note that due to the small error in the multiplier here, this
 * rounding is incorrect for sufficiently large values of tv_nsec, but
@@ -596,14 +596,14 @@ EXPORT_SYMBOL_GPL(ktime_get_real_fast_ns);
 * careful cache layout of the timekeeper because the sequence count and
 * struct tk_read_base would then need two cache lines instead of one.
 *
-* Access to the time keeper clock source is disabled accross the innermost
+* Access to the time keeper clock source is disabled across the innermost
 * steps of suspend/resume. The accessors still work, but the timestamps
 * are frozen until time keeping is resumed which happens very early.
 *
 * For regular suspend/resume there is no observable difference vs. sched
 * clock, but it might affect some of the nasty low level debug printks.
 *
-* OTOH, access to sched clock is not guaranteed accross suspend/resume on
+* OTOH, access to sched clock is not guaranteed across suspend/resume on
 * all systems either so it depends on the hardware in use.
 *
 * If that turns out to be a real problem then this could be mitigated by
@@ -899,7 +899,7 @@ ktime_t ktime_get_coarse_with_offset(enum tk_offsets offs)
 EXPORT_SYMBOL_GPL(ktime_get_coarse_with_offset);
 
 /**
-* ktime_mono_to_any() - convert mononotic time to any other time
+* ktime_mono_to_any() - convert monotonic time to any other time
 * @tmono: time to convert.
 * @offs: which offset to use
 */
@@ -1948,7 +1948,7 @@ static __always_inline void timekeeping_apply_adjustment(struct timekeeper *tk,
 * xtime_nsec_1 = offset + xtime_nsec_2
 * Which gives us:
 * xtime_nsec_2 = xtime_nsec_1 - offset
-* Which simplfies to:
+* Which simplifies to:
 * xtime_nsec -= offset
 */
 if ((mult_adj > 0) && (tk->tkr_mono.mult + mult_adj < mult_adj)) {
@@ -2336,7 +2336,7 @@ static int timekeeping_validate_timex(const struct __kernel_timex *txc)
 
 /*
 * Validate if a timespec/timeval used to inject a time
-* offset is valid. Offsets can be postive or negative, so
+* offset is valid. Offsets can be positive or negative, so
 * we don't check tv_sec. The value of the timeval/timespec
 * is the sum of its fields,but *NOTE*:
 * The field tv_usec/tv_nsec must always be non-negative and
@@ -894,7 +894,7 @@ static inline void forward_timer_base(struct timer_base *base)
 /*
 * No need to forward if we are close enough below jiffies.
 * Also while executing timers, base->clk is 1 offset ahead
-* of jiffies to avoid endless requeuing to current jffies.
+* of jiffies to avoid endless requeuing to current jiffies.
 */
 if ((long)(jnow - base->clk) < 1)
 return;
@@ -1271,7 +1271,7 @@ static inline void timer_base_unlock_expiry(struct timer_base *base)
 * The counterpart to del_timer_wait_running().
 *
 * If there is a waiter for base->expiry_lock, then it was waiting for the
-* timer callback to finish. Drop expiry_lock and reaquire it. That allows
+* timer callback to finish. Drop expiry_lock and reacquire it. That allows
 * the waiter to acquire the lock and make progress.
 */
 static void timer_sync_wait_running(struct timer_base *base)
@@ -108,7 +108,7 @@ void update_vsyscall(struct timekeeper *tk)
 
 /*
 * If the current clocksource is not VDSO capable, then spare the
-* update of the high reolution parts.
+* update of the high resolution parts.
 */
 if (clock_mode != VDSO_CLOCKMODE_NONE)
 update_vdso_data(vdata, tk);
@@ -3,7 +3,7 @@
 * (C) Copyright IBM 2012
 * Licensed under the GPLv2
 *
-* NOTE: This is a meta-test which quickly changes the clocksourc and
+* NOTE: This is a meta-test which quickly changes the clocksource and
 * then uses other tests to detect problems. Thus this test requires
 * that the inconsistency-check and nanosleep tests be present in the
 * same directory it is run from.
@@ -134,7 +134,7 @@ int main(int argv, char **argc)
 return -1;
 }
 
-/* Check everything is sane before we start switching asyncrhonously */
+/* Check everything is sane before we start switching asynchronously */
 for (i = 0; i < count; i++) {
 printf("Validating clocksource %s\n", clocksource_list[i]);
 if (change_clocksource(clocksource_list[i])) {
@@ -5,7 +5,7 @@
 * Licensed under the GPLv2
 *
 * This test signals the kernel to insert a leap second
-* every day at midnight GMT. This allows for stessing the
+* every day at midnight GMT. This allows for stressing the
 * kernel's leap-second behavior, as well as how well applications
 * handle the leap-second discontinuity.
 *
@@ -4,10 +4,10 @@
 * (C) Copyright 2013, 2015 Linaro Limited
 * Licensed under the GPL
 *
-* This test demonstrates leapsecond deadlock that is possibe
+* This test demonstrates leapsecond deadlock that is possible
 * on kernels from 2.6.26 to 3.3.
 *
-* WARNING: THIS WILL LIKELY HARDHANG SYSTEMS AND MAY LOSE DATA
+* WARNING: THIS WILL LIKELY HARD HANG SYSTEMS AND MAY LOSE DATA
 * RUN AT YOUR OWN RISK!
 * To build:
 * $ gcc leapcrash.c -o leapcrash -lrt
@@ -76,7 +76,7 @@ void checklist(struct timespec *list, int size)
 
 /* The shared thread shares a global list
 * that each thread fills while holding the lock.
-* This stresses clock syncronization across cpus.
+* This stresses clock synchronization across cpus.
 */
 void *shared_thread(void *arg)
 {