seqlock, treewide: Switch to non-raw seqcount_latch interface
Switch all instrumentable users of the seqcount_latch interface over to
the non-raw interface.

Co-developed-by: "Peter Zijlstra (Intel)" <peterz@infradead.org>
Signed-off-by: "Peter Zijlstra (Intel)" <peterz@infradead.org>
Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20241104161910.780003-5-elver@google.com
commit 93190bc35d
parent 5c1806c41c
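For orientation, here is a minimal sketch of the write/read pattern that every call site below now follows, modeled on the latched_seq helpers changed by this patch (kernel/printk). The struct and function names in the sketch (latched_val, latched_val_write, latched_val_read) are illustrative only and not part of the commit; unlike the raw variants, the non-raw calls are the instrumentable ones this commit switches to.

    /* Sketch only: mirrors the latched_seq_write()/latched_seq_read_nolock() pattern. */
    #include <linux/seqlock.h>

    struct latched_val {
            seqcount_latch_t        latch;
            u64                     val[2];
    };

    /* Writer: callers must serialize against each other (e.g. under a lock). */
    static void latched_val_write(struct latched_val *lv, u64 val)
    {
            write_seqcount_latch_begin(&lv->latch); /* steer readers to val[1] */
            lv->val[0] = val;
            write_seqcount_latch(&lv->latch);       /* steer readers back to val[0] */
            lv->val[1] = val;
            write_seqcount_latch_end(&lv->latch);
    }

    /* Reader: works from any context, including NMI. */
    static u64 latched_val_read(struct latched_val *lv)
    {
            unsigned int seq;
            u64 val;

            do {
                    seq = read_seqcount_latch(&lv->latch);
                    val = lv->val[seq & 1];
            } while (read_seqcount_latch_retry(&lv->latch, seq));

            return val;
    }

Compared to the old raw sequence, the explicit write_seqcount_latch_end() presumably gives the instrumentation a clearly delimited write section, which is why each writer below gains one extra call.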
@@ -174,10 +174,11 @@ static void __set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long ts
 
 	c2n = per_cpu_ptr(&cyc2ns, cpu);
 
-	raw_write_seqcount_latch(&c2n->seq);
+	write_seqcount_latch_begin(&c2n->seq);
 	c2n->data[0] = data;
-	raw_write_seqcount_latch(&c2n->seq);
+	write_seqcount_latch(&c2n->seq);
 	c2n->data[1] = data;
+	write_seqcount_latch_end(&c2n->seq);
 }
 
 static void set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now)
@@ -14,7 +14,7 @@
  *
  * If we need to allow unconditional lookups (say as required for NMI context
  * usage) we need a more complex setup; this data structure provides this by
- * employing the latch technique -- see @raw_write_seqcount_latch -- to
+ * employing the latch technique -- see @write_seqcount_latch_begin -- to
  * implement a latched RB-tree which does allow for unconditional lookups by
  * virtue of always having (at least) one stable copy of the tree.
  *
@@ -132,7 +132,7 @@ __lt_find(void *key, struct latch_tree_root *ltr, int idx,
  * @ops: operators defining the node order
  *
  * It inserts @node into @root in an ordered fashion such that we can always
- * observe one complete tree. See the comment for raw_write_seqcount_latch().
+ * observe one complete tree. See the comment for write_seqcount_latch_begin().
  *
  * The inserts use rcu_assign_pointer() to publish the element such that the
  * tree structure is stored before we can observe the new @node.
@@ -145,10 +145,11 @@ latch_tree_insert(struct latch_tree_node *node,
 		  struct latch_tree_root *root,
 		  const struct latch_tree_ops *ops)
 {
-	raw_write_seqcount_latch(&root->seq);
+	write_seqcount_latch_begin(&root->seq);
 	__lt_insert(node, root, 0, ops->less);
-	raw_write_seqcount_latch(&root->seq);
+	write_seqcount_latch(&root->seq);
 	__lt_insert(node, root, 1, ops->less);
+	write_seqcount_latch_end(&root->seq);
 }
 
 /**
@@ -159,7 +160,7 @@ latch_tree_insert(struct latch_tree_node *node,
  *
  * Removes @node from the trees @root in an ordered fashion such that we can
  * always observe one complete tree. See the comment for
- * raw_write_seqcount_latch().
+ * write_seqcount_latch_begin().
  *
  * It is assumed that @node will observe one RCU quiescent state before being
  * reused of freed.
@@ -172,10 +173,11 @@ latch_tree_erase(struct latch_tree_node *node,
 		 struct latch_tree_root *root,
 		 const struct latch_tree_ops *ops)
 {
-	raw_write_seqcount_latch(&root->seq);
+	write_seqcount_latch_begin(&root->seq);
 	__lt_erase(node, root, 0);
-	raw_write_seqcount_latch(&root->seq);
+	write_seqcount_latch(&root->seq);
 	__lt_erase(node, root, 1);
+	write_seqcount_latch_end(&root->seq);
 }
 
 /**
@@ -204,9 +206,9 @@ latch_tree_find(void *key, struct latch_tree_root *root,
 	unsigned int seq;
 
 	do {
-		seq = raw_read_seqcount_latch(&root->seq);
+		seq = read_seqcount_latch(&root->seq);
 		node = __lt_find(key, root, seq & 1, ops->comp);
-	} while (raw_read_seqcount_latch_retry(&root->seq, seq));
+	} while (read_seqcount_latch_retry(&root->seq, seq));
 
 	return node;
 }
@@ -560,10 +560,11 @@ bool printk_percpu_data_ready(void)
 /* Must be called under syslog_lock. */
 static void latched_seq_write(struct latched_seq *ls, u64 val)
 {
-	raw_write_seqcount_latch(&ls->latch);
+	write_seqcount_latch_begin(&ls->latch);
 	ls->val[0] = val;
-	raw_write_seqcount_latch(&ls->latch);
+	write_seqcount_latch(&ls->latch);
 	ls->val[1] = val;
+	write_seqcount_latch_end(&ls->latch);
 }
 
 /* Can be called from any context. */
@@ -574,10 +575,10 @@ static u64 latched_seq_read_nolock(struct latched_seq *ls)
 	u64 val;
 
 	do {
-		seq = raw_read_seqcount_latch(&ls->latch);
+		seq = read_seqcount_latch(&ls->latch);
 		idx = seq & 0x1;
 		val = ls->val[idx];
-	} while (raw_read_seqcount_latch_retry(&ls->latch, seq));
+	} while (read_seqcount_latch_retry(&ls->latch, seq));
 
 	return val;
 }
@@ -71,13 +71,13 @@ static __always_inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift)
 
 notrace struct clock_read_data *sched_clock_read_begin(unsigned int *seq)
 {
-	*seq = raw_read_seqcount_latch(&cd.seq);
+	*seq = read_seqcount_latch(&cd.seq);
 	return cd.read_data + (*seq & 1);
 }
 
 notrace int sched_clock_read_retry(unsigned int seq)
 {
-	return raw_read_seqcount_latch_retry(&cd.seq, seq);
+	return read_seqcount_latch_retry(&cd.seq, seq);
 }
 
 static __always_inline unsigned long long __sched_clock(void)
@@ -132,16 +132,18 @@ unsigned long long notrace sched_clock(void)
 static void update_clock_read_data(struct clock_read_data *rd)
 {
 	/* steer readers towards the odd copy */
-	raw_write_seqcount_latch(&cd.seq);
+	write_seqcount_latch_begin(&cd.seq);
 
 	/* now its safe for us to update the normal (even) copy */
 	cd.read_data[0] = *rd;
 
 	/* switch readers back to the even copy */
-	raw_write_seqcount_latch(&cd.seq);
+	write_seqcount_latch(&cd.seq);
 
 	/* update the backup (odd) copy with the new data */
 	cd.read_data[1] = *rd;
+
+	write_seqcount_latch_end(&cd.seq);
 }
 
 /*
@@ -279,7 +281,7 @@ void __init generic_sched_clock_init(void)
  */
 static u64 notrace suspended_sched_clock_read(void)
 {
-	unsigned int seq = raw_read_seqcount_latch(&cd.seq);
+	unsigned int seq = read_seqcount_latch(&cd.seq);
 
 	return cd.read_data[seq & 1].epoch_cyc;
 }
@@ -411,7 +411,7 @@ static inline u64 timekeeping_get_ns(const struct tk_read_base *tkr)
  * We want to use this from any context including NMI and tracing /
  * instrumenting the timekeeping code itself.
  *
- * Employ the latch technique; see @raw_write_seqcount_latch.
+ * Employ the latch technique; see @write_seqcount_latch.
  *
  * So if a NMI hits the update of base[0] then it will use base[1]
 * which is still consistent. In the worst case this can result is a
@@ -424,16 +424,18 @@ static void update_fast_timekeeper(const struct tk_read_base *tkr,
 	struct tk_read_base *base = tkf->base;
 
 	/* Force readers off to base[1] */
-	raw_write_seqcount_latch(&tkf->seq);
+	write_seqcount_latch_begin(&tkf->seq);
 
 	/* Update base[0] */
 	memcpy(base, tkr, sizeof(*base));
 
 	/* Force readers back to base[0] */
-	raw_write_seqcount_latch(&tkf->seq);
+	write_seqcount_latch(&tkf->seq);
 
 	/* Update base[1] */
 	memcpy(base + 1, base, sizeof(*base));
+
+	write_seqcount_latch_end(&tkf->seq);
 }
 
 static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
@@ -443,11 +445,11 @@ static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
 	u64 now;
 
 	do {
-		seq = raw_read_seqcount_latch(&tkf->seq);
+		seq = read_seqcount_latch(&tkf->seq);
 		tkr = tkf->base + (seq & 0x01);
 		now = ktime_to_ns(tkr->base);
 		now += __timekeeping_get_ns(tkr);
-	} while (raw_read_seqcount_latch_retry(&tkf->seq, seq));
+	} while (read_seqcount_latch_retry(&tkf->seq, seq));
 
 	return now;
 }