mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
percpu_counter: add percpu_counter_sync()
percpu_counter's accuracy is related to its batch size.  For a
percpu_counter with a big batch, its deviation could be big, so when the
counter's batch is runtime changed to a smaller value for better accuracy,
there could also be a requirement to reduce the big deviation.

So add a percpu-counter sync function to be run on each CPU.

Reported-by: kernel test robot <rong.a.chen@intel.com>
Signed-off-by: Feng Tang <feng.tang@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Dennis Zhou <dennis@kernel.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Qian Cai <cai@lca.pw>
Cc: Andi Kleen <andi.kleen@intel.com>
Cc: Huang Ying <ying.huang@intel.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Haiyang Zhang <haiyangz@microsoft.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Kees Cook <keescook@chromium.org>
Cc: "K. Y. Srinivasan" <kys@microsoft.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Tim Chen <tim.c.chen@intel.com>
Link: http://lkml.kernel.org/r/1594389708-60781-4-git-send-email-feng.tang@intel.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
4e2ee51e82
commit
0a4954a850
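
As the commit message above explains, each CPU accumulates updates locally and only folds them into the shared count once its local delta reaches the batch, so a cheap percpu_counter_read() can lag the true value by roughly batch * num_online_cpus(). The sketch below is not part of this commit; it only illustrates that trade-off using the existing percpu_counter API. The demo_counter, demo_init() and BIG_BATCH names are made up for illustration.

/*
 * Illustrative sketch only (not from this commit): demo_counter, demo_init()
 * and BIG_BATCH are hypothetical; the percpu_counter_*() calls are the
 * existing API.
 */
#include <linux/gfp.h>
#include <linux/percpu_counter.h>
#include <linux/printk.h>

#define BIG_BATCH	(1 << 16)

static struct percpu_counter demo_counter;

static int demo_init(void)
{
	int ret = percpu_counter_init(&demo_counter, 0, GFP_KERNEL);

	if (ret)
		return ret;

	/*
	 * With a big batch, each CPU keeps up to ~BIG_BATCH worth of updates
	 * in its local counter before folding them into demo_counter.count,
	 * so the cheap read below may be far from the truth ...
	 */
	percpu_counter_add_batch(&demo_counter, 1, BIG_BATCH);
	pr_info("approx read: %lld\n", percpu_counter_read(&demo_counter));

	/* ... while the exact (but slower) sum walks every CPU's delta. */
	pr_info("exact sum:   %lld\n", percpu_counter_sum(&demo_counter));

	percpu_counter_destroy(&demo_counter);
	return 0;
}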
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -44,6 +44,7 @@ void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount,
 			      s32 batch);
 s64 __percpu_counter_sum(struct percpu_counter *fbc);
 int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);
+void percpu_counter_sync(struct percpu_counter *fbc);
 
 static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
 {
@@ -172,6 +173,9 @@ static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
 	return true;
 }
 
+static inline void percpu_counter_sync(struct percpu_counter *fbc)
+{
+}
 #endif	/* CONFIG_SMP */
 
 static inline void percpu_counter_inc(struct percpu_counter *fbc)
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -98,6 +98,25 @@ void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
 }
 EXPORT_SYMBOL(percpu_counter_add_batch);
 
+/*
+ * For a percpu_counter with a big batch, the deviation of its count could
+ * be big, and there is a requirement to reduce the deviation, like when the
+ * counter's batch could be runtime decreased to get a better accuracy,
+ * which can be achieved by running this sync function on each CPU.
+ */
+void percpu_counter_sync(struct percpu_counter *fbc)
+{
+	unsigned long flags;
+	s64 count;
+
+	raw_spin_lock_irqsave(&fbc->lock, flags);
+	count = __this_cpu_read(*fbc->counters);
+	fbc->count += count;
+	__this_cpu_sub(*fbc->counters, count);
+	raw_spin_unlock_irqrestore(&fbc->lock, flags);
+}
+EXPORT_SYMBOL(percpu_counter_sync);
+
 /*
  * Add up all the per-cpu counts, return the result.  This is a more accurate
  * but much slower version of percpu_counter_read_positive()
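
Note that percpu_counter_sync() folds only the calling CPU's local delta into fbc->count (under fbc->lock with interrupts disabled), so a caller that wants the whole accumulated deviation removed must arrange for it to run on every CPU, as the new comment says. Below is a hedged sketch of that pattern, not taken from this commit: the counter, the tunable batch variable and the my_* helpers are hypothetical, while percpu_counter_sync(), percpu_counter_add_batch() and schedule_on_each_cpu() are the real interfaces.

#include <linux/percpu_counter.h>
#include <linux/workqueue.h>

static struct percpu_counter my_counter;	/* hypothetical counter */
static int my_batch = 4096;			/* hypothetical runtime-tunable batch */

/* Hypothetical update path: accumulate with whatever batch is current. */
static void my_counter_add(s64 amount)
{
	percpu_counter_add_batch(&my_counter, amount, my_batch);
}

/* Runs once on each CPU: fold that CPU's local delta into my_counter.count. */
static void my_counter_sync_work(struct work_struct *dummy)
{
	percpu_counter_sync(&my_counter);
}

static void my_counter_shrink_batch(void)
{
	/* Switch future updates to a small batch for better read accuracy ... */
	my_batch = 1;

	/*
	 * ... and flush the deltas accumulated under the old, larger batch;
	 * otherwise percpu_counter_read() could stay off by roughly
	 * old_batch * num_online_cpus() until enough new updates arrive.
	 */
	schedule_on_each_cpu(my_counter_sync_work);
}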