/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PERCPU_COUNTER_H
#define _LINUX_PERCPU_COUNTER_H
/*
 * A simple "approximate counter" for use in ext2 and ext3 superblocks.
 *
 * WARNING: these things are HUGE.  4 kbytes per counter on 32-way P4.
 */

#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/list.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/types.h>

/* percpu_counter batch for local add or sub */
#define PERCPU_COUNTER_LOCAL_BATCH	INT_MAX

#ifdef CONFIG_SMP

struct percpu_counter {
	raw_spinlock_t lock;
	s64 count;
#ifdef CONFIG_HOTPLUG_CPU
	struct list_head list;	/* All percpu_counters are on a list */
#endif
	s32 __percpu *counters;
};
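
/*
 * Rough sketch of the mechanism (lib/percpu_counter.c has the real thing):
 * updates accumulate in the CPU-local s32 in *counters, and are folded
 * into the shared fbc->count, under lock, only when the local value
 * crosses the batch.  fbc->count therefore lags the true sum by up to
 * roughly batch * num_online_cpus().
 */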

extern int percpu_counter_batch;

int __percpu_counter_init_many(struct percpu_counter *fbc, s64 amount,
			       gfp_t gfp, u32 nr_counters,
			       struct lock_class_key *key);

#define percpu_counter_init_many(fbc, value, gfp, nr_counters)		\
	({								\
		static struct lock_class_key __key;			\
									\
		__percpu_counter_init_many(fbc, value, gfp, nr_counters,\
					   &__key);			\
	})


#define percpu_counter_init(fbc, value, gfp)				\
	percpu_counter_init_many(fbc, value, gfp, 1)

void percpu_counter_destroy_many(struct percpu_counter *fbc, u32 nr_counters);
static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
	percpu_counter_destroy_many(fbc, 1);
}
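
/*
 * Minimal lifecycle sketch; "nr_things" is an invented example name:
 *
 *	struct percpu_counter nr_things;
 *
 *	if (percpu_counter_init(&nr_things, 0, GFP_KERNEL))
 *		return -ENOMEM;
 *	percpu_counter_inc(&nr_things);
 *	...
 *	percpu_counter_destroy(&nr_things);
 */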

void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount,
			      s32 batch);
s64 __percpu_counter_sum(struct percpu_counter *fbc);
int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);
bool __percpu_counter_limited_add(struct percpu_counter *fbc, s64 limit,
				  s64 amount, s32 batch);
void percpu_counter_sync(struct percpu_counter *fbc);

static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
	return __percpu_counter_compare(fbc, rhs, percpu_counter_batch);
}
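
/*
 * Typical use is a threshold check.  Note __percpu_counter_compare() only
 * falls back to an exact percpu_counter_sum() when fbc->count is within
 * batch * num_online_cpus() of rhs.  Sketch, with invented names:
 *
 *	if (percpu_counter_compare(&nr_used, threshold) >= 0)
 *		return -ENOSPC;
 */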

static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add_batch(fbc, amount, percpu_counter_batch);
}

static inline bool
percpu_counter_limited_add(struct percpu_counter *fbc, s64 limit, s64 amount)
{
	return __percpu_counter_limited_add(fbc, limit, amount,
					    percpu_counter_batch);
}
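
/*
 * The limit acts as a ceiling for positive amounts and a floor for
 * negative ones: the add is applied only if the result would not pass
 * the limit, and the return value reports whether it was.  Sketch of a
 * tmpfs-style use, with invented names ("used", "max", "nr"):
 *
 *	if (!percpu_counter_limited_add(&used, max, nr))
 *		return -ENOSPC;		(would exceed max)
 *	...
 *	percpu_counter_limited_add(&used, 0, -nr);	(never below zero)
 */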

/*
 * With percpu_counter_add_local() and percpu_counter_sub_local(), counts
 * are accumulated in the local per-CPU counter and not in fbc->count until
 * the local count overflows PERCPU_COUNTER_LOCAL_BATCH.  This makes counter
 * writes efficient.
 * But percpu_counter_sum(), instead of percpu_counter_read(), needs to be
 * used to add up the counts from each CPU to account for all the local
 * counts.  So percpu_counter_add_local() and percpu_counter_sub_local()
 * should be used when a counter is updated frequently and read rarely.
 */
static inline void
percpu_counter_add_local(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add_batch(fbc, amount, PERCPU_COUNTER_LOCAL_BATCH);
}
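
/*
 * Usage sketch, with an invented "stats" counter: bump it on every
 * operation, but sum it only when the value is actually read:
 *
 *	percpu_counter_add_local(&stats, 1);	(cheap, per-CPU only)
 *	...
 *	total = percpu_counter_sum(&stats);	(slow but exact)
 */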

static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	s64 ret = __percpu_counter_sum(fbc);
	return ret < 0 ? 0 : ret;
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return __percpu_counter_sum(fbc);
}

static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}

/*
 * It is possible for percpu_counter_read() to return a small negative
 * number for a counter which should never be negative.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	/* Prevent reloads of fbc->count */
	s64 ret = READ_ONCE(fbc->count);

	if (ret >= 0)
		return ret;
	return 0;
}
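
/*
 * Why the clamp can be needed: with an inc on CPU0 and a dec on CPU1,
 * the two batches may fold into fbc->count in either order, so a read
 * can transiently see -1 although the true sum is 0.
 */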

static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
{
	return (fbc->counters != NULL);
}

#else /* !CONFIG_SMP */

struct percpu_counter {
	s64 count;
};

static inline int percpu_counter_init_many(struct percpu_counter *fbc,
					   s64 amount, gfp_t gfp,
					   u32 nr_counters)
{
	u32 i;

	for (i = 0; i < nr_counters; i++)
		fbc[i].count = amount;

	return 0;
}

static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount,
				      gfp_t gfp)
{
	return percpu_counter_init_many(fbc, amount, gfp, 1);
}

static inline void percpu_counter_destroy_many(struct percpu_counter *fbc,
					       u32 nr_counters)
{
}

static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
}

static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	fbc->count = amount;
}

static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
	if (fbc->count > rhs)
		return 1;
	else if (fbc->count < rhs)
		return -1;
	else
		return 0;
}

static inline int
__percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
{
	return percpu_counter_compare(fbc, rhs);
}

static inline void
percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	unsigned long flags;

	local_irq_save(flags);
	fbc->count += amount;
	local_irq_restore(flags);
}

static inline bool
percpu_counter_limited_add(struct percpu_counter *fbc, s64 limit, s64 amount)
{
	unsigned long flags;
	bool good = false;
	s64 count;

	if (amount == 0)
		return true;

	local_irq_save(flags);
	count = fbc->count + amount;
	if ((amount > 0 && count <= limit) ||
	    (amount < 0 && count >= limit)) {
		fbc->count = count;
		good = true;
	}
	local_irq_restore(flags);
	return good;
}

/* non-SMP percpu_counter_add_local is the same as percpu_counter_add */
static inline void
percpu_counter_add_local(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add(fbc, amount);
}

static inline void
percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	percpu_counter_add(fbc, amount);
}

static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}

/*
 * percpu_counter is intended to track positive numbers. In the UP case the
 * number should never be negative.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	return fbc->count;
}

static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	return percpu_counter_read_positive(fbc);
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return percpu_counter_read(fbc);
}

static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
{
	return true;
}

static inline void percpu_counter_sync(struct percpu_counter *fbc)
{
}
#endif	/* CONFIG_SMP */

static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, 1);
}

static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, -1);
}

static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add(fbc, -amount);
}

static inline void
percpu_counter_sub_local(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add_local(fbc, -amount);
}

#endif /* _LINUX_PERCPU_COUNTER_H */