lockref: uninline lockref helper functions
They aren't very good to inline, since they already call external functions (the spinlock code), and we're going to create rather more complicated versions of them that can do the reference count updates locklessly.

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 2f4f12e571
parent 15570086b5
include/linux/lockref.h
@@ -21,67 +21,9 @@ struct lockref {
 	unsigned int count;
 };
 
-/**
- * lockref_get - Increments reference count unconditionally
- * @lockcnt: pointer to lockref structure
- *
- * This operation is only valid if you already hold a reference
- * to the object, so you know the count cannot be zero.
- */
-static inline void lockref_get(struct lockref *lockref)
-{
-	spin_lock(&lockref->lock);
-	lockref->count++;
-	spin_unlock(&lockref->lock);
-}
-
-/**
- * lockref_get_not_zero - Increments count unless the count is 0
- * @lockcnt: pointer to lockref structure
- * Return: 1 if count updated successfully or 0 if count is 0
- */
-static inline int lockref_get_not_zero(struct lockref *lockref)
-{
-	int retval = 0;
-
-	spin_lock(&lockref->lock);
-	if (lockref->count) {
-		lockref->count++;
-		retval = 1;
-	}
-	spin_unlock(&lockref->lock);
-	return retval;
-}
-
-/**
- * lockref_get_or_lock - Increments count unless the count is 0
- * @lockcnt: pointer to lockref structure
- * Return: 1 if count updated successfully or 0 if count was zero
- * and we got the lock instead.
- */
-static inline int lockref_get_or_lock(struct lockref *lockref)
-{
-	spin_lock(&lockref->lock);
-	if (!lockref->count)
-		return 0;
-	lockref->count++;
-	spin_unlock(&lockref->lock);
-	return 1;
-}
-
-/**
- * lockref_put_or_lock - decrements count unless count <= 1 before decrement
- * @lockcnt: pointer to lockref structure
- * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
- */
-static inline int lockref_put_or_lock(struct lockref *lockref)
-{
-	spin_lock(&lockref->lock);
-	if (lockref->count <= 1)
-		return 0;
-	lockref->count--;
-	spin_unlock(&lockref->lock);
-	return 1;
-}
+extern void lockref_get(struct lockref *);
+extern int lockref_get_not_zero(struct lockref *);
+extern int lockref_get_or_lock(struct lockref *);
+extern int lockref_put_or_lock(struct lockref *);
 
 #endif /* __LINUX_LOCKREF_H */
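The two "or_lock" variants carry a contract that is easy to miss from the declarations alone: a 0 return means the call did not change the count and left lockref->lock held, so the caller finishes the slow path under the lock and must unlock explicitly. A minimal caller sketch (not part of this commit; struct myobj, myobj_put() and myobj_free() are hypothetical names):

	#include <linux/lockref.h>

	struct myobj {				/* hypothetical refcounted object */
		struct lockref ref;
		/* ... payload ... */
	};

	static void myobj_free(struct myobj *obj);	/* hypothetical destructor */

	static void myobj_put(struct myobj *obj)
	{
		/* Fast path: count was > 1 and has been decremented. */
		if (lockref_put_or_lock(&obj->ref))
			return;

		/* Slow path: count <= 1 and we now hold obj->ref.lock. */
		obj->ref.count = 0;
		spin_unlock(&obj->ref.lock);
		myobj_free(obj);
	}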
lib/Makefile
@@ -20,6 +20,7 @@ lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o
 
 lib-y += kobject.o klist.o
+obj-y += lockref.o
 
 obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
 	 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
lib/lockref.c (new file, 69 lines)
@@ -0,0 +1,69 @@
+#include <linux/export.h>
+#include <linux/lockref.h>
+
+/**
+ * lockref_get - Increments reference count unconditionally
+ * @lockcnt: pointer to lockref structure
+ *
+ * This operation is only valid if you already hold a reference
+ * to the object, so you know the count cannot be zero.
+ */
+void lockref_get(struct lockref *lockref)
+{
+	spin_lock(&lockref->lock);
+	lockref->count++;
+	spin_unlock(&lockref->lock);
+}
+EXPORT_SYMBOL(lockref_get);
+
+/**
+ * lockref_get_not_zero - Increments count unless the count is 0
+ * @lockcnt: pointer to lockref structure
+ * Return: 1 if count updated successfully or 0 if count was zero
+ */
+int lockref_get_not_zero(struct lockref *lockref)
+{
+	int retval = 0;
+
+	spin_lock(&lockref->lock);
+	if (lockref->count) {
+		lockref->count++;
+		retval = 1;
+	}
+	spin_unlock(&lockref->lock);
+	return retval;
+}
+EXPORT_SYMBOL(lockref_get_not_zero);
+
+/**
+ * lockref_get_or_lock - Increments count unless the count is 0
+ * @lockcnt: pointer to lockref structure
+ * Return: 1 if count updated successfully or 0 if count was zero
+ * and we got the lock instead.
+ */
+int lockref_get_or_lock(struct lockref *lockref)
+{
+	spin_lock(&lockref->lock);
+	if (!lockref->count)
+		return 0;
+	lockref->count++;
+	spin_unlock(&lockref->lock);
+	return 1;
+}
+EXPORT_SYMBOL(lockref_get_or_lock);
+
+/**
+ * lockref_put_or_lock - decrements count unless count <= 1 before decrement
+ * @lockcnt: pointer to lockref structure
+ * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
+ */
+int lockref_put_or_lock(struct lockref *lockref)
+{
+	spin_lock(&lockref->lock);
+	if (lockref->count <= 1)
+		return 0;
+	lockref->count--;
+	spin_unlock(&lockref->lock);
+	return 1;
+}
+EXPORT_SYMBOL(lockref_put_or_lock);
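The "rather more complicated versions" the commit message promises would overlay the spinlock and the count in a single machine word so the count can be updated with a compare-and-exchange while the lock is unlocked. The following is an illustrative sketch only, not the code of this commit or of any actual follow-up: the lock_count field, the packed layout (raw lock word in the low 32 bits, count in the high 32 bits, unlocked == all-zero lock bits), and every helper name here are assumptions made for the example.

	#include <linux/lockref.h>

	/* Hypothetical decode/encode helpers for the packed 64-bit word. */
	static inline bool lockref_locked(u64 v)	{ return (u32)v != 0; }
	static inline int  lockref_count(u64 v)		{ return (int)(v >> 32); }
	static inline u64  lockref_add(u64 v, int n)	{ return v + ((u64)n << 32); }

	int lockref_get_not_zero_lockless(struct lockref *lockref)
	{
		int retval = 0;
		u64 old = READ_ONCE(lockref->lock_count);	/* assumed field */

		/* Fast path: while nobody holds the lock, race via cmpxchg. */
		while (!lockref_locked(old) && lockref_count(old) > 0) {
			u64 prev = cmpxchg64(&lockref->lock_count,
					     old, lockref_add(old, 1));
			if (prev == old)
				return 1;	/* lockless increment succeeded */
			old = prev;		/* lost a race: re-evaluate */
		}

		/* Slow path: identical to the spinlocked version above. */
		spin_lock(&lockref->lock);
		if (lockref->count) {
			lockref->count++;
			retval = 1;
		}
		spin_unlock(&lockref->lock);
		return retval;
	}

The point of this commit is exactly to make that swap possible later: once the helpers are out of line in lib/lockref.c, their implementation can change without touching any caller.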