Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git, synced 2025-01-01 18:55:12 +00:00
Merge branch 'locking/core' into locking/urgent, to pick up pending commits

Merge all pending locking commits into a single branch.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit ae39e0bd15
Documentation/core-api/cleanup.rst (new file)
@@ -0,0 +1,8 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===========================
+Scope-based Cleanup Helpers
+===========================
+
+.. kernel-doc:: include/linux/cleanup.h
+   :doc: scope-based cleanup helpers
Documentation/core-api/index.rst
@@ -35,6 +35,7 @@ Library functionality that is used throughout the kernel.
 
    kobject
    kref
+   cleanup
    assoc_array
    xarray
    maple_tree
arch/x86/include/asm/atomic64_32.h
@@ -163,20 +163,18 @@ static __always_inline s64 arch_atomic64_dec_return(atomic64_t *v)
 }
 #define arch_atomic64_dec_return arch_atomic64_dec_return
 
-static __always_inline s64 arch_atomic64_add(s64 i, atomic64_t *v)
+static __always_inline void arch_atomic64_add(s64 i, atomic64_t *v)
 {
 	__alternative_atomic64(add, add_return,
 			       ASM_OUTPUT2("+A" (i), "+c" (v)),
 			       ASM_NO_INPUT_CLOBBER("memory"));
-	return i;
 }
 
-static __always_inline s64 arch_atomic64_sub(s64 i, atomic64_t *v)
+static __always_inline void arch_atomic64_sub(s64 i, atomic64_t *v)
 {
 	__alternative_atomic64(sub, sub_return,
 			       ASM_OUTPUT2("+A" (i), "+c" (v)),
 			       ASM_NO_INPUT_CLOBBER("memory"));
-	return i;
 }
 
 static __always_inline void arch_atomic64_inc(atomic64_t *v)
arch/x86/lib/atomic64_cx8_32.S
@@ -16,6 +16,11 @@
 	cmpxchg8b (\reg)
 .endm
 
+.macro read64_nonatomic reg
+	movl (\reg), %eax
+	movl 4(\reg), %edx
+.endm
+
 SYM_FUNC_START(atomic64_read_cx8)
 	read64 %ecx
 	RET
@@ -51,7 +56,7 @@ SYM_FUNC_START(atomic64_\func\()_return_cx8)
 	movl %edx, %edi
 	movl %ecx, %ebp
 
-	read64 %ecx
+	read64_nonatomic %ecx
 1:
 	movl %eax, %ebx
 	movl %edx, %ecx
@@ -79,7 +84,7 @@ addsub_return sub sub sbb
 SYM_FUNC_START(atomic64_\func\()_return_cx8)
 	pushl %ebx
 
-	read64 %esi
+	read64_nonatomic %esi
 1:
 	movl %eax, %ebx
 	movl %edx, %ecx
include/linux/cleanup.h
@@ -4,6 +4,142 @@
 
 #include <linux/compiler.h>
 
+/**
+ * DOC: scope-based cleanup helpers
+ *
+ * The "goto error" pattern is notorious for introducing subtle resource
+ * leaks. It is tedious and error-prone to add new resource acquisition
+ * constraints into code paths that already have several unwind
+ * conditions. The "cleanup" helpers enable the compiler to help with
+ * this tedium and can aid in maintaining LIFO (last in, first out)
+ * unwind ordering to avoid unintentional leaks.
+ *
+ * As drivers make up the majority of the kernel code base, here is an
+ * example of using these helpers to clean up PCI drivers. The targets of
+ * the cleanups are occasions where a goto is used to unwind a device
+ * reference (pci_dev_put()), or unlock the device (pci_dev_unlock())
+ * before returning.
+ *
+ * The DEFINE_FREE() macro can arrange for PCI device references to be
+ * dropped when the associated variable goes out of scope::
+ *
+ *	DEFINE_FREE(pci_dev_put, struct pci_dev *, if (_T) pci_dev_put(_T))
+ *	...
+ *	struct pci_dev *dev __free(pci_dev_put) =
+ *		pci_get_slot(parent, PCI_DEVFN(0, 0));
+ *
+ * The above will automatically call pci_dev_put() if @dev is non-NULL
+ * when @dev goes out of scope (automatic variable scope). If a function
+ * wants to invoke pci_dev_put() on error, but return @dev (i.e. without
+ * freeing it) on success, it can do::
+ *
+ *	return no_free_ptr(dev);
+ *
+ * ...or::
+ *
+ *	return_ptr(dev);
+ *
+ * The DEFINE_GUARD() macro can arrange for the PCI device lock to be
+ * dropped when the scope where guard() is invoked ends::
+ *
+ *	DEFINE_GUARD(pci_dev, struct pci_dev *, pci_dev_lock(_T), pci_dev_unlock(_T))
+ *	...
+ *	guard(pci_dev)(dev);
+ *
+ * The lifetime of the lock obtained by the guard() helper follows the
+ * scope of automatic variable declaration. Take the following example::
+ *
+ *	func(...)
+ *	{
+ *		if (...) {
+ *			...
+ *			guard(pci_dev)(dev); // pci_dev_lock() invoked here
+ *			...
+ *		} // <- implied pci_dev_unlock() triggered here
+ *	}
+ *
+ * Observe that the lock is held for the remainder of the "if ()" block,
+ * not the remainder of "func()".
+ *
+ * Now, when a function uses both __free() and guard(), or multiple
+ * instances of __free(), the LIFO order of variable definition
+ * matters. GCC documentation says:
+ *
+ * "When multiple variables in the same scope have cleanup attributes,
+ * at exit from the scope their associated cleanup functions are run in
+ * reverse order of definition (last defined, first cleanup)."
+ *
+ * When the unwind order matters, variables must be defined
+ * mid-function scope rather than at the top of the function. Take the
+ * following example and notice the bug highlighted by "!!"::
+ *
+ *	LIST_HEAD(list);
+ *	DEFINE_MUTEX(lock);
+ *
+ *	struct object {
+ *		struct list_head node;
+ *	};
+ *
+ *	static struct object *alloc_add(void)
+ *	{
+ *		struct object *obj;
+ *
+ *		lockdep_assert_held(&lock);
+ *		obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ *		if (obj) {
+ *			INIT_LIST_HEAD(&obj->node);
+ *			list_add(&obj->node, &list);
+ *		}
+ *		return obj;
+ *	}
+ *
+ *	static void remove_free(struct object *obj)
+ *	{
+ *		lockdep_assert_held(&lock);
+ *		list_del(&obj->node);
+ *		kfree(obj);
+ *	}
+ *
+ *	DEFINE_FREE(remove_free, struct object *, if (_T) remove_free(_T))
+ *	static int init(void)
+ *	{
+ *		struct object *obj __free(remove_free) = NULL;
+ *		int err;
+ *
+ *		guard(mutex)(&lock);
+ *		obj = alloc_add();
+ *
+ *		if (!obj)
+ *			return -ENOMEM;
+ *
+ *		err = other_init(obj);
+ *		if (err)
+ *			return err; // remove_free() called without the lock!!
+ *
+ *		no_free_ptr(obj);
+ *		return 0;
+ *	}
+ *
+ * That bug is fixed by changing init() to call guard() and define +
+ * initialize @obj in this order::
+ *
+ *	guard(mutex)(&lock);
+ *	struct object *obj __free(remove_free) = alloc_add();
+ *
+ * Given that the "__free(...) = NULL" pattern for variables defined at
+ * the top of the function poses this potential interdependency problem,
+ * the recommendation is to always define and assign variables in one
+ * statement and not group variable definitions at the top of the
+ * function when __free() is used.
+ *
+ * Lastly, given that the benefit of cleanup helpers is removal of
+ * "goto", and that the "goto" statement can jump between scopes, the
+ * expectation is that usage of "goto" and cleanup helpers is never
+ * mixed in the same function. I.e. for a given routine, convert all
+ * resources that need a "goto" cleanup to scope-based cleanup, or
+ * convert none of them.
+ */
+
 /*
  * DEFINE_FREE(name, type, free):
  *	simple helper macro that defines the required wrapper for a __free()
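To tie the pieces of the DOC comment together, here is a minimal sketch (not part of the patch) that combines guard(), __free() and return_ptr() in the recommended LIFO-friendly order. The function name get_slot0_locked() and its cfg_lock parameter are hypothetical; the DEFINE_FREE(pci_dev_put, ...) line is reproduced from the example above for self-containment (drop it if the tree already provides an equivalent), and guard(mutex) is the stock mutex guard from <linux/mutex.h>.

	#include <linux/cleanup.h>
	#include <linux/mutex.h>
	#include <linux/pci.h>

	/* From the DOC example above; may already exist elsewhere in the tree. */
	DEFINE_FREE(pci_dev_put, struct pci_dev *, if (_T) pci_dev_put(_T))

	/* Hypothetical helper: return a referenced, enabled slot-0 device. */
	static struct pci_dev *get_slot0_locked(struct pci_bus *parent,
						struct mutex *cfg_lock)
	{
		guard(mutex)(cfg_lock);		/* defined first: unlocked last (LIFO) */

		struct pci_dev *dev __free(pci_dev_put) =	/* defined last: put first */
			pci_get_slot(parent, PCI_DEVFN(0, 0));
		if (!dev)
			return NULL;

		if (pci_enable_device(dev))
			return NULL;		/* __free() drops the reference here */

		return_ptr(dev);		/* success: hand the reference to the caller */
	}

Because @dev is defined after the guard, the error paths put the device reference while the mutex is still held, and the mutex is released last, which is the ordering rule the comment above describes.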
kernel/locking/lockdep.c
@@ -785,7 +785,7 @@ static void lockdep_print_held_locks(struct task_struct *p)
 		printk("no locks held by %s/%d.\n", p->comm, task_pid_nr(p));
 	else
 		printk("%d lock%s held by %s/%d:\n", depth,
-		       depth > 1 ? "s" : "", p->comm, task_pid_nr(p));
+		       str_plural(depth), p->comm, task_pid_nr(p));
 	/*
 	 * It's not reliable to print a task's held locks if it's not sleeping
 	 * and it's not the current task.
@@ -2067,6 +2067,9 @@ static noinline void print_bfs_bug(int ret)
 	/*
 	 * Breadth-first-search failed, graph got corrupted?
 	 */
+	if (ret == BFS_EQUEUEFULL)
+		pr_warn("Increase LOCKDEP_CIRCULAR_QUEUE_BITS to avoid this warning:\n");
+
 	WARN(1, "lockdep bfs error:%d\n", ret);
 }
 
@@ -6196,25 +6199,27 @@ static struct pending_free *get_pending_free(void)
 static void free_zapped_rcu(struct rcu_head *cb);
 
 /*
- * Schedule an RCU callback if no RCU callback is pending. Must be called with
- * the graph lock held.
+ * See if we need to queue an RCU callback; must be called with the
+ * lockdep lock held. Returns false if there is no pending free or the
+ * callback is already scheduled. Otherwise, a call_rcu() must follow
+ * this function call.
  */
-static void call_rcu_zapped(struct pending_free *pf)
+static bool prepare_call_rcu_zapped(struct pending_free *pf)
 {
 	WARN_ON_ONCE(inside_selftest());
 
 	if (list_empty(&pf->zapped))
-		return;
+		return false;
 
 	if (delayed_free.scheduled)
-		return;
+		return false;
 
 	delayed_free.scheduled = true;
 
 	WARN_ON_ONCE(delayed_free.pf + delayed_free.index != pf);
 	delayed_free.index ^= 1;
 
-	call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
+	return true;
 }
 
 /* The caller must hold the graph lock. May be called from RCU context. */
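Condensed, the caller pattern this new contract implies (and that the remaining lockdep.c hunks below apply at each call site) is: latch the decision while the graph lock is held, then issue call_rcu() only after the lock and IRQ flags have been released. A fragment-level sketch using the identifiers from the surrounding diff (the "zap" step stands in for whichever __lockdep_free_key_range()/__lockdep_reset_lock() call the real site uses):

	struct pending_free *pf;
	unsigned long flags;
	bool need_callback;

	raw_local_irq_save(flags);
	lockdep_lock();
	pf = get_pending_free();
	/* ... move zapped classes/keys onto pf->zapped ... */
	need_callback = prepare_call_rcu_zapped(pf);	/* decide only; no call_rcu() under the lock */
	lockdep_unlock();
	raw_local_irq_restore(flags);

	if (need_callback)				/* queue the callback outside the lock */
		call_rcu(&delayed_free.rcu_head, free_zapped_rcu);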
@@ -6240,6 +6245,7 @@ static void free_zapped_rcu(struct rcu_head *ch)
 {
 	struct pending_free *pf;
 	unsigned long flags;
+	bool need_callback;
 
 	if (WARN_ON_ONCE(ch != &delayed_free.rcu_head))
 		return;
@@ -6251,14 +6257,18 @@ static void free_zapped_rcu(struct rcu_head *ch)
 	pf = delayed_free.pf + (delayed_free.index ^ 1);
 	__free_zapped_classes(pf);
 	delayed_free.scheduled = false;
-
-	/*
-	 * If there's anything on the open list, close and start a new callback.
-	 */
-	call_rcu_zapped(delayed_free.pf + delayed_free.index);
-
+	need_callback =
+		prepare_call_rcu_zapped(delayed_free.pf + delayed_free.index);
 	lockdep_unlock();
 	raw_local_irq_restore(flags);
+
+	/*
+	 * If there's pending free and its callback has not been scheduled,
+	 * queue an RCU callback.
+	 */
+	if (need_callback)
+		call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
+
 }
 
 /*
@@ -6298,6 +6308,7 @@ static void lockdep_free_key_range_reg(void *start, unsigned long size)
 {
 	struct pending_free *pf;
 	unsigned long flags;
+	bool need_callback;
 
 	init_data_structures_once();
 
@@ -6305,10 +6316,11 @@ static void lockdep_free_key_range_reg(void *start, unsigned long size)
 	lockdep_lock();
 	pf = get_pending_free();
 	__lockdep_free_key_range(pf, start, size);
-	call_rcu_zapped(pf);
+	need_callback = prepare_call_rcu_zapped(pf);
 	lockdep_unlock();
 	raw_local_irq_restore(flags);
+	if (need_callback)
+		call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
 
 	/*
 	 * Wait for any possible iterators from look_up_lock_class() to pass
 	 * before continuing to free the memory they refer to.
@@ -6402,6 +6414,7 @@ static void lockdep_reset_lock_reg(struct lockdep_map *lock)
 	struct pending_free *pf;
 	unsigned long flags;
 	int locked;
+	bool need_callback = false;
 
 	raw_local_irq_save(flags);
 	locked = graph_lock();
@@ -6410,11 +6423,13 @@ static void lockdep_reset_lock_reg(struct lockdep_map *lock)
 
 	pf = get_pending_free();
 	__lockdep_reset_lock(pf, lock);
-	call_rcu_zapped(pf);
+	need_callback = prepare_call_rcu_zapped(pf);
 
 	graph_unlock();
 out_irq:
 	raw_local_irq_restore(flags);
+	if (need_callback)
+		call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
 }
 
 /*
@@ -6458,6 +6473,7 @@ void lockdep_unregister_key(struct lock_class_key *key)
 	struct pending_free *pf;
 	unsigned long flags;
 	bool found = false;
+	bool need_callback = false;
 
 	might_sleep();
 
@@ -6478,11 +6494,14 @@ void lockdep_unregister_key(struct lock_class_key *key)
 	if (found) {
 		pf = get_pending_free();
 		__lockdep_free_key_range(pf, key, 1);
-		call_rcu_zapped(pf);
+		need_callback = prepare_call_rcu_zapped(pf);
 	}
 	lockdep_unlock();
 	raw_local_irq_restore(flags);
 
+	if (need_callback)
+		call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
+
 	/* Wait until is_dynamic_key() has finished accessing k->hash_entry. */
 	synchronize_rcu();
 }
kernel/locking/lockdep_proc.c
@@ -424,7 +424,7 @@ static void seq_line(struct seq_file *m, char c, int offset, int length)
 	for (i = 0; i < offset; i++)
 		seq_puts(m, " ");
 	for (i = 0; i < length; i++)
-		seq_printf(m, "%c", c);
+		seq_putc(m, c);
 	seq_puts(m, "\n");
 }
 
kernel/locking/rwsem.c
@@ -181,12 +181,21 @@ static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
 	__rwsem_set_reader_owned(sem, current);
 }
 
+#ifdef CONFIG_DEBUG_RWSEMS
+/*
+ * Return just the real task structure pointer of the owner
+ */
+static inline struct task_struct *rwsem_owner(struct rw_semaphore *sem)
+{
+	return (struct task_struct *)
+		(atomic_long_read(&sem->owner) & ~RWSEM_OWNER_FLAGS_MASK);
+}
+
 /*
  * Return true if the rwsem is owned by a reader.
  */
 static inline bool is_rwsem_reader_owned(struct rw_semaphore *sem)
 {
-#ifdef CONFIG_DEBUG_RWSEMS
 	/*
 	 * Check the count to see if it is write-locked.
 	 */
@@ -194,11 +203,9 @@ static inline bool is_rwsem_reader_owned(struct rw_semaphore *sem)
 
 	if (count & RWSEM_WRITER_MASK)
 		return false;
-#endif
 	return rwsem_test_oflags(sem, RWSEM_READER_OWNED);
 }
 
-#ifdef CONFIG_DEBUG_RWSEMS
 /*
  * With CONFIG_DEBUG_RWSEMS configured, it will make sure that if there
  * is a task pointer in owner of a reader-owned rwsem, it will be the
@@ -265,15 +272,6 @@ static inline bool rwsem_write_trylock(struct rw_semaphore *sem)
 	return false;
 }
 
-/*
- * Return just the real task structure pointer of the owner
- */
-static inline struct task_struct *rwsem_owner(struct rw_semaphore *sem)
-{
-	return (struct task_struct *)
-		(atomic_long_read(&sem->owner) & ~RWSEM_OWNER_FLAGS_MASK);
-}
-
 /*
  * Return the real task structure pointer of the owner and the embedded
  * flags in the owner. pflags must be non-NULL.