Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
nd_btt: Make BTT lanes preemptible
nd_region_acquire_lane uses get_cpu, which disables preemption. This is
an issue on PREEMPT_RT kernels, since btt_write_pg and also
nd_region_acquire_lane itself take a spin lock, resulting in BUG:
sleeping function called from invalid context.

Fix the issue by replacing get_cpu with smp_processor_id and
migrate_disable when needed. This makes BTT operations preemptible,
thus permitting the use of spin_lock.

BUG example occurring when running ndctl tests on a PREEMPT_RT kernel:

BUG: sleeping function called from invalid context at kernel/locking/spinlock_rt.c:48
in_atomic(): 1, irqs_disabled(): 0, non_block: 0, pid: 4903, name: libndctl
preempt_count: 1, expected: 0
RCU nest depth: 0, expected: 0
Preemption disabled at:
[<ffffffffc1313db5>] nd_region_acquire_lane+0x15/0x90 [libnvdimm]
Call Trace:
 <TASK>
 dump_stack_lvl+0x8e/0xb0
 __might_resched+0x19b/0x250
 rt_spin_lock+0x4c/0x100
 ? btt_write_pg+0x2d7/0x500 [nd_btt]
 btt_write_pg+0x2d7/0x500 [nd_btt]
 ? local_clock_noinstr+0x9/0xc0
 btt_submit_bio+0x16d/0x270 [nd_btt]
 __submit_bio+0x48/0x80
 __submit_bio_noacct+0x7e/0x1e0
 submit_bio_wait+0x58/0xb0
 __blkdev_direct_IO_simple+0x107/0x240
 ? inode_set_ctime_current+0x51/0x110
 ? __pfx_submit_bio_wait_endio+0x10/0x10
 blkdev_write_iter+0x1d8/0x290
 vfs_write+0x237/0x330
 ...
 </TASK>

Fixes: 5212e11fde4d ("nd_btt: atomic sector updates")
Signed-off-by: Tomas Glozar <tglozar@redhat.com>
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Reviewed-by: Vishal Verma <vishal.l.verma@intel.com>
Signed-off-by: Ira Weiny <ira.weiny@intel.com>
This commit is contained in:
parent 6fd4ebfc4d
commit 36c75ce3bd
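The hunks below touch the lane acquire/release helpers in drivers/nvdimm/region_devs.c. As background for the conversion the commit message describes, here is a minimal, self-contained sketch of the pattern being applied; the demo_* names and the struct are hypothetical and are not part of libnvdimm. get_cpu()/put_cpu() disable preemption, which forbids taking a spin_lock() on PREEMPT_RT (where spinlocks can sleep), while migrate_disable()/migrate_enable() only pin the task to its current CPU, so per-CPU state stays stable and the section remains preemptible.

/*
 * Illustrative sketch only, not the libnvdimm code. demo_lane and the
 * demo_* helpers are hypothetical; the lock is assumed to have been
 * initialized elsewhere with spin_lock_init().
 */
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/smp.h>
#include <linux/spinlock.h>

struct demo_lane {
	spinlock_t lock;
	unsigned int count;
};
static DEFINE_PER_CPU(struct demo_lane, demo_lanes);

/* Before: preemption is disabled across the spinlock, which splats on RT. */
static unsigned int demo_acquire_old(void)
{
	unsigned int cpu = get_cpu();		/* disables preemption */
	struct demo_lane *l = per_cpu_ptr(&demo_lanes, cpu);

	if (l->count++ == 0)
		spin_lock(&l->lock);		/* may sleep on PREEMPT_RT: BUG */
	return cpu;
}

/* After: the task is pinned to its CPU but stays preemptible. */
static unsigned int demo_acquire_new(void)
{
	unsigned int cpu;
	struct demo_lane *l;

	migrate_disable();			/* no CPU migration ...          */
	cpu = smp_processor_id();		/* ... so this CPU id stays valid */
	l = per_cpu_ptr(&demo_lanes, cpu);
	if (l->count++ == 0)
		spin_lock(&l->lock);		/* legal: sleeping is allowed here */
	return cpu;
}

static void demo_release_new(unsigned int cpu)
{
	struct demo_lane *l = per_cpu_ptr(&demo_lanes, cpu);

	if (--l->count == 0)
		spin_unlock(&l->lock);
	migrate_enable();			/* pairs with migrate_disable() */
}

The diff applies the same shape to nd_region_acquire_lane/nd_region_release_lane, where the per-CPU count lets the same CPU take its lane recursively without deadlocking on the lane spinlock.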
drivers/nvdimm/region_devs.c
@@ -939,7 +939,8 @@ unsigned int nd_region_acquire_lane(struct nd_region *nd_region)
 {
 	unsigned int cpu, lane;
 
-	cpu = get_cpu();
+	migrate_disable();
+	cpu = smp_processor_id();
 	if (nd_region->num_lanes < nr_cpu_ids) {
 		struct nd_percpu_lane *ndl_lock, *ndl_count;
 
@@ -958,16 +959,15 @@ EXPORT_SYMBOL(nd_region_acquire_lane);
 void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane)
 {
 	if (nd_region->num_lanes < nr_cpu_ids) {
-		unsigned int cpu = get_cpu();
+		unsigned int cpu = smp_processor_id();
 		struct nd_percpu_lane *ndl_lock, *ndl_count;
 
 		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
 		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
 		if (--ndl_count->count == 0)
 			spin_unlock(&ndl_lock->lock);
-		put_cpu();
-	} else
-		put_cpu();
+	}
+	migrate_enable();
 }
 EXPORT_SYMBOL(nd_region_release_lane);
 
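For orientation, after the patch the two helpers read roughly as follows. This is a sketch assembled from the hunks above; lines the hunks do not show (the lane selection and return in the acquire path) are assumed from the surrounding pre-patch code rather than quoted from it.

/* Approximate post-patch shape; lines outside the hunks are assumed. */
unsigned int nd_region_acquire_lane(struct nd_region *nd_region)
{
	unsigned int cpu, lane;

	migrate_disable();
	cpu = smp_processor_id();
	if (nd_region->num_lanes < nr_cpu_ids) {
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		lane = cpu % nd_region->num_lanes;	/* assumed, not in the hunk */
		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (ndl_count->count++ == 0)
			spin_lock(&ndl_lock->lock);
	} else
		lane = cpu;				/* assumed, not in the hunk */

	return lane;
}

void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane)
{
	if (nd_region->num_lanes < nr_cpu_ids) {
		unsigned int cpu = smp_processor_id();
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (--ndl_count->count == 0)
			spin_unlock(&ndl_lock->lock);
	}
	migrate_enable();
}

Note the symmetry the patch establishes: acquire always calls migrate_disable() and release always calls migrate_enable() after dropping the lane lock, so the pair brackets every BTT I/O regardless of whether the lane spinlock was actually taken.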