mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
rcu/kvfree: Move some functions under CONFIG_TINY_RCU
Currently, when Tiny RCU is enabled, the tree.c file is not compiled, so duplicate function names do not conflict with each other. Because the kvfree_rcu() functionality is being moved to SLAB, some functions have to be reordered and placed together under a CONFIG_TINY_RCU guard, so that their names will not conflict when a kernel is compiled for the CONFIG_TINY_RCU flavor.

Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Acked-by: Hyeonggon Yoo <hyeonggon.yoo@sk.com>
Tested-by: Hyeonggon Yoo <hyeonggon.yoo@sk.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
parent 0f52b4db4f
commit d824ed707b
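In essence, the hunks below wrap the moved helpers in a single #if !defined(CONFIG_TINY_RCU) ... #endif region, so that once the kvfree_rcu() code is also built for the Tiny RCU flavor (from SLAB), the two sets of same-named definitions can never end up in one build. The following stand-alone C sketch illustrates that guard pattern only; the file name, function bodies, and messages are invented for illustration and are not the kernel's code.

/* guard_demo.c - hypothetical demo of the #if !defined() guard, not kernel code. */
#include <stdio.h>

#if !defined(CONFIG_TINY_RCU)
/* "Tree RCU" style build: the full helper is compiled in. */
static void run_page_cache_worker(void)
{
        printf("tree RCU flavor: refill the per-CPU page cache\n");
}
#else
/* "Tiny RCU" style build: a different definition with the same name. */
static void run_page_cache_worker(void)
{
        printf("tiny RCU flavor: page cache not used\n");
}
#endif /* #if !defined(CONFIG_TINY_RCU) */

int main(void)
{
        run_page_cache_worker();
        return 0;
}

Compiling with "cc guard_demo.c" selects the first branch, while "cc -DCONFIG_TINY_RCU guard_demo.c" selects the second, mirroring how Kconfig ensures exactly one definition per kernel build.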
kernel/rcu/tree.c
@@ -3653,16 +3653,6 @@ static void kfree_rcu_monitor(struct work_struct *work)
         schedule_delayed_monitor_work(krcp);
 }
 
-static enum hrtimer_restart
-schedule_page_work_fn(struct hrtimer *t)
-{
-        struct kfree_rcu_cpu *krcp =
-                container_of(t, struct kfree_rcu_cpu, hrtimer);
-
-        queue_delayed_work(system_highpri_wq, &krcp->page_cache_work, 0);
-        return HRTIMER_NORESTART;
-}
-
 static void fill_page_cache_func(struct work_struct *work)
 {
         struct kvfree_rcu_bulk_data *bnode;
@@ -3698,27 +3688,6 @@ static void fill_page_cache_func(struct work_struct *work)
         atomic_set(&krcp->backoff_page_cache_fill, 0);
 }
 
-static void
-run_page_cache_worker(struct kfree_rcu_cpu *krcp)
-{
-        // If cache disabled, bail out.
-        if (!rcu_min_cached_objs)
-                return;
-
-        if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
-                        !atomic_xchg(&krcp->work_in_progress, 1)) {
-                if (atomic_read(&krcp->backoff_page_cache_fill)) {
-                        queue_delayed_work(system_unbound_wq,
-                                &krcp->page_cache_work,
-                                        msecs_to_jiffies(rcu_delay_page_cache_fill_msec));
-                } else {
-                        hrtimer_init(&krcp->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-                        krcp->hrtimer.function = schedule_page_work_fn;
-                        hrtimer_start(&krcp->hrtimer, 0, HRTIMER_MODE_REL);
-                }
-        }
-}
-
 // Record ptr in a page managed by krcp, with the pre-krc_this_cpu_lock()
 // state specified by flags. If can_alloc is true, the caller must
 // be schedulable and not be holding any locks or mutexes that might be
@@ -3779,6 +3748,51 @@ add_ptr_to_bulk_krc_lock(struct kfree_rcu_cpu **krcp,
         return true;
 }
 
+#if !defined(CONFIG_TINY_RCU)
+
+static enum hrtimer_restart
+schedule_page_work_fn(struct hrtimer *t)
+{
+        struct kfree_rcu_cpu *krcp =
+                container_of(t, struct kfree_rcu_cpu, hrtimer);
+
+        queue_delayed_work(system_highpri_wq, &krcp->page_cache_work, 0);
+        return HRTIMER_NORESTART;
+}
+
+static void
+run_page_cache_worker(struct kfree_rcu_cpu *krcp)
+{
+        // If cache disabled, bail out.
+        if (!rcu_min_cached_objs)
+                return;
+
+        if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
+                        !atomic_xchg(&krcp->work_in_progress, 1)) {
+                if (atomic_read(&krcp->backoff_page_cache_fill)) {
+                        queue_delayed_work(system_unbound_wq,
+                                &krcp->page_cache_work,
+                                        msecs_to_jiffies(rcu_delay_page_cache_fill_msec));
+                } else {
+                        hrtimer_init(&krcp->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+                        krcp->hrtimer.function = schedule_page_work_fn;
+                        hrtimer_start(&krcp->hrtimer, 0, HRTIMER_MODE_REL);
+                }
+        }
+}
+
+void __init kfree_rcu_scheduler_running(void)
+{
+        int cpu;
+
+        for_each_possible_cpu(cpu) {
+                struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
+
+                if (need_offload_krc(krcp))
+                        schedule_delayed_monitor_work(krcp);
+        }
+}
+
 /*
  * Queue a request for lazy invocation of the appropriate free routine
  * after a grace period. Please note that three paths are maintained,
@@ -3944,6 +3958,8 @@ void kvfree_rcu_barrier(void)
 }
 EXPORT_SYMBOL_GPL(kvfree_rcu_barrier);
 
+#endif /* #if !defined(CONFIG_TINY_RCU) */
+
 static unsigned long
 kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
 {
@@ -3985,18 +4001,6 @@ kfree_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
         return freed == 0 ? SHRINK_STOP : freed;
 }
 
-void __init kfree_rcu_scheduler_running(void)
-{
-        int cpu;
-
-        for_each_possible_cpu(cpu) {
-                struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
-
-                if (need_offload_krc(krcp))
-                        schedule_delayed_monitor_work(krcp);
-        }
-}
-
 /*
  * During early boot, any blocking grace-period wait automatically
  * implies a grace period.