Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/frederic/linux-dynticks.git

Stephen Rothwell 2024-12-20 15:11:58 +11:00
commit 24896579b9
50 changed files with 340 additions and 231 deletions

View File

@@ -153,3 +153,12 @@ asymmetric system, a broken guest at EL1 could still attempt to execute
 mode will return to host userspace with an ``exit_reason`` of
 ``KVM_EXIT_FAIL_ENTRY`` and will remain non-runnable until successfully
 re-initialised by a subsequent ``KVM_ARM_VCPU_INIT`` operation.
+
+NOHZ FULL
+---------
+
+Nohz full CPUs are not a desirable fallback target to run 32bits el0
+applications. If present, a set of housekeeping CPUs that can do
+the job instead is preferred. Otherwise 32-bit EL0 is not supported.
+Should the need arise, appropriate support can be introduced in the
+future.
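
For reference, the fallback policy described above maps onto the arm64 task_cpu_fallback_mask() hook added further down in this merge. A condensed sketch only (the two housekeeping checks are folded together; see the cpufeature.c hunk below for the actual code):

/*
 * Sketch of the policy above: only 32-bit tasks on a mismatched system fall
 * back to the 32-bit-capable CPUs; everything else falls back to the
 * housekeeping (non-nohz_full) CPUs.
 */
const struct cpumask *task_cpu_fallback_mask(struct task_struct *p)
{
        if (!static_branch_unlikely(&arm64_mismatched_32bit_el0) ||
            !is_compat_thread(task_thread_info(p)))
                return housekeeping_cpumask(HK_TYPE_TICK);

        return system_32bit_el0_cpumask();
}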

View File

@@ -307,13 +307,11 @@ static struct task_struct *bL_switcher_thread_create(int cpu, void *arg)
 {
 struct task_struct *task;
 
-task = kthread_create_on_node(bL_switcher_thread, arg,
-cpu_to_node(cpu), "kswitcher_%d", cpu);
-if (!IS_ERR(task)) {
-kthread_bind(task, cpu);
-wake_up_process(task);
-} else
+task = kthread_run_on_cpu(bL_switcher_thread, arg,
+cpu, "kswitcher_%d");
+if (IS_ERR(task))
 pr_err("%s failed for CPU %d\n", __func__, cpu);
 
 return task;
 }

View File

@@ -671,6 +671,7 @@ static inline bool supports_clearbhb(int scope)
 }
 
 const struct cpumask *system_32bit_el0_cpumask(void);
+const struct cpumask *fallback_32bit_el0_cpumask(void);
 
 DECLARE_STATIC_KEY_FALSE(arm64_mismatched_32bit_el0);
 
 static inline bool system_supports_32bit_el0(void)

View File

@@ -283,6 +283,8 @@ task_cpu_possible_mask(struct task_struct *p)
 }
 #define task_cpu_possible_mask task_cpu_possible_mask
 
+const struct cpumask *task_cpu_fallback_mask(struct task_struct *p);
+
 void verify_cpu_asid_bits(void);
 void post_ttbr_update_workaround(void);

View File

@@ -75,6 +75,7 @@
 #include <linux/cpu.h>
 #include <linux/kasan.h>
 #include <linux/percpu.h>
+#include <linux/sched/isolation.h>
 
 #include <asm/cpu.h>
 #include <asm/cpufeature.h>
@@ -1640,6 +1641,17 @@ const struct cpumask *system_32bit_el0_cpumask(void)
 return cpu_possible_mask;
 }
 
+const struct cpumask *task_cpu_fallback_mask(struct task_struct *p)
+{
+if (!static_branch_unlikely(&arm64_mismatched_32bit_el0))
+return housekeeping_cpumask(HK_TYPE_TICK);
+if (!is_compat_thread(task_thread_info(p)))
+return housekeeping_cpumask(HK_TYPE_TICK);
+return system_32bit_el0_cpumask();
+}
+
 static int __init parse_32bit_el0_param(char *str)
 {
 allow_mismatched_32bit_el0 = true;
@@ -3741,7 +3753,10 @@ static int enable_mismatched_32bit_el0(unsigned int cpu)
 static int lucky_winner = -1;
 
 struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu);
-bool cpu_32bit = id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0);
+bool cpu_32bit = false;
+
+if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0) && housekeeping_cpu(cpu, HK_TYPE_TICK))
+cpu_32bit = true;
 
 if (cpu_32bit) {
 cpumask_set_cpu(cpu, cpu_32bit_el0_mask);

View File

@@ -681,7 +681,7 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
 pid_nr = pid_vnr(pid);
 put_pid(pid);
 
-pit->worker = kthread_create_worker(0, "kvm-pit/%d", pid_nr);
+pit->worker = kthread_run_worker(0, "kvm-pit/%d", pid_nr);
 if (IS_ERR(pit->worker))
 goto fail_kthread;

View File

@@ -517,7 +517,7 @@ struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
 crypto_init_queue(&engine->queue, qlen);
 spin_lock_init(&engine->queue_lock);
 
-engine->kworker = kthread_create_worker(0, "%s", engine->name);
+engine->kworker = kthread_run_worker(0, "%s", engine->name);
 if (IS_ERR(engine->kworker)) {
 dev_err(dev, "failed to create crypto request pump task\n");
 return NULL;

View File

@@ -225,7 +225,7 @@ static void __init cppc_freq_invariance_init(void)
 if (fie_disabled)
 return;
 
-kworker_fie = kthread_create_worker(0, "cppc_fie");
+kworker_fie = kthread_run_worker(0, "cppc_fie");
 if (IS_ERR(kworker_fie)) {
 pr_warn("%s: failed to create kworker_fie: %ld\n", __func__,
 PTR_ERR(kworker_fie));

View File

@@ -277,7 +277,7 @@ int drm_vblank_worker_init(struct drm_vblank_crtc *vblank)
 INIT_LIST_HEAD(&vblank->pending_work);
 init_waitqueue_head(&vblank->work_wait_queue);
 
-worker = kthread_create_worker(0, "card%d-crtc%d",
+worker = kthread_run_worker(0, "card%d-crtc%d",
 vblank->dev->primary->index,
 vblank->pipe);
 if (IS_ERR(worker))

View File

@@ -369,7 +369,7 @@ static int live_parallel_switch(void *arg)
 if (!data[n].ce[0])
 continue;
 
-worker = kthread_create_worker(0, "igt/parallel:%s",
+worker = kthread_run_worker(0, "igt/parallel:%s",
 data[n].ce[0]->engine->name);
 if (IS_ERR(worker)) {
 err = PTR_ERR(worker);

View File

@@ -3574,7 +3574,7 @@ static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
 arg[id].batch = NULL;
 arg[id].count = 0;
 
-worker[id] = kthread_create_worker(0, "igt/smoke:%d", id);
+worker[id] = kthread_run_worker(0, "igt/smoke:%d", id);
 if (IS_ERR(worker[id])) {
 err = PTR_ERR(worker[id]);
 break;

View File

@@ -1025,7 +1025,7 @@ static int __igt_reset_engines(struct intel_gt *gt,
 threads[tmp].engine = other;
 threads[tmp].flags = flags;
 
-worker = kthread_create_worker(0, "igt/%s",
+worker = kthread_run_worker(0, "igt/%s",
 other->name);
 if (IS_ERR(worker)) {
 err = PTR_ERR(worker);

View File

@@ -489,7 +489,7 @@ static int live_slpc_tile_interaction(void *arg)
 return -ENOMEM;
 
 for_each_gt(gt, i915, i) {
-threads[i].worker = kthread_create_worker(0, "igt/slpc_parallel:%d", gt->info.id);
+threads[i].worker = kthread_run_worker(0, "igt/slpc_parallel:%d", gt->info.id);
 
 if (IS_ERR(threads[i].worker)) {
 ret = PTR_ERR(threads[i].worker);

View File

@@ -492,7 +492,7 @@ static int mock_breadcrumbs_smoketest(void *arg)
 for (n = 0; n < ncpus; n++) {
 struct kthread_worker *worker;
 
-worker = kthread_create_worker(0, "igt/%d", n);
+worker = kthread_run_worker(0, "igt/%d", n);
 if (IS_ERR(worker)) {
 ret = PTR_ERR(worker);
 ncpus = n;
@@ -1645,7 +1645,7 @@ static int live_parallel_engines(void *arg)
 for_each_uabi_engine(engine, i915) {
 struct kthread_worker *worker;
 
-worker = kthread_create_worker(0, "igt/parallel:%s",
+worker = kthread_run_worker(0, "igt/parallel:%s",
 engine->name);
 if (IS_ERR(worker)) {
 err = PTR_ERR(worker);
@@ -1806,7 +1806,7 @@ static int live_breadcrumbs_smoketest(void *arg)
 unsigned int i = idx * ncpus + n;
 struct kthread_worker *worker;
 
-worker = kthread_create_worker(0, "igt/%d.%d", idx, n);
+worker = kthread_run_worker(0, "igt/%d.%d", idx, n);
 if (IS_ERR(worker)) {
 ret = PTR_ERR(worker);
 goto out_flush;
@@ -3219,7 +3219,7 @@ static int perf_parallel_engines(void *arg)
 memset(&engines[idx].p, 0, sizeof(engines[idx].p));
 
-worker = kthread_create_worker(0, "igt:%s",
+worker = kthread_run_worker(0, "igt:%s",
 engine->name);
 if (IS_ERR(worker)) {
 err = PTR_ERR(worker);

View File

@@ -109,7 +109,7 @@ int msm_disp_snapshot_init(struct drm_device *drm_dev)
 mutex_init(&kms->dump_mutex);
 
-kms->dump_worker = kthread_create_worker(0, "%s", "disp_snapshot");
+kms->dump_worker = kthread_run_worker(0, "%s", "disp_snapshot");
 if (IS_ERR(kms->dump_worker))
 DRM_ERROR("failed to create disp state task\n");

View File

@@ -115,7 +115,7 @@ int msm_atomic_init_pending_timer(struct msm_pending_timer *timer,
 timer->kms = kms;
 timer->crtc_idx = crtc_idx;
 
-timer->worker = kthread_create_worker(0, "atomic-worker-%d", crtc_idx);
+timer->worker = kthread_run_worker(0, "atomic-worker-%d", crtc_idx);
 if (IS_ERR(timer->worker)) {
 int ret = PTR_ERR(timer->worker);
 timer->worker = NULL;

View File

@@ -859,7 +859,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 gpu->funcs = funcs;
 gpu->name = name;
 
-gpu->worker = kthread_create_worker(0, "gpu-worker");
+gpu->worker = kthread_run_worker(0, "gpu-worker");
 if (IS_ERR(gpu->worker)) {
 ret = PTR_ERR(gpu->worker);
 gpu->worker = NULL;

View File

@@ -269,7 +269,7 @@ int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv)
 /* initialize event thread */
 ev_thread = &priv->event_thread[drm_crtc_index(crtc)];
 ev_thread->dev = ddev;
-ev_thread->worker = kthread_create_worker(0, "crtc_event:%d", crtc->base.id);
+ev_thread->worker = kthread_run_worker(0, "crtc_event:%d", crtc->base.id);
 if (IS_ERR(ev_thread->worker)) {
 ret = PTR_ERR(ev_thread->worker);
 DRM_DEV_ERROR(dev, "failed to create crtc_event kthread\n");

View File

@@ -271,7 +271,7 @@ static int wave5_vpu_probe(struct platform_device *pdev)
 dev_err(&pdev->dev, "failed to get irq resource, falling back to polling\n");
 hrtimer_init(&dev->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
 dev->hrtimer.function = &wave5_vpu_timer_callback;
-dev->worker = kthread_create_worker(0, "vpu_irq_thread");
+dev->worker = kthread_run_worker(0, "vpu_irq_thread");
 if (IS_ERR(dev->worker)) {
 dev_err(&pdev->dev, "failed to create vpu irq worker\n");
 ret = PTR_ERR(dev->worker);

View File

@@ -394,7 +394,7 @@ static int mv88e6xxx_irq_poll_setup(struct mv88e6xxx_chip *chip)
 kthread_init_delayed_work(&chip->irq_poll_work,
 mv88e6xxx_irq_poll);
 
-chip->kworker = kthread_create_worker(0, "%s", dev_name(chip->dev));
+chip->kworker = kthread_run_worker(0, "%s", dev_name(chip->dev));
 if (IS_ERR(chip->kworker))
 return PTR_ERR(chip->kworker);

View File

@@ -2053,7 +2053,7 @@ static int ice_dpll_init_worker(struct ice_pf *pf)
 struct kthread_worker *kworker;
 
 kthread_init_delayed_work(&d->work, ice_dpll_periodic_work);
-kworker = kthread_create_worker(0, "ice-dplls-%s",
+kworker = kthread_run_worker(0, "ice-dplls-%s",
 dev_name(ice_pf_to_dev(pf)));
 if (IS_ERR(kworker))
 return PTR_ERR(kworker);

View File

@@ -182,7 +182,7 @@ static struct gnss_serial *ice_gnss_struct_init(struct ice_pf *pf)
 pf->gnss_serial = gnss;
 
 kthread_init_delayed_work(&gnss->read_work, ice_gnss_read);
-kworker = kthread_create_worker(0, "ice-gnss-%s", dev_name(dev));
+kworker = kthread_run_worker(0, "ice-gnss-%s", dev_name(dev));
 if (IS_ERR(kworker)) {
 kfree(gnss);
 return NULL;

View File

@@ -3080,7 +3080,7 @@ static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp)
 /* Allocate a kworker for handling work required for the ports
 * connected to the PTP hardware clock.
 */
-kworker = kthread_create_worker(0, "ice-ptp-%s",
+kworker = kthread_run_worker(0, "ice-ptp-%s",
 dev_name(ice_pf_to_dev(pf)));
 if (IS_ERR(kworker))
 return PTR_ERR(kworker);

View File

@@ -715,7 +715,7 @@ static int cros_ec_spi_devm_high_pri_alloc(struct device *dev,
 int err;
 
 ec_spi->high_pri_worker =
-kthread_create_worker(0, "cros_ec_spi_high_pri");
+kthread_run_worker(0, "cros_ec_spi_high_pri");
 
 if (IS_ERR(ec_spi->high_pri_worker)) {
 err = PTR_ERR(ec_spi->high_pri_worker);

View File

@@ -296,7 +296,7 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
 if (ptp->info->do_aux_work) {
 kthread_init_delayed_work(&ptp->aux_work, ptp_aux_kworker);
-ptp->kworker = kthread_create_worker(0, "ptp%d", ptp->index);
+ptp->kworker = kthread_run_worker(0, "ptp%d", ptp->index);
 if (IS_ERR(ptp->kworker)) {
 err = PTR_ERR(ptp->kworker);
 pr_err("failed to create ptp aux_worker %d\n", err);

View File

@@ -2610,14 +2610,11 @@ static int bnx2fc_cpu_online(unsigned int cpu)
 
 p = &per_cpu(bnx2fc_percpu, cpu);
 
-thread = kthread_create_on_node(bnx2fc_percpu_io_thread,
-(void *)p, cpu_to_node(cpu),
-"bnx2fc_thread/%d", cpu);
+thread = kthread_create_on_cpu(bnx2fc_percpu_io_thread,
+(void *)p, cpu, "bnx2fc_thread/%d");
 if (IS_ERR(thread))
 return PTR_ERR(thread);
 
-/* bind thread to the cpu */
-kthread_bind(thread, cpu);
 p->iothread = thread;
 wake_up_process(thread);
 return 0;

View File

@@ -415,14 +415,11 @@ static int bnx2i_cpu_online(unsigned int cpu)
 
 p = &per_cpu(bnx2i_percpu, cpu);
 
-thread = kthread_create_on_node(bnx2i_percpu_io_thread, (void *)p,
-cpu_to_node(cpu),
-"bnx2i_thread/%d", cpu);
+thread = kthread_create_on_cpu(bnx2i_percpu_io_thread, (void *)p,
+cpu, "bnx2i_thread/%d");
 if (IS_ERR(thread))
 return PTR_ERR(thread);
 
-/* bind thread to the cpu */
-kthread_bind(thread, cpu);
 p->iothread = thread;
 wake_up_process(thread);
 return 0;

View File

@@ -1961,13 +1961,11 @@ static int qedi_cpu_online(unsigned int cpu)
 struct qedi_percpu_s *p = this_cpu_ptr(&qedi_percpu);
 struct task_struct *thread;
 
-thread = kthread_create_on_node(qedi_percpu_io_thread, (void *)p,
-cpu_to_node(cpu),
-"qedi_thread/%d", cpu);
+thread = kthread_create_on_cpu(qedi_percpu_io_thread, (void *)p,
+cpu, "qedi_thread/%d");
 if (IS_ERR(thread))
 return PTR_ERR(thread);
 
-kthread_bind(thread, cpu);
 p->iothread = thread;
 wake_up_process(thread);
 return 0;

View File

@@ -108,14 +108,12 @@ static int on_all_cpus(int (*fn)(void))
 .fn = fn,
 .started = ATOMIC_INIT(0)
 };
-struct task_struct *k = kthread_create(bstrap_fn, &bstrap,
-"hotpotato%d", cpu);
+struct task_struct *k = kthread_run_on_cpu(bstrap_fn, &bstrap,
+cpu, "hotpotato%d");
 int ret;
 
 if (IS_ERR(k))
 return -ENOMEM;
-kthread_bind(k, cpu);
-wake_up_process(k);
 /*
 * If we call kthread_stop() before the "wake up" has had an
 * effect, then the thread may exit with -EINTR without ever

View File

@@ -2055,7 +2055,7 @@ static int spi_init_queue(struct spi_controller *ctlr)
 ctlr->busy = false;
 ctlr->queue_empty = true;
 
-ctlr->kworker = kthread_create_worker(0, dev_name(&ctlr->dev));
+ctlr->kworker = kthread_run_worker(0, dev_name(&ctlr->dev));
 if (IS_ERR(ctlr->kworker)) {
 dev_err(&ctlr->dev, "failed to create message pump kworker\n");
 return PTR_ERR(ctlr->kworker);

View File

@@ -7635,7 +7635,7 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
 mutex_init(&port->lock);
 mutex_init(&port->swap_lock);
 
-port->wq = kthread_create_worker(0, dev_name(dev));
+port->wq = kthread_run_worker(0, dev_name(dev));
 if (IS_ERR(port->wq))
 return ERR_CAST(port->wq);
 sched_set_fifo(port->wq->task);

View File

@@ -229,7 +229,7 @@ struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr,
 dev = &vdpasim->vdpa.dev;
 
 kthread_init_work(&vdpasim->work, vdpasim_work_fn);
-vdpasim->worker = kthread_create_worker(0, "vDPA sim worker: %s",
+vdpasim->worker = kthread_run_worker(0, "vDPA sim worker: %s",
 dev_attr->name);
 if (IS_ERR(vdpasim->worker))
 goto err_iommu;

View File

@@ -1229,7 +1229,7 @@ int __init watchdog_dev_init(void)
 {
 int err;
 
-watchdog_kworker = kthread_create_worker(0, "watchdogd");
+watchdog_kworker = kthread_run_worker(0, "watchdogd");
 if (IS_ERR(watchdog_kworker)) {
 pr_err("Failed to create watchdog kworker\n");
 return PTR_ERR(watchdog_kworker);

View File

@@ -320,7 +320,7 @@ static void erofs_destroy_percpu_workers(void)
 static struct kthread_worker *erofs_init_percpu_worker(int cpu)
 {
 struct kthread_worker *worker =
-kthread_create_worker_on_cpu(cpu, 0, "erofs_worker/%u", cpu);
+kthread_run_worker_on_cpu(cpu, 0, "erofs_worker/%u");
 
 if (IS_ERR(worker))
 return worker;

View File

@@ -240,6 +240,7 @@ enum cpuhp_state {
 CPUHP_AP_WORKQUEUE_ONLINE,
 CPUHP_AP_RANDOM_ONLINE,
 CPUHP_AP_RCUTREE_ONLINE,
+CPUHP_AP_KTHREADS_ONLINE,
 CPUHP_AP_BASE_CACHEINFO_ONLINE,
 CPUHP_AP_ONLINE_DYN,
 CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 40,

View File

@@ -85,6 +85,7 @@ kthread_run_on_cpu(int (*threadfn)(void *data), void *data,
 void free_kthread_struct(struct task_struct *k);
 void kthread_bind(struct task_struct *k, unsigned int cpu);
 void kthread_bind_mask(struct task_struct *k, const struct cpumask *mask);
+int kthread_affine_preferred(struct task_struct *p, const struct cpumask *mask);
 int kthread_stop(struct task_struct *k);
 int kthread_stop_put(struct task_struct *k);
 bool kthread_should_stop(void);
@@ -186,13 +187,58 @@ extern void __kthread_init_worker(struct kthread_worker *worker,
 int kthread_worker_fn(void *worker_ptr);
 
-__printf(2, 3)
-struct kthread_worker *
-kthread_create_worker(unsigned int flags, const char namefmt[], ...);
+__printf(3, 4)
+struct kthread_worker *kthread_create_worker_on_node(unsigned int flags,
+int node,
+const char namefmt[], ...);
+
+#define kthread_create_worker(flags, namefmt, ...) \
+kthread_create_worker_on_node(flags, NUMA_NO_NODE, namefmt, ## __VA_ARGS__);
+
+/**
+ * kthread_run_worker - create and wake a kthread worker.
+ * @flags: flags modifying the default behavior of the worker
+ * @namefmt: printf-style name for the thread.
+ *
+ * Description: Convenient wrapper for kthread_create_worker() followed by
+ * wake_up_process(). Returns the kthread_worker or ERR_PTR(-ENOMEM).
+ */
+#define kthread_run_worker(flags, namefmt, ...) \
+({ \
+struct kthread_worker *__kw \
+= kthread_create_worker(flags, namefmt, ## __VA_ARGS__); \
+if (!IS_ERR(__kw)) \
+wake_up_process(__kw->task); \
+__kw; \
+})
 
-__printf(3, 4) struct kthread_worker *
+struct kthread_worker *
 kthread_create_worker_on_cpu(int cpu, unsigned int flags,
-const char namefmt[], ...);
+const char namefmt[]);
+
+/**
+ * kthread_run_worker_on_cpu - create and wake a cpu bound kthread worker.
+ * @cpu: CPU number
+ * @flags: flags modifying the default behavior of the worker
+ * @namefmt: printf-style name for the thread. Format is restricted
+ * to "name.*%u". Code fills in cpu number.
+ *
+ * Description: Convenient wrapper for kthread_create_worker_on_cpu()
+ * followed by wake_up_process(). Returns the kthread_worker or
+ * ERR_PTR(-ENOMEM).
+ */
+static inline struct kthread_worker *
+kthread_run_worker_on_cpu(int cpu, unsigned int flags,
+const char namefmt[])
+{
+struct kthread_worker *kw;
+
+kw = kthread_create_worker_on_cpu(cpu, flags, namefmt);
+if (!IS_ERR(kw))
+wake_up_process(kw->task);
+return kw;
+}
 
 bool kthread_queue_work(struct kthread_worker *worker,
 struct kthread_work *work);
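
In short, kthread_create_worker() no longer wakes the newly created worker task, and kthread_run_worker() restores the old create-and-wake behaviour; that is why every existing caller in this merge is converted mechanically. A rough sketch of a caller under the new API (illustrative only; my_engine_init() and the "my-engine" name are made up, not part of this merge):

static struct kthread_worker *my_engine_init(void)
{
        struct kthread_worker *worker;

        /* Create and immediately wake the worker, as the converted callers do. */
        worker = kthread_run_worker(0, "my-engine");
        if (IS_ERR(worker))
                return worker;

        /* The worker task is already running; priority tweaks work as before. */
        sched_set_fifo(worker->task);
        return worker;
}

Callers that want to constrain the worker before it first runs can instead keep kthread_create_worker() and wake worker->task themselves, which is what the RCU changes later in this merge rely on.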

View File

@@ -24,6 +24,7 @@ static inline void leave_mm(void) { }
 #ifndef task_cpu_possible_mask
 # define task_cpu_possible_mask(p) cpu_possible_mask
 # define task_cpu_possible(cpu, p) true
+# define task_cpu_fallback_mask(p) housekeeping_cpumask(HK_TYPE_TICK)
 #else
 # define task_cpu_possible(cpu, p) cpumask_test_cpu((cpu), task_cpu_possible_mask(p))
 #endif

View File

@@ -435,13 +435,11 @@ static int __init kallsyms_test_init(void)
 {
 struct task_struct *t;
 
-t = kthread_create(test_entry, NULL, "kallsyms_test");
+t = kthread_run_on_cpu(test_entry, NULL, 0, "kallsyms_test");
 if (IS_ERR(t)) {
 pr_info("Create kallsyms selftest task failed\n");
 return PTR_ERR(t);
 }
-kthread_bind(t, 0);
-wake_up_process(t);
 return 0;
 }

View File

@@ -35,6 +35,9 @@ static DEFINE_SPINLOCK(kthread_create_lock);
 static LIST_HEAD(kthread_create_list);
 struct task_struct *kthreadd_task;
 
+static LIST_HEAD(kthreads_hotplug);
+static DEFINE_MUTEX(kthreads_hotplug_lock);
+
 struct kthread_create_info
 {
 /* Information passed to kthread() from kthreadd. */
@@ -53,6 +56,8 @@ struct kthread_create_info
 struct kthread {
 unsigned long flags;
 unsigned int cpu;
+unsigned int node;
+int started;
 int result;
 int (*threadfn)(void *);
 void *data;
@@ -63,6 +68,9 @@ struct kthread {
 #endif
 /* To store the full name if task comm is truncated. */
 char *full_name;
+struct task_struct *task;
+struct list_head hotplug_node;
+struct cpumask *preferred_affinity;
 };
 
 enum KTHREAD_BITS {
@@ -121,8 +129,11 @@ bool set_kthread_struct(struct task_struct *p)
 init_completion(&kthread->exited);
 init_completion(&kthread->parked);
+INIT_LIST_HEAD(&kthread->hotplug_node);
 p->vfork_done = &kthread->exited;
 
+kthread->task = p;
+kthread->node = tsk_fork_get_node(current);
 p->worker_private = kthread;
 return true;
 }
@@ -313,6 +324,16 @@ void __noreturn kthread_exit(long result)
 {
 struct kthread *kthread = to_kthread(current);
 kthread->result = result;
+if (!list_empty(&kthread->hotplug_node)) {
+mutex_lock(&kthreads_hotplug_lock);
+list_del(&kthread->hotplug_node);
+mutex_unlock(&kthreads_hotplug_lock);
+
+if (kthread->preferred_affinity) {
+kfree(kthread->preferred_affinity);
+kthread->preferred_affinity = NULL;
+}
+}
 do_exit(0);
 }
 EXPORT_SYMBOL(kthread_exit);
@@ -338,6 +359,56 @@ void __noreturn kthread_complete_and_exit(struct completion *comp, long code)
 }
 EXPORT_SYMBOL(kthread_complete_and_exit);
 
+static void kthread_fetch_affinity(struct kthread *kthread, struct cpumask *cpumask)
+{
+const struct cpumask *pref;
+
+if (kthread->preferred_affinity) {
+pref = kthread->preferred_affinity;
+} else {
+if (WARN_ON_ONCE(kthread->node == NUMA_NO_NODE))
+return;
+pref = cpumask_of_node(kthread->node);
+}
+
+cpumask_and(cpumask, pref, housekeeping_cpumask(HK_TYPE_KTHREAD));
+if (cpumask_empty(cpumask))
+cpumask_copy(cpumask, housekeeping_cpumask(HK_TYPE_KTHREAD));
+}
+
+static void kthread_affine_node(void)
+{
+struct kthread *kthread = to_kthread(current);
+cpumask_var_t affinity;
+
+WARN_ON_ONCE(kthread_is_per_cpu(current));
+
+if (kthread->node == NUMA_NO_NODE) {
+housekeeping_affine(current, HK_TYPE_KTHREAD);
+} else {
+if (!zalloc_cpumask_var(&affinity, GFP_KERNEL)) {
+WARN_ON_ONCE(1);
+return;
+}
+
+mutex_lock(&kthreads_hotplug_lock);
+WARN_ON_ONCE(!list_empty(&kthread->hotplug_node));
+list_add_tail(&kthread->hotplug_node, &kthreads_hotplug);
+/*
+ * The node cpumask is racy when read from kthread() but:
+ * - a racing CPU going down will either fail on the subsequent
+ *   call to set_cpus_allowed_ptr() or be migrated to housekeepers
+ *   afterwards by the scheduler.
+ * - a racing CPU going up will be handled by kthreads_online_cpu()
+ */
+kthread_fetch_affinity(kthread, affinity);
+set_cpus_allowed_ptr(current, affinity);
+mutex_unlock(&kthreads_hotplug_lock);
+
+free_cpumask_var(affinity);
+}
+}
+
 static int kthread(void *_create)
 {
 static const struct sched_param param = { .sched_priority = 0 };
@@ -368,7 +439,6 @@ static int kthread(void *_create)
 * back to default in case they have been changed.
 */
 sched_setscheduler_nocheck(current, SCHED_NORMAL, &param);
-set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_KTHREAD));
 
 /* OK, tell user we're spawned, wait for stop or wakeup */
 __set_current_state(TASK_UNINTERRUPTIBLE);
@@ -382,6 +452,11 @@ static int kthread(void *_create)
 schedule_preempt_disabled();
 preempt_enable();
 
+self->started = 1;
+
+if (!(current->flags & PF_NO_SETAFFINITY) && !self->preferred_affinity)
+kthread_affine_node();
+
 ret = -EINTR;
 if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
 cgroup_kthread_ready();
@@ -540,7 +615,9 @@ static void __kthread_bind(struct task_struct *p, unsigned int cpu, unsigned int
 void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
 {
+struct kthread *kthread = to_kthread(p);
 __kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
+WARN_ON_ONCE(kthread->started);
 }
 
 /*
@@ -554,7 +631,9 @@ void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
 */
 void kthread_bind(struct task_struct *p, unsigned int cpu)
 {
+struct kthread *kthread = to_kthread(p);
 __kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
+WARN_ON_ONCE(kthread->started);
 }
 EXPORT_SYMBOL(kthread_bind);
@@ -775,6 +854,92 @@ int kthreadd(void *unused)
 return 0;
 }
 
+int kthread_affine_preferred(struct task_struct *p, const struct cpumask *mask)
+{
+struct kthread *kthread = to_kthread(p);
+cpumask_var_t affinity;
+unsigned long flags;
+int ret;
+
+if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE) || kthread->started) {
+WARN_ON(1);
+return -EINVAL;
+}
+
+WARN_ON_ONCE(kthread->preferred_affinity);
+
+if (!zalloc_cpumask_var(&affinity, GFP_KERNEL))
+return -ENOMEM;
+
+kthread->preferred_affinity = kzalloc(sizeof(struct cpumask), GFP_KERNEL);
+if (!kthread->preferred_affinity) {
+ret = -ENOMEM;
+goto out;
+}
+
+mutex_lock(&kthreads_hotplug_lock);
+cpumask_copy(kthread->preferred_affinity, mask);
+WARN_ON_ONCE(!list_empty(&kthread->hotplug_node));
+list_add_tail(&kthread->hotplug_node, &kthreads_hotplug);
+kthread_fetch_affinity(kthread, affinity);
+
+/* It's safe because the task is inactive. */
+raw_spin_lock_irqsave(&p->pi_lock, flags);
+do_set_cpus_allowed(p, affinity);
+raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+
+mutex_unlock(&kthreads_hotplug_lock);
+out:
+free_cpumask_var(affinity);
+return 0;
+}
+
+/*
+ * Re-affine kthreads according to their preferences
+ * and the newly online CPU. The CPU down part is handled
+ * by select_fallback_rq() which default re-affines to
+ * housekeepers from other nodes in case the preferred
+ * affinity doesn't apply anymore.
+ */
+static int kthreads_online_cpu(unsigned int cpu)
+{
+cpumask_var_t affinity;
+struct kthread *k;
+int ret;
+
+guard(mutex)(&kthreads_hotplug_lock);
+
+if (list_empty(&kthreads_hotplug))
+return 0;
+
+if (!zalloc_cpumask_var(&affinity, GFP_KERNEL))
+return -ENOMEM;
+
+ret = 0;
+
+list_for_each_entry(k, &kthreads_hotplug, hotplug_node) {
+if (WARN_ON_ONCE((k->task->flags & PF_NO_SETAFFINITY) ||
+kthread_is_per_cpu(k->task))) {
+ret = -EINVAL;
+continue;
+}
+kthread_fetch_affinity(k, affinity);
+set_cpus_allowed_ptr(k->task, affinity);
+}
+
+free_cpumask_var(affinity);
+
+return ret;
+}
+
+static int kthreads_init(void)
+{
+return cpuhp_setup_state(CPUHP_AP_KTHREADS_ONLINE, "kthreads:online",
+kthreads_online_cpu, NULL);
+}
+early_initcall(kthreads_init);
+
 void __kthread_init_worker(struct kthread_worker *worker,
 const char *name,
 struct lock_class_key *key)
@@ -866,12 +1031,11 @@ int kthread_worker_fn(void *worker_ptr)
 EXPORT_SYMBOL_GPL(kthread_worker_fn);
 
 static __printf(3, 0) struct kthread_worker *
-__kthread_create_worker(int cpu, unsigned int flags,
+__kthread_create_worker_on_node(unsigned int flags, int node,
 const char namefmt[], va_list args)
 {
 struct kthread_worker *worker;
 struct task_struct *task;
-int node = NUMA_NO_NODE;
 
 worker = kzalloc(sizeof(*worker), GFP_KERNEL);
 if (!worker)
@@ -879,20 +1043,14 @@ __kthread_create_worker(int cpu, unsigned int flags,
 
 kthread_init_worker(worker);
 
-if (cpu >= 0)
-node = cpu_to_node(cpu);
-
 task = __kthread_create_on_node(kthread_worker_fn, worker,
 node, namefmt, args);
 if (IS_ERR(task))
 goto fail_task;
 
-if (cpu >= 0)
-kthread_bind(task, cpu);
-
 worker->flags = flags;
 worker->task = task;
-wake_up_process(task);
 
 return worker;
 
 fail_task:
@@ -903,6 +1061,7 @@ __kthread_create_worker(int cpu, unsigned int flags,
 /**
 * kthread_create_worker - create a kthread worker
 * @flags: flags modifying the default behavior of the worker
+* @node: task structure for the thread is allocated on this node
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
@@ -910,25 +1069,26 @@ __kthread_create_worker(int cpu, unsigned int flags,
 * when the caller was killed by a fatal signal.
 */
 struct kthread_worker *
-kthread_create_worker(unsigned int flags, const char namefmt[], ...)
+kthread_create_worker_on_node(unsigned int flags, int node, const char namefmt[], ...)
 {
 struct kthread_worker *worker;
 va_list args;
 
 va_start(args, namefmt);
-worker = __kthread_create_worker(-1, flags, namefmt, args);
+worker = __kthread_create_worker_on_node(flags, node, namefmt, args);
 va_end(args);
 
 return worker;
 }
-EXPORT_SYMBOL(kthread_create_worker);
+EXPORT_SYMBOL(kthread_create_worker_on_node);
 
 /**
 * kthread_create_worker_on_cpu - create a kthread worker and bind it
 * to a given CPU and the associated NUMA node.
 * @cpu: CPU number
 * @flags: flags modifying the default behavior of the worker
-* @namefmt: printf-style name for the kthread worker (task).
+* @namefmt: printf-style name for the thread. Format is restricted
+* to "name.*%u". Code fills in cpu number.
 *
 * Use a valid CPU number if you want to bind the kthread worker
 * to the given CPU and the associated NUMA node.
@@ -960,14 +1120,13 @@ EXPORT_SYMBOL(kthread_create_worker);
 */
 struct kthread_worker *
 kthread_create_worker_on_cpu(int cpu, unsigned int flags,
-const char namefmt[], ...)
+const char namefmt[])
 {
 struct kthread_worker *worker;
-va_list args;
 
-va_start(args, namefmt);
-worker = __kthread_create_worker(cpu, flags, namefmt, args);
-va_end(args);
+worker = kthread_create_worker_on_node(flags, cpu_to_node(cpu), namefmt, cpu);
+if (!IS_ERR(worker))
+kthread_bind(worker->task, cpu);
 
 return worker;
 }
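
A minimal usage sketch of the new kthread_affine_preferred() API, assuming a regular kthread that has been created but not yet woken (my_spawn(), my_fn() and "my_kthread" are made-up names; the RCU boost-kthread changes later in this merge follow the same pattern):

static int my_fn(void *unused)
{
        while (!kthread_should_stop()) {
                set_current_state(TASK_INTERRUPTIBLE);
                schedule();
        }
        return 0;
}

static int my_spawn(const struct cpumask *preferred)
{
        struct task_struct *t;

        t = kthread_create(my_fn, NULL, "my_kthread");
        if (IS_ERR(t))
                return PTR_ERR(t);

        /* Must be done before the task first runs (see kthread->started). */
        kthread_affine_preferred(t, preferred);
        wake_up_process(t);
        return 0;
}

The preferred mask is intersected with the HK_TYPE_KTHREAD housekeeping CPUs and re-applied by kthreads_online_cpu() as CPUs come online; if the intersection ends up empty, the kthread falls back to the housekeeping CPUs.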

View File

@@ -149,7 +149,6 @@ static int rcu_scheduler_fully_active __read_mostly;
 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
 unsigned long gps, unsigned long flags);
-static struct task_struct *rcu_boost_task(struct rcu_node *rnp);
 static void invoke_rcu_core(void);
 static void rcu_report_exp_rdp(struct rcu_data *rdp);
 static void sync_sched_exp_online_cleanup(int cpu);
@@ -4072,6 +4071,22 @@ rcu_boot_init_percpu_data(int cpu)
 rcu_boot_init_nocb_percpu_data(rdp);
 }
 
+static void rcu_thread_affine_rnp(struct task_struct *t, struct rcu_node *rnp)
+{
+cpumask_var_t affinity;
+int cpu;
+
+if (!zalloc_cpumask_var(&affinity, GFP_KERNEL))
+return;
+
+for_each_leaf_node_possible_cpu(rnp, cpu)
+cpumask_set_cpu(cpu, affinity);
+
+kthread_affine_preferred(t, affinity);
+
+free_cpumask_var(affinity);
+}
+
 struct kthread_worker *rcu_exp_gp_kworker;
 
 static void rcu_spawn_exp_par_gp_kworker(struct rcu_node *rnp)
@@ -4094,16 +4109,9 @@ static void rcu_spawn_exp_par_gp_kworker(struct rcu_node *rnp)
 if (IS_ENABLED(CONFIG_RCU_EXP_KTHREAD))
 sched_setscheduler_nocheck(kworker->task, SCHED_FIFO, &param);
-}
 
-static struct task_struct *rcu_exp_par_gp_task(struct rcu_node *rnp)
-{
-struct kthread_worker *kworker = READ_ONCE(rnp->exp_kworker);
-
-if (!kworker)
-return NULL;
-
-return kworker->task;
+rcu_thread_affine_rnp(kworker->task, rnp);
+wake_up_process(kworker->task);
 }
 
 static void __init rcu_start_exp_gp_kworker(void)
@@ -4111,7 +4119,7 @@ static void __init rcu_start_exp_gp_kworker(void)
 const char *name = "rcu_exp_gp_kthread_worker";
 struct sched_param param = { .sched_priority = kthread_prio };
 
-rcu_exp_gp_kworker = kthread_create_worker(0, name);
+rcu_exp_gp_kworker = kthread_run_worker(0, name);
 if (IS_ERR_OR_NULL(rcu_exp_gp_kworker)) {
 pr_err("Failed to create %s!\n", name);
 rcu_exp_gp_kworker = NULL;
@@ -4188,67 +4196,6 @@ int rcutree_prepare_cpu(unsigned int cpu)
 return 0;
 }
 
-/*
- * Update kthreads affinity during CPU-hotplug changes.
- *
- * Set the per-rcu_node kthread's affinity to cover all CPUs that are
- * served by the rcu_node in question. The CPU hotplug lock is still
- * held, so the value of rnp->qsmaskinit will be stable.
- *
- * We don't include outgoingcpu in the affinity set, use -1 if there is
- * no outgoing CPU. If there are no CPUs left in the affinity set,
- * this function allows the kthread to execute on any CPU.
- *
- * Any future concurrent calls are serialized via ->kthread_mutex.
- */
-static void rcutree_affinity_setting(unsigned int cpu, int outgoingcpu)
-{
-cpumask_var_t cm;
-unsigned long mask;
-struct rcu_data *rdp;
-struct rcu_node *rnp;
-struct task_struct *task_boost, *task_exp;
-
-rdp = per_cpu_ptr(&rcu_data, cpu);
-rnp = rdp->mynode;
-
-task_boost = rcu_boost_task(rnp);
-task_exp = rcu_exp_par_gp_task(rnp);
-
-/*
- * If CPU is the boot one, those tasks are created later from early
- * initcall since kthreadd must be created first.
- */
-if (!task_boost && !task_exp)
-return;
-
-if (!zalloc_cpumask_var(&cm, GFP_KERNEL))
-return;
-
-mutex_lock(&rnp->kthread_mutex);
-mask = rcu_rnp_online_cpus(rnp);
-for_each_leaf_node_possible_cpu(rnp, cpu)
-if ((mask & leaf_node_cpu_bit(rnp, cpu)) &&
-cpu != outgoingcpu)
-cpumask_set_cpu(cpu, cm);
-cpumask_and(cm, cm, housekeeping_cpumask(HK_TYPE_RCU));
-if (cpumask_empty(cm)) {
-cpumask_copy(cm, housekeeping_cpumask(HK_TYPE_RCU));
-if (outgoingcpu >= 0)
-cpumask_clear_cpu(outgoingcpu, cm);
-}
-
-if (task_exp)
-set_cpus_allowed_ptr(task_exp, cm);
-
-if (task_boost)
-set_cpus_allowed_ptr(task_boost, cm);
-
-mutex_unlock(&rnp->kthread_mutex);
-
-free_cpumask_var(cm);
-}
-
 /*
 * Has the specified (known valid) CPU ever been fully online?
 */
@@ -4277,7 +4224,6 @@ int rcutree_online_cpu(unsigned int cpu)
 if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
 return 0; /* Too early in boot for scheduler work. */
 sync_sched_exp_online_cleanup(cpu);
-rcutree_affinity_setting(cpu, -1);
 
 // Stop-machine done, so allow nohz_full to disable tick.
 tick_dep_clear(TICK_DEP_BIT_RCU);
@@ -4494,8 +4440,6 @@ int rcutree_offline_cpu(unsigned int cpu)
 rnp->ffmask &= ~rdp->grpmask;
 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 
-rcutree_affinity_setting(cpu, cpu);
-
 // nohz_full CPUs need the tick for stop-machine to work quickly
 tick_dep_set(TICK_DEP_BIT_RCU);
 return 0;

View File

@@ -1218,16 +1218,13 @@ static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
 raw_spin_lock_irqsave_rcu_node(rnp, flags);
 rnp->boost_kthread_task = t;
 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 sp.sched_priority = kthread_prio;
 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
+rcu_thread_affine_rnp(t, rnp);
 wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
 }
 
-static struct task_struct *rcu_boost_task(struct rcu_node *rnp)
-{
-return READ_ONCE(rnp->boost_kthread_task);
-}
-
 #else /* #ifdef CONFIG_RCU_BOOST */
 
 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
@@ -1244,10 +1241,6 @@ static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
 {
 }
 
-static struct task_struct *rcu_boost_task(struct rcu_node *rnp)
-{
-return NULL;
-}
-
 #endif /* #else #ifdef CONFIG_RCU_BOOST */
 
 /*

View File

@@ -3536,7 +3536,7 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
 *
 * More yuck to audit.
 */
-do_set_cpus_allowed(p, task_cpu_possible_mask(p));
+do_set_cpus_allowed(p, task_cpu_fallback_mask(p));
 state = fail;
 break;
 case fail:

View File

@@ -5410,7 +5410,7 @@ static struct kthread_worker *scx_create_rt_helper(const char *name)
 {
 struct kthread_worker *helper;
 
-helper = kthread_create_worker(0, name);
+helper = kthread_run_worker(0, name);
 if (helper)
 sched_set_fifo(helper->task);
 return helper;

View File

@@ -7832,7 +7832,7 @@ static void __init wq_cpu_intensive_thresh_init(void)
 unsigned long thresh;
 unsigned long bogo;
 
-pwq_release_worker = kthread_create_worker(0, "pool_workqueue_release");
+pwq_release_worker = kthread_run_worker(0, "pool_workqueue_release");
 BUG_ON(IS_ERR(pwq_release_worker));
 
 /* if the user set it to a specific value, keep it */

View File

@@ -371,14 +371,10 @@ static int ot_start_sync(struct ot_test *test)
 if (!cpu_online(cpu))
 continue;
 
-work = kthread_create_on_node(ot_thread_worker, item,
-cpu_to_node(cpu), "ot_worker_%d", cpu);
-if (IS_ERR(work)) {
+work = kthread_run_on_cpu(ot_thread_worker, item,
+cpu, "ot_worker_%d");
+if (IS_ERR(work))
 pr_err("failed to create thread for cpu %d\n", cpu);
-} else {
-kthread_bind(work, cpu);
-wake_up_process(work);
-}
 }
 
 /* wait a while to make sure all threads waiting at start line */
@@ -562,14 +558,9 @@ static int ot_start_async(struct ot_test *test)
 if (!cpu_online(cpu))
 continue;
 
-work = kthread_create_on_node(ot_thread_worker, item,
-cpu_to_node(cpu), "ot_worker_%d", cpu);
-if (IS_ERR(work)) {
+work = kthread_run_on_cpu(ot_thread_worker, item, cpu, "ot_worker_%d");
+if (IS_ERR(work))
 pr_err("failed to create thread for cpu %d\n", cpu);
-} else {
-kthread_bind(work, cpu);
-wake_up_process(work);
-}
 }
 
 /* wait a while to make sure all threads waiting at start line */

View File

@@ -3156,15 +3156,9 @@ void wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx)
 static int kcompactd(void *p)
 {
 pg_data_t *pgdat = (pg_data_t *)p;
-struct task_struct *tsk = current;
 long default_timeout = msecs_to_jiffies(HPAGE_FRAG_CHECK_INTERVAL_MSEC);
 long timeout = default_timeout;
-const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
-
-if (!cpumask_empty(cpumask))
-set_cpus_allowed_ptr(tsk, cpumask);
 
 set_freezable();
 
 pgdat->kcompactd_max_order = 0;
@@ -3235,10 +3229,12 @@ void __meminit kcompactd_run(int nid)
 if (pgdat->kcompactd)
 return;
 
-pgdat->kcompactd = kthread_run(kcompactd, pgdat, "kcompactd%d", nid);
+pgdat->kcompactd = kthread_create_on_node(kcompactd, pgdat, nid, "kcompactd%d", nid);
 if (IS_ERR(pgdat->kcompactd)) {
 pr_err("Failed to start kcompactd on node %d\n", nid);
 pgdat->kcompactd = NULL;
+} else {
+wake_up_process(pgdat->kcompactd);
 }
 }
@@ -3256,30 +3252,6 @@ void __meminit kcompactd_stop(int nid)
 }
 }
 
-/*
- * It's optimal to keep kcompactd on the same CPUs as their memory, but
- * not required for correctness. So if the last cpu in a node goes
- * away, we get changed to run anywhere: as the first one comes back,
- * restore their cpu bindings.
- */
-static int kcompactd_cpu_online(unsigned int cpu)
-{
-int nid;
-
-for_each_node_state(nid, N_MEMORY) {
-pg_data_t *pgdat = NODE_DATA(nid);
-const struct cpumask *mask;
-
-mask = cpumask_of_node(pgdat->node_id);
-
-if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
-/* One of our CPUs online: restore mask */
-if (pgdat->kcompactd)
-set_cpus_allowed_ptr(pgdat->kcompactd, mask);
-}
-return 0;
-}
-
 static int proc_dointvec_minmax_warn_RT_change(const struct ctl_table *table,
 int write, void *buffer, size_t *lenp, loff_t *ppos)
 {
@@ -3339,15 +3311,6 @@ static struct ctl_table vm_compaction[] = {
 static int __init kcompactd_init(void)
 {
 int nid;
-int ret;
-
-ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
-"mm/compaction:online",
-kcompactd_cpu_online, NULL);
-if (ret < 0) {
-pr_err("kcompactd: failed to register hotplug callbacks.\n");
-return ret;
-}
 
 for_each_node_state(nid, N_MEMORY)
 kcompactd_run(nid);

View File

@@ -7151,10 +7151,6 @@ static int kswapd(void *p)
 unsigned int highest_zoneidx = MAX_NR_ZONES - 1;
 pg_data_t *pgdat = (pg_data_t *)p;
 struct task_struct *tsk = current;
-const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
-
-if (!cpumask_empty(cpumask))
-set_cpus_allowed_ptr(tsk, cpumask);
 
 /*
 * Tell the memory management that we're a "memory allocator",
@@ -7323,13 +7319,15 @@ void __meminit kswapd_run(int nid)
 pgdat_kswapd_lock(pgdat);
 if (!pgdat->kswapd) {
-pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
+pgdat->kswapd = kthread_create_on_node(kswapd, pgdat, nid, "kswapd%d", nid);
 if (IS_ERR(pgdat->kswapd)) {
 /* failure at boot is fatal */
 pr_err("Failed to start kswapd on node %d,ret=%ld\n",
 nid, PTR_ERR(pgdat->kswapd));
 BUG_ON(system_state < SYSTEM_RUNNING);
 pgdat->kswapd = NULL;
+} else {
+wake_up_process(pgdat->kswapd);
 }
 }
 pgdat_kswapd_unlock(pgdat);

View File

@@ -66,7 +66,7 @@ static int ksz_connect(struct dsa_switch *ds)
 if (!priv)
 return -ENOMEM;
 
-xmit_worker = kthread_create_worker(0, "dsa%d:%d_xmit",
+xmit_worker = kthread_run_worker(0, "dsa%d:%d_xmit",
 ds->dst->index, ds->index);
 if (IS_ERR(xmit_worker)) {
 ret = PTR_ERR(xmit_worker);

View File

@@ -110,7 +110,7 @@ static int ocelot_connect(struct dsa_switch *ds)
 if (!priv)
 return -ENOMEM;
 
-priv->xmit_worker = kthread_create_worker(0, "felix_xmit");
+priv->xmit_worker = kthread_run_worker(0, "felix_xmit");
 if (IS_ERR(priv->xmit_worker)) {
 err = PTR_ERR(priv->xmit_worker);
 kfree(priv);

View File

@@ -707,7 +707,7 @@ static int sja1105_connect(struct dsa_switch *ds)
 spin_lock_init(&priv->meta_lock);
 
-xmit_worker = kthread_create_worker(0, "dsa%d:%d_xmit",
+xmit_worker = kthread_run_worker(0, "dsa%d:%d_xmit",
 ds->dst->index, ds->index);
 if (IS_ERR(xmit_worker)) {
 err = PTR_ERR(xmit_worker);