sched_ext: Make find_dsq_for_dispatch() handle SCX_DSQ_LOCAL_ON

find_dsq_for_dispatch() handles all DSQ IDs except SCX_DSQ_LOCAL_ON.
Instead, each caller is handling SCX_DSQ_LOCAL_ON before calling it. Move
SCX_DSQ_LOCAL_ON lookup into find_dsq_for_dispatch() to remove duplicate
code in direct_dispatch() and dispatch_to_local_dsq().

No functional changes intended.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: David Vernet <void@manifault.com>
Tejun Heo 2024-09-09 13:42:47 -10:00
parent 4d3ca89bdd
commit e683949a4b
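
For readers less familiar with the DSQ ID space: SCX_DSQ_LOCAL_ON is not a
single DSQ but a family of "local DSQ of CPU N" verdicts, where the target
CPU is encoded in the DSQ ID and extracted with SCX_DSQ_LOCAL_CPU_MASK, as
the new find_dsq_for_dispatch() hunk below does. A minimal standalone sketch
of that encoding follows; the EX_* names and bit positions are assumptions
for illustration, not the authoritative SCX_DSQ_* definitions from the
sched_ext headers:

    #include <stdint.h>

    /* Assumed bit layout, for illustration only. */
    #define EX_DSQ_FLAG_BUILTIN     (1ULL << 63)
    #define EX_DSQ_FLAG_LOCAL_ON    (1ULL << 62)
    #define EX_DSQ_LOCAL_ON         (EX_DSQ_FLAG_BUILTIN | EX_DSQ_FLAG_LOCAL_ON)
    #define EX_DSQ_LOCAL_CPU_MASK   0xffffffffULL

    /* Build a "dispatch to @cpu's local DSQ" verdict... */
    static inline uint64_t ex_dsq_local_on(int32_t cpu)
    {
            return EX_DSQ_LOCAL_ON | (uint32_t)cpu;
    }

    /* ...and recover the target CPU from it, the way the consolidated
     * lookup does with SCX_DSQ_LOCAL_CPU_MASK before calling cpu_rq().
     */
    static inline int32_t ex_dsq_local_on_cpu(uint64_t dsq_id)
    {
            return (int32_t)(dsq_id & EX_DSQ_LOCAL_CPU_MASK);
    }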

@@ -1804,6 +1804,15 @@ static struct scx_dispatch_q *find_dsq_for_dispatch(struct rq *rq, u64 dsq_id,
         if (dsq_id == SCX_DSQ_LOCAL)
                 return &rq->scx.local_dsq;
 
+        if ((dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON) {
+                s32 cpu = dsq_id & SCX_DSQ_LOCAL_CPU_MASK;
+
+                if (!ops_cpu_valid(cpu, "in SCX_DSQ_LOCAL_ON dispatch verdict"))
+                        return &scx_dsq_global;
+
+                return &cpu_rq(cpu)->scx.local_dsq;
+        }
+
         dsq = find_non_local_dsq(dsq_id);
         if (unlikely(!dsq)) {
                 scx_ops_error("non-existent DSQ 0x%llx for %s[%d]",
@@ -1847,8 +1856,8 @@ static void mark_direct_dispatch(struct task_struct *ddsp_task,
 static void direct_dispatch(struct task_struct *p, u64 enq_flags)
 {
         struct rq *rq = task_rq(p);
-        struct scx_dispatch_q *dsq;
-        u64 dsq_id = p->scx.ddsp_dsq_id;
+        struct scx_dispatch_q *dsq =
+                find_dsq_for_dispatch(rq, p->scx.ddsp_dsq_id, p);
 
         touch_core_sched_dispatch(rq, p);
 
@@ -1860,15 +1869,9 @@ static void direct_dispatch(struct task_struct *p, u64 enq_flags)
          * DSQ_LOCAL_ON verdicts targeting the local DSQ of a remote CPU, defer
          * the enqueue so that it's executed when @rq can be unlocked.
          */
-        if ((dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON) {
-                s32 cpu = dsq_id & SCX_DSQ_LOCAL_CPU_MASK;
+        if (dsq->id == SCX_DSQ_LOCAL && dsq != &rq->scx.local_dsq) {
                 unsigned long opss;
 
-                if (cpu == cpu_of(rq)) {
-                        dsq_id = SCX_DSQ_LOCAL;
-                        goto dispatch;
-                }
-
                 opss = atomic_long_read(&p->scx.ops_state) & SCX_OPSS_STATE_MASK;
 
                 switch (opss & SCX_OPSS_STATE_MASK) {
@@ -1895,8 +1898,6 @@ static void direct_dispatch(struct task_struct *p, u64 enq_flags)
                 return;
         }
 
-dispatch:
-        dsq = find_dsq_for_dispatch(rq, dsq_id, p);
         dispatch_enqueue(dsq, p, p->scx.ddsp_enq_flags | SCX_ENQ_CLEAR_OPSS);
 }
 
@@ -2372,51 +2373,38 @@ static bool consume_dispatch_q(struct rq *rq, struct scx_dispatch_q *dsq)
 enum dispatch_to_local_dsq_ret {
         DTL_DISPATCHED,         /* successfully dispatched */
         DTL_LOST,               /* lost race to dequeue */
-        DTL_NOT_LOCAL,          /* destination is not a local DSQ */
-        DTL_INVALID,            /* invalid local dsq_id */
 };
 
 /**
  * dispatch_to_local_dsq - Dispatch a task to a local dsq
  * @rq: current rq which is locked
- * @dsq_id: destination dsq ID
+ * @dst_dsq: destination DSQ
  * @p: task to dispatch
  * @enq_flags: %SCX_ENQ_*
  *
- * We're holding @rq lock and want to dispatch @p to the local DSQ identified by
- * @dsq_id. This function performs all the synchronization dancing needed
- * because local DSQs are protected with rq locks.
+ * We're holding @rq lock and want to dispatch @p to @dst_dsq which is a local
+ * DSQ. This function performs all the synchronization dancing needed because
+ * local DSQs are protected with rq locks.
  *
  * The caller must have exclusive ownership of @p (e.g. through
  * %SCX_OPSS_DISPATCHING).
  */
 static enum dispatch_to_local_dsq_ret
-dispatch_to_local_dsq(struct rq *rq, u64 dsq_id, struct task_struct *p,
-                      u64 enq_flags)
+dispatch_to_local_dsq(struct rq *rq, struct scx_dispatch_q *dst_dsq,
+                      struct task_struct *p, u64 enq_flags)
 {
         struct rq *src_rq = task_rq(p);
-        struct rq *dst_rq;
+        struct rq *dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq);
 
         /*
          * We're synchronized against dequeue through DISPATCHING. As @p can't
          * be dequeued, its task_rq and cpus_allowed are stable too.
+         *
+         * If dispatching to @rq that @p is already on, no lock dancing needed.
          */
-        if (dsq_id == SCX_DSQ_LOCAL) {
-                dst_rq = rq;
-        } else if ((dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON) {
-                s32 cpu = dsq_id & SCX_DSQ_LOCAL_CPU_MASK;
-
-                if (!ops_cpu_valid(cpu, "in SCX_DSQ_LOCAL_ON dispatch verdict"))
-                        return DTL_INVALID;
-
-                dst_rq = cpu_rq(cpu);
-        } else {
-                return DTL_NOT_LOCAL;
-        }
-
-        /* if dispatching to @rq that @p is already on, no lock dancing needed */
         if (rq == src_rq && rq == dst_rq) {
-                dispatch_enqueue(&dst_rq->scx.local_dsq, p,
-                                 enq_flags | SCX_ENQ_CLEAR_OPSS);
+                dispatch_enqueue(dst_dsq, p, enq_flags | SCX_ENQ_CLEAR_OPSS);
                 return DTL_DISPATCHED;
         }
 
@@ -2558,19 +2546,21 @@ static void finish_dispatch(struct rq *rq, struct task_struct *p,
 
         BUG_ON(!(p->scx.flags & SCX_TASK_QUEUED));
 
-        switch (dispatch_to_local_dsq(rq, dsq_id, p, enq_flags)) {
-        case DTL_DISPATCHED:
-                break;
-        case DTL_LOST:
-                break;
-        case DTL_INVALID:
-                dsq_id = SCX_DSQ_GLOBAL;
-                fallthrough;
-        case DTL_NOT_LOCAL:
-                dsq = find_dsq_for_dispatch(cpu_rq(raw_smp_processor_id()),
-                                            dsq_id, p);
+        dsq = find_dsq_for_dispatch(this_rq(), dsq_id, p);
+
+        if (dsq->id == SCX_DSQ_LOCAL) {
+                switch (dispatch_to_local_dsq(rq, dsq, p, enq_flags)) {
+                case DTL_DISPATCHED:
+                        break;
+                case DTL_LOST:
+                        break;
+                case DTL_INVALID:
+                        dispatch_enqueue(&scx_dsq_global, p,
+                                         enq_flags | SCX_ENQ_CLEAR_OPSS);
+                        break;
+                }
+        } else {
                 dispatch_enqueue(dsq, p, enq_flags | SCX_ENQ_CLEAR_OPSS);
-                break;
         }
 }
 
@@ -2747,13 +2737,13 @@ static void process_ddsp_deferred_locals(struct rq *rq)
          */
         while ((p = list_first_entry_or_null(&rq->scx.ddsp_deferred_locals,
                                              struct task_struct, scx.dsq_list.node))) {
-                s32 ret;
+                struct scx_dispatch_q *dsq;
 
                 list_del_init(&p->scx.dsq_list.node);
 
-                ret = dispatch_to_local_dsq(rq, p->scx.ddsp_dsq_id, p,
-                                            p->scx.ddsp_enq_flags);
-                WARN_ON_ONCE(ret == DTL_NOT_LOCAL);
+                dsq = find_dsq_for_dispatch(rq, p->scx.ddsp_dsq_id, p);
+                if (!WARN_ON_ONCE(dsq->id != SCX_DSQ_LOCAL))
+                        dispatch_to_local_dsq(rq, dsq, p, p->scx.ddsp_enq_flags);
         }
 }
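
A note on the dispatch_to_local_dsq() signature change above: every local DSQ
is embedded in its rq (rq->scx.local_dsq), so once find_dsq_for_dispatch()
returns the DSQ pointer, the owning rq can be recovered with container_of(),
which is what the new dst_rq initialization does. A minimal standalone sketch
of that recovery, using stand-in struct layouts rather than the kernel's
actual definitions:

    #include <stddef.h>

    /* Simplified stand-ins for struct scx_dispatch_q and struct rq. */
    struct ex_dsq { unsigned long long id; };
    struct ex_scx_rq { struct ex_dsq local_dsq; };
    struct ex_rq { int cpu; struct ex_scx_rq scx; };

    /* container_of(): step back from an embedded member to its container. */
    #define ex_container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    static struct ex_rq *ex_rq_of_local_dsq(struct ex_dsq *dsq)
    {
            return ex_container_of(dsq, struct ex_rq, scx.local_dsq);
    }

This is what lets finish_dispatch() and process_ddsp_deferred_locals() hand
dispatch_to_local_dsq() the DSQ returned by find_dsq_for_dispatch() without
re-deriving the target CPU from the DSQ ID.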