author		Tejun Heo <tj@kernel.org>	2026-04-27 14:16:35 -1000
committer	Tejun Heo <tj@kernel.org>	2026-05-04 09:06:03 -1000
commit		ff9eda4ea906b1f02fc260ddc42d2d9bd736a49c (patch)
tree		e3dbebcc3fd61a806e97ae2cecf15e81e6291a5f /kernel/sched
parent		60f21a2649308bbd84919ba6656d5ccd660953cf (diff)
sched_ext: Skip past-sched_ext_dead() tasks in scx_task_iter_next_locked()

scx_task_iter's cgroup-scoped mode can return tasks whose
sched_ext_dead() has already completed: cgroup_task_dead() removes a
task from cset->tasks only after sched_ext_dead() has run from
finish_task_switch(), and on PREEMPT_RT the removal is further deferred
to irq work. The global mode is fine, as sched_ext_dead() removes the
task from scx_tasks via list_del_init() first.

Callers (sub-sched enable prep/abort/apply, scx_sub_disable(),
scx_fail_parent()) assume that returned tasks are still on @sch and
otherwise trip WARN_ON_ONCE() or operate on torn-down state.

Fix it by setting %SCX_TASK_OFF_TASKS in sched_ext_dead() under @p's rq
lock and making scx_task_iter_next_locked() skip flagged tasks under the
same lock. As the setter and the reader serialize on the per-task rq
lock, there is no race.

Signed-off-by: Tejun Heo <tj@kernel.org>
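
For illustration, a minimal userspace analogue of the locking pattern
used by the fix: a per-task mutex stands in for @p's rq lock, a boolean
for %SCX_TASK_OFF_TASKS, and every name below is a stand-in rather than
a kernel symbol. The teardown side marks the task under its lock; the
iterator tests the mark under the same lock and skips it, so it can
never return a half-torn-down task.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct task {
	pthread_mutex_t lock;		/* stands in for @p's rq lock */
	bool off_tasks;			/* stands in for %SCX_TASK_OFF_TASKS */
	const char *name;
};

/* Teardown side: do per-task teardown, then mark it, all under its lock. */
static void task_dead(struct task *t)
{
	pthread_mutex_lock(&t->lock);
	/* ... per-task teardown work goes here ... */
	t->off_tasks = true;
	pthread_mutex_unlock(&t->lock);
}

/* Iterator side: test the mark under the same lock, skip if set. */
static struct task *iter_next_locked(struct task **tasks, int n, int *pos)
{
	while (*pos < n) {
		struct task *t = tasks[(*pos)++];

		pthread_mutex_lock(&t->lock);
		if (t->off_tasks) {		/* already past teardown */
			pthread_mutex_unlock(&t->lock);
			continue;
		}
		return t;			/* returned with its lock held */
	}
	return NULL;
}

int main(void)
{
	struct task a = { PTHREAD_MUTEX_INITIALIZER, false, "a" };
	struct task b = { PTHREAD_MUTEX_INITIALIZER, false, "b" };
	struct task *tasks[] = { &a, &b };
	struct task *t;
	int pos = 0;

	task_dead(&a);			/* a is now dead from the iterator's POV */
	while ((t = iter_next_locked(tasks, 2, &pos))) {
		printf("visited %s\n", t->name);	/* prints only "b" */
		pthread_mutex_unlock(&t->lock);
	}
	return 0;
}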
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/ext.c	33
1 file changed, 25 insertions(+), 8 deletions(-)
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index dc5d4787296b..3f0d8aeaed81 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -928,16 +928,27 @@ static struct task_struct *scx_task_iter_next_locked(struct scx_task_iter *iter)
 		 *
 		 * Test for idle_sched_class as only init_tasks are on it.
 		 */
-		if (p->sched_class != &idle_sched_class)
-			break;
-	}
-	if (!p)
-		return NULL;
+		if (p->sched_class == &idle_sched_class)
+			continue;
 
-	iter->rq = task_rq_lock(p, &iter->rf);
-	iter->locked_task = p;
+		iter->rq = task_rq_lock(p, &iter->rf);
+		iter->locked_task = p;
 
-	return p;
+		/*
+		 * cgroup_task_dead() removes the dead tasks from cset->tasks
+		 * after sched_ext_dead(), so cgroup iteration may see tasks
+		 * which already finished sched_ext_dead(). %SCX_TASK_OFF_TASKS
+		 * is set by sched_ext_dead() under @p's rq lock. Test it to
+		 * avoid visiting tasks which are already dead from the SCX POV.
+		 */
+		if (p->scx.flags & SCX_TASK_OFF_TASKS) {
+			__scx_task_iter_rq_unlock(iter);
+			continue;
+		}
+
+		return p;
+	}
+	return NULL;
 }
 
 /**
@@ -3850,6 +3861,11 @@ void sched_ext_dead(struct task_struct *p)
 	/*
 	 * @p is off scx_tasks and wholly ours. scx_root_enable()'s READY ->
 	 * ENABLED transitions can't race us. Disable ops for @p.
+	 *
+	 * %SCX_TASK_OFF_TASKS synchronizes against cgroup task iteration - see
+	 * scx_task_iter_next_locked(). NONE tasks need no marking: cgroup
+	 * iteration is only used from sub-sched paths, which require root
+	 * enabled. Root enable transitions every live task to at least READY.
 	 */
 	if (scx_get_task_state(p) != SCX_TASK_NONE) {
 		struct rq_flags rf;
@@ -3857,6 +3873,7 @@ void sched_ext_dead(struct task_struct *p)
 
 		rq = task_rq_lock(p, &rf);
 		scx_disable_and_exit_task(scx_task_sched(p), p);
+		p->scx.flags |= SCX_TASK_OFF_TASKS;
 		task_rq_unlock(rq, p, &rf);
 	}
 }
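
For reference, callers consume the iterator with a loop along these
lines, patterned on the start/next_locked/stop helpers that accompany
scx_task_iter_next_locked() in kernel/sched/ext.c; the body shown is
purely illustrative. After this change, the loop body may assume @p has
not completed sched_ext_dead() and is still on @sch.

	struct scx_task_iter sti;
	struct task_struct *p;

	scx_task_iter_start(&sti);
	while ((p = scx_task_iter_next_locked(&sti))) {
		/*
		 * @p's rq lock is held here. Tasks flagged with
		 * %SCX_TASK_OFF_TASKS are skipped by the iterator and
		 * never reach this point.
		 */
	}
	scx_task_iter_stop(&sti);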