author		Linus Torvalds <torvalds@linux-foundation.org>	2026-05-03 08:05:23 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2026-05-03 08:05:23 -0700
commit		c3cba36b394ba48b7920dcb99c46cee8ee59a116 (patch)
tree		57762c5df25b0c4d4e4d8dc041f3c604609f7f41 /kernel/sched
parent		66edb901bf874d9e0787326ba12d3548b2da8700 (diff)
parent		3da56dc063cd77b9c0b40add930767fab4e389f3 (diff)
Merge tag 'sched-urgent-2026-05-03' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fixes from Ingo Molnar:

 - Fix the delayed dequeue negative lag increase fix in the fair
   scheduler (Peter Zijlstra)

 - Fix wakeup_preempt_fair() to do proper delayed dequeue (Vincent
   Guittot)

 - Clear sched_entity::rel_deadline when initializing forked entities;
   a stale value can make all tasks EEVDF-ineligible, causing a NULL
   pointer dereference crash in pick_next_entity() (Zicheng Qu)

* tag 'sched-urgent-2026-05-03' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/fair: Clear rel_deadline when initializing forked entities
  sched/fair: Fix wakeup_preempt_fair() vs delayed dequeue
  sched/fair: Fix the negative lag increase fix
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/core.c |  1
-rw-r--r--	kernel/sched/fair.c | 42
2 files changed, 25 insertions(+), 18 deletions(-)
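
[ Illustration: why an uncleared rel_deadline can crash the pick. Under
  EEVDF, only "eligible" entities (roughly: lag >= 0, i.e. vruntime not
  ahead of the queue average) are pick candidates; if stale state makes
  every entity ineligible, the pick comes back empty. A minimal
  standalone sketch, not kernel code -- the toy_* names and the
  simplified eligibility test are invented for illustration only: ]

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    struct toy_entity { int64_t vruntime; };

    /* Toy eligibility: at or behind the queue's average vruntime. */
    static bool toy_eligible(const struct toy_entity *se, int64_t avg)
    {
            return se->vruntime <= avg;
    }

    static const struct toy_entity *
    toy_pick(const struct toy_entity *es, size_t n, int64_t avg)
    {
            const struct toy_entity *best = NULL;

            for (size_t i = 0; i < n; i++) {
                    if (!toy_eligible(&es[i], avg))
                            continue;       /* prune ineligible entities */
                    if (!best || es[i].vruntime < best->vruntime)
                            best = &es[i];
            }
            return best;    /* NULL when *every* entity is ineligible */
    }

    int main(void)
    {
            struct toy_entity es[] = { { 900 }, { 1200 } };

            /* avg 1050: only es[0] is eligible and gets picked. */
            return toy_pick(es, 2, 1050) == &es[0] ? 0 : 1;
    }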
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index da20fb6ea25a..b8871449d3c6 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4458,6 +4458,7 @@ static void __sched_fork(u64 clone_flags, struct task_struct *p)
 	p->se.nr_migrations		= 0;
 	p->se.vruntime			= 0;
 	p->se.vlag			= 0;
+	p->se.rel_deadline		= 0;
 	INIT_LIST_HEAD(&p->se.group_node);
 
 	/* A delayed task cannot be in clone(). */
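
[ Context: the one-liner matters because later placement code consumes a
  non-zero se->rel_deadline to reconstruct the deadline of an entity that
  went through a delayed dequeue. A simplified, self-contained sketch of
  that consumer's assumed shape -- not the literal kernel code: ]

    #include <stdint.h>

    struct toy_se {
            uint64_t vruntime;
            uint64_t deadline;
            uint64_t rel_deadline;  /* saved across a delayed dequeue */
    };

    /* Assumed shape of the placement logic that consumes rel_deadline:
     * a saved relative deadline takes precedence over fresh placement.
     * A forked child that inherited a stale value from its parent would
     * get a bogus deadline here; zeroing the field in __sched_fork()
     * keeps the normal placement path in charge. */
    static void toy_place(struct toy_se *se, uint64_t default_slice)
    {
            if (se->rel_deadline) {
                    se->deadline = se->vruntime + se->rel_deadline;
                    se->rel_deadline = 0;
                    return;
            }
            se->deadline = se->vruntime + default_slice;
    }

    int main(void)
    {
            struct toy_se child = { .vruntime = 100, .rel_deadline = 5000 };

            /* With the stale value cleared (as __sched_fork() now does),
             * placement uses the default slice instead. */
            child.rel_deadline = 0;
            toy_place(&child, 3);
            return child.deadline == 103 ? 0 : 1;
    }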
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 69361c63353a..728965851842 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -847,13 +847,19 @@ static s64 entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 avruntime)
  * Similarly, check that the entity didn't gain positive lag when DELAY_ZERO
  * is set.
  *
- * Return true if the lag has been adjusted.
+ * Return true if the vlag has been modified. Specifically:
+ *
+ *   se->vlag != avg_vruntime() - se->vruntime
+ *
+ * This can be due to clamping in entity_lag() or clamping due to
+ * sched_delayed. Either way, when vlag is modified and the entity is
+ * retained, the tree needs to be adjusted.
  */
 static __always_inline
 bool update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-	s64 vlag = entity_lag(cfs_rq, se, avg_vruntime(cfs_rq));
-	bool ret;
+	u64 avruntime = avg_vruntime(cfs_rq);
+	s64 vlag = entity_lag(cfs_rq, se, avruntime);
 
 	WARN_ON_ONCE(!se->on_rq);
@@ -863,10 +869,9 @@ bool update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se)
 		if (sched_feat(DELAY_ZERO))
 			vlag = min(vlag, 0);
 	}
 
-	ret = (vlag == se->vlag);
 	se->vlag = vlag;
-	return ret;
+	return avruntime - vlag != se->vruntime;
 }
 
 /*
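
[ Note: the new return value is just the clamp test rearranged. The
  unclamped lag is avruntime - se->vruntime, so the stored vlag differs
  from it exactly when avruntime - vlag != se->vruntime; the old
  ret = (vlag == se->vlag) instead compared against the *previously
  stored* vlag, with the sense inverted. A tiny standalone check with
  made-up numbers: ]

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            int64_t avruntime = 1000, vruntime = 400, limit = 300;
            int64_t lag  = avruntime - vruntime;        /* unclamped: 600 */
            int64_t vlag = lag > limit ? limit : lag;   /* clamped:   300 */

            /* Same predicate as the patch: true iff clamping changed it. */
            assert((avruntime - vlag != vruntime) == (vlag != lag));
            return 0;
    }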
@@ -1099,7 +1104,7 @@ static inline void cancel_protect_slice(struct sched_entity *se)
  *
  * Which allows tree pruning through eligibility.
  */
-static struct sched_entity *__pick_eevdf(struct cfs_rq *cfs_rq, bool protect)
+static struct sched_entity *pick_eevdf(struct cfs_rq *cfs_rq, bool protect)
 {
 	struct rb_node *node = cfs_rq->tasks_timeline.rb_root.rb_node;
 	struct sched_entity *se = __pick_first_entity(cfs_rq);
@@ -1170,11 +1175,6 @@ found:
 	return best;
 }
 
-static struct sched_entity *pick_eevdf(struct cfs_rq *cfs_rq)
-{
-	return __pick_eevdf(cfs_rq, true);
-}
-
 struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
 {
 	struct rb_node *last = rb_last(&cfs_rq->tasks_timeline.rb_root);
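
[ Context: the trivial wrapper is gone because callers now choose the
  protect behaviour themselves. With protect set, a current entity still
  inside its protected slice (RUN_TO_PARITY) is kept even when a
  marginally more eligible entity exists; the wakeup path below turns it
  off for PREEMPT_WAKEUP_SHORT. A toy model of the flag, with assumed,
  heavily simplified semantics -- not the kernel's actual
  slice-protection logic: ]

    #include <stdbool.h>

    struct toy_rq {
            int curr;               /* currently running task id */
            int curr_slice_left;    /* >0: current still owns its slice */
            int best_eligible;      /* most eligible queued task id */
    };

    static int toy_pick(const struct toy_rq *rq, bool protect)
    {
            /* Run-to-parity: keep the current task while protected. */
            if (protect && rq->curr_slice_left > 0)
                    return rq->curr;
            return rq->best_eligible;
    }

    int main(void)
    {
            struct toy_rq rq = { .curr = 1, .curr_slice_left = 2,
                                 .best_eligible = 7 };

            /* Protected pick keeps 1; unprotected takes 7 immediately. */
            return (toy_pick(&rq, true) == 1 &&
                    toy_pick(&rq, false) == 7) ? 0 : 1;
    }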
@@ -5749,11 +5749,11 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags);
  * 4) do not run the "skip" process, if something else is available
  */
 static struct sched_entity *
-pick_next_entity(struct rq *rq, struct cfs_rq *cfs_rq)
+pick_next_entity(struct rq *rq, struct cfs_rq *cfs_rq, bool protect)
 {
 	struct sched_entity *se;
 
-	se = pick_eevdf(cfs_rq);
+	se = pick_eevdf(cfs_rq, protect);
 	if (se->sched_delayed) {
 		dequeue_entities(rq, se, DEQUEUE_SLEEP | DEQUEUE_DELAYED);
 		/*
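
[ Illustration: pick_next_entity() can now come back empty -- when the
  EEVDF pick lands on a sched_delayed entity, the dequeue is completed
  on the spot and the caller must simply pick again. A self-contained
  toy of that NULL-return contract; the toy_* names are invented for
  illustration: ]

    #include <stdbool.h>
    #include <stddef.h>

    struct toy_task { bool queued; bool delayed; };

    /* Stand-in for the EEVDF pick: first still-queued task wins. */
    static struct toy_task *toy_pick_best(struct toy_task *t, size_t n)
    {
            for (size_t i = 0; i < n; i++)
                    if (t[i].queued)
                            return &t[i];
            return NULL;
    }

    /* Returns NULL after lazily removing a delayed pick; caller retries. */
    static struct toy_task *toy_pick_next(struct toy_task *t, size_t n)
    {
            struct toy_task *best = toy_pick_best(t, n);

            if (best && best->delayed) {
                    best->queued = false;   /* complete the delayed dequeue */
                    return NULL;            /* caller must pick again */
            }
            return best;
    }

    int main(void)
    {
            struct toy_task t[] = { { true, true }, { true, false } };

            /* First pick completes t[0]'s delayed dequeue and returns
             * NULL; the retry then yields the real candidate t[1]. */
            struct toy_task *p = toy_pick_next(t, 2);
            if (!p)
                    p = toy_pick_next(t, 2);
            return p == &t[1] ? 0 : 1;
    }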
@@ -9027,7 +9027,7 @@ static void wakeup_preempt_fair(struct rq *rq, struct task_struct *p, int wake_flags)
 {
 	enum preempt_wakeup_action preempt_action = PREEMPT_WAKEUP_PICK;
 	struct task_struct *donor = rq->donor;
-	struct sched_entity *se = &donor->se, *pse = &p->se;
+	struct sched_entity *nse, *se = &donor->se, *pse = &p->se;
 	struct cfs_rq *cfs_rq = task_cfs_rq(donor);
 	int cse_is_idle, pse_is_idle;
 
@@ -9138,11 +9138,17 @@ static void wakeup_preempt_fair(struct rq *rq, struct task_struct *p, int wake_flags)
 	}
 
 pick:
+	nse = pick_next_entity(rq, cfs_rq, preempt_action != PREEMPT_WAKEUP_SHORT);
+	/* If @p has become the most eligible task, force preemption */
+	if (nse == pse)
+		goto preempt;
+
 	/*
-	 * If @p has become the most eligible task, force preemption.
+	 * Because p is enqueued, nse being null can only mean that we
+	 * dequeued a delayed task.
 	 */
-	if (__pick_eevdf(cfs_rq, preempt_action != PREEMPT_WAKEUP_SHORT) == pse)
-		goto preempt;
+	if (!nse)
+		goto pick;
 
 	if (sched_feat(RUN_TO_PARITY))
 		update_protect_slice(cfs_rq, se);
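
[ Note: the "goto pick" retry is bounded -- every NULL return
  corresponds to one delayed entity permanently leaving the queue, and
  @p itself is enqueued and not delayed here, so the loop must end with
  either nse == pse or a real candidate. A toy model of that
  termination argument; -1 plays the role of the NULL pick: ]

    /* Toy pick: flushes delayed entries one at a time, then succeeds.
     * Invented for illustration; not kernel code. */
    static int toy_pick(int *ndelayed)
    {
            if (*ndelayed > 0) {
                    (*ndelayed)--;  /* one delayed entity dequeued */
                    return -1;      /* pick again */
            }
            return 42;              /* a real candidate */
    }

    int main(void)
    {
            int ndelayed = 3, pick;

            do {
                    pick = toy_pick(&ndelayed); /* mirrors "goto pick" */
            } while (pick < 0);
            return pick == 42 ? 0 : 1;
    }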
@@ -9179,7 +9185,7 @@ again:
 		throttled |= check_cfs_rq_runtime(cfs_rq);
 
-		se = pick_next_entity(rq, cfs_rq);
+		se = pick_next_entity(rq, cfs_rq, true);
 		if (!se)
 			goto again;
 		cfs_rq = group_cfs_rq(se);