sched: Move sched_class::prio_changed() into the change pattern
Move sched_class::prio_changed() into the change pattern. And while there, extend it with sched_class::get_prio() in order to fix the deadline situation. Suggested-by: Tejun Heo <tj@kernel.org> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Reviewed-by: Juri Lelli <juri.lelli@redhat.com> Acked-by: Tejun Heo <tj@kernel.org> Acked-by: Vincent Guittot <vincent.guittot@linaro.org>
This commit is contained in:
@@ -2169,12 +2169,6 @@ inline int task_curr(const struct task_struct *p)
|
||||
return cpu_curr(task_cpu(p)) == p;
|
||||
}
|
||||
|
||||
void check_prio_changed(struct rq *rq, struct task_struct *p, int oldprio)
|
||||
{
|
||||
if (oldprio != p->prio || dl_task(p))
|
||||
p->sched_class->prio_changed(rq, p, oldprio);
|
||||
}
|
||||
|
||||
void wakeup_preempt(struct rq *rq, struct task_struct *p, int flags)
|
||||
{
|
||||
struct task_struct *donor = rq->donor;
|
||||
@@ -7400,9 +7394,6 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
|
||||
p->sched_class = next_class;
|
||||
p->prio = prio;
|
||||
}
|
||||
|
||||
if (!(queue_flag & DEQUEUE_CLASS))
|
||||
check_prio_changed(rq, p, oldprio);
|
||||
out_unlock:
|
||||
/* Avoid rq from going away on us: */
|
||||
preempt_disable();
|
||||
@@ -10855,6 +10846,13 @@ struct sched_change_ctx *sched_change_begin(struct task_struct *p, unsigned int
|
||||
.running = task_current_donor(rq, p),
|
||||
};
|
||||
|
||||
if (!(flags & DEQUEUE_CLASS)) {
|
||||
if (p->sched_class->get_prio)
|
||||
ctx->prio = p->sched_class->get_prio(rq, p);
|
||||
else
|
||||
ctx->prio = p->prio;
|
||||
}
|
||||
|
||||
if (ctx->queued)
|
||||
dequeue_task(rq, p, flags);
|
||||
if (ctx->running)
|
||||
@@ -10881,6 +10879,10 @@ void sched_change_end(struct sched_change_ctx *ctx)
|
||||
if (ctx->running)
|
||||
set_next_task(rq, p);
|
||||
|
||||
if ((ctx->flags & ENQUEUE_CLASS) && p->sched_class->switched_to)
|
||||
p->sched_class->switched_to(rq, p);
|
||||
if (ctx->flags & ENQUEUE_CLASS) {
|
||||
if (p->sched_class->switched_to)
|
||||
p->sched_class->switched_to(rq, p);
|
||||
} else {
|
||||
p->sched_class->prio_changed(rq, p, ctx->prio);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3042,23 +3042,24 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p)
|
||||
}
|
||||
}
|
||||
|
||||
static u64 get_prio_dl(struct rq *rq, struct task_struct *p)
|
||||
{
|
||||
return p->dl.deadline;
|
||||
}
|
||||
|
||||
/*
|
||||
* If the scheduling parameters of a -deadline task changed,
|
||||
* a push or pull operation might be needed.
|
||||
*/
|
||||
static void prio_changed_dl(struct rq *rq, struct task_struct *p,
|
||||
int oldprio)
|
||||
static void prio_changed_dl(struct rq *rq, struct task_struct *p, u64 old_deadline)
|
||||
{
|
||||
if (!task_on_rq_queued(p))
|
||||
return;
|
||||
|
||||
/*
|
||||
* This might be too much, but unfortunately
|
||||
* we don't have the old deadline value, and
|
||||
* we can't argue if the task is increasing
|
||||
* or lowering its prio, so...
|
||||
*/
|
||||
if (!rq->dl.overloaded)
|
||||
if (p->dl.deadline == old_deadline)
|
||||
return;
|
||||
|
||||
if (dl_time_before(old_deadline, p->dl.deadline))
|
||||
deadline_queue_pull_task(rq);
|
||||
|
||||
if (task_current_donor(rq, p)) {
|
||||
@@ -3113,6 +3114,7 @@ DEFINE_SCHED_CLASS(dl) = {
|
||||
.task_tick = task_tick_dl,
|
||||
.task_fork = task_fork_dl,
|
||||
|
||||
.get_prio = get_prio_dl,
|
||||
.prio_changed = prio_changed_dl,
|
||||
.switched_from = switched_from_dl,
|
||||
.switched_to = switched_to_dl,
|
||||
|
||||
@@ -2961,7 +2961,7 @@ static void reweight_task_scx(struct rq *rq, struct task_struct *p,
|
||||
p, p->scx.weight);
|
||||
}
|
||||
|
||||
static void prio_changed_scx(struct rq *rq, struct task_struct *p, int oldprio)
|
||||
static void prio_changed_scx(struct rq *rq, struct task_struct *p, u64 oldprio)
|
||||
{
|
||||
}
|
||||
|
||||
@@ -3926,9 +3926,6 @@ static void scx_disable_workfn(struct kthread_work *work)
|
||||
p->sched_class = new_class;
|
||||
}
|
||||
|
||||
if (!(queue_flags & DEQUEUE_CLASS))
|
||||
check_prio_changed(task_rq(p), p, p->prio);
|
||||
|
||||
scx_exit_task(p);
|
||||
}
|
||||
scx_task_iter_stop(&sti);
|
||||
@@ -4675,9 +4672,6 @@ static int scx_enable(struct sched_ext_ops *ops, struct bpf_link *link)
|
||||
p->sched_class = new_class;
|
||||
}
|
||||
|
||||
if (!(queue_flags & DEQUEUE_CLASS))
|
||||
check_prio_changed(task_rq(p), p, p->prio);
|
||||
|
||||
put_task_struct(p);
|
||||
}
|
||||
scx_task_iter_stop(&sti);
|
||||
|
||||
@@ -13150,11 +13150,14 @@ static void task_fork_fair(struct task_struct *p)
|
||||
* the current task.
|
||||
*/
|
||||
static void
|
||||
prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
|
||||
prio_changed_fair(struct rq *rq, struct task_struct *p, u64 oldprio)
|
||||
{
|
||||
if (!task_on_rq_queued(p))
|
||||
return;
|
||||
|
||||
if (p->prio == oldprio)
|
||||
return;
|
||||
|
||||
if (rq->cfs.nr_queued == 1)
|
||||
return;
|
||||
|
||||
@@ -13166,8 +13169,9 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
|
||||
if (task_current_donor(rq, p)) {
|
||||
if (p->prio > oldprio)
|
||||
resched_curr(rq);
|
||||
} else
|
||||
} else {
|
||||
wakeup_preempt(rq, p, 0);
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef CONFIG_FAIR_GROUP_SCHED
|
||||
|
||||
@@ -504,8 +504,11 @@ static void switching_to_idle(struct rq *rq, struct task_struct *p)
|
||||
}
|
||||
|
||||
static void
|
||||
prio_changed_idle(struct rq *rq, struct task_struct *p, int oldprio)
|
||||
prio_changed_idle(struct rq *rq, struct task_struct *p, u64 oldprio)
|
||||
{
|
||||
if (p->prio == oldprio)
|
||||
return;
|
||||
|
||||
BUG();
|
||||
}
|
||||
|
||||
|
||||
@@ -2437,11 +2437,14 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
|
||||
* us to initiate a push or pull.
|
||||
*/
|
||||
static void
|
||||
prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
|
||||
prio_changed_rt(struct rq *rq, struct task_struct *p, u64 oldprio)
|
||||
{
|
||||
if (!task_on_rq_queued(p))
|
||||
return;
|
||||
|
||||
if (p->prio == oldprio)
|
||||
return;
|
||||
|
||||
if (task_current_donor(rq, p)) {
|
||||
/*
|
||||
* If our priority decreases while running, we
|
||||
|
||||
@@ -2451,8 +2451,10 @@ struct sched_class {
|
||||
|
||||
void (*reweight_task)(struct rq *this_rq, struct task_struct *task,
|
||||
const struct load_weight *lw);
|
||||
|
||||
u64 (*get_prio) (struct rq *this_rq, struct task_struct *task);
|
||||
void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
|
||||
int oldprio);
|
||||
u64 oldprio);
|
||||
|
||||
unsigned int (*get_rr_interval)(struct rq *rq,
|
||||
struct task_struct *task);
|
||||
@@ -3877,8 +3879,6 @@ extern void set_load_weight(struct task_struct *p, bool update_load);
|
||||
extern void enqueue_task(struct rq *rq, struct task_struct *p, int flags);
|
||||
extern bool dequeue_task(struct rq *rq, struct task_struct *p, int flags);
|
||||
|
||||
extern void check_prio_changed(struct rq *rq, struct task_struct *p, int oldprio);
|
||||
|
||||
extern struct balance_callback *splice_balance_callbacks(struct rq *rq);
|
||||
extern void balance_callbacks(struct rq *rq, struct balance_callback *head);
|
||||
|
||||
@@ -3899,6 +3899,7 @@ extern void balance_callbacks(struct rq *rq, struct balance_callback *head);
|
||||
* the task's queueing state is idempotent across the operation.
|
||||
*/
|
||||
struct sched_change_ctx {
|
||||
u64 prio;
|
||||
struct task_struct *p;
|
||||
int flags;
|
||||
bool queued;
|
||||
|
||||
@@ -81,8 +81,11 @@ static void switching_to_stop(struct rq *rq, struct task_struct *p)
|
||||
}
|
||||
|
||||
static void
|
||||
prio_changed_stop(struct rq *rq, struct task_struct *p, int oldprio)
|
||||
prio_changed_stop(struct rq *rq, struct task_struct *p, u64 oldprio)
|
||||
{
|
||||
if (p->prio == oldprio)
|
||||
return;
|
||||
|
||||
BUG(); /* how!?, what priority? */
|
||||
}
|
||||
|
||||
|
||||
@@ -95,12 +95,6 @@ void set_user_nice(struct task_struct *p, long nice)
|
||||
old_prio = p->prio;
|
||||
p->prio = effective_prio(p);
|
||||
}
|
||||
|
||||
/*
|
||||
* If the task increased its priority or is running and
|
||||
* lowered its priority, then reschedule its CPU:
|
||||
*/
|
||||
p->sched_class->prio_changed(rq, p, old_prio);
|
||||
}
|
||||
EXPORT_SYMBOL(set_user_nice);
|
||||
|
||||
@@ -706,9 +700,6 @@ change:
|
||||
}
|
||||
}
|
||||
|
||||
if (!(queue_flags & DEQUEUE_CLASS))
|
||||
check_prio_changed(rq, p, oldprio);
|
||||
|
||||
/* Avoid rq from going away on us: */
|
||||
preempt_disable();
|
||||
head = splice_balance_callbacks(rq);
|
||||
|
||||
Reference in New Issue
Block a user