zephyr/kernel/timeslicing.c
Commit 3affd0385e by Peter Mitsis: kernel: Fix race condition in z_time_slice()
Instead of directly calling the current thread-specific time slice
handler in z_time_slice(), we must call a saved copy of the handler
that was made when _sched_spinlock was still held. Otherwise there
is a small window of time where another CPU could change the handler
to NULL just before we call it.

Signed-off-by: Peter Mitsis <peter.mitsis@intel.com>
2026-01-03 10:18:53 +01:00
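
The fix matters because z_time_slice() must drop _sched_spinlock before running a per-thread slice-expired handler. If the handler pointer is re-read from curr->base.slice_expired after the unlock, another CPU can meanwhile call k_thread_time_slice_set() and clear it to NULL. A minimal sketch of the before/after shapes (illustrative, not the exact historical diff):

/* Racy: the NULL check runs under the lock, but the pointer is
 * re-read after the unlock, where another CPU may have cleared it.
 */
if (curr->base.slice_expired) {
	k_spin_unlock(&_sched_spinlock, key);
	curr->base.slice_expired(curr, curr->base.slice_data);
	key = k_spin_lock(&_sched_spinlock);
}

/* Fixed, as in z_time_slice() below: snapshot the pointer while
 * _sched_spinlock is still held, then call the saved copy.
 */
k_thread_timeslice_fn_t handler = curr->base.slice_expired;
if (handler != NULL) {
	k_spin_unlock(&_sched_spinlock, key);
	handler(curr, curr->base.slice_data);
	key = k_spin_lock(&_sched_spinlock);
}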


/*
* Copyright (c) 2018, 2024 Intel Corporation
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <zephyr/kernel.h>
#include <kswap.h>
#include <ksched.h>
#include <ipi.h>
static int slice_ticks = DIV_ROUND_UP(CONFIG_TIMESLICE_SIZE * Z_HZ_ticks, Z_HZ_ms);
static int slice_max_prio = CONFIG_TIMESLICE_PRIORITY;
static struct _timeout slice_timeouts[CONFIG_MP_MAX_NUM_CPUS];
static bool slice_expired[CONFIG_MP_MAX_NUM_CPUS];
#ifdef CONFIG_SWAP_NONATOMIC
/* If z_swap() isn't atomic, then it's possible for a timer interrupt
* to try to timeslice away _current after it has already pended
* itself but before the corresponding context switch. Treat that as
* a noop condition in z_time_slice().
*/
struct k_thread *pending_current;
#endif
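
/* Return the slice length, in ticks, that applies to @thread: the
 * thread's own slice_ticks when CONFIG_TIMESLICE_PER_THREAD is enabled
 * and non-zero, otherwise the global slice_ticks.
 */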
static inline int slice_time(struct k_thread *thread)
{
int ret = slice_ticks;
#ifdef CONFIG_TIMESLICE_PER_THREAD
if (thread->base.slice_ticks != 0) {
ret = thread->base.slice_ticks;
}
#else
ARG_UNUSED(thread);
#endif
return ret;
}
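
/* Return the effective slice size in ticks for @thread, or 0 if the
 * thread is not sliceable: prevented from running, the idle thread,
 * given a zero slice time, or (absent a per-thread slice) cooperative
 * or of higher priority than slice_max_prio.
 */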
static int z_time_slice_size(struct k_thread *thread)
{
if (z_is_thread_prevented_from_running(thread) ||
z_is_idle_thread_object(thread) ||
(slice_time(thread) == 0)) {
return 0;
}
#ifdef CONFIG_TIMESLICE_PER_THREAD
if (thread->base.slice_ticks != 0) {
return thread->base.slice_ticks;
}
#endif
if (thread_is_preemptible(thread) &&
!z_is_prio_higher(thread->base.prio, slice_max_prio)) {
return slice_ticks;
}
return 0;
}
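
/* Per-CPU timeout callback: flag the owning CPU's slice as expired.
 * The CPU number is recovered from the callback's position within
 * slice_timeouts[].
 */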
static void slice_timeout(struct _timeout *timeout)
{
int cpu = ARRAY_INDEX(slice_timeouts, timeout);
slice_expired[cpu] = true;
/* We need an IPI if we just handled a timeslice expiration
* for a different CPU.
*/
if (cpu != _current_cpu->id) {
flag_ipi(IPI_CPU_MASK(cpu));
}
}
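
/* Restart the current CPU's slice timer for @thread: abort any
 * pending slice timeout, clear the CPU's expired flag and, if the
 * thread is sliceable, queue a fresh timeout for its slice length.
 */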
void z_reset_time_slice(struct k_thread *thread)
{
int cpu = _current_cpu->id;
int slice_size = z_time_slice_size(thread);
z_abort_timeout(&slice_timeouts[cpu]);
slice_expired[cpu] = false;
if (slice_size != 0) {
z_add_timeout(&slice_timeouts[cpu], slice_timeout,
K_TICKS(slice_size - 1));
}
}
static ALWAYS_INLINE bool thread_defines_time_slice_size(struct k_thread *thread)
{
#ifdef CONFIG_TIMESLICE_PER_THREAD
return (thread->base.slice_ticks != 0);
#else /* !CONFIG_TIMESLICE_PER_THREAD */
return false;
#endif /* !CONFIG_TIMESLICE_PER_THREAD */
}
void k_sched_time_slice_set(int32_t slice, int prio)
{
k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
slice_ticks = k_ms_to_ticks_ceil32(slice);
slice_max_prio = prio;
/*
* Threads that define their own time slice size should not have
* their time slices reset here, as a thread-specific time slice size
* takes precedence over the global time slice size.
*/
if (!thread_defines_time_slice_size(_current)) {
z_reset_time_slice(_current);
}
k_spin_unlock(&_sched_spinlock, key);
}
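
/* Illustrative usage from application code (values are examples):
 * enable 10 ms round-robin slicing for preemptible threads, with
 * priority 0 as the highest level eligible.
 *
 *	k_sched_time_slice_set(10, 0);
 */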
#ifdef CONFIG_TIMESLICE_PER_THREAD
void k_thread_time_slice_set(struct k_thread *thread, int32_t thread_slice_ticks,
k_thread_timeslice_fn_t expired, void *data)
{
K_SPINLOCK(&_sched_spinlock) {
thread->base.slice_ticks = thread_slice_ticks;
thread->base.slice_expired = expired;
thread->base.slice_data = data;
z_reset_time_slice(thread);
}
}
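
/* Illustrative usage (thread and handler names are hypothetical):
 * give one thread a 5-tick budget and a callback that runs when the
 * budget expires.
 *
 *	static void on_slice_expired(struct k_thread *t, void *data)
 *	{
 *		... react to the expired budget ...
 *	}
 *
 *	k_thread_time_slice_set(&my_thread, 5, on_slice_expired, NULL);
 */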
#endif
/* Called out of each timer and IPI interrupt */
void z_time_slice(void)
{
k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
struct k_thread *curr = _current;
#ifdef CONFIG_SWAP_NONATOMIC
if (pending_current == curr) {
z_reset_time_slice(curr);
k_spin_unlock(&_sched_spinlock, key);
return;
}
pending_current = NULL;
#endif
if (slice_expired[_current_cpu->id] && (z_time_slice_size(curr) != 0)) {
#ifdef CONFIG_TIMESLICE_PER_THREAD
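/* Call a saved copy of the handler made while _sched_spinlock is
 * still held; re-reading curr->base.slice_expired after the unlock
 * below would race with another CPU setting it to NULL.
 */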
k_thread_timeslice_fn_t handler = curr->base.slice_expired;
if (handler != NULL) {
k_spin_unlock(&_sched_spinlock, key);
handler(curr, curr->base.slice_data);
key = k_spin_lock(&_sched_spinlock);
}
#endif
if (!z_is_thread_prevented_from_running(curr)) {
move_current_to_end_of_prio_q();
}
z_reset_time_slice(curr);
}
k_spin_unlock(&_sched_spinlock, key);
}