Commit 5c5e17f introduced a subtle regression when userspace was configured on architectures requiring guard pages. Prior to 5c5e17f, the assumption was that guard pages would be included in `CONFIG_DYNAMIC_THREAD_STACK_SIZE`, which callers of `k_thread_stack_alloc()` needed to be aware of, although this was unfortunately never documented. It seems that 5c5e17f intended to remove the need for that assumption, but the necessary conditions for doing so had not been met.

Update the pool storage size to account for guard pages, which ensures that users can access every byte of `CONFIG_DYNAMIC_THREAD_STACK_SIZE` rather than needing to know that guard pages are carved out of the requested size. The compromise is a more intuitive API at the cost of more storage space for the pool of thread stacks when userspace is enabled.

Signed-off-by: Chris Friedt <cfriedt@tenstorrent.com>
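As context for the API touched here, a minimal sketch of how a caller might allocate, use, and free a dynamic stack; the worker entry point, priority, and wrapper function names are illustrative and not part of this change:

#include <zephyr/kernel.h>

/* Illustrative worker entry point. */
static void worker(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p1);
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);
}

static struct k_thread worker_thread;

void spawn_and_reap(void)
{
	/* Request the full configured size; any guard pages are accounted for
	 * by the pool and do not eat into the usable stack.
	 */
	k_thread_stack_t *stack = k_thread_stack_alloc(CONFIG_DYNAMIC_THREAD_STACK_SIZE, 0);

	if (stack == NULL) {
		return;
	}

	k_tid_t tid = k_thread_create(&worker_thread, stack, CONFIG_DYNAMIC_THREAD_STACK_SIZE,
				      worker, NULL, NULL, NULL, K_PRIO_PREEMPT(0), 0, K_NO_WAIT);

	k_thread_join(tid, K_FOREVER);

	/* The stack may only be freed once the thread has terminated. */
	(void)k_thread_stack_free(stack);
}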
/*
 * Copyright (c) 2022, Meta
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include "kernel_internal.h"

#include <zephyr/kernel.h>
#include <ksched.h>
#include <zephyr/kernel/thread_stack.h>
#include <zephyr/logging/log.h>
#include <zephyr/sys/bitarray.h>
#include <zephyr/sys/kobject.h>
#include <zephyr/internal/syscall_handler.h>

LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

#if CONFIG_DYNAMIC_THREAD_POOL_SIZE > 0
#define BA_SIZE CONFIG_DYNAMIC_THREAD_POOL_SIZE
#else
#define BA_SIZE 1
#endif /* CONFIG_DYNAMIC_THREAD_POOL_SIZE > 0 */

struct dyn_cb_data {
	k_tid_t tid;
	k_thread_stack_t *stack;
};
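
/* Pool-backed stacks. Each element is sized with K_THREAD_STACK_LEN() so that
 * callers can use every byte of CONFIG_DYNAMIC_THREAD_STACK_SIZE, even on
 * architectures that reserve extra per-stack space such as guard pages.
 */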
static K_THREAD_STACK_ARRAY_DEFINE(dynamic_stack, CONFIG_DYNAMIC_THREAD_POOL_SIZE,
				   K_THREAD_STACK_LEN(CONFIG_DYNAMIC_THREAD_STACK_SIZE));
SYS_BITARRAY_DEFINE_STATIC(dynamic_ba, BA_SIZE);

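/* Allocate one fixed-size stack from the static pool, or return NULL if the
 * requested size exceeds the pool element size or the pool is exhausted.
 */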
static k_thread_stack_t *z_thread_stack_alloc_pool(size_t size, int flags)
{
	int rv;
	size_t offset;
	k_thread_stack_t *stack;

	if (size > CONFIG_DYNAMIC_THREAD_STACK_SIZE) {
		LOG_DBG("stack size %zu is > pool stack size %zu", size,
			(size_t)CONFIG_DYNAMIC_THREAD_STACK_SIZE);
		return NULL;
	}

	rv = sys_bitarray_alloc(&dynamic_ba, 1, &offset);
	if (rv < 0) {
		LOG_DBG("unable to allocate stack from pool");
		return NULL;
	}

	__ASSERT_NO_MSG(offset < CONFIG_DYNAMIC_THREAD_POOL_SIZE);

	stack = (k_thread_stack_t *)&dynamic_stack[offset];

	return stack;
}

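/* Allocate a stack on demand. K_USER stacks are allocated as kernel objects
 * (this requires CONFIG_DYNAMIC_OBJECTS); kernel-only stacks come from an
 * aligned allocation via z_thread_aligned_alloc().
 */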
static k_thread_stack_t *z_thread_stack_alloc_dyn(size_t size, int flags)
{
	if ((flags & K_USER) == K_USER) {
#ifdef CONFIG_DYNAMIC_OBJECTS
		return k_object_alloc_size(K_OBJ_THREAD_STACK_ELEMENT, size);
#else
		/* Dynamic user stack needs a kobject, so if this option is not
		 * enabled we can't proceed.
		 */
		return NULL;
#endif /* CONFIG_DYNAMIC_OBJECTS */
	}

	return z_thread_aligned_alloc(Z_KERNEL_STACK_OBJ_ALIGN, K_KERNEL_STACK_LEN(size));
}

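/* Allocate a dynamic thread stack, trying the preferred backend first (heap
 * allocation or the static pool, depending on Kconfig) and falling back to
 * the other backend if it is available.
 */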
k_thread_stack_t *z_impl_k_thread_stack_alloc(size_t size, int flags)
{
	k_thread_stack_t *stack = NULL;

	if (IS_ENABLED(CONFIG_DYNAMIC_THREAD_PREFER_ALLOC)) {
		stack = z_thread_stack_alloc_dyn(size, flags);
		if (stack == NULL && CONFIG_DYNAMIC_THREAD_POOL_SIZE > 0) {
			stack = z_thread_stack_alloc_pool(size, flags);
		}
	} else if (IS_ENABLED(CONFIG_DYNAMIC_THREAD_PREFER_POOL)) {
		if (CONFIG_DYNAMIC_THREAD_POOL_SIZE > 0) {
			stack = z_thread_stack_alloc_pool(size, flags);
		}

		if ((stack == NULL) && IS_ENABLED(CONFIG_DYNAMIC_THREAD_ALLOC)) {
			stack = z_thread_stack_alloc_dyn(size, flags);
		}
	}

	return stack;
}

#ifdef CONFIG_USERSPACE
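/* Syscall verification handler: there is nothing to validate for an
 * allocation request, so simply forward to the implementation.
 */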
static inline k_thread_stack_t *z_vrfy_k_thread_stack_alloc(size_t size, int flags)
{
	return z_impl_k_thread_stack_alloc(size, flags);
}
#include <zephyr/syscalls/k_thread_stack_alloc_mrsh.c>
#endif /* CONFIG_USERSPACE */

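/* k_thread_foreach() callback: record the thread (if any) whose stack matches
 * the one being freed.
 */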
static void dyn_cb(const struct k_thread *thread, void *user_data)
{
	struct dyn_cb_data *const data = (struct dyn_cb_data *)user_data;

	if (data->stack == (k_thread_stack_t *)thread->stack_info.start) {
		__ASSERT(data->tid == NULL, "stack %p is associated with more than one thread!",
			 (void *)thread->stack_info.start);
		data->tid = (k_tid_t)thread;
	}
}

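/* Free a dynamically allocated stack, refusing to do so while a thread is
 * still using it. Pool stacks are returned to the bitmap-managed pool; other
 * stacks are freed as kernel objects or via k_free().
 */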
int z_impl_k_thread_stack_free(k_thread_stack_t *stack)
{
	struct dyn_cb_data data = {.stack = stack};

	/* Get a possible tid associated with stack */
	k_thread_foreach(dyn_cb, &data);

	if (data.tid != NULL) {
		if (!(z_is_thread_state_set(data.tid, _THREAD_DUMMY) ||
		      z_is_thread_state_set(data.tid, _THREAD_DEAD))) {
			LOG_ERR("tid %p is in use!", data.tid);
			return -EBUSY;
		}
	}

	if (CONFIG_DYNAMIC_THREAD_POOL_SIZE > 0) {
		if (IS_ARRAY_ELEMENT(dynamic_stack, stack)) {
			if (sys_bitarray_free(&dynamic_ba, 1, ARRAY_INDEX(dynamic_stack, stack))) {
				LOG_ERR("stack %p is not allocated!", stack);
				return -EINVAL;
			}

			return 0;
		}
	}

	if (IS_ENABLED(CONFIG_DYNAMIC_THREAD_ALLOC)) {
#ifdef CONFIG_USERSPACE
		if (k_object_find(stack)) {
			k_object_free(stack);
		} else {
			k_free(stack);
		}
#else
		k_free(stack);
#endif /* CONFIG_USERSPACE */
	} else {
		LOG_DBG("Invalid stack %p", stack);
		return -EINVAL;
	}

	return 0;
}

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_thread_stack_free(k_thread_stack_t *stack)
{
	/* The thread stack object must not be in initialized state.
	 *
	 * Thread stack objects are initialized when the thread is created
	 * and de-initialized when the thread is destroyed. Since we can't
	 * free a stack that is in use, we have to check that the caller
	 * has access to the object but that it is not in use anymore.
	 */
	K_OOPS(K_SYSCALL_OBJ_NEVER_INIT(stack, K_OBJ_THREAD_STACK_ELEMENT));

	return z_impl_k_thread_stack_free(stack);
}
#include <zephyr/syscalls/k_thread_stack_free_mrsh.c>
#endif /* CONFIG_USERSPACE */