Compare commits: main ... v4.1-branch (86 commits)
Commits in this comparison (SHA1):

7081d7c5f1, 0e0546b8b8, 06485803da, 84c1843762, 9c83a8ae00, e77ee0f3da, 8c898e367d, 01b3d0576d,
cb51da0c3c, 1753daff1e, 210ed90aa1, 59a57e3e76, 28164d395d, e7fd4b21ce, 9bacf34791, fd07cc4289,
4a481aa974, b1d1726a4b, 03db3db2de, dc88506080, eae415d60b, a3be71a3e7, e81d10f204, 7074e6bc6b,
fe49371b1c, 7b385fae89, 0bf79e6e65, 42901f800f, 84e8c9fb1a, f82ade9fac, 2c0ff49c52, 68393e009f,
e3c11592f8, ed707e3db6, 5763ee96fd, 2f3c4c0802, 31e35bf3c8, 7e90d36cfc, 599716344f, da17d69204,
89afad4846, e57a404d3a, d973e58be8, b897ed3806, 7eedc86d7a, 2ce4a2f1c0, 34b526826a, 53a3b66521,
041035b12a, dfc02bd117, 2793658989, 60de14b5e0, a6bf101dae, e0f92e379a, 4189364849, ba7efd996b,
c9674ad58b, ee27fbf4f6, 92fc041155, 74cd0a9d18, 8da58ff55e, 54ac4ee622, f24adc8859, 73166bbe19,
11bda5e770, 433f65fdbc, 020db5a223, 272f233989, bcabc32533, bbd4d9e6ca, 8ca5b2f20f, 50dce44a92,
f6a55c9612, 5601c18bee, 0b4672f16b, e1e58dafe3, 4f9fe4c4d4, b57d4cc12a, 537361e7e1, f38ff34326,
09e6786002, ddf160a141, 3b192aab79, 92dac91276, 399f177fd6, 0ac2d654b6
.github/workflows/doc-build.yml (vendored): 121 lines changed

@@ -12,13 +12,7 @@ on:
pull_request:

env:
# NOTE: west docstrings will be extracted from the version listed here
WEST_VERSION: 1.2.0
# The latest CMake available directly with apt is 3.18, but we need >=3.20
# so we fetch that through pip.
CMAKE_VERSION: 3.20.5
DOXYGEN_VERSION: 1.12.0
JOB_COUNT: 4
JOB_COUNT: 8

jobs:
doc-file-check:
@@ -59,62 +53,77 @@ jobs:
name: "Documentation Build (HTML)"
needs: [doc-file-check]
if: >
github.repository_owner == 'zephyrproject-rtos' &&
( needs.doc-file-check.outputs.file_check == 'true' || github.event_name != 'pull_request' )
runs-on: ubuntu-24.04
timeout-minutes: 90
needs.doc-file-check.outputs.file_check == 'true' || github.event_name != 'pull_request'
runs-on:
group: zephyr-runner-v2-linux-x64-4xlarge
container:
image: ghcr.io/zephyrproject-rtos/ci-repo-cache:v0.28.6.20251003
options: '--entrypoint /bin/bash'
timeout-minutes: 20
concurrency:
group: doc-build-html-${{ github.ref }}
cancel-in-progress: true
env:
BASE_REF: ${{ github.base_ref }}

steps:
- name: install-pkgs
run: |
sudo apt-get update
sudo apt-get install -y wget python3-pip git ninja-build graphviz lcov
wget --no-verbose "https://github.com/doxygen/doxygen/releases/download/Release_${DOXYGEN_VERSION//./_}/doxygen-${DOXYGEN_VERSION}.linux.bin.tar.gz"
sudo tar xf doxygen-${DOXYGEN_VERSION}.linux.bin.tar.gz -C /opt
echo "/opt/doxygen-${DOXYGEN_VERSION}/bin" >> $GITHUB_PATH
echo "${HOME}/.local/bin" >> $GITHUB_PATH

- name: checkout
uses: actions/checkout@v4
- name: Print cloud service information
run: |
echo "ZEPHYR_RUNNER_CLOUD_PROVIDER = ${ZEPHYR_RUNNER_CLOUD_PROVIDER}"
echo "ZEPHYR_RUNNER_CLOUD_NODE = ${ZEPHYR_RUNNER_CLOUD_NODE}"
echo "ZEPHYR_RUNNER_CLOUD_POD = ${ZEPHYR_RUNNER_CLOUD_POD}"

- name: Apply container owner mismatch workaround
run: |
# FIXME: The owner UID of the GITHUB_WORKSPACE directory may not
# match the container user UID because of the way GitHub
# Actions runner is implemented. Remove this workaround when
# GitHub comes up with a fundamental fix for this problem.
git config --global --add safe.directory ${GITHUB_WORKSPACE}

- name: Clone cached Zephyr repository
continue-on-error: true
run: |
git clone --shared /repo-cache/zephyrproject/zephyr .
git remote set-url origin ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY}

- name: Checkout
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
with:
ref: ${{ github.event.pull_request.head.sha }}
fetch-depth: 0
path: zephyr

- name: Rebase
if: github.event_name == 'pull_request'
continue-on-error: true
env:
BASE_REF: ${{ github.base_ref }}
PR_HEAD: ${{ github.event.pull_request.head.sha }}
working-directory: zephyr
- name: Environment Setup
run: |
git config --global user.email "actions@zephyrproject.org"
git config --global user.name "Github Actions"
rm -fr ".git/rebase-apply"
rm -fr ".git/rebase-merge"
git rebase origin/${BASE_REF}
git clean -f -d
git log --graph --oneline HEAD...${PR_HEAD}
if [ "${{github.event_name}}" = "pull_request" ]; then
git config --global user.email "bot@zephyrproject.org"
git config --global user.name "Zephyr Builder"
rm -fr ".git/rebase-apply"
rm -fr ".git/rebase-merge"
git rebase origin/${BASE_REF}
git clean -f -d
git log --pretty=oneline | head -n 10
fi
echo "$HOME/.local/bin" >> $GITHUB_PATH
echo "$HOME/.cargo/bin" >> $GITHUB_PATH

- name: Setup Zephyr project
uses: zephyrproject-rtos/action-zephyr-setup@v1
with:
app-path: zephyr
toolchains: 'all'
west init -l . || true
west config manifest.group-filter -- +ci,+optional
west config --global update.narrow true
west update --path-cache /repo-cache/zephyrproject 2>&1 1> west.update.log || west update --path-cache /repo-cache/zephyrproject 2>&1 1> west.update.log || ( rm -rf ../modules ../bootloader ../tools && west update --path-cache /repo-cache/zephyrproject)
west forall -c 'git reset --hard HEAD'

- name: install-pip
working-directory: zephyr
echo "ZEPHYR_SDK_INSTALL_DIR=/opt/toolchains/zephyr-sdk-$( cat SDK_VERSION )" >> $GITHUB_ENV

- name: Install Python packages required for documentation build
run: |
pip install -r doc/requirements.txt
pip install coverxygen

- name: build-docs
- name: Build HTML documentation
shell: bash
working-directory: zephyr
run: |
if [[ "$GITHUB_REF" =~ "refs/tags/v" ]]; then
DOC_TAG="release"
@@ -139,26 +148,26 @@ jobs:
lcov --remove doc-coverage.info \*/deprecated > new.info
genhtml --no-function-coverage --no-branch-coverage new.info -o coverage-report

- name: compress-docs
working-directory: zephyr
- name: Compress documentation build artifacts
run: |
tar --use-compress-program="xz -T0" -cf html-output.tar.xz --exclude html/_sources --exclude html/doxygen/xml --directory=doc/_build html
tar --use-compress-program="xz -T0" -cf api-output.tar.xz --directory=doc/_build html/doxygen/html
tar --use-compress-program="xz -T0" -cf api-coverage.tar.xz coverage-report

- name: upload-build
uses: actions/upload-artifact@v4
- name: Upload HTML output
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: html-output
path: zephyr/html-output.tar.xz
path: html-output.tar.xz

- name: upload-api-coverage
uses: actions/upload-artifact@v4
- name: Upload Doxygen coverage artifacts
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: api-coverage
path: zephyr/api-coverage.tar.xz
path: api-coverage.tar.xz

- name: process-pr

- name: Summarize PR documentation URLs
if: github.event_name == 'pull_request'
run: |
REPO_NAME="${{ github.event.repository.name }}"
@@ -172,8 +181,8 @@ jobs:
echo "API Documentation will be available shortly at: ${API_DOC_URL}" >> $GITHUB_STEP_SUMMARY
echo "API Coverage Report will be available shortly at: ${API_COVERAGE_URL}" >> $GITHUB_STEP_SUMMARY

- name: upload-pr-number
uses: actions/upload-artifact@v4
- name: Upload PR number
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
if: github.event_name == 'pull_request'
with:
name: pr_num
VERSION: 2 lines changed

@@ -1,5 +1,5 @@
VERSION_MAJOR = 4
VERSION_MINOR = 1
PATCHLEVEL = 0
PATCHLEVEL = 1
VERSION_TWEAK = 0
EXTRAVERSION =
@@ -1,6 +1,7 @@
/*
 * Copyright (c) 2013-2014 Wind River Systems, Inc.
 * Copyright (c) 2021 Lexmark International, Inc.
 * Copyright 2025 Arm Limited and/or its affiliates <open-source-office@arm.com>
 *
 * SPDX-License-Identifier: Apache-2.0
 */
@@ -37,6 +38,34 @@
 */
#define DEFAULT_EXC_RETURN 0xFD;

#ifdef CONFIG_USERSPACE
static void setup_priv_stack(struct k_thread *thread)
{
	/* Set up privileged stack before entering user mode */
	thread->arch.priv_stack_start = (uint32_t)z_priv_stack_find(thread->stack_obj);

	/* CONFIG_PRIVILEGED_STACK_SIZE does not account for MPU_GUARD_ALIGN_AND_SIZE or
	 * MPU_GUARD_ALIGN_AND_SIZE_FLOAT. Therefore, we must compute priv_stack_end here before
	 * adjusting priv_stack_start for the mpu guard alignment
	 */
	thread->arch.priv_stack_end = thread->arch.priv_stack_start + CONFIG_PRIVILEGED_STACK_SIZE;

#if defined(CONFIG_MPU_STACK_GUARD)
	/* Stack guard area reserved at the bottom of the thread's
	 * privileged stack. Adjust the available (writable) stack
	 * buffer area accordingly.
	 */
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
	thread->arch.priv_stack_start +=
		((thread->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ?
		MPU_GUARD_ALIGN_AND_SIZE_FLOAT : MPU_GUARD_ALIGN_AND_SIZE;
#else
	thread->arch.priv_stack_start += MPU_GUARD_ALIGN_AND_SIZE;
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
#endif /* CONFIG_MPU_STACK_GUARD */
}
#endif

/* An initial context, to be "restored" by z_arm_pendsv(), is put at the other
 * end of the stack, and thus reusable by the stack when not needed anymore.
 *
@@ -80,7 +109,10 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,

	iframe = Z_STACK_PTR_TO_FRAME(struct __basic_sf, stack_ptr);
#if defined(CONFIG_USERSPACE)
	thread->arch.priv_stack_start = 0;
	if ((thread->base.user_options & K_USER) != 0) {
		setup_priv_stack(thread);
		iframe = Z_STACK_PTR_TO_FRAME(struct __basic_sf, thread->arch.priv_stack_end);
		iframe->pc = (uint32_t)arch_user_mode_enter;
	} else {
		iframe->pc = (uint32_t)z_thread_entry;
@@ -122,9 +154,6 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
		thread->arch.mode |= Z_ARM_MODE_MPU_GUARD_FLOAT_Msk;
	}
#endif
#if defined(CONFIG_USERSPACE)
	thread->arch.priv_stack_start = 0;
#endif
#endif
	/*
	 * initial values in all other registers/thread entries are
@@ -196,10 +225,8 @@ static inline void z_arm_thread_stack_info_adjust(struct k_thread *thread,
FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
					void *p1, void *p2, void *p3)
{
	uint32_t sp_is_priv = 1;

	/* Set up privileged stack before entering user mode */
	_current->arch.priv_stack_start =
		(uint32_t)z_priv_stack_find(_current->stack_obj);
#if defined(CONFIG_MPU_STACK_GUARD)
#if defined(CONFIG_THREAD_STACK_INFO)
	/* We're dropping to user mode which means the guard area is no
@@ -216,29 +243,29 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
	_current->stack_info.start -= MPU_GUARD_ALIGN_AND_SIZE;
	_current->stack_info.size += MPU_GUARD_ALIGN_AND_SIZE;
#endif /* CONFIG_THREAD_STACK_INFO */

	/* Stack guard area reserved at the bottom of the thread's
	 * privileged stack. Adjust the available (writable) stack
	 * buffer area accordingly.
	 */
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
	_current->arch.priv_stack_start +=
		((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ?
		MPU_GUARD_ALIGN_AND_SIZE_FLOAT : MPU_GUARD_ALIGN_AND_SIZE;
#else
	_current->arch.priv_stack_start += MPU_GUARD_ALIGN_AND_SIZE;
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
#endif /* CONFIG_MPU_STACK_GUARD */

#if defined(CONFIG_CPU_AARCH32_CORTEX_R)
	_current->arch.priv_stack_end =
		_current->arch.priv_stack_start + CONFIG_PRIVILEGED_STACK_SIZE;
#endif
	/* 2 ways how arch_user_mode_enter is called:
	 * - called as part of context switch from z_arm_pendsv, in this case privileged stack is
	 *   already setup and stack pointer points to privileged stack.
	 * - called directly from k_thread_user_mode_enter, in this case privileged stack is not
	 *   setup and stack pointer points to user stack.
	 *
	 * When called from k_thread_user_mode_enter, we need to check and setup the privileged
	 * stack and then instruct z_arm_userspace_enter to change the PSP to the privileged stack.
	 * Note that we do not change the PSP in this function to avoid any conflict with compiler's
	 * sequence which has already pushed stuff on the user stack.
	 */
	if (0 == _current->arch.priv_stack_start) {
		setup_priv_stack(_current);
		sp_is_priv = 0;
	}

	z_arm_userspace_enter(user_entry, p1, p2, p3,
			      (uint32_t)_current->stack_info.start,
			      _current->stack_info.size -
			      _current->stack_info.delta);
			      _current->stack_info.delta,
			      sp_is_priv);
	CODE_UNREACHABLE;
}
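The arch_new_thread() change above means a thread created with the K_USER option now takes the setup_priv_stack() path and starts out on its privileged stack, with arch_user_mode_enter as its initial PC. Below is a minimal, hedged sketch of application code that exercises that path; the stack size, priority and entry function are illustrative assumptions, only k_thread_create(), K_THREAD_STACK_DEFINE() and K_USER come from the Zephyr API.

```c
#include <zephyr/kernel.h>

#define USER_STACK_SIZE 1024   /* illustrative size */

K_THREAD_STACK_DEFINE(user_stack, USER_STACK_SIZE);
static struct k_thread user_thread;

static void user_entry(void *p1, void *p2, void *p3)
{
	/* Runs unprivileged; kernel services are reached via system calls. */
	printk("hello from user mode\n");
}

void start_user_thread(void)
{
	/* K_USER makes arch_new_thread() call setup_priv_stack(): the initial
	 * frame is built at priv_stack_end and the thread first enters
	 * arch_user_mode_enter() before dropping to user mode.
	 */
	k_thread_create(&user_thread, user_stack,
			K_THREAD_STACK_SIZEOF(user_stack),
			user_entry, NULL, NULL, NULL,
			K_PRIO_PREEMPT(5), K_USER, K_NO_WAIT);
}
```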
@@ -489,20 +489,219 @@ SECTION_FUNC(TEXT, z_arm_svc)
 * r8 - saved link register
 */
.L_do_syscall:
/*
 * Build a privilege stack frame from the user stack frame, then switch PSP
 * to it. This ensures return from SVC does not rely on the user stack.
 *
 * Layout of privilege stack created from user stack:
 *
 * +------+-------------------------+------+-------------------------+--------------------------+
 * |         User stack             |      Privilege stack           |          Notes           |
 * +------+-------------------------+------+-------------------------+--------------------------+
 * |Offset| contents                |Offset| contents                |                          |
 * +------+-------------------------+------+-------------------------+--------------------------+
 * |  0   | R0                   -> |  0   | R0                      | PSP switches from 0th    |
 * |      |                         |      |                         | offset of user stack to  |
 * |      |                         |      |                         | 0th offset of priv stack |
 * |  4   | R1                   -> |  4   | R1                      |                          |
 * |  8   | R2                   -> |  8   | R2                      |                          |
 * |  12  | R3                   -> | 12   | R3                      |                          |
 * |  16  | R12                  -> | 16   | R12                     |                          |
 * |  20  | LR                   -> | 20   | LR                      |                          |
 * |  24  | Return Address      -x> | 24   | z_arm_do_syscall        |return address from user  |
 * |      |                         |      |                         |sf is not copied. Instead,|
 * |      |                         |      |                         |it is replaced so that    |
 * |      |                         |      |                         |z_arm_svc returns to      |
 * |      |                         |      |                         |z_arm_do_syscall.         |
 * |      |                         |      |                         |                          |
 * |  28  | xPSR (w/ or w/o pad) -> | 28   | xPSR (pad bit cleared)  |This completes the basic  |
 * |      |                         |      |                         |exception sf w/ or w/o pad|
 * |      |                         |      |                         |                          |
 * |  --  | FP regs + FPSCR      -> | --   | FP regs + FPSCR         |For arch supporting fp    |
 * |      | (w/ or w/o pad)         |      |                         |context an additional     |
 * |      |                         |      |                         |extended sf is copied.    |
 * |________________________________|______|_________________________|__________________________|
 * |      |                         |      |                         |On returning to           |
 * |      |                         |      |                         |z_arm_do_syscall, the     |
 * |      |                         |      |                         |above sf has already been |
 * |      |                         |      |                         |unstacked and 8B from the |
 * |      |                         |      |                         |then sf are used to pass  |
 * |      |                         |      |                         |original pre-svc sp & the |
 * |      |                         |      |                         |return address.           |
 * |      |                         |      |                         |Note: at the moment       |
 * |      |                         |      |                         |z_arm_do_syscall also     |
 * |      |                         |      |                         |expects the return address|
 * |      |                         |      |                         |to be set in r8.          |
 * |      |                         |      |                         |                          |
 * |      |                         |  0   | address that            |z_arm_do_syscall expects  |
 * |      |                         |      | z_arm_do_syscall should |the original pre-svc sp at|
 * |      |                         |      | set as PSP before       |0th offset i.e. new sp[0] |
 * |      |                         |      | returning from svc.     |and,                      |
 * |      |                         |      |                         |                          |
 * |      |                         |  4   | Address that            |the return address at     |
 * |      |                         |      | z_arm_do_syscall should |sp[4]. Note that this is  |
 * |      |                         |      | return to after handling|the return address copied |
 * |      |                         |      | svc                     |from user exception sf[24]|
 * |      |                         |      |                         |which was not copied in   |
 * |      |                         |      |                         |the previous sf.          |
 * +------+-------------------------+------+-------------------------+--------------------------+
 * "sf" in this function is used as abbreviation for "stack frame".
 * Note that the "FP regs + FPSCR" are only present if CONFIG_FPU_SHARING=y, and the optional pad
 * is only present if PSP was not 8-byte aligned when SVC was executed.
 * Also note that FPU cannot be present in ARMv6-M or ARMv8-M Baseline implementations
 * (i.e., it may only be present when CONFIG_ARMV7_M_ARMV8_M_MAINLINE is enabled).
 */
/* Start by fetching the top of privileged stack */
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
movs r3, #24
ldr r1, [r0, r3]   /* grab address of PC from stack frame */
mov r8, r1
ldr r1, =_kernel
ldr r1, [r1, #_kernel_offset_to_current]
adds r1, r1, #_thread_offset_to_priv_stack_start
ldr r1, [r1]    /* bottom of priv stack */
ldr r3, =CONFIG_PRIVILEGED_STACK_SIZE
subs r3, #(_EXC_HW_SAVED_BASIC_SF_SIZE+8) /* 8 for original sp and pc */
add r1, r3
mov ip, r1
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
ldr r8, [r0, #24]   /* grab address of PC from stack frame */
ldr ip, =_kernel
ldr ip, [ip, #_kernel_offset_to_current]
ldr ip, [ip, #_thread_offset_to_priv_stack_start]    /* bottom of priv stack */
add ip, #CONFIG_PRIVILEGED_STACK_SIZE
#ifdef CONFIG_FPU_SHARING
/* Assess whether svc calling thread had been using the FP registers. */
tst lr, #_EXC_RETURN_FTYPE_Msk
ite eq
moveq r8, #_EXC_HW_SAVED_EXTENDED_SF_SIZE
movne r8, #_EXC_HW_SAVED_BASIC_SF_SIZE
#else
mov r8, #_EXC_HW_SAVED_BASIC_SF_SIZE
#endif
sub ip, #8   /* z_arm_do_syscall will use this to get original sp and pc */
sub ip, r8   /* 32 for basic sf + 72 for the optional esf */
#endif

/*
 * At this point:
 * r0 has PSP i.e. top of user stack
 * ip has top of privilege stack
 * r8 has hardware-saved stack frame size (only in case of mainline)
 */
push {r4-r7}
push {r2}
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
mov r2, r0   /* safe to use r2 since it is saved on MSP */

/* Check for padding in the sf */
ldr r1, [r0, #_EXC_HW_SAVED_BASIC_SF_XPSR_OFFSET]  /* grab xPSR from sf which has the pad bit */
movs r3, #1
/* Check if pad bit 9 is set */
lsls r3, r3, #9
tst r1, r3
beq .L_no_padding
/* special handling for padded sf */
bics r1, r3   /* clear the pad bit (priv stack is aligned and doesn't need it) */
adds r2, #4
.L_no_padding:
/* Calculate original pre-svc user sp which is psp + sf size (+4B if pad bit was set) */
adds r2, #_EXC_HW_SAVED_BASIC_SF_SIZE
mov r3, ip
str r2,[r3, #0]

/* Store the pre-SVC user SP at the offset expected by z_arm_do_syscall,
 * as detailed in the table above.
 */
str r2,[r3, #_EXC_HW_SAVED_BASIC_SF_SIZE]
/* sf of priv stack has the same xPSR as user stack but with 9th bit reset */
str r1,[r3, #_EXC_HW_SAVED_BASIC_SF_XPSR_OFFSET]

/* r0-r3, r12, LR from user stack sf are copied to sf of priv stack */
mov r1, r0
mov r2, r3
ldmia r1!, {r4-r7}
stmia r2!, {r4-r7}
ldmia r1!, {r4-r5}
stmia r2!, {r4-r5}

/* Store the svc return address at the offset expected by z_arm_do_syscall,
 * as detailed in the table above.
 */
str r5, [r3, #(_EXC_HW_SAVED_BASIC_SF_SIZE+4)]

ldr r1, =z_arm_do_syscall
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
str r1, [r0, r3]   /* overwrite the PC to point to z_arm_do_syscall */
str r1, [r3, #_EXC_HW_SAVED_BASIC_SF_RETADDR_OFFSET] /* Execution return to z_arm_do_syscall */
ldr r1, [r0, #_EXC_HW_SAVED_BASIC_SF_RETADDR_OFFSET] /* grab address of PC from stack frame */
/* Store the svc return address (i.e. next instr to svc) in r8 as expected by z_arm_do_syscall.
 */
mov r8, r1

#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
str r1, [r0, #24]   /* overwrite the PC to point to z_arm_do_syscall */
mov r2, r0   /* safe to use r2 since it is saved on MSP */

/* Calculate original pre-svc user sp without pad which is psp + sf size */
add r2, r8

/* Also, check for padding in the sf */
ldr r1, [r0, #_EXC_HW_SAVED_BASIC_SF_XPSR_OFFSET]  /* grab xPSR from sf which has the pad bit */
tst r1, #(1<<9)   /* Check if pad bit 9 is set */
beq .L_no_padding
bics r1, #(1<<9)  /* clear the pad bit (priv stack is aligned and doesn't need it) */
/* Calculate original pre-svc user sp with pad */
add r2, #4
.L_no_padding:
str r2,[ip, #0]
/* Store the pre-SVC user SP at the offset expected by z_arm_do_syscall,
 * as detailed in the table above.
 */
str r2,[ip, r8]
str r1,[ip, #_EXC_HW_SAVED_BASIC_SF_XPSR_OFFSET] /* priv sf get user sf xPSR with bit9 reset */

/* r0-r3, r12, LR from user stack sf are copied to sf of priv stack */
mov r1, r0
mov r2, ip
ldmia r1!, {r4-r7}
stmia r2!, {r4-r7}
ldmia r1!, {r4-r5}
stmia r2!, {r4-r5}

/* Store the svc return address at the offset expected by z_arm_do_syscall,
 * as detailed in the table above.
 */
add r8, #4
str r5, [ip, r8]

ldr r1, =z_arm_do_syscall
str r1, [ip, #_EXC_HW_SAVED_BASIC_SF_RETADDR_OFFSET] /* Execution return to z_arm_do_syscall */
ldr r1, [r0, #_EXC_HW_SAVED_BASIC_SF_RETADDR_OFFSET] /* grab address of PC from stack frame */
/* Store the svc return address (i.e. next instr to svc) in r8 as expected by z_arm_do_syscall.
 */
mov r8, r1

/* basic stack frame is copied at this point to privilege stack,
 * now time to copy the fp context
 */
#ifdef CONFIG_FPU_SHARING
tst lr, #_EXC_RETURN_FTYPE_Msk
bne .L_skip_fp_copy
add r1, r0, #32
add r2, ip, #32

vldmia r1!, {s0-s15}
vstmia r2!, {s0-s15}

/* copy FPSCR + reserved (8 bytes) */
ldmia r1!, {r4, r5}
stmia r2!, {r4, r5}
.L_skip_fp_copy:
#endif

#endif
pop {r2} /* restore CONTROL value */
pop {r4-r7}

/* Point PSP to privilege stack,
 * note that r0 still has the old PSP
 */
msr PSP, ip

#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
ldr r3, =K_SYSCALL_LIMIT
cmp r6, r3
@@ -556,14 +755,12 @@ SECTION_FUNC(TEXT, z_arm_svc)
isb

#if defined(CONFIG_BUILTIN_STACK_GUARD)
/* Thread is now in privileged mode; after returning from SCVall it
 * will use the default (user) stack before switching to the privileged
 * stack to execute the system call. We need to protect the user stack
 * against stack overflows until this stack transition.
 */
ldr r1, [r0, #_thread_offset_to_stack_info_start]    /* stack_info.start */
msr PSPLIM, r1
#endif /* CONFIG_BUILTIN_STACK_GUARD */
/* Set stack pointer limit (needed in privileged mode) */
ldr ip, =_kernel
ldr ip, [ip, #_kernel_offset_to_current]
ldr ip, [ip, #_thread_offset_to_priv_stack_start]    /* priv stack ptr */
msr PSPLIM, ip
#endif

/* return from SVC to the modified LR - z_arm_do_syscall */
bx lr
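For reference while reading the table above, the "basic" frame being copied is the standard hardware-saved Cortex-M exception stack frame, and the extended frame adds the FP context. The C sketch below only illustrates the sizes and offsets the diff refers to (return address at offset 24, xPSR at 28, 32 bytes basic, 72 additional bytes for the FP part); the struct and macro names are illustrative, not the ones used in the Zephyr sources.

```c
#include <stdint.h>

/* Basic frame pushed by Cortex-M hardware on exception entry (32 bytes). */
struct hw_basic_frame {
	uint32_t r0, r1, r2, r3;
	uint32_t r12;
	uint32_t lr;
	uint32_t return_addr;   /* offset 24: address of the instruction after SVC */
	uint32_t xpsr;          /* offset 28: bit 9 set if a 4-byte alignment pad follows */
};

/* Extended frame when the FP context is active (FPU sharing enabled):
 * basic frame + s0..s15 + FPSCR + one reserved word = 32 + 72 = 104 bytes.
 */
struct hw_extended_frame {
	struct hw_basic_frame basic;
	uint32_t s[16];
	uint32_t fpscr;
	uint32_t reserved;
};

#define BASIC_SF_SIZE    sizeof(struct hw_basic_frame)     /* 32 bytes  */
#define EXTENDED_SF_SIZE sizeof(struct hw_extended_frame)  /* 104 bytes */
```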
@@ -45,6 +45,35 @@
K_THREAD_STACK_DECLARE(z_main_stack, CONFIG_MAIN_STACK_SIZE);
#endif

#ifdef CONFIG_USERSPACE
static void setup_priv_stack(struct k_thread *thread)
{
	/* Set up privileged stack before entering user mode */
	thread->arch.priv_stack_start = (uint32_t)z_priv_stack_find(thread->stack_obj);

	/* CONFIG_PRIVILEGED_STACK_SIZE does not account for MPU_GUARD_ALIGN_AND_SIZE or
	 * MPU_GUARD_ALIGN_AND_SIZE_FLOAT. Therefore, we must compute priv_stack_end here before
	 * adjusting priv_stack_start for the mpu guard alignment
	 */
	thread->arch.priv_stack_end = thread->arch.priv_stack_start + CONFIG_PRIVILEGED_STACK_SIZE;

#if defined(CONFIG_MPU_STACK_GUARD)
	/* Stack guard area reserved at the bottom of the thread's
	 * privileged stack. Adjust the available (writable) stack
	 * buffer area accordingly.
	 */
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
	thread->arch.priv_stack_start +=
		((thread->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0)
			? MPU_GUARD_ALIGN_AND_SIZE_FLOAT
			: MPU_GUARD_ALIGN_AND_SIZE;
#else
	thread->arch.priv_stack_start += MPU_GUARD_ALIGN_AND_SIZE;
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
#endif /* CONFIG_MPU_STACK_GUARD */
}
#endif

/* An initial context, to be "restored" by z_arm_pendsv(), is put at the other
 * end of the stack, and thus reusable by the stack when not needed anymore.
 *
@@ -87,7 +116,10 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack, char *sta

	iframe = Z_STACK_PTR_TO_FRAME(struct __basic_sf, stack_ptr);
#if defined(CONFIG_USERSPACE)
	thread->arch.priv_stack_start = 0;
	if ((thread->base.user_options & K_USER) != 0) {
		setup_priv_stack(thread);
		iframe = Z_STACK_PTR_TO_FRAME(struct __basic_sf, thread->arch.priv_stack_end);
		iframe->pc = (uint32_t)arch_user_mode_enter;
	} else {
		iframe->pc = (uint32_t)z_thread_entry;
@@ -118,9 +150,6 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack, char *sta
		thread->arch.mode |= Z_ARM_MODE_MPU_GUARD_FLOAT_Msk;
	}
#endif
#if defined(CONFIG_USERSPACE)
	thread->arch.priv_stack_start = 0;
#endif
#endif
	/*
	 * initial values in all other registers/thread entries are
@@ -215,9 +244,8 @@ uint32_t z_arm_mpu_stack_guard_and_fpu_adjust(struct k_thread *thread)
#ifdef CONFIG_USERSPACE
FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry, void *p1, void *p2, void *p3)
{
	uint32_t sp_is_priv = 1;

	/* Set up privileged stack before entering user mode */
	_current->arch.priv_stack_start = (uint32_t)z_priv_stack_find(_current->stack_obj);
#if defined(CONFIG_MPU_STACK_GUARD)
#if defined(CONFIG_THREAD_STACK_INFO)
	/* We're dropping to user mode which means the guard area is no
@@ -234,23 +262,26 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry, void *p1, v
	_current->stack_info.start -= MPU_GUARD_ALIGN_AND_SIZE;
	_current->stack_info.size += MPU_GUARD_ALIGN_AND_SIZE;
#endif /* CONFIG_THREAD_STACK_INFO */

	/* Stack guard area reserved at the bottom of the thread's
	 * privileged stack. Adjust the available (writable) stack
	 * buffer area accordingly.
	 */
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
	_current->arch.priv_stack_start +=
		((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0)
			? MPU_GUARD_ALIGN_AND_SIZE_FLOAT
			: MPU_GUARD_ALIGN_AND_SIZE;
#else
	_current->arch.priv_stack_start += MPU_GUARD_ALIGN_AND_SIZE;
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
#endif /* CONFIG_MPU_STACK_GUARD */

	/* 2 ways how arch_user_mode_enter is called:
	 * - called as part of context switch from z_arm_pendsv, in this case privileged stack is
	 *   already setup and stack pointer points to privileged stack.
	 * - called directly from k_thread_user_mode_enter, in this case privileged stack is not
	 *   setup and stack pointer points to user stack.
	 *
	 * When called from k_thread_user_mode_enter, we need to check and setup the privileged
	 * stack and then instruct z_arm_userspace_enter to change the PSP to the privileged stack.
	 * Note that we do not change the PSP in this function to avoid any conflict with compiler's
	 * sequence which has already pushed stuff on the user stack.
	 */
	if (0 == _current->arch.priv_stack_start) {
		setup_priv_stack(_current);
		sp_is_priv = 0;
	}

	z_arm_userspace_enter(user_entry, p1, p2, p3, (uint32_t)_current->stack_info.start,
			      _current->stack_info.size - _current->stack_info.delta);
			      _current->stack_info.size - _current->stack_info.delta, sp_is_priv);
	CODE_UNREACHABLE;
}
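The comment above distinguishes the two callers of arch_user_mode_enter(). The "called directly" case corresponds to a thread that starts privileged and later drops to user mode through k_thread_user_mode_enter(). A minimal sketch of that usage follows; the entry functions and arguments are illustrative, only k_thread_user_mode_enter() is the documented Zephyr API.

```c
#include <zephyr/kernel.h>

static void user_entry(void *p1, void *p2, void *p3)
{
	/* From here on the thread runs unprivileged. */
}

static void supervisor_entry(void *p1, void *p2, void *p3)
{
	/* ... privileged setup work (grant kernel objects, init drivers) ... */

	/* Per the change above: arch_user_mode_enter() finds
	 * priv_stack_start == 0 for this thread, calls setup_priv_stack()
	 * and passes sp_is_priv == 0 to z_arm_userspace_enter(), which then
	 * performs the switch to the privileged stack itself.
	 */
	k_thread_user_mode_enter(user_entry, p1, p2, p3);
}
```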
@@ -557,6 +588,8 @@ void arch_switch_to_main_thread(struct k_thread *main_thread, char *stack_ptr,
		"ldr r4, =z_thread_entry\n"
		/* We don't intend to return, so there is no need to link. */
		"bx r4\n"
		/* Force a literal pool placement for the addresses referenced above */
		".ltorg\n"
		:
		: "r"(_main), "r"(stack_ptr)
		: "r0", "r1", "r2", "r3", "r4", "ip", "lr", "memory");
@@ -623,6 +656,8 @@ FUNC_NORETURN void z_arm_switch_to_main_no_multithreading(k_thread_entry_t main_
		"ldr r0, =arch_irq_lock_outlined\n"
		"blx r0\n"
		"loop: b loop\n\t" /* while (true); */
		/* Force a literal pool placement for the addresses referenced above */
		".ltorg\n"
		:
		: [_p1] "r"(p1), [_p2] "r"(p2), [_p3] "r"(p3), [_psp] "r"(psp),
		  [_main_entry] "r"(main_entry)
@@ -45,8 +45,9 @@ GEN_OFFSET_SYM(_thread_arch_t, mode_exc_return);
#endif
#if defined(CONFIG_USERSPACE)
GEN_OFFSET_SYM(_thread_arch_t, priv_stack_start);
#if defined(CONFIG_CPU_AARCH32_CORTEX_R)
GEN_OFFSET_SYM(_thread_arch_t, priv_stack_end);

#if defined(CONFIG_CPU_AARCH32_CORTEX_R)
GEN_OFFSET_SYM(_thread_arch_t, sp_usr);
#endif
#endif
@@ -2,6 +2,7 @@
 * Userspace and service handler hooks
 *
 * Copyright (c) 2017 Linaro Limited
 * Copyright 2025 Arm Limited and/or its affiliates <open-source-office@arm.com>
 *
 * SPDX-License-Identifier: Apache-2.0
 *
@@ -41,45 +42,41 @@ GDATA(_k_syscall_table)
 *
 * The function is invoked as:
 * z_arm_userspace_enter(user_entry, p1, p2, p3,
 *                       stack_info.start, stack_info.size);
 *                       stack_info.start, stack_info.size,
 *                       sp_is_priv);
 */
SECTION_FUNC(TEXT,z_arm_userspace_enter)
/* move user_entry to lr */
mov lr, r0

/* prepare to set stack to privileged stack */
/* load arguments from stack:
 * r4 = user stack start
 * r5 = user stack size
 * r6 = sp_is_priv (1 if already on privileged stack)
 */
pop {r4, r5, r6}

/* get current thread pointer */
ldr r0, =_kernel
ldr r0, [r0, #_kernel_offset_to_current]

#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
/* move p1 to ip */
mov ip, r1
ldr r1, =_thread_offset_to_priv_stack_start
ldr r0, [r0, r1]    /* priv stack ptr */
ldr r1, =CONFIG_PRIVILEGED_STACK_SIZE
add r0, r0, r1
ldr r1, =_thread_offset_to_priv_stack_end
ldr r0, [r0, r1]
/* Restore p1 from ip */
mov r1, ip
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
ldr r0, [r0, #_thread_offset_to_priv_stack_start]    /* priv stack ptr */
ldr ip, =CONFIG_PRIVILEGED_STACK_SIZE
add r0, r0, ip
#elif defined(CONFIG_CPU_AARCH32_CORTEX_R)
ldr r0, [r0, #_thread_offset_to_priv_stack_start]    /* priv stack ptr */
ldr ip, =CONFIG_PRIVILEGED_STACK_SIZE
add r0, r0, ip

ldr ip, =_kernel
ldr ip, [ip, #_kernel_offset_to_current]
str r0, [ip, #_thread_offset_to_priv_stack_end]      /* priv stack end */
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) \
	|| defined(CONFIG_CPU_AARCH32_CORTEX_R)
ldr r0, [r0, #_thread_offset_to_priv_stack_end]      /* privileged stack ptr */
#endif

/* store current stack pointer to ip
 * the current stack pointer is needed to retrieve
 * stack_info.start and stack_info.size
 */
mov ip, sp
/* check if current stack is privileged and switch to it if not */
cmp r6, #1
beq 1f

#if defined(CONFIG_CPU_AARCH32_CORTEX_R)
#if defined(CONFIG_CPU_AARCH32_CORTEX_R) || defined(CONFIG_CPU_AARCH32_CORTEX_A)
mov sp, r0
#else
/* set stack to privileged stack
@@ -93,35 +90,28 @@ SECTION_FUNC(TEXT,z_arm_userspace_enter)
msr PSP, r0
#endif

1:
/* push thread args and entrypoint to stack */
push {r1,r2,r3,lr}

#if defined(CONFIG_BUILTIN_STACK_GUARD)
/* At this point the privileged stack is not yet protected by PSPLIM.
 * Since we have just switched to the top of the privileged stack, we
 * Since we have switched to the top of the privileged stack, we
 * are safe, as long as the stack can accommodate the maximum exception
 * stack frame.
 */

/* set stack pointer limit to the start of the priv stack */
ldr r0, =_kernel
ldr r0, [r0, #_kernel_offset_to_current]
ldr r0, [r0, #_thread_offset_to_priv_stack_start]    /* priv stack ptr */
ldr r1, =CONFIG_PRIVILEGED_STACK_SIZE
sub r0, r0, r1   /* Calculate start of privileged stack */
/* set stack pointer limit to the start of the privileged stack */
msr PSPLIM, r0
#endif

/* push args to stack */
push {r1,r2,r3,lr}
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
mov r1, ip
push {r0,r1}
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) \
	|| defined(CONFIG_CPU_AARCH32_CORTEX_R)
push {r0,ip}
#endif

/* Re-program dynamic memory map.
 *
 * Important note:
 * z_arm_configure_dynamic_mpu_regions() may re-program the MPU Stack Guard
 * to guard the privilege stack for overflows (if building with option
 * to guard the privileged stack for overflows (if building with option
 * CONFIG_MPU_STACK_GUARD). There is a risk of actually overflowing the
 * stack while doing the re-programming. We minimize the risk by placing
 * this function immediately after we have switched to the privileged stack
@@ -135,29 +125,10 @@ SECTION_FUNC(TEXT,z_arm_userspace_enter)
ldr r0, [r0, #_kernel_offset_to_current]
bl z_arm_configure_dynamic_mpu_regions

#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
pop {r0,r3}

/* load up stack info from user stack */
ldr r0, [r3]
ldr r3, [r3, #4]
mov ip, r3

push {r0,r3}
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) \
	|| defined(CONFIG_CPU_AARCH32_CORTEX_R)
pop {r0,ip}

/* load up stack info from user stack */
ldr r0, [ip]
ldr ip, [ip, #4]

push {r0,ip}
#endif

/* clear the user stack area to clean out privileged data */
/* from right past the guard right up to the end */
mov r2, ip
mov r0, r4
mov r2, r5
#ifdef CONFIG_INIT_STACKS
ldr r1,=0xaaaaaaaa
#else
@@ -165,17 +136,18 @@ SECTION_FUNC(TEXT,z_arm_userspace_enter)
#endif
bl memset

/* At this point:
 * r4 contains user stack start
 * r5 contains user stack size
 * calculate top of user stack in r0 (start+size)
 */
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
pop {r0, r1}
mov ip, r1
adds r0, r4, r5
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) \
	|| defined(CONFIG_CPU_AARCH32_CORTEX_R)
pop {r0,ip}
add r0, r4, r5
#endif

/* r0 contains user stack start, ip contains user stack size */
add r0, r0, ip   /* calculate top of stack */

/* pop remaining arguments from stack before switching stacks */
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
/* Use r4 to pop lr, then restore r4 */
@@ -188,7 +160,7 @@ SECTION_FUNC(TEXT,z_arm_userspace_enter)
pop {r1,r2,r3,lr}
#endif

#if defined(CONFIG_CPU_AARCH32_CORTEX_R)
#if defined(CONFIG_CPU_AARCH32_CORTEX_R) || defined(CONFIG_CPU_AARCH32_CORTEX_A)
/*
 * set stack to user stack. We are in SYSTEM state, so r13 and r14 are
 * shared with USER state
@@ -223,10 +195,7 @@ SECTION_FUNC(TEXT,z_arm_userspace_enter)
isb

/* Set PSPLIM to guard the thread's user stack. */
ldr r0, =_kernel
ldr r0, [r0, #_kernel_offset_to_current]
ldr r0, [r0, #_thread_offset_to_stack_info_start]
msr PSPLIM, r0
msr PSPLIM, r4

pop {r0, ip}
#endif
@@ -308,9 +277,8 @@ SECTION_FUNC(TEXT,z_arm_userspace_enter)
 * This function is used to do system calls from unprivileged code. This
 * function is responsible for the following:
 * 1) Fixing up bad syscalls
 * 2) Configuring privileged stack and loading up stack arguments
 * 3) Dispatching the system call
 * 4) Restoring stack and calling back to the caller of the SVC
 * 2) Dispatching the system call
 * 3) Restoring stack and calling back to the caller of the SVC
 *
 */
SECTION_FUNC(TEXT, z_arm_do_syscall)
@@ -328,41 +296,7 @@ SECTION_FUNC(TEXT, z_arm_do_syscall)
 * At this point PSPLIM is already configured to guard the default (user)
 * stack, so pushing to the default thread's stack is safe.
 */
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
/* save current stack pointer (user stack) */
mov ip, sp
/* temporarily push to user stack */
push {r0,r1}
/* setup privileged stack */
ldr r0, =_kernel
ldr r0, [r0, #_kernel_offset_to_current]
adds r0, r0, #_thread_offset_to_priv_stack_start
ldr r0, [r0]    /* priv stack ptr */
ldr r1, =CONFIG_PRIVILEGED_STACK_SIZE
add r0, r1

/* Store current SP and LR at the beginning of the priv stack */
subs r0, #8
mov r1, ip
str r1, [r0, #0]
mov r1, lr
str r1, [r0, #4]
mov ip, r0
/* Restore user stack and original r0, r1 */
pop {r0, r1}

#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
/* setup privileged stack */
ldr ip, =_kernel
ldr ip, [ip, #_kernel_offset_to_current]
ldr ip, [ip, #_thread_offset_to_priv_stack_start]    /* priv stack ptr */
add ip, #CONFIG_PRIVILEGED_STACK_SIZE

/* Store current SP and LR at the beginning of the priv stack */
subs ip, #8
str sp, [ip, #0]
str lr, [ip, #4]
#elif defined(CONFIG_CPU_AARCH32_CORTEX_R)
#if defined(CONFIG_CPU_AARCH32_CORTEX_R)
/*
 * The SVC handler has already switched to the privileged stack.
 * Store the user SP and LR at the beginning of the priv stack.
@@ -373,11 +307,6 @@ SECTION_FUNC(TEXT, z_arm_do_syscall)
push {ip, lr}
#endif

#if !defined(CONFIG_CPU_AARCH32_CORTEX_R)
/* switch to privileged stack */
msr PSP, ip
#endif

/* Note (applies when using stack limit checking):
 * We do not need to lock IRQs after switching PSP to the privileged stack;
 * PSPLIM is guarding the default (user) stack, which, by design, is
@@ -386,14 +315,6 @@ SECTION_FUNC(TEXT, z_arm_do_syscall)
 * the maximum exception stack frame.
 */

#if defined(CONFIG_BUILTIN_STACK_GUARD)
/* Set stack pointer limit (needed in privileged mode) */
ldr ip, =_kernel
ldr ip, [ip, #_kernel_offset_to_current]
ldr ip, [ip, #_thread_offset_to_priv_stack_start]    /* priv stack ptr */
msr PSPLIM, ip
#endif

/*
 * r0-r5 contain arguments
 * r6 contains call_id
@@ -1,5 +1,6 @@
/*
 * Copyright (c) 2019 Carlo Caione <ccaione@baylibre.com>
 * Copyright 2025 Arm Limited and/or its affiliates <open-source-office@arm.com>
 *
 * SPDX-License-Identifier: Apache-2.0
 */
@@ -77,7 +78,8 @@ static ALWAYS_INLINE void arch_switch(void *switch_to, void **switched_from)
extern FUNC_NORETURN void z_arm_userspace_enter(k_thread_entry_t user_entry,
						void *p1, void *p2, void *p3,
						uint32_t stack_end,
						uint32_t stack_start);
						uint32_t stack_start,
						uint32_t sp_is_priv);

extern void z_arm_fatal_error(unsigned int reason, const struct arch_esf *esf);
@@ -1,5 +1,6 @@
/*
 * Copyright (c) 2019 Carlo Caione <ccaione@baylibre.com>
 * Copyright 2025 Arm Limited and/or its affiliates <open-source-office@arm.com>
 *
 * SPDX-License-Identifier: Apache-2.0
 */
@@ -75,7 +76,8 @@ extern FUNC_NORETURN void z_arm_switch_to_main_no_multithreading(k_thread_entry_
#endif /* !CONFIG_MULTITHREADING */

extern FUNC_NORETURN void z_arm_userspace_enter(k_thread_entry_t user_entry, void *p1, void *p2,
						void *p3, uint32_t stack_end, uint32_t stack_start);
						void *p3, uint32_t stack_end, uint32_t stack_start,
						uint32_t sp_is_priv);

extern void z_arm_fatal_error(unsigned int reason, const struct arch_esf *esf);
@@ -45,10 +45,10 @@
#define _thread_offset_to_priv_stack_start \
	(___thread_t_arch_OFFSET + ___thread_arch_t_priv_stack_start_OFFSET)

#if defined(CONFIG_CPU_AARCH32_CORTEX_R)
#define _thread_offset_to_priv_stack_end \
	(___thread_t_arch_OFFSET + ___thread_arch_t_priv_stack_end_OFFSET)

#if defined(CONFIG_CPU_AARCH32_CORTEX_R)
#define _thread_offset_to_sp_usr \
	(___thread_t_arch_OFFSET + ___thread_arch_t_sp_usr_OFFSET)
#endif
@@ -311,7 +311,7 @@ You can debug an application in the usual way. Here is an example for the
   :goals: debug

.. _pyOCD:
   https://github.com/mbedmicro/pyOCD
   https://github.com/pyocd/pyOCD

.. _CMSIS DAP:
   https://developer.mbed.org/handbook/CMSIS-DAP
@@ -323,7 +323,7 @@ You can debug an application in the usual way. Here is an example for the
   http://wiki.seeed.cc/BLE_Nitrogen/

.. _pyOCD issue 259:
   https://github.com/mbedmicro/pyOCD/issues/259
   https://github.com/pyocd/pyOCD/issues/259

.. _96Boards IE Specification:
   https://linaro.co/ie-specification
@@ -108,9 +108,9 @@
	pinctrl-names = "default";
	clock-frequency = <I2C_BITRATE_FAST>;

	vl53l1x: vl53l1x@52 {
	vl53l1x: vl53l1x@29 {
		compatible = "st,vl53l1x";
		reg = <0x52>;
		reg = <0x29>;
		status = "okay";
		xshut-gpios = <&gpiog 10 GPIO_ACTIVE_HIGH>;
		int-gpios = <&gpiod 8 GPIO_ACTIVE_HIGH>;
@@ -20,4 +20,5 @@ if(CONFIG_BOARD_MPS2_AN521_CPU1 AND NOT CONFIG_OPENAMP)
    BUILD_BYPRODUCTS "${CPU0_BINARY_DIR}/${KERNEL_BIN_NAME}"
    BUILD_ALWAYS True
  )
  add_dependencies(app empty_cpu0)
endif()
@@ -33,7 +33,7 @@ config TEST_EXTRA_STACK_SIZE

endif # COVERAGE_GCOV

endif
endif # BOARD_MPS2_AN383 || BOARD_MPS2_AN385 || BOARD_MPS2_AN386 || BOARD_MPS2_AN500

if BOARD_MPS2_AN521_CPU0 || BOARD_MPS2_AN521_CPU0_NS || BOARD_MPS2_AN521_CPU1

@@ -58,4 +58,11 @@ config UART_INTERRUPT_DRIVEN

endif # SERIAL

endif # BOARD_MPS2_AN521_CPU0 || BOARD_MPS2_AN521_CPU0_NS || BOARD_MPS2_AN521_CPU1

if QEMU_TARGET

config ISR_STACK_SIZE
	default 4096

endif
@@ -56,7 +56,7 @@
	compatible = "gpio-keys";
	user_button_0: button_0 {
		label = "User SW2";
		gpios = <&gpioc 6 GPIO_ACTIVE_LOW>;
		gpios = <&gpioc 6 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>;
		zephyr,code = <INPUT_KEY_0>;
		status = "okay";
	};
@@ -35,6 +35,6 @@
	};
};

arduino_i2c: &i2c3 {};
arduino_i2c: &i2c1 {};
arduino_spi: &spi1 {};
arduino_serial: &uart4 {};
@@ -49,6 +49,9 @@ function(zephyr_mcuboot_tasks)
    elseif(NOT (CONFIG_BUILD_OUTPUT_BIN OR CONFIG_BUILD_OUTPUT_HEX))
      message(FATAL_ERROR "Can't sign images for MCUboot: Neither CONFIG_BUILD_OUTPUT_BIN nor CONFIG_BUILD_OUTPUT_HEX is enabled, so there's nothing to sign.")
    endif()

    # Add key file as CMake dependency so a file change will rerun the build
    set_property(DIRECTORY APPEND PROPERTY CMAKE_CONFIGURE_DEPENDS ${${file}})
  endif()
endforeach()
doc/_templates/zversions.html (vendored): 2 lines changed

@@ -2,7 +2,7 @@
<div class="rst-versions" data-toggle="rst-versions" role="note" aria-label="versions">
  <span class="rst-current-version" data-toggle="rst-current-version">
    <span class="fa fa-book"> Zephyr Project</span>
    v: latest
    v: {{ current_version if is_release else "latest" }}
    <span class="fa fa-caret-down"></span>
  </span>
  <div class="rst-other-versions">
@@ -363,4 +363,4 @@ in the log.

.. _Eclipse IDE for C/C++ Developers: https://www.eclipse.org/downloads/packages/eclipse-ide-cc-developers/oxygen2
.. _GNU MCU Eclipse plug-ins: https://gnu-mcu-eclipse.github.io/plugins/install/
.. _pyOCD v0.11.0: https://github.com/mbedmicro/pyOCD/releases/tag/v0.11.0
.. _pyOCD v0.11.0: https://github.com/pyocd/pyOCD/releases/tag/v0.11.0
@@ -367,12 +367,11 @@ If multiple boards are placed in the same board folder, then the file
Write your devicetree
*********************

The devicetree file :file:`boards/<vendor>/plank/plank.dts` or
:file:`boards/<vendor>/plank/plank_<qualifiers>.dts` describes your board
The devicetree file :file:`boards/<vendor>/plank/plank_<qualifiers>.dts` describes your board
hardware in the Devicetree Source (DTS) format (as usual, change ``plank`` to
your board's name). If you're new to devicetree, see :ref:`devicetree-intro`.

In general, :file:`plank.dts` should look like this:
In general, :file:`plank_<qualifiers>.dts` should look like this:

.. code-block:: devicetree

@@ -422,16 +421,9 @@ In general, :file:`plank.dts` should look like this:
     status = "okay";
   };

Only one ``.dts`` file will be used, and the most specific file which exists
will be used.

This means that if both :file:`plank.dts` and :file:`plank_soc1_foo.dts` exist,
then when building for ``plank`` / ``plank/soc1``, then :file:`plank.dts` is
used. When building for ``plank//foo`` / ``plank/soc1/foo`` the
:file:`plank_soc1_foo.dts` is used.

This allows board maintainers to write a base devicetree file for the board
or write specific devicetree files for a given board's SoC or variant.
In the case a board has only a single SoC, without any board variants then the dts file can be
named :file:`<plank>.dts` instead, however this is not recommended due to the file silently be
unused if a variant or other SoC is added to the board.

If you're in a hurry, simple hardware can usually be supported by copy/paste
followed by trial and error. If you want to understand details, you will need
@@ -516,7 +508,6 @@ files for a board named ``plank``:
   ├── Kconfig
   ├── Kconfig.plank
   ├── Kconfig.defconfig
   ├── plank_defconfig
   └── plank_<qualifiers>_defconfig

:file:`Kconfig.plank`
@@ -573,23 +564,28 @@ files for a board named ``plank``:
     default y

   if NETWORKING

   config SOC_ETHERNET_DRIVER
     default y

   endif # NETWORKING

   endif # BOARD_PLANK

:file:`plank_defconfig` / :file:`plank_<qualifiers>_defconfig`
:file:`plank_<qualifiers>_defconfig` (or :file:`plank_defconfig` in limited circumstances)
   A Kconfig fragment that is merged as-is into the final build directory
   :file:`.config` whenever an application is compiled for your board.

   If both the common :file:`plank_defconfig` file and one or more board
   qualifiers specific :file:`plank_<qualifiers>_defconfig` files exist, then
   all matching files will be used.
   This allows you to place configuration which is common for all board SoCs,
   CPU clusters, and board variants in the base :file:`plank_defconfig` and only
   place the adjustments specific for a given SoC or board variant in the
   :file:`plank_<qualifiers>_defconfig`.
   :file:`plank_defconfig` can only be used with boards that have no qualifiers, no variants and a
   single SoC present, though this style of naming is not recommended due to samples/tests or
   downstream usage breaking suddenly without warning if a new SoC or board variant/qualifier is
   added to an board in upstream Zephyr.

   .. note::
      Multiple files are not merged and there is no fallback mechanism for files, this means if there
      is a board with 2 different SoCs and each one has 2 board variants, a :file:`plank_defconfig`
      file would be wholly unused, for the first qualifier and variant
      :file:`plank_<soc1>_<variant1>_defconfig` will be used, it will not include other file.

   The ``_defconfig`` should contain mandatory settings for your system clock,
   console, etc. The results are architecture-specific, but typically look
@@ -26,6 +26,121 @@

.. _zephyr_4.1:

.. _zephyr_4.1.1:

Zephyr 4.1.1
############

This is a bugfix release for Zephyr 4.1.0.

Security Vulnerability Related
******************************

The following CVEs are addressed by this release:

* :cve:`2025-27809` `TLS clients may unwittingly skip server authentication
  <https://mbed-tls.readthedocs.io/en/latest/security-advisories/mbedtls-security-advisory-2025-03-1/>`_
* :cve:`2025-27810` `Potential authentication bypass in TLS handshake
  <https://mbed-tls.readthedocs.io/en/latest/security-advisories/mbedtls-security-advisory-2025-03-2/>`_
* :cve:`2025-47917` `Misleading memory management in mbedtls_x509_string_to_names()
  <https://mbed-tls.readthedocs.io/en/latest/security-advisories/mbedtls-security-advisory-2025-06-7/>`_
* :cve:`2025-48965` `NULL pointer dereference after using mbedtls_asn1_store_named_data()
  <https://mbed-tls.readthedocs.io/en/latest/security-advisories/mbedtls-security-advisory-2025-06-6/>`_
* :cve:`2025-49087` `Timing side-channel in block cipher decryption with PKCS#7 padding
  <https://mbed-tls.readthedocs.io/en/latest/security-advisories/mbedtls-security-advisory-2025-06-5/>`_
* :cve:`2025-49600` `Unchecked return value in LMS verification allows signature bypass
  <https://mbed-tls.readthedocs.io/en/latest/security-advisories/mbedtls-security-advisory-2025-06-3/>`_
* :cve:`2025-49601` `Out-of-bounds read in mbedtls_lms_import_public_key()
  <https://mbed-tls.readthedocs.io/en/latest/security-advisories/mbedtls-security-advisory-2025-06-4/>`_
* :cve:`2025-52496` `Race condition in AESNI support detection
  <https://mbed-tls.readthedocs.io/en/latest/security-advisories/mbedtls-security-advisory-2025-06-1/>`_
* :cve:`2025-52497` `Heap buffer under-read when parsing PEM-encrypted material
  <https://mbed-tls.readthedocs.io/en/latest/security-advisories/mbedtls-security-advisory-2025-06-2/>`_

More detailed information can be found in:
https://docs.zephyrproject.org/latest/security/vulnerabilities.html

Issues fixed
************

The following issues are addressed by this release:

* :github:`70344` - Current Atmel SAM flash driver will not erase last page
* :github:`84842` - STM32WBA: zephyr/drivers/flash/flash_stm32wba_fm.c: flash_stm32_erase: unexpected erase size
* :github:`85025` - ``tests/drivers/udc/drivers.usb.udc`` fails on rpi_pico
* :github:`86062` - ``samples/hello_world`` not working on ``intel_socfpga_agilex_socdk/agilex``
* :github:`86609` - net: dhcpv4: deadlock during client stop
* :github:`86735` - frdm_mcxw71 Button is not working.
* :github:`86776` - drivers: retained_mem: Compilation error when using multiple nRF retained memory regions
* :github:`86930` - NXP LPSPI Multi byte word formation incorrect
* :github:`86954` - sample.smf.hsm_psicc2 is flaky on SMP platforms
* :github:`87108` - tests: tests/drivers/spi/spi_loopback: mimxrt1170_evk@A/mimxrt1176/cm7: failed: Failed harness:'Testsuite failed'
* :github:`87223` - net: if: NET_EVENT_L4_CONNECTED is no longer sent when !IPV4_ACD and/or !IPV6_DAD
* :github:`87224` - drivers: counter: nrfx_timer: High current consumption after stopping
* :github:`87323` - Network buffer leak if sending IPv4 TTL 0 or IPv6 hop limit 0 packets
* :github:`87739` - Build of ICE40 bitbang driver fails with PINCTRL
* :github:`87947` - Check possible recursion when parsing DNS response
* :github:`88109` - drivers: nrfx_uarte: pm_device_runtime_put() locks irqs for >500us
* :github:`88233` - net: coap_client library asserts on boot
* :github:`88319` - i2c_mcux_lpi2c_rtio does not build for MCXN947
* :github:`88434` - mbedtls: update to 3.6.3 in 4.1 branch
* :github:`88558` - disco_l475_iot1/arduino_r3_connector.dtsi arduino_i2c should be i2c1
* :github:`89295` - drivers: gpio: adp5585: fix wrong output register during pin configure
* :github:`89331` - Many threads locking two mutexes can cause a crash
* :github:`89342` - mgmt: hawkbit: hawkbit_autohandler_wait() returns to early
* :github:`89349` - Flash Map: Bounds checking not immune to integer overflow
* :github:`89413` - Zephyr SPDX does not pass validation
* :github:`89530` - mgmt: hawkbit: cancellation of action failed
* :github:`89641` - net: dhcp: current DHCP client implementation does not follow RFC2131 and RFC2132
* :github:`89645` - ASSERTION FAIL: prepare_cb: Actual EVENT_OVERHEAD_START_US
* :github:`89990` - STM32: USB: USB CDC IN Endpoint stuck forever after receiving Clear feature - endpoint halt request
* :github:`90005` - net: dns: OOB memory write
* :github:`90103` - http_server: Wildcard resource matching reports incorrect path length
* :github:`90688` - coap_resource_parse_observe returns 0 (register) instead of 1 (deregister) upon a deregister of an unknown observer
* :github:`90738` - wrong lvgl initialization sequence
* :github:`90777` - arch.shared_interrupt.lto.speed compile fail on apollo5 platform
* :github:`90989` - gpio: gpio_adp5585_gpio incorrect gpio gaps handling
* :github:`91118` - Zephyr 4.1 Socket zsock_connect() Fails Immediately with errno 116 (ETIMEDOUT)
* :github:`91428` - soc_flash_nrf: flash_write timeout after bt_disable
* :github:`91799` - Docs version switcher always displays "v: latest"
* :github:`92397` - Missing fallback to a base board.dts when using a board variant without specific dts file
* :github:`93011` - stm32u0: interrupt vectors for lpuart1 and lpuart2 are wrong in device tree
* :github:`93424` - Missing break after IPV6_MULTICAST_IF option processing
* :github:`93594` - Nicla Vision has bad I2C address for VL53L1X sensor
* :github:`93943` - Unable to build LVGL samples on Ubuntu 22.04 for ``native_sim``
* :github:`93986` - Unable to build LVGL with picolibc from source/module
* :github:`94047` - Shell device name tab completion for regulator parents show incorrect names
* :github:`94924` - mbedtls: update to 3.6.4 in 4.1 branch
* :github:`95297` - Links to the pyOCD repository are outdated
* :github:`95768` - Possible TCP connection leak when creating a socket
* :github:`95850` - Applications do not watch MCUboot key file for changes
* :github:`96172` - arch: arm: switch to privilege stack in SVC handler
* :github:`96841` - FRDM_K64F: ``arch.arm.user.stack.float`` test fail
* :github:`97304` - arch: arm: start threads on privileged stack
* :github:`98231` - Build System: defconfig: Base defconfig not used on fully qualified oot board
* :github:`98469` - drivers: bluetooth: h4: insufficient buffer for header + payload
* :github:`98668` - drivers: entropy: non-stop RNG ISR firing on STM32WB09
* :github:`98936` - net: ICMPv4 packets can be reported to ICMPv6 handlers (and vice versa)

Mbed TLS
********

Mbed TLS was updated to version 3.6.4 (from 3.6.2). The release notes can be found at:
https://github.com/Mbed-TLS/mbedtls/releases/tag/mbedtls-3.6.4
https://github.com/Mbed-TLS/mbedtls/releases/tag/mbedtls-3.6.3

Mbed TLS 3.6 is an LTS release that will be supported
with security and bug fixes until at least March 2027.

Trusted Firmware-M (TF-M)
*************************

TF-M was updated to version 2.1.2 (from 2.1.1). The release notes can be found at:
https://trustedfirmware-m.readthedocs.io/en/tf-mv2.1.2/releases/2.1.2.html

.. _zephyr_4.1.0:

Zephyr 4.1.0
############
@@ -312,7 +312,7 @@ static inline void read_payload(const struct device *dev)
 	LOG_DBG("Allocated rx.buf %p", h4->rx.buf);
 
 	buf_tailroom = net_buf_tailroom(h4->rx.buf);
-	if (buf_tailroom < h4->rx.remaining) {
+	if (buf_tailroom < (h4->rx.remaining + h4->rx.hdr_len)) {
 		LOG_ERR("Not enough space in buffer %u/%zu", h4->rx.remaining,
 			buf_tailroom);
 		h4->rx.discard = h4->rx.remaining;

@@ -109,7 +109,13 @@ static int stop(const struct device *dev)
|
||||
{
|
||||
const struct counter_nrfx_config *config = dev->config;
|
||||
|
||||
#if NRF_TIMER_HAS_SHUTDOWN
|
||||
nrf_timer_task_trigger(config->timer, NRF_TIMER_TASK_SHUTDOWN);
|
||||
#else
|
||||
nrf_timer_task_trigger(config->timer, NRF_TIMER_TASK_STOP);
|
||||
nrf_timer_task_trigger(config->timer, NRF_TIMER_TASK_CLEAR);
|
||||
#endif
|
||||
|
||||
#ifdef COUNTER_ANY_FAST
|
||||
struct counter_nrfx_data *data = dev->data;
|
||||
|
||||
|
||||
@@ -301,9 +301,14 @@ static int recover_seed_error(RNG_TypeDef *rng)
|
||||
{
|
||||
ll_rng_clear_seis(rng);
|
||||
|
||||
#if !defined(CONFIG_SOC_SERIES_STM32WB0X)
|
||||
/* After a noise source error is detected, 12 words must be read from the RNG_DR register
|
||||
* and discarded to restart the entropy generation.
|
||||
*/
|
||||
for (int i = 0; i < 12; ++i) {
|
||||
(void)ll_rng_read_rand_data(rng);
|
||||
}
|
||||
#endif /* !CONFIG_SOC_SERIES_STM32WB0X */
|
||||
|
||||
if (ll_rng_is_active_seis(rng) != 0) {
|
||||
return -EIO;
|
||||
@@ -419,7 +424,7 @@ static uint16_t generate_from_isr(uint8_t *buf, uint16_t len)
|
||||
byte = random_byte_get();
|
||||
#if !IRQLESS_TRNG
|
||||
NVIC_ClearPendingIRQ(IRQN);
|
||||
#endif /* IRQLESS_TRNG */
|
||||
#endif /* !IRQLESS_TRNG */
|
||||
|
||||
if (byte < 0) {
|
||||
continue;
|
||||
|
||||
@@ -56,6 +56,7 @@ static inline void ll_rng_clear_seis(RNG_TypeDef *RNGx)
|
||||
#if defined(CONFIG_SOC_SERIES_STM32WB0X)
|
||||
# if defined(CONFIG_SOC_STM32WB09XX)
|
||||
LL_RNG_SetResetHealthErrorFlags(RNGx, 1);
|
||||
WRITE_REG(RNGx->IRQ_SR, RNG_IRQ_SR_ERROR_IRQ);
|
||||
# else
|
||||
LL_RNG_ClearFlag_FAULT(RNGx);
|
||||
# endif
|
||||
|
||||
@@ -396,17 +396,12 @@ static bool sam_flash_erase_foreach_page(const struct flash_pages_info *info, vo
|
||||
/* Check if we've reached the end of pages to erase */
|
||||
if (info->start_offset >= erase_data->section_end) {
|
||||
/* Succeeded, stop iterating */
|
||||
erase_data->succeeded = true;
|
||||
return false;
|
||||
}
|
||||
|
||||
if (sam_flash_unlock_page(dev, info) < 0) {
|
||||
/* Failed to unlock page, stop iterating */
|
||||
return false;
|
||||
}
|
||||
|
||||
if (sam_flash_erase_page(dev, info) < 0) {
|
||||
/* Failed to erase page, stop iterating */
|
||||
if (sam_flash_unlock_page(dev, info) || sam_flash_erase_page(dev, info)) {
|
||||
/* Failed to unlock page and erase page, stop iterating */
|
||||
erase_data->succeeded = false;
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -436,7 +431,7 @@ static int sam_flash_erase(const struct device *dev, off_t offset, size_t size)
|
||||
key = k_spin_lock(&sam_data->lock);
|
||||
sam_data->erase_data.section_start = offset;
|
||||
sam_data->erase_data.section_end = offset + size;
|
||||
sam_data->erase_data.succeeded = false;
|
||||
sam_data->erase_data.succeeded = true;
|
||||
flash_page_foreach(dev, sam_flash_erase_foreach_page, sam_data);
|
||||
if (!sam_data->erase_data.succeeded) {
|
||||
k_spin_unlock(&sam_data->lock, key);
|
||||
|
||||
@@ -93,7 +93,7 @@ static int flash_stm32_erase(const struct device *dev, off_t offset,
|
||||
size_t len)
|
||||
{
|
||||
int rc;
|
||||
int sect_num = (len / FLASH_PAGE_SIZE) + 1;
|
||||
int sect_num;
|
||||
|
||||
if (!flash_stm32_valid_range(dev, offset, len, true)) {
|
||||
LOG_ERR("Erase range invalid. Offset: %p, len: %zu",
|
||||
@@ -105,6 +105,9 @@ static int flash_stm32_erase(const struct device *dev, off_t offset,
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* len is a multiple of FLASH_PAGE_SIZE */
|
||||
sect_num = len / FLASH_PAGE_SIZE;
|
||||
|
||||
flash_stm32_sem_take(dev);
|
||||
|
||||
LOG_DBG("Erase offset: %p, page: %ld, len: %zu, sect num: %d",
|
||||
|
||||
@@ -229,7 +229,7 @@ unlock:
|
||||
(void)gpio_pin_configure_dt(&config_bitbang->clk, GPIO_DISCONNECTED);
|
||||
(void)gpio_pin_configure_dt(&config_bitbang->pico, GPIO_DISCONNECTED);
|
||||
#ifdef CONFIG_PINCTRL
|
||||
(void)pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT);
|
||||
(void)pinctrl_apply_state(config_bitbang->pincfg, PINCTRL_STATE_DEFAULT);
|
||||
#endif /* CONFIG_PINCTRL */
|
||||
|
||||
k_spin_unlock(&data->lock, key);
|
||||
|
||||
@@ -76,8 +76,7 @@ static int gpio_adp5585_config(const struct device *dev, gpio_pin_t pin, gpio_fl
|
||||
uint8_t reg_value;
|
||||
|
||||
/* ADP5585 has non-contiguous gpio pin layouts, account for this */
|
||||
if ((pin & cfg->common.port_pin_mask) == 0) {
|
||||
LOG_ERR("pin %d is invalid for this device", pin);
|
||||
if ((BIT(pin) & cfg->common.port_pin_mask) == 0) {
|
||||
return -ENOTSUP;
|
||||
}
|
||||
|
||||
@@ -143,14 +142,14 @@ static int gpio_adp5585_config(const struct device *dev, gpio_pin_t pin, gpio_fl
|
||||
data->output |= BIT(pin);
|
||||
}
|
||||
if (bank == 0) {
|
||||
/* reg_value for ADP5585_GPO_OUT_MODE */
|
||||
/* reg_value for ADP5585_GPO_DATA_OUT */
|
||||
reg_value = (uint8_t)data->output;
|
||||
} else {
|
||||
/* reg_value for ADP5585_GPO_OUT_MODE */
|
||||
/* reg_value for ADP5585_GPO_DATA_OUT */
|
||||
reg_value = (uint8_t)(data->output >> 8);
|
||||
}
|
||||
ret = i2c_reg_write_byte_dt(&parent_cfg->i2c_bus,
|
||||
ADP5585_GPO_OUT_MODE_A + bank,
|
||||
ADP5585_GPO_DATA_OUT_A + bank,
|
||||
reg_value);
|
||||
if (ret != 0) {
|
||||
goto out;
|
||||
@@ -194,7 +193,7 @@ static int gpio_adp5585_port_read(const struct device *dev, gpio_port_value_t *v
|
||||
|
||||
/** Read Input Register */
|
||||
|
||||
uint8_t gpi_status_reg;
|
||||
uint8_t gpi_status_reg = ADP5585_GPI_STATUS_A;
|
||||
uint8_t gpi_status_buf[2];
|
||||
|
||||
ret = i2c_write_read_dt(&parent_cfg->i2c_bus, &gpi_status_reg, 1U,
|
||||
@@ -225,6 +224,11 @@ static int gpio_adp5585_port_write(const struct device *dev, gpio_port_pins_t ma
|
||||
uint8_t reg_value;
|
||||
int ret;
|
||||
|
||||
/* ADP5585 has non-contiguous gpio pin layouts, account for this */
|
||||
if ((mask & cfg->common.port_pin_mask) == 0) {
|
||||
return -ENOTSUP;
|
||||
}
|
||||
|
||||
/* Can't do I2C bus operations from an ISR */
|
||||
if (k_is_in_isr()) {
|
||||
return -EWOULDBLOCK;
|
||||
@@ -288,8 +292,7 @@ static int gpio_adp5585_pin_interrupt_configure(const struct device *dev, gpio_p
|
||||
}
|
||||
|
||||
/* ADP5585 has non-contiguous gpio pin layouts, account for this */
|
||||
if ((pin & cfg->common.port_pin_mask) == 0) {
|
||||
LOG_ERR("pin %d is invalid for this device", pin);
|
||||
if ((BIT(pin) & cfg->common.port_pin_mask) == 0) {
|
||||
return -ENOTSUP;
|
||||
}
|
||||
|
||||
|
||||
@@ -16,6 +16,9 @@
|
||||
#include <zephyr/kernel.h>
|
||||
#include <zephyr/irq.h>
|
||||
#include <fsl_lpi2c.h>
|
||||
#if CONFIG_NXP_LP_FLEXCOMM
|
||||
#include <zephyr/drivers/mfd/nxp_lp_flexcomm.h>
|
||||
#endif
|
||||
|
||||
#include <zephyr/drivers/pinctrl.h>
|
||||
|
||||
@@ -280,7 +283,11 @@ static void mcux_lpi2c_isr(const struct device *dev)
|
||||
struct mcux_lpi2c_data *data = dev->data;
|
||||
LPI2C_Type *base = (LPI2C_Type *)DEVICE_MMIO_NAMED_GET(dev, reg_base);
|
||||
|
||||
#if CONFIG_HAS_MCUX_FLEXCOMM
|
||||
LPI2C_MasterTransferHandleIRQ(LPI2C_GetInstance(base), &data->handle);
|
||||
#else
|
||||
LPI2C_MasterTransferHandleIRQ(base, &data->handle);
|
||||
#endif
|
||||
}
|
||||
|
||||
static int mcux_lpi2c_init(const struct device *dev)
|
||||
@@ -346,10 +353,41 @@ static DEVICE_API(i2c, mcux_lpi2c_driver_api) = {
|
||||
#define I2C_MCUX_LPI2C_SDA_INIT(n)
|
||||
#endif /* CONFIG_I2C_MCUX_LPI2C_BUS_RECOVERY */
|
||||
|
||||
#define I2C_MCUX_LPI2C_MODULE_IRQ_CONNECT(n) \
|
||||
do { \
|
||||
IRQ_CONNECT(DT_INST_IRQN(n), \
|
||||
DT_INST_IRQ(n, priority), \
|
||||
mcux_lpi2c_isr, \
|
||||
DEVICE_DT_INST_GET(n), 0); \
|
||||
irq_enable(DT_INST_IRQN(n)); \
|
||||
} while (false)
|
||||
|
||||
#define I2C_MCUX_LPI2C_MODULE_IRQ(n) \
|
||||
IF_ENABLED(DT_INST_IRQ_HAS_IDX(n, 0), \
|
||||
(I2C_MCUX_LPI2C_MODULE_IRQ_CONNECT(n)))
|
||||
|
||||
/* When using LP Flexcomm driver, register the interrupt handler
|
||||
* so we receive notification from the LP Flexcomm interrupt handler.
|
||||
*/
|
||||
#define I2C_MCUX_LPI2C_LPFLEXCOMM_IRQ_FUNC(n) \
|
||||
nxp_lp_flexcomm_setirqhandler(DEVICE_DT_GET(DT_INST_PARENT(n)), \
|
||||
DEVICE_DT_INST_GET(n), \
|
||||
LP_FLEXCOMM_PERIPH_LPI2C, \
|
||||
mcux_lpi2c_isr)
|
||||
|
||||
#define I2C_MCUX_LPI2C_IRQ_SETUP_FUNC(n) \
|
||||
COND_CODE_1(DT_NODE_HAS_COMPAT(DT_INST_PARENT(n), \
|
||||
nxp_lp_flexcomm), \
|
||||
(I2C_MCUX_LPI2C_LPFLEXCOMM_IRQ_FUNC(n)), \
|
||||
(I2C_MCUX_LPI2C_MODULE_IRQ(n)))
|
||||
|
||||
#define I2C_MCUX_LPI2C_INIT(n) \
|
||||
PINCTRL_DT_INST_DEFINE(n); \
|
||||
\
|
||||
static void mcux_lpi2c_config_func_##n(const struct device *dev); \
|
||||
static void mcux_lpi2c_config_func_##n(const struct device *dev)\
|
||||
{ \
|
||||
I2C_MCUX_LPI2C_IRQ_SETUP_FUNC(n); \
|
||||
} \
|
||||
\
|
||||
static const struct mcux_lpi2c_config mcux_lpi2c_config_##n = { \
|
||||
DEVICE_MMIO_NAMED_ROM_INIT(reg_base, DT_DRV_INST(n)), \
|
||||
@@ -378,16 +416,6 @@ static DEVICE_API(i2c, mcux_lpi2c_driver_api) = {
|
||||
&mcux_lpi2c_data_##n, \
|
||||
&mcux_lpi2c_config_##n, POST_KERNEL, \
|
||||
CONFIG_I2C_INIT_PRIORITY, \
|
||||
&mcux_lpi2c_driver_api); \
|
||||
\
|
||||
static void mcux_lpi2c_config_func_##n(const struct device *dev)\
|
||||
{ \
|
||||
IRQ_CONNECT(DT_INST_IRQN(n), \
|
||||
DT_INST_IRQ(n, priority), \
|
||||
mcux_lpi2c_isr, \
|
||||
DEVICE_DT_INST_GET(n), 0); \
|
||||
\
|
||||
irq_enable(DT_INST_IRQN(n)); \
|
||||
}
|
||||
&mcux_lpi2c_driver_api);
|
||||
|
||||
DT_INST_FOREACH_STATUS_OKAY(I2C_MCUX_LPI2C_INIT)
|
||||
|
||||
@@ -527,7 +527,12 @@ static bool device_is_regulator(const struct device *dev)
|
||||
return DEVICE_API_IS(regulator, dev);
|
||||
}
|
||||
|
||||
static void device_name_get(size_t idx, struct shell_static_entry *entry)
|
||||
static bool device_is_regulator_parent(const struct device *dev)
|
||||
{
|
||||
return DEVICE_API_IS(regulator_parent, dev);
|
||||
}
|
||||
|
||||
static void device_name_get_regulator(size_t idx, struct shell_static_entry *entry)
|
||||
{
|
||||
const struct device *dev = shell_device_filter(idx, device_is_regulator);
|
||||
|
||||
@@ -537,7 +542,18 @@ static void device_name_get(size_t idx, struct shell_static_entry *entry)
|
||||
entry->subcmd = NULL;
|
||||
}
|
||||
|
||||
SHELL_DYNAMIC_CMD_CREATE(dsub_device_name, device_name_get);
|
||||
static void device_name_get_regulator_parent(size_t idx, struct shell_static_entry *entry)
|
||||
{
|
||||
const struct device *dev = shell_device_filter(idx, device_is_regulator_parent);
|
||||
|
||||
entry->syntax = (dev != NULL) ? dev->name : NULL;
|
||||
entry->handler = NULL;
|
||||
entry->help = NULL;
|
||||
entry->subcmd = NULL;
|
||||
}
|
||||
|
||||
SHELL_DYNAMIC_CMD_CREATE(dsub_device_name, device_name_get_regulator);
|
||||
SHELL_DYNAMIC_CMD_CREATE(dsub_device_name_parent, device_name_get_regulator_parent);
|
||||
|
||||
SHELL_STATIC_SUBCMD_SET_CREATE(
|
||||
sub_regulator_cmds,
|
||||
@@ -601,11 +617,11 @@ SHELL_STATIC_SUBCMD_SET_CREATE(
|
||||
"Get errors\n"
|
||||
"Usage: errors <device>",
|
||||
cmd_errors, 2, 0),
|
||||
SHELL_CMD_ARG(dvsset, &dsub_device_name,
|
||||
SHELL_CMD_ARG(dvsset, &dsub_device_name_parent,
|
||||
"Set regulator dynamic voltage scaling state\n"
|
||||
"Usage: dvsset <device> <state identifier>",
|
||||
cmd_dvsset, 3, 0),
|
||||
SHELL_CMD_ARG(shipmode, &dsub_device_name,
|
||||
SHELL_CMD_ARG(shipmode, &dsub_device_name_parent,
|
||||
"Enable regulator ship mode\n"
|
||||
"Usage: shipmode <device>",
|
||||
cmd_shipmode, 2, 0),
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
|
||||
#define _BUILD_MEM_REGION(node_id) \
|
||||
{.dt_addr = DT_REG_ADDR(DT_PARENT(node_id)),\
|
||||
.dt_size = DT_REG_SIZE(DT_PARENT(node_id))}
|
||||
.dt_size = DT_REG_SIZE(DT_PARENT(node_id))},
|
||||
|
||||
struct ret_mem_region {
|
||||
uintptr_t dt_addr;
|
||||
|
||||
@@ -2506,6 +2506,18 @@ static int uarte_instance_init(const struct device *dev,
|
||||
: UART_CFG_FLOW_CTRL_NONE, \
|
||||
}
|
||||
|
||||
/* Macro determines if PM actions are interrupt safe. They are in case of
|
||||
* asynchronous API (except for instance in fast power domain) and non-asynchronous
|
||||
* API if RX is disabled. Macro must resolve to a literal 1 or 0.
|
||||
*/
|
||||
#define UARTE_PM_ISR_SAFE(idx) \
|
||||
COND_CODE_1(INSTANCE_IS_FAST_PD(_, /*empty*/, idx, _), \
|
||||
(0), \
|
||||
(COND_CODE_1(CONFIG_UART_##idx##_ASYNC, \
|
||||
(PM_DEVICE_ISR_SAFE), \
|
||||
(COND_CODE_1(UARTE_PROP(idx, disable_rx), \
|
||||
(PM_DEVICE_ISR_SAFE), (0)))))) \
|
||||
|
||||
#define UART_NRF_UARTE_DEVICE(idx) \
|
||||
NRF_DT_CHECK_NODE_HAS_PINCTRL_SLEEP(UARTE(idx)); \
|
||||
UARTE_INT_DRIVEN(idx); \
|
||||
@@ -2578,8 +2590,7 @@ static int uarte_instance_init(const struct device *dev,
|
||||
} \
|
||||
\
|
||||
PM_DEVICE_DT_DEFINE(UARTE(idx), uarte_nrfx_pm_action, \
|
||||
COND_CODE_1(INSTANCE_IS_FAST_PD(_, /*empty*/, idx, _),\
|
||||
(0), (PM_DEVICE_ISR_SAFE))); \
|
||||
UARTE_PM_ISR_SAFE(idx)); \
|
||||
\
|
||||
DEVICE_DT_DEFINE(UARTE(idx), \
|
||||
uarte_##idx##_init, \
|
||||
|
||||
@@ -621,7 +621,7 @@ static int uart_ns16550_configure(const struct device *dev,
 	}
 
 	ret = clock_control_on(dev_cfg->clock_dev, dev_cfg->clock_subsys);
-	if (ret != 0 && ret != -EALREADY) {
+	if (ret != 0 && ret != -EALREADY && ret != -ENOSYS) {
 		goto out;
 	}

@@ -115,7 +115,7 @@ static inline uint32_t lpspi_next_tx_word(const struct device *dev, int offset)
|
||||
uint32_t next_word = 0;
|
||||
|
||||
for (uint8_t i = 0; i < num_bytes; i++) {
|
||||
next_word |= *byte << (BITS_PER_BYTE * i);
|
||||
next_word |= byte[i] << (BITS_PER_BYTE * i);
|
||||
}
|
||||
|
||||
return next_word;
|
||||
@@ -238,7 +238,8 @@ static int transceive(const struct device *dev, const struct spi_config *spi_cfg
|
||||
LPSPI_Type *base = (LPSPI_Type *)DEVICE_MMIO_NAMED_GET(dev, reg_base);
|
||||
struct spi_mcux_data *data = dev->data;
|
||||
struct lpspi_driver_data *lpspi_data = (struct lpspi_driver_data *)data->driver_data;
|
||||
int ret;
|
||||
struct spi_context *ctx = &data->ctx;
|
||||
int ret = 0;
|
||||
|
||||
spi_context_lock(&data->ctx, asynchronous, cb, userdata, spi_cfg);
|
||||
|
||||
@@ -246,14 +247,14 @@ static int transceive(const struct device *dev, const struct spi_config *spi_cfg
|
||||
if (lpspi_data->word_size_bytes > 4) {
|
||||
LOG_ERR("Maximum 4 byte word size");
|
||||
ret = -EINVAL;
|
||||
return ret;
|
||||
goto error;
|
||||
}
|
||||
|
||||
spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, lpspi_data->word_size_bytes);
|
||||
spi_context_buffers_setup(ctx, tx_bufs, rx_bufs, lpspi_data->word_size_bytes);
|
||||
|
||||
ret = spi_mcux_configure(dev, spi_cfg);
|
||||
if (ret) {
|
||||
return ret;
|
||||
goto error;
|
||||
}
|
||||
|
||||
LPSPI_FlushFifo(base, true, true);
|
||||
@@ -261,7 +262,7 @@ static int transceive(const struct device *dev, const struct spi_config *spi_cfg
|
||||
LPSPI_DisableInterrupts(base, (uint32_t)kLPSPI_AllInterruptEnable);
|
||||
|
||||
LOG_DBG("Starting LPSPI transfer");
|
||||
spi_context_cs_control(&data->ctx, true);
|
||||
spi_context_cs_control(ctx, true);
|
||||
|
||||
LPSPI_SetFifoWatermarks(base, 0, 0);
|
||||
LPSPI_Enable(base, true);
|
||||
@@ -277,7 +278,11 @@ static int transceive(const struct device *dev, const struct spi_config *spi_cfg
|
||||
LPSPI_EnableInterrupts(base, (uint32_t)kLPSPI_TxInterruptEnable |
|
||||
(uint32_t)kLPSPI_RxInterruptEnable);
|
||||
|
||||
return spi_context_wait_for_completion(&data->ctx);
|
||||
return spi_context_wait_for_completion(ctx);
|
||||
|
||||
error:
|
||||
spi_context_release(ctx, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int spi_mcux_transceive_sync(const struct device *dev, const struct spi_config *spi_cfg,
|
||||
|
||||
@@ -865,6 +865,10 @@ int usb_dc_ep_clear_stall(const uint8_t ep)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (!ep_state->ep_stalled) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
status = HAL_PCD_EP_ClrStall(&usb_dc_stm32_state.pcd, ep);
|
||||
if (status != HAL_OK) {
|
||||
LOG_ERR("HAL_PCD_EP_ClrStall failed(0x%02x), %d", ep,
|
||||
|
||||
@@ -16,7 +16,7 @@
 	flash-controller@400e0c00 {
 		flash0: flash@400000 {
 			reg = <0x00400000 DT_SIZE_K(2048)>;
-			erase-blocks = <&eefc 8 2048>, <&eefc 252 8192>;
+			erase-blocks = <&eefc 8 2048>, <&eefc 254 8192>;
 		};
 	};
 };

@@ -229,7 +229,7 @@
 			reg = <0x40008000 0x400>;
 			clocks = <&rcc STM32_CLOCK(APB1, 20U)>;
 			resets = <&rctl STM32_RESET(APB1L, 20U)>;
-			interrupts = <28 0>;
+			interrupts = <29 0>;
 			status = "disabled";
 		};

@@ -238,7 +238,7 @@
 			reg = <0x40008400 0x400>;
 			clocks = <&rcc STM32_CLOCK(APB1, 7U)>;
 			resets = <&rctl STM32_RESET(APB1L, 7U)>;
-			interrupts = <29 0>;
+			interrupts = <28 0>;
 			status = "disabled";
 		};

@@ -26,6 +26,41 @@
|
||||
#define _EXC_RETURN_SPSEL_Msk (1 << 2)
|
||||
#define _EXC_RETURN_FTYPE_Msk (1 << 4)
|
||||
|
||||
/*
|
||||
* Cortex-M Exception Stack Frame Layouts
|
||||
*
|
||||
* When an exception is taken, the processor automatically pushes
|
||||
* registers to the current stack. The layout depends on whether
|
||||
* the FPU is active.
|
||||
*/
|
||||
|
||||
/* Basic hardware-saved exception stack frame (no FPU context):
|
||||
* R0-R3 (4 x 4B = 16B)
|
||||
* R12 (4B)
|
||||
* LR (4B)
|
||||
* Return address (4B)
|
||||
* RETPSR (4B)
|
||||
*--------------------------
|
||||
* Total: 32 bytes
|
||||
*/
|
||||
#define _EXC_HW_SAVED_BASIC_SF_SIZE (32)
|
||||
#define _EXC_HW_SAVED_BASIC_SF_RETADDR_OFFSET (24)
|
||||
#define _EXC_HW_SAVED_BASIC_SF_XPSR_OFFSET (28)
|
||||
|
||||
/* Extended hardware saved stack frame consists of:
|
||||
* R0-R3 (16B)
|
||||
* R12 (4B)
|
||||
* LR (R14) (4B)
|
||||
* Return address (4B)
|
||||
* RETPSR (4B)
|
||||
* S0-S15 (16 x 4B = 64B)
|
||||
* FPSCR (4B)
|
||||
* Reserved (4B)
|
||||
*--------------------------
|
||||
* Total: 104 bytes
|
||||
*/
|
||||
#define _EXC_HW_SAVED_EXTENDED_SF_SIZE (104)
|
||||
|
||||
#else
|
||||
#include <stdint.h>
|
||||
|
||||
|
||||
@@ -128,8 +128,8 @@ struct _thread_arch {
|
||||
|
||||
#if defined(CONFIG_USERSPACE)
|
||||
uint32_t priv_stack_start;
|
||||
#if defined(CONFIG_CPU_AARCH32_CORTEX_R)
|
||||
uint32_t priv_stack_end;
|
||||
#if defined(CONFIG_CPU_AARCH32_CORTEX_R)
|
||||
uint32_t sp_usr;
|
||||
#endif
|
||||
#endif
|
||||
|
||||
@@ -764,7 +764,6 @@ struct bt_bap_stream {
|
||||
void *user_data;
|
||||
|
||||
#if defined(CONFIG_BT_BAP_UNICAST_CLIENT) || defined(__DOXYGEN__)
|
||||
/** @cond INTERNAL_HIDDEN */
|
||||
/**
|
||||
* @brief Audio ISO reference
|
||||
*
|
||||
@@ -781,6 +780,7 @@ struct bt_bap_stream {
|
||||
uint16_t _prev_seq_num;
|
||||
#endif /* CONFIG_BT_BAP_DEBUG_STREAM_SEQ_NUM */
|
||||
|
||||
/** @cond INTERNAL_HIDDEN */
|
||||
/** Internally used list node */
|
||||
sys_snode_t _node;
|
||||
/** @endcond */
|
||||
|
||||
@@ -128,6 +128,10 @@ static inline int clock_control_on(const struct device *dev,
|
||||
const struct clock_control_driver_api *api =
|
||||
(const struct clock_control_driver_api *)dev->api;
|
||||
|
||||
if (api->on == NULL) {
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
||||
return api->on(dev, sys);
|
||||
}
|
||||
|
||||
@@ -147,6 +151,10 @@ static inline int clock_control_off(const struct device *dev,
|
||||
const struct clock_control_driver_api *api =
|
||||
(const struct clock_control_driver_api *)dev->api;
|
||||
|
||||
if (api->off == NULL) {
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
||||
return api->off(dev, sys);
|
||||
}
|
||||
|
||||
|
||||
@@ -401,12 +401,12 @@ static inline char z_log_minimal_level_to_char(int level)
|
||||
#define LOG_FILTER_SLOT_GET(_filters, _id) \
|
||||
((*(_filters) >> LOG_FILTER_SLOT_SHIFT(_id)) & LOG_FILTER_SLOT_MASK)
|
||||
|
||||
#define LOG_FILTER_SLOT_SET(_filters, _id, _filter) \
|
||||
do { \
|
||||
*(_filters) &= ~(LOG_FILTER_SLOT_MASK << \
|
||||
LOG_FILTER_SLOT_SHIFT(_id)); \
|
||||
*(_filters) |= ((_filter) & LOG_FILTER_SLOT_MASK) << \
|
||||
LOG_FILTER_SLOT_SHIFT(_id); \
|
||||
#define LOG_FILTER_SLOT_SET(_filters, _id, _filter) \
|
||||
do { \
|
||||
uint32_t others = *(_filters) & ~(LOG_FILTER_SLOT_MASK << \
|
||||
LOG_FILTER_SLOT_SHIFT(_id)); \
|
||||
*(_filters) = others | (((_filter) & LOG_FILTER_SLOT_MASK) << \
|
||||
LOG_FILTER_SLOT_SHIFT(_id)); \
|
||||
} while (false)
|
||||
|
||||
#define LOG_FILTER_AGGR_SLOT_IDX 0
|
||||
|
||||
@@ -93,6 +93,9 @@ struct net_icmp_ctx {
|
||||
/** Opaque user supplied data */
|
||||
void *user_data;
|
||||
|
||||
/** Address family the handler is registered for */
|
||||
uint8_t family;
|
||||
|
||||
/** ICMP type of the response we are waiting */
|
||||
uint8_t type;
|
||||
|
||||
@@ -157,12 +160,13 @@ struct net_icmp_ping_params {
|
||||
* system.
|
||||
*
|
||||
* @param ctx ICMP context used in this request.
|
||||
* @param family Address family the context is using.
|
||||
* @param type Type of ICMP message we are handling.
|
||||
* @param code Code of ICMP message we are handling.
|
||||
* @param handler Callback function that is called when a response is received.
|
||||
*/
|
||||
int net_icmp_init_ctx(struct net_icmp_ctx *ctx, uint8_t type, uint8_t code,
|
||||
net_icmp_handler_t handler);
|
||||
int net_icmp_init_ctx(struct net_icmp_ctx *ctx, uint8_t family, uint8_t type,
|
||||
uint8_t code, net_icmp_handler_t handler);
|
||||
|
||||
/**
|
||||
* @brief Cleanup the ICMP context structure. This will unregister the ICMP handler
|
||||
|
||||
@@ -699,6 +699,16 @@ bool z_thread_prio_set(struct k_thread *thread, int prio)
|
||||
}
|
||||
|
||||
update_cache(1);
|
||||
} else if (z_is_thread_pending(thread)) {
|
||||
/* Thread is pending, remove it from the waitq
|
||||
* and reinsert it with the new priority to avoid
|
||||
* violating waitq ordering and rb assumptions.
|
||||
*/
|
||||
_wait_q_t *wait_q = pended_on_thread(thread);
|
||||
|
||||
_priq_wait_remove(&wait_q->waitq, thread);
|
||||
thread->base.prio = prio;
|
||||
_priq_wait_add(&wait_q->waitq, thread);
|
||||
} else {
|
||||
thread->base.prio = prio;
|
||||
}
|
||||
|
||||
@@ -16,6 +16,7 @@ zephyr_include_directories(${LVGL_DIR}/src/)
|
||||
zephyr_include_directories(include)
|
||||
|
||||
zephyr_compile_definitions(LV_CONF_INCLUDE_SIMPLE=1)
|
||||
zephyr_library_compile_definitions(_POSIX_C_SOURCE=200809L)
|
||||
zephyr_compile_definitions(LV_CONF_PATH="${CMAKE_CURRENT_SOURCE_DIR}/include/lv_conf.h")
|
||||
|
||||
zephyr_library_sources(
|
||||
@@ -250,6 +251,8 @@ zephyr_library_sources(
|
||||
${LVGL_DIR}/src/stdlib/builtin/lv_sprintf_builtin.c
|
||||
${LVGL_DIR}/src/stdlib/builtin/lv_string_builtin.c
|
||||
${LVGL_DIR}/src/stdlib/builtin/lv_tlsf.c
|
||||
${LVGL_DIR}/src/stdlib/clib/lv_string_clib.c
|
||||
${LVGL_DIR}/src/stdlib/clib/lv_sprintf_clib.c
|
||||
|
||||
${LVGL_DIR}/src/stdlib/clib/lv_mem_core_clib.c
|
||||
${LVGL_DIR}/src/stdlib/clib/lv_sprintf_clib.c
|
||||
|
||||
@@ -12,7 +12,9 @@
|
||||
|
||||
/* Memory manager settings */
|
||||
|
||||
#define LV_USE_STDLIB_MALLOC LV_STDLIB_CUSTOM
|
||||
#define LV_USE_STDLIB_MALLOC LV_STDLIB_CUSTOM
|
||||
#define LV_USE_STDLIB_STRING LV_STDLIB_CLIB
|
||||
#define LV_USE_STDLIB_SPRINTF LV_STDLIB_CLIB
|
||||
|
||||
#if defined(CONFIG_LV_Z_MEM_POOL_HEAP_LIB_C)
|
||||
#define LV_STDLIB_INCLUDE "stdlib.h"
|
||||
@@ -26,9 +28,6 @@
|
||||
#define lv_free_core lvgl_free
|
||||
#endif
|
||||
|
||||
/* Misc settings */
|
||||
#define lv_snprintf snprintf
|
||||
#define lv_vsnprintf vsnprintf
|
||||
#define LV_ASSERT_HANDLER __ASSERT_NO_MSG(false);
|
||||
#define LV_ASSERT_HANDLER_INCLUDE "zephyr/sys/__assert.h"
|
||||
|
||||
|
||||
@@ -232,13 +232,13 @@ int lvgl_init(void)
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
lv_init();
|
||||
lv_tick_set_cb(k_uptime_get_32);
|
||||
|
||||
#if CONFIG_LV_Z_LOG_LEVEL != 0
|
||||
lv_log_register_print_cb(lvgl_log);
|
||||
#endif
|
||||
|
||||
lv_init();
|
||||
lv_tick_set_cb(k_uptime_get_32);
|
||||
|
||||
#ifdef CONFIG_LV_Z_USE_FILESYSTEM
|
||||
lvgl_fs_init();
|
||||
#endif
|
||||
|
||||
@@ -139,11 +139,21 @@ tests:
|
||||
integration_platforms:
|
||||
- qemu_x86
|
||||
sample.net.sockets.echo_server.nsos:
|
||||
harness: console
|
||||
platform_allow:
|
||||
- native_sim
|
||||
- native_sim/native/64
|
||||
extra_args:
|
||||
- EXTRA_CONF_FILE="overlay-nsos.conf"
|
||||
harness_config:
|
||||
type: multi_line
|
||||
ordered: false
|
||||
regex:
|
||||
- "Network connected"
|
||||
- "Waiting for TCP.*IPv4"
|
||||
- "Waiting for TCP.*IPv6"
|
||||
- "Waiting for UDP.*IPv4"
|
||||
- "Waiting for UDP.*IPv6"
|
||||
sample.net.sockets.echo_server.802154.subg:
|
||||
extra_args: EXTRA_CONF_FILE="overlay-802154-subg.conf"
|
||||
platform_allow: beagleconnect_freedom
|
||||
|
||||
@@ -41,7 +41,7 @@ manifest:
|
||||
groups:
|
||||
- optional
|
||||
- name: tf-m-tests
|
||||
revision: 502ea90105ee18f20c78f710e2ba2ded0fc0756e
|
||||
revision: c712761dd5391bf3f38033643d28a736cae89a19
|
||||
path: modules/tee/tf-m/tf-m-tests
|
||||
remote: upstream
|
||||
groups:
|
||||
|
||||
@@ -784,8 +784,18 @@ int ll_init(struct k_sem *sem_rx)
|
||||
|
||||
int ll_deinit(void)
|
||||
{
|
||||
int err;
|
||||
|
||||
ll_reset();
|
||||
return lll_deinit();
|
||||
|
||||
err = lll_deinit();
|
||||
if (err) {
|
||||
return err;
|
||||
}
|
||||
|
||||
err = ticker_deinit(TICKER_INSTANCE_ID_CTLR);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
void ll_reset(void)
|
||||
|
||||
@@ -2227,8 +2227,8 @@ void ull_conn_update_parameters(struct ll_conn *conn, uint8_t is_cu_proc, uint8_
|
||||
uint16_t conn_interval_unit_old;
|
||||
uint16_t conn_interval_unit_new;
|
||||
uint32_t ticks_win_offset = 0U;
|
||||
uint16_t conn_interval_old_us;
|
||||
uint16_t conn_interval_new_us;
|
||||
uint32_t conn_interval_old_us;
|
||||
uint32_t conn_interval_new_us;
|
||||
uint32_t ticks_slot_overhead;
|
||||
uint16_t conn_interval_old;
|
||||
uint16_t conn_interval_new;
|
||||
|
||||
@@ -3464,6 +3464,30 @@ uint8_t ticker_init(uint8_t instance_index, uint8_t count_node, void *node,
|
||||
return TICKER_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Deinitialize ticker instance
|
||||
*
|
||||
* @param instance_index Index of ticker instance
|
||||
*/
|
||||
int ticker_deinit(uint8_t instance_index)
|
||||
{
|
||||
struct ticker_instance *instance;
|
||||
|
||||
if (instance_index >= TICKER_INSTANCE_MAX) {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
instance = &_instance[instance_index];
|
||||
|
||||
if (instance->ticker_id_head != TICKER_NULL) {
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
instance->count_node = 0U;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Check if ticker instance is initialized
|
||||
*
|
||||
|
||||
@@ -171,6 +171,7 @@ uint8_t ticker_init(uint8_t instance_index, uint8_t count_node, void *node,
|
||||
void *user_op, ticker_caller_id_get_cb_t caller_id_get_cb,
|
||||
ticker_sched_cb_t sched_cb,
|
||||
ticker_trigger_set_cb_t trigger_set_cb);
|
||||
int ticker_deinit(uint8_t instance_index);
|
||||
bool ticker_is_initialized(uint8_t instance_index);
|
||||
void ticker_trigger(uint8_t instance_index);
|
||||
void ticker_worker(void *param);
|
||||
|
||||
@@ -288,6 +288,14 @@ void log_core_init(void)
|
||||
if (IS_ENABLED(CONFIG_LOG_RUNTIME_FILTERING)) {
|
||||
z_log_runtime_filters_init();
|
||||
}
|
||||
|
||||
STRUCT_SECTION_FOREACH(log_backend, backend) {
|
||||
uint32_t id;
|
||||
/* As first slot in filtering mask is reserved, backend ID has offset.*/
|
||||
id = LOG_FILTER_FIRST_BACKEND_SLOT_IDX;
|
||||
id += backend - log_backend_get(0);
|
||||
log_backend_id_set(backend, id);
|
||||
}
|
||||
}
|
||||
|
||||
static uint32_t activate_foreach_backend(uint32_t mask)
|
||||
@@ -331,12 +339,6 @@ static uint32_t z_log_init(bool blocking, bool can_sleep)
|
||||
int backend_index = 0;
|
||||
|
||||
STRUCT_SECTION_FOREACH(log_backend, backend) {
|
||||
uint32_t id;
|
||||
/* As first slot in filtering mask is reserved, backend ID has offset.*/
|
||||
id = LOG_FILTER_FIRST_BACKEND_SLOT_IDX;
|
||||
id += backend - log_backend_get(0);
|
||||
log_backend_id_set(backend, id);
|
||||
|
||||
/* Activate autostart backends */
|
||||
if (backend->autostart) {
|
||||
log_backend_init(backend);
|
||||
|
||||
@@ -638,7 +638,7 @@ static int hawkbit_find_cancel_action_id(struct hawkbit_ctl_res *res,
 		return -EINVAL;
 	}
 
-	helper += sizeof("cancelAction/");
+	helper += sizeof("cancelAction/") - 1;
 
 	*cancel_action_id = strtol(helper, NULL, 10);
 	if (*cancel_action_id <= 0) {

@@ -115,6 +115,8 @@ int hawkbit_autohandler_set_delay(k_timeout_t timeout, bool if_bigger)
|
||||
|
||||
void hawkbit_autohandler(bool auto_reschedule)
|
||||
{
|
||||
k_event_clear(&hawkbit_autohandler_event, UINT32_MAX);
|
||||
|
||||
if (auto_reschedule) {
|
||||
k_work_reschedule(&hawkbit_work_handle, K_NO_WAIT);
|
||||
} else {
|
||||
|
||||
@@ -50,16 +50,22 @@ static sys_slist_t offload_handlers = SYS_SLIST_STATIC_INIT(&offload_handlers);
|
||||
|
||||
#define PKT_WAIT_TIME K_SECONDS(1)
|
||||
|
||||
int net_icmp_init_ctx(struct net_icmp_ctx *ctx, uint8_t type, uint8_t code,
|
||||
net_icmp_handler_t handler)
|
||||
int net_icmp_init_ctx(struct net_icmp_ctx *ctx, uint8_t family, uint8_t type,
|
||||
uint8_t code, net_icmp_handler_t handler)
|
||||
{
|
||||
if (ctx == NULL || handler == NULL) {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (family != AF_INET && family != AF_INET6) {
|
||||
NET_ERR("Wrong address family");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
memset(ctx, 0, sizeof(struct net_icmp_ctx));
|
||||
|
||||
ctx->handler = handler;
|
||||
ctx->family = family;
|
||||
ctx->type = type;
|
||||
ctx->code = code;
|
||||
|
||||
@@ -511,6 +517,10 @@ static int icmp_call_handlers(struct net_pkt *pkt,
|
||||
k_mutex_lock(&lock, K_FOREVER);
|
||||
|
||||
SYS_SLIST_FOR_EACH_CONTAINER(&handlers, ctx, node) {
|
||||
if (ip_hdr->family != ctx->family) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (ctx->type == icmp_hdr->type &&
|
||||
(ctx->code == icmp_hdr->code || ctx->code == 0U)) {
|
||||
/* Do not use a handler that is expecting data from different
|
||||
|
||||
@@ -762,14 +762,15 @@ void net_icmpv4_init(void)
|
||||
static struct net_icmp_ctx ctx;
|
||||
int ret;
|
||||
|
||||
ret = net_icmp_init_ctx(&ctx, NET_ICMPV4_ECHO_REQUEST, 0, icmpv4_handle_echo_request);
|
||||
ret = net_icmp_init_ctx(&ctx, AF_INET, NET_ICMPV4_ECHO_REQUEST, 0,
|
||||
icmpv4_handle_echo_request);
|
||||
if (ret < 0) {
|
||||
NET_ERR("Cannot register %s handler (%d)", STRINGIFY(NET_ICMPV4_ECHO_REQUEST),
|
||||
ret);
|
||||
}
|
||||
|
||||
#if defined(CONFIG_NET_IPV4_PMTU)
|
||||
ret = net_icmp_init_ctx(&dst_unreach_ctx, NET_ICMPV4_DST_UNREACH, 0,
|
||||
ret = net_icmp_init_ctx(&dst_unreach_ctx, AF_INET, NET_ICMPV4_DST_UNREACH, 0,
|
||||
icmpv4_handle_dst_unreach);
|
||||
if (ret < 0) {
|
||||
NET_ERR("Cannot register %s handler (%d)", STRINGIFY(NET_ICMPV4_DST_UNREACH),
|
||||
|
||||
@@ -384,7 +384,8 @@ void net_icmpv6_init(void)
|
||||
static struct net_icmp_ctx ctx;
|
||||
int ret;
|
||||
|
||||
ret = net_icmp_init_ctx(&ctx, NET_ICMPV6_ECHO_REQUEST, 0, icmpv6_handle_echo_request);
|
||||
ret = net_icmp_init_ctx(&ctx, AF_INET6, NET_ICMPV6_ECHO_REQUEST, 0,
|
||||
icmpv6_handle_echo_request);
|
||||
if (ret < 0) {
|
||||
NET_ERR("Cannot register %s handler (%d)", STRINGIFY(NET_ICMPV6_ECHO_REQUEST),
|
||||
ret);
|
||||
|
||||
@@ -138,13 +138,18 @@ void net_ipv4_autoconf_start(struct net_if *iface)
|
||||
void net_ipv4_autoconf_reset(struct net_if *iface)
|
||||
{
|
||||
struct net_if_config *cfg;
|
||||
struct net_if_addr *ifaddr;
|
||||
struct net_if *ret;
|
||||
|
||||
cfg = net_if_get_config(iface);
|
||||
if (!cfg) {
|
||||
return;
|
||||
}
|
||||
|
||||
net_if_ipv4_addr_rm(iface, &cfg->ipv4auto.requested_ip);
|
||||
ifaddr = net_if_ipv4_addr_lookup(&cfg->ipv4auto.requested_ip, &ret);
|
||||
if (ifaddr != NULL && ret == iface) {
|
||||
net_if_ipv4_addr_rm(iface, &cfg->ipv4auto.requested_ip);
|
||||
}
|
||||
|
||||
NET_DBG("Autoconf reset for %p", iface);
|
||||
}
|
||||
|
||||
@@ -461,7 +461,7 @@ void net_ipv6_mld_init(void)
|
||||
static struct net_icmp_ctx ctx;
|
||||
int ret;
|
||||
|
||||
ret = net_icmp_init_ctx(&ctx, NET_ICMPV6_MLD_QUERY, 0, handle_mld_query);
|
||||
ret = net_icmp_init_ctx(&ctx, AF_INET6, NET_ICMPV6_MLD_QUERY, 0, handle_mld_query);
|
||||
if (ret < 0) {
|
||||
NET_ERR("Cannot register %s handler (%d)", STRINGIFY(NET_ICMPV6_MLD_QUERY),
|
||||
ret);
|
||||
|
||||
@@ -2865,13 +2865,13 @@ void net_ipv6_nbr_init(void)
|
||||
int ret;
|
||||
|
||||
#if defined(CONFIG_NET_IPV6_NBR_CACHE)
|
||||
ret = net_icmp_init_ctx(&ns_ctx, NET_ICMPV6_NS, 0, handle_ns_input);
|
||||
ret = net_icmp_init_ctx(&ns_ctx, AF_INET6, NET_ICMPV6_NS, 0, handle_ns_input);
|
||||
if (ret < 0) {
|
||||
NET_ERR("Cannot register %s handler (%d)", STRINGIFY(NET_ICMPV6_NS),
|
||||
ret);
|
||||
}
|
||||
|
||||
ret = net_icmp_init_ctx(&na_ctx, NET_ICMPV6_NA, 0, handle_na_input);
|
||||
ret = net_icmp_init_ctx(&na_ctx, AF_INET6, NET_ICMPV6_NA, 0, handle_na_input);
|
||||
if (ret < 0) {
|
||||
NET_ERR("Cannot register %s handler (%d)", STRINGIFY(NET_ICMPV6_NA),
|
||||
ret);
|
||||
@@ -2880,7 +2880,7 @@ void net_ipv6_nbr_init(void)
|
||||
k_work_init_delayable(&ipv6_ns_reply_timer, ipv6_ns_reply_timeout);
|
||||
#endif
|
||||
#if defined(CONFIG_NET_IPV6_ND)
|
||||
ret = net_icmp_init_ctx(&ra_ctx, NET_ICMPV6_RA, 0, handle_ra_input);
|
||||
ret = net_icmp_init_ctx(&ra_ctx, AF_INET6, NET_ICMPV6_RA, 0, handle_ra_input);
|
||||
if (ret < 0) {
|
||||
NET_ERR("Cannot register %s handler (%d)", STRINGIFY(NET_ICMPV6_RA),
|
||||
ret);
|
||||
@@ -2891,7 +2891,7 @@ void net_ipv6_nbr_init(void)
|
||||
#endif
|
||||
|
||||
#if defined(CONFIG_NET_IPV6_PMTU)
|
||||
ret = net_icmp_init_ctx(&ptb_ctx, NET_ICMPV6_PACKET_TOO_BIG, 0, handle_ptb_input);
|
||||
ret = net_icmp_init_ctx(&ptb_ctx, AF_INET6, NET_ICMPV6_PACKET_TOO_BIG, 0, handle_ptb_input);
|
||||
if (ret < 0) {
|
||||
NET_ERR("Cannot register %s handler (%d)", STRINGIFY(NET_ICMPV6_PACKET_TOO_BIG),
|
||||
ret);
|
||||
|
||||
@@ -616,6 +616,13 @@ int net_context_get(sa_family_t family, enum net_sock_type type, uint16_t proto,
|
||||
k_sem_give(&contexts_lock);
|
||||
|
||||
if (ret < 0) {
|
||||
if (ret == -EADDRINUSE &&
|
||||
!net_if_is_ip_offloaded(net_if_get_default()) &&
|
||||
proto == IPPROTO_TCP) {
|
||||
/* Free the TCP context that we allocated earlier */
|
||||
net_tcp_put(&contexts[i]);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
@@ -215,7 +215,8 @@ static inline int check_ip(struct net_pkt *pkt)
|
||||
family = net_pkt_family(pkt);
|
||||
ret = 0;
|
||||
|
||||
if (IS_ENABLED(CONFIG_NET_IPV6) && family == AF_INET6) {
|
||||
if (IS_ENABLED(CONFIG_NET_IPV6) && family == AF_INET6 &&
|
||||
net_pkt_ll_proto_type(pkt) == NET_ETH_PTYPE_IPV6) {
|
||||
/* Drop IPv6 packet if hop limit is 0 */
|
||||
if (NET_IPV6_HDR(pkt)->hop_limit == 0) {
|
||||
NET_DBG("DROP: IPv6 hop limit");
|
||||
@@ -288,7 +289,8 @@ static inline int check_ip(struct net_pkt *pkt)
|
||||
goto drop;
|
||||
}
|
||||
|
||||
} else if (IS_ENABLED(CONFIG_NET_IPV4) && family == AF_INET) {
|
||||
} else if (IS_ENABLED(CONFIG_NET_IPV4) && family == AF_INET &&
|
||||
net_pkt_ll_proto_type(pkt) == NET_ETH_PTYPE_IP) {
|
||||
/* Drop IPv4 packet if ttl is 0 */
|
||||
if (NET_IPV4_HDR(pkt)->ttl == 0) {
|
||||
NET_DBG("DROP: IPv4 ttl");
|
||||
@@ -392,6 +394,7 @@ int net_send_data(struct net_pkt *pkt)
|
||||
* we just silently drop the packet by returning 0.
|
||||
*/
|
||||
if (status == -ENOMSG) {
|
||||
net_pkt_unref(pkt);
|
||||
ret = 0;
|
||||
goto err;
|
||||
}
|
||||
|
||||
@@ -1469,12 +1469,7 @@ static inline void iface_ipv6_dad_init(void)
|
||||
}
|
||||
|
||||
#else
|
||||
static inline void net_if_ipv6_start_dad(struct net_if *iface,
|
||||
struct net_if_addr *ifaddr)
|
||||
{
|
||||
ifaddr->addr_state = NET_ADDR_PREFERRED;
|
||||
}
|
||||
|
||||
#define net_if_ipv6_start_dad(...)
|
||||
#define iface_ipv6_dad_init(...)
|
||||
#endif /* CONFIG_NET_IPV6_DAD */
|
||||
|
||||
@@ -1816,11 +1811,7 @@ static void address_start_timer(struct net_if_addr *ifaddr, uint32_t vlifetime)
|
||||
}
|
||||
#else /* CONFIG_NET_NATIVE_IPV6 */
|
||||
#define address_start_timer(...)
|
||||
static inline void net_if_ipv6_start_dad(struct net_if *iface,
|
||||
struct net_if_addr *ifaddr)
|
||||
{
|
||||
ifaddr->addr_state = NET_ADDR_PREFERRED;
|
||||
}
|
||||
#define net_if_ipv6_start_dad(...)
|
||||
#define join_mcast_nodes(...)
|
||||
#endif /* CONFIG_NET_NATIVE_IPV6 */
|
||||
|
||||
@@ -2037,7 +2028,8 @@ struct net_if_addr *net_if_ipv6_addr_add(struct net_if *iface,
|
||||
net_sprint_ipv6_addr(addr),
|
||||
net_addr_type2str(addr_type));
|
||||
|
||||
if (!(l2_flags_get(iface) & NET_L2_POINT_TO_POINT) &&
|
||||
if (IS_ENABLED(CONFIG_NET_IPV6_DAD) &&
|
||||
!(l2_flags_get(iface) & NET_L2_POINT_TO_POINT) &&
|
||||
!net_ipv6_is_addr_loopback(addr) &&
|
||||
!net_if_flag_is_set(iface, NET_IF_IPV6_NO_ND)) {
|
||||
/* The groups are joined without locks held */
|
||||
@@ -4285,6 +4277,11 @@ void net_if_ipv4_acd_failed(struct net_if *iface, struct net_if_addr *ifaddr)
|
||||
|
||||
void net_if_ipv4_start_acd(struct net_if *iface, struct net_if_addr *ifaddr)
|
||||
{
|
||||
if ((l2_flags_get(iface) & NET_L2_POINT_TO_POINT) ||
|
||||
net_ipv4_is_addr_loopback(&ifaddr->address.in_addr)) {
|
||||
return;
|
||||
}
|
||||
|
||||
ifaddr->addr_state = NET_ADDR_TENTATIVE;
|
||||
|
||||
if (net_if_is_up(iface)) {
|
||||
@@ -4369,13 +4366,7 @@ out:
|
||||
net_if_unlock(iface);
|
||||
}
|
||||
#else
|
||||
void net_if_ipv4_start_acd(struct net_if *iface, struct net_if_addr *ifaddr)
|
||||
{
|
||||
ARG_UNUSED(iface);
|
||||
|
||||
ifaddr->addr_state = NET_ADDR_PREFERRED;
|
||||
}
|
||||
|
||||
#define net_if_ipv4_start_acd(...)
|
||||
#define net_if_start_acd(...)
|
||||
#endif /* CONFIG_NET_IPV4_ACD */
|
||||
|
||||
@@ -4388,6 +4379,7 @@ struct net_if_addr *net_if_ipv4_addr_add(struct net_if *iface,
|
||||
struct net_if_addr *ifaddr = NULL;
|
||||
struct net_if_addr_ipv4 *cur;
|
||||
struct net_if_ipv4 *ipv4;
|
||||
bool do_acd = false;
|
||||
int idx;
|
||||
|
||||
net_if_lock(iface);
|
||||
@@ -4456,10 +4448,11 @@ struct net_if_addr *net_if_ipv4_addr_add(struct net_if *iface,
|
||||
net_sprint_ipv4_addr(addr),
|
||||
net_addr_type2str(addr_type));
|
||||
|
||||
if (!(l2_flags_get(iface) & NET_L2_POINT_TO_POINT) &&
|
||||
if (IS_ENABLED(CONFIG_NET_IPV4_ACD) &&
|
||||
!(l2_flags_get(iface) & NET_L2_POINT_TO_POINT) &&
|
||||
!net_ipv4_is_addr_loopback(addr)) {
|
||||
/* ACD is started after the lock is released. */
|
||||
;
|
||||
do_acd = true;
|
||||
} else {
|
||||
ifaddr->addr_state = NET_ADDR_PREFERRED;
|
||||
}
|
||||
@@ -4472,7 +4465,9 @@ struct net_if_addr *net_if_ipv4_addr_add(struct net_if *iface,
|
||||
|
||||
net_if_unlock(iface);
|
||||
|
||||
net_if_ipv4_start_acd(iface, ifaddr);
|
||||
if (do_acd) {
|
||||
net_if_ipv4_start_acd(iface, ifaddr);
|
||||
}
|
||||
|
||||
return ifaddr;
|
||||
}
|
||||
|
||||
@@ -1121,6 +1121,10 @@ struct coap_client_option coap_client_option_initial_block2(void)
|
||||
return block2;
|
||||
}
|
||||
|
||||
#define COAP_CLIENT_THREAD_PRIORITY CLAMP(CONFIG_COAP_CLIENT_THREAD_PRIORITY, \
|
||||
K_HIGHEST_APPLICATION_THREAD_PRIO, \
|
||||
K_LOWEST_APPLICATION_THREAD_PRIO)
|
||||
|
||||
K_THREAD_DEFINE(coap_client_recv_thread, CONFIG_COAP_CLIENT_STACK_SIZE,
|
||||
coap_client_recv, NULL, NULL, NULL,
|
||||
CONFIG_COAP_CLIENT_THREAD_PRIORITY, 0, 0);
|
||||
COAP_CLIENT_THREAD_PRIORITY, 0, 0);
|
||||
|
||||
@@ -697,6 +697,12 @@ int coap_resource_parse_observe(struct coap_resource *resource, const struct coa
|
||||
ret = coap_service_remove_observer(service, resource, addr, token, tkl);
|
||||
if (ret < 0) {
|
||||
LOG_WRN("Failed to remove observer (%d)", ret);
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
if (ret == 0) {
|
||||
/* Observer not found */
|
||||
ret = -ENOENT;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -544,8 +544,6 @@ static uint32_t dhcpv4_send_request(struct net_if *iface)
|
||||
struct net_pkt *pkt = NULL;
|
||||
uint32_t timeout = UINT32_MAX;
|
||||
|
||||
iface->config.dhcpv4.xid++;
|
||||
|
||||
switch (iface->config.dhcpv4.state) {
|
||||
case NET_DHCPV4_DISABLED:
|
||||
case NET_DHCPV4_INIT:
|
||||
@@ -978,6 +976,13 @@ static bool dhcpv4_parse_options(struct net_pkt *pkt,
|
||||
goto end;
|
||||
}
|
||||
|
||||
if (type == DHCPV4_OPTIONS_PAD) {
|
||||
/* Pad option has a fixed 1-byte length and should be
|
||||
* ignored.
|
||||
*/
|
||||
continue;
|
||||
}
|
||||
|
||||
if (net_pkt_read_u8(pkt, &length)) {
|
||||
NET_ERR("option parsing, bad length");
|
||||
return false;
|
||||
@@ -1555,6 +1560,11 @@ static void dhcpv4_iface_event_handler(struct net_mgmt_event_callback *cb,
|
||||
{
|
||||
sys_snode_t *node = NULL;
|
||||
|
||||
if (mgmt_event != NET_EVENT_IF_UP &&
|
||||
mgmt_event != NET_EVENT_IF_DOWN) {
|
||||
return;
|
||||
}
|
||||
|
||||
k_mutex_lock(&lock, K_FOREVER);
|
||||
|
||||
SYS_SLIST_FOR_EACH_NODE(&dhcpv4_ifaces, node) {
|
||||
@@ -1602,6 +1612,16 @@ static void dhcpv4_acd_event_handler(struct net_mgmt_event_callback *cb,
|
||||
sys_snode_t *node = NULL;
|
||||
struct in_addr *addr;
|
||||
|
||||
if (mgmt_event != NET_EVENT_IPV4_ACD_FAILED &&
|
||||
mgmt_event != NET_EVENT_IPV4_ACD_CONFLICT) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (cb->info_length != sizeof(struct in_addr)) {
|
||||
return;
|
||||
}
|
||||
|
||||
addr = (struct in_addr *)cb->info;
|
||||
|
||||
k_mutex_lock(&lock, K_FOREVER);
|
||||
|
||||
@@ -1615,17 +1635,6 @@ static void dhcpv4_acd_event_handler(struct net_mgmt_event_callback *cb,
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (mgmt_event != NET_EVENT_IPV4_ACD_FAILED &&
|
||||
mgmt_event != NET_EVENT_IPV4_ACD_CONFLICT) {
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (cb->info_length != sizeof(struct in_addr)) {
|
||||
goto out;
|
||||
}
|
||||
|
||||
addr = (struct in_addr *)cb->info;
|
||||
|
||||
if (!net_ipv4_addr_cmp(&iface->config.dhcpv4.requested_ip, addr)) {
|
||||
goto out;
|
||||
}
|
||||
@@ -1676,8 +1685,11 @@ const char *net_dhcpv4_msg_type_name(enum net_dhcpv4_msg_type msg_type)
|
||||
"inform"
|
||||
};
|
||||
|
||||
__ASSERT_NO_MSG(msg_type >= 1 && msg_type <= sizeof(name));
|
||||
return name[msg_type - 1];
|
||||
if (msg_type >= 1 && msg_type <= sizeof(name)) {
|
||||
return name[msg_type - 1];
|
||||
}
|
||||
|
||||
return "invalid";
|
||||
}
|
||||
|
||||
static void dhcpv4_start_internal(struct net_if *iface, bool first_start)
|
||||
|
||||
@@ -51,6 +51,7 @@ struct dhcp_msg {
|
||||
#define DHCPV4_SERVER_PORT 67
|
||||
#define DHCPV4_CLIENT_PORT 68
|
||||
|
||||
#define DHCPV4_OPTIONS_PAD 0
|
||||
#define DHCPV4_OPTIONS_SUBNET_MASK 1
|
||||
#define DHCPV4_OPTIONS_ROUTER 3
|
||||
#define DHCPV4_OPTIONS_DNS_SERVER 6
|
||||
|
||||
@@ -879,7 +879,7 @@ out:
|
||||
|
||||
static int dhcpv4_server_probing_init(struct dhcpv4_server_ctx *ctx)
|
||||
{
|
||||
return net_icmp_init_ctx(&ctx->probe_ctx.icmp_ctx,
|
||||
return net_icmp_init_ctx(&ctx->probe_ctx.icmp_ctx, AF_INET,
|
||||
NET_ICMPV4_ECHO_REPLY, 0,
|
||||
echo_reply_handler);
|
||||
}
|
||||
|
||||
@@ -345,6 +345,10 @@ int dns_dispatcher_unregister(struct dns_socket_dispatcher *ctx)
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (ctx->fds[i].fd < 0) {
|
||||
continue;
|
||||
}
|
||||
|
||||
dispatch_table[ctx->fds[i].fd].ctx = NULL;
|
||||
}
|
||||
|
||||
|
||||
@@ -5,6 +5,8 @@
|
||||
*/
|
||||
|
||||
#include <string.h>
|
||||
#include <zephyr/sys/bitarray.h>
|
||||
#include <zephyr/net/dns_resolve.h>
|
||||
#include <zephyr/net_buf.h>
|
||||
|
||||
#include "dns_pack.h"
|
||||
@@ -367,10 +369,11 @@ int dns_unpack_response_query(struct dns_msg_t *dns_msg)
|
||||
int dns_copy_qname(uint8_t *buf, uint16_t *len, uint16_t size,
|
||||
struct dns_msg_t *dns_msg, uint16_t pos)
|
||||
{
|
||||
SYS_BITARRAY_DEFINE(visited, DNS_RESOLVER_MAX_BUF_SIZE);
|
||||
uint16_t msg_size = dns_msg->msg_size;
|
||||
uint8_t *msg = dns_msg->msg;
|
||||
uint16_t lb_size;
|
||||
int rc = -EINVAL;
|
||||
int rc = -EINVAL, ret, prev;
|
||||
|
||||
*len = 0U;
|
||||
|
||||
@@ -383,7 +386,7 @@ int dns_copy_qname(uint8_t *buf, uint16_t *len, uint16_t size,
|
||||
lb_size = msg[pos];
|
||||
|
||||
/* pointer */
|
||||
if (lb_size > DNS_LABEL_MAX_SIZE) {
|
||||
if ((lb_size & NS_CMPRSFLGS) == NS_CMPRSFLGS) {
|
||||
uint8_t mask = DNS_LABEL_MAX_SIZE;
|
||||
|
||||
if (pos + 1 >= msg_size) {
|
||||
@@ -394,7 +397,21 @@ int dns_copy_qname(uint8_t *buf, uint16_t *len, uint16_t size,
|
||||
/* See: RFC 1035, 4.1.4. Message compression */
|
||||
pos = ((msg[pos] & mask) << 8) + msg[pos + 1];
|
||||
|
||||
ret = sys_bitarray_test_and_set_bit(&visited, pos, &prev);
|
||||
if (ret < 0) {
|
||||
rc = -EINVAL;
|
||||
break;
|
||||
}
|
||||
|
||||
if (prev) {
|
||||
rc = -ELOOP;
|
||||
break;
|
||||
}
|
||||
|
||||
continue;
|
||||
} else if (lb_size & NS_CMPRSFLGS) {
|
||||
rc = -EINVAL;
|
||||
break;
|
||||
}
|
||||
|
||||
/* validate that the label (i.e. size + elements),
|
||||
@@ -484,7 +501,7 @@ static int dns_unpack_name(const uint8_t *msg, int maxlen, const uint8_t *src,
|
||||
}
|
||||
|
||||
while ((val = *curr_src++)) {
|
||||
if (val & NS_CMPRSFLGS) {
|
||||
if ((val & NS_CMPRSFLGS) == NS_CMPRSFLGS) {
|
||||
/* Follow pointer */
|
||||
int pos;
|
||||
|
||||
|
||||
@@ -163,7 +163,8 @@ config HTTP_SERVER_WEBSOCKET
|
||||
|
||||
config HTTP_SERVER_RESOURCE_WILDCARD
|
||||
bool "Allow wildcard matching of resources"
|
||||
select FNMATCH
|
||||
# The POSIX_C_LIB_EXT will get fnmatch() support
|
||||
select POSIX_C_LIB_EXT
|
||||
help
|
||||
Allow user to specify wildcards when setting up resource strings.
|
||||
This means that instead of specifying multiple resources with exact
|
||||
|
||||
@@ -762,7 +762,7 @@ struct http_resource_detail *get_resource_detail(const struct http_service_desc
|
||||
|
||||
ret = fnmatch(resource->resource, path, (FNM_PATHNAME | FNM_LEADING_DIR));
|
||||
if (ret == 0) {
|
||||
*path_len = strlen(resource->resource);
|
||||
*path_len = path_len_without_query(path);
|
||||
return resource->detail;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -456,7 +456,7 @@ static int cmd_net_ping(const struct shell *sh, size_t argc, char *argv[])
|
||||
net_addr_pton(AF_INET6, host, &ping_ctx.addr6.sin6_addr) == 0) {
|
||||
ping_ctx.addr6.sin6_family = AF_INET6;
|
||||
|
||||
ret = net_icmp_init_ctx(&ping_ctx.icmp, NET_ICMPV6_ECHO_REPLY, 0,
|
||||
ret = net_icmp_init_ctx(&ping_ctx.icmp, AF_INET6, NET_ICMPV6_ECHO_REPLY, 0,
|
||||
handle_ipv6_echo_reply);
|
||||
if (ret < 0) {
|
||||
PR_WARNING("Cannot initialize ICMP context for %s\n", "IPv6");
|
||||
@@ -466,7 +466,7 @@ static int cmd_net_ping(const struct shell *sh, size_t argc, char *argv[])
|
||||
net_addr_pton(AF_INET, host, &ping_ctx.addr4.sin_addr) == 0) {
|
||||
ping_ctx.addr4.sin_family = AF_INET;
|
||||
|
||||
ret = net_icmp_init_ctx(&ping_ctx.icmp, NET_ICMPV4_ECHO_REPLY, 0,
|
||||
ret = net_icmp_init_ctx(&ping_ctx.icmp, AF_INET, NET_ICMPV4_ECHO_REPLY, 0,
|
||||
handle_ipv4_echo_reply);
|
||||
if (ret < 0) {
|
||||
PR_WARNING("Cannot initialize ICMP context for %s\n", "IPv4");
|
||||
|
||||
@@ -2068,6 +2068,8 @@ int zsock_getsockopt_ctx(struct net_context *ctx, int level, int optname,
|
||||
return 0;
|
||||
}
|
||||
|
||||
break;
|
||||
|
||||
case IPV6_MULTICAST_HOPS:
|
||||
ret = net_context_get_option(ctx,
|
||||
NET_OPT_MCAST_HOP_LIMIT,
|
||||
|
||||
@@ -679,7 +679,7 @@ static void send_ping(const struct shell *sh,
|
||||
struct net_icmp_ctx ctx;
|
||||
int ret;
|
||||
|
||||
ret = net_icmp_init_ctx(&ctx, NET_ICMPV6_ECHO_REPLY, 0, ping_handler);
|
||||
ret = net_icmp_init_ctx(&ctx, AF_INET6, NET_ICMPV6_ECHO_REPLY, 0, ping_handler);
|
||||
if (ret < 0) {
|
||||
shell_fprintf(sh, SHELL_WARNING, "Cannot send ping (%d)\n", ret);
|
||||
return;
|
||||
|
||||
@@ -33,7 +33,7 @@ static inline struct flash_area const *get_flash_area_from_id(int idx)
 static inline bool is_in_flash_area_bounds(const struct flash_area *fa,
 					   off_t off, size_t len)
 {
-	return (off >= 0) && ((off + len) <= fa->fa_size);
+	return (off >= 0) && (off < fa->fa_size) && (len <= (fa->fa_size - off));
 }
 
 #endif /* ZEPHYR_SUBSYS_STORAGE_FLASH_MAP_PRIV_H_ */

8  tests/arch/arm/arm_user_stack_test/CMakeLists.txt  Normal file
@@ -0,0 +1,8 @@
# SPDX-License-Identifier: Apache-2.0

cmake_minimum_required(VERSION 3.20.0)

find_package(Zephyr REQUIRED HINTS $ENV{ZEPHYR_BASE})
project(arm_user_stack_test)

target_sources(app PRIVATE src/main.c)

@@ -0,0 +1 @@
CONFIG_ARM_MPU=y

@@ -0,0 +1 @@
CONFIG_ARM_MPU=y

1  tests/arch/arm/arm_user_stack_test/prj.conf  Normal file
@@ -0,0 +1 @@
CONFIG_ZTEST=y

131  tests/arch/arm/arm_user_stack_test/src/main.c  Normal file
@@ -0,0 +1,131 @@
/*
 * Copyright The Zephyr Project Contributors
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifdef CONFIG_FPU_SHARING
#include <math.h>
#endif
#include <zephyr/ztest.h>
#include <zephyr/ztest_error_hook.h>
#include <zephyr/syscall_list.h>

struct k_thread th0, th1;
K_THREAD_STACK_DEFINE(stk0, 2048);
K_THREAD_STACK_DEFINE(stk1, 2048);

ZTEST_BMEM int attack_stack[128];
ZTEST_BMEM uint64_t sys_ret; /* 64 syscalls take result address in r0 */

volatile int kernel_secret;
volatile int *const attack_sp = &attack_stack[128];
const int sysno = K_SYSCALL_K_UPTIME_TICKS;
k_tid_t low_tid, hi_tid;

struct k_timer timer;
volatile ZTEST_BMEM uint64_t hi_thread_runs, test_completed;

void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *pEsf)
{
	test_completed = 1;
	k_timer_stop(&timer);
	ztest_test_pass();
	k_thread_abort(low_tid);

	/* This check is to handle a case where low prio thread has started and
	 * resulted in a fault while changing the sp but
	 * the high prio thread is not created yet
	 */
	if (hi_tid) {
		k_thread_abort(hi_tid);
	}
}

static void timeout_handler(struct k_timer *timer)
{
	if (!test_completed) {

		printf("hi_thread_runs: %lld\n", hi_thread_runs);
		/* the timer times out after 120s,
		 * by then hi_fn would have ran multiple times so
		 * compare against a random number like 1000 to make sure that
		 * hi_fn actually ran for a while
		 */
		if (hi_thread_runs > 1000) {
			ztest_test_pass();
		} else {
			ztest_test_fail();
		}
	}
}

void attack_entry(void)
{
	printf("Call %s from %s\n", __func__, k_is_user_context() ? "user" : "kernel");
	/* kernel_secret can only be updated in privilege mode so updating it here should result in
	 * a fault. If it doesn't we fail the test.
	 */
	kernel_secret = 1;

	printf("Changed the kernel_secret so marking test as failed\n");
	ztest_test_fail();

	k_thread_abort(low_tid);
	k_thread_abort(hi_tid);
}

void low_fn(void *arg1, void *arg2, void *arg3)
{
#ifdef CONFIG_FPU_SHARING
	double x = 1.2345;
	double y = 6.789;

	/* some random fp stuff so that an extended stack frame is saved on svc */
	zassert_equal(x, 1.2345);
	zassert_equal(y, 6.789);
#endif
	printf("Call %s from %s\n", __func__, k_is_user_context() ? "user" : "kernel");
	attack_stack[0] = 1;
	__asm__ volatile("mov sp, %0;"
			 "1:;"
			 "ldr r0, =sys_ret;"
			 "ldr r6, =sysno;"
			 "ldr r6, [r6];"
			 "svc 3;"
			 "b 1b;" ::"r"(attack_sp));
}

void hi_fn(void *arg1, void *arg2, void *arg3)
{
	printf("Call %s from %s\n", __func__, k_is_user_context() ? "user" : "kernel");
	while (1) {
		attack_sp[-2] = (int)attack_entry;
		k_msleep(1);
		hi_thread_runs++;
	}
}

ZTEST(arm_user_stack_test, test_arm_user_stack_corruption)
{
	k_timer_init(&timer, timeout_handler, NULL);
	k_timer_start(&timer, K_SECONDS(120), K_NO_WAIT);

	low_tid = k_thread_create(&th0, stk0, K_THREAD_STACK_SIZEOF(stk0), low_fn, NULL, NULL, NULL,
				  2,
#ifdef CONFIG_FPU_SHARING
				  K_INHERIT_PERMS | K_USER | K_FP_REGS,
#else
				  K_INHERIT_PERMS | K_USER,
#endif
				  K_NO_WAIT);

	k_msleep(6); /* let low_fn start looping */
	hi_tid = k_thread_create(&th1, stk1, K_THREAD_STACK_SIZEOF(stk1), hi_fn, NULL, NULL, NULL,
				 1, K_INHERIT_PERMS | K_USER, K_NO_WAIT);

	k_thread_join(&th0, K_FOREVER);
	k_thread_join(&th1, K_FOREVER);
}

ZTEST_SUITE(arm_user_stack_test, NULL, NULL, NULL, NULL, NULL);

18 tests/arch/arm/arm_user_stack_test/testcase.yaml Normal file
@@ -0,0 +1,18 @@
common:
  tags:
    - arm
  timeout: 120
  # TODO: remove the platform_exclude once the issue with MPS2/AN385 is fixed.
  platform_exclude:
    - mps2/an385
tests:
  arch.arm.user.stack:
    filter: CONFIG_CPU_CORTEX_M
    extra_configs:
      - CONFIG_USERSPACE=y
  arch.arm.user.stack.float:
    filter: CONFIG_CPU_CORTEX_M and CONFIG_CPU_HAS_FPU
    extra_configs:
      - CONFIG_USERSPACE=y
      - CONFIG_FPU=y
      - CONFIG_FPU_SHARING=y
@@ -97,7 +97,7 @@ ZTEST(ethernet, test_icmp_check)
	gw_addr_4 = net_if_ipv4_get_gw(iface);
	zassert_not_equal(gw_addr_4.s_addr, 0, "Gateway address is not set");

-	ret = net_icmp_init_ctx(&ctx, NET_ICMPV4_ECHO_REPLY, 0, icmp_event);
+	ret = net_icmp_init_ctx(&ctx, AF_INET, NET_ICMPV4_ECHO_REPLY, 0, icmp_event);
	zassert_equal(ret, 0, "Cannot init ICMP (%d)", ret);

	dst4.sin_family = AF_INET;

@@ -282,7 +282,7 @@ ZTEST(wifi, test_2_icmp)
	gw_addr_4 = net_if_ipv4_get_gw(wifi_ctx.iface);
	zassert_not_equal(gw_addr_4.s_addr, 0, "Gateway address is not set");

-	ret = net_icmp_init_ctx(&icmp_ctx, NET_ICMPV4_ECHO_REPLY, 0, icmp_event);
+	ret = net_icmp_init_ctx(&icmp_ctx, AF_INET, NET_ICMPV4_ECHO_REPLY, 0, icmp_event);
	zassert_equal(ret, 0, "Cannot init ICMP (%d)", ret);

	dst4.sin_family = AF_INET;
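The two hunks above (and several later ones in this comparison) make the same mechanical change: net_icmp_init_ctx() now also takes the address family of the ICMP context. A minimal sketch of the updated registration pattern, written here for illustration only; the handler body and helper name are assumptions, not code from the diff:

/* Illustrative only: register an ICMPv4 echo-reply handler using the
 * family-aware net_icmp_init_ctx() call that the updated tests rely on.
 */
#include <zephyr/net/icmp.h>
#include <zephyr/net/net_ip.h>

static int echo_reply_handler(struct net_icmp_ctx *ctx, struct net_pkt *pkt,
			      struct net_icmp_ip_hdr *hdr,
			      struct net_icmp_hdr *icmp_hdr, void *user_data)
{
	/* Inspect pkt or signal a semaphore here. */
	return 0;
}

static int register_echo_reply_handler(struct net_icmp_ctx *ctx)
{
	/* AF_INET (or AF_INET6) is the newly added parameter. */
	int ret = net_icmp_init_ctx(ctx, AF_INET, NET_ICMPV4_ECHO_REPLY, 0,
				    echo_reply_handler);

	if (ret != 0) {
		return ret;
	}

	/* ... exchange traffic, then unregister ... */
	return net_icmp_cleanup_ctx(ctx);
}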
@@ -30,6 +30,7 @@ tests:
  drivers.spi.loopback.rtio:
    extra_configs:
      - CONFIG_SPI_RTIO=y
      - CONFIG_SPI_RTIO_FALLBACK_MSGS=5
    platform_allow:
      - robokit1
      - mimxrt1170_evk/mimxrt1176/cm7
@@ -18,6 +18,8 @@ LOG_MODULE_REGISTER(udc_test, LOG_LEVEL_INF);
 * connected to the host as this state is not covered by this test.
 */

#define BULK_OUT_EP_ADDR	0x01U
#define BULK_IN_EP_ADDR		0x81U
#define FALSE_EP_ADDR		0x0FU

K_MSGQ_DEFINE(test_msgq, sizeof(struct udc_event), 8, sizeof(uint32_t));

@@ -127,7 +129,7 @@ static void test_udc_ep_enable(const struct device *dev,
	uint8_t ctrl_ep = USB_EP_DIR_IS_IN(ed->bEndpointAddress) ?
			  USB_CONTROL_EP_IN : USB_CONTROL_EP_OUT;
	/* Possible return values 0, -EINVAL, -ENODEV, -EALREADY, -EPERM. */
-	int err1, err2, err3, err4;
+	int err1, err2, err3;

	err1 = udc_ep_enable(dev, ed->bEndpointAddress, ed->bmAttributes,
			     sys_le16_to_cpu(ed->wMaxPacketSize),
@@ -135,28 +137,22 @@ static void test_udc_ep_enable(const struct device *dev,
	err2 = udc_ep_enable(dev, ed->bEndpointAddress, ed->bmAttributes,
			     sys_le16_to_cpu(ed->wMaxPacketSize),
			     ed->bInterval);
-	err3 = udc_ep_enable(dev, FALSE_EP_ADDR, ed->bmAttributes,
-			     sys_le16_to_cpu(ed->wMaxPacketSize),
-			     ed->bInterval);
-	err4 = udc_ep_enable(dev, ctrl_ep, ed->bmAttributes,
+	err3 = udc_ep_enable(dev, ctrl_ep, ed->bmAttributes,
			     sys_le16_to_cpu(ed->wMaxPacketSize),
			     ed->bInterval);

	if (!udc_is_initialized(dev) && !udc_is_enabled(dev)) {
		zassert_equal(err1, -EPERM, "Not failed to enable endpoint");
		zassert_equal(err2, -EPERM, "Not failed to enable endpoint");
-		zassert_equal(err3, -EPERM, "Not failed to enable endpoint");
-		zassert_equal(err4, -EINVAL, "Not failed to enable endpoint");
+		zassert_equal(err3, -EINVAL, "Not failed to enable endpoint");
	} else if (udc_is_initialized(dev) && !udc_is_enabled(dev)) {
		zassert_equal(err1, -EPERM, "Not failed to enable endpoint");
		zassert_equal(err2, -EPERM, "Not failed to enable endpoint");
-		zassert_equal(err3, -EPERM, "Not failed to enable endpoint");
-		zassert_equal(err4, -EINVAL, "Not failed to enable endpoint");
+		zassert_equal(err3, -EINVAL, "Not failed to enable endpoint");
	} else {
		zassert_equal(err1, 0, "Failed to enable endpoint");
		zassert_equal(err2, -EALREADY, "Not failed to enable endpoint");
-		zassert_equal(err3, -ENODEV, "Not failed to enable endpoint");
-		zassert_equal(err4, -EINVAL, "Not failed to enable endpoint");
+		zassert_equal(err3, -EINVAL, "Not failed to enable endpoint");
	}
}

@@ -166,28 +162,24 @@ static void test_udc_ep_disable(const struct device *dev,
	uint8_t ctrl_ep = USB_EP_DIR_IS_IN(ed->bEndpointAddress) ?
			  USB_CONTROL_EP_IN : USB_CONTROL_EP_OUT;
	/* Possible return values 0, -EINVAL, -ENODEV, -EALREADY, -EPERM. */
-	int err1, err2, err3, err4;
+	int err1, err2, err3;

	err1 = udc_ep_disable(dev, ed->bEndpointAddress);
	err2 = udc_ep_disable(dev, ed->bEndpointAddress);
-	err3 = udc_ep_disable(dev, FALSE_EP_ADDR);
-	err4 = udc_ep_disable(dev, ctrl_ep);
+	err3 = udc_ep_disable(dev, ctrl_ep);

	if (!udc_is_initialized(dev) && !udc_is_enabled(dev)) {
		zassert_equal(err1, -EPERM, "Not failed to disable endpoint");
		zassert_equal(err2, -EPERM, "Not failed to disable endpoint");
-		zassert_equal(err3, -EPERM, "Not failed to disable endpoint");
-		zassert_equal(err4, -EINVAL, "Not failed to disable endpoint");
+		zassert_equal(err3, -EINVAL, "Not failed to disable endpoint");
	} else if (udc_is_initialized(dev) && !udc_is_enabled(dev)) {
		zassert_equal(err1, -EALREADY, "Failed to disable endpoint");
		zassert_equal(err2, -EALREADY, "Not failed to disable endpoint");
-		zassert_equal(err3, -ENODEV, "Not failed to disable endpoint");
-		zassert_equal(err4, -EINVAL, "Not failed to disable endpoint");
+		zassert_equal(err3, -EINVAL, "Not failed to disable endpoint");
	} else {
		zassert_equal(err1, 0, "Failed to disable endpoint");
		zassert_equal(err2, -EALREADY, "Not failed to disable endpoint");
-		zassert_equal(err3, -ENODEV, "Not failed to disable endpoint");
-		zassert_equal(err4, -EINVAL, "Not failed to disable endpoint");
+		zassert_equal(err3, -EINVAL, "Not failed to disable endpoint");
	}
}

@@ -284,17 +276,14 @@ static void test_udc_ep_dequeue(const struct device *dev,
				struct usb_ep_descriptor *ed)
{
	/* Possible return values 0, -EPERM, -ENODEV, -EACCES(TBD) */
-	int err1, err2;
+	int err;

-	err1 = udc_ep_dequeue(dev, ed->bEndpointAddress);
-	err2 = udc_ep_dequeue(dev, FALSE_EP_ADDR);
+	err = udc_ep_dequeue(dev, ed->bEndpointAddress);

	if (!udc_is_initialized(dev)) {
-		zassert_equal(err1, -EPERM, "Not failed to dequeue");
-		zassert_equal(err2, -EPERM, "Not failed to dequeue");
+		zassert_equal(err, -EPERM, "Not failed to dequeue");
	} else {
-		zassert_equal(err1, 0, "Failed to dequeue");
-		zassert_equal(err2, -ENODEV, "Not failed to dequeue");
+		zassert_equal(err, 0, "Failed to dequeue");
	}
}
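The reworked asserts above encode a simple expectation matrix: enabling or disabling a control endpoint (like the out-of-range FALSE_EP_ADDR it replaces as the err3 case) is rejected with -EINVAL regardless of device state, while the result for a regular endpoint depends only on whether the controller is initialized and enabled. A summary sketch written for this review, not code from the diff, with an invented helper name:

/* Sketch of the expectation matrix for a regular (non-control) endpoint;
 * the helper name and structure are illustrative only.
 */
static int expected_ep_enable_result(const struct device *dev, bool second_attempt)
{
	if (!udc_is_enabled(dev)) {
		/* Initialized or not: enabling data endpoints is not permitted yet. */
		return -EPERM;
	}

	/* Enabled controller: the first call succeeds, repeating it is -EALREADY. */
	return second_attempt ? -EALREADY : 0;
}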
@@ -462,7 +451,7 @@ static struct usb_ep_descriptor ed_ctrl_in = {
static struct usb_ep_descriptor ed_bulk_out = {
	.bLength = sizeof(struct usb_ep_descriptor),
	.bDescriptorType = USB_DESC_ENDPOINT,
-	.bEndpointAddress = 0x01,
+	.bEndpointAddress = BULK_OUT_EP_ADDR,
	.bmAttributes = USB_EP_TYPE_BULK,
	.wMaxPacketSize = sys_cpu_to_le16(64),
	.bInterval = 0,

@@ -471,7 +460,7 @@ static struct usb_ep_descriptor ed_bulk_out = {
static struct usb_ep_descriptor ed_bulk_in = {
	.bLength = sizeof(struct usb_ep_descriptor),
	.bDescriptorType = USB_DESC_ENDPOINT,
-	.bEndpointAddress = 0x81,
+	.bEndpointAddress = BULK_IN_EP_ADDR,
	.bmAttributes = USB_EP_TYPE_BULK,
	.wMaxPacketSize = sys_cpu_to_le16(64),
	.bInterval = 0,

@@ -7,6 +7,10 @@ tests:
    depends_on: usbd
    integration_platforms:
      - nrf52840dk/nrf52840
      - frdm_k64f
      - nucleo_f413zh
      - mimxrt1050_evk/mimxrt1052/hyperflash
      - rpi_pico
    platform_exclude:
      - nrf54h20dk/nrf54h20/cpuapp
  drivers.usb.udc.build_only:

@@ -2,6 +2,6 @@
 * Copyright (c) 2021 Intel Corporation
 * Copyright (c) 2022 Huawei France Technologies SASU
 *
- * SPDX-License-Identifier: Apache-2.0 and UNLICENSED
+ * SPDX-License-Identifier: Apache-2.0
 */
#include "pcie_ivshmem.dtsi"
290 tests/kernel/mutex/mutex_api/src/complex_inversion.c Normal file
@@ -0,0 +1,290 @@
/*
 * Copyright 2024 by Garmin Ltd. or its subsidiaries.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Test complex mutex priority inversion
 *
 * This module demonstrates the kernel's priority inheritance algorithm
 * with two mutexes and four threads, ensuring that boosting priority of
 * a thread waiting on another mutex does not break assumptions of the
 * mutex's waitq, causing the incorrect thread to run or a crash.
 *
 * Sequence for priority inheritance testing:
 * - thread_08 takes mutex_1
 * - thread_07 takes mutex_0 then waits on mutex_1
 * - thread_06 waits on mutex_1
 * - thread_05 waits on mutex_0, boosting priority of thread_07
 * - thread_08 gives mutex_1, thread_07 takes mutex_1
 * - thread_07 gives mutex_1, thread_06 takes mutex_1
 * - thread_07 gives mutex_0, thread_05 takes mutex_0
 * - thread_06 gives mutex_1
 * - thread_05 gives mutex_0
 */

#include <zephyr/tc_util.h>
#include <zephyr/kernel.h>
#include <zephyr/ztest.h>
#include <zephyr/sys/mutex.h>

#define STACKSIZE (512 + CONFIG_TEST_EXTRA_STACK_SIZE)

static ZTEST_DMEM int tc_rc = TC_PASS; /* test case return code */

static K_MUTEX_DEFINE(mutex_0);
static K_MUTEX_DEFINE(mutex_1);

#define PARTICIPANT_THREAD_OPTIONS (K_INHERIT_PERMS)

#define DEFINE_PARTICIPANT_THREAD(id) \
	static K_THREAD_STACK_DEFINE(thread_##id##_stack_area, STACKSIZE); \
	static struct k_thread thread_##id##_thread_data; \
	static k_tid_t thread_##id##_tid; \
	static K_SEM_DEFINE(thread_##id##_wait, 0, 1); \
	static K_SEM_DEFINE(thread_##id##_done, 0, 1);

#define CREATE_PARTICIPANT_THREAD(id, pri) \
	thread_##id##_tid = k_thread_create(&thread_##id##_thread_data, thread_##id##_stack_area, \
					    K_THREAD_STACK_SIZEOF(thread_##id##_stack_area), \
					    (k_thread_entry_t)thread_##id, &thread_##id##_wait, \
					    &thread_##id##_done, NULL, pri, \
					    PARTICIPANT_THREAD_OPTIONS, K_FOREVER); \
	k_thread_name_set(thread_##id##_tid, "thread_" STRINGIFY(id));
#define START_PARTICIPANT_THREAD(id) k_thread_start(&(thread_##id##_thread_data));
#define JOIN_PARTICIPANT_THREAD(id) k_thread_join(&(thread_##id##_thread_data), K_FOREVER);

#define WAIT_FOR_MAIN() \
	k_sem_give(done); \
	k_sem_take(wait, K_FOREVER);

#define ADVANCE_THREAD(id) \
	SIGNAL_THREAD(id); \
	WAIT_FOR_THREAD(id);

#define SIGNAL_THREAD(id) k_sem_give(&thread_##id##_wait);

#define WAIT_FOR_THREAD(id) zassert_ok(k_sem_take(&thread_##id##_done, K_MSEC(100)));

/**
 *
 * thread_05 -
 *
 */

static void thread_05(struct k_sem *wait, struct k_sem *done)
{
	int rv;

	/*
	 * Wait for mutex_0, boosting the priority of thread_07 so it will lock mutex_1 first.
	 */

	WAIT_FOR_MAIN();

	rv = k_mutex_lock(&mutex_0, K_FOREVER);
	if (rv != 0) {
		tc_rc = TC_FAIL;
		TC_ERROR("Failed to take mutex %p\n", &mutex_0);
		return;
	}

	WAIT_FOR_MAIN();

	k_mutex_unlock(&mutex_0);
}

/**
 *
 * thread_06 -
 *
 */

static void thread_06(struct k_sem *wait, struct k_sem *done)
{
	int rv;

	/*
	 * Wait for mutex_1. Initially it will be the highest priority waiter, but
	 * thread_07 will be boosted above thread_06 so thread_07 will lock it first.
	 */

	WAIT_FOR_MAIN();

	rv = k_mutex_lock(&mutex_1, K_FOREVER);
	if (rv != 0) {
		tc_rc = TC_FAIL;
		TC_ERROR("Failed to take mutex %p\n", &mutex_1);
		return;
	}

	WAIT_FOR_MAIN();

	k_mutex_unlock(&mutex_1);
}

/**
 *
 * thread_07 -
 *
 */

static void thread_07(struct k_sem *wait, struct k_sem *done)
{
	int rv;

	/*
	 * Lock mutex_0 and wait for mutex_1. After thread_06 is also waiting for
	 * mutex_1, thread_05 will wait for mutex_0, boosting the priority for
	 * thread_07 so it should lock mutex_1 first when it is unlocked by thread_08.
	 */

	WAIT_FOR_MAIN();

	rv = k_mutex_lock(&mutex_0, K_NO_WAIT);
	if (rv != 0) {
		tc_rc = TC_FAIL;
		TC_ERROR("Failed to take mutex %p\n", &mutex_0);
		return;
	}

	WAIT_FOR_MAIN();

	rv = k_mutex_lock(&mutex_1, K_FOREVER);
	if (rv != 0) {
		tc_rc = TC_FAIL;
		TC_ERROR("Failed to take mutex %p\n", &mutex_1);
		k_mutex_unlock(&mutex_0);
		return;
	}

	WAIT_FOR_MAIN();

	k_mutex_unlock(&mutex_1);
	k_mutex_unlock(&mutex_0);
}

/**
 *
 * thread_08 -
 *
 */

static void thread_08(struct k_sem *wait, struct k_sem *done)
{
	int rv;

	/*
	 * Lock mutex_1 and hold until priority has been boosted on thread_07
	 * to ensure that thread_07 is the first to lock mutex_1 when thread_08
	 * unlocks it.
	 */

	WAIT_FOR_MAIN();

	rv = k_mutex_lock(&mutex_1, K_NO_WAIT);
	if (rv != 0) {
		tc_rc = TC_FAIL;
		TC_ERROR("Failed to take mutex %p\n", &mutex_1);
		return;
	}

	WAIT_FOR_MAIN();

	k_mutex_unlock(&mutex_1);
}

DEFINE_PARTICIPANT_THREAD(05);
DEFINE_PARTICIPANT_THREAD(06);
DEFINE_PARTICIPANT_THREAD(07);
DEFINE_PARTICIPANT_THREAD(08);

static void create_participant_threads(void)
{
	CREATE_PARTICIPANT_THREAD(05, 5);
	CREATE_PARTICIPANT_THREAD(06, 6);
	CREATE_PARTICIPANT_THREAD(07, 7);
	CREATE_PARTICIPANT_THREAD(08, 8);
}

static void start_participant_threads(void)
{
	START_PARTICIPANT_THREAD(05);
	START_PARTICIPANT_THREAD(06);
	START_PARTICIPANT_THREAD(07);
	START_PARTICIPANT_THREAD(08);
}

static void join_participant_threads(void)
{
	JOIN_PARTICIPANT_THREAD(05);
	JOIN_PARTICIPANT_THREAD(06);
	JOIN_PARTICIPANT_THREAD(07);
	JOIN_PARTICIPANT_THREAD(08);
}

/**
 *
 * @brief Main thread to test mutex locking
 *
 * This thread orchestrates mutex locking on other threads and verifies that
 * the correct thread is holding mutexes at any given step.
 *
 */

ZTEST(mutex_api, test_complex_inversion)
{
	create_participant_threads();
	start_participant_threads();

	/* Wait for all the threads to start up */
	WAIT_FOR_THREAD(08);
	WAIT_FOR_THREAD(07);
	WAIT_FOR_THREAD(06);
	WAIT_FOR_THREAD(05);

	ADVANCE_THREAD(08); /* thread_08 takes mutex_1 */
	zassert_equal(thread_08_tid, mutex_1.owner, "expected owner %s, not %s\n",
		      thread_08_tid->name, mutex_1.owner->name);

	ADVANCE_THREAD(07); /* thread_07 takes mutex_0 */
	zassert_equal(thread_07_tid, mutex_0.owner, "expected owner %s, not %s\n",
		      thread_07_tid->name, mutex_0.owner->name);

	SIGNAL_THREAD(07); /* thread_07 waits on mutex_1 */
	k_sleep(K_MSEC(100)); /* Give thread_07 some time to wait on mutex_1 */

	SIGNAL_THREAD(06); /* thread_06 waits on mutex_1 */
	k_sleep(K_MSEC(100)); /* Give thread_06 some time to wait on mutex_1 */

	SIGNAL_THREAD(05); /* thread_05 waits on mutex_0, boosting priority of thread_07 */

	SIGNAL_THREAD(08); /* thread_08 gives mutex_1 */

	/* If thread_06 erroneously took mutex_1, giving it could cause a crash
	 * when CONFIG_WAITQ_SCALABLE is set. Give it a chance to run to make sure
	 * this crash isn't hit.
	 */
	SIGNAL_THREAD(06);

	WAIT_FOR_THREAD(07); /* thread_07 takes mutex_1 */
	zassert_equal(thread_07_tid, mutex_1.owner, "expected owner %s, not %s\n",
		      thread_07_tid->name, mutex_1.owner->name);

	SIGNAL_THREAD(07); /* thread_07 gives mutex_1 then gives mutex_0 */
	WAIT_FOR_THREAD(06); /* thread_06 takes mutex_1 */
	WAIT_FOR_THREAD(05); /* thread_05 takes mutex_0 */
	zassert_equal(thread_06_tid, mutex_1.owner, "expected owner %s, not %s\n",
		      thread_06_tid->name, mutex_1.owner->name);
	zassert_equal(thread_05_tid, mutex_0.owner, "expected owner %s, not %s\n",
		      thread_05_tid->name, mutex_0.owner->name);

	SIGNAL_THREAD(06); /* thread_06 gives mutex_1 */
	SIGNAL_THREAD(05); /* thread_05 gives mutex_0 */

	zassert_equal(tc_rc, TC_PASS);

	join_participant_threads();
}
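The new test relies on k_mutex priority inheritance: a thread holding a mutex is temporarily boosted to the priority of its highest-priority waiter. A minimal, self-contained illustration of that mechanism, not part of the diff (thread creation is omitted and the names are invented):

/* Illustration only: while high_entry() blocks on shared_lock, the kernel
 * boosts the current holder's effective priority so it cannot be preempted
 * indefinitely by medium-priority work.
 */
#include <zephyr/kernel.h>

static K_MUTEX_DEFINE(shared_lock);

static void low_entry(void *a, void *b, void *c)
{
	k_mutex_lock(&shared_lock, K_FOREVER); /* acquired at low priority */
	k_busy_wait(1000);                     /* holder inherits a waiter's priority here */
	k_mutex_unlock(&shared_lock);          /* the boost is dropped on unlock */
}

static void high_entry(void *a, void *b, void *c)
{
	k_mutex_lock(&shared_lock, K_FOREVER); /* blocks and donates its priority */
	k_mutex_unlock(&shared_lock);
}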
@@ -3,3 +3,9 @@ tests:
    tags:
      - kernel
      - userspace

  kernel.mutex.scalable:
    tags:
      - kernel
    extra_configs:
      - CONFIG_WAITQ_SCALABLE=y
@@ -876,7 +876,7 @@ static void test_tx_chksum_icmp_frag(sa_family_t family, bool offloaded)

	test_icmp_init(family, offloaded, &dst_addr, &iface);

-	ret = net_icmp_init_ctx(&ctx, 0, 0, dummy_icmp_handler);
+	ret = net_icmp_init_ctx(&ctx, family, 0, 0, dummy_icmp_handler);
	zassert_equal(ret, 0, "Cannot init ICMP (%d)", ret);

	test_started = true;

@@ -1212,7 +1212,7 @@ static void test_rx_chksum_icmp_frag(sa_family_t family, bool offloaded)

	test_icmp_init(family, offloaded, &dst_addr, &iface);

-	ret = net_icmp_init_ctx(&ctx,
+	ret = net_icmp_init_ctx(&ctx, family,
				family == AF_INET6 ? NET_ICMPV6_ECHO_REPLY :
						     NET_ICMPV4_ECHO_REPLY,
				0, icmp_handler);

@@ -1269,7 +1269,7 @@ static void test_rx_chksum_icmp_frag_bad(sa_family_t family, bool offloaded)

	test_icmp_init(family, offloaded, &dst_addr, &iface);

-	ret = net_icmp_init_ctx(&ctx,
+	ret = net_icmp_init_ctx(&ctx, family,
				family == AF_INET6 ? NET_ICMPV6_ECHO_REPLY :
						     NET_ICMPV4_ECHO_REPLY,
				0, icmp_handler);
@@ -37,7 +37,7 @@ LOG_MODULE_REGISTER(net_test, CONFIG_NET_DHCPV4_LOG_LEVEL);
#include "net_private.h"

/* Sample DHCP offer (420 bytes) */
-static const unsigned char offer[420] = {
+static const unsigned char offer[] = {
	0x02, 0x01, 0x06, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x0a, 0xed, 0x48, 0x9e, 0x0a, 0xb8,

@@ -70,6 +70,8 @@ static const unsigned char offer[420] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	/* Magic cookie: DHCP */
	0x63, 0x82, 0x53, 0x63,
	/* [0] Pad option */
	0x00,
	/* [53] DHCP Message Type: OFFER */
	0x35, 0x01, 0x02,
	/* [1] Subnet Mask: 255.255.255.0 */

@@ -124,7 +126,7 @@ static const unsigned char offer[420] = {
};

/* Sample DHCPv4 ACK */
-static const unsigned char ack[420] = {
+static const unsigned char ack[] = {
	0x02, 0x01, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x0a, 0xed, 0x48, 0x9e, 0x00, 0x00, 0x00, 0x00,

@@ -157,6 +159,8 @@ static const unsigned char ack[420] = {
	0x00, 0x00, 0x00, 0x00,
	/* Magic cookie: DHCP */
	0x63, 0x82, 0x53, 0x63,
	/* [0] Pad option */
	0x00,
	/* [53] DHCP Message Type: ACK */
	0x35, 0x01, 0x05,
	/* [58] Renewal Time Value: (21600s) 6 hours */

@@ -230,6 +234,9 @@ struct dhcp_msg {
	uint8_t type;
};

static uint32_t offer_xid;
static uint32_t request_xid;

static struct k_sem test_lock;

#define WAIT_TIME K_SECONDS(CONFIG_NET_DHCPV4_INITIAL_DELAY_MAX + 1)

@@ -306,6 +313,8 @@ struct net_pkt *prepare_dhcp_offer(struct net_if *iface, uint32_t xid)

	net_ipv4_finalize(pkt, IPPROTO_UDP);

	offer_xid = xid;

	return pkt;

fail:

@@ -392,6 +401,10 @@ static int parse_dhcp_message(struct net_pkt *pkt, struct dhcp_msg *msg)
		return 0;
	}

	if (msg->type == NET_DHCPV4_MSG_TYPE_REQUEST) {
		request_xid = msg->xid;
	}

	return 1;
}

@@ -687,6 +700,10 @@ ZTEST(dhcpv4_tests, test_dhcp)
			zassert_true(false, "Timeout while waiting");
		}
	}

	/* Verify that Request xid matched Offer xid. */
	zassert_equal(offer_xid, request_xid, "Offer/Request xid mismatch, "
		      "Offer 0x%08x, Request 0x%08x", offer_xid, request_xid);
}

/**test case main entry */
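Both sample packets gain a single Pad option (0x00) right after the DHCP magic cookie, which forces the client's option parser to step over a length-less option before it reaches the message type. As a reminder of why that byte is special, here is a generic option-walk sketch based on standard RFC 2132 behaviour; it is illustrative only and not code from the diff or from the Zephyr DHCP client:

/* Generic DHCP option walk: Pad (0x00) and End (0xff) are single-byte
 * options with no length field; everything else is type, length, value.
 */
#include <stddef.h>
#include <stdint.h>

static const uint8_t *dhcp_next_option(const uint8_t *opt, const uint8_t *end)
{
	if (opt >= end || *opt == 0xff) {
		return NULL;                 /* End option or buffer exhausted */
	}

	if (*opt == 0x00) {
		return opt + 1;              /* Pad option: skip exactly one byte */
	}

	if (opt + 2 > end || opt + 2 + opt[1] > end) {
		return NULL;                 /* malformed: value runs past the buffer */
	}

	return opt + 2 + opt[1];             /* skip type, length and value */
}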
@@ -465,7 +465,7 @@ ZTEST(icmp_tests, test_icmpv6_echo_request)
		return;
	}

-	ret = net_icmp_init_ctx(&ctx, NET_ICMPV6_ECHO_REPLY, 0, icmp_handler);
+	ret = net_icmp_init_ctx(&ctx, AF_INET6, NET_ICMPV6_ECHO_REPLY, 0, icmp_handler);
	zassert_equal(ret, 0, "Cannot init ICMP (%d)", ret);

	dst6.sin6_family = AF_INET6;

@@ -508,7 +508,7 @@ ZTEST(icmp_tests, test_icmpv4_echo_request)
		return;
	}

-	ret = net_icmp_init_ctx(&ctx, NET_ICMPV4_ECHO_REPLY, 0, icmp_handler);
+	ret = net_icmp_init_ctx(&ctx, AF_INET, NET_ICMPV4_ECHO_REPLY, 0, icmp_handler);
	zassert_equal(ret, 0, "Cannot init ICMP (%d)", ret);

	dst4.sin_family = AF_INET;

@@ -549,7 +549,7 @@ ZTEST(icmp_tests, test_offload_icmpv4_echo_request)
	struct net_icmp_ctx ctx;
	int ret;

-	ret = net_icmp_init_ctx(&ctx, NET_ICMPV4_ECHO_REPLY, 0, icmp_handler);
+	ret = net_icmp_init_ctx(&ctx, AF_INET, NET_ICMPV4_ECHO_REPLY, 0, icmp_handler);
	zassert_equal(ret, 0, "Cannot init ICMP (%d)", ret);

	dst4.sin_family = AF_INET;

@@ -588,7 +588,7 @@ ZTEST(icmp_tests, test_offload_icmpv6_echo_request)
	struct net_icmp_ctx ctx;
	int ret;

-	ret = net_icmp_init_ctx(&ctx, NET_ICMPV6_ECHO_REPLY, 0, icmp_handler);
+	ret = net_icmp_init_ctx(&ctx, AF_INET6, NET_ICMPV6_ECHO_REPLY, 0, icmp_handler);
	zassert_equal(ret, 0, "Cannot init ICMP (%d)", ret);

	dst6.sin6_family = AF_INET6;
@@ -620,6 +620,126 @@ ZTEST(icmp_tests, test_offload_icmpv6_echo_request)
#endif
#endif /* CONFIG_NET_OFFLOADING_SUPPORT */

/* Need to have both IPv4/IPv6 for those */
#if defined(CONFIG_NET_IPV4) && defined(CONFIG_NET_IPV6)
static K_SEM_DEFINE(test_req_sem, 0, 1);

static int icmp_request_handler(struct net_icmp_ctx *ctx,
				struct net_pkt *pkt,
				struct net_icmp_ip_hdr *hdr,
				struct net_icmp_hdr *icmp_hdr,
				void *user_data)
{
	k_sem_give(&test_req_sem);

	return 0;
}

ZTEST(icmp_tests, test_malformed_icmpv6_echo_request_on_ipv4)
{
	struct in_addr dst4 = { 0 };
	const struct in_addr *src4;
	struct net_icmp_ctx ctx;
	struct net_if *iface;
	struct net_pkt *pkt;
	int ret;

	k_sem_reset(&test_req_sem);

	ret = net_icmp_init_ctx(&ctx, AF_INET6, NET_ICMPV6_ECHO_REQUEST, 0,
				icmp_request_handler);
	zassert_equal(ret, 0, "Cannot init ICMP (%d)", ret);

	memcpy(&dst4, &recv_addr_4, sizeof(recv_addr_4));

	/* Prepare malformed NET_ICMPV6_ECHO_REQUEST on IPv4 packet */
	iface = net_if_ipv4_select_src_iface(&dst4);
	zassert_not_null(iface, "NULL iface");

	src4 = net_if_ipv4_select_src_addr(iface, &dst4);
	zassert_not_null(src4, "NULL addr");

	pkt = net_pkt_alloc_with_buffer(iface, sizeof(struct net_icmpv4_echo_req),
					AF_INET, IPPROTO_ICMP, K_MSEC(100));
	zassert_not_null(pkt, "NULL pkt");

	if (net_ipv4_create(pkt, src4, &dst4) != 0 ||
	    net_icmpv4_create(pkt, NET_ICMPV6_ECHO_REQUEST, 0) != 0) {
		net_pkt_unref(pkt);
		zassert_true(false, "Failed to create ICMP packet");
	}

	net_pkt_cursor_init(pkt);
	net_ipv4_finalize(pkt, IPPROTO_ICMP);

	if (net_send_data(pkt) != 0) {
		net_pkt_unref(pkt);
		zassert_true(false, "Failed to send packet");
	}

	ret = k_sem_take(&test_req_sem, K_MSEC(100));
	if (ret != -EAGAIN) {
		(void)net_icmp_cleanup_ctx(&ctx);
		zassert_true(false, "ICMP request shouldn't be processed");
	}

	ret = net_icmp_cleanup_ctx(&ctx);
	zassert_equal(ret, 0, "Cannot cleanup ICMP (%d)", ret);
}

ZTEST(icmp_tests, test_malformed_icmpv4_echo_request_on_ipv6)
{
	struct in6_addr dst6 = { 0 };
	const struct in6_addr *src6;
	struct net_icmp_ctx ctx;
	struct net_if *iface;
	struct net_pkt *pkt;
	int ret;

	k_sem_reset(&test_req_sem);

	ret = net_icmp_init_ctx(&ctx, AF_INET, NET_ICMPV4_ECHO_REQUEST, 0,
				icmp_request_handler);
	zassert_equal(ret, 0, "Cannot init ICMP (%d)", ret);

	memcpy(&dst6, &recv_addr_6, sizeof(recv_addr_6));

	/* Prepare malformed NET_ICMPV4_ECHO_REQUEST on IPv6 packet */
	iface = net_if_ipv6_select_src_iface(&dst6);
	zassert_not_null(iface, "NULL iface");

	src6 = net_if_ipv6_select_src_addr(iface, &dst6);
	zassert_not_null(src6, "NULL addr");

	pkt = net_pkt_alloc_with_buffer(iface, sizeof(struct net_icmpv6_echo_req),
					AF_INET6, IPPROTO_ICMPV6, K_MSEC(100));
	zassert_not_null(pkt, "NULL pkt");

	if (net_ipv6_create(pkt, src6, &dst6) != 0 ||
	    net_icmpv6_create(pkt, NET_ICMPV4_ECHO_REQUEST, 0) != 0) {
		net_pkt_unref(pkt);
		zassert_true(false, "Failed to create ICMP packet");
	}

	net_pkt_cursor_init(pkt);
	net_ipv6_finalize(pkt, IPPROTO_ICMPV6);

	if (net_send_data(pkt) != 0) {
		net_pkt_unref(pkt);
		zassert_true(false, "Failed to send packet");
	}

	ret = k_sem_take(&test_req_sem, K_MSEC(100));
	if (ret != -EAGAIN) {
		(void)net_icmp_cleanup_ctx(&ctx);
		zassert_true(false, "ICMP request shouldn't be processed");
	}

	ret = net_icmp_cleanup_ctx(&ctx);
	zassert_equal(ret, 0, "Cannot cleanup ICMP (%d)", ret);
}
#endif /* defined(CONFIG_NET_IPV4) && defined(CONFIG_NET_IPV6) */

static void *setup(void)
{
	if (IS_ENABLED(CONFIG_NET_TC_THREAD_COOPERATIVE)) {
@@ -464,7 +464,7 @@ static void icmpv4_send_echo_rep(void)
	struct net_pkt *pkt;
	int ret;

-	ret = net_icmp_init_ctx(&ctx, NET_ICMPV4_ECHO_REPLY,
+	ret = net_icmp_init_ctx(&ctx, AF_INET, NET_ICMPV4_ECHO_REPLY,
				0, handle_reply_msg);
	zassert_equal(ret, 0, "Cannot register %s handler (%d)",
		      STRINGIFY(NET_ICMPV4_ECHO_REPLY), ret);
@@ -182,12 +182,12 @@ ZTEST(icmpv6_fn, test_icmpv6)
	struct net_pkt *pkt;
	int ret;

-	ret = net_icmp_init_ctx(&ctx1, NET_ICMPV6_ECHO_REPLY,
+	ret = net_icmp_init_ctx(&ctx1, AF_INET6, NET_ICMPV6_ECHO_REPLY,
				0, handle_test_msg);
	zassert_equal(ret, 0, "Cannot register %s handler (%d)",
		      STRINGIFY(NET_ICMPV6_ECHO_REPLY), ret);

-	ret = net_icmp_init_ctx(&ctx2, NET_ICMPV6_ECHO_REQUEST,
+	ret = net_icmp_init_ctx(&ctx2, AF_INET6, NET_ICMPV6_ECHO_REQUEST,
				0, handle_test_msg);
	zassert_equal(ret, 0, "Cannot register %s handler (%d)",
		      STRINGIFY(NET_ICMPV6_ECHO_REQUEST), ret);
@@ -2091,6 +2091,7 @@ ZTEST(net_ipv6_fragment, test_send_ipv6_fragment_without_hbho)
				     AF_UNSPEC, 0, ALLOC_TIMEOUT);
	zassert_not_null(pkt, "packet");

	net_pkt_set_ll_proto_type(pkt, NET_ETH_PTYPE_IPV6);
	net_pkt_set_family(pkt, AF_INET6);
	net_pkt_set_ip_hdr_len(pkt, sizeof(struct net_ipv6_hdr));
	net_pkt_set_ipv6_ext_len(pkt, NET_IPV6_FRAGH_LEN); /* without hbho */

@@ -2299,7 +2300,7 @@ ZTEST(net_ipv6_fragment, test_recv_ipv6_fragment)
	int ret;
	struct net_icmp_ctx ctx;

-	ret = net_icmp_init_ctx(&ctx, NET_ICMPV6_ECHO_REPLY,
+	ret = net_icmp_init_ctx(&ctx, AF_INET6, NET_ICMPV6_ECHO_REPLY,
				0, handle_ipv6_echo_reply);
	zassert_equal(ret, 0, "Cannot register %s handler (%d)",
		      STRINGIFY(NET_ICMPV6_ECHO_REPLY), ret);
@@ -1287,6 +1287,253 @@ ZTEST(dns_packet, test_dns_invalid_answer)
	zassert_equal(ret, -EINVAL, "DNS message answer check succeed (%d)", ret);
}

static uint8_t recursive_query_resp_ipv4[] = {
	/* DNS msg header (12 bytes) */
	0x74, 0xe1, 0x81, 0x80, 0x00, 0x01, 0x00, 0x01,
	0x00, 0x00, 0x00, 0x00,

	/* Query string (westus2-prod-2.notifications.teams.microsoft.com)
	 * (length 50)
	 */
	0x0e, 0x77, 0x65, 0x73, 0x74, 0x75, 0x73, 0x32,
	0x2d, 0x70, 0x72, 0x6f, 0x64, 0x2d, 0x32, 0x0d,
	0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x05, 0x74, 0x65,
	0x61, 0x6d, 0x73, 0x09, 0x6d, 0x69, 0x63, 0x72,
	0x6f, 0x73, 0x6f, 0x66, 0x74, 0x03, 0x63, 0x6f,
	0x6d, 0x00,

	/* Type (2 bytes) */
	0x00, 0x01,

	/* Class (2 bytes) */
	0x00, 0x01,

	/* Answer 1 */
	0xc0, 0x0c,

	/* Answer type (cname) */
	0x00, 0x05,

	/* Class */
	0x00, 0x01,

	/* TTL */
	0x00, 0x00, 0x00, 0x04,

	/* RR data length */
	0x00, 0x02,

	/* Data */
	0xc0, 0x4e, /* <--- recursive pointer */
};

NET_BUF_POOL_DEFINE(dns_qname_pool_for_test, 2, 128, 0, NULL);

ZTEST(dns_packet, test_dns_recursive_query)
{
	static const uint8_t query[] = {
		/* Query string */
		0x0e, 0x77, 0x65, 0x73, 0x74, 0x75, 0x73, 0x32,
		0x2d, 0x70, 0x72, 0x6f, 0x64, 0x2d, 0x32, 0x0d,
		0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
		0x74, 0x69, 0x6f, 0x6e, 0x73, 0x05, 0x74, 0x65,
		0x61, 0x6d, 0x73, 0x09, 0x6d, 0x69, 0x63, 0x72,
		0x6f, 0x73, 0x6f, 0x66, 0x74, 0x03, 0x63, 0x6f,
		0x6d, 0x00,

		/* Type */
		0x00, 0x01,
	};
	struct dns_msg_t dns_msg = { 0 };
	uint16_t dns_id = 0;
	int query_idx = -1;
	uint16_t query_hash = 0;
	struct net_buf *dns_cname;
	int ret;

	dns_cname = net_buf_alloc(&dns_qname_pool_for_test, dns_ctx.buf_timeout);
	zassert_not_null(dns_cname, "Out of mem");

	dns_msg.msg = recursive_query_resp_ipv4;
	dns_msg.msg_size = sizeof(recursive_query_resp_ipv4);

	dns_id = dns_unpack_header_id(dns_msg.msg);

	setup_dns_context(&dns_ctx, 0, dns_id, query, sizeof(query),
			  DNS_QUERY_TYPE_A);

	ret = dns_validate_msg(&dns_ctx, &dns_msg, &dns_id, &query_idx,
			       dns_cname, &query_hash);
	zassert_true(ret == DNS_EAI_SYSTEM && errno == ELOOP,
		     "[%s] DNS message was valid (%d / %d)",
		     "recursive rsp", ret, errno);

	net_buf_unref(dns_cname);
}

static uint8_t invalid_compression_response_ipv4[] = {
	/* DNS msg header (12 bytes) */
	0x74, 0xe1, 0x81, 0x80, 0x00, 0x01, 0x00, 0x01,
	0x00, 0x00, 0x00, 0x00,

	/* Query string */
	0x0e, 0x77, 0x65, 0x73, 0x74, 0x75, 0x73, 0x32,
	0x2d, 0x70, 0x72, 0x6f, 0x64, 0x2d, 0x32, 0x0d,
	0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x05, 0x74, 0x65,
	0x61, 0x6d, 0x73, 0x09, 0x6d, 0x69, 0x63, 0x72,
	0x6f, 0x73, 0x6f, 0x66, 0x74, 0x03, 0x63, 0x6f,
	0x6d, 0x00,

	/* Type */
	0x00, 0x01,

	/* Class */
	0x00, 0x01,

	/* Answer 1 */
	0xb0, 0x0c, /* <--- invalid compression pointer */

	/* Answer type (cname) */
	0x00, 0x05,

	/* Class */
	0x00, 0x01,

	/* TTL */
	0x00, 0x00, 0x00, 0x04,

	/* RR data length */
	0x00, 0x02,

	/* Data */
	0xc0, 0x0c,
};

ZTEST(dns_packet, test_dns_invalid_compress_bits)
{
	static const uint8_t query[] = {
		/* Query string */
		0x0e, 0x77, 0x65, 0x73, 0x74, 0x75, 0x73, 0x32,
		0x2d, 0x70, 0x72, 0x6f, 0x64, 0x2d, 0x32, 0x0d,
		0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
		0x74, 0x69, 0x6f, 0x6e, 0x73, 0x05, 0x74, 0x65,
		0x61, 0x6d, 0x73, 0x09, 0x6d, 0x69, 0x63, 0x72,
		0x6f, 0x73, 0x6f, 0x66, 0x74, 0x03, 0x63, 0x6f,
		0x6d, 0x00,

		/* Type */
		0x00, 0x01,
	};
	struct dns_msg_t dns_msg = { 0 };
	uint16_t dns_id = 0;
	int query_idx = -1;
	uint16_t query_hash = 0;
	struct net_buf *dns_cname;
	int ret;

	dns_cname = net_buf_alloc(&dns_qname_pool_for_test, dns_ctx.buf_timeout);
	zassert_not_null(dns_cname, "Out of mem");

	dns_msg.msg = invalid_compression_response_ipv4;
	dns_msg.msg_size = sizeof(invalid_compression_response_ipv4);

	dns_id = dns_unpack_header_id(dns_msg.msg);

	setup_dns_context(&dns_ctx, 0, dns_id, query, sizeof(query),
			  DNS_QUERY_TYPE_A);

	ret = dns_validate_msg(&dns_ctx, &dns_msg, &dns_id, &query_idx,
			       dns_cname, &query_hash);
	zassert_true(ret == DNS_EAI_SYSTEM && errno == EINVAL,
		     "[%s] DNS message was valid (%d / %d)",
		     "invalid compression rsp", ret, errno);

	net_buf_unref(dns_cname);
}

static uint8_t invalid_compression_response_cname_ipv4[] = {
	/* DNS msg header (12 bytes) */
	0x74, 0xe1, 0x81, 0x80, 0x00, 0x01, 0x00, 0x01,
	0x00, 0x00, 0x00, 0x00,

	/* Query string */
	0x0e, 0x77, 0x65, 0x73, 0x74, 0x75, 0x73, 0x32,
	0x2d, 0x70, 0x72, 0x6f, 0x64, 0x2d, 0x32, 0x0d,
	0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x05, 0x74, 0x65,
	0x61, 0x6d, 0x73, 0x09, 0x6d, 0x69, 0x63, 0x72,
	0x6f, 0x73, 0x6f, 0x66, 0x74, 0x03, 0x63, 0x6f,
	0x6d, 0x00,

	/* Type */
	0x00, 0x01,

	/* Class */
	0x00, 0x01,

	/* Answer 1 */
	0xc0, 0x0c,

	/* Answer type (cname) */
	0x00, 0x05,

	/* Class */
	0x00, 0x01,

	/* TTL */
	0x00, 0x00, 0x00, 0x04,

	/* RR data length */
	0x00, 0x02,

	/* Data */
	0xb0, 0x0c, /* <--- invalid compression pointer */
};

ZTEST(dns_packet, test_dns_invalid_compress_bits_cname)
{
	static const uint8_t query[] = {
		/* Query string */
		0x0e, 0x77, 0x65, 0x73, 0x74, 0x75, 0x73, 0x32,
		0x2d, 0x70, 0x72, 0x6f, 0x64, 0x2d, 0x32, 0x0d,
		0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
		0x74, 0x69, 0x6f, 0x6e, 0x73, 0x05, 0x74, 0x65,
		0x61, 0x6d, 0x73, 0x09, 0x6d, 0x69, 0x63, 0x72,
		0x6f, 0x73, 0x6f, 0x66, 0x74, 0x03, 0x63, 0x6f,
		0x6d, 0x00,

		/* Type */
		0x00, 0x01,
	};
	struct dns_msg_t dns_msg = { 0 };
	uint16_t dns_id = 0;
	int query_idx = -1;
	uint16_t query_hash = 0;
	struct net_buf *dns_cname;
	int ret;

	dns_cname = net_buf_alloc(&dns_qname_pool_for_test, dns_ctx.buf_timeout);
	zassert_not_null(dns_cname, "Out of mem");

	dns_msg.msg = invalid_compression_response_cname_ipv4;
	dns_msg.msg_size = sizeof(invalid_compression_response_cname_ipv4);

	dns_id = dns_unpack_header_id(dns_msg.msg);

	setup_dns_context(&dns_ctx, 0, dns_id, query, sizeof(query),
			  DNS_QUERY_TYPE_A);

	ret = dns_validate_msg(&dns_ctx, &dns_msg, &dns_id, &query_idx,
			       dns_cname, &query_hash);
	zassert_true(ret == DNS_EAI_SYSTEM && errno == EINVAL,
		     "[%s] DNS message was valid (%d / %d)",
		     "invalid compression rsp", ret, errno);

	net_buf_unref(dns_cname);
}

ZTEST_SUITE(dns_packet, NULL, NULL, NULL, NULL, NULL);
/* TODO:
 * 1) add malformed DNS data (mostly done)
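The invalid-compression samples above differ from the recursive one only in the first byte of a name pointer: a valid DNS compression pointer must have both top bits set (0xc0), so 0xb0 is rejected with EINVAL, while a well-formed pointer that loops back on itself is reported as ELOOP. A small validation sketch for context, illustrative only and not the resolver's actual code:

/* Illustration of the two failure modes exercised by these tests. */
#include <errno.h>
#include <stdint.h>

static int check_label_byte(uint8_t len_or_ptr, int followed_pointers)
{
	if ((len_or_ptr & 0xc0) == 0xc0) {
		/* Compression pointer: cap how many are followed to avoid loops. */
		return (followed_pointers > 16) ? -ELOOP : 0;
	}

	if ((len_or_ptr & 0xc0) != 0x00) {
		/* Top bits 01 or 10 (e.g. 0xb0) are not valid label bytes. */
		return -EINVAL;
	}

	return 0; /* plain label of length len_or_ptr */
}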
Some files were not shown because too many files have changed in this diff.