Compare commits
71 Commits
v3.7.1...v3.3-branch
| Author | SHA1 | Date |
|---|---|---|
| | 9669393895 | |
| | 532a37f0eb | |
| | bd7f05ebbc | |
| | a124fd1617 | |
| | 3714ca8a01 | |
| | 84091f5bc7 | |
| | a094fc5e71 | |
| | e6aed2d965 | |
| | e720b03cfa | |
| | a79d551056 | |
| | 485e591f07 | |
| | f9d4df18b8 | |
| | a042ba2b79 | |
| | 0b4af3278c | |
| | d3f06e71f2 | |
| | ea800e9a66 | |
| | 11a493c8fa | |
| | 099e0132ce | |
| | 909f965032 | |
| | d330dd95ac | |
| | 9e22e7c770 | |
| | 38feec784c | |
| | 0edde4d5e2 | |
| | a60d4b2a73 | |
| | ff79476b11 | |
| | 09316bd851 | |
| | 17a7f73652 | |
| | fa45721867 | |
| | 1786c74806 | |
| | b1945b1f1e | |
| | 227bde0505 | |
| | b68c2077ac | |
| | 3d2443f047 | |
| | 10eadebb1d | |
| | 338b7e112d | |
| | 240c69f903 | |
| | a00160b00f | |
| | 9fa92c8413 | |
| | 70d6991e49 | |
| | 34c952fa02 | |
| | bcb17a2cd3 | |
| | 12d6eb4a5f | |
| | ee983e6ee4 | |
| | 9cca16d289 | |
| | 56b08ea2a9 | |
| | 44a5740b10 | |
| | 45015cdc78 | |
| | 0a863c779b | |
| | 4c491de03c | |
| | 1b19d5c510 | |
| | f7dad4cd20 | |
| | ba180d8adc | |
| | a49eaeca35 | |
| | 3c7aadc603 | |
| | 5b45a4c669 | |
| | 3f5951c5fc | |
| | dc9695135e | |
| | a5ebd9c09c | |
| | a290bd19e3 | |
| | d1e95f74b3 | |
| | f82e60bced | |
| | a4f3aa13f2 | |
| | 32c297360a | |
| | e41522d1e6 | |
| | cd0ef2e58d | |
| | fd999c2970 | |
| | 835777c47e | |
| | ed55f72b2d | |
| | 6a92e5dee8 | |
| | 8f547b79ca | |
| | 333fcf2a79 | |
**`.github/workflows/assigner.yml`** (vendored, 2 changes)

```diff
@@ -15,7 +15,7 @@ jobs:
   assignment:
     name: Pull Request Assignment
     if: github.event.pull_request.draft == false
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-22.04

     steps:
       - name: Install Python dependencies
```

**`.github/workflows/backport_issue_check.yml`** (vendored, 2 changes)

```diff
@@ -8,7 +8,7 @@ on:
 jobs:
   backport:
     name: Backport Issue Check
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-22.04
    if: github.repository == 'zephyrproject-rtos/zephyr'

     steps:
```

**`.github/workflows/bug_snapshot.yaml`** (vendored, 4 changes)

```diff
@@ -42,9 +42,9 @@ jobs:
          echo "BUGS_PICKLE_PATH=${BUGS_PICKLE_PATH}" >> ${GITHUB_ENV}

       - name: Configure AWS Credentials
-        uses: aws-actions/configure-aws-credentials@v1
+        uses: aws-actions/configure-aws-credentials@v2
         with:
-          aws-access-key-id: ${{ secrets.AWS_BUILDS_ZEPHYR_BUG_SNAPSHOT_ACCESS_KEY_ID }}
+          aws-access-key-id: ${{ vars.AWS_BUILDS_ZEPHYR_BUG_SNAPSHOT_ACCESS_KEY_ID }}
           aws-secret-access-key: ${{ secrets.AWS_BUILDS_ZEPHYR_BUG_SNAPSHOT_SECRET_ACCESS_KEY }}
           aws-region: us-east-1
```

**`.github/workflows/clang.yaml`** (vendored, 9 changes)

```diff
@@ -80,15 +80,16 @@ jobs:
          string(REPLACE "/" "_" repo ${{github.repository}})
          string(REPLACE "-" "_" repo2 ${repo})
          file(APPEND $ENV{GITHUB_OUTPUT} "repo=${repo2}\n")

       - name: use cache
         id: cache-ccache
-        uses: zephyrproject-rtos/action-s3-cache@v1
+        uses: zephyrproject-rtos/action-s3-cache@v1.2.0
         with:
           key: ${{ steps.ccache_cache_timestamp.outputs.repo }}-${{ github.ref_name }}-clang-${{ matrix.platform }}-ccache
           path: /github/home/.ccache
           aws-s3-bucket: ccache.zephyrproject.org
-          aws-access-key-id: ${{ secrets.CCACHE_S3_ACCESS_KEY_ID }}
-          aws-secret-access-key: ${{ secrets.CCACHE_S3_SECRET_ACCESS_KEY }}
+          aws-access-key-id: ${{ vars.AWS_CCACHE_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ secrets.AWS_CCACHE_SECRET_ACCESS_KEY }}
           aws-region: us-east-2

       - name: ccache stats initial
@@ -133,7 +134,7 @@ jobs:
    if: (success() || failure() ) && needs.clang-build.outputs.report_needed != 0
    steps:
      - name: Download Artifacts
-       uses: actions/download-artifact@v2
+       uses: actions/download-artifact@v3
        with:
          path: artifacts
      - name: Merge Test Results
```

**`.github/workflows/codecov.yaml`** (vendored, 10 changes)

```diff
@@ -67,13 +67,13 @@ jobs:

       - name: use cache
         id: cache-ccache
-        uses: zephyrproject-rtos/action-s3-cache@v1
+        uses: zephyrproject-rtos/action-s3-cache@v1.2.0
         with:
           key: ${{ steps.ccache_cache_prop.outputs.repo }}-${{github.event_name}}-${{matrix.platform}}-codecov-ccache
           path: /github/home/.ccache
           aws-s3-bucket: ccache.zephyrproject.org
-          aws-access-key-id: ${{ secrets.CCACHE_S3_ACCESS_KEY_ID }}
-          aws-secret-access-key: ${{ secrets.CCACHE_S3_SECRET_ACCESS_KEY }}
+          aws-access-key-id: ${{ vars.AWS_CCACHE_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ secrets.AWS_CCACHE_SECRET_ACCESS_KEY }}
           aws-region: us-east-2

       - name: ccache stats initial
@@ -121,7 +121,7 @@ jobs:
         with:
           fetch-depth: 0
       - name: Download Artifacts
-        uses: actions/download-artifact@v2
+        uses: actions/download-artifact@v3
         with:
           path: coverage/reports
@@ -165,7 +165,7 @@ jobs:

       - name: Upload coverage to Codecov
         if: always()
-        uses: codecov/codecov-action@v2
+        uses: codecov/codecov-action@v3
         with:
           directory: ./coverage/reports
           env_vars: OS,PYTHON
```

**`.github/workflows/daily_test_version.yml`** (vendored, 6 changes)

```diff
@@ -17,10 +17,10 @@ jobs:

     steps:
       - name: Configure AWS Credentials
-        uses: aws-actions/configure-aws-credentials@v1
+        uses: aws-actions/configure-aws-credentials@v2
         with:
-          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID_TESTING }}
-          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_TESTING }}
+          aws-access-key-id: ${{ vars.AWS_TESTING_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ secrets.AWS_TESTING_SECRET_ACCESS_KEY }}
           aws-region: us-east-1

       - name: install-pip
```

**`.github/workflows/doc-publish-pr.yml`** (vendored, 6 changes)

```diff
@@ -32,7 +32,7 @@ jobs:

       - name: Check PR number
         id: check-pr
-        uses: carpentries/actions/check-valid-pr@v0.8
+        uses: carpentries/actions/check-valid-pr@v0.14.0
         with:
           pr: ${{ env.PR_NUM }}
           sha: ${{ github.event.workflow_run.head_sha }}
@@ -48,9 +48,9 @@ jobs:
          tar xf html-output/html-output.tar.xz -C html-output

       - name: Configure AWS Credentials
-        uses: aws-actions/configure-aws-credentials@v1
+        uses: aws-actions/configure-aws-credentials@v2
         with:
-          aws-access-key-id: ${{ secrets.AWS_BUILDS_ZEPHYR_PR_ACCESS_KEY_ID }}
+          aws-access-key-id: ${{ vars.AWS_BUILDS_ZEPHYR_PR_ACCESS_KEY_ID }}
           aws-secret-access-key: ${{ secrets.AWS_BUILDS_ZEPHYR_PR_SECRET_ACCESS_KEY }}
           aws-region: us-east-1
```

**`.github/workflows/doc-publish.yml`** (vendored, 6 changes)

```diff
@@ -34,10 +34,10 @@ jobs:
          tar xf html-output/html-output.tar.xz -C html-output

       - name: Configure AWS Credentials
-        uses: aws-actions/configure-aws-credentials@v1
+        uses: aws-actions/configure-aws-credentials@v2
         with:
-          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
-          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+          aws-access-key-id: ${{ vars.AWS_DOCS_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ secrets.AWS_DOCS_SECRET_ACCESS_KEY }}
           aws-region: us-east-1

       - name: Upload to AWS S3
```

**`.github/workflows/footprint-tracking.yml`** (vendored, 6 changes)

```diff
@@ -58,10 +58,10 @@ jobs:
          west update 2>&1 1> west.update.log || west update 2>&1 1> west.update2.log

       - name: Configure AWS Credentials
-        uses: aws-actions/configure-aws-credentials@v1
+        uses: aws-actions/configure-aws-credentials@v2
         with:
-          aws-access-key-id: ${{ secrets.FOOTPRINT_AWS_KEY_ID }}
-          aws-secret-access-key: ${{ secrets.FOOTPRINT_AWS_ACCESS_KEY }}
+          aws-access-key-id: ${{ vars.AWS_TESTING_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ secrets.AWS_TESTING_SECRET_ACCESS_KEY }}
           aws-region: us-east-1

       - name: Record Footprint
```

**`.github/workflows/issue_count.yml`** (vendored, 6 changes)

```diff
@@ -42,10 +42,10 @@ jobs:
          path: ${{ env.OUTPUT_FILE_NAME }}

       - name: Configure AWS Credentials
-        uses: aws-actions/configure-aws-credentials@v1
+        uses: aws-actions/configure-aws-credentials@v2
         with:
-          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID_TESTING }}
-          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_TESTING }}
+          aws-access-key-id: ${{ vars.AWS_TESTING_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ secrets.AWS_TESTING_SECRET_ACCESS_KEY }}
           aws-region: us-east-1

       - name: Post Results
```

**`.github/workflows/manifest.yml`** (vendored, 2 changes)

```diff
@@ -4,7 +4,7 @@ on:

 jobs:
   contribs:
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-22.04
     name: Manifest
     steps:
       - name: Checkout the code
```

**`.github/workflows/stale_issue.yml`** (vendored, 3 changes)

```diff
@@ -9,9 +9,8 @@ jobs:
     runs-on: ubuntu-20.04
     if: github.repository == 'zephyrproject-rtos/zephyr'
     steps:
-      - uses: actions/stale@v3
+      - uses: actions/stale@v8
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
          stale-pr-message: 'This pull request has been marked as stale because it has been open (more
            than) 60 days with no activity. Remove the stale label or add a comment saying that you
            would like to have the label removed otherwise this pull request will automatically be
```

**`.github/workflows/twister.yaml`** (vendored, 8 changes)

```diff
@@ -192,14 +192,14 @@ jobs:

       - name: use cache
         id: cache-ccache
-        uses: zephyrproject-rtos/action-s3-cache@v1
+        uses: zephyrproject-rtos/action-s3-cache@v1.2.0
         continue-on-error: true
         with:
           key: ${{ steps.ccache_cache_timestamp.outputs.repo }}-${{ github.ref_name }}-${{github.event_name}}-${{ matrix.subset }}-ccache
           path: /github/home/.ccache
           aws-s3-bucket: ccache.zephyrproject.org
-          aws-access-key-id: ${{ secrets.CCACHE_S3_ACCESS_KEY_ID }}
-          aws-secret-access-key: ${{ secrets.CCACHE_S3_SECRET_ACCESS_KEY }}
+          aws-access-key-id: ${{ vars.AWS_CCACHE_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ secrets.AWS_CCACHE_SECRET_ACCESS_KEY }}
           aws-region: us-east-2

       - name: ccache stats initial
@@ -284,7 +284,7 @@ jobs:
        persist-credentials: false

      - name: Download Artifacts
-       uses: actions/download-artifact@v2
+       uses: actions/download-artifact@v3
        with:
          path: artifacts
```

```diff
@@ -404,7 +404,8 @@ zephyr_compile_options(${COMPILER_OPT_AS_LIST})

 # TODO: Include arch compiler options at this point.

-if(NOT CMAKE_C_COMPILER_ID STREQUAL "Clang")
+if(NOT CMAKE_C_COMPILER_ID STREQUAL "Clang" AND
+   NOT CMAKE_C_COMPILER_ID STREQUAL "IntelLLVM")
   # GCC assumed
   zephyr_cc_option(-fno-reorder-functions)
```

```diff
@@ -1,12 +1,18 @@
 # SPDX-License-Identifier: Apache-2.0

 if(DEFINED TOOLCHAIN_HOME)
-  set(find_program_clang_args PATHS ${TOOLCHAIN_HOME} ${ONEAPI_PYTHON_PATH} NO_DEFAULT_PATH)
+  set(find_program_clang_args PATHS ${TOOLCHAIN_HOME} ${ONEAPI_LLVM_BIN_PATH} NO_DEFAULT_PATH)
   set(find_program_binutils_args PATHS ${TOOLCHAIN_HOME} )
 endif()

+find_package(oneApi 2023.0.0 REQUIRED)
+
 find_program(CMAKE_AR llvm-ar ${find_program_clang_args} )
-find_program(CMAKE_NM llvm-nm ${find_program_clang_args} )
+if(ONEAPI_VERSION VERSION_LESS_EQUAL "2023.0.0")
+  find_program(CMAKE_NM nm ${find_program_binutils_args} )
+else()
+  find_program(CMAKE_NM llvm-nm ${find_program_clang_args} )
+endif()
 # In OneApi installation directory on Windows, there is no llvm-objdump
 # binary, so would better use objdump from system environment both
 # on Linux and Windows.
```

```diff
@@ -38,6 +38,8 @@ else()
     elseif(CONFIG_FP_HARDABI)
       list(APPEND TOOLCHAIN_C_FLAGS -mfloat-abi=hard)
     endif()
+  else()
+    list(APPEND TOOLCHAIN_C_FLAGS -mfloat-abi=soft)
   endif()
 endif()
```

```diff
@@ -35,8 +35,6 @@ zephyr_linker_section_configure(SECTION initshell
 zephyr_linker_section(NAME log_dynamic GROUP DATA_REGION NOINPUT)
 zephyr_linker_section_configure(SECTION log_dynamic KEEP INPUT ".log_dynamic_*")

-zephyr_iterable_section(NAME _static_thread_data GROUP DATA_REGION ${XIP_ALIGN_WITH_INPUT} SUBALIGN 4)
-
 if(CONFIG_USERSPACE)
   # All kernel objects within are assumed to be either completely
   # initialized at build time, or initialized automatically at runtime
@@ -57,6 +55,7 @@ zephyr_iterable_section(NAME k_pipe GROUP DATA_REGION ${XIP_ALIGN_WITH_INPUT} SU
 zephyr_iterable_section(NAME k_sem GROUP DATA_REGION ${XIP_ALIGN_WITH_INPUT} SUBALIGN 4)
 zephyr_iterable_section(NAME k_queue GROUP DATA_REGION ${XIP_ALIGN_WITH_INPUT} SUBALIGN 4)
 zephyr_iterable_section(NAME k_condvar GROUP DATA_REGION ${XIP_ALIGN_WITH_INPUT} SUBALIGN 4)
+zephyr_iterable_section(NAME k_event GROUP DATA_REGION ${XIP_ALIGN_WITH_INPUT} SUBALIGN 4)

 zephyr_linker_section(NAME _net_buf_pool_area GROUP DATA_REGION NOINPUT ${XIP_ALIGN_WITH_INPUT} SUBALIGN 4)
 zephyr_linker_section_configure(SECTION _net_buf_pool_area
@@ -117,3 +116,36 @@ if(CONFIG_ZTEST_NEW_API)
   zephyr_iterable_section(NAME ztest_test_rule GROUP DATA_REGION ${XIP_ALIGN_WITH_INPUT} SUBALIGN 4)
   zephyr_iterable_section(NAME ztest_expected_result_entry GROUP DATA_REGION ${XIP_ALIGN_WITH_INPUT} SUBALIGN 4)
 endif()
+
+if(CONFIG_ZBUS)
+  zephyr_iterable_section(NAME zbus_channel GROUP DATA_REGION ${XIP_ALIGN_WITH_INPUT} SUBALIGN 4)
+  zephyr_iterable_section(NAME zbus_observer GROUP DATA_REGION ${XIP_ALIGN_WITH_INPUT} SUBALIGN 4)
+endif()
+
+if(CONFIG_UVB)
+  zephyr_iterable_section(NAME uvb_node GROUP DATA_REGION ${XIP_ALIGN_WITH_INPUT} SUBALIGN 4)
+endif()
+
+if(CONFIG_BT_MESH_ADV_EXT)
+  zephyr_iterable_section(NAME bt_mesh_ext_adv GROUP DATA_REGION ${XIP_ALIGN_WITH_INPUT} SUBALIGN 4)
+endif()
+
+if(CONFIG_LOG)
+  zephyr_iterable_section(NAME log_mpsc_pbuf GROUP DATA_REGION ${XIP_ALIGN_WITH_INPUT} SUBALIGN 4)
+  zephyr_iterable_section(NAME log_msg_ptr GROUP DATA_REGION ${XIP_ALIGN_WITH_INPUT} SUBALIGN 4)
+endif()
+
+if(CONFIG_PCIE)
+  zephyr_iterable_section(NAME pcie_dev GROUP DATA_REGION ${XIP_ALIGN_WITH_INPUT} SUBALIGN 4)
+endif()
+
+if(CONFIG_USB_DEVICE_STACK OR CONFIG_USB_DEVICE_STACK_NEXT)
+  zephyr_iterable_section(NAME usb_cfg_data GROUP DATA_REGION ${XIP_ALIGN_WITH_INPUT} SUBALIGN 4)
+  zephyr_iterable_section(NAME usbd_contex GROUP DATA_REGION ${XIP_ALIGN_WITH_INPUT} SUBALIGN 4)
+  zephyr_iterable_section(NAME usbd_class_node GROUP DATA_REGION ${XIP_ALIGN_WITH_INPUT} SUBALIGN 4)
+endif()
+
+if(CONFIG_USB_HOST_STACK)
+  zephyr_iterable_section(NAME usbh_contex GROUP DATA_REGION ${XIP_ALIGN_WITH_INPUT} SUBALIGN 4)
+  zephyr_iterable_section(NAME usbh_class_data GROUP DATA_REGION ${XIP_ALIGN_WITH_INPUT} SUBALIGN 4)
+endif()
```

```diff
@@ -172,9 +172,6 @@ zephyr_linker_section_configure(SECTION log_strings INPUT ".log_strings*" KEEP S
 zephyr_linker_section(NAME log_const KVMA RAM_REGION GROUP RODATA_REGION NOINPUT ${XIP_ALIGN_WITH_INPUT})
 zephyr_linker_section_configure(SECTION log_const INPUT ".log_const_*" KEEP SORT NAME)

 zephyr_linker_section(NAME log_backends KVMA RAM_REGION GROUP RODATA_REGION NOINPUT ${XIP_ALIGN_WITH_INPUT})
 zephyr_linker_section_configure(SECTION log_backends INPUT ".log_backends.*" KEEP)

 zephyr_iterable_section(NAME shell KVMA RAM_REGION GROUP RODATA_REGION SUBALIGN 4)

 zephyr_linker_section(NAME shell_root_cmds KVMA RAM_REGION GROUP RODATA_REGION NOINPUT ${XIP_ALIGN_WITH_INPUT})
@@ -191,3 +188,14 @@ zephyr_linker_section_configure(SECTION zephyr_dbg_info INPUT ".zephyr_dbg_info"
 zephyr_linker_section(NAME device_handles KVMA RAM_REGION GROUP RODATA_REGION NOINPUT ${XIP_ALIGN_WITH_INPUT} ENDALIGN 16)
 zephyr_linker_section_configure(SECTION device_handles INPUT .__device_handles_pass1* KEEP SORT NAME PASS LINKER_DEVICE_HANDLES_PASS1)
 zephyr_linker_section_configure(SECTION device_handles INPUT .__device_handles_pass2* KEEP SORT NAME PASS NOT LINKER_DEVICE_HANDLES_PASS1)
+
+zephyr_iterable_section(NAME _static_thread_data KVMA RAM_REGION GROUP RODATA_REGION SUBALIGN 4)
+
+if (CONFIG_BT_IAS)
+  zephyr_iterable_section(NAME bt_ias_cb KVMA RAM_REGION GROUP RODATA_REGION SUBALIGN 4)
+endif()
+
+if (CONFIG_LOG)
+  zephyr_iterable_section(NAME log_link KVMA RAM_REGION GROUP RODATA_REGION SUBALIGN 4)
+  zephyr_iterable_section(NAME log_backend KVMA RAM_REGION GROUP RODATA_REGION SUBALIGN 4)
+endif()
```

**`cmake/modules/FindoneApi.cmake`** (new file, 24 lines)

```diff
@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: Apache-2.0
+#
+# Copyright (c) 2023 Intel Corporation
+#
+# FindoneApi module for locating oneAPI compiler, icx.
+#
+# The module defines the following variables:
+#
+# 'oneApi_FOUND', 'ONEAPI_FOUND'
+# True if the oneApi toolchain/compiler, icx, was found.
+#
+# 'ONEAPI_VERSION'
+# The version of the oneAPI toolchain.
+
+if(CMAKE_C_COMPILER)
+  # Parse the 'clang --version' output to find the installed version.
+  execute_process(COMMAND ${CMAKE_C_COMPILER} --version OUTPUT_VARIABLE ONEAPI_VERSION)
+  string(REGEX REPLACE "[^0-9]*([0-9.]+) .*" "\\1" ONEAPI_VERSION ${ONEAPI_VERSION})
+endif()
+
+find_package_handle_standard_args(oneApi
+  REQUIRED_VARS CMAKE_C_COMPILER
+  VERSION_VAR ONEAPI_VERSION
+)
```

```diff
@@ -516,7 +516,7 @@ function(zephyr_library_cc_option)
     string(MAKE_C_IDENTIFIER check${option} check)
     zephyr_check_compiler_flag(C ${option} ${check})

-    if(${check})
+    if(${${check}})
       zephyr_library_compile_options(${option})
     endif()
   endforeach()
@@ -1016,9 +1016,9 @@ endfunction()
 function(zephyr_check_compiler_flag lang option check)
   # Check if the option is covered by any hardcoded check before doing
   # an automated test.
-  zephyr_check_compiler_flag_hardcoded(${lang} "${option}" check exists)
+  zephyr_check_compiler_flag_hardcoded(${lang} "${option}" _${check} exists)
   if(exists)
-    set(check ${check} PARENT_SCOPE)
+    set(${check} ${_${check}} PARENT_SCOPE)
     return()
   endif()
@@ -1123,11 +1123,11 @@ function(zephyr_check_compiler_flag_hardcoded lang option check exists)
   # because they would produce a warning instead of an error during
   # the test. Exclude them by toolchain-specific blocklist.
   if((${lang} STREQUAL CXX) AND ("${option}" IN_LIST CXX_EXCLUDED_OPTIONS))
-    set(check 0 PARENT_SCOPE)
-    set(exists 1 PARENT_SCOPE)
+    set(${check} 0 PARENT_SCOPE)
+    set(${exists} 1 PARENT_SCOPE)
   else()
     # There does not exist a hardcoded check for this option.
-    set(exists 0 PARENT_SCOPE)
+    set(${exists} 0 PARENT_SCOPE)
   endif()
 endfunction(zephyr_check_compiler_flag_hardcoded)
@@ -2052,7 +2052,7 @@ function(check_set_linker_property)
   zephyr_check_compiler_flag(C "" ${check})
   set(CMAKE_REQUIRED_FLAGS ${SAVED_CMAKE_REQUIRED_FLAGS})

-  if(${check})
+  if(${${check}})
     set_property(TARGET ${LINKER_PROPERTY_TARGET} ${APPEND} PROPERTY ${property} ${option})
   endif()
 endfunction()
```

```diff
@@ -10,7 +10,7 @@ endif()
 string(TOLOWER ${CMAKE_HOST_SYSTEM_NAME} system)
 if(ONEAPI_TOOLCHAIN_PATH)
   set(TOOLCHAIN_HOME ${ONEAPI_TOOLCHAIN_PATH}/compiler/latest/${system}/bin/)
-  set(ONEAPI_PYTHON_PATH ${ONEAPI_TOOLCHAIN_PATH}/intelpython/latest/bin)
+  set(ONEAPI_LLVM_BIN_PATH ${ONEAPI_TOOLCHAIN_PATH}/compiler/latest/${system}/bin-llvm)
 endif()

 set(ONEAPI_TOOLCHAIN_PATH ${ONEAPI_TOOLCHAIN_PATH} CACHE PATH "oneApi install directory")
@@ -43,4 +43,6 @@ elseif(system STREQUAL "windows")
   add_link_options(--target=${triple})
 endif()

+set(TOOLCHAIN_HAS_NEWLIB OFF CACHE BOOL "True if toolchain supports newlib")
+
 message(STATUS "Found toolchain: host (clang/ld)")
```

```diff
@@ -78,40 +78,86 @@ int z_impl_can_add_rx_filter_msgq(const struct device *dev, struct k_msgq *msgq,
        return api->add_rx_filter(dev, can_msgq_put, msgq, filter);
 }

-static int update_sampling_pnt(uint32_t ts, uint32_t sp, struct can_timing *res,
+/**
+ * @brief Update the timing given a total number of time quanta and a sample point.
+ *
+ * @code{.text}
+ *
+ * +---------------------------------------------------+
+ * |    Nominal bit time in time quanta (total_tq)      |
+ * +--------------+----------+------------+------------+
+ * |   sync_seg   | prop_seg | phase_seg1 | phase_seg2 |
+ * +--------------+----------+------------+------------+
+ * | CAN_SYNG_SEG |         tseg1         |   tseg2    |
+ * +--------------+-----------------------+------------+
+ *                                        ^
+ *                                    sample_pnt
+ * @endcode
+ *
+ * @see @a can_timing
+ *
+ * @param total_tq   Total number of time quanta.
+ * @param sample_pnt Sampling point in permill of the entire bit time.
+ * @param[out] res   Result is written into the @a can_timing struct provided.
+ * @param max        Maximum timing parameters values.
+ * @param min        Minimum timing parameters values.
+ * @return Absolute sample point error.
+ */
+static int update_sampling_pnt(uint32_t total_tq, uint32_t sample_pnt,
+                               struct can_timing *res,
                                const struct can_timing *max,
                                const struct can_timing *min)
 {
-       uint16_t ts1_max = max->phase_seg1 + max->prop_seg;
-       uint16_t ts1_min = min->phase_seg1 + min->prop_seg;
-       uint32_t sp_calc;
-       uint16_t ts1, ts2;
+       uint16_t tseg1_max = max->phase_seg1 + max->prop_seg;
+       uint16_t tseg1_min = min->phase_seg1 + min->prop_seg;
+       uint32_t sample_pnt_res;
+       uint16_t tseg1, tseg2;

-       ts2 = ts - (ts * sp) / 1000;
-       ts2 = CLAMP(ts2, min->phase_seg2, max->phase_seg2);
-       ts1 = ts - CAN_SYNC_SEG - ts2;
+       /* Calculate number of time quanta in tseg2 for given sample point */
+       tseg2 = total_tq - (total_tq * sample_pnt) / 1000;
+       tseg2 = CLAMP(tseg2, min->phase_seg2, max->phase_seg2);

-       if (ts1 > ts1_max) {
-               ts1 = ts1_max;
-               ts2 = ts - CAN_SYNC_SEG - ts1;
-               if (ts2 > max->phase_seg2) {
+       /* Calculate number of time quanta in tseg1 */
+       tseg1 = total_tq - CAN_SYNC_SEG - tseg2;
+       if (tseg1 > tseg1_max) {
+               /* Sample point location must be decreased */
+               tseg1 = tseg1_max;
+               tseg2 = total_tq - CAN_SYNC_SEG - tseg1;
+               if (tseg2 > max->phase_seg2) {
                        return -1;
                }
-       } else if (ts1 < ts1_min) {
-               ts1 = ts1_min;
-               ts2 = ts - ts1;
-               if (ts2 < min->phase_seg2) {
+       } else if (tseg1 < tseg1_min) {
+               /* Sample point location must be increased */
+               tseg1 = tseg1_min;
+               tseg2 = total_tq - CAN_SYNC_SEG - tseg1;
+               if (tseg2 < min->phase_seg2) {
                        return -1;
                }
        }

-       res->prop_seg = CLAMP(ts1 / 2, min->prop_seg, max->prop_seg);
-       res->phase_seg1 = ts1 - res->prop_seg;
-       res->phase_seg2 = ts2;
+       res->phase_seg2 = tseg2;

-       sp_calc = (CAN_SYNC_SEG + ts1) * 1000 / ts;
+       /* Attempt to distribute tseg1 evenly between prop_seq and phase_seg1 */
+       res->prop_seg = CLAMP(tseg1 / 2, min->prop_seg, max->prop_seg);
+       res->phase_seg1 = tseg1 - res->prop_seg;

-       return sp_calc > sp ? sp_calc - sp : sp - sp_calc;
+       if (res->phase_seg1 > max->phase_seg1) {
+               /* Even tseg1 distribution not possible, decrease phase_seg1 */
+               res->phase_seg1 = max->phase_seg1;
+               res->prop_seg = tseg1 - res->phase_seg1;
+       } else if (res->phase_seg1 < min->phase_seg1) {
+               /* Even tseg1 distribution not possible, increase phase_seg1 */
+               res->phase_seg1 = min->phase_seg1;
+               res->prop_seg = tseg1 - res->phase_seg1;
+       }
+
+       /* Calculate the resulting sample point */
+       sample_pnt_res = (CAN_SYNC_SEG + tseg1) * 1000 / total_tq;
+
+       /* Return the absolute sample point error */
+       return sample_pnt_res > sample_pnt ?
+              sample_pnt_res - sample_pnt :
+              sample_pnt - sample_pnt_res;
 }

 /* Internal function to do the actual calculation */
```
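The rewritten helper derives tseg2 from the requested sample point (given in permille of the nominal bit time), derives tseg1 from what remains after the one-quantum sync segment, and returns the absolute error of the resulting sample point. The following is a minimal standalone sketch of just that arithmetic, with the clamping against the controller's min/max limits left out and CAN_SYNC_SEG assumed to be one time quantum:

```c
#include <stdio.h>
#include <stdint.h>

#define CAN_SYNC_SEG 1U /* the sync segment is always one time quantum */

int main(void)
{
	/* Example: 16 time quanta per bit, sample point requested at 87.5% (875 permille) */
	uint32_t total_tq = 16;
	uint32_t sample_pnt = 875;

	/* Time quanta after the sample point (phase_seg2) */
	uint32_t tseg2 = total_tq - (total_tq * sample_pnt) / 1000;  /* 16 - 14 = 2 */
	/* Time quanta between the sync segment and the sample point (prop_seg + phase_seg1) */
	uint32_t tseg1 = total_tq - CAN_SYNC_SEG - tseg2;            /* 16 - 1 - 2 = 13 */
	/* Resulting sample point in permille */
	uint32_t sp_res = (CAN_SYNC_SEG + tseg1) * 1000 / total_tq;  /* 14 * 1000 / 16 = 875 */

	printf("tseg1=%u tseg2=%u sample point=%u permille (error=%u)\n",
	       tseg1, tseg2, sp_res,
	       sp_res > sample_pnt ? sp_res - sample_pnt : sample_pnt - sp_res);

	return 0;
}
```

In the driver itself the clamping steps can push tseg1/tseg2 away from these ideal values, which is exactly why the function reports the sample point error back to its caller.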
```diff
@@ -213,7 +213,7 @@ static void can_loopback_remove_rx_filter(const struct device *dev, int filter_i
 {
        struct can_loopback_data *data = dev->data;

-       if (filter_id >= ARRAY_SIZE(data->filters)) {
+       if (filter_id < 0 || filter_id >= ARRAY_SIZE(data->filters)) {
                LOG_ERR("filter ID %d out-of-bounds", filter_id);
                return;
        }
```

```diff
@@ -510,7 +510,8 @@ int can_mcan_init(const struct device *dev)
 #ifdef CONFIG_CAN_STM32FD
        can->ils = CAN_MCAN_ILS_RXFIFO0 | CAN_MCAN_ILS_RXFIFO1;
 #else
-       can->ils = CAN_MCAN_ILS_RF0N | CAN_MCAN_ILS_RF1N;
+       can->ils = CAN_MCAN_ILS_RF0N | CAN_MCAN_ILS_RF1N |
+                  CAN_MCAN_ILS_RF0L | CAN_MCAN_ILS_RF1L;
 #endif
        can->ile = CAN_MCAN_ILE_EINT0 | CAN_MCAN_ILE_EINT1;
        /* Interrupt on every TX fifo element*/
@@ -1109,12 +1110,17 @@ void can_mcan_remove_rx_filter(const struct device *dev, int filter_id)
 {
        struct can_mcan_data *data = dev->data;
        struct can_mcan_msg_sram *msg_ram = data->msg_ram;
+
+       if (filter_id < 0) {
+               LOG_ERR("filter ID %d out of bounds", filter_id);
+               return;
+       }

        k_mutex_lock(&data->inst_mutex, K_FOREVER);
        if (filter_id >= NUM_STD_FILTER_DATA) {
                filter_id -= NUM_STD_FILTER_DATA;
                if (filter_id >= NUM_EXT_FILTER_DATA) {
-                       LOG_ERR("Wrong filter id");
+                       LOG_ERR("filter ID %d out of bounds", filter_id);
                        return;
                }
```

```diff
@@ -674,6 +674,11 @@ static void mcp2515_remove_rx_filter(const struct device *dev, int filter_id)
 {
        struct mcp2515_data *dev_data = dev->data;

+       if (filter_id < 0 || filter_id >= CONFIG_CAN_MAX_FILTER) {
+               LOG_ERR("filter ID %d out of bounds", filter_id);
+               return;
+       }
+
        k_mutex_lock(&dev_data->mutex, K_FOREVER);
        dev_data->filter_usage &= ~BIT(filter_id);
        k_mutex_unlock(&dev_data->mutex);
```
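Several of the remove_rx_filter() hunks above and below add the same guard: a filter ID can be negative (for example when a caller passes back the -ENOSPC it received from a failed can_add_rx_filter()), so it has to be range-checked before being used as an array index or a bit position. A hypothetical minimal sketch of that pattern, with the name and array size made up for illustration:

```c
#include <stdio.h>

#define MAX_FILTERS 5 /* hypothetical filter array size for this sketch */

/* Stand-in for the driver guard: reject negative IDs as well as IDs past
 * the end of the filter table before touching any per-filter state.
 */
static void remove_rx_filter(int filter_id)
{
	if (filter_id < 0 || filter_id >= MAX_FILTERS) {
		printf("filter ID %d out of bounds\n", filter_id);
		return;
	}

	printf("removing filter %d\n", filter_id);
}

int main(void)
{
	remove_rx_filter(-28); /* e.g. an -ENOSPC error code passed back in by mistake */
	remove_rx_filter(3);
	return 0;
}
```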
```diff
@@ -514,7 +514,7 @@ static int mcux_flexcan_add_rx_filter(const struct device *dev,
        }

        if (alloc == -ENOSPC) {
-               return alloc;
+               goto unlock;
        }

        mcux_flexcan_can_filter_to_mbconfig(filter, &data->rx_cbs[alloc].mb_config,
@@ -543,6 +543,7 @@
                alloc = -ENOSPC;
        }

+unlock:
        k_mutex_unlock(&data->rx_mutex);

        return alloc;
@@ -603,9 +604,8 @@ static void mcux_flexcan_remove_rx_filter(const struct device *dev, int filter_i
        const struct mcux_flexcan_config *config = dev->config;
        struct mcux_flexcan_data *data = dev->data;

-       if (filter_id >= MCUX_FLEXCAN_MAX_RX) {
-               LOG_ERR("Detach: Filter id >= MAX_RX (%d >= %d)", filter_id,
-                       MCUX_FLEXCAN_MAX_RX);
+       if (filter_id < 0 || filter_id >= MCUX_FLEXCAN_MAX_RX) {
+               LOG_ERR("filter ID %d out of bounds", filter_id);
                return;
        }
```

```diff
@@ -241,6 +241,7 @@ static void can_npl_remove_rx_filter(const struct device *dev, int filter_id)
        struct can_npl_data *data = dev->data;

+       if (filter_id < 0 || filter_id >= ARRAY_SIZE(data->filters)) {
+               LOG_ERR("filter ID %d out of bounds");
+               return;
+       }
```

```diff
@@ -965,7 +965,8 @@ static void can_rcar_remove_rx_filter(const struct device *dev, int filter_id)
 {
        struct can_rcar_data *data = dev->data;

-       if (filter_id >= CONFIG_CAN_RCAR_MAX_FILTER) {
+       if (filter_id < 0 || filter_id >= CONFIG_CAN_RCAR_MAX_FILTER) {
+               LOG_ERR("filter ID %d out of bounds", filter_id);
                return;
        }
```

```diff
@@ -59,6 +59,14 @@ static inline int can_sja1000_enter_reset_mode(const struct device *dev)
        return 0;
 }

+static inline void can_sja1000_leave_reset_mode_nowait(const struct device *dev)
+{
+       uint8_t mod;
+
+       mod = can_sja1000_read_reg(dev, CAN_SJA1000_MOD);
+       can_sja1000_write_reg(dev, CAN_SJA1000_MOD, mod & ~(CAN_SJA1000_MOD_RM));
+}
+
 static inline int can_sja1000_leave_reset_mode(const struct device *dev)
 {
        int retries = CAN_SJA1000_RESET_MODE_RETRIES;
@@ -610,7 +618,7 @@ static void can_sja1000_handle_error_warning_irq(const struct device *dev)
                can_sja1000_tx_done(dev, -ENETUNREACH);
 #ifdef CONFIG_CAN_AUTO_BUS_OFF_RECOVERY
                if (data->started) {
-                       (void)can_sja1000_leave_reset_mode(dev);
+                       can_sja1000_leave_reset_mode_nowait(dev);
                }
 #endif /* CONFIG_CAN_AUTO_BUS_OFF_RECOVERY */
        } else if ((sr & CAN_SJA1000_SR_ES) != 0) {
```

```diff
@@ -988,7 +988,10 @@ static void can_stm32_remove_rx_filter(const struct device *dev, int filter_id)
        int bank_num;
        bool bank_unused;

-       __ASSERT_NO_MSG(filter_id >= 0 && filter_id < CAN_STM32_MAX_FILTER_ID);
+       if (filter_id < 0 || filter_id >= CAN_STM32_MAX_FILTER_ID) {
+               LOG_ERR("filter ID %d out of bounds", filter_id);
+               return;
+       }

        k_mutex_lock(&filter_mutex, K_FOREVER);
        k_mutex_lock(&data->inst_mutex, K_FOREVER);
@@ -1067,10 +1070,10 @@ static const struct can_driver_api can_api_funcs = {
                .prescaler = 0x01
        },
        .timing_max = {
-               .sjw = 0x07,
+               .sjw = 0x04,
                .prop_seg = 0x00,
-               .phase_seg1 = 0x0F,
-               .phase_seg2 = 0x07,
+               .phase_seg1 = 0x10,
+               .phase_seg2 = 0x08,
                .prescaler = 0x400
        }
 };
@@ -1132,8 +1135,8 @@ static const struct can_stm32_config can_stm32_cfg_##inst = { \
        }, \
        .config_irq = config_can_##inst##_irq, \
        .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(inst), \
-       .phy = DEVICE_DT_GET_OR_NULL(DT_INST_PHANDLE(id, phys)), \
-       .max_bitrate = DT_INST_CAN_TRANSCEIVER_MAX_BITRATE(id, 1000000), \
+       .phy = DEVICE_DT_GET_OR_NULL(DT_INST_PHANDLE(inst, phys)), \
+       .max_bitrate = DT_INST_CAN_TRANSCEIVER_MAX_BITRATE(inst, 1000000), \
 };

 #define CAN_STM32_DATA_INST(inst) \
```

```diff
@@ -73,8 +73,7 @@ static uint32_t get_pll_div_frequency(uint32_t pllsrc_freq,
 {
        __ASSERT_NO_MSG(pllm_div && pllout_div);

-       return (pllsrc_freq * plln_mul) /
-              (pllm_div * pllout_div);
+       return pllsrc_freq / pllm_div * plln_mul / pllout_div;
 }

 static uint32_t get_bus_clock(uint32_t clock, uint32_t prescaler)
```
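The get_pll_div_frequency() change reorders the integer math, presumably so the intermediate value can no longer overflow 32 bits: dividing the PLL source frequency by pllm first keeps the running value small before the multiplication by plln. A small sketch of the difference, using 64-bit math only to show what the old 32-bit product would have been (the example PLL settings are illustrative, not taken from a specific SoC):

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Illustrative STM32-style PLL settings: 25 MHz HSE, /M=25, xN=432, /P=2 */
	uint32_t pllsrc_freq = 25000000U;
	uint32_t pllm_div = 25U;
	uint32_t plln_mul = 432U;
	uint32_t pllout_div = 2U;

	/* Old ordering: pllsrc_freq * plln_mul is computed first. */
	uint64_t intermediate = (uint64_t)pllsrc_freq * plln_mul; /* 10 800 000 000 */
	printf("product %llu %s UINT32_MAX\n",
	       (unsigned long long)intermediate,
	       intermediate > UINT32_MAX ? "overflows" : "fits in");

	/* New ordering: divide by pllm_div first, then multiply, then divide. */
	uint32_t freq = pllsrc_freq / pllm_div * plln_mul / pllout_div;
	printf("PLL output: %u Hz\n", freq); /* 216 000 000 */

	return 0;
}
```

Dividing by pllm_div first is normally lossless here, since the input divider is chosen so the source frequency is an exact multiple of it.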
```diff
@@ -490,7 +490,7 @@ static void uart_console_isr(const struct device *unused, void *user_data)
                }

                /* Handle special control characters */
-               if (!isprint(byte)) {
+               if (isprint(byte) == 0) {
                        switch (byte) {
                        case BS:
                        case DEL:
```

```diff
@@ -19,7 +19,7 @@
 #include <zephyr/device.h>
 #include <zephyr/drivers/dac.h>
 #include <zephyr/drivers/pinctrl.h>

 #include <zephyr/kernel.h>
 #include <zephyr/logging/log.h>
 #include <zephyr/irq.h>
 LOG_MODULE_REGISTER(dac_sam, CONFIG_DAC_LOG_LEVEL);
```

```diff
@@ -774,7 +774,7 @@ static int spi_nor_erase(const struct device *dev, off_t addr, size_t size)
                if ((etp->exp != 0)
                    && SPI_NOR_IS_ALIGNED(addr, etp->exp)
                    && SPI_NOR_IS_ALIGNED(size, etp->exp)
+                   && (size >= BIT(etp->exp))
                    && ((bet == NULL)
                        || (etp->exp > bet->exp))) {
                        bet = etp;
```

```diff
@@ -43,6 +43,19 @@ config INTC_INIT_PRIORITY
	help
	  Interrupt controller device initialization priority.

+if MCHP_ECIA_XEC
+
+config XEC_GIRQ_INIT_PRIORITY
+	int "XEX GIRQ Interrupt controller init priority"
+	default 41
+	help
+	  XEC GIRQ Interrupt controller device initialization priority.
+	  The priority value needs to be greater than INTC_INIT_PRIORITY
+	  So that the XEC GIRQ controllers are initialized after the
+	  xec_ecia.
+
+endif
+
 module = INTC
 module-str = intc
 source "subsys/logging/Kconfig.template.log_config"
```

```diff
@@ -67,7 +67,7 @@ static int arc_shared_intc_update_post_smp(const struct device *unused)

        for (uint32_t i = 0; i < (CONFIG_NUM_IRQS - ARC_CONNECT_IDU_IRQ_START); i++) {
                /* TODO: take arc_connect_spinlock one time to avoid locking/unlocking every time */
-               z_arc_connect_idu_set_dest(i, GENMASK(CONFIG_MP_NUM_CPUS, 0));
+               z_arc_connect_idu_set_dest(i, BIT_MASK(arch_num_cpus()));
        }

        z_arc_connect_idu_enable();
```

```diff
@@ -573,7 +573,7 @@ static int xec_ecia_init(const struct device *dev)
        \
        DEVICE_DT_DEFINE(n, xec_girq_init_##n, \
                NULL, &xec_data_girq_##n, &xec_config_girq_##n, \
-               PRE_KERNEL_1, CONFIG_INTC_INIT_PRIORITY, \
+               PRE_KERNEL_1, CONFIG_XEC_GIRQ_INIT_PRIORITY, \
                NULL); \
        \
        static int xec_girq_init_##n(const struct device *dev) \
```

```diff
@@ -56,7 +56,7 @@ static int send(const struct device *dev, int wait, uint32_t id,
                return -EBUSY;
        }

-       if (size > MAX_MSG) {
+       if ((size < 0) || (size > MAX_MSG)) {
                return -EMSGSIZE;
        }
```

```diff
@@ -164,7 +164,7 @@ static int imx_mu_ipm_send(const struct device *dev, int wait, uint32_t id,
 {
        const struct imx_mu_config *config = dev->config;
        MU_Type *base = MU(config);
-       uint32_t data32[IMX_IPM_DATA_REGS];
+       uint32_t data32[IMX_IPM_DATA_REGS] = {0};
 #if !IS_ENABLED(CONFIG_IPM_IMX_REV2)
        mu_status_t status;
 #endif
@@ -174,7 +174,7 @@ static int imx_mu_ipm_send(const struct device *dev, int wait, uint32_t id,
                return -EINVAL;
        }

-       if (size > CONFIG_IPM_IMX_MAX_DATA_SIZE) {
+       if ((size < 0) || (size > CONFIG_IPM_IMX_MAX_DATA_SIZE)) {
                return -EMSGSIZE;
        }
```

```diff
@@ -80,9 +80,8 @@ static int mcux_mailbox_ipm_send(const struct device *d, int wait,
 {
        const struct mcux_mailbox_config *config = d->config;
        MAILBOX_Type *base = config->base;
-       uint32_t data32[MCUX_IPM_DATA_REGS]; /* Until we change API
-                                             * to uint32_t array
-                                             */
+       /* Until we change API to uint32_t array */
+       uint32_t data32[MCUX_IPM_DATA_REGS] = {0};
        unsigned int flags;
        int i;
@@ -92,7 +91,7 @@ static int mcux_mailbox_ipm_send(const struct device *d, int wait,
                return -EINVAL;
        }

-       if (size > MCUX_IPM_DATA_REGS * sizeof(uint32_t)) {
+       if ((size < 0) || (size > MCUX_IPM_DATA_REGS * sizeof(uint32_t))) {
                return -EMSGSIZE;
        }
```

```diff
@@ -65,9 +65,9 @@ static void sys_timer_isr(const void *arg)
        k_spinlock_key_t key = k_spin_lock(&lock);
        uint64_t now = get_systimer_alarm();

-       uint32_t dticks = (uint32_t)((now - last_count) / CYC_PER_TICK);
+       uint64_t dticks = (uint64_t)((now - last_count) / CYC_PER_TICK);

-       last_count = now;
+       last_count += dticks * CYC_PER_TICK;

        if (!TICKLESS) {
                uint64_t next = last_count + CYC_PER_TICK;
@@ -79,7 +79,7 @@ static void sys_timer_isr(const void *arg)
        }

        k_spin_unlock(&lock, key);
-       sys_clock_announce(IS_ENABLED(CONFIG_TICKLESS_KERNEL) ? dticks : 1);
+       sys_clock_announce(dticks);
 }

 void sys_clock_set_timeout(int32_t ticks, bool idle)
```

```diff
@@ -17,13 +17,16 @@
 #ifdef CONFIG_HARVARD
 #define ROMABLE_REGION ICCM
 #define RAMABLE_REGION DCCM
+#define ROM_RAM_IN_SAME_REGION 0
 #else
 #if defined(CONFIG_XIP) && (FLASH_SIZE != 0)
 #define ROMABLE_REGION FLASH
 #define RAMABLE_REGION SRAM
+#define ROM_RAM_IN_SAME_REGION 0
 #else
 #define ROMABLE_REGION SRAM
 #define RAMABLE_REGION SRAM
+#define ROM_RAM_IN_SAME_REGION 1
 #endif
 #endif
@@ -137,7 +140,13 @@ SECTIONS {
	_ectors = .;
 #endif /* CONFIG_CPP && !CONFIG_CPP_STATIC_INIT_GNU && __MWDT_LINKER_CMD__ */

+	/* This extra MPU alignment of RAMABLE_REGION is only required if we put ROMABLE_REGION and
+	 * RAMABLE_REGION into the same (continuous) memory - otherwise we can get beginning of the
+	 * RAMABLE_REGION in the end of ROMABLE_REGION MPU aperture.
+	 */
+#if ROM_RAM_IN_SAME_REGION
+	MPU_ALIGN(ABSOLUTE(.) - __rom_region_start);
+#endif
	} GROUP_LINK_IN(ROMABLE_REGION)

	__rodata_region_end = .;
```

```diff
@@ -699,8 +699,8 @@
 * Example usage:
 *
 * @code{.c}
- *     DT_PROP_HAS_NAME(nx, foos, event) // 1
- *     DT_PROP_HAS_NAME(nx, foos, failure) // 0
+ *     DT_PROP_HAS_NAME(DT_NODELABEL(nx), foos, event) // 1
+ *     DT_PROP_HAS_NAME(DT_NODELABEL(nx), foos, failure) // 0
 * @endcode
 *
 * @param node_id node identifier
```

```diff
@@ -1357,7 +1357,7 @@ static inline uint8_t can_dlc_to_bytes(uint8_t dlc)
        static const uint8_t dlc_table[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 12,
                                            16, 20, 24, 32, 48, 64};

-       return dlc > 0x0F ? 64 : dlc_table[dlc];
+       return dlc_table[MIN(dlc, ARRAY_SIZE(dlc_table) - 1)];
 }

 /**
```
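The can_dlc_to_bytes() change replaces the open-coded `dlc > 0x0F` test with an index clamped against the table size, so any out-of-range DLC still maps to the last entry (64 bytes). A small standalone sketch of the same lookup, with MIN and ARRAY_SIZE spelled out as plain macros rather than the Zephyr ones:

```c
#include <stdio.h>
#include <stdint.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* CAN FD DLC (0..15) to payload length in bytes */
static uint8_t dlc_to_bytes(uint8_t dlc)
{
	static const uint8_t dlc_table[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 12,
					    16, 20, 24, 32, 48, 64};

	/* Clamp the index so DLC values above 15 still land on the 64-byte entry */
	return dlc_table[MIN(dlc, ARRAY_SIZE(dlc_table) - 1)];
}

int main(void)
{
	printf("dlc 8  -> %d bytes\n", dlc_to_bytes(8));  /* 8 */
	printf("dlc 13 -> %d bytes\n", dlc_to_bytes(13)); /* 32 */
	printf("dlc 20 -> %d bytes\n", dlc_to_bytes(20)); /* clamped to 64 */
	return 0;
}
```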
```diff
@@ -72,26 +72,29 @@
 #define RTC_SEL(val)	STM32_CLOCK(val, 3, 8, BDCR_REG)

 /** @brief RCC_DKCFGR register offset */
-#define DKCFGR1_REG	0x54
-#define DKCFGR2_REG	0x58
+#define DCKCFGR1_REG	0x8C
+#define DCKCFGR2_REG	0x90

 /** @brief Dedicated clocks configuration register selection helpers */
 /** DKCFGR2 devices */
-#define USART1_SEL(val)	STM32_CLOCK(val, 3, 0, DKCFGR2_REG)
-#define USART2_SEL(val)	STM32_CLOCK(val, 3, 2, DKCFGR2_REG)
-#define USART3_SEL(val)	STM32_CLOCK(val, 3, 4, DKCFGR2_REG)
-#define USART4_SEL(val)	STM32_CLOCK(val, 1, 6, DKCFGR2_REG)
-#define USART5_SEL(val)	STM32_CLOCK(val, 3, 8, DKCFGR2_REG)
-#define USART6_SEL(val)	STM32_CLOCK(val, 3, 10, DKCFGR2_REG)
-#define USART7_SEL(val)	STM32_CLOCK(val, 3, 12, DKCFGR2_REG)
-#define USART8_SEL(val)	STM32_CLOCK(val, 3, 14, DKCFGR2_REG)
-#define I2C1_SEL(val)	STM32_CLOCK(val, 3, 16, DKCFGR2_REG)
-#define I2C2_SEL(val)	STM32_CLOCK(val, 3, 18, DKCFGR2_REG)
-#define I2C3_SEL(val)	STM32_CLOCK(val, 3, 20, DKCFGR2_REG)
-#define LPTIM1_SEL(val)	STM32_CLOCK(val, 3, 24, DKCFGR2_REG)
-#define CK48M_SEL(val)	STM32_CLOCK(val, 1, 27, DKCFGR2_REG)
-#define SDMMC1_SEL(val)	STM32_CLOCK(val, 1, 28, DKCFGR2_REG)
-#define SDMMC2_SEL(val)	STM32_CLOCK(val, 1, 29, DKCFGR2_REG)
+#define USART1_SEL(val)	STM32_CLOCK(val, 3, 0, DCKCFGR2_REG)
+#define USART2_SEL(val)	STM32_CLOCK(val, 3, 2, DCKCFGR2_REG)
+#define USART3_SEL(val)	STM32_CLOCK(val, 3, 4, DCKCFGR2_REG)
+#define USART4_SEL(val)	STM32_CLOCK(val, 3, 6, DCKCFGR2_REG)
+#define USART5_SEL(val)	STM32_CLOCK(val, 3, 8, DCKCFGR2_REG)
+#define USART6_SEL(val)	STM32_CLOCK(val, 3, 10, DCKCFGR2_REG)
+#define USART7_SEL(val)	STM32_CLOCK(val, 3, 12, DCKCFGR2_REG)
+#define USART8_SEL(val)	STM32_CLOCK(val, 3, 14, DCKCFGR2_REG)
+#define I2C1_SEL(val)	STM32_CLOCK(val, 3, 16, DCKCFGR2_REG)
+#define I2C2_SEL(val)	STM32_CLOCK(val, 3, 18, DCKCFGR2_REG)
+#define I2C3_SEL(val)	STM32_CLOCK(val, 3, 20, DCKCFGR2_REG)
+#define I2C4_SEL(val)	STM32_CLOCK(val, 3, 22, DCKCFGR2_REG)
+#define LPTIM1_SEL(val)	STM32_CLOCK(val, 3, 24, DCKCFGR2_REG)
+#define CEC_SEL(val)	STM32_CLOCK(val, 1, 26, DCKCFGR2_REG)
+#define CK48M_SEL(val)	STM32_CLOCK(val, 1, 27, DCKCFGR2_REG)
+#define SDMMC1_SEL(val)	STM32_CLOCK(val, 1, 28, DCKCFGR2_REG)
+#define SDMMC2_SEL(val)	STM32_CLOCK(val, 1, 29, DCKCFGR2_REG)
+#define DSI_SEL(val)	STM32_CLOCK(val, 1, 30, DCKCFGR2_REG)
 /** Dummy: Add a specificier when no selection is possible */
 #define NO_SEL	0xFF
```

```diff
@@ -78,22 +78,31 @@ static void canopen_detach_all_rx_filters(CO_CANmodule_t *CANmodule)
        }
 }

-static void canopen_rx_callback(const struct device *dev, struct can_frame *frame, void *arg)
+static void canopen_rx_callback(const struct device *dev, struct can_frame *frame, void *user_data)
 {
-       CO_CANrx_t *buffer = (CO_CANrx_t *)arg;
+       CO_CANmodule_t *CANmodule = (CO_CANmodule_t *)user_data;
        CO_CANrxMsg_t rxMsg;
+       CO_CANrx_t *buffer;
+       int i;

        ARG_UNUSED(dev);

-       if (!buffer || !buffer->pFunct) {
-               LOG_ERR("failed to process CAN rx callback");
-               return;
-       }
+       /* Loop through registered rx buffers in priority order */
+       for (i = 0; i < CANmodule->rx_size; i++) {
+               buffer = &CANmodule->rx_array[i];

-       rxMsg.ident = frame->id;
-       rxMsg.DLC = frame->dlc;
-       memcpy(rxMsg.data, frame->data, frame->dlc);
-       buffer->pFunct(buffer->object, &rxMsg);
+               if (buffer->filter_id == -ENOSPC || buffer->pFunct == NULL) {
+                       continue;
+               }
+
+               if (((frame->id ^ buffer->ident) & buffer->mask) == 0U) {
+                       rxMsg.ident = frame->id;
+                       rxMsg.DLC = frame->dlc;
+                       memcpy(rxMsg.data, frame->data, frame->dlc);
+                       buffer->pFunct(buffer->object, &rxMsg);
+                       break;
+               }
+       }
 }

 static void canopen_tx_callback(const struct device *dev, int error, void *arg)
```
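With the callback now receiving the whole CO_CANmodule_t instead of a single buffer, the hunk above matches each incoming frame against the registered buffers in software using the classic ID/mask acceptance test: a frame is accepted when every bit selected by the mask equals the corresponding bit of the configured ident. A minimal standalone sketch of that test, with hypothetical filter values chosen for illustration:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Classic CAN acceptance-filter test: bits where the mask is 1 must match ident. */
static bool filter_match(uint32_t frame_id, uint32_t ident, uint32_t mask)
{
	return ((frame_id ^ ident) & mask) == 0U;
}

int main(void)
{
	/* Accept the 11-bit ID range 0x700..0x77F by comparing only the upper bits */
	uint32_t ident = 0x700;
	uint32_t mask = 0x780;

	printf("0x701: %s\n", filter_match(0x701, ident, mask) ? "match" : "no match"); /* match */
	printf("0x181: %s\n", filter_match(0x181, ident, mask) ? "match" : "no match"); /* no match */

	return 0;
}
```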
@@ -298,6 +307,8 @@ CO_ReturnError_t CO_CANrxBufferInit(CO_CANmodule_t *CANmodule, uint16_t index,
|
||||
buffer = &CANmodule->rx_array[index];
|
||||
buffer->object = object;
|
||||
buffer->pFunct = pFunct;
|
||||
buffer->ident = ident;
|
||||
buffer->mask = mask;
|
||||
|
||||
filter.flags = (rtr ? CAN_FILTER_RTR : CAN_FILTER_DATA);
|
||||
filter.id = ident;
|
||||
@@ -309,7 +320,7 @@ CO_ReturnError_t CO_CANrxBufferInit(CO_CANmodule_t *CANmodule, uint16_t index,
|
||||
|
||||
buffer->filter_id = can_add_rx_filter(CANmodule->dev,
|
||||
canopen_rx_callback,
|
||||
buffer, &filter);
|
||||
CANmodule, &filter);
|
||||
if (buffer->filter_id == -ENOSPC) {
|
||||
LOG_ERR("failed to add CAN rx callback, no free filter");
|
||||
CO_errorReport(CANmodule->em, CO_EM_MEMORY_ALLOCATION_ERROR,
|
||||
|
||||
@@ -66,6 +66,7 @@ typedef struct canopen_rx {
|
||||
void *object;
|
||||
CO_CANrxBufferCallback_t pFunct;
|
||||
uint16_t ident;
|
||||
uint16_t mask;
|
||||
} CO_CANrx_t;
|
||||
|
||||
typedef struct canopen_tx {
|
||||
|
||||
13
samples/boards/litex/i2s/sample.yaml
Normal file
13
samples/boards/litex/i2s/sample.yaml
Normal file
@@ -0,0 +1,13 @@
|
||||
sample:
|
||||
description: I2S sample
|
||||
|
||||
name: i2s example
|
||||
tests:
|
||||
sample.drivers.i2s.litex:
|
||||
tags: introduction
|
||||
harness: console
|
||||
platform_allow: litex_vexriscv
|
||||
harness_config:
|
||||
type: one_line
|
||||
regex:
|
||||
- "i2s example (.*)"
|
||||
@@ -1,11 +0,0 @@
|
||||
sample:
|
||||
description: I2S sample
|
||||
|
||||
name: i2s example
|
||||
common:
|
||||
tags: introduction
|
||||
harness: console
|
||||
harness_config:
|
||||
type: one_line
|
||||
regex:
|
||||
- "i2s example (.*)"
|
||||
@@ -22,8 +22,6 @@ CONFIG_HEAP_MEM_POOL_SIZE=8192
|
||||
|
||||
CONFIG_JSON_LIBRARY=y
|
||||
|
||||
CONFIG_NET_TEST_PROTOCOL=y
|
||||
|
||||
CONFIG_THREAD_NAME=y
|
||||
|
||||
CONFIG_ENTROPY_GENERATOR=y
|
||||
|
||||
@@ -1,3 +1,10 @@
|
||||
sample:
|
||||
description: A sample for running TTCN-3 based sanity check for TCP
|
||||
name: tcp
|
||||
tests:
|
||||
sample.net.socket.tcp:
|
||||
harness: net
|
||||
platform_allow: qemu_x86
|
||||
tags: socket tcp
|
||||
integration_platforms:
|
||||
- qemu_x86
|
||||
|
||||
@@ -140,7 +140,7 @@ static int cmd_demo_getopt_ts(const struct shell *sh, size_t argc,
|
||||
shell_print(sh,
|
||||
"Option -%c requires an argument.",
|
||||
state->optopt);
|
||||
} else if (isprint(state->optopt)) {
|
||||
} else if (isprint(state->optopt) != 0) {
|
||||
shell_print(sh,
|
||||
"Unknown option `-%c'.",
|
||||
state->optopt);
|
||||
@@ -190,7 +190,7 @@ static int cmd_demo_getopt(const struct shell *sh, size_t argc,
|
||||
shell_print(sh,
|
||||
"Option -%c requires an argument.",
|
||||
optopt);
|
||||
} else if (isprint(optopt)) {
|
||||
} else if (isprint(optopt) != 0) {
|
||||
shell_print(sh, "Unknown option `-%c'.",
|
||||
optopt);
|
||||
} else {
|
||||
|
||||
@@ -237,6 +237,10 @@ class Gcovr(CoverageTool):
|
||||
|
||||
|
||||
def run_coverage(testplan, options):
|
||||
for plat in options.coverage_platform:
|
||||
_plat = testplan.get_platform(plat)
|
||||
if _plat and (_plat.type in {"native", "unit"}):
|
||||
use_system_gcov = True
|
||||
if not options.gcov_tool:
|
||||
zephyr_sdk_gcov_tool = os.path.join(
|
||||
os.environ.get("ZEPHYR_SDK_INSTALL_DIR", default=""),
|
||||
@@ -253,10 +257,10 @@ def run_coverage(testplan, options):
|
||||
except OSError:
|
||||
shutil.copy(llvm_cov, gcov_lnk)
|
||||
options.gcov_tool = gcov_lnk
|
||||
elif use_system_gcov:
|
||||
options.gcov_tool = "gcov"
|
||||
elif os.path.exists(zephyr_sdk_gcov_tool):
|
||||
options.gcov_tool = zephyr_sdk_gcov_tool
|
||||
else:
|
||||
options.gcov_tool = "gcov"
|
||||
|
||||
logger.info("Generating coverage files...")
|
||||
coverage_tool = CoverageTool.factory(options.coverage_tool)
|
||||
|
||||
@@ -462,14 +462,17 @@ class Reporting:
|
||||
pass_rate = 0
|
||||
|
||||
logger.info(
|
||||
"{}{} of {}{} test configurations passed ({:.2%}), {}{}{} failed, {} skipped with {}{}{} warnings in {:.2f} seconds".format(
|
||||
"{}{} of {}{} test configurations passed ({:.2%}), {}{}{} failed, {}{}{} errored, {} skipped with {}{}{} warnings in {:.2f} seconds".format(
|
||||
Fore.RED if failed else Fore.GREEN,
|
||||
results.passed,
|
||||
results.total,
|
||||
Fore.RESET,
|
||||
pass_rate,
|
||||
Fore.RED if results.failed else Fore.RESET,
|
||||
results.failed + results.error,
|
||||
results.failed,
|
||||
Fore.RESET,
|
||||
Fore.RED if results.error else Fore.RESET,
|
||||
results.error,
|
||||
Fore.RESET,
|
||||
results.skipped_configs,
|
||||
Fore.YELLOW if self.plan.warnings else Fore.RESET,
|
||||
|
||||
@@ -50,6 +50,9 @@ class ExecutionCounter(object):
|
||||
# updated by report_out()
|
||||
self._done = Value('i', 0)
|
||||
|
||||
# iteration
|
||||
self._iteration = Value('i', 0)
|
||||
|
||||
# instances that actually executed and passed
|
||||
# updated by report_out()
|
||||
self._passed = Value('i', 0)
|
||||
@@ -76,24 +79,24 @@ class ExecutionCounter(object):
|
||||
# initialized to number of test instances
|
||||
self._total = Value('i', total)
|
||||
|
||||
# updated in update_counting_after_pipeline()
|
||||
# updated in report_out
|
||||
self._cases = Value('i', 0)
|
||||
self.lock = Lock()
|
||||
|
||||
def summary(self):
|
||||
logger.debug("--------------------------------")
|
||||
logger.debug(f"Total test suites: {self.total}") # actually test instances
|
||||
logger.debug(f"Total test cases: {self.cases}")
|
||||
logger.debug(f"Executed test cases: {self.cases - self.skipped_cases}")
|
||||
logger.debug(f"Skipped test cases: {self.skipped_cases}")
|
||||
logger.debug(f"Completed test suites: {self.done}")
|
||||
logger.debug(f"Passing test suites: {self.passed}")
|
||||
logger.debug(f"Failing test suites: {self.failed}")
|
||||
logger.debug(f"Skipped test suites: {self.skipped_configs}")
|
||||
logger.debug(f"Skipped test suites (runtime): {self.skipped_runtime}")
|
||||
logger.debug(f"Skipped test suites (filter): {self.skipped_filter}")
|
||||
logger.debug(f"Errors: {self.error}")
|
||||
logger.debug("--------------------------------")
|
||||
print("--------------------------------")
|
||||
print(f"Total test suites: {self.total}") # actually test instances
|
||||
print(f"Total test cases: {self.cases}")
|
||||
print(f"Executed test cases: {self.cases - self.skipped_cases}")
|
||||
print(f"Skipped test cases: {self.skipped_cases}")
|
||||
print(f"Completed test suites: {self.done}")
|
||||
print(f"Passing test suites: {self.passed}")
|
||||
print(f"Failing test suites: {self.failed}")
|
||||
print(f"Skipped test suites: {self.skipped_configs}")
|
||||
print(f"Skipped test suites (runtime): {self.skipped_runtime}")
|
||||
print(f"Skipped test suites (filter): {self.skipped_filter}")
|
||||
print(f"Errors: {self.error}")
|
||||
print("--------------------------------")
|
||||
|
||||
@property
|
||||
def cases(self):
|
||||
@@ -125,6 +128,16 @@ class ExecutionCounter(object):
|
||||
with self._error.get_lock():
|
||||
self._error.value = value
|
||||
|
||||
@property
|
||||
def iteration(self):
|
||||
with self._iteration.get_lock():
|
||||
return self._iteration.value
|
||||
|
||||
@iteration.setter
|
||||
def iteration(self, value):
|
||||
with self._iteration.get_lock():
|
||||
self._iteration.value = value
|
||||
|
||||
@property
|
||||
def done(self):
|
||||
with self._done.get_lock():
|
||||
@@ -604,7 +617,7 @@ class ProjectBuilder(FilterBuilder):
|
||||
mode = message.get("mode")
|
||||
if mode == "device":
|
||||
self.cleanup_device_testing_artifacts()
|
||||
elif mode == "pass" or (mode == "all" and self.instance.reason != "Cmake build failure"):
|
||||
elif mode == "passed" or (mode == "all" and self.instance.reason != "Cmake build failure"):
|
||||
self.cleanup_artifacts()
|
||||
|
||||
def determine_testcases(self, results):
|
||||
@@ -718,21 +731,26 @@ class ProjectBuilder(FilterBuilder):
|
||||
total_tests_width = len(str(total_to_do))
|
||||
results.done += 1
|
||||
instance = self.instance
|
||||
if results.iteration == 1:
|
||||
results.cases += len(instance.testcases)
|
||||
|
||||
if instance.status in ["error", "failed"]:
|
||||
if instance.status == "error":
|
||||
results.error += 1
|
||||
txt = " ERROR "
|
||||
else:
|
||||
results.failed += 1
|
||||
txt = " FAILED "
|
||||
if self.options.verbose:
|
||||
status = Fore.RED + "FAILED " + Fore.RESET + instance.reason
|
||||
status = Fore.RED + txt + Fore.RESET + instance.reason
|
||||
else:
|
||||
print("")
|
||||
logger.error(
|
||||
"{:<25} {:<50} {}FAILED{}: {}".format(
|
||||
"{:<25} {:<50} {}{}{}: {}".format(
|
||||
instance.platform.name,
|
||||
instance.testsuite.name,
|
||||
Fore.RED,
|
||||
txt,
|
||||
Fore.RESET,
|
||||
instance.reason))
|
||||
if not self.options.verbose:
|
||||
@@ -771,9 +789,8 @@ class ProjectBuilder(FilterBuilder):
|
||||
and hasattr(self.instance.handler, 'seed')
|
||||
and self.instance.handler.seed is not None ):
|
||||
more_info += "/seed: " + str(self.options.seed)
|
||||
|
||||
logger.info("{:>{}}/{} {:<25} {:<50} {} ({})".format(
|
||||
results.done + results.skipped_filter, total_tests_width, total_to_do , instance.platform.name,
|
||||
results.done, total_tests_width, total_to_do , instance.platform.name,
|
||||
instance.testsuite.name, status, more_info))
|
||||
|
||||
if instance.status in ["error", "failed", "timeout"]:
|
||||
@@ -783,9 +800,9 @@ class ProjectBuilder(FilterBuilder):
|
||||
if total_to_do > 0:
|
||||
completed_perc = int((float(results.done + results.skipped_filter) / total_to_do) * 100)
|
||||
|
||||
sys.stdout.write("\rINFO - Total complete: %s%4d/%4d%s %2d%% skipped: %s%4d%s, failed: %s%4d%s" % (
|
||||
sys.stdout.write("\rINFO - Total complete: %s%4d/%4d%s %2d%% skipped: %s%4d%s, failed: %s%4d%s, error: %s%4d%s" % (
|
||||
Fore.GREEN,
|
||||
results.done + results.skipped_filter,
|
||||
results.done,
|
||||
total_to_do,
|
||||
Fore.RESET,
|
||||
completed_perc,
|
||||
@@ -794,6 +811,9 @@ class ProjectBuilder(FilterBuilder):
|
||||
Fore.RESET,
|
||||
Fore.RED if results.failed > 0 else Fore.RESET,
|
||||
results.failed,
|
||||
Fore.RESET,
|
||||
Fore.RED if results.error > 0 else Fore.RESET,
|
||||
results.error,
|
||||
Fore.RESET
|
||||
)
|
||||
)
|
||||
@@ -909,13 +929,13 @@ class TwisterRunner:
|
||||
def run(self):
|
||||
|
||||
retries = self.options.retry_failed + 1
|
||||
completed = 0
|
||||
|
||||
BaseManager.register('LifoQueue', queue.LifoQueue)
|
||||
manager = BaseManager()
|
||||
manager.start()
|
||||
|
||||
self.results = ExecutionCounter(total=len(self.instances))
|
||||
self.iteration = 0
|
||||
pipeline = manager.LifoQueue()
|
||||
done_queue = manager.LifoQueue()
|
||||
|
||||
@@ -943,17 +963,17 @@ class TwisterRunner:
|
||||
self.update_counting_before_pipeline()
|
||||
|
||||
while True:
|
||||
completed += 1
|
||||
self.results.iteration += 1
|
||||
|
||||
if completed > 1:
|
||||
logger.info("%d Iteration:" % (completed))
|
||||
if self.results.iteration > 1:
|
||||
logger.info("%d Iteration:" % (self.results.iteration))
|
||||
time.sleep(self.options.retry_interval) # waiting for the system to settle down
|
||||
self.results.done = self.results.total - self.results.failed
|
||||
self.results.done = self.results.total - self.results.failed - self.results.error
|
||||
self.results.failed = 0
|
||||
if self.options.retry_build_errors:
|
||||
self.results.failed = 0
|
||||
self.results.error = 0
|
||||
else:
|
||||
self.results.failed = self.results.error
|
||||
else:
|
||||
self.results.done = self.results.skipped_filter
|
||||
|
||||
self.execute(pipeline, done_queue)
|
||||
|
||||
@@ -970,13 +990,14 @@ class TwisterRunner:
|
||||
|
||||
print("")
|
||||
|
||||
retry_errors = False
|
||||
if self.results.error and self.options.retry_build_errors:
|
||||
retry_errors = True
|
||||
|
||||
retries = retries - 1
|
||||
# There are cases where failed == error (only build failures),
|
||||
# we do not try build failures.
|
||||
if retries == 0 or (self.results.failed == self.results.error and not self.options.retry_build_errors):
|
||||
if retries == 0 or ( self.results.failed == 0 and not retry_errors):
|
||||
break
|
||||
|
||||
self.update_counting_after_pipeline()
|
||||
self.show_brief()
|
||||
|
||||
def update_counting_before_pipeline(self):
|
||||
@@ -990,19 +1011,10 @@ class TwisterRunner:
|
||||
self.results.skipped_filter += 1
|
||||
self.results.skipped_configs += 1
|
||||
self.results.skipped_cases += len(instance.testsuite.testcases)
|
||||
self.results.cases += len(instance.testsuite.testcases)
|
||||
elif instance.status == 'error':
|
||||
self.results.error += 1
|
||||
|
||||
def update_counting_after_pipeline(self):
|
||||
'''
|
||||
Updating counting after pipeline is necessary because the number of test cases
|
||||
of a test instance will be refined based on zephyr.symbols as it goes through the
|
||||
pipeline. While the testsuite.testcases is obtained by scanning the source file.
|
||||
The instance.testcases is more accurate and can only be obtained after pipeline finishes.
|
||||
'''
|
||||
for instance in self.instances.values():
|
||||
self.results.cases += len(instance.testcases)
|
||||
|
||||
def show_brief(self):
|
||||
logger.info("%d test scenarios (%d test instances) selected, "
|
||||
"%d configurations skipped (%d by static filter, %d at runtime)." %
|
||||
|
||||
@@ -429,13 +429,12 @@ class TestPlan:
            logger.debug("Found possible testsuite in " + dirpath)

            suite_yaml_path = os.path.join(dirpath, filename)
            suite_path = os.path.dirname(suite_yaml_path)

            try:
                parsed_data = TwisterConfigParser(suite_yaml_path, self.suite_schema)
                parsed_data.load()

                suite_path = os.path.dirname(suite_yaml_path)

                subcases, ztest_suite_names = scan_testsuite_path(suite_path)

                for name in parsed_data.scenarios.keys():

@@ -202,7 +202,8 @@ def main(options):

    duration = time.time() - start_time

    runner.results.summary()
    if VERBOSE > 1:
        runner.results.summary()

    report.summary(runner.results, options.disable_unrecognized_section_test, duration)

@@ -161,6 +161,7 @@ mapping:
  # maybe it is just an artifact?
  "tests":
    type: map
    required: true
    matching-rule: "any"
    mapping:
      # The key for the testname is any, so

@@ -30,7 +30,7 @@ static int map_rom_segments(void)
{
        int rc = 0;

        size_t _partition_offset = FIXED_PARTITION_OFFSET(image_0);
        size_t _partition_offset = FIXED_PARTITION_OFFSET(slot0_partition);
        uint32_t _app_irom_start = _partition_offset + (uint32_t)&_image_irom_start;
        uint32_t _app_irom_size = (uint32_t)&_image_irom_size;
        uint32_t _app_irom_vaddr = (uint32_t)&_image_irom_vaddr;

@@ -1267,6 +1267,7 @@ static inline int isr_rx_pdu(struct lll_scan *lll, struct pdu_adv *pdu_adv_rx,
        /* Active scanner */
        } else if (((pdu_adv_rx->type == PDU_ADV_TYPE_ADV_IND) ||
                    (pdu_adv_rx->type == PDU_ADV_TYPE_SCAN_IND)) &&
                   (pdu_adv_rx->len >= offsetof(struct pdu_adv_adv_ind, data)) &&
                   (pdu_adv_rx->len <= sizeof(struct pdu_adv_adv_ind)) &&
                   lll->type && !lll->state &&
#if defined(CONFIG_BT_CENTRAL)
@@ -1359,6 +1360,7 @@ static inline int isr_rx_pdu(struct lll_scan *lll, struct pdu_adv *pdu_adv_rx,
        else if (((((pdu_adv_rx->type == PDU_ADV_TYPE_ADV_IND) ||
                    (pdu_adv_rx->type == PDU_ADV_TYPE_NONCONN_IND) ||
                    (pdu_adv_rx->type == PDU_ADV_TYPE_SCAN_IND)) &&
                   (pdu_adv_rx->len >= offsetof(struct pdu_adv_adv_ind, data)) &&
                   (pdu_adv_rx->len <= sizeof(struct pdu_adv_adv_ind))) ||
                  ((pdu_adv_rx->type == PDU_ADV_TYPE_DIRECT_IND) &&
                   (pdu_adv_rx->len == sizeof(struct pdu_adv_direct_ind)) &&
@@ -1373,6 +1375,7 @@ static inline int isr_rx_pdu(struct lll_scan *lll, struct pdu_adv *pdu_adv_rx,
                                            &dir_report)) ||
#endif /* CONFIG_BT_CTLR_ADV_EXT */
                  ((pdu_adv_rx->type == PDU_ADV_TYPE_SCAN_RSP) &&
                   (pdu_adv_rx->len >= offsetof(struct pdu_adv_scan_rsp, data)) &&
                   (pdu_adv_rx->len <= sizeof(struct pdu_adv_scan_rsp)) &&
                   (lll->state != 0U) &&
                   isr_scan_rsp_adva_matches(pdu_adv_rx))) &&
@@ -1423,6 +1426,7 @@ static inline bool isr_scan_init_check(const struct lll_scan *lll,
             lll_scan_adva_check(lll, pdu->tx_addr, pdu->adv_ind.addr,
                                 rl_idx)) &&
            (((pdu->type == PDU_ADV_TYPE_ADV_IND) &&
              (pdu->len >= offsetof(struct pdu_adv_adv_ind, data)) &&
              (pdu->len <= sizeof(struct pdu_adv_adv_ind))) ||
             ((pdu->type == PDU_ADV_TYPE_DIRECT_IND) &&
              (pdu->len == sizeof(struct pdu_adv_direct_ind)) &&

@@ -8169,7 +8169,7 @@ static void ull_conn_update_ticker(struct ll_conn *conn,
}

void ull_conn_update_parameters(struct ll_conn *conn, uint8_t is_cu_proc, uint8_t win_size,
                                uint16_t win_offset_us, uint16_t interval, uint16_t latency,
                                uint32_t win_offset_us, uint16_t interval, uint16_t latency,
                                uint16_t timeout, uint16_t instant)
{
        struct lll_conn *lll;

@@ -81,7 +81,7 @@ static inline void cpr_active_reset(void)
uint16_t ull_conn_event_counter(struct ll_conn *conn);

void ull_conn_update_parameters(struct ll_conn *conn, uint8_t is_cu_proc,
                                uint8_t win_size, uint16_t win_offset_us,
                                uint8_t win_size, uint32_t win_offset_us,
                                uint16_t interval, uint16_t latency,
                                uint16_t timeout, uint16_t instant);

@@ -209,13 +209,13 @@ struct proc_ctx {
        uint8_t error;
        uint8_t rejected_opcode;
        uint8_t params_changed;
        uint16_t instant;
        uint8_t win_size;
        uint16_t win_offset_us;
        uint16_t instant;
        uint16_t interval_min;
        uint16_t interval_max;
        uint16_t latency;
        uint16_t timeout;
        uint32_t win_offset_us;
#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
        uint8_t preferred_periodicity;
        uint16_t reference_conn_event_count;

@@ -1266,12 +1266,11 @@ int bt_id_create(bt_addr_le_t *addr, uint8_t *irk)
                }
        }

        new_id = bt_dev.id_count;
        new_id = bt_dev.id_count++;
        err = id_create(new_id, addr, irk);
        if (err) {
                bt_dev.id_count--;
                return err;
        } else {
                bt_dev.id_count++;
        }

        return new_id;

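The hunk above shows two orderings of the same identity bookkeeping: one side reserves the next slot up front and rolls the counter back if id_create() fails, the other bumps the counter only after success. A minimal standalone sketch of the reserve-then-rollback ordering; id_table, id_count, create_id and alloc_id below are illustrative names, not Zephyr APIs:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_IDS 4

static uint8_t id_table[MAX_IDS];
static uint8_t id_count;

/* Pretend back-end that may fail; stands in for id_create() in the hunk. */
static int create_id(uint8_t id)
{
        if (id >= MAX_IDS) {
                return -ENOMEM;
        }
        id_table[id] = 1;
        return 0;
}

/* Reserve the next slot first, roll the counter back on failure. */
static int alloc_id(void)
{
        uint8_t new_id = id_count++;
        int err = create_id(new_id);

        if (err) {
                id_count--;
                return err;
        }

        return new_id;
}

int main(void)
{
        for (int i = 0; i < 6; i++) {
                printf("alloc_id() -> %d\n", alloc_id());
        }
        return 0;
}

Either ordering keeps the counter consistent; the rollback variant simply guarantees the returned identifier and the stored count never disagree, even if the create step fails.
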
@@ -266,6 +266,12 @@ bool bt_mesh_net_iv_update(uint32_t iv_index, bool iv_update)
                return false;
        }

        /* Discard [iv, false] --> [iv, true] */
        if (iv_index == bt_mesh.iv_index && iv_update) {
                LOG_DBG("Ignore previous IV update procedure");
                return false;
        }

        if ((iv_index > bt_mesh.iv_index + 1) ||
            (iv_index == bt_mesh.iv_index + 1 &&
             (atomic_test_bit(bt_mesh.flags, BT_MESH_IVU_IN_PROGRESS) || !iv_update))) {

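The added block discards a beacon that re-asserts IV Update for the index the node already uses. As a rough, simplified restatement of only the checks visible in this hunk (not the full mesh implementation; iv_update_accepted and iv_in_progress are stand-in names for illustration):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Returns false when the candidate beacon is rejected by the checks shown
 * above; the real bt_mesh_net_iv_update() performs further validation.
 */
static bool iv_update_accepted(uint32_t cur_iv, bool iv_in_progress,
                               uint32_t new_iv, bool new_update)
{
        /* Discard [iv, false] --> [iv, true]: same index, update re-asserted. */
        if (new_iv == cur_iv && new_update) {
                return false;
        }

        /* Too large a jump, or +1 while an update is already in progress or
         * without the update flag, is rejected as in the hunk above.
         */
        if ((new_iv > cur_iv + 1) ||
            (new_iv == cur_iv + 1 && (iv_in_progress || !new_update))) {
                return false;
        }

        return true;
}

int main(void)
{
        printf("%d\n", iv_update_accepted(10, false, 10, true));  /* 0: discarded */
        printf("%d\n", iv_update_accepted(10, false, 11, true));  /* 1: passes these checks */
        printf("%d\n", iv_update_accepted(10, true, 11, true));   /* 0: update in progress */
        return 0;
}
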
@@ -359,6 +359,13 @@ static void prov_pub_key(const uint8_t *data)
                return;
        }

        if (!memcmp(bt_mesh_prov->public_key_be,
                    bt_mesh_prov_link.conf_inputs.pub_key_provisioner, PDU_LEN_PUB_KEY)) {
                LOG_ERR("Public keys are identical");
                prov_fail(PROV_ERR_NVAL_FMT);
                return;
        }

        /* No swap needed since user provides public key in big-endian */
        memcpy(bt_mesh_prov_link.conf_inputs.pub_key_device, bt_mesh_prov->public_key_be,
               PDU_LEN_PUB_KEY);

@@ -880,7 +880,11 @@ static inline int send_sf(struct isotp_send_ctx *ctx)

        frame.data[index++] = ISOTP_PCI_TYPE_SF | len;

        __ASSERT_NO_MSG(len <= ISOTP_CAN_DL - index);
        if (len > ISOTP_CAN_DL - index) {
                LOG_ERR("SF len does not fit DL");
                return -ENOSPC;
        }

        memcpy(&frame.data[index], data, len);

#ifdef CONFIG_ISOTP_ENABLE_TX_PADDING
@@ -1089,6 +1093,8 @@ static void send_state_machine(struct isotp_send_ctx *ctx)
        case ISOTP_TX_ERR:
                LOG_DBG("SM error");
                __fallthrough;
        case ISOTP_TX_SEND_SF:
                __fallthrough;
        case ISOTP_TX_WAIT_FIN:
                if (ctx->filter_id >= 0) {
                        can_remove_rx_filter(ctx->can_dev, ctx->filter_id);
@@ -1175,6 +1181,7 @@ static int send(struct isotp_send_ctx *ctx, const struct device *can_dev,
                ret = attach_fc_filter(ctx);
                if (ret) {
                        LOG_ERR("Can't attach fc filter: %d", ret);
                        free_send_ctx(&ctx);
                        return ret;
                }

@@ -1185,8 +1192,8 @@ static int send(struct isotp_send_ctx *ctx, const struct device *can_dev,
                LOG_DBG("Sending single frame");
                ctx->filter_id = -1;
                ret = send_sf(ctx);
                ctx->state = ISOTP_TX_WAIT_FIN;
                if (ret) {
                        free_send_ctx(&ctx);
                        return ret == -EAGAIN ?
                               ISOTP_N_TIMEOUT_A : ISOTP_N_ERROR;
                }

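The send_sf() change above swaps a debug-only assertion for a runtime length check, so an oversized single frame is rejected with an error instead of silently overrunning the frame in release builds. A hedged standalone sketch of that pattern; CAN_DL, can_frame_sketch and build_single_frame are assumptions for illustration, not the Zephyr ISO-TP API:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CAN_DL 8U  /* classic CAN payload size; stand-in for ISOTP_CAN_DL */

struct can_frame_sketch {
        uint8_t data[CAN_DL];
        uint8_t dlc;
};

/* Build a single frame; fail cleanly instead of asserting when the payload
 * plus the PCI byte would not fit the data field.
 */
static int build_single_frame(struct can_frame_sketch *frame,
                              const uint8_t *payload, size_t len)
{
        size_t index = 0;

        frame->data[index++] = 0x00 | (uint8_t)len; /* SF PCI: type 0, length */

        if (len > CAN_DL - index) {
                fprintf(stderr, "SF len does not fit DL\n");
                return -ENOSPC;
        }

        memcpy(&frame->data[index], payload, len);
        frame->dlc = (uint8_t)(index + len);
        return 0;
}

int main(void)
{
        struct can_frame_sketch f;
        uint8_t small[5] = {1, 2, 3, 4, 5};
        uint8_t big[9] = {0};

        printf("small: %d\n", build_single_frame(&f, small, sizeof(small))); /* 0 */
        printf("big:   %d\n", build_single_frame(&f, big, sizeof(big)));     /* -ENOSPC */
        return 0;
}
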
@@ -393,7 +393,7 @@ static void hexdump_line_print(const struct log_output *output,
                        unsigned char c = (unsigned char)data[i];

                        print_formatted(output, "%c",
                                        isprint((int)c) ? c : '.');
                                        isprint((int)c) != 0 ? c : '.');
                } else {
                        print_formatted(output, " ");
                }

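This one-line change, repeated in the shell, tracing, and libc test hunks further down, makes the result of isprint() explicit: the <ctype.h> classification functions return an int, not a bool, so the value is compared against 0 rather than used directly as a truth value. A small self-contained illustration of the same pattern; dump_printable() is an invented helper, not Zephyr code:

#include <ctype.h>
#include <stddef.h>
#include <stdio.h>

static void dump_printable(const unsigned char *data, size_t len)
{
        for (size_t i = 0; i < len; i++) {
                /* Explicit comparison: isprint() returns an int, not a bool. */
                char out = (isprint((int)data[i]) != 0) ? (char)data[i] : '.';

                putchar(out);
        }
        putchar('\n');
}

int main(void)
{
        const unsigned char buf[] = {'Z', 'e', 'p', 0x01, 'h', 'y', 'r', 0x7f};

        dump_printable(buf, sizeof(buf));  /* prints "Zep.hyr." */
        return 0;
}
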
@@ -267,7 +267,6 @@ img_mgmt_erase(struct smp_streamer *ctxt)
        struct image_version ver;
        int rc;
        zcbor_state_t *zsd = ctxt->reader->zs;
        zcbor_state_t *zse = ctxt->writer->zs;
        bool ok;
        uint32_t slot = 1;
        size_t decoded = 0;
@@ -307,12 +306,15 @@ img_mgmt_erase(struct smp_streamer *ctxt)
                return rc;
        }

        if (IS_ENABLED(CONFIG_MCUMGR_SMP_LEGACY_RC_BEHAVIOUR) && zcbor_tstr_put_lit(zse, "rc") &&
            zcbor_int32_put(zse, 0)) {
                return MGMT_ERR_EOK;
        if (IS_ENABLED(CONFIG_MCUMGR_SMP_LEGACY_RC_BEHAVIOUR)) {
                zcbor_state_t *zse = ctxt->writer->zs;

                if (!zcbor_tstr_put_lit(zse, "rc") || !zcbor_int32_put(zse, 0)) {
                        return MGMT_ERR_EMSGSIZE;
                }
        }

        return MGMT_ERR_EMSGSIZE;
        return MGMT_ERR_EOK;
}

static int

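The reworked tail of img_mgmt_erase() only emits the legacy "rc" key when the legacy-behaviour option is enabled, and reports an encoding failure only when that encode actually fails, instead of treating the non-legacy path as an error. A standalone sketch of that control flow; put_kv_int() is a hypothetical encoder standing in for the zcbor calls, and the error values are placeholders, not the real mcumgr codes:

#include <stdbool.h>
#include <stdio.h>

#define MGMT_ERR_EOK      0
#define MGMT_ERR_EMSGSIZE 3  /* placeholder value for illustration */

/* Hypothetical encoder: returns false when the response buffer is full. */
static bool put_kv_int(const char *key, int value)
{
        printf("encode %s=%d\n", key, value);
        return true;
}

static int erase_response(bool legacy_rc_behaviour)
{
        /* ... erase work happens before this point ... */

        if (legacy_rc_behaviour) {
                /* Only the legacy wire format carries an explicit "rc" = 0. */
                if (!put_kv_int("rc", 0)) {
                        return MGMT_ERR_EMSGSIZE;
                }
        }

        return MGMT_ERR_EOK;
}

int main(void)
{
        printf("legacy: %d\n", erase_response(true));
        printf("modern: %d\n", erase_response(false));
        return 0;
}
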
@@ -232,6 +232,7 @@ SYS_INIT(smp_udp_init, APPLICATION, CONFIG_APPLICATION_INIT_PRIORITY);
int smp_udp_open(void)
{
        struct config *conf;
        int sock;

#ifdef CONFIG_MCUMGR_TRANSPORT_UDP_IPV4
        struct sockaddr_in addr4;
@@ -242,11 +243,12 @@ int smp_udp_open(void)
        addr4.sin_addr.s_addr = htonl(INADDR_ANY);

        conf = &configs.ipv4;
        conf->sock = create_socket((struct sockaddr *)&addr4, conf->proto);
        sock = create_socket((struct sockaddr *)&addr4, conf->proto);

        if (conf->sock < 0) {
        if (sock < 0) {
                return -MGMT_ERR_EUNKNOWN;
        }
        conf->sock = sock;

        create_thread(conf, "smp_udp4");
#endif
@@ -260,11 +262,12 @@ int smp_udp_open(void)
        addr6.sin6_addr = in6addr_any;

        conf = &configs.ipv6;
        conf->sock = create_socket((struct sockaddr *)&addr6, conf->proto);
        sock = create_socket((struct sockaddr *)&addr6, conf->proto);

        if (conf->sock < 0) {
        if (sock < 0) {
                return -MGMT_ERR_EUNKNOWN;
        }
        conf->sock = sock;

        create_thread(conf, "smp_udp6");
#endif

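Both the IPv4 and IPv6 paths now create the socket into a local variable and only commit it to the long-lived config once the call has succeeded, so a failed create_socket() never leaves a negative descriptor stored in conf->sock. A minimal sketch of that ordering; udp_config, create_socket_stub and open_transport are illustrative stand-ins, not the Zephyr transport code:

#include <stdio.h>

struct udp_config {
        int sock;  /* -1 while no socket is bound */
};

/* Stand-in for create_socket(); returns a descriptor or a negative error. */
static int create_socket_stub(int fail)
{
        return fail ? -1 : 7;
}

static int open_transport(struct udp_config *conf, int fail)
{
        /* Create into a local first ... */
        int sock = create_socket_stub(fail);

        if (sock < 0) {
                return -1; /* conf->sock is left untouched */
        }

        /* ... and only store it once we know it is valid. */
        conf->sock = sock;
        return 0;
}

int main(void)
{
        struct udp_config conf = { .sock = -1 };
        int ret;

        ret = open_transport(&conf, 1);
        printf("fail path: %d, conf.sock=%d\n", ret, conf.sock);

        ret = open_transport(&conf, 0);
        printf("good path: %d, conf.sock=%d\n", ret, conf.sock);
        return 0;
}
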
@@ -209,8 +209,12 @@ again:

        if (info) {
                ret = info_len + sizeof(hdr);
                ret = MIN(max_len, ret);
                memcpy(&copy_to[sizeof(hdr)], info, ret);
                if (ret > max_len) {
                        errno = EMSGSIZE;
                        return -1;
                }

                memcpy(&copy_to[sizeof(hdr)], info, info_len);
        } else {
                ret = 0;
        }

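Instead of silently truncating the event payload to the caller's buffer, the receive path now fails with EMSGSIZE when the header plus event info cannot fit, which is what the new test added near the end of this range expects. A standalone sketch of that contract; event_hdr and fill_event are illustrative names, not the Zephyr net_mgmt socket API:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct event_hdr {
        uint32_t id;
};

/* Copy header + info into buf, or fail with EMSGSIZE if it cannot fit. */
static int fill_event(uint8_t *buf, size_t max_len,
                      const struct event_hdr *hdr,
                      const uint8_t *info, size_t info_len)
{
        size_t needed = sizeof(*hdr) + info_len;

        if (needed > max_len) {
                errno = EMSGSIZE;
                return -1;
        }

        memcpy(buf, hdr, sizeof(*hdr));
        memcpy(&buf[sizeof(*hdr)], info, info_len);
        return (int)needed;
}

int main(void)
{
        struct event_hdr hdr = { .id = 42 };
        uint8_t info[32] = {0};
        uint8_t small[16];
        uint8_t large[64];
        int ret;

        ret = fill_event(small, sizeof(small), &hdr, info, sizeof(info));
        printf("small: %d (errno %d)\n", ret, errno);          /* -1, EMSGSIZE */
        printf("large: %d\n",
               fill_event(large, sizeof(large), &hdr, info, sizeof(info))); /* 36 */
        return 0;
}
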
@@ -1049,7 +1049,7 @@ static void state_collect(const struct shell *shell)
                break;

        default:
                if (isprint((int) data)) {
                if (isprint((int) data) != 0) {
                        z_flag_history_exit_set(shell, true);
                        z_shell_op_char_insert(shell, data);
                } else if (z_flag_echo_get(shell)) {
@@ -1557,7 +1557,7 @@ void shell_hexdump_line(const struct shell *shell, unsigned int offset,
                        char c = data[i];

                        shell_fprintf(shell, SHELL_NORMAL, "%c",
                                      isprint((int)c) ? c : '.');
                                      isprint((int)c) != 0 ? c : '.');
                } else {
                        shell_fprintf(shell, SHELL_NORMAL, " ");
                }

@@ -48,7 +48,7 @@ static void uart_isr(const struct device *dev, void *user_data)
                length = tracing_cmd_buffer_alloc(&cmd);
        }

        if (!isprint(byte)) {
        if (isprint(byte) == 0) {
                if (byte == '\r') {
                        cmd[cur] = '\0';
                        tracing_cmd_handle(cmd, cur);

@@ -898,6 +898,9 @@ static void pe_send_not_supported_entry(void *obj)

        LOG_INF("PE_Not_Supported");

        /* Notify the Device Policy Manager of unsupported message reception */
        policy_notify(dev, MSG_NOT_SUPPORTED_RECEIVED);

        /* Request the Protocol Layer to send a Not_Supported or Reject Message. */
        if (prl_get_rev(dev, PD_PACKET_SOP) > PD_REV20) {
                pe_send_ctrl_msg(dev, PD_PACKET_SOP, PD_CTRL_NOT_SUPPORTED);

@@ -111,6 +111,9 @@ static void test_ivu_normal(void)
        ASSERT_EQUAL(TEST_IV_IDX, bt_mesh.iv_index);
        ASSERT_EQUAL(0, bt_mesh.seq);

        /* Ignore same iv index but iv in progress */
        ASSERT_FALSE(bt_mesh_net_iv_update(TEST_IV_IDX, BCN_IV_IN_PROGRESS));

        bt_mesh.seq = 100;
        /* update before minimum duration */
        ASSERT_FALSE(bt_mesh_net_iv_update(TEST_IV_IDX + 1, BCN_IV_IN_PROGRESS));

@@ -479,7 +479,7 @@ ZTEST(test_c_lib, test_checktype)

        ptr = buf;
        for (int i = 0; i < 128; i++) {
                if (isprint(i)) {
                if (isprint(i) != 0) {
                        *ptr++ = i;
                }
        }

@@ -457,6 +457,34 @@ ZTEST_USER(net_socket_net_mgmt, test_net_mgmt_catch_user)
        test_net_mgmt_catch_events();
}

static void test_net_mgmt_catch_events_failure(void)
{
#define SMALL_BUF_LEN 16
        struct sockaddr_nm event_addr;
        socklen_t event_addr_len;
        uint8_t buf[SMALL_BUF_LEN];
        int ret;

        memset(buf, 0, sizeof(buf));
        event_addr_len = sizeof(event_addr);

        ret = recvfrom(fd, buf, sizeof(buf), 0,
                       (struct sockaddr *)&event_addr,
                       &event_addr_len);
        zassert_equal(ret, -1, "Msg check failed, %d", errno);
        zassert_equal(errno, EMSGSIZE, "Msg check failed, errno %d", errno);
}

ZTEST(net_socket_net_mgmt, test_net_mgmt_catch_failure_kernel)
{
        test_net_mgmt_catch_events_failure();
}

ZTEST_USER(net_socket_net_mgmt, test_net_mgmt_catch_failure_user)
{
        test_net_mgmt_catch_events_failure();
}

ZTEST(net_socket_net_mgmt, test_net_mgmt_cleanup)
{
        k_thread_abort(trigger_events_thread_id);