Compare commits

110 Commits (SHA1 only; the author and date columns were empty in the extracted table):

6ed0a3557f, a405f8b6b0, 3683fe625b, e9fcfa14e6, 1d16757282, eeefd07f68,
d013132f55, 25398f36da, d3c2a2457d, 10086910f5, 01ad11252c, 652b7f6f83,
32748c69b8, 65104bc3cc, ce4c30fc21, 5d382fa560, e677cfd61d, db1ed25fad,
e6f70e97c8, f89298cf0e, 2e98b1fd8c, 13072b4c7b, 43c936a5dd, acc7cfaadf,
f9a56bcfd4, 4a3b59d47b, 414d6c91a1, 2daec8c70c, 229ca396aa, 06ae95e45c,
f72d8ffe80, f2eeeda113, 916d9ad13b, a90a3cc493, fb5845a072, c0d6fad199,
60ae4f9351, 6ebce3643e, 80ab098d64, d00c98d585, f290106952, 2e0e5e27e8,
030fa9da45, 43370b89c3, 15fa28896a, ce3eb90a83, ca24cd6c2d, 4fc4dc7b84,
fb24b62dc5, 60e7a97328, a1aa463783, 29c1e08cf7, 190f09df52, a166290f1a,
21e0870106, ebe3651f3d, 1b7c720c7f, 58af1b51bd, 70f2a4951a, 650d10805a,
25616b1021, f72519007c, 1b2a7ec251, 9d2533fc92, e20b8f3f34, 199d5d5448,
5db2717f06, f3851326da, 5a8d05b968, eea42e38f3, 0388a90e7b, 4c62d76fb7,
6f8f9b5c7a, afbc93287d, a28aa01a88, 677a374255, 0389fa740b, b02d34b855,
29e3a4865f, aaa6d280ce, e02a3377e5, 76c30dfa55, c3f512d606, f882abfd13,
bc7300fea7, 8da9a76464, 298b8ea788, a9aaf048e8, e2b81b48c4, 45c41bc344,
f570a46719, 675a349e1b, 16927a6cbb, ab353d6b7d, 951b055b7f, 8e256b3399,
74f2760771, 85e0912291, f2c582c75d, c908ee8133, 175e76b302, c520749a71,
584f52d5be, d05c3bdf36, 3ab0c9516f, df6f0f477f, 2dc30ca1fb, 5cbda9f1c7,
711506349d, 572921a44a
.github/workflows/backport_issue_check.yml (2 changes, vendored)

```diff
@@ -8,7 +8,7 @@ on:
 jobs:
   backport:
     name: Backport Issue Check
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-22.04
 
     steps:
       - name: Check out source code
```
.github/workflows/clang.yaml (4 changes, vendored)

```diff
@@ -78,8 +78,8 @@ jobs:
           key: ${{ steps.ccache_cache_timestamp.outputs.repo }}-${{ github.ref_name }}-clang-${{ matrix.platform }}-ccache
           path: /github/home/.ccache
           aws-s3-bucket: ccache.zephyrproject.org
-          aws-access-key-id: ${{ secrets.CCACHE_S3_ACCESS_KEY_ID }}
-          aws-secret-access-key: ${{ secrets.CCACHE_S3_SECRET_ACCESS_KEY }}
+          aws-access-key-id: ${{ vars.AWS_CCACHE_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ secrets.AWS_CCACHE_SECRET_ACCESS_KEY }}
           aws-region: us-east-2
 
       - name: ccache stats initial
```
.github/workflows/codecov.yaml (4 changes, vendored)

```diff
@@ -65,8 +65,8 @@ jobs:
           key: ${{ steps.ccache_cache_prop.outputs.repo }}-${{github.event_name}}-${{matrix.platform}}-codecov-ccache
           path: /github/home/.ccache
           aws-s3-bucket: ccache.zephyrproject.org
-          aws-access-key-id: ${{ secrets.CCACHE_S3_ACCESS_KEY_ID }}
-          aws-secret-access-key: ${{ secrets.CCACHE_S3_SECRET_ACCESS_KEY }}
+          aws-access-key-id: ${{ vars.AWS_CCACHE_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ secrets.AWS_CCACHE_SECRET_ACCESS_KEY }}
           aws-region: us-east-2
 
       - name: ccache stats initial
```
.github/workflows/daily_test_version.yml (4 changes, vendored)

```diff
@@ -19,8 +19,8 @@ jobs:
       - name: Configure AWS Credentials
         uses: aws-actions/configure-aws-credentials@v1
         with:
-          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID_TESTING }}
-          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_TESTING }}
+          aws-access-key-id: ${{ vars.AWS_TESTING_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ secrets.AWS_TESTING_SECRET_ACCESS_KEY }}
           aws-region: us-east-1
 
       - name: install-pip
```
.github/workflows/doc-build.yml (8 changes, vendored)

```diff
@@ -125,7 +125,7 @@ jobs:
       - name: install-pkgs
         run: |
           apt-get update
-          apt-get install -y python3-pip ninja-build doxygen graphviz librsvg2-bin
+          apt-get install -y python3-pip python3-venv ninja-build doxygen graphviz librsvg2-bin
 
       - name: cache-pip
         uses: actions/cache@v3
@@ -133,6 +133,12 @@ jobs:
           path: ~/.cache/pip
           key: pip-${{ hashFiles('scripts/requirements-doc.txt') }}
 
+      - name: setup-venv
+        run: |
+          python3 -m venv .venv
+          . .venv/bin/activate
+          echo PATH=$PATH >> $GITHUB_ENV
+
       - name: install-pip
         run: |
           pip3 install -U setuptools wheel pip
```
.github/workflows/doc-publish-pr.yml (2 changes, vendored)

```diff
@@ -50,7 +50,7 @@ jobs:
       - name: Configure AWS Credentials
         uses: aws-actions/configure-aws-credentials@v1
         with:
-          aws-access-key-id: ${{ secrets.AWS_BUILDS_ZEPHYR_PR_ACCESS_KEY_ID }}
+          aws-access-key-id: ${{ vars.AWS_BUILDS_ZEPHYR_PR_ACCESS_KEY_ID }}
           aws-secret-access-key: ${{ secrets.AWS_BUILDS_ZEPHYR_PR_SECRET_ACCESS_KEY }}
           aws-region: us-east-1
```
.github/workflows/doc-publish.yml (4 changes, vendored)

```diff
@@ -32,8 +32,8 @@ jobs:
       - name: Configure AWS Credentials
         uses: aws-actions/configure-aws-credentials@v1
         with:
-          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
-          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+          aws-access-key-id: ${{ vars.AWS_DOCS_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ secrets.AWS_DOCS_SECRET_ACCESS_KEY }}
           aws-region: us-east-1
 
       - name: Upload to AWS S3
```
.github/workflows/footprint-tracking.yml (4 changes, vendored)

```diff
@@ -53,8 +53,8 @@ jobs:
      - name: Configure AWS Credentials
        uses: aws-actions/configure-aws-credentials@v1
        with:
-         aws-access-key-id: ${{ secrets.FOOTPRINT_AWS_KEY_ID }}
-         aws-secret-access-key: ${{ secrets.FOOTPRINT_AWS_ACCESS_KEY }}
+         aws-access-key-id: ${{ vars.AWS_TESTING_ACCESS_KEY_ID }}
+         aws-secret-access-key: ${{ secrets.AWS_TESTING_SECRET_ACCESS_KEY }}
         aws-region: us-east-1
 
      - name: Record Footprint
```
.github/workflows/issue_count.yml (4 changes, vendored)

```diff
@@ -43,8 +43,8 @@ jobs:
       - name: Configure AWS Credentials
         uses: aws-actions/configure-aws-credentials@v1
         with:
-          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID_TESTING }}
-          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_TESTING }}
+          aws-access-key-id: ${{ vars.AWS_TESTING_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ secrets.AWS_TESTING_SECRET_ACCESS_KEY }}
           aws-region: us-east-1
 
       - name: Post Results
```
.github/workflows/labeler.yml (4 changes, vendored)

```diff
@@ -7,6 +7,4 @@ jobs:
     name: Pull Request Labeler
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/labeler@v2.1.1
-        with:
-          repo-token: '${{ secrets.GITHUB_TOKEN }}'
+      - uses: actions/labeler@v4
```
.github/workflows/manifest.yml (2 changes, vendored)

```diff
@@ -6,7 +6,7 @@ on:
 
 jobs:
   contribs:
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-22.04
     name: Manifest
     steps:
       - name: Checkout the code
```
.github/workflows/twister.yaml (4 changes, vendored)

```diff
@@ -183,8 +183,8 @@ jobs:
           key: ${{ steps.ccache_cache_timestamp.outputs.repo }}-${{ github.ref_name }}-${{github.event_name}}-${{ matrix.subset }}-ccache
           path: /github/home/.ccache
           aws-s3-bucket: ccache.zephyrproject.org
-          aws-access-key-id: ${{ secrets.CCACHE_S3_ACCESS_KEY_ID }}
-          aws-secret-access-key: ${{ secrets.CCACHE_S3_SECRET_ACCESS_KEY }}
+          aws-access-key-id: ${{ vars.AWS_CCACHE_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ secrets.AWS_CCACHE_SECRET_ACCESS_KEY }}
           aws-region: us-east-2
 
       - name: ccache stats initial
```
```diff
@@ -162,6 +162,13 @@ zephyr_compile_options(${OPTIMIZATION_FLAG})
 # @Intent: Obtain compiler specific flags related to C++ that are not influenced by kconfig
 zephyr_compile_options($<$<COMPILE_LANGUAGE:CXX>:$<TARGET_PROPERTY:compiler-cpp,required>>)
 
+# Extra warnings options for twister run
+if (CONFIG_COMPILER_WARNINGS_AS_ERRORS)
+  zephyr_compile_options($<$<COMPILE_LANGUAGE:C>:$<TARGET_PROPERTY:compiler,warnings_as_errors>>)
+  zephyr_compile_options($<$<COMPILE_LANGUAGE:ASM>:$<TARGET_PROPERTY:asm,warnings_as_errors>>)
+  zephyr_link_libraries($<TARGET_PROPERTY:linker,warnings_as_errors>)
+endif()
+
 # @Intent: Obtain compiler specific flags for compiling under different ISO standards of C++
 if(CONFIG_CPLUSPLUS)
   # From kconfig choice, pick a single dialect.
@@ -627,7 +634,7 @@ if(CONFIG_64BIT)
 endif()
 
 if(CONFIG_TIMEOUT_64BIT)
-  set(SYSCALL_SPLIT_TIMEOUT_ARG --split-type k_timeout_t)
+  set(SYSCALL_SPLIT_TIMEOUT_ARG --split-type k_timeout_t --split-type k_ticks_t)
 endif()
 
 add_custom_command(OUTPUT include/generated/syscall_dispatch.c ${syscall_list_h}
```

```diff
@@ -305,9 +305,13 @@ config NO_OPTIMIZATIONS
     help
       Compiler optimizations will be set to -O0 independently of other
       options.
 
 endchoice
 
+config COMPILER_WARNINGS_AS_ERRORS
+    bool "Treat warnings as errors"
+    help
+      Turn on "warning as error" toolchain flags
+
 config COMPILER_COLOR_DIAGNOSTICS
     bool "Enable colored diganostics"
     default y
```
VERSION (2 changes)

```diff
@@ -1,5 +1,5 @@
 VERSION_MAJOR = 2
 VERSION_MINOR = 7
-PATCHLEVEL = 4
+PATCHLEVEL = 6
 VERSION_TWEAK = 0
 EXTRAVERSION =
```
```diff
@@ -27,6 +27,7 @@ endif # BOARD_BL5340_DVK_CPUAPP
 
 config BUILD_WITH_TFM
     default y if BOARD_BL5340_DVK_CPUAPP_NS
+    depends on ZEPHYR_TRUSTED_FIRMWARE_M_MODULE
 
 if BUILD_WITH_TFM
```

```diff
@@ -20,6 +20,7 @@ config BOARD
 # force building with TF-M as the Secure Execution Environment.
 config BUILD_WITH_TFM
     default y if TRUSTED_EXECUTION_NONSECURE
+    depends on ZEPHYR_TRUSTED_FIRMWARE_M_MODULE
 
 
 if GPIO
```

```diff
@@ -4,7 +4,10 @@ type: mcu
 arch: arm
 ram: 4096
 flash: 4096
-simulation: qemu
+# TFM is not supported by default in the Zephyr LTS release.
+# Excluding this board's simulator to avoid CI failures.
+#
+#simulation: qemu
 toolchain:
   - gnuarmemb
   - zephyr
```

```diff
@@ -13,6 +13,7 @@ config BOARD
 
 config BUILD_WITH_TFM
     default y if BOARD_NRF5340DK_NRF5340_CPUAPP_NS
+    depends on ZEPHYR_TRUSTED_FIRMWARE_M_MODULE
 
 if BUILD_WITH_TFM
```

```diff
@@ -13,6 +13,7 @@ config BOARD
 
 config BUILD_WITH_TFM
     default y if BOARD_NRF9160DK_NRF9160_NS
+    depends on ZEPHYR_TRUSTED_FIRMWARE_M_MODULE
 
 if BUILD_WITH_TFM
```
```diff
@@ -132,6 +132,10 @@ set_property(TARGET compiler-cpp PROPERTY dialect_cpp2a "")
 set_property(TARGET compiler-cpp PROPERTY dialect_cpp20 "")
 set_property(TARGET compiler-cpp PROPERTY dialect_cpp2b "")
 
+# Flags for set extra warnigs (ARCMWDT asm can't recognize --fatal-warnings. Skip it)
+set_property(TARGET compiler PROPERTY warnings_as_errors -Werror)
+set_property(TARGET asm PROPERTY warnings_as_errors -Werror)
+
 # Disable exeptions flag in C++
 set_property(TARGET compiler-cpp PROPERTY no_exceptions "-fno-exceptions")
```

```diff
@@ -65,6 +65,10 @@ set_property(TARGET compiler-cpp PROPERTY dialect_cpp2a)
 set_property(TARGET compiler-cpp PROPERTY dialect_cpp20)
 set_property(TARGET compiler-cpp PROPERTY dialect_cpp2b)
 
+# Extra warnings options for twister run
+set_property(TARGET compiler PROPERTY warnings_as_errors)
+set_property(TARGET asm PROPERTY warnings_as_errors)
+
 # Flag for disabling exeptions in C++
 set_property(TARGET compiler-cpp PROPERTY no_exceptions)
```

```diff
@@ -137,6 +137,10 @@ set_property(TARGET compiler-cpp PROPERTY dialect_cpp20 "-std=c++20"
 set_property(TARGET compiler-cpp PROPERTY dialect_cpp2b "-std=c++2b"
                                           "-Wno-register" "-Wno-volatile")
 
+# Flags for set extra warnigs (ARCMWDT asm can't recognize --fatal-warnings. Skip it)
+set_property(TARGET compiler PROPERTY warnings_as_errors -Werror)
+set_property(TARGET asm PROPERTY warnings_as_errors -Werror)
+
 # Disable exeptions flag in C++
 set_property(TARGET compiler-cpp PROPERTY no_exceptions "-fno-exceptions")
```
||||
@@ -53,7 +53,7 @@ list(REMOVE_DUPLICATES
|
||||
# Drop support for NOT CONFIG_HAS_DTS perhaps?
|
||||
if(EXISTS ${DTS_SOURCE})
|
||||
set(SUPPORTS_DTS 1)
|
||||
if(BOARD_REVISION AND EXISTS ${BOARD_DIR}/${BOARD}_${BOARD_REVISION_STRING}.overlay)
|
||||
if(DEFINED BOARD_REVISION AND EXISTS ${BOARD_DIR}/${BOARD}_${BOARD_REVISION_STRING}.overlay)
|
||||
list(APPEND DTS_SOURCE ${BOARD_DIR}/${BOARD}_${BOARD_REVISION_STRING}.overlay)
|
||||
endif()
|
||||
else()
|
||||
|
||||
@@ -518,7 +518,7 @@ function(zephyr_library_cc_option)
|
||||
string(MAKE_C_IDENTIFIER check${option} check)
|
||||
zephyr_check_compiler_flag(C ${option} ${check})
|
||||
|
||||
if(${check})
|
||||
if(${${check}})
|
||||
zephyr_library_compile_options(${option})
|
||||
endif()
|
||||
endforeach()
|
||||
@@ -1003,9 +1003,9 @@ endfunction()
|
||||
function(zephyr_check_compiler_flag lang option check)
|
||||
# Check if the option is covered by any hardcoded check before doing
|
||||
# an automated test.
|
||||
zephyr_check_compiler_flag_hardcoded(${lang} "${option}" check exists)
|
||||
zephyr_check_compiler_flag_hardcoded(${lang} "${option}" _${check} exists)
|
||||
if(exists)
|
||||
set(check ${check} PARENT_SCOPE)
|
||||
set(${check} ${_${check}} PARENT_SCOPE)
|
||||
return()
|
||||
endif()
|
||||
|
||||
@@ -1110,11 +1110,11 @@ function(zephyr_check_compiler_flag_hardcoded lang option check exists)
|
||||
# because they would produce a warning instead of an error during
|
||||
# the test. Exclude them by toolchain-specific blocklist.
|
||||
if((${lang} STREQUAL CXX) AND ("${option}" IN_LIST CXX_EXCLUDED_OPTIONS))
|
||||
set(check 0 PARENT_SCOPE)
|
||||
set(exists 1 PARENT_SCOPE)
|
||||
set(${check} 0 PARENT_SCOPE)
|
||||
set(${exists} 1 PARENT_SCOPE)
|
||||
else()
|
||||
# There does not exist a hardcoded check for this option.
|
||||
set(exists 0 PARENT_SCOPE)
|
||||
set(${exists} 0 PARENT_SCOPE)
|
||||
endif()
|
||||
endfunction(zephyr_check_compiler_flag_hardcoded)
|
||||
|
||||
@@ -1862,7 +1862,7 @@ function(check_set_linker_property)
|
||||
zephyr_check_compiler_flag(C "" ${check})
|
||||
set(CMAKE_REQUIRED_FLAGS ${SAVED_CMAKE_REQUIRED_FLAGS})
|
||||
|
||||
if(${check})
|
||||
if(${${check}})
|
||||
set_property(TARGET ${LINKER_PROPERTY_TARGET} ${APPEND} PROPERTY ${property} ${option})
|
||||
endif()
|
||||
endfunction()
|
||||
|
||||
cmake/linker/arcmwdt/linker_flags.cmake (new file, 4 lines)

```diff
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: Apache-2.0
+
+# Extra warnings options for twister run
+set_property(TARGET linker PROPERTY warnings_as_errors -Wl,--fatal-warnings)
```
```diff
@@ -3,6 +3,9 @@ if (NOT CONFIG_COVERAGE_GCOV)
   set_property(TARGET linker PROPERTY coverage --coverage)
 endif()
 
+# Extra warnings options for twister run
+set_property(TARGET linker PROPERTY ld_extra_warning_options -Wl,--fatal-warnings)
+
 # ld/clang linker flags for sanitizing.
 check_set_linker_property(TARGET linker APPEND PROPERTY sanitize_address -fsanitize=address)
```

```diff
@@ -7,6 +7,9 @@ if (NOT CONFIG_COVERAGE_GCOV)
   set_property(TARGET linker PROPERTY coverage -lgcov)
 endif()
 
+# Extra warnings options for twister run
+set_property(TARGET linker PROPERTY warnings_as_errors -Wl,--fatal-warnings)
+
 # ld/gcc linker flags for sanitizing.
 check_set_linker_property(TARGET linker APPEND PROPERTY sanitize_address -lasan)
 check_set_linker_property(TARGET linker APPEND PROPERTY sanitize_address -fsanitize=address)
```

```diff
@@ -14,3 +14,6 @@ check_set_linker_property(TARGET linker APPEND PROPERTY sanitize_undefined)
 # If memory reporting is a post build command, please use
 # cmake/bintools/bintools.cmake insted.
 check_set_linker_property(TARGET linker PROPERTY memusage)
+
+# Extra warnings options for twister run
+set_property(TARGET linker PROPERTY warnings_as_errors)
```
```diff
@@ -253,8 +253,8 @@ graphviz_dot_args = [
 # -- Linkcheck options ----------------------------------------------------
 
 extlinks = {
-    "jira": ("https://jira.zephyrproject.org/browse/%s", ""),
-    "github": ("https://github.com/zephyrproject-rtos/zephyr/issues/%s", ""),
+    "jira": ("https://jira.zephyrproject.org/browse/%s", "JIRA %s"),
+    "github": ("https://github.com/zephyrproject-rtos/zephyr/issues/%s", "GitHub #%s"),
 }
 
 linkcheck_timeout = 30
```
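Sphinx 4 changed the second tuple element of each ``extlinks`` entry from a plain prefix string into a ``%s`` caption format, which is what this hunk adopts. A sketch of how the updated roles then render in the docs (the issue keys below are illustrative, not taken from the diff):

```rst
.. Example usage of the roles configured above (hypothetical issue keys):

See :github:`12345` for the fix and :jira:`ZEP-1234` for the original report.

.. These render as links titled "GitHub #12345" and "JIRA ZEP-1234",
   pointing at the issue-tracker URLs configured in extlinks.
```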
@@ -2,6 +2,207 @@ (release notes; the hunk is almost entirely added reStructuredText, reproduced below)

.. _zephyr_2.7:

.. _zephyr_2.7.6:

Zephyr 2.7.6
####################

This is an LTS maintenance release with fixes.

Issues Fixed
************

These GitHub issues were addressed since the previous 2.7.5 tagged
release:

.. comment List derived from GitHub Issue query: ...
   * :github:`issuenumber` - issue title

* :github:`32145` - use ``k_thread_foreach_unlocked()`` with shell callbacks
* :github:`56604` - drivers: nrf: rtc: make uptime consistent for app booted from v3.x mcuboot
* :github:`25917` - bluetooth: fix deadlock with tx of acl data and hci commands
* :github:`47649` - bluetooth: release att notification buffer after reconnection
* :github:`43718` - bluetooth: bt_conn: ensure tx buffers can be allocated within timeout
* :github:`60707` - canbus: isotp: seal context buffer memory leaks
* :github:`60904` - drivers: spi_nor: make erase operation more opportunistic
* :github:`61451` - drivers: can: stm32: correct timing_max parameters
* :github:`61501` - canbus: isotp: convert SF length check from ``ASSERT`` to runtime check
* :github:`61544` - drivers: ieee802154_nrf5: add payload length check on TX
* :github:`61784` - bluetooth: controller: check minimum sizes of adv PDUs
* :github:`62003` - drivers: dma: sam: implement xdmac ``get_status()`` API
* :github:`62701` - can: rework the table lookup code in ``can_dlc_to_bytes()``
* :github:`63544` - drivers: can: mcan: move RF0L and RF1L to line 1
* :github:`63835` - net_mgmt: return ``EMSGSIZE`` if buffer passed to ``recvfrom()`` is too small
* :github:`63965` - logging: fix handling of ``CONFIG_LOG_BLOCK_IN_THREAD_TIMEOUT_MS``
* :github:`64398` - drivers: can: be consistent in ``filter_id`` checks when removing rx filters
* :github:`65548` - cmake: modules: dts: fix board revision 0 overlay
* :github:`66500` - toolchain: support ``CONFIG_COMPILER_WARNINGS_AS_ERRORS``
* :github:`66888` - net: ipv6: drop received packets sent by the same interface
* :github:`67692` - i2c: dw: fix integer overflow in ``i2c_dw_data_ask()``
* :github:`69167` - fs: fuse: avoid possible buffer overflow
* :github:`69637` - userspace: additional checks in ``K_SYSCALL_MEMORY``

Security Vulnerability Related
******************************

The following security vulnerabilities (CVEs) were addressed in this
release:

* CVE-2023-4263: `Zephyr project bug tracker GHSA-rf6q-rhhp-pqhf
  <https://github.com/zephyrproject-rtos/zephyr/security/advisories/GHSA-rf6q-rhhp-pqhf>`_

* CVE-2023-4424: `Zephyr project bug tracker GHSA-j4qm-xgpf-qjw3
  <https://github.com/zephyrproject-rtos/zephyr/security/advisories/GHSA-j4qm-xgpf-qjw3>`_

* CVE-2023-5779: `Zephyr project bug tracker GHSA-7cmj-963q-jj47
  <https://github.com/zephyrproject-rtos/zephyr/security/advisories/GHSA-7cmj-963q-jj47>`_

* CVE-2023-6249: `Zephyr project bug tracker GHSA-32f5-3p9h-2rqc
  <https://github.com/zephyrproject-rtos/zephyr/security/advisories/GHSA-32f5-3p9h-2rqc>`_

* CVE-2023-6881: `Zephyr project bug tracker GHSA-mh67-4h3q-p437
  <https://github.com/zephyrproject-rtos/zephyr/security/advisories/GHSA-mh67-4h3q-p437>`_

More detailed information can be found in:
https://docs.zephyrproject.org/latest/security/vulnerabilities.html

.. _zephyr_2.7.5:

Zephyr 2.7.5
####################

This is an LTS maintenance release with fixes.

Issues Fixed
************

These GitHub issues were addressed since the previous 2.7.4 tagged
release:

.. comment List derived from GitHub Issue query: ...
   * :github:`issuenumber` - issue title

* :github:`41111` - utils: tmcvt: fix integer overflow after 6.4 days with ``gettimeofday()`` and ``z_tmcvt()``
* :github:`51663` - tests: kernel: increase coverage for kernel and mmu tests
* :github:`53124` - cmake: fix argument passing in ``zephyr_check_compiler_flag()`` cmake function
* :github:`53315` - net: tcp: fix possible underflow in ``tcp_flags()``
* :github:`53981` - scripts: fixes for ``gen_syscalls`` and ``gen_app_partitions``
* :github:`53983` - init: correct early init time calls to ``k_current_get()`` when TLS is enabled
* :github:`54140` - net: fix BUS FAULT when running nmap towards echo_async sample
* :github:`54325` - coredump: support out-of-tree coredump backend definition
* :github:`54386` - kernel: correct SMP scheduling with more than 2 CPUs
* :github:`54527` - tests: kernel: remove faulty test from tests/kernel/poll
* :github:`55019` - bluetooth: initialize backport of #54905 failed
* :github:`55068` - net: ipv6: validate arguments in ``net_if_ipv6_set_reachable_time()``
* :github:`55069` - net: core: ``net pkt`` shell command missing input validation
* :github:`55323` - logging: fix userspace runtime filtering
* :github:`55490` - cxx: fix compile error in C++ project for bad flags ``-Wno-pointer-sign`` and ``-Werror=implicit-int``
* :github:`56071` - security: MbedTLS: update to v2.28.3
* :github:`56729` - posix: SCHED_RR valid thread priorities
* :github:`57210` - drivers: pcie: endpoint: pcie_ep_iproc: correct use of optional devicetree binding
* :github:`57419` - tests: dma: support 64-bit addressing in tests
* :github:`57710` - posix: support building eventfd on arm-clang

mbedTLS
*******

mbedTLS has been moved to the 2.28.x series (2.28.3, precisely). This is an LTS
release that will be supported with bug fixes and security fixes until the end
of 2024.

Detailed information can be found in:
https://github.com/Mbed-TLS/mbedtls/releases/tag/v2.28.3
https://github.com/zephyrproject-rtos/zephyr/issues/56071

This version is incompatible with TF-M, and because of this TF-M is no longer
supported in the Zephyr LTS. If TF-M is required, it can be added back manually
by changing the mbedTLS revision in ``west.yaml`` to the previous one
(5765cb7f75a9973ae9232d438e361a9d7bbc49e7). This should be carefully assessed
by a security expert to ensure that the known vulnerabilities in that version
don't affect the product.
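For illustration, a minimal sketch of the ``west.yaml`` change that note describes, assuming the usual Zephyr manifest layout (the project name and path are assumptions here; only the revision hash comes from the note above):

```yaml
# Sketch: pin mbedTLS back to the pre-2.28 revision so TF-M keeps building.
# Assess the known vulnerabilities of that revision before shipping.
projects:
  - name: mbedtls
    path: modules/crypto/mbedtls
    revision: 5765cb7f75a9973ae9232d438e361a9d7bbc49e7
```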
Vulnerabilities addressed in this update:

* MBEDTLS_AESNI_C, which is enabled by default, was silently ignored on
  builds that couldn't compile the GCC-style assembly implementation
  (most notably builds with Visual Studio), leaving them vulnerable to
  timing side-channel attacks. There is now an intrinsics-based AES-NI
  implementation as a fallback for when the assembly one cannot be used.

* Fix potential heap buffer overread and overwrite in DTLS if
  MBEDTLS_SSL_DTLS_CONNECTION_ID is enabled and
  MBEDTLS_SSL_CID_IN_LEN_MAX > 2 * MBEDTLS_SSL_CID_OUT_LEN_MAX.

* An adversary with access to precise enough information about memory
  accesses (typically, an untrusted operating system attacking a secure
  enclave) could recover an RSA private key after observing the victim
  performing a single private-key operation, if the window size used for the
  exponentiation was 3 or smaller. Found and reported by Zili KOU,
  Wenjian HE, Sharad Sinha, and Wei ZHANG. See "Cache Side-channel Attacks
  and Defenses of the Sliding Window Algorithm in TEEs" - Design, Automation
  and Test in Europe 2023.

* Zeroize dynamically-allocated buffers used by the PSA Crypto key storage
  module before freeing them. These buffers contain secret key material, and
  could thus potentially leak the key through freed heap.

* Fix a potential heap buffer overread in TLS 1.2 server-side when
  MBEDTLS_USE_PSA_CRYPTO is enabled, an opaque key (created with
  mbedtls_pk_setup_opaque()) is provisioned, and a static ECDH ciphersuite
  is selected. This may result in an application crash or potentially an
  information leak.

* Fix a buffer overread in DTLS ClientHello parsing in servers with
  MBEDTLS_SSL_DTLS_CLIENT_PORT_REUSE enabled. An unauthenticated client
  or a man-in-the-middle could cause a DTLS server to read up to 255 bytes
  after the end of the SSL input buffer. The buffer overread only happens
  when MBEDTLS_SSL_IN_CONTENT_LEN is less than a threshold that depends on
  the exact configuration: 258 bytes if using mbedtls_ssl_cookie_check(),
  and possibly up to 571 bytes with a custom cookie check function.
  Reported by the Cybeats PSI Team.

* Zeroize several intermediate variables used to calculate the expected
  value when verifying a MAC or AEAD tag. This hardens the library in
  case the value leaks through a memory disclosure vulnerability. For
  example, a memory disclosure vulnerability could have allowed a
  man-in-the-middle to inject fake ciphertext into a DTLS connection.

* In psa_cipher_generate_iv() and psa_cipher_encrypt(), do not read back
  from the output buffer. This fixes a potential policy bypass or decryption
  oracle vulnerability if the output buffer is in memory that is shared with
  an untrusted application.

* Fix a double-free that happened after mbedtls_ssl_set_session() or
  mbedtls_ssl_get_session() failed with MBEDTLS_ERR_SSL_ALLOC_FAILED
  (out of memory). After that, calling mbedtls_ssl_session_free()
  and mbedtls_ssl_free() would cause an internal session buffer to
  be free()'d twice.

* Fix a bias in the generation of finite-field Diffie-Hellman-Merkle (DHM)
  private keys and of blinding values for DHM and elliptic curves (ECP)
  computations.

* Fix a potential side channel vulnerability in ECDSA ephemeral key generation.
  An adversary who is capable of very precise timing measurements could
  learn partial information about the leading bits of the nonce used for the
  signature, allowing the recovery of the private key after observing a
  large number of signature operations. This completes a partial fix in
  Mbed TLS 2.20.0.

Security Vulnerability Related
******************************

The following security vulnerabilities (CVEs) were addressed in this
release:

* CVE-2023-0397: `Zephyr project bug tracker GHSA-wc2h-h868-q7hj
  <https://github.com/zephyrproject-rtos/zephyr/security/advisories/GHSA-wc2h-h868-q7hj>`_

* CVE-2023-0779: `Zephyr project bug tracker GHSA-9xj8-6989-r549
  <https://github.com/zephyrproject-rtos/zephyr/security/advisories/GHSA-9xj8-6989-r549>`_

More detailed information can be found in:
https://docs.zephyrproject.org/latest/security/vulnerabilities.html

.. _zephyr_2.7.4:

Zephyr 2.7.4
```diff
@@ -171,6 +171,11 @@ void can_loopback_detach(const struct device *dev, int filter_id)
 {
     struct can_loopback_data *data = DEV_DATA(dev);
 
+    if (filter_id < 0 || filter_id >= ARRAY_SIZE(data->filters)) {
+        LOG_ERR("filter ID %d out of bounds", filter_id);
+        return;
+    }
+
     LOG_DBG("Detach filter ID: %d", filter_id);
     k_mutex_lock(&data->mtx, K_FOREVER);
     data->filters[filter_id].rx_cb = NULL;
```
```diff
@@ -404,7 +404,8 @@ int can_mcan_init(const struct device *dev, const struct can_mcan_config *cfg,
 #ifdef CONFIG_CAN_STM32FD
     can->ils = CAN_MCAN_ILS_RXFIFO0 | CAN_MCAN_ILS_RXFIFO1;
 #else
-    can->ils = CAN_MCAN_ILS_RF0N | CAN_MCAN_ILS_RF1N;
+    can->ils = CAN_MCAN_ILS_RF0N | CAN_MCAN_ILS_RF1N |
+               CAN_MCAN_ILS_RF0L | CAN_MCAN_ILS_RF1L;
 #endif
     can->ile = CAN_MCAN_ILE_EINT0 | CAN_MCAN_ILE_EINT1;
     /* Interrupt on every TX fifo element*/
@@ -894,11 +895,16 @@ int can_mcan_attach_isr(struct can_mcan_data *data,
 void can_mcan_detach(struct can_mcan_data *data,
                      struct can_mcan_msg_sram *msg_ram, int filter_nr)
 {
+    if (filter_nr < 0) {
+        LOG_ERR("filter ID %d out of bounds", filter_nr);
+        return;
+    }
+
     k_mutex_lock(&data->inst_mutex, K_FOREVER);
     if (filter_nr >= NUM_STD_FILTER_DATA) {
         filter_nr -= NUM_STD_FILTER_DATA;
         if (filter_nr >= NUM_STD_FILTER_DATA) {
-            LOG_ERR("Wrong filter id");
+            LOG_ERR("filter ID %d out of bounds", filter_nr);
             return;
         }
```
```diff
@@ -551,6 +551,11 @@ static void mcp2515_detach(const struct device *dev, int filter_nr)
 {
     struct mcp2515_data *dev_data = DEV_DATA(dev);
 
+    if (filter_nr < 0 || filter_nr >= CONFIG_CAN_MAX_FILTER) {
+        LOG_ERR("filter ID %d out of bounds", filter_nr);
+        return;
+    }
+
     k_mutex_lock(&dev_data->mutex, K_FOREVER);
     dev_data->filter_usage &= ~BIT(filter_nr);
     k_mutex_unlock(&dev_data->mutex);
```
```diff
@@ -480,9 +480,8 @@ static void mcux_flexcan_detach(const struct device *dev, int filter_id)
     const struct mcux_flexcan_config *config = dev->config;
     struct mcux_flexcan_data *data = dev->data;
 
-    if (filter_id >= MCUX_FLEXCAN_MAX_RX) {
-        LOG_ERR("Detach: Filter id >= MAX_RX (%d >= %d)", filter_id,
-                MCUX_FLEXCAN_MAX_RX);
+    if (filter_id < 0 || filter_id >= MCUX_FLEXCAN_MAX_RX) {
+        LOG_ERR("filter ID %d out of bounds", filter_id);
         return;
     }
```
```diff
@@ -829,7 +829,8 @@ void can_rcar_detach(const struct device *dev, int filter_nr)
 {
     struct can_rcar_data *data = DEV_CAN_DATA(dev);
 
-    if (filter_nr >= CONFIG_CAN_RCAR_MAX_FILTER) {
+    if (filter_nr < 0 || filter_nr >= CONFIG_CAN_RCAR_MAX_FILTER) {
+        LOG_ERR("filter ID %d out of bounds", filter_nr);
         return;
     }
```
```diff
@@ -1113,10 +1113,10 @@ static const struct can_driver_api can_api_funcs = {
         .prescaler = 0x01
     },
     .timing_max = {
-        .sjw = 0x07,
+        .sjw = 0x04,
         .prop_seg = 0x00,
-        .phase_seg1 = 0x0F,
-        .phase_seg2 = 0x07,
+        .phase_seg1 = 0x10,
+        .phase_seg2 = 0x08,
         .prescaler = 0x400
     }
 };
```
```diff
@@ -354,11 +354,36 @@ static int sam_xdmac_initialize(const struct device *dev)
     return 0;
 }
 
+static int sam_xdmac_get_status(const struct device *dev, uint32_t channel,
+                                struct dma_status *status)
+{
+    const struct sam_xdmac_dev_cfg *const dev_cfg = dev->config;
+
+    Xdmac * const xdmac = dev_cfg->regs;
+    uint32_t chan_cfg = xdmac->XDMAC_CHID[channel].XDMAC_CC;
+    uint32_t ublen = xdmac->XDMAC_CHID[channel].XDMAC_CUBC;
+
+    /* we need to check some of the XDMAC_CC registers to determine the DMA direction */
+    if ((chan_cfg & XDMAC_CC_TYPE_Msk) == 0) {
+        status->dir = MEMORY_TO_MEMORY;
+    } else if ((chan_cfg & XDMAC_CC_DSYNC_Msk) == XDMAC_CC_DSYNC_MEM2PER) {
+        status->dir = MEMORY_TO_PERIPHERAL;
+    } else {
+        status->dir = PERIPHERAL_TO_MEMORY;
+    }
+
+    status->busy = ((chan_cfg & XDMAC_CC_INITD_Msk) != 0) || (ublen > 0);
+    status->pending_length = ublen;
+
+    return 0;
+}
+
 static const struct dma_driver_api sam_xdmac_driver_api = {
     .config = sam_xdmac_config,
     .reload = sam_xdmac_transfer_reload,
     .start = sam_xdmac_transfer_start,
     .stop = sam_xdmac_transfer_stop,
+    .get_status = sam_xdmac_get_status,
 };
 
 /* DMA0 */
```
```diff
@@ -658,7 +658,7 @@ static int spi_nor_erase(const struct device *dev, off_t addr, size_t size)
 
         if ((etp->exp != 0)
             && SPI_NOR_IS_ALIGNED(addr, etp->exp)
-            && SPI_NOR_IS_ALIGNED(size, etp->exp)
+            && (size >= BIT(etp->exp))
             && ((bet == NULL)
                 || (etp->exp > bet->exp))) {
             bet = etp;
```
```diff
@@ -43,10 +43,10 @@ static inline void i2c_dw_data_ask(const struct device *dev)
 {
     struct i2c_dw_dev_config * const dw = dev->data;
     uint32_t data;
-    uint8_t tx_empty;
-    int8_t rx_empty;
-    uint8_t cnt;
-    uint8_t rx_buffer_depth, tx_buffer_depth;
+    int tx_empty;
+    int rx_empty;
+    int cnt;
+    int rx_buffer_depth, tx_buffer_depth;
     union ic_comp_param_1_register ic_comp_param_1;
     uint32_t reg_base = get_regs(dev);
```
```diff
@@ -494,6 +494,11 @@ static int nrf5_tx(const struct device *dev,
     uint8_t *payload = frag->data;
     bool ret = true;
 
+    if (payload_len > NRF5_PSDU_LENGTH) {
+        LOG_ERR("Payload too large: %d", payload_len);
+        return -EMSGSIZE;
+    }
+
     LOG_DBG("%p (%u)", payload, payload_len);
 
     nrf5_radio->tx_psdu[0] = payload_len + NRF5_FCS_LENGTH;
```
```diff
@@ -467,7 +467,7 @@ err_out:
 
 static struct iproc_pcie_ep_ctx iproc_pcie_ep_ctx_0;
 
-static struct iproc_pcie_ep_config iproc_pcie_ep_config_0 = {
+static const struct iproc_pcie_ep_config iproc_pcie_ep_config_0 = {
     .id = 0,
     .base = (struct iproc_pcie_reg *)DT_INST_REG_ADDR(0),
     .reg_size = DT_INST_REG_SIZE(0),
@@ -475,19 +475,21 @@ static struct iproc_pcie_ep_config iproc_pcie_ep_config_0 = {
     .map_low_size = DT_INST_REG_SIZE_BY_NAME(0, map_lowmem),
     .map_high_base = DT_INST_REG_ADDR_BY_NAME(0, map_highmem),
     .map_high_size = DT_INST_REG_SIZE_BY_NAME(0, map_highmem),
+#if DT_INST_NODE_HAS_PROP(0, dmas)
     .pl330_dev = DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_IDX(0, 0)),
     .pl330_tx_chan_id = DT_INST_DMAS_CELL_BY_NAME(0, txdma, channel),
     .pl330_rx_chan_id = DT_INST_DMAS_CELL_BY_NAME(0, rxdma, channel),
+#endif
 };
 
-static struct pcie_ep_driver_api iproc_pcie_ep_api = {
+static const struct pcie_ep_driver_api iproc_pcie_ep_api = {
     .conf_read = iproc_pcie_conf_read,
     .conf_write = iproc_pcie_conf_write,
     .map_addr = iproc_pcie_map_addr,
     .unmap_addr = iproc_pcie_unmap_addr,
     .raise_irq = iproc_pcie_raise_irq,
     .register_reset_cb = iproc_pcie_register_reset_cb,
-    .dma_xfer = iproc_pcie_pl330_dma_xfer,
+    .dma_xfer = DT_INST_NODE_HAS_PROP(0, dmas) ? iproc_pcie_pl330_dma_xfer : NULL,
 };
 
 DEVICE_DT_INST_DEFINE(0, &iproc_pcie_ep_init, NULL,
```
```diff
@@ -341,10 +341,11 @@ int sys_clock_driver_init(const struct device *dev)
         alloc_mask = BIT_MASK(EXT_CHAN_COUNT) << 1;
     }
 
-    if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
-        compare_set(0, counter() + CYC_PER_TICK,
-                    sys_clock_timeout_handler, NULL);
-    }
+    uint32_t initial_timeout = IS_ENABLED(CONFIG_TICKLESS_KERNEL) ?
+        MAX_CYCLES : CYC_PER_TICK;
+
+    compare_set(0, counter() + initial_timeout,
+                sys_clock_timeout_handler, NULL);
 
     z_nrf_clock_control_lf_on(mode);
```
```diff
@@ -266,6 +266,19 @@ struct bt_l2cap_chan_ops {
      */
     void (*encrypt_change)(struct bt_l2cap_chan *chan, uint8_t hci_status);
 
+    /** @brief Channel alloc_seg callback
+     *
+     *  If this callback is provided the channel will use it to allocate
+     *  buffers to store segments. This avoids wasting big SDU buffers with
+     *  potentially much smaller PDUs. If this callback is supplied, it must
+     *  return a valid buffer.
+     *
+     *  @param chan The channel requesting a buffer.
+     *
+     *  @return Allocated buffer.
+     */
+    struct net_buf *(*alloc_seg)(struct bt_l2cap_chan *chan);
+
     /** @brief Channel alloc_buf callback
      *
      *  If this callback is provided the channel will use it to allocate
```
```diff
@@ -126,6 +126,31 @@ struct coredump_mem_hdr_t {
     uintptr_t end;
 } __packed;
 
+typedef void (*coredump_backend_start_t)(void);
+typedef void (*coredump_backend_end_t)(void);
+typedef void (*coredump_backend_buffer_output_t)(uint8_t *buf, size_t buflen);
+typedef int (*coredump_backend_query_t)(enum coredump_query_id query_id,
+                                        void *arg);
+typedef int (*coredump_backend_cmd_t)(enum coredump_cmd_id cmd_id,
+                                      void *arg);
+
+struct coredump_backend_api {
+    /* Signal to backend of the start of coredump. */
+    coredump_backend_start_t start;
+
+    /* Signal to backend of the end of coredump. */
+    coredump_backend_end_t end;
+
+    /* Raw buffer output */
+    coredump_backend_buffer_output_t buffer_output;
+
+    /* Perform query on backend */
+    coredump_backend_query_t query;
+
+    /* Perform command on backend */
+    coredump_backend_cmd_t cmd;
+};
+
 void coredump(unsigned int reason, const z_arch_esf_t *esf,
               struct k_thread *thread);
 void coredump_memory_dump(uintptr_t start_addr, uintptr_t end_addr);
```
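The struct and typedefs above define the contract that an out-of-tree coredump backend now fills in (see :github:`54325` in the notes above). A minimal sketch of such a backend; the function bodies and the exported symbol name are assumptions for illustration, not taken from the diff:

```c
#include <errno.h>
#include <debug/coredump.h>

static void my_backend_start(void)
{
	/* open/prepare the output channel (UART, flash partition, ...) */
}

static void my_backend_end(void)
{
	/* flush any buffered data and close the channel */
}

static void my_backend_buffer_output(uint8_t *buf, size_t buflen)
{
	/* write buflen bytes of raw coredump data to the chosen medium */
}

static int my_backend_query(enum coredump_query_id query_id, void *arg)
{
	return -ENOTSUP; /* no queries supported in this sketch */
}

static int my_backend_cmd(enum coredump_cmd_id cmd_id, void *arg)
{
	return -ENOTSUP; /* no commands supported in this sketch */
}

/* Hypothetical hook-up: the coredump subsystem selects a backend
 * object of this type via Kconfig.
 */
struct coredump_backend_api my_coredump_backend = {
	.start = my_backend_start,
	.end = my_backend_end,
	.buffer_output = my_backend_buffer_output,
	.query = my_backend_query,
	.cmd = my_backend_cmd,
};
```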
```diff
@@ -420,7 +420,7 @@ static inline uint8_t can_dlc_to_bytes(uint8_t dlc)
     static const uint8_t dlc_table[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 12,
                                         16, 20, 24, 32, 48, 64};
 
-    return dlc > 0x0F ? 64 : dlc_table[dlc];
+    return dlc_table[MIN(dlc, ARRAY_SIZE(dlc_table) - 1)];
 }
 
 /**
```
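A few data points make the new lookup easy to check; all values follow directly from the ``dlc_table`` in the hunk above (classic CAN DLCs map 1:1, CAN FD DLCs 9–15 map to 12–64 bytes):

```c
uint8_t a = can_dlc_to_bytes(8);   /* 8 bytes                        */
uint8_t b = can_dlc_to_bytes(9);   /* 12 bytes (CAN FD)              */
uint8_t c = can_dlc_to_bytes(15);  /* 64 bytes (CAN FD maximum)      */
uint8_t d = can_dlc_to_bytes(42);  /* out-of-range DLC clamps to 64  */
```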
```diff
@@ -162,6 +162,11 @@ struct z_kernel {
 #if defined(CONFIG_THREAD_MONITOR)
     struct k_thread *threads; /* singly linked list of ALL threads */
 #endif
+
+#if defined(CONFIG_SMP) && defined(CONFIG_SCHED_IPI_SUPPORTED)
+    /* Need to signal an IPI at the next scheduling point */
+    bool pending_ipi;
+#endif
 };
 
 typedef struct z_kernel _kernel_t;
```
```diff
@@ -302,10 +302,8 @@ static inline char z_log_minimal_level_to_char(int level)
     } \
     \
     bool is_user_context = k_is_user_context(); \
-    uint32_t filters = IS_ENABLED(CONFIG_LOG_RUNTIME_FILTERING) ? \
-                        (_dsource)->filters : 0;\
     if (IS_ENABLED(CONFIG_LOG_RUNTIME_FILTERING) && !is_user_context && \
-        _level > Z_LOG_RUNTIME_FILTER(filters)) { \
+        _level > Z_LOG_RUNTIME_FILTER((_dsource)->filters)) { \
         break; \
     } \
     if (IS_ENABLED(CONFIG_LOG2)) { \
@@ -347,8 +345,6 @@ static inline char z_log_minimal_level_to_char(int level)
         break; \
     } \
     bool is_user_context = k_is_user_context(); \
-    uint32_t filters = IS_ENABLED(CONFIG_LOG_RUNTIME_FILTERING) ? \
-                        (_dsource)->filters : 0;\
     \
     if (IS_ENABLED(CONFIG_LOG_MINIMAL)) { \
         Z_LOG_TO_PRINTK(_level, "%s", _str); \
@@ -357,7 +353,7 @@ static inline char z_log_minimal_level_to_char(int level)
         break; \
     } \
     if (IS_ENABLED(CONFIG_LOG_RUNTIME_FILTERING) && !is_user_context && \
-        _level > Z_LOG_RUNTIME_FILTER(filters)) { \
+        _level > Z_LOG_RUNTIME_FILTER((_dsource)->filters)) { \
         break; \
     } \
     if (IS_ENABLED(CONFIG_LOG2)) { \
```
```diff
@@ -889,15 +889,6 @@ static inline void net_buf_simple_restore(struct net_buf_simple *buf,
     buf->len = state->len;
 }
 
-/**
- * Flag indicating that the buffer has associated fragments. Only used
- * internally by the buffer handling code while the buffer is inside a
- * FIFO, meaning this never needs to be explicitly set or unset by the
- * net_buf API user. As long as the buffer is outside of a FIFO, i.e.
- * in practice always for the user for this API, the buf->frags pointer
- * should be used instead.
- */
-#define NET_BUF_FRAGS        BIT(0)
 /**
  * Flag indicating that the buffer's associated data pointer, points to
  * externally allocated memory. Therefore once ref goes down to zero, the
@@ -907,7 +898,7 @@ static inline void net_buf_simple_restore(struct net_buf_simple *buf,
  * Reference count mechanism however will behave the same way, and ref
  * count going to 0 will free the net_buf but no the data pointer in it.
  */
-#define NET_BUF_EXTERNAL_DATA  BIT(1)
+#define NET_BUF_EXTERNAL_DATA  BIT(0)
 
 /**
  * @brief Network buffer representation.
@@ -917,13 +908,11 @@ static inline void net_buf_simple_restore(struct net_buf_simple *buf,
  * using the net_buf_alloc() API.
  */
 struct net_buf {
-    union {
-        /** Allow placing the buffer into sys_slist_t */
-        sys_snode_t node;
+    /** Allow placing the buffer into sys_slist_t */
+    sys_snode_t node;
 
-        /** Fragments associated with this buffer. */
-        struct net_buf *frags;
-    };
+    /** Fragments associated with this buffer. */
+    struct net_buf *frags;
 
     /** Reference count. */
     uint8_t ref;
```
```diff
@@ -199,10 +199,11 @@ struct net_conn_handle;
  * anyway. This saves 12 bytes / context in IPv6.
  */
 __net_socket struct net_context {
-    /** User data.
-     *
-     * First member of the structure to let users either have user data
-     * associated with a context, or put contexts into a FIFO.
+    /** First member of the structure to allow to put contexts into a FIFO.
      */
     void *fifo_reserved;
 
+    /** User data associated with a context.
+     */
+    void *user_data;
+
```
```diff
@@ -1368,6 +1368,10 @@ uint32_t net_if_ipv6_calc_reachable_time(struct net_if_ipv6 *ipv6);
 static inline void net_if_ipv6_set_reachable_time(struct net_if_ipv6 *ipv6)
 {
 #if defined(CONFIG_NET_NATIVE_IPV6)
+    if (ipv6 == NULL) {
+        return;
+    }
+
     ipv6->reachable_time = net_if_ipv6_calc_reachable_time(ipv6);
 #endif
 }
```
```diff
@@ -7,6 +7,9 @@
 #ifndef ZEPHYR_INCLUDE_TIME_UNITS_H_
 #define ZEPHYR_INCLUDE_TIME_UNITS_H_
 
+#include <sys/util.h>
+#include <toolchain.h>
+
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -56,6 +59,21 @@ static TIME_CONSTEXPR inline int sys_clock_hw_cycles_per_sec(void)
 #endif
 }
 
+/** @internal
+ * Macro determines if fast conversion algorithm can be used. It checks if
+ * maximum timeout represented in source frequency domain and multiplied by
+ * target frequency fits in 64 bits.
+ *
+ * @param from_hz Source frequency.
+ * @param to_hz Target frequency.
+ *
+ * @retval true Use faster algorithm.
+ * @retval false Use algorithm preventing overflow of intermediate value.
+ */
+#define Z_TMCVT_USE_FAST_ALGO(from_hz, to_hz) \
+    ((ceiling_fraction(CONFIG_SYS_CLOCK_MAX_TIMEOUT_DAYS * 24ULL * 3600ULL * from_hz, \
+                       UINT32_MAX) * to_hz) <= UINT32_MAX)
+
 /* Time converter generator gadget. Selects from one of three
  * conversion algorithms: ones that take advantage when the
  * frequencies are an integer ratio (in either direction), or a full
@@ -123,8 +141,18 @@ static TIME_CONSTEXPR ALWAYS_INLINE uint64_t z_tmcvt(uint64_t t, uint32_t from_h
     } else {
         if (result32) {
             return (uint32_t)((t * to_hz + off) / from_hz);
+        } else if (const_hz && Z_TMCVT_USE_FAST_ALGO(from_hz, to_hz)) {
+            /* Faster algorithm but source is first multiplied by target frequency
+             * and it can overflow even though final result would not overflow.
+             * Kconfig option shall prevent use of this algorithm when there is a
+             * risk of overflow.
+             */
+            return ((t * to_hz + off) / from_hz);
         } else {
-            return (t * to_hz + off) / from_hz;
+            /* Slower algorithm but input is first divided before being multiplied
+             * which prevents overflow of intermediate value.
+             */
+            return (t / from_hz) * to_hz + ((t % from_hz) * to_hz + off) / from_hz;
         }
     }
 }
```
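The trade-off described in those comments can be reproduced in isolation. A standalone sketch of the two conversion strategies (plain C; the names are local to this example, and the rounding offset ``off`` is omitted for brevity):

```c
#include <stdint.h>

/* Fast path: multiply first. Precise, but t * to_hz can overflow 64 bits
 * even when the final result would fit.
 */
static uint64_t convert_fast(uint64_t t, uint32_t from_hz, uint32_t to_hz)
{
	return (t * (uint64_t)to_hz) / from_hz;
}

/* Safe path: divide first, then convert the remainder separately, so the
 * largest intermediate value is bounded by roughly from_hz * to_hz.
 */
static uint64_t convert_safe(uint64_t t, uint32_t from_hz, uint32_t to_hz)
{
	return (t / from_hz) * to_hz +
	       ((t % from_hz) * (uint64_t)to_hz) / from_hz;
}
```

``Z_TMCVT_USE_FAST_ALGO`` performs exactly this feasibility check at compile time: the fast path is chosen only when the maximum configured timeout, expressed in the source frequency and multiplied by the target frequency, still fits in 64 bits.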
```diff
@@ -25,6 +25,7 @@
 
 #include <zephyr/types.h>
 #include <stddef.h>
+#include <stdint.h>
 
 #ifdef __cplusplus
 extern "C" {
@@ -61,6 +62,23 @@ extern "C" {
 /** @brief 0 if @p cond is true-ish; causes a compile error otherwise. */
 #define ZERO_OR_COMPILE_ERROR(cond) ((int) sizeof(char[1 - 2 * !(cond)]) - 1)
 
+/**
+ * @brief Determine if a buffer exceeds highest address
+ *
+ * This macro determines if a buffer identified by a starting address @a addr
+ * and length @a buflen spans a region of memory that goes beyond the highest
+ * possible address (thereby resulting in a pointer overflow).
+ *
+ * @param addr Buffer starting address
+ * @param buflen Length of the buffer
+ *
+ * @return true if pointer overflow detected, false otherwise
+ */
+#define Z_DETECT_POINTER_OVERFLOW(addr, buflen) \
+    (((buflen) != 0) && \
+     ((UINTPTR_MAX - (uintptr_t)(addr)) <= ((uintptr_t)((buflen) - 1))))
+
 #if defined(__cplusplus)
 
 /* The built-in function used below for type checking in C is not
```
```diff
@@ -329,6 +329,22 @@ extern int z_user_string_copy(char *dst, const char *src, size_t maxlen);
  */
 #define Z_SYSCALL_VERIFY(expr) Z_SYSCALL_VERIFY_MSG(expr, #expr)
 
+/**
+ * @brief Macro to check if size is negative
+ *
+ * Z_SYSCALL_MEMORY can be called with signed/unsigned types
+ * and because of that if we check if size is greater or equal to
+ * zero, many static analyzers complain about no effect expression.
+ *
+ * @param ptr Memory area to examine
+ * @param size Size of the memory area
+ * @return true if size is valid, false otherwise
+ * @note This is an internal API. Do not use unless you are extending
+ *       functionality in the Zephyr tree.
+ */
+#define Z_SYSCALL_MEMORY_SIZE_CHECK(ptr, size) \
+    (((uintptr_t)ptr + size) >= (uintptr_t)ptr)
+
 /**
  * @brief Runtime check that a user thread has read and/or write permission to
  *        a memory area
@@ -346,8 +362,10 @@ extern int z_user_string_copy(char *dst, const char *src, size_t maxlen);
  * @return 0 on success, nonzero on failure
  */
 #define Z_SYSCALL_MEMORY(ptr, size, write) \
-    Z_SYSCALL_VERIFY_MSG(arch_buffer_validate((void *)ptr, size, write) \
-                         == 0, \
+    Z_SYSCALL_VERIFY_MSG(Z_SYSCALL_MEMORY_SIZE_CHECK(ptr, size) \
+                         && !Z_DETECT_POINTER_OVERFLOW(ptr, size) \
+                         && (arch_buffer_validate((void *)ptr, size, write) \
+                             == 0), \
                          "Memory region %p (size %zu) %s access denied", \
                          (void *)(ptr), (size_t)(size), \
                          write ? "write" : "read")
```
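Both new checks guard against integer wraparound before ``arch_buffer_validate()`` ever runs. A standalone illustration of the failure mode they close (plain C; the names and the ``limit`` parameter are example-only, not part of the Zephyr API):

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Naive check: for a huge size, start + size wraps around, compares as
 * "small", and a buffer spanning past the top of memory is accepted.
 */
static bool naive_range_ok(uintptr_t start, size_t size, uintptr_t limit)
{
	return start + size <= limit;
}

/* Checked variant, mirroring Z_DETECT_POINTER_OVERFLOW: reject the
 * buffer if start + (size - 1) would exceed UINTPTR_MAX.
 */
static bool checked_range_ok(uintptr_t start, size_t size, uintptr_t limit)
{
	if ((size != 0) &&
	    ((UINTPTR_MAX - start) <= (uintptr_t)(size - 1))) {
		return false; /* pointer overflow detected */
	}
	return start + size <= limit;
}
```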
```diff
@@ -48,8 +48,9 @@
 #endif
 
 
 #undef BUILD_ASSERT /* clear out common version */
+
 /* C++11 has static_assert built in */
-#ifdef __cplusplus
+#if defined(__cplusplus) && (__cplusplus >= 201103L)
 #define BUILD_ASSERT(EXPR, MSG...) static_assert(EXPR, "" MSG)
 
 /*
```
```diff
@@ -613,6 +613,17 @@ config TIMEOUT_64BIT
       availability of absolute timeout values (which require the
       extra precision).
 
+config SYS_CLOCK_MAX_TIMEOUT_DAYS
+    int "Max timeout (in days) used in conversions"
+    default 365
+    help
+      Value is used in the time conversion static inline function to determine
+      at compile time which algorithm to use. One algorithm is faster, takes
+      less code but may overflow if multiplication of source and target
+      frequency exceeds 64 bits. Second algorithm prevents that. Faster
+      algorithm is selected for conversion if maximum timeout represented in
+      source frequency domain multiplied by target frequency fits in 64 bits.
+
 config XIP
     bool "Execute in place"
     help
```
```diff
@@ -576,6 +576,9 @@ static void triggered_work_expiration_handler(struct _timeout *timeout)
     k_work_submit_to_queue(twork->workq, &twork->work);
 }
 
+extern int z_work_submit_to_queue(struct k_work_q *queue,
+                                  struct k_work *work);
+
 static int signal_triggered_work(struct k_poll_event *event, uint32_t status)
 {
     struct z_poller *poller = event->poller;
@@ -587,7 +590,7 @@ static int signal_triggered_work(struct k_poll_event *event, uint32_t status)
 
         z_abort_timeout(&twork->timeout);
         twork->poll_result = 0;
-        k_work_submit_to_queue(work_q, &twork->work);
+        z_work_submit_to_queue(work_q, &twork->work);
     }
 
     return 0;
```
```diff
@@ -219,6 +219,25 @@ static ALWAYS_INLINE void dequeue_thread(void *pq,
     }
 }
 
+static void signal_pending_ipi(void)
+{
+    /* Synchronization note: you might think we need to lock these
+     * two steps, but an IPI is idempotent. It's OK if we do it
+     * twice. All we require is that if a CPU sees the flag true,
+     * it is guaranteed to send the IPI, and if a core sets
+     * pending_ipi, the IPI will be sent the next time through
+     * this code.
+     */
+#if defined(CONFIG_SMP) && defined(CONFIG_SCHED_IPI_SUPPORTED)
+    if (CONFIG_MP_NUM_CPUS > 1) {
+        if (_kernel.pending_ipi) {
+            _kernel.pending_ipi = false;
+            arch_sched_ipi();
+        }
+    }
+#endif
+}
+
 #ifdef CONFIG_SMP
 /* Called out of z_swap() when CONFIG_SMP. The current thread can
  * never live in the run queue until we are inexorably on the context
@@ -231,6 +250,7 @@ void z_requeue_current(struct k_thread *curr)
     if (z_is_thread_queued(curr)) {
         _priq_run_add(&_kernel.ready_q.runq, curr);
     }
+    signal_pending_ipi();
 }
 #endif
 
@@ -481,6 +501,15 @@ static bool thread_active_elsewhere(struct k_thread *thread)
     return false;
 }
 
+static void flag_ipi(void)
+{
+#if defined(CONFIG_SMP) && defined(CONFIG_SCHED_IPI_SUPPORTED)
+    if (CONFIG_MP_NUM_CPUS > 1) {
+        _kernel.pending_ipi = true;
+    }
+#endif
+}
+
 static void ready_thread(struct k_thread *thread)
 {
 #ifdef CONFIG_KERNEL_COHERENCE
@@ -495,9 +524,7 @@ static void ready_thread(struct k_thread *thread)
 
         queue_thread(&_kernel.ready_q.runq, thread);
         update_cache(0);
-#if defined(CONFIG_SMP) && defined(CONFIG_SCHED_IPI_SUPPORTED)
-        arch_sched_ipi();
-#endif
+        flag_ipi();
     }
 }
 
@@ -799,9 +826,7 @@ void z_thread_priority_set(struct k_thread *thread, int prio)
 {
     bool need_sched = z_set_prio(thread, prio);
 
-#if defined(CONFIG_SMP) && defined(CONFIG_SCHED_IPI_SUPPORTED)
-    arch_sched_ipi();
-#endif
+    flag_ipi();
 
     if (need_sched && _current->base.sched_locked == 0U) {
         z_reschedule_unlocked();
@@ -841,6 +866,7 @@ void z_reschedule(struct k_spinlock *lock, k_spinlock_key_t key)
         z_swap(lock, key);
     } else {
         k_spin_unlock(lock, key);
+        signal_pending_ipi();
     }
 }
 
@@ -850,6 +876,7 @@ void z_reschedule_irqlock(uint32_t key)
         z_swap_irqlock(key);
     } else {
         irq_unlock(key);
+        signal_pending_ipi();
     }
 }
 
@@ -883,7 +910,16 @@ void k_sched_unlock(void)
 struct k_thread *z_swap_next_thread(void)
 {
 #ifdef CONFIG_SMP
-    return next_up();
+    struct k_thread *ret = next_up();
+
+    if (ret == _current) {
+        /* When not swapping, have to signal IPIs here. In
+         * the context switch case it must happen later, after
+         * _current gets requeued.
+         */
+        signal_pending_ipi();
+    }
+    return ret;
 #else
     return _kernel.ready_q.cache;
 #endif
@@ -950,6 +986,7 @@ void *z_get_next_switch_handle(void *interrupted)
             new_thread->switch_handle = NULL;
         }
     }
+    signal_pending_ipi();
     return ret;
 #else
     _current->switch_handle = interrupted;
@@ -1346,9 +1383,7 @@ void z_impl_k_wakeup(k_tid_t thread)
     z_mark_thread_as_not_suspended(thread);
     z_ready_thread(thread);
 
-#if defined(CONFIG_SMP) && defined(CONFIG_SCHED_IPI_SUPPORTED)
-    arch_sched_ipi();
-#endif
+    flag_ipi();
 
     if (!arch_is_in_isr()) {
         z_reschedule_unlocked();
@@ -1535,6 +1570,9 @@ void z_thread_abort(struct k_thread *thread)
             /* It's running somewhere else, flag and poke */
             thread->base.thread_state |= _THREAD_ABORTING;
 
+            /* We're going to spin, so need a true synchronous IPI
+             * here, not deferred!
+             */
 #ifdef CONFIG_SCHED_IPI_SUPPORTED
             arch_sched_ipi();
 #endif
```
```diff
@@ -1011,7 +1011,7 @@ void z_thread_mark_switched_in(void)
 #ifdef CONFIG_THREAD_RUNTIME_STATS
     struct k_thread *thread;
 
-    thread = k_current_get();
+    thread = z_current_get();
 #ifdef CONFIG_THREAD_RUNTIME_STATS_USE_TIMING_FUNCTIONS
     thread->rt_stats.last_switched_in = timing_counter_get();
 #else
@@ -1033,7 +1033,7 @@ void z_thread_mark_switched_out(void)
     uint64_t diff;
     struct k_thread *thread;
 
-    thread = k_current_get();
+    thread = z_current_get();
 
     if (unlikely(thread->rt_stats.last_switched_in == 0)) {
         /* Has not run before */
```
@@ -68,8 +68,14 @@ static int32_t next_timeout(void)
{
struct _timeout *to = first();
int32_t ticks_elapsed = elapsed();
int32_t ret = to == NULL ? MAX_WAIT
: CLAMP(to->dticks - ticks_elapsed, 0, MAX_WAIT);
int32_t ret;

if ((to == NULL) ||
((int64_t)(to->dticks - ticks_elapsed) > (int64_t)INT_MAX)) {
ret = MAX_WAIT;
} else {
ret = MAX(0, to->dticks - ticks_elapsed);
}

#ifdef CONFIG_TIMESLICING
if (_current_cpu->slice_ticks && _current_cpu->slice_ticks < ret) {

@@ -238,6 +244,18 @@ void sys_clock_announce(int32_t ticks)

k_spinlock_key_t key = k_spin_lock(&timeout_lock);

/* We release the lock around the callbacks below, so on SMP
 * systems someone might be already running the loop. Don't
 * race (which will cause parallel execution of "sequential"
 * timeouts and confuse apps), just increment the tick count
 * and return.
 */
if (IS_ENABLED(CONFIG_SMP) && (announce_remaining != 0)) {
announce_remaining += ticks;
k_spin_unlock(&timeout_lock, key);
return;
}

announce_remaining = ticks;

while (first() != NULL && first()->dticks <= announce_remaining) {

@@ -245,13 +263,13 @@ void sys_clock_announce(int32_t ticks)
int dt = t->dticks;

curr_tick += dt;
announce_remaining -= dt;
t->dticks = 0;
remove_timeout(t);

k_spin_unlock(&timeout_lock, key);
t->fn(t);
key = k_spin_lock(&timeout_lock);
announce_remaining -= dt;
}

if (first() != NULL) {

@@ -271,7 +289,7 @@ int64_t sys_clock_tick_get(void)
uint64_t t = 0U;

LOCKED(&timeout_lock) {
t = curr_tick + sys_clock_elapsed();
t = curr_tick + elapsed();
}
return t;
}
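The `next_timeout()` rework exists because the distance to a far-future timeout can exceed the 32-bit return type. A worked illustration of the failure the widened comparison guards against (hypothetical values; assumes `dticks` is 64-bit and the return value 32-bit, consistent with the casts in the diff):

```c
/* A timeout more than INT32_MAX ticks out makes dticks - elapsed
 * overflow when truncated into int32_t; the wrapped (negative) value
 * fed to the old CLAMP() pins the wait to 0 and busy-wakes the CPU.
 * Comparing in int64_t first keeps MAX_WAIT as the answer.
 */
int64_t delta = (int64_t)dticks - (int64_t)ticks_elapsed; /* 64-bit math */
int32_t ret = (delta > (int64_t)INT_MAX) ? MAX_WAIT
					 : (int32_t)MAX(0, delta);
```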
@@ -355,26 +355,45 @@ static int submit_to_queue_locked(struct k_work *work,
return ret;
}

int k_work_submit_to_queue(struct k_work_q *queue,
struct k_work *work)
/* Submit work to a queue but do not yield the current thread.
 *
 * Intended for internal use.
 *
 * See also submit_to_queue_locked().
 *
 * @param queuep pointer to a queue reference.
 * @param work the work structure to be submitted
 *
 * @retval see submit_to_queue_locked()
 */
int z_work_submit_to_queue(struct k_work_q *queue,
struct k_work *work)
{
__ASSERT_NO_MSG(work != NULL);

k_spinlock_key_t key = k_spin_lock(&lock);

SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, submit_to_queue, queue, work);

int ret = submit_to_queue_locked(work, &queue);

k_spin_unlock(&lock, key);

/* If we changed the queue contents (as indicated by a positive ret)
 * the queue thread may now be ready, but we missed the reschedule
 * point because the lock was held. If this is being invoked by a
 * preemptible thread then yield.
return ret;
}

int k_work_submit_to_queue(struct k_work_q *queue,
struct k_work *work)
{
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, submit_to_queue, queue, work);

int ret = z_work_submit_to_queue(queue, work);

/* submit_to_queue_locked() won't reschedule on its own
 * (really it should, otherwise this process will result in
 * spurious calls to z_swap() due to the race), so do it here
 * if the queue state changed.
 */
if ((ret > 0) && (k_is_preempt_thread() != 0)) {
k_yield();
if (ret > 0) {
z_reschedule_unlocked();
}

SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, submit_to_queue, queue, work, ret);

@@ -586,6 +605,7 @@ static void work_queue_main(void *workq_ptr, void *p2, void *p3)
struct k_work *work = NULL;
k_work_handler_t handler = NULL;
k_spinlock_key_t key = k_spin_lock(&lock);
bool yield;

/* Check for and prepare any new work. */
node = sys_slist_get(&queue->pending);

@@ -644,34 +664,30 @@ static void work_queue_main(void *workq_ptr, void *p2, void *p3)

k_spin_unlock(&lock, key);

if (work != NULL) {
bool yield;
__ASSERT_NO_MSG(handler != NULL);
handler(work);

__ASSERT_NO_MSG(handler != NULL);
handler(work);
/* Mark the work item as no longer running and deal
 * with any cancellation issued while it was running.
 * Clear the BUSY flag and optionally yield to prevent
 * starving other threads.
 */
key = k_spin_lock(&lock);

/* Mark the work item as no longer running and deal
 * with any cancellation issued while it was running.
 * Clear the BUSY flag and optionally yield to prevent
 * starving other threads.
 */
key = k_spin_lock(&lock);
flag_clear(&work->flags, K_WORK_RUNNING_BIT);
if (flag_test(&work->flags, K_WORK_CANCELING_BIT)) {
finalize_cancel_locked(work);
}

flag_clear(&work->flags, K_WORK_RUNNING_BIT);
if (flag_test(&work->flags, K_WORK_CANCELING_BIT)) {
finalize_cancel_locked(work);
}
flag_clear(&queue->flags, K_WORK_QUEUE_BUSY_BIT);
yield = !flag_test(&queue->flags, K_WORK_QUEUE_NO_YIELD_BIT);
k_spin_unlock(&lock, key);

flag_clear(&queue->flags, K_WORK_QUEUE_BUSY_BIT);
yield = !flag_test(&queue->flags, K_WORK_QUEUE_NO_YIELD_BIT);
k_spin_unlock(&lock, key);

/* Optionally yield to prevent the work queue from
 * starving other threads.
 */
if (yield) {
k_yield();
}
/* Optionally yield to prevent the work queue from
 * starving other threads.
 */
if (yield) {
k_yield();
}
}
}
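After this split, `k_work_submit_to_queue()` is a thin public wrapper that reschedules, while `z_work_submit_to_queue()` submits without yielding. A minimal caller sketch for the public API touched above; the handler and queue objects are illustrative, not from this diff:

```c
static struct k_work_q my_workq;        /* assumed started elsewhere */

static void my_handler(struct k_work *work)
{
	/* deferred processing happens here */
}
static K_WORK_DEFINE(my_work, my_handler);

void submit_example(void)
{
	int ret = k_work_submit_to_queue(&my_workq, &my_work);

	/* ret > 0 means the queue state changed; per the change above the
	 * submit path itself already ran z_reschedule_unlocked(), so the
	 * queue thread can run immediately if it is higher priority.
	 */
	ARG_UNUSED(ret);
}
```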
@@ -112,6 +112,8 @@ config APP_LINK_WITH_POSIX_SUBSYS
config EVENTFD
bool "Enable support for eventfd"
depends on !ARCH_POSIX
select POLL
default y if POSIX_API
help
Enable support for event file descriptors, eventfd. An eventfd can
be used as an event wait/notify mechanism together with POSIX calls
@@ -27,7 +27,6 @@ static struct k_spinlock rt_clock_base_lock;
 */
int z_impl_clock_gettime(clockid_t clock_id, struct timespec *ts)
{
uint64_t elapsed_nsecs;
struct timespec base;
k_spinlock_key_t key;

@@ -48,9 +47,13 @@ int z_impl_clock_gettime(clockid_t clock_id, struct timespec *ts)
return -1;
}

elapsed_nsecs = k_ticks_to_ns_floor64(k_uptime_ticks());
ts->tv_sec = (int32_t) (elapsed_nsecs / NSEC_PER_SEC);
ts->tv_nsec = (int32_t) (elapsed_nsecs % NSEC_PER_SEC);
uint64_t ticks = k_uptime_ticks();
uint64_t elapsed_secs = ticks / CONFIG_SYS_CLOCK_TICKS_PER_SEC;
uint64_t nremainder = ticks - elapsed_secs * CONFIG_SYS_CLOCK_TICKS_PER_SEC;

ts->tv_sec = (time_t) elapsed_secs;
/* For ns, a 32-bit conversion can be used since it's smaller than 1 sec. */
ts->tv_nsec = (int32_t) k_ticks_to_ns_floor32(nremainder);

ts->tv_sec += base.tv_sec;
ts->tv_nsec += base.tv_nsec;
@@ -15,12 +15,10 @@
#define PTHREAD_INIT_FLAGS PTHREAD_CANCEL_ENABLE
#define PTHREAD_CANCELED ((void *) -1)

#define LOWEST_POSIX_THREAD_PRIORITY 1

PTHREAD_MUTEX_DEFINE(pthread_key_lock);

static const pthread_attr_t init_pthread_attrs = {
.priority = LOWEST_POSIX_THREAD_PRIORITY,
.priority = 0,
.stack = NULL,
.stacksize = 0,
.flags = PTHREAD_INIT_FLAGS,

@@ -54,9 +52,11 @@ static uint32_t zephyr_to_posix_priority(int32_t z_prio, int *policy)
if (z_prio < 0) {
*policy = SCHED_FIFO;
prio = -1 * (z_prio + 1);
__ASSERT_NO_MSG(prio < CONFIG_NUM_COOP_PRIORITIES);
} else {
*policy = SCHED_RR;
prio = (CONFIG_NUM_PREEMPT_PRIORITIES - z_prio);
prio = (CONFIG_NUM_PREEMPT_PRIORITIES - z_prio - 1);
__ASSERT_NO_MSG(prio < CONFIG_NUM_PREEMPT_PRIORITIES);
}

return prio;

@@ -68,9 +68,11 @@ static int32_t posix_to_zephyr_priority(uint32_t priority, int policy)

if (policy == SCHED_FIFO) {
/* Zephyr COOP priority starts from -1 */
__ASSERT_NO_MSG(priority < CONFIG_NUM_COOP_PRIORITIES);
prio = -1 * (priority + 1);
} else {
prio = (CONFIG_NUM_PREEMPT_PRIORITIES - priority);
__ASSERT_NO_MSG(priority < CONFIG_NUM_PREEMPT_PRIORITIES);
prio = (CONFIG_NUM_PREEMPT_PRIORITIES - priority - 1);
}

return prio;
@@ -7,13 +7,9 @@
#include <kernel.h>
#include <posix/posix_sched.h>

static bool valid_posix_policy(int policy)
static inline bool valid_posix_policy(int policy)
{
if (policy != SCHED_FIFO && policy != SCHED_RR) {
return false;
}

return true;
return policy == SCHED_FIFO || policy == SCHED_RR;
}

/**
@@ -23,25 +19,12 @@ static bool valid_posix_policy(int policy)
 */
int sched_get_priority_min(int policy)
{
if (valid_posix_policy(policy) == false) {
if (!valid_posix_policy(policy)) {
errno = EINVAL;
return -1;
}

if (IS_ENABLED(CONFIG_COOP_ENABLED)) {
if (policy == SCHED_FIFO) {
return 0;
}
}

if (IS_ENABLED(CONFIG_PREEMPT_ENABLED)) {
if (policy == SCHED_RR) {
return 0;
}
}

errno = EINVAL;
return -1;
return 0;
}

/**
@@ -51,25 +34,10 @@ int sched_get_priority_min(int policy)
 */
int sched_get_priority_max(int policy)
{
if (valid_posix_policy(policy) == false) {
errno = EINVAL;
return -1;
}

if (IS_ENABLED(CONFIG_COOP_ENABLED)) {
if (policy == SCHED_FIFO) {
/* POSIX COOP priority starts from 0
 * whereas Zephyr starts from -1
 */
return (CONFIG_NUM_COOP_PRIORITIES - 1);
}

}

if (IS_ENABLED(CONFIG_PREEMPT_ENABLED)) {
if (policy == SCHED_RR) {
return CONFIG_NUM_PREEMPT_PRIORITIES;
}
if (IS_ENABLED(CONFIG_COOP_ENABLED) && policy == SCHED_FIFO) {
return CONFIG_NUM_COOP_PRIORITIES - 1;
} else if (IS_ENABLED(CONFIG_PREEMPT_ENABLED) && policy == SCHED_RR) {
return CONFIG_NUM_PREEMPT_PRIORITIES - 1;
}

errno = EINVAL;
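The off-by-one fixes above make the POSIX-to-Zephyr priority mapping a clean round trip. A worked check of the corrected formulas; the priority count is an assumed example value:

```c
/* Round-trip check for the corrected mapping in the diff above.
 * Assumes CONFIG_NUM_PREEMPT_PRIORITIES == 16 for the arithmetic.
 */
#include <assert.h>

#define NUM_PREEMPT 16

static int to_posix(int z_prio)  { return NUM_PREEMPT - z_prio - 1; }
static int to_zephyr(int p_prio) { return NUM_PREEMPT - p_prio - 1; }

int main(void)
{
	/* The old formula (without "- 1") mapped Zephyr priority 0 to
	 * POSIX 16, one past the valid 0..15 range; the corrected one
	 * stays in range and round-trips exactly.
	 */
	for (int z = 0; z < NUM_PREEMPT; z++) {
		int p = to_posix(z);

		assert(p >= 0 && p < NUM_PREEMPT);
		assert(to_zephyr(p) == z);
	}
	return 0;
}
```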
@@ -24,6 +24,7 @@ config TFM_BOARD

menuconfig BUILD_WITH_TFM
bool "Build with TF-M as the Secure Execution Environment"
depends on ZEPHYR_TRUSTED_FIRMWARE_M_MODULE
depends on TRUSTED_EXECUTION_NONSECURE
depends on TFM_BOARD != ""
depends on ARM_TRUSTZONE_M
@@ -8,6 +8,7 @@ tests:
platform_allow: mps2_an521_ns lpcxpresso55s69_ns nrf5340dk_nrf5340_cpuapp_ns
nrf9160dk_nrf9160_ns nucleo_l552ze_q_ns v2m_musca_s1_ns stm32l562e_dk_ns
bl5340_dvk_cpuapp_ns
filter: CONFIG_ZEPHYR_TRUSTED_FIRMWARE_M_MODULE
harness: console
harness_config:
type: multi_line

@@ -5,6 +5,7 @@ common:
tags: psa
platform_allow: mps2_an521_ns v2m_musca_s1_ns
nrf5340dk_nrf5340_cpuapp_ns nrf9160dk_nrf9160_ns bl5340_dvk_cpuapp_ns
filter: CONFIG_ZEPHYR_TRUSTED_FIRMWARE_M_MODULE
harness: console
harness_config:
type: multi_line
@@ -22,3 +23,4 @@ common:
tests:
sample.tfm.protected_storage:
tags: tfm
filter: CONFIG_ZEPHYR_TRUSTED_FIRMWARE_M_MODULE

@@ -8,6 +8,7 @@ tests:
platform_allow: mps2_an521_ns lpcxpresso55s69_ns
nrf5340dk_nrf5340_cpuapp_ns nrf9160dk_nrf9160_ns nucleo_l552ze_q_ns
stm32l562e_dk_ns v2m_musca_s1_ns v2m_musca_b1_ns bl5340_dvk_cpuapp_ns
filter: CONFIG_ZEPHYR_TRUSTED_FIRMWARE_M_MODULE
harness: console
harness_config:
type: multi_line
@@ -21,6 +22,7 @@ tests:
platform_allow: mps2_an521_ns
extra_configs:
- CONFIG_TFM_BL2=n
filter: CONFIG_ZEPHYR_TRUSTED_FIRMWARE_M_MODULE
harness: console
harness_config:
type: multi_line

@@ -3,6 +3,7 @@ common:
platform_allow: mps2_an521_ns
nrf5340dk_nrf5340_cpuapp_ns nrf9160dk_nrf9160_ns
v2m_musca_s1_ns
filter: CONFIG_ZEPHYR_TRUSTED_FIRMWARE_M_MODULE
harness: console
harness_config:
type: multi_line
@@ -16,5 +17,7 @@ tests:
sample.tfm.psa_protected_storage_test:
extra_args: "CONFIG_TFM_PSA_TEST_PROTECTED_STORAGE=y"
timeout: 100
filter: CONFIG_ZEPHYR_TRUSTED_FIRMWARE_M_MODULE
sample.tfm.psa_internal_trusted_storage_test:
extra_args: "CONFIG_TFM_PSA_TEST_INTERNAL_TRUSTED_STORAGE=y"
filter: CONFIG_ZEPHYR_TRUSTED_FIRMWARE_M_MODULE

@@ -3,6 +3,7 @@ common:
platform_allow: lpcxpresso55s69_ns
nrf5340dk_nrf5340_cpuapp_ns nrf9160dk_nrf9160_ns
v2m_musca_s1_ns
filter: CONFIG_ZEPHYR_TRUSTED_FIRMWARE_M_MODULE
harness: console
harness_config:
type: multi_line
@@ -18,3 +19,4 @@ tests:
sample.tfm.tfm_regression:
extra_args: ""
timeout: 200
filter: CONFIG_ZEPHYR_TRUSTED_FIRMWARE_M_MODULE
@@ -15,3 +15,5 @@ tests:
sample.kernel.memory_protection.shared_mem:
filter: CONFIG_ARCH_HAS_USERSPACE
platform_exclude: twr_ke18f
extra_configs:
- CONFIG_TEST_HW_STACK_PROTECTION=n
@@ -58,7 +58,7 @@ data_template = """
"""

library_data_template = """
*{0}:*(.data .data.*)
*{0}:*(.data .data.* .sdata .sdata.*)
"""

bss_template = """
@@ -67,7 +67,7 @@ bss_template = """
"""

library_bss_template = """
*{0}:*(.bss .bss.* COMMON COMMON.*)
*{0}:*(.bss .bss.* .sbss .sbss.* COMMON COMMON.*)
"""

footer_template = """
@@ -55,8 +55,8 @@ const _k_syscall_handler_t _k_syscall_table[K_SYSCALL_LIMIT] = {
};
"""

list_template = """
/* auto-generated by gen_syscalls.py, don't edit */
list_template = """/* auto-generated by gen_syscalls.py, don't edit */

#ifndef ZEPHYR_SYSCALL_LIST_H
#define ZEPHYR_SYSCALL_LIST_H

@@ -82,17 +82,6 @@ syscall_template = """

#include <linker/sections.h>

#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
#pragma GCC diagnostic push
#endif

#ifdef __GNUC__
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#if !defined(__XCC__)
#pragma GCC diagnostic ignored "-Warray-bounds"
#endif
#endif

#ifdef __cplusplus
extern "C" {
#endif
@@ -103,10 +92,6 @@ extern "C" {
}
#endif

#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
#pragma GCC diagnostic pop
#endif

#endif
#endif /* include guard */
"""
@@ -153,25 +138,13 @@ def need_split(argtype):
# Note: "lo" and "hi" are named in little endian conventions,
# but it doesn't matter as long as they are consistently
# generated.
def union_decl(type):
    return "union { struct { uintptr_t lo, hi; } split; %s val; }" % type
def union_decl(type, split):
    middle = "struct { uintptr_t lo, hi; } split" if split else "uintptr_t x"
    return "union { %s; %s val; }" % (middle, type)

def wrapper_defs(func_name, func_type, args):
    ret64 = need_split(func_type)
    mrsh_args = []  # List of rvalue expressions for the marshalled invocation
    split_args = []
    nsplit = 0
    for argtype, argname in args:
        if need_split(argtype):
            split_args.append((argtype, argname))
            mrsh_args.append("parm%d.split.lo" % nsplit)
            mrsh_args.append("parm%d.split.hi" % nsplit)
            nsplit += 1
        else:
            mrsh_args.append("*(uintptr_t *)&" + argname)

    if ret64:
        mrsh_args.append("(uintptr_t)&ret64")

    decl_arglist = ", ".join([" ".join(argrec) for argrec in args]) or "void"
@@ -184,10 +157,24 @@ def wrapper_defs(func_name, func_type, args):
    wrap += ("\t" + "uint64_t ret64;\n") if ret64 else ""
    wrap += "\t" + "if (z_syscall_trap()) {\n"

    for parmnum, rec in enumerate(split_args):
        (argtype, argname) = rec
        wrap += "\t\t%s parm%d;\n" % (union_decl(argtype), parmnum)
        wrap += "\t\t" + "parm%d.val = %s;\n" % (parmnum, argname)
    valist_args = []
    for argnum, (argtype, argname) in enumerate(args):
        split = need_split(argtype)
        wrap += "\t\t%s parm%d" % (union_decl(argtype, split), argnum)
        if argtype != "va_list":
            wrap += " = { .val = %s };\n" % argname
        else:
            # va_list objects are ... peculiar.
            wrap += ";\n" + "\t\t" + "va_copy(parm%d.val, %s);\n" % (argnum, argname)
            valist_args.append("parm%d.val" % argnum)
        if split:
            mrsh_args.append("parm%d.split.lo" % argnum)
            mrsh_args.append("parm%d.split.hi" % argnum)
        else:
            mrsh_args.append("parm%d.x" % argnum)

    if ret64:
        mrsh_args.append("(uintptr_t)&ret64")

    if len(mrsh_args) > 6:
        wrap += "\t\t" + "uintptr_t more[] = {\n"
@@ -200,21 +187,23 @@ def wrapper_defs(func_name, func_type, args):
            % (len(mrsh_args),
               ", ".join(mrsh_args + [syscall_id])))

    # Coverity does not understand syscall mechanism
    # and will already complain when any function argument
    # is not of exact size as uintptr_t. So tell Coverity
    # to ignore this particular rule here.
    wrap += "\t\t/* coverity[OVERRUN] */\n"

    if ret64:
        wrap += "\t\t" + "(void)%s;\n" % invoke
        wrap += "\t\t" + "return (%s)ret64;\n" % func_type
        invoke = "\t\t" + "(void) %s;\n" % invoke
        retcode = "\t\t" + "return (%s) ret64;\n" % func_type
    elif func_type == "void":
        wrap += "\t\t" + "%s;\n" % invoke
        wrap += "\t\t" + "return;\n"
        invoke = "\t\t" + "(void) %s;\n" % invoke
        retcode = "\t\t" + "return;\n"
    elif valist_args:
        invoke = "\t\t" + "%s retval = %s;\n" % (func_type, invoke)
        retcode = "\t\t" + "return retval;\n"
    else:
        wrap += "\t\t" + "return (%s) %s;\n" % (func_type, invoke)
        invoke = "\t\t" + "return (%s) %s;\n" % (func_type, invoke)
        retcode = ""

    wrap += invoke
    for argname in valist_args:
        wrap += "\t\t" + "va_end(%s);\n" % argname
    wrap += retcode
    wrap += "\t" + "}\n"
    wrap += "#endif\n"
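The `union_decl()` change above generates a per-argument union so 64-bit syscall arguments can cross the user/kernel boundary as two register-sized halves. A hedged sketch of the C shape the script now emits for one 64-bit argument on a 32-bit target (illustrative; not copied from actual generator output):

```c
uint64_t timeout = 12345;                /* hypothetical syscall argument */

union {
	struct { uintptr_t lo, hi; } split; /* the need_split() case */
	uint64_t val;
} parm0 = { .val = timeout };

/* The marshalled invocation then passes parm0.split.lo and
 * parm0.split.hi. Register-sized arguments instead use the new
 * `uintptr_t x` member, replacing the old `*(uintptr_t *)&arg`
 * strict-aliasing cast that required the -Wstrict-aliasing pragmas
 * deleted elsewhere in this diff.
 */
```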
@@ -244,16 +233,11 @@ def marshall_defs(func_name, func_type, args):
    mrsh_name = "z_mrsh_" + func_name

    nmrsh = 0        # number of marshalled uintptr_t parameters
    vrfy_parms = []  # list of (arg_num, mrsh_or_parm_num, bool_is_split)
    split_parms = [] # list of (arg_num, mrsh_num) for each split
    for i, (argtype, _) in enumerate(args):
        if need_split(argtype):
            vrfy_parms.append((i, len(split_parms), True))
            split_parms.append((i, nmrsh))
            nmrsh += 2
        else:
            vrfy_parms.append((i, nmrsh, False))
            nmrsh += 1
    vrfy_parms = []  # list of (argtype, bool_is_split)
    for (argtype, _) in args:
        split = need_split(argtype)
        vrfy_parms.append((argtype, split))
        nmrsh += 2 if split else 1

    # Final argument for a 64 bit return value?
    if need_split(func_type):

@@ -275,25 +259,22 @@ def marshall_defs(func_name, func_type, args):

    if nmrsh > 6:
        mrsh += ("\tZ_OOPS(Z_SYSCALL_MEMORY_READ(more, "
                 + str(nmrsh - 6) + " * sizeof(uintptr_t)));\n")
                 + str(nmrsh - 5) + " * sizeof(uintptr_t)));\n")

    for i, split_rec in enumerate(split_parms):
        arg_num, mrsh_num = split_rec
        arg_type = args[arg_num][0]
        mrsh += "\t%s parm%d;\n" % (union_decl(arg_type), i)
        mrsh += "\t" + "parm%d.split.lo = %s;\n" % (i, mrsh_rval(mrsh_num, nmrsh))
        mrsh += "\t" + "parm%d.split.hi = %s;\n" % (i, mrsh_rval(mrsh_num + 1, nmrsh))
    # Finally, invoke the verify function
    out_args = []
    for i, argn, is_split in vrfy_parms:
        if is_split:
            out_args.append("parm%d.val" % argn)
    argnum = 0
    for i, (argtype, split) in enumerate(vrfy_parms):
        mrsh += "\t%s parm%d;\n" % (union_decl(argtype, split), i)
        if split:
            mrsh += "\t" + "parm%d.split.lo = %s;\n" % (i, mrsh_rval(argnum, nmrsh))
            argnum += 1
            mrsh += "\t" + "parm%d.split.hi = %s;\n" % (i, mrsh_rval(argnum, nmrsh))
        else:
            out_args.append("*(%s*)&%s" % (args[i][0], mrsh_rval(argn, nmrsh)))
            mrsh += "\t" + "parm%d.x = %s;\n" % (i, mrsh_rval(argnum, nmrsh))
        argnum += 1

    vrfy_call = "z_vrfy_%s(%s)\n" % (func_name, ", ".join(out_args))
    # Finally, invoke the verify function
    out_args = ", ".join(["parm%d.val" % i for i in range(len(args))])
    vrfy_call = "z_vrfy_%s(%s)" % (func_name, out_args)

    if func_type == "void":
        mrsh += "\t" + "%s;\n" % vrfy_call
@@ -436,19 +417,10 @@ def main():
    mrsh_fn = os.path.join(args.base_output, fn + "_mrsh.c")

    with open(mrsh_fn, "w") as fp:
        fp.write("/* auto-generated by gen_syscalls.py, don't edit */\n")
        fp.write("#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)\n")
        fp.write("#pragma GCC diagnostic push\n")
        fp.write("#endif\n")
        fp.write("#ifdef __GNUC__\n")
        fp.write("#pragma GCC diagnostic ignored \"-Wstrict-aliasing\"\n")
        fp.write("#endif\n")
        fp.write("/* auto-generated by gen_syscalls.py, don't edit */\n\n")
        fp.write(mrsh_includes[fn] + "\n")
        fp.write("\n")
        fp.write(mrsh_defs[fn] + "\n")
        fp.write("#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)\n")
        fp.write("#pragma GCC diagnostic pop\n")
        fp.write("#endif\n")

if __name__ == "__main__":
    main()
@@ -2017,21 +2017,17 @@ class CMake():
    def run_cmake(self, args=[]):

        if self.warnings_as_errors:
            ldflags = "-Wl,--fatal-warnings"
            cflags = "-Werror"
            aflags = "-Wa,--fatal-warnings"
            warnings_as_errors = 'y'
            gen_defines_args = "--edtlib-Werror"
        else:
            ldflags = cflags = aflags = ""
            warnings_as_errors = 'n'
            gen_defines_args = ""

        logger.debug("Running cmake on %s for %s" % (self.source_dir, self.platform.name))
        cmake_args = [
            f'-B{self.build_dir}',
            f'-S{self.source_dir}',
            f'-DEXTRA_CFLAGS="{cflags}"',
            f'-DEXTRA_AFLAGS="{aflags}',
            f'-DEXTRA_LDFLAGS="{ldflags}"',
            f'-DCONFIG_COMPILER_WARNINGS_AS_ERRORS={warnings_as_errors}',
            f'-DEXTRA_GEN_DEFINES_ARGS={gen_defines_args}',
            f'-G{self.generator}'
        ]
@@ -1,7 +1,7 @@
# DOC: used to generate docs

breathe>=4.30
sphinx~=4.0
sphinx~=5.0.2
sphinx_rtd_theme~=1.0
sphinx-tabs
sphinxcontrib-svg2pdfconverter
@@ -1181,6 +1181,7 @@ static inline int isr_rx_pdu(struct lll_scan *lll, struct pdu_adv *pdu_adv_rx,
/* Active scanner */
} else if (((pdu_adv_rx->type == PDU_ADV_TYPE_ADV_IND) ||
(pdu_adv_rx->type == PDU_ADV_TYPE_SCAN_IND)) &&
(pdu_adv_rx->len >= offsetof(struct pdu_adv_adv_ind, data)) &&
(pdu_adv_rx->len <= sizeof(struct pdu_adv_adv_ind)) &&
lll->type &&
#if defined(CONFIG_BT_CENTRAL)

@@ -1274,6 +1275,7 @@ static inline int isr_rx_pdu(struct lll_scan *lll, struct pdu_adv *pdu_adv_rx,
else if (((((pdu_adv_rx->type == PDU_ADV_TYPE_ADV_IND) ||
(pdu_adv_rx->type == PDU_ADV_TYPE_NONCONN_IND) ||
(pdu_adv_rx->type == PDU_ADV_TYPE_SCAN_IND)) &&
(pdu_adv_rx->len >= offsetof(struct pdu_adv_adv_ind, data)) &&
(pdu_adv_rx->len <= sizeof(struct pdu_adv_adv_ind))) ||
((pdu_adv_rx->type == PDU_ADV_TYPE_DIRECT_IND) &&
(pdu_adv_rx->len == sizeof(struct pdu_adv_direct_ind)) &&

@@ -1287,6 +1289,7 @@ static inline int isr_rx_pdu(struct lll_scan *lll, struct pdu_adv *pdu_adv_rx,
pdu_adv_rx, rl_idx)) ||
#endif /* CONFIG_BT_CTLR_ADV_EXT */
((pdu_adv_rx->type == PDU_ADV_TYPE_SCAN_RSP) &&
(pdu_adv_rx->len >= offsetof(struct pdu_adv_scan_rsp, data)) &&
(pdu_adv_rx->len <= sizeof(struct pdu_adv_scan_rsp)) &&
(lll->state != 0U) &&
isr_scan_rsp_adva_matches(pdu_adv_rx))) &&

@@ -1334,6 +1337,7 @@ static inline bool isr_scan_init_check(struct lll_scan *lll,
lll_scan_adva_check(lll, pdu->tx_addr, pdu->adv_ind.addr,
rl_idx)) &&
(((pdu->type == PDU_ADV_TYPE_ADV_IND) &&
(pdu->len >= offsetof(struct pdu_adv_adv_ind, data)) &&
(pdu->len <= sizeof(struct pdu_adv_adv_ind))) ||
((pdu->type == PDU_ADV_TYPE_DIRECT_IND) &&
(pdu->len == sizeof(struct pdu_adv_direct_ind)) &&
@@ -41,6 +41,11 @@

struct tx_meta {
struct bt_conn_tx *tx;
/* This flag indicates if the current buffer has already been partially
 * sent to the controller (i.e., the next fragments should be sent as
 * continuations).
 */
bool is_cont;
};

#define tx_data(buf) ((struct tx_meta *)net_buf_user_data(buf))

@@ -396,6 +401,8 @@ int bt_conn_send_cb(struct bt_conn *conn, struct net_buf *buf,
tx_data(buf)->tx = NULL;
}

tx_data(buf)->is_cont = false;

net_buf_put(&conn->tx_queue, buf);
return 0;
}
@@ -464,25 +471,41 @@ static int send_iso(struct bt_conn *conn, struct net_buf *buf, uint8_t flags)
return bt_send(buf);
}

static bool send_frag(struct bt_conn *conn, struct net_buf *buf, uint8_t flags,
bool always_consume)
static inline uint16_t conn_mtu(struct bt_conn *conn)
{
#if defined(CONFIG_BT_BREDR)
if (conn->type == BT_CONN_TYPE_BR || !bt_dev.le.acl_mtu) {
return bt_dev.br.mtu;
}
#endif /* CONFIG_BT_BREDR */
#if defined(CONFIG_BT_ISO)
if (conn->type == BT_CONN_TYPE_ISO && bt_dev.le.iso_mtu) {
return bt_dev.le.iso_mtu;
}
#endif /* CONFIG_BT_ISO */
#if defined(CONFIG_BT_CONN)
return bt_dev.le.acl_mtu;
#else
return 0;
#endif /* CONFIG_BT_CONN */
}

static int do_send_frag(struct bt_conn *conn, struct net_buf *buf, uint8_t flags)
{
struct bt_conn_tx *tx = tx_data(buf)->tx;
uint32_t *pending_no_cb;
uint32_t *pending_no_cb = NULL;
unsigned int key;
int err = 0;

BT_DBG("conn %p buf %p len %u flags 0x%02x", conn, buf, buf->len,
flags);

/* Wait until the controller can accept ACL packets */
k_sem_take(bt_conn_get_pkts(conn), K_FOREVER);

/* Check for disconnection while waiting for pkts_sem */
if (conn->state != BT_CONN_CONNECTED) {
err = -ENOTCONN;
goto fail;
}

BT_DBG("conn %p buf %p len %u flags 0x%02x", conn, buf, buf->len,
flags);

/* Add to pending, it must be done before bt_buf_set_type */
key = irq_lock();
if (tx) {

@@ -520,46 +543,61 @@ static bool send_frag(struct bt_conn *conn, struct net_buf *buf, uint8_t flags,
(*pending_no_cb)--;
}
irq_unlock(key);

/* We don't want to end up in a situation where send_acl/iso
 * returns the same error code as when we don't get a buffer in
 * time.
 */
err = -EIO;
goto fail;
}

return true;
return 0;

fail:
/* If we get here, something has seriously gone wrong:
 * We also need to destroy the `parent` buf.
 */
k_sem_give(bt_conn_get_pkts(conn));
if (tx) {
tx_free(tx);
}

if (always_consume) {
net_buf_unref(buf);
}
return false;
return err;
}
static inline uint16_t conn_mtu(struct bt_conn *conn)
static int send_frag(struct bt_conn *conn,
struct net_buf *buf, struct net_buf *frag,
uint8_t flags)
{
#if defined(CONFIG_BT_BREDR)
if (conn->type == BT_CONN_TYPE_BR || !bt_dev.le.acl_mtu) {
return bt_dev.br.mtu;
/* Check if the controller can accept ACL packets */
if (k_sem_take(bt_conn_get_pkts(conn), K_NO_WAIT)) {
BT_DBG("no controller bufs");
return -ENOBUFS;
}
#endif /* CONFIG_BT_BREDR */
#if defined(CONFIG_BT_ISO)
if (conn->type == BT_CONN_TYPE_ISO && bt_dev.le.iso_mtu) {
return bt_dev.le.iso_mtu;

/* Add the data to the buffer */
if (frag) {
uint16_t frag_len = MIN(conn_mtu(conn), net_buf_tailroom(frag));

net_buf_add_mem(frag, buf->data, frag_len);
net_buf_pull(buf, frag_len);
} else {
/* De-queue the buffer now that we know we can send it.
 * Only applies if the buffer to be sent is the original buffer,
 * and not one of its fragments.
 * This buffer was fetched from the FIFO using a peek operation.
 */
buf = net_buf_get(&conn->tx_queue, K_NO_WAIT);
frag = buf;
}
#endif /* CONFIG_BT_ISO */
#if defined(CONFIG_BT_CONN)
return bt_dev.le.acl_mtu;
#else
return 0;
#endif /* CONFIG_BT_CONN */

return do_send_frag(conn, frag, flags);
}

static struct net_buf *create_frag(struct bt_conn *conn, struct net_buf *buf)
{
struct net_buf *frag;
uint16_t frag_len;

switch (conn->type) {
#if defined(CONFIG_BT_ISO)
@@ -583,52 +621,55 @@ static struct net_buf *create_frag(struct bt_conn *conn, struct net_buf *buf)

/* Fragments never have a TX completion callback */
tx_data(frag)->tx = NULL;

frag_len = MIN(conn_mtu(conn), net_buf_tailroom(frag));

net_buf_add_mem(frag, buf->data, frag_len);
net_buf_pull(buf, frag_len);
tx_data(frag)->is_cont = false;

return frag;
}

static bool send_buf(struct bt_conn *conn, struct net_buf *buf)
static int send_buf(struct bt_conn *conn, struct net_buf *buf)
{
struct net_buf *frag;
uint8_t flags;
int err;

BT_DBG("conn %p buf %p len %u", conn, buf, buf->len);

/* Send directly if the packet fits the ACL MTU */
if (buf->len <= conn_mtu(conn)) {
return send_frag(conn, buf, FRAG_SINGLE, false);
}

/* Create & enqueue first fragment */
frag = create_frag(conn, buf);
if (!frag) {
return false;
}

if (!send_frag(conn, frag, FRAG_START, true)) {
return false;
if (buf->len <= conn_mtu(conn) && !tx_data(buf)->is_cont) {
BT_DBG("send single");
return send_frag(conn, buf, NULL, FRAG_SINGLE);
}

BT_DBG("start fragmenting");
/*
 * Send the fragments. For the last one simply use the original
 * buffer (which works since we've used net_buf_pull on it.
 * buffer (which works since we've used net_buf_pull on it).
 */
flags = FRAG_START;
if (tx_data(buf)->is_cont) {
flags = FRAG_CONT;
}

while (buf->len > conn_mtu(conn)) {
frag = create_frag(conn, buf);
if (!frag) {
return false;
return -ENOMEM;
}

if (!send_frag(conn, frag, FRAG_CONT, true)) {
return false;
err = send_frag(conn, buf, frag, flags);
if (err) {
BT_DBG("%p failed, mark as existing frag", buf);
tx_data(buf)->is_cont = flags != FRAG_START;
net_buf_unref(frag);
return err;
}

flags = FRAG_CONT;
}

return send_frag(conn, buf, FRAG_END, false);
BT_DBG("last frag");
tx_data(buf)->is_cont = true;
return send_frag(conn, buf, NULL, FRAG_END);
}

static struct k_poll_signal conn_change =
@@ -674,10 +715,26 @@ static int conn_prepare_events(struct bt_conn *conn,

BT_DBG("Adding conn %p to poll list", conn);

k_poll_event_init(&events[0],
K_POLL_TYPE_FIFO_DATA_AVAILABLE,
K_POLL_MODE_NOTIFY_ONLY,
&conn->tx_queue);
bool buffers_available = k_sem_count_get(bt_conn_get_pkts(conn)) > 0;
bool packets_waiting = !k_fifo_is_empty(&conn->tx_queue);

if (packets_waiting && !buffers_available) {
/* Only resume sending when the controller has buffer space
 * available for this connection.
 */
BT_DBG("wait on ctlr buffers");
k_poll_event_init(&events[0],
K_POLL_TYPE_SEM_AVAILABLE,
K_POLL_MODE_NOTIFY_ONLY,
bt_conn_get_pkts(conn));
} else {
/* Wait until there is more data to send. */
BT_DBG("wait on host fifo");
k_poll_event_init(&events[0],
K_POLL_TYPE_FIFO_DATA_AVAILABLE,
K_POLL_MODE_NOTIFY_ONLY,
&conn->tx_queue);
}
events[0].tag = BT_EVENT_CONN_TX_QUEUE;

return 0;

@@ -720,6 +777,7 @@ int bt_conn_prepare_events(struct k_poll_event events[])
void bt_conn_process_tx(struct bt_conn *conn)
{
struct net_buf *buf;
int err;

BT_DBG("conn %p", conn);

@@ -730,10 +788,26 @@ void bt_conn_process_tx(struct bt_conn *conn)
return;
}

/* Get next ACL packet for connection */
buf = net_buf_get(&conn->tx_queue, K_NO_WAIT);
/* Get next ACL packet for connection. The buffer will only get dequeued
 * if there is a free controller buffer to put it in.
 *
 * Important: no operations should be done on `buf` until it is properly
 * dequeued from the FIFO, using the `net_buf_get()` API.
 */
buf = k_fifo_peek_head(&conn->tx_queue);
BT_ASSERT(buf);
if (!send_buf(conn, buf)) {

/* Since we used `peek`, the queue still owns the reference to the
 * buffer, so we need to take an explicit additional reference here.
 */
buf = net_buf_ref(buf);
err = send_buf(conn, buf);
net_buf_unref(buf);

if (err == -EIO) {
tx_data(buf)->tx = NULL;

/* destroy the buffer */
net_buf_unref(buf);
}
}
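The key ownership rule in the reworked `bt_conn_process_tx()` is peek-then-ref: the FIFO keeps its reference while the head is inspected, and the consumer takes its own before doing anything that may drop one. A generic sketch of that pattern, detached from this exact code; `try_send()` is a hypothetical helper:

```c
struct net_buf *buf = k_fifo_peek_head(&fifo);   /* fifo assumed elsewhere */

if (buf) {
	buf = net_buf_ref(buf);     /* our reference; the queue keeps its own */

	int err = try_send(buf);    /* hypothetical send attempt */

	if (err == 0) {
		/* on success the send path dequeued the buffer itself,
		 * via net_buf_get(), transferring the queue's reference
		 */
	}
	net_buf_unref(buf);         /* drop only the reference we took */
}
```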
@@ -2372,6 +2372,13 @@ static void process_events(struct k_poll_event *ev, int count)
switch (ev->state) {
case K_POLL_STATE_SIGNALED:
break;
case K_POLL_STATE_SEM_AVAILABLE:
/* After this fn is exec'd, `bt_conn_prepare_events()`
 * will be called once again, and this time buffers will
 * be available, so the FIFO will be added to the poll
 * list instead of the ctlr buffers semaphore.
 */
break;
case K_POLL_STATE_FIFO_DATA_AVAILABLE:
if (ev->tag == BT_EVENT_CMD_TX) {
send_cmd();

@@ -2431,6 +2438,7 @@ static void hci_tx_thread(void *p1, void *p2, void *p3)
events[0].state = K_POLL_STATE_NOT_READY;
ev_count = 1;

/* This adds the FIFO per-connection */
if (IS_ENABLED(CONFIG_BT_CONN) || IS_ENABLED(CONFIG_BT_ISO)) {
ev_count += bt_conn_prepare_events(&events[1]);
}
@@ -2503,13 +2511,15 @@ static void le_read_buffer_size_complete(struct net_buf *buf)
BT_DBG("status 0x%02x", rp->status);

#if defined(CONFIG_BT_CONN)
bt_dev.le.acl_mtu = sys_le16_to_cpu(rp->le_max_len);
if (!bt_dev.le.acl_mtu) {
uint16_t acl_mtu = sys_le16_to_cpu(rp->le_max_len);

if (!acl_mtu || !rp->le_max_num) {
return;
}

BT_DBG("ACL LE buffers: pkts %u mtu %u", rp->le_max_num,
bt_dev.le.acl_mtu);
bt_dev.le.acl_mtu = acl_mtu;

BT_DBG("ACL LE buffers: pkts %u mtu %u", rp->le_max_num, bt_dev.le.acl_mtu);

k_sem_init(&bt_dev.le.acl_pkts, rp->le_max_num, rp->le_max_num);
#endif /* CONFIG_BT_CONN */

@@ -2523,25 +2533,26 @@ static void read_buffer_size_v2_complete(struct net_buf *buf)
BT_DBG("status %u", rp->status);

#if defined(CONFIG_BT_CONN)
bt_dev.le.acl_mtu = sys_le16_to_cpu(rp->acl_max_len);
if (!bt_dev.le.acl_mtu) {
return;
uint16_t acl_mtu = sys_le16_to_cpu(rp->acl_max_len);

if (acl_mtu && rp->acl_max_num) {
bt_dev.le.acl_mtu = acl_mtu;
LOG_DBG("ACL LE buffers: pkts %u mtu %u", rp->acl_max_num, bt_dev.le.acl_mtu);

k_sem_init(&bt_dev.le.acl_pkts, rp->acl_max_num, rp->acl_max_num);
}

BT_DBG("ACL LE buffers: pkts %u mtu %u", rp->acl_max_num,
bt_dev.le.acl_mtu);

k_sem_init(&bt_dev.le.acl_pkts, rp->acl_max_num, rp->acl_max_num);
#endif /* CONFIG_BT_CONN */

bt_dev.le.iso_mtu = sys_le16_to_cpu(rp->iso_max_len);
if (!bt_dev.le.iso_mtu) {
uint16_t iso_mtu = sys_le16_to_cpu(rp->iso_max_len);

if (!iso_mtu || !rp->iso_max_num) {
BT_ERR("ISO buffer size not set");
return;
}

BT_DBG("ISO buffers: pkts %u mtu %u", rp->iso_max_num,
bt_dev.le.iso_mtu);
bt_dev.le.iso_mtu = iso_mtu;

BT_DBG("ISO buffers: pkts %u mtu %u", rp->iso_max_num, bt_dev.le.iso_mtu);

k_sem_init(&bt_dev.le.iso_pkts, rp->iso_max_num, rp->iso_max_num);
#endif /* CONFIG_BT_ISO */

@@ -2810,6 +2821,7 @@ static int le_init_iso(void)
if (err) {
return err;
}

read_buffer_size_v2_complete(rsp);

net_buf_unref(rsp);

@@ -2823,6 +2835,7 @@ static int le_init_iso(void)
if (err) {
return err;
}

le_read_buffer_size_complete(rsp);

net_buf_unref(rsp);

@@ -2866,7 +2879,9 @@ static int le_init(void)
if (err) {
return err;
}

le_read_buffer_size_complete(rsp);

net_buf_unref(rsp);
}
@@ -873,6 +873,12 @@ static void l2cap_chan_tx_process(struct k_work *work)
if (sent < 0) {
if (sent == -EAGAIN) {
ch->tx_buf = buf;
/* If we don't reschedule, and the app doesn't nudge l2cap (e.g. by
 * sending another SDU), the channel will be stuck in limbo. To
 * prevent this, we attempt to re-schedule the work item for every
 * channel on every connection when an SDU has successfully been
 * sent.
 */
} else {
net_buf_unref(buf);
}

@@ -1693,13 +1699,20 @@ static void le_disconn_rsp(struct bt_l2cap *l2cap, uint8_t ident,
bt_l2cap_chan_del(&chan->chan);
}

static inline struct net_buf *l2cap_alloc_seg(struct net_buf *buf)
static inline struct net_buf *l2cap_alloc_seg(struct net_buf *buf, struct bt_l2cap_le_chan *ch)
{
struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id);
struct net_buf *seg;

/* Try to use original pool if possible */
seg = net_buf_alloc(pool, K_NO_WAIT);
/* Use the dedicated segment callback if registered */
if (ch->chan.ops->alloc_seg) {
seg = ch->chan.ops->alloc_seg(&ch->chan);
__ASSERT_NO_MSG(seg);
} else {
/* Try to use original pool if possible */
seg = net_buf_alloc(pool, K_NO_WAIT);
}

if (seg) {
net_buf_reserve(seg, BT_L2CAP_CHAN_SEND_RESERVE);
return seg;

@@ -1736,7 +1749,8 @@ static struct net_buf *l2cap_chan_create_seg(struct bt_l2cap_le_chan *ch,
}

segment:
seg = l2cap_alloc_seg(buf);
seg = l2cap_alloc_seg(buf, ch);

if (!seg) {
return NULL;
}
@@ -1767,6 +1781,17 @@ static void l2cap_chan_tx_resume(struct bt_l2cap_le_chan *ch)
k_work_submit(&ch->tx_work);
}

#if defined(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)
static void resume_all_channels(struct bt_conn *conn, void *data)
{
struct bt_l2cap_chan *chan;

SYS_SLIST_FOR_EACH_CONTAINER(&conn->channels, chan, node) {
l2cap_chan_tx_resume(BT_L2CAP_LE_CHAN(chan));
}
}
#endif

static void l2cap_chan_sdu_sent(struct bt_conn *conn, void *user_data)
{
uint16_t cid = POINTER_TO_UINT(user_data);

@@ -1784,7 +1809,15 @@ static void l2cap_chan_sdu_sent(struct bt_conn *conn, void *user_data)
chan->ops->sent(chan);
}

/* Resume the current channel */
l2cap_chan_tx_resume(BT_L2CAP_LE_CHAN(chan));

if (IS_ENABLED(CONFIG_BT_L2CAP_DYNAMIC_CHANNEL)) {
/* Resume all other channels in case one might be stuck.
 * The current channel has already been given priority.
 */
bt_conn_foreach(BT_CONN_TYPE_LE, resume_all_channels, NULL);
}
}

static void l2cap_chan_seg_sent(struct bt_conn *conn, void *user_data)

@@ -1872,12 +1905,12 @@ static int l2cap_chan_le_send(struct bt_l2cap_le_chan *ch,
BT_WARN("Unable to send seg %d", err);
atomic_inc(&ch->tx.credits);

/* If the segment is not the original buffer release it since it
 * won't be needed anymore.
/* The host takes ownership of the reference in seg when
 * bt_l2cap_send_cb is successful. The call returned an error,
 * so we must get rid of the reference that was taken in
 * l2cap_chan_create_seg.
 */
if (seg != buf) {
net_buf_unref(seg);
}
net_buf_unref(seg);

if (err == -ENOBUFS) {
/* Restore state since segment could not be sent */

@@ -2142,12 +2175,19 @@ static void l2cap_chan_send_credits(struct bt_l2cap_le_chan *chan,
struct net_buf *buf, uint16_t credits)
{
struct bt_l2cap_le_credits *ev;
uint16_t old_credits;

/* Cap the number of credits given */
if (credits > chan->rx.init_credits) {
credits = chan->rx.init_credits;
}

/* Don't send back more than the initial amount. */
old_credits = atomic_get(&chan->rx.credits);
if (credits + old_credits > chan->rx.init_credits) {
credits = chan->rx.init_credits - old_credits;
}

buf = l2cap_create_le_sig_pdu(buf, BT_L2CAP_LE_CREDITS, get_ident(),
sizeof(*ev));
if (!buf) {

@@ -2180,6 +2220,8 @@ static void l2cap_chan_update_credits(struct bt_l2cap_le_chan *chan,
credits = ((chan->_sdu_len - net_buf_frags_len(buf)) +
(chan->rx.mps - 1)) / chan->rx.mps;

BT_DBG("cred %d old %d", credits, (int)old_credits);

if (credits < old_credits) {
return;
}
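The `l2cap_chan_send_credits()` change clamps replenishment against credits still outstanding, not just against the initial window. A worked example of the arithmetic in the diff, with illustrative numbers:

```c
/* Worked example of the credit clamp above (hypothetical values):
 * init_credits = 10, credits the peer still holds = 7 (old_credits),
 * replenish request = 6. Since 6 + 7 > 10, only 10 - 7 = 3 credits
 * are actually granted, keeping the peer's total at the initial
 * window size instead of letting it grow unboundedly.
 */
uint16_t old_credits = atomic_get(&chan->rx.credits);

if (credits + old_credits > chan->rx.init_credits) {
	credits = chan->rx.init_credits - old_credits;
}
```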
@@ -124,13 +124,22 @@ static void purge_buffers(sys_slist_t *list)

buf = (void *)sys_slist_get_not_empty(list);

buf->frags = NULL;
buf->flags &= ~NET_BUF_FRAGS;

net_buf_unref(buf);
}
}

static void purge_seg_buffers(struct net_buf *buf)
{
/* The fragments head always has 2 references: one from allocation and
 * one from becoming the fragments head.
 */
net_buf_unref(buf);

do {
buf = net_buf_frag_del(NULL, buf);
} while (buf != NULL);
}

/* Intentionally start a little bit late into the ReceiveWindow when
 * it's large enough. This may improve reliability with some platforms,
 * like the PTS, where the receiver might not have sufficiently compensated

@@ -166,8 +175,10 @@ static void friend_clear(struct bt_mesh_friend *frnd)
for (i = 0; i < ARRAY_SIZE(frnd->seg); i++) {
struct bt_mesh_friend_seg *seg = &frnd->seg[i];

purge_buffers(&seg->queue);
seg->seg_count = 0U;
if (seg->buf) {
purge_seg_buffers(seg->buf);
seg->seg_count = 0U;
}
}

STRUCT_SECTION_FOREACH(bt_mesh_friend_cb, cb) {

@@ -1053,7 +1064,7 @@ init_friend:

static bool is_seg(struct bt_mesh_friend_seg *seg, uint16_t src, uint16_t seq_zero)
{
struct net_buf *buf = (void *)sys_slist_peek_head(&seg->queue);
struct net_buf *buf = seg->buf;
struct net_buf_simple_state state;
uint16_t buf_seq_zero;
uint16_t buf_src;

@@ -1086,7 +1097,7 @@ static struct bt_mesh_friend_seg *get_seg(struct bt_mesh_friend *frnd,
return seg;
}

if (!unassigned && !sys_slist_peek_head(&seg->queue)) {
if (!unassigned && !seg->buf) {
unassigned = seg;
}
}

@@ -1121,16 +1132,13 @@ static void enqueue_friend_pdu(struct bt_mesh_friend *frnd,
return;
}

net_buf_slist_put(&seg->queue, buf);
seg->buf = net_buf_frag_add(seg->buf, buf);

if (type == BT_MESH_FRIEND_PDU_COMPLETE) {
sys_slist_merge_slist(&frnd->queue, &seg->queue);
net_buf_slist_put(&frnd->queue, seg->buf);

frnd->queue_size += seg->seg_count;
seg->seg_count = 0U;
} else {
/* Mark the buffer as having more to come after it */
buf->flags |= NET_BUF_FRAGS;
}
}
@@ -1244,6 +1252,15 @@ static void friend_timeout(struct k_work *work)
return;
}

/* Put next segment to the friend queue. */
if (frnd->last != net_buf_frag_last(frnd->last)) {
struct net_buf *next;

next = net_buf_frag_del(NULL, frnd->last);
net_buf_frag_add(NULL, next);
sys_slist_prepend(&frnd->queue, &next->node);
}

md = (uint8_t)(sys_slist_peek_head(&frnd->queue) != NULL);

update_overwrite(frnd->last, md);

@@ -1252,10 +1269,6 @@ static void friend_timeout(struct k_work *work)
return;
}

/* Clear the flag we use for segment tracking */
frnd->last->flags &= ~NET_BUF_FRAGS;
frnd->last->frags = NULL;

BT_DBG("Sending buf %p from Friend Queue of LPN 0x%04x",
frnd->last, frnd->lpn);
frnd->queue_size--;

@@ -1330,16 +1343,11 @@ int bt_mesh_friend_init(void)

for (i = 0; i < ARRAY_SIZE(bt_mesh.frnd); i++) {
struct bt_mesh_friend *frnd = &bt_mesh.frnd[i];
int j;

sys_slist_init(&frnd->queue);

k_work_init_delayable(&frnd->timer, friend_timeout);
k_work_init_delayable(&frnd->clear.timer, clear_timeout);

for (j = 0; j < ARRAY_SIZE(frnd->seg); j++) {
sys_slist_init(&frnd->seg[j].queue);
}
}

return 0;

@@ -1635,11 +1643,16 @@ static bool friend_queue_prepare_space(struct bt_mesh_friend *frnd, uint16_t add
frnd->queue_size--;
avail_space++;

pending_segments = (buf->flags & NET_BUF_FRAGS);
if (buf != net_buf_frag_last(buf)) {
struct net_buf *next;

/* Make sure old slist entry state doesn't remain */
buf->frags = NULL;
buf->flags &= ~NET_BUF_FRAGS;
next = net_buf_frag_del(NULL, buf);

net_buf_frag_add(NULL, next);
sys_slist_prepend(&frnd->queue, &next->node);

pending_segments = true;
}

net_buf_unref(buf);
}

@@ -1762,7 +1775,7 @@ void bt_mesh_friend_clear_incomplete(struct bt_mesh_subnet *sub, uint16_t src,

BT_WARN("Clearing incomplete segments for 0x%04x", src);

purge_buffers(&seg->queue);
purge_seg_buffers(seg->buf);
seg->seg_count = 0U;
break;
}

@@ -67,7 +67,10 @@ struct bt_mesh_friend {
struct k_work_delayable timer;

struct bt_mesh_friend_seg {
sys_slist_t queue;
/* First received segment of a segmented message. The remaining
 * segments are added as net_buf fragments.
 */
struct net_buf *buf;

/* The target number of segments, i.e. not necessarily
 * the current number of segments, in the queue. This is
@@ -907,7 +907,11 @@ static inline int send_sf(struct isotp_send_ctx *ctx)
frame.data[index++] = ISOTP_PCI_TYPE_SF | len;

__ASSERT_NO_MSG(len <= ISOTP_CAN_DL - index);
if (len > ISOTP_CAN_DL - index) {
LOG_ERR("SF len does not fit DL");
return -ENOSPC;
}

memcpy(&frame.data[index], data, len);

#ifdef CONFIG_ISOTP_ENABLE_TX_PADDING

@@ -1202,6 +1206,7 @@ static int send(struct isotp_send_ctx *ctx, const struct device *can_dev,
ret = attach_fc_filter(ctx);
if (ret) {
LOG_ERR("Can't attach fc filter: %d", ret);
free_send_ctx(&ctx);
return ret;
}

@@ -1214,6 +1219,7 @@ static int send(struct isotp_send_ctx *ctx, const struct device *can_dev,
ret = send_sf(ctx);
ctx->state = ISOTP_TX_WAIT_FIN;
if (ret) {
free_send_ctx(&ctx);
return ret == CAN_TIMEOUT ?
ISOTP_N_TIMEOUT_A : ISOTP_N_ERROR;
}
@@ -28,6 +28,11 @@ config DEBUG_COREDUMP_BACKEND_FLASH_PARTITION
Core dump is saved to a flash partition with DTS alias
"coredump-partition".

config DEBUG_COREDUMP_BACKEND_OTHER
bool "Backend subsystem for coredump defined out of tree"
help
Core dump is done via custom mechanism defined out of tree

endchoice

choice

@@ -513,7 +513,7 @@ static int coredump_flash_backend_cmd(enum coredump_cmd_id cmd_id,
}

struct z_coredump_backend_api z_coredump_backend_flash_partition = {
struct coredump_backend_api coredump_backend_flash_partition = {
.start = coredump_flash_backend_start,
.end = coredump_flash_backend_end,
.buffer_output = coredump_flash_backend_buffer_output,

@@ -116,7 +116,7 @@ static int coredump_logging_backend_cmd(enum coredump_cmd_id cmd_id,
}

struct z_coredump_backend_api z_coredump_backend_logging = {
struct coredump_backend_api coredump_backend_logging = {
.start = coredump_logging_backend_start,
.end = coredump_logging_backend_end,
.buffer_output = coredump_logging_backend_buffer_output,

@@ -14,13 +14,17 @@
#include "coredump_internal.h"

#if defined(CONFIG_DEBUG_COREDUMP_BACKEND_LOGGING)
extern struct z_coredump_backend_api z_coredump_backend_logging;
static struct z_coredump_backend_api
*backend_api = &z_coredump_backend_logging;
extern struct coredump_backend_api coredump_backend_logging;
static struct coredump_backend_api
*backend_api = &coredump_backend_logging;
#elif defined(CONFIG_DEBUG_COREDUMP_BACKEND_FLASH_PARTITION)
extern struct z_coredump_backend_api z_coredump_backend_flash_partition;
static struct z_coredump_backend_api
*backend_api = &z_coredump_backend_flash_partition;
extern struct coredump_backend_api coredump_backend_flash_partition;
static struct coredump_backend_api
*backend_api = &coredump_backend_flash_partition;
#elif defined(CONFIG_DEBUG_COREDUMP_BACKEND_OTHER)
extern struct coredump_backend_api coredump_backend_other;
static struct coredump_backend_api
*backend_api = &coredump_backend_other;
#else
#error "Need to select a coredump backend"
#endif

@@ -53,31 +53,6 @@ void z_coredump_start(void);
 */
void z_coredump_end(void);

typedef void (*z_coredump_backend_start_t)(void);
typedef void (*z_coredump_backend_end_t)(void);
typedef void (*z_coredump_backend_buffer_output_t)(uint8_t *buf, size_t buflen);
typedef int (*coredump_backend_query_t)(enum coredump_query_id query_id,
void *arg);
typedef int (*coredump_backend_cmd_t)(enum coredump_cmd_id cmd_id,
void *arg);

struct z_coredump_backend_api {
/* Signal to backend of the start of coredump. */
z_coredump_backend_start_t start;

/* Signal to backend of the end of coredump. */
z_coredump_backend_end_t end;

/* Raw buffer output */
z_coredump_backend_buffer_output_t buffer_output;

/* Perform query on backend */
coredump_backend_query_t query;

/* Perform command on backend */
coredump_backend_cmd_t cmd;
};

/**
 * @endcond
 */
@@ -65,8 +65,15 @@ static void release_file_handle(size_t handle)

static bool is_mount_point(const char *path)
{
char dir_path[PATH_MAX];
size_t len;

sprintf(dir_path, "%s", path);
len = strlen(path);
if (len >= sizeof(dir_path)) {
return false;
}

memcpy(dir_path, path, len);
dir_path[len] = '\0';
return strcmp(dirname(dir_path), "/") == 0;
}
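The `is_mount_point()` fix replaces an unbounded `sprintf()` into a `PATH_MAX` stack buffer with a length-checked copy. The same pattern in isolation, with extra commentary on why the copy exists at all (a minimal sketch under the diff's assumptions, not a drop-in replacement):

```c
/* Bounded-copy pattern from the shell fs fix above: reject paths that
 * cannot fit (including the terminator) before touching the buffer,
 * instead of letting sprintf() overflow it.
 */
char dir_path[PATH_MAX];
size_t len = strlen(path);

if (len >= sizeof(dir_path)) {
	return false;               /* too long to ever be a mount point */
}
memcpy(dir_path, path, len);
dir_path[len] = '\0';

/* A private copy is still required: POSIX dirname() is allowed to
 * modify the string it is given.
 */
return strcmp(dirname(dir_path), "/") == 0;
```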
@@ -1190,8 +1190,11 @@ void z_log_msg2_init(void)

struct log_msg2 *z_log_msg2_alloc(uint32_t wlen)
{
return (struct log_msg2 *)mpsc_pbuf_alloc(&log_buffer, wlen,
K_MSEC(CONFIG_LOG_BLOCK_IN_THREAD_TIMEOUT_MS));
return (struct log_msg2 *)mpsc_pbuf_alloc(
&log_buffer, wlen,
(CONFIG_LOG_BLOCK_IN_THREAD_TIMEOUT_MS == -1)
? K_FOREVER
: K_MSEC(CONFIG_LOG_BLOCK_IN_THREAD_TIMEOUT_MS));
}

void z_log_msg2_commit(struct log_msg2 *msg)
@@ -17,7 +17,7 @@ config NET_BUF_USER_DATA_SIZE
int "Size of user_data available in every network buffer"
default 24 if MCUMGR_SMP_UDP && MCUMGR_SMP_UDP_IPV6
default 8 if MCUMGR_SMP_UDP && MCUMGR_SMP_UDP_IPV4
default 8 if ((BT || NET_TCP2) && 64BIT) || BT_ISO || MCUMGR_SMP_BT
default 8 if ((BT || NET_TCP2) && 64BIT) || BT_CONN || BT_ISO
default 4
range 4 65535 if BT || NET_TCP2
range 0 65535
@@ -405,7 +405,7 @@ struct net_buf *net_buf_get_debug(struct k_fifo *fifo, k_timeout_t timeout,
|
||||
struct net_buf *net_buf_get(struct k_fifo *fifo, k_timeout_t timeout)
|
||||
#endif
|
||||
{
|
||||
struct net_buf *buf, *frag;
|
||||
struct net_buf *buf;
|
||||
|
||||
NET_BUF_DBG("%s():%d: fifo %p", func, line, fifo);
|
||||
|
||||
@@ -416,18 +416,6 @@ struct net_buf *net_buf_get(struct k_fifo *fifo, k_timeout_t timeout)
|
||||
|
||||
NET_BUF_DBG("%s():%d: buf %p fifo %p", func, line, buf, fifo);
|
||||
|
||||
/* Get any fragments belonging to this buffer */
|
||||
for (frag = buf; (frag->flags & NET_BUF_FRAGS); frag = frag->frags) {
|
||||
frag->frags = k_fifo_get(fifo, K_NO_WAIT);
|
||||
__ASSERT_NO_MSG(frag->frags);
|
||||
|
||||
/* The fragments flag is only for FIFO-internal usage */
|
||||
frag->flags &= ~NET_BUF_FRAGS;
|
||||
}
|
||||
|
||||
/* Mark the end of the fragment list */
|
||||
frag->frags = NULL;
|
||||
|
||||
return buf;
|
||||
}
|
||||
|
||||
@@ -451,24 +439,19 @@ void net_buf_simple_reserve(struct net_buf_simple *buf, size_t reserve)
|
||||
|
||||
void net_buf_slist_put(sys_slist_t *list, struct net_buf *buf)
|
||||
{
|
||||
struct net_buf *tail;
|
||||
unsigned int key;
|
||||
|
||||
__ASSERT_NO_MSG(list);
|
||||
__ASSERT_NO_MSG(buf);
|
||||
|
||||
for (tail = buf; tail->frags; tail = tail->frags) {
|
||||
tail->flags |= NET_BUF_FRAGS;
|
||||
}
|
||||
|
||||
key = irq_lock();
|
||||
sys_slist_append_list(list, &buf->node, &tail->node);
|
||||
sys_slist_append(list, &buf->node);
|
||||
irq_unlock(key);
|
||||
}
|
||||
|
||||
struct net_buf *net_buf_slist_get(sys_slist_t *list)
|
||||
{
|
||||
struct net_buf *buf, *frag;
|
||||
struct net_buf *buf;
|
||||
unsigned int key;
|
||||
|
||||
__ASSERT_NO_MSG(list);
|
||||
@@ -477,40 +460,15 @@ struct net_buf *net_buf_slist_get(sys_slist_t *list)
 	buf = (void *)sys_slist_get(list);
 	irq_unlock(key);
 
 	if (!buf) {
 		return NULL;
 	}
 
-	/* Get any fragments belonging to this buffer */
-	for (frag = buf; (frag->flags & NET_BUF_FRAGS); frag = frag->frags) {
-		key = irq_lock();
-		frag->frags = (void *)sys_slist_get(list);
-		irq_unlock(key);
-
-		__ASSERT_NO_MSG(frag->frags);
-
-		/* The fragments flag is only for list-internal usage */
-		frag->flags &= ~NET_BUF_FRAGS;
-	}
-
-	/* Mark the end of the fragment list */
-	frag->frags = NULL;
-
 	return buf;
 }
 
 void net_buf_put(struct k_fifo *fifo, struct net_buf *buf)
 {
-	struct net_buf *tail;
-
 	__ASSERT_NO_MSG(fifo);
 	__ASSERT_NO_MSG(buf);
 
-	for (tail = buf; tail->frags; tail = tail->frags) {
-		tail->flags |= NET_BUF_FRAGS;
-	}
-
-	k_fifo_put_list(fifo, buf, tail);
+	k_fifo_put(fifo, buf);
 }
 
 #if defined(CONFIG_NET_BUF_LOG)
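net_buf_slist_put()/net_buf_slist_get() now move whole buffers, with irq_lock() still guarding the list so the pair remains usable from ISRs. A minimal producer/consumer sketch under the simplified semantics (pending, producer and consumer are hypothetical names):

static sys_slist_t pending = SYS_SLIST_STATIC_INIT(&pending);

static void producer(struct net_buf *buf)
{
	/* The whole fragment chain stays attached to buf. */
	net_buf_slist_put(&pending, buf);
}

static struct net_buf *consumer(void)
{
	return net_buf_slist_get(&pending); /* NULL when the list is empty */
}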
@@ -210,7 +210,7 @@ int net_ipv4_parse_hdr_options(struct net_pkt *pkt,
 }
 #endif
 
-enum net_verdict net_ipv4_input(struct net_pkt *pkt)
+enum net_verdict net_ipv4_input(struct net_pkt *pkt, bool is_loopback)
 {
 	NET_PKT_DATA_ACCESS_CONTIGUOUS_DEFINE(ipv4_access, struct net_ipv4_hdr);
 	NET_PKT_DATA_ACCESS_DEFINE(udp_access, struct net_udp_hdr);
@@ -266,6 +266,19 @@ enum net_verdict net_ipv4_input(struct net_pkt *pkt)
 		net_pkt_update_length(pkt, pkt_len);
 	}
 
+	if (!is_loopback) {
+		if (net_ipv4_is_addr_loopback(&hdr->dst) ||
+		    net_ipv4_is_addr_loopback(&hdr->src)) {
+			NET_DBG("DROP: localhost packet");
+			goto drop;
+		}
+
+		if (net_ipv4_is_my_addr(&hdr->src)) {
+			NET_DBG("DROP: src addr is %s", "mine");
+			goto drop;
+		}
+	}
+
 	if (net_ipv4_is_addr_mcast(&hdr->src)) {
 		NET_DBG("DROP: src addr is %s", "mcast");
 		goto drop;
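The added block is an anti-spoofing rule: unless the packet genuinely arrived over the loopback interface, a source or destination in 127.0.0.0/8, or a source equal to one of our own addresses, gets dropped. The same rule expressed as a standalone predicate, as a sketch (looks_spoofed is a hypothetical helper; the net_ipv4_* predicates are the ones used in the hunk):

static bool looks_spoofed(struct net_ipv4_hdr *hdr, bool is_loopback)
{
	if (is_loopback) {
		return false; /* loopback traffic may legitimately use 127/8 */
	}

	return net_ipv4_is_addr_loopback(&hdr->dst) ||
	       net_ipv4_is_addr_loopback(&hdr->src) ||
	       net_ipv4_is_my_addr(&hdr->src);
}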
@@ -488,6 +488,11 @@ enum net_verdict net_ipv6_input(struct net_pkt *pkt, bool is_loopback)
 			NET_DBG("DROP: invalid scope multicast packet");
 			goto drop;
 		}
+
+		if (net_ipv6_is_my_addr(&hdr->src)) {
+			NET_DBG("DROP: src addr is %s", "mine");
+			goto drop;
+		}
 	}
 
 	/* Check extension headers */
@@ -123,7 +123,7 @@ static inline enum net_verdict process_data(struct net_pkt *pkt,
 #endif
 #if defined(CONFIG_NET_IPV4)
 	case 0x40:
-		return net_ipv4_input(pkt);
+		return net_ipv4_input(pkt, is_loopback);
 #endif
 	}
 
@@ -69,12 +69,14 @@ static inline const char *net_context_state(struct net_context *context)
 #endif
 
 #if defined(CONFIG_NET_NATIVE)
-enum net_verdict net_ipv4_input(struct net_pkt *pkt);
+enum net_verdict net_ipv4_input(struct net_pkt *pkt, bool is_loopback);
 enum net_verdict net_ipv6_input(struct net_pkt *pkt, bool is_loopback);
 #else
-static inline enum net_verdict net_ipv4_input(struct net_pkt *pkt)
+static inline enum net_verdict net_ipv4_input(struct net_pkt *pkt,
+					      bool is_loopback)
 {
 	ARG_UNUSED(pkt);
+	ARG_UNUSED(is_loopback);
 
 	return NET_CONTINUE;
 }
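The #else branch keeps the usual Zephyr stub pattern: when native networking is compiled out, an inline no-op with the same signature lets call sites stay free of #ifdefs. A generic sketch of the pattern (CONFIG_MY_FEATURE and my_feature_input are hypothetical):

#if defined(CONFIG_MY_FEATURE)
int my_feature_input(struct net_pkt *pkt, bool is_loopback);
#else
static inline int my_feature_input(struct net_pkt *pkt, bool is_loopback)
{
	ARG_UNUSED(pkt);
	ARG_UNUSED(is_loopback);

	return 0; /* behave as a harmless no-op */
}
#endif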
@@ -4214,6 +4214,74 @@ wait_reply:
 #endif
 }
 
+static bool is_pkt_part_of_slab(const struct k_mem_slab *slab, const char *ptr)
+{
+	size_t last_offset = (slab->num_blocks - 1) * slab->block_size;
+	size_t ptr_offset;
+
+	/* Check if pointer fits into slab buffer area. */
+	if ((ptr < slab->buffer) || (ptr > slab->buffer + last_offset)) {
+		return false;
+	}
+
+	/* Check if pointer offset is correct. */
+	ptr_offset = ptr - slab->buffer;
+	if (ptr_offset % slab->block_size != 0) {
+		return false;
+	}
+
+	return true;
+}
+
+struct ctx_pkt_slab_info {
+	const void *ptr;
+	bool pkt_source_found;
+};
+
+static void check_context_pool(struct net_context *context, void *user_data)
+{
+#if defined(CONFIG_NET_CONTEXT_NET_PKT_POOL)
+	if (!net_context_is_used(context)) {
+		return;
+	}
+
+	if (context->tx_slab) {
+		struct ctx_pkt_slab_info *info = user_data;
+		struct k_mem_slab *slab = context->tx_slab();
+
+		if (is_pkt_part_of_slab(slab, info->ptr)) {
+			info->pkt_source_found = true;
+		}
+	}
+#endif /* CONFIG_NET_CONTEXT_NET_PKT_POOL */
+}
+
+static bool is_pkt_ptr_valid(const void *ptr)
+{
+	struct k_mem_slab *rx, *tx;
+
+	net_pkt_get_info(&rx, &tx, NULL, NULL);
+
+	if (is_pkt_part_of_slab(rx, ptr) || is_pkt_part_of_slab(tx, ptr)) {
+		return true;
+	}
+
+	if (IS_ENABLED(CONFIG_NET_CONTEXT_NET_PKT_POOL)) {
+		struct ctx_pkt_slab_info info;
+
+		info.ptr = ptr;
+		info.pkt_source_found = false;
+
+		net_context_foreach(check_context_pool, &info);
+
+		if (info.pkt_source_found) {
+			return true;
+		}
+	}
+
+	return false;
+}
+
 static struct net_pkt *get_net_pkt(const char *ptr_str)
 {
 	uint8_t buf[sizeof(intptr_t)];
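is_pkt_part_of_slab() accepts a pointer only if it lies inside the slab's buffer and lands exactly on a block boundary. A worked sketch of that arithmetic, assuming access to the helper above (the layout and slab_check_example are hypothetical):

static void slab_check_example(void)
{
	static char storage[4 * 64] __aligned(4); /* 4 blocks of 64 bytes */
	struct k_mem_slab slab;

	k_mem_slab_init(&slab, storage, 64, 4);

	/* offset 128 = 2 * 64: a valid block start */
	__ASSERT_NO_MSG(is_pkt_part_of_slab(&slab, storage + 128));

	/* offset 144: inside the buffer, but 144 % 64 != 0, so rejected */
	__ASSERT_NO_MSG(!is_pkt_part_of_slab(&slab, storage + 144));
}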
@@ -4289,6 +4357,14 @@ static int cmd_net_pkt(const struct shell *shell, size_t argc, char *argv[])
 	if (!pkt) {
 		PR_ERROR("Invalid ptr value (%s). "
 			 "Example: 0x01020304\n", argv[1]);
 
 		return -ENOEXEC;
 	}
 
+	if (!is_pkt_ptr_valid(pkt)) {
+		PR_ERROR("Pointer is not recognized as net_pkt (%s).\n",
+			 argv[1]);
+
+		return -ENOEXEC;
+	}
+
@@ -205,8 +205,12 @@ again:
 
 	if (info) {
 		ret = info_len + sizeof(hdr);
-		ret = MIN(max_len, ret);
-		memcpy(&copy_to[sizeof(hdr)], info, ret);
+		if (ret > max_len) {
+			errno = EMSGSIZE;
+			return -1;
+		}
+
+		memcpy(&copy_to[sizeof(hdr)], info, info_len);
 	} else {
 		ret = 0;
 	}
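The old code silently clamped the copy to max_len; the new code fails with EMSGSIZE so the caller never parses a half-copied record. A caller-side sketch of the new contract, assuming this helper sits behind a recv()-style socket path (recv_or_grow is a hypothetical wrapper):

#include <errno.h>

static ssize_t recv_or_grow(int fd, void *buf, size_t len)
{
	ssize_t n = recv(fd, buf, len, 0);

	if (n < 0 && errno == EMSGSIZE) {
		/* The record did not fit: enlarge the buffer and retry
		 * rather than parse truncated data.
		 */
	}

	return n;
}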
@@ -140,7 +140,13 @@ static int cmd_kernel_threads(const struct shell *shell,
 
 	shell_print(shell, "Scheduler: %u since last call", sys_clock_elapsed());
 	shell_print(shell, "Threads:");
-	k_thread_foreach(shell_tdata_dump, (void *)shell);
+
+	/*
+	 * Use the unlocked version as the callback itself might call
+	 * arch_irq_unlock.
+	 */
+	k_thread_foreach_unlocked(shell_tdata_dump, (void *)shell);
 
 	return 0;
 }
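As the new comment says, k_thread_foreach() holds a kernel lock across the whole walk, so a callback that ends up re-enabling interrupts (as shell output can) is unsafe there; k_thread_foreach_unlocked() tolerates that at the cost of a less atomic snapshot. A minimal sketch of the unlocked variant (count_threads and thread_count are hypothetical):

static void count_threads(const struct k_thread *thread, void *user_data)
{
	ARG_UNUSED(thread);

	(*(int *)user_data)++;
}

static int thread_count(void)
{
	int count = 0;

	k_thread_foreach_unlocked(count_threads, &count);

	return count;
}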
@@ -184,7 +190,12 @@ static int cmd_kernel_stacks(const struct shell *shell,
 
 	ARG_UNUSED(argc);
 	ARG_UNUSED(argv);
-	k_thread_foreach(shell_stack_dump, (void *)shell);
+
+	/*
+	 * Use the unlocked version as the callback itself might call
+	 * arch_irq_unlock.
+	 */
+	k_thread_foreach_unlocked(shell_stack_dump, (void *)shell);
 
 	/* Placeholder logic for interrupt stack until we have better
 	 * kernel support, including dumping arch-specific exception-related
@@ -38,6 +38,7 @@
 #define CONFIG_MP_NUM_CPUS 1
 #define CONFIG_SYS_CLOCK_TICKS_PER_SEC 100
 #define CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC 10000000
+#define CONFIG_SYS_CLOCK_MAX_TIMEOUT_DAYS 365
 #define ARCH_STACK_PTR_ALIGN 8
 /* FIXME: Properly integrate with Zephyr's arch specific code */
 #define CONFIG_X86 1
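CONFIG_SYS_CLOCK_MAX_TIMEOUT_DAYS bounds the longest single timeout the tick arithmetic must represent; with the values in this mock header the bound works out as below (MAX_TIMEOUT_TICKS is a hypothetical name; the interpretation of the option is an assumption):

/* 365 days * 86400 s/day * 100 ticks/s = 3,153,600,000 ticks,
 * which still fits in an unsigned 32-bit tick count.
 */
#define MAX_TIMEOUT_TICKS \
	((uint64_t)CONFIG_SYS_CLOCK_MAX_TIMEOUT_DAYS * 24 * 3600 * \
	 CONFIG_SYS_CLOCK_TICKS_PER_SEC)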
Some files were not shown because too many files have changed in this diff.