Compare commits
86 Commits
v3.3.0
...
v3.2-branch
| Author | SHA1 | Date |
|---|---|---|
|  | ab2f20ae34 |  |
|  | eddbc823f9 |  |
|  | 05bdd419fd |  |
|  | a413244f4a |  |
|  | 5b7742225e |  |
|  | 4831c47b48 |  |
|  | eb637d1067 |  |
|  | 7ccd0340ee |  |
|  | 0b80bef0e3 |  |
|  | 9e27ddabc0 |  |
|  | cf715bcad5 |  |
|  | 2f5071f886 |  |
|  | 7eb2dc8996 |  |
|  | 0da370c24d |  |
|  | a5d8c30f91 |  |
|  | 4b55a665cc |  |
|  | 15af1b64ea |  |
|  | e7b28e1e0b |  |
|  | 0ca7bdf91a |  |
|  | ed7118fef0 |  |
|  | b8c31d6c28 |  |
|  | 09dcc911b3 |  |
|  | 8f6cfb4e40 |  |
|  | 0151e0cdd9 |  |
|  | 2f32a355d9 |  |
|  | 17238d2e0e |  |
|  | f312f7393d |  |
|  | fa294c3b5b |  |
|  | 33a5ae7250 |  |
|  | 56cece0632 |  |
|  | 86ee3f3e61 |  |
|  | 438da4bc21 |  |
|  | 0bb84fb889 |  |
|  | 2b6e40e877 |  |
|  | b3ab6666d3 |  |
|  | 5bce343f0a |  |
|  | ed05fdf0e5 |  |
|  | 5eb5b3efe5 |  |
|  | a6e658a0d5 |  |
|  | 46d06d2227 |  |
|  | 15286c5c6d |  |
|  | 2677520f19 |  |
|  | 3d56142dc4 |  |
|  | 62403eacf1 |  |
|  | 470c22017f |  |
|  | 811f0ab6a3 |  |
|  | 58cc6268b1 |  |
|  | 7c8eeb15bd |  |
|  | fe82772048 |  |
|  | caefa95e5e |  |
|  | 53fe4d2335 |  |
|  | 3e8f22ae31 |  |
|  | 2a9823bc9d |  |
|  | cd74fc6f69 |  |
|  | 8fb3893c4c |  |
|  | a284b8a1b1 |  |
|  | 9b1ffe3d0c |  |
|  | 2f318bf315 |  |
|  | 360a69822f |  |
|  | bf16432961 |  |
|  | a478578b9a |  |
|  | 23e2db5a9b |  |
|  | 754b2441b5 |  |
|  | 0a49fd43d6 |  |
|  | 93aee550b6 |  |
|  | 59797c25ad |  |
|  | 6955f1098e |  |
|  | dc071bb7a7 |  |
|  | cbce6bfd5a |  |
|  | 50f7d4ba3c |  |
|  | b7484c0220 |  |
|  | ae2d093866 |  |
|  | 2320b111de |  |
|  | 4024b93842 |  |
|  | 93944053f6 |  |
|  | 77b221be17 |  |
|  | 005b8af3ef |  |
|  | 94887d11af |  |
|  | 4c97854416 |  |
|  | 4cce882294 |  |
|  | 8275f20657 |  |
|  | 21230e8462 |  |
|  | a6c099454d |  |
|  | 95b345f0b0 |  |
|  | 638735fc27 |  |
|  | 4b58ae97f3 |  |
4  .github/workflows/assigner.yml  vendored

@@ -15,7 +15,7 @@ jobs:
   assignment:
     name: Pull Request Assignment
     if: github.event.pull_request.draft == false
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-22.04

     steps:
       - name: Install Python dependencies
@@ -24,7 +24,7 @@ jobs:
           pip3 install -U PyGithub>=1.55

       - name: Check out source code
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3

       - name: Run assignment script
         env:
4  .github/workflows/backport_issue_check.yml  vendored

@@ -8,11 +8,11 @@ on:
 jobs:
   backport:
     name: Backport Issue Check
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-22.04

     steps:
       - name: Check out source code
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3

       - name: Install Python dependencies
         run: |
23  .github/workflows/bluetooth-tests.yaml  vendored

@@ -11,24 +11,19 @@ on:
       - "soc/posix/**"
       - "arch/posix/**"

+concurrency:
+  group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.head_ref || github.ref }}
+  cancel-in-progress: true
+
 jobs:
-  bluetooth-test-prep:
+  bluetooth-test:
     runs-on: ubuntu-20.04
-    steps:
-      - name: Cancel Previous Runs
-        uses: styfle/cancel-workflow-action@0.6.0
-        with:
-          access_token: ${{ github.token }}
-  bluetooth-test-build:
-    runs-on: ubuntu-20.04
-    needs: bluetooth-test-prep
     container:
-      image: ghcr.io/zephyrproject-rtos/ci:v0.24.2
+      image: ghcr.io/zephyrproject-rtos/ci:v0.24.3
       options: '--entrypoint /bin/bash'
     env:
       ZEPHYR_TOOLCHAIN_VARIANT: zephyr
       ZEPHYR_SDK_INSTALL_DIR: /opt/toolchains/zephyr-sdk-0.15.0
       CLANG_ROOT_DIR: /usr/lib/llvm-12
       BSIM_OUT_PATH: /opt/bsim/
       BSIM_COMPONENTS_PATH: /opt/bsim/components
       EDTT_PATH: ../tools/edtt
@@ -47,7 +42,7 @@ jobs:
           echo "$HOME/.local/bin" >> $GITHUB_PATH

       - name: checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
         with:
           fetch-depth: 0

@@ -72,7 +67,7 @@ jobs:

       - name: Upload Test Results
         if: always()
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v3
         with:
           name: bluetooth-test-results
           path: |
@@ -81,7 +76,7 @@ jobs:

       - name: Upload Event Details
         if: always()
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v3
         with:
           name: event
           path: |
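The change above is a recurring pattern in this release: the dedicated prep/cancel jobs built around styfle/cancel-workflow-action are dropped in favor of GitHub's native concurrency setting. A minimal sketch of the replacement pattern, with the group expression taken verbatim from the diff (the job name and runner label here are illustrative only):

concurrency:
  # One group per workflow and ref; a newer run cancels the in-flight one.
  group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.head_ref || github.ref }}
  cancel-in-progress: true

jobs:
  build:                    # illustrative job name
    runs-on: ubuntu-22.04   # illustrative runner
    steps:
      - uses: actions/checkout@v3

This removes one queued job per workflow run and lets GitHub handle cancellation ordering instead of a third-party action.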
6  .github/workflows/bug_snapshot.yaml  vendored

@@ -21,7 +21,7 @@ jobs:

     steps:
       - name: Checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3

       - name: Install Python dependencies
         run: |
@@ -42,9 +42,9 @@ jobs:
           echo "BUGS_PICKLE_PATH=${BUGS_PICKLE_PATH}" >> ${GITHUB_ENV}

       - name: Configure AWS Credentials
-        uses: aws-actions/configure-aws-credentials@v1
+        uses: aws-actions/configure-aws-credentials@v2
         with:
-          aws-access-key-id: ${{ secrets.AWS_BUILDS_ZEPHYR_BUG_SNAPSHOT_ACCESS_KEY_ID }}
+          aws-access-key-id: ${{ vars.AWS_BUILDS_ZEPHYR_BUG_SNAPSHOT_ACCESS_KEY_ID }}
           aws-secret-access-key: ${{ secrets.AWS_BUILDS_ZEPHYR_BUG_SNAPSHOT_SECRET_ACCESS_KEY }}
           aws-region: us-east-1
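A second recurring pattern: non-sensitive AWS access key IDs move from the secrets context to the repository-level vars context (GitHub's configuration variables), while the secret access keys stay in secrets. A hedged sketch of the resulting step, using names that appear in these diffs (region and variable names follow the workflows above):

- name: Configure AWS Credentials
  uses: aws-actions/configure-aws-credentials@v2
  with:
    # The key ID is configuration, not a credential; stored as a variable.
    aws-access-key-id: ${{ vars.AWS_TESTING_ACCESS_KEY_ID }}
    # The secret access key remains an encrypted secret.
    aws-secret-access-key: ${{ secrets.AWS_TESTING_SECRET_ACCESS_KEY }}
    aws-region: us-east-1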
51  .github/workflows/clang.yaml  vendored

@@ -2,29 +2,25 @@ name: Build with Clang/LLVM

 on: pull_request_target

+concurrency:
+  group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.head_ref || github.ref }}
+  cancel-in-progress: true
+
 jobs:
-  clang-build-prep:
-    runs-on: ubuntu-20.04
-    steps:
-      - name: Cancel Previous Runs
-        uses: styfle/cancel-workflow-action@0.6.0
-        with:
-          access_token: ${{ github.token }}
   clang-build:
-    runs-on: zephyr_runner
-    needs: clang-build-prep
+    runs-on: zephyr-runner-linux-x64-4xlarge
     container:
-      image: ghcr.io/zephyrproject-rtos/ci:v0.24.2
+      image: ghcr.io/zephyrproject-rtos/ci:v0.24.3
       options: '--entrypoint /bin/bash'
       volumes:
-        - /home/runners/zephyrproject:/github/cache/zephyrproject
+        - /repo-cache/zephyrproject:/github/cache/zephyrproject
     strategy:
       fail-fast: false
       matrix:
         platform: ["native_posix"]
     env:
       ZEPHYR_SDK_INSTALL_DIR: /opt/toolchains/zephyr-sdk-0.15.0
-      CLANG_ROOT_DIR: /usr/lib/llvm-12
+      LLVM_TOOLCHAIN_PATH: /usr/lib/llvm-15
       COMMIT_RANGE: ${{ github.event.pull_request.base.sha }}..${{ github.event.pull_request.head.sha }}
       BASE_REF: ${{ github.base_ref }}
     outputs:
@@ -38,12 +34,14 @@ jobs:
           # GitHub comes up with a fundamental fix for this problem.
           git config --global --add safe.directory ${GITHUB_WORKSPACE}

-      - name: Cleanup
+      - name: Clone cached Zephyr repository
+        continue-on-error: true
         run: |
-          # hotfix, until we have a better way to deal with existing data
-          rm -rf zephyr zephyr-testing
+          git clone --shared /github/cache/zephyrproject/zephyr .
+          git remote set-url origin ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY}

       - name: Checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
         with:
           ref: ${{ github.event.pull_request.head.sha }}
           fetch-depth: 0
@@ -69,7 +67,7 @@ jobs:
       - name: Check Environment
         run: |
           cmake --version
-          ${CLANG_ROOT_DIR}/bin/clang --version
+          ${LLVM_TOOLCHAIN_PATH}/bin/clang --version
           gcc --version
           ls -la

@@ -80,16 +78,17 @@ jobs:
           string(TIMESTAMP current_date "%Y-%m-%d-%H;%M;%S" UTC)
           string(REPLACE "/" "_" repo ${{github.repository}})
           string(REPLACE "-" "_" repo2 ${repo})
-          message("::set-output name=repo::${repo2}")
+          file(APPEND $ENV{GITHUB_OUTPUT} "repo=${repo2}\n")

       - name: use cache
         id: cache-ccache
-        uses: zephyrproject-rtos/action-s3-cache@v1
+        uses: zephyrproject-rtos/action-s3-cache@v1.2.0
         with:
           key: ${{ steps.ccache_cache_timestamp.outputs.repo }}-${{ github.ref_name }}-clang-${{ matrix.platform }}-ccache
           path: /github/home/.ccache
           aws-s3-bucket: ccache.zephyrproject.org
-          aws-access-key-id: ${{ secrets.CCACHE_S3_ACCESS_KEY_ID }}
-          aws-secret-access-key: ${{ secrets.CCACHE_S3_SECRET_ACCESS_KEY }}
+          aws-access-key-id: ${{ vars.AWS_CCACHE_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ secrets.AWS_CCACHE_SECRET_ACCESS_KEY }}
           aws-region: us-east-2

       - name: ccache stats initial
@@ -108,12 +107,12 @@ jobs:

           # We can limit scope to just what has changed
           if [ -s testplan.json ]; then
-            echo "::set-output name=report_needed::1";
+            echo "report_needed=1" >> $GITHUB_OUTPUT
             # Full twister but with options based on changes
             ./scripts/twister --force-color --inline-logs -M -N -v --load-tests testplan.json --retry-failed 2
           else
             # if nothing is run, skip reporting step
-            echo "::set-output name=report_needed::0";
+            echo "report_needed=0" >> $GITHUB_OUTPUT
           fi

       - name: ccache stats post
@@ -122,7 +121,7 @@ jobs:

       - name: Upload Unit Test Results
         if: always() && steps.twister.outputs.report_needed != 0
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v3
         with:
           name: Unit Test Results (Subset ${{ matrix.platform }})
           path: twister-out/twister.xml
@@ -134,7 +133,7 @@ jobs:
     if: (success() || failure() ) && needs.clang-build.outputs.report_needed != 0
     steps:
       - name: Download Artifacts
-        uses: actions/download-artifact@v2
+        uses: actions/download-artifact@v3
         with:
           path: artifacts
       - name: Merge Test Results
@@ -145,7 +144,7 @@ jobs:

       - name: Upload Unit Test Results in HTML
         if: always()
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v3
         with:
           name: HTML Unit Test Results
           if-no-files-found: ignore
50  .github/workflows/codecov.yaml  vendored

@@ -4,29 +4,24 @@ on:
   schedule:
     - cron: '25 */3 * * 1-5'

-jobs:
-  codecov-prep:
-    runs-on: ubuntu-20.04
-    if: github.repository == 'zephyrproject-rtos/zephyr'
-    steps:
-      - name: Cancel Previous Runs
-        uses: styfle/cancel-workflow-action@0.6.0
-        with:
-          access_token: ${{ github.token }}
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.head_ref || github.ref }}
+  cancel-in-progress: true
+
+jobs:
   codecov:
-    runs-on: zephyr_runner
-    needs: codecov-prep
+    runs-on: zephyr-runner-linux-x64-4xlarge
     container:
-      image: ghcr.io/zephyrproject-rtos/ci:v0.24.2
+      image: ghcr.io/zephyrproject-rtos/ci:v0.24.3
       options: '--entrypoint /bin/bash'
+      volumes:
+        - /repo-cache/zephyrproject:/github/cache/zephyrproject
     strategy:
       fail-fast: false
       matrix:
         platform: ["native_posix", "qemu_x86", "unit_testing"]
     env:
       ZEPHYR_SDK_INSTALL_DIR: /opt/toolchains/zephyr-sdk-0.15.0
-      CLANG_ROOT_DIR: /usr/lib/llvm-12
     steps:
       - name: Apply container owner mismatch workaround
         run: |
@@ -40,8 +35,14 @@ jobs:
         run: |
           echo "$HOME/.local/bin" >> $GITHUB_PATH

+      - name: Clone cached Zephyr repository
+        continue-on-error: true
+        run: |
+          git clone --shared /github/cache/zephyrproject/zephyr .
+          git remote set-url origin ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY}
+
       - name: checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
         with:
           fetch-depth: 0

@@ -53,7 +54,6 @@ jobs:
       - name: Check Environment
         run: |
           cmake --version
-          ${CLANG_ROOT_DIR}/bin/clang --version
           gcc --version
           ls -la
       - name: Prepare ccache keys
@@ -62,17 +62,17 @@ jobs:
         run: |
           string(REPLACE "/" "_" repo ${{github.repository}})
           string(REPLACE "-" "_" repo2 ${repo})
-          message("::set-output name=repo::${repo2}")
+          file(APPEND $ENV{GITHUB_OUTPUT} "repo=${repo2}\n")

       - name: use cache
         id: cache-ccache
-        uses: zephyrproject-rtos/action-s3-cache@v1
+        uses: zephyrproject-rtos/action-s3-cache@v1.2.0
         with:
           key: ${{ steps.ccache_cache_prop.outputs.repo }}-${{github.event_name}}-${{matrix.platform}}-codecov-ccache
           path: /github/home/.ccache
           aws-s3-bucket: ccache.zephyrproject.org
-          aws-access-key-id: ${{ secrets.CCACHE_S3_ACCESS_KEY_ID }}
-          aws-secret-access-key: ${{ secrets.CCACHE_S3_SECRET_ACCESS_KEY }}
+          aws-access-key-id: ${{ vars.AWS_CCACHE_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ secrets.AWS_CCACHE_SECRET_ACCESS_KEY }}
           aws-region: us-east-2

       - name: ccache stats initial
@@ -102,7 +102,7 @@ jobs:

       - name: Upload Coverage Results
         if: always()
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v3
         with:
           name: Coverage Data (Subset ${{ matrix.platform }})
           path: coverage/reports/${{ matrix.platform }}.info
@@ -116,11 +116,11 @@ jobs:

     steps:
       - name: checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
         with:
           fetch-depth: 0
       - name: Download Artifacts
-        uses: actions/download-artifact@v2
+        uses: actions/download-artifact@v3
         with:
           path: coverage/reports

@@ -152,8 +152,8 @@ jobs:
             set(MERGELIST "${MERGELIST} -a ${f}")
           endif()
         endforeach()
-        message("::set-output name=mergefiles::${MERGELIST}")
-        message("::set-output name=covfiles::${FILELIST}")
+        file(APPEND $ENV{GITHUB_OUTPUT} "mergefiles=${MERGELIST}\n")
+        file(APPEND $ENV{GITHUB_OUTPUT} "covfiles=${FILELIST}\n")

     - name: Merge coverage files
       run: |
@@ -164,7 +164,7 @@ jobs:

     - name: Upload coverage to Codecov
       if: always()
-      uses: codecov/codecov-action@v2
+      uses: codecov/codecov-action@v3
       with:
         directory: ./coverage/reports
         env_vars: OS,PYTHON
4  .github/workflows/coding_guidelines.yml  vendored

@@ -8,13 +8,13 @@ jobs:
     name: Run coding guidelines checks on patch series (PR)
     steps:
       - name: Checkout the code
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
         with:
           ref: ${{ github.event.pull_request.head.sha }}
           fetch-depth: 0

       - name: cache-pip
-        uses: actions/cache@v1
+        uses: actions/cache@v3
         with:
           path: ~/.cache/pip
           key: ${{ runner.os }}-doc-pip
6  .github/workflows/compliance.yml  vendored

@@ -8,7 +8,7 @@ jobs:
     name: Check MAINTAINERS file
     steps:
       - name: Checkout the code
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
         with:
           ref: ${{ github.event.pull_request.head.sha }}
           fetch-depth: 0
@@ -28,13 +28,13 @@ jobs:
           echo "$HOME/.local/bin" >> $GITHUB_PATH

       - name: Checkout the code
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
         with:
           ref: ${{ github.event.pull_request.head.sha }}
           fetch-depth: 0

       - name: cache-pip
-        uses: actions/cache@v1
+        uses: actions/cache@v3
         with:
           path: ~/.cache/pip
           key: ${{ runner.os }}-doc-pip
8  .github/workflows/daily_test_version.yml  vendored

@@ -17,10 +17,10 @@ jobs:

     steps:
       - name: Configure AWS Credentials
-        uses: aws-actions/configure-aws-credentials@v1
+        uses: aws-actions/configure-aws-credentials@v2
         with:
-          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID_TESTING }}
-          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_TESTING }}
+          aws-access-key-id: ${{ vars.AWS_TESTING_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ secrets.AWS_TESTING_SECRET_ACCESS_KEY }}
           aws-region: us-east-1

       - name: install-pip
@@ -28,7 +28,7 @@ jobs:
           pip3 install gitpython

       - name: checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
         with:
           fetch-depth: 0
16  .github/workflows/devicetree_checks.yml  vendored

@@ -6,10 +6,16 @@ name: Devicetree script tests

 on:
   push:
+    branches:
+      - main
+      - v*-branch
     paths:
       - 'scripts/dts/**'
       - '.github/workflows/devicetree_checks.yml'
   pull_request:
+    branches:
+      - main
+      - v*-branch
     paths:
       - 'scripts/dts/**'
       - '.github/workflows/devicetree_checks.yml'
@@ -29,14 +35,14 @@ jobs:
             python-version: 3.6
     steps:
       - name: checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
       - name: Set up Python ${{ matrix.python-version }}
-        uses: actions/setup-python@v1
+        uses: actions/setup-python@v4
         with:
           python-version: ${{ matrix.python-version }}
       - name: cache-pip-linux
         if: startsWith(runner.os, 'Linux')
-        uses: actions/cache@v1
+        uses: actions/cache@v3
         with:
           path: ~/.cache/pip
           key: ${{ runner.os }}-pip-${{ matrix.python-version }}
@@ -44,7 +50,7 @@ jobs:
             ${{ runner.os }}-pip-${{ matrix.python-version }}
       - name: cache-pip-mac
         if: startsWith(runner.os, 'macOS')
-        uses: actions/cache@v1
+        uses: actions/cache@v3
         with:
           path: ~/Library/Caches/pip
           # Trailing '-' was just to get a different cache name
@@ -53,7 +59,7 @@ jobs:
             ${{ runner.os }}-pip-${{ matrix.python-version }}-
       - name: cache-pip-win
         if: startsWith(runner.os, 'Windows')
-        uses: actions/cache@v1
+        uses: actions/cache@v3
         with:
           path: ~\AppData\Local\pip\Cache
           key: ${{ runner.os }}-pip-${{ matrix.python-version }}
18  .github/workflows/doc-build.yml  vendored

@@ -43,7 +43,7 @@ jobs:

     steps:
       - name: checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3

       - name: install-pkgs
         run: |
@@ -54,7 +54,7 @@ jobs:
           echo "${PWD}/doxygen-${DOXYGEN_VERSION}/bin" >> $GITHUB_PATH

       - name: cache-pip
-        uses: actions/cache@v1
+        uses: actions/cache@v3
         with:
           path: ~/.cache/pip
           key: pip-${{ hashFiles('scripts/requirements-doc.txt') }}
@@ -107,7 +107,7 @@ jobs:
           echo "Documentation will be available shortly at: ${DOC_URL}" >> $GITHUB_STEP_SUMMARY

       - name: upload-pr-number
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v3
         if: github.event_name == 'pull_request'
         with:
           name: pr_num
@@ -124,19 +124,25 @@ jobs:

     steps:
       - name: checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3

       - name: install-pkgs
         run: |
           apt-get update
-          apt-get install -y python3-pip ninja-build doxygen graphviz librsvg2-bin
+          apt-get install -y python3-pip python3-venv ninja-build doxygen graphviz librsvg2-bin

       - name: cache-pip
-        uses: actions/cache@v1
+        uses: actions/cache@v3
         with:
           path: ~/.cache/pip
           key: pip-${{ hashFiles('scripts/requirements-doc.txt') }}

+      - name: setup-venv
+        run: |
+          python3 -m venv .venv
+          . .venv/bin/activate
+          echo PATH=$PATH >> $GITHUB_ENV
+
       - name: install-pip
         run: |
           pip3 install -U setuptools wheel pip
6  .github/workflows/doc-publish-pr.yml  vendored

@@ -32,7 +32,7 @@ jobs:

       - name: Check PR number
         id: check-pr
-        uses: carpentries/actions/check-valid-pr@v0.8
+        uses: carpentries/actions/check-valid-pr@v0.14.0
         with:
           pr: ${{ env.PR_NUM }}
           sha: ${{ github.event.workflow_run.head_sha }}
@@ -48,9 +48,9 @@ jobs:
           tar xf html-output/html-output.tar.xz -C html-output

       - name: Configure AWS Credentials
-        uses: aws-actions/configure-aws-credentials@v1
+        uses: aws-actions/configure-aws-credentials@v2
         with:
-          aws-access-key-id: ${{ secrets.AWS_BUILDS_ZEPHYR_PR_ACCESS_KEY_ID }}
+          aws-access-key-id: ${{ vars.AWS_BUILDS_ZEPHYR_PR_ACCESS_KEY_ID }}
           aws-secret-access-key: ${{ secrets.AWS_BUILDS_ZEPHYR_PR_SECRET_ACCESS_KEY }}
           aws-region: us-east-1
6  .github/workflows/doc-publish.yml  vendored

@@ -34,10 +34,10 @@ jobs:
           tar xf html-output/html-output.tar.xz -C html-output

       - name: Configure AWS Credentials
-        uses: aws-actions/configure-aws-credentials@v1
+        uses: aws-actions/configure-aws-credentials@v2
         with:
-          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
-          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+          aws-access-key-id: ${{ vars.AWS_DOCS_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ secrets.AWS_DOCS_SECRET_ACCESS_KEY }}
           aws-region: us-east-1

       - name: Upload to AWS S3
4  .github/workflows/errno.yml  vendored

@@ -10,7 +10,7 @@ jobs:
   check-errno:
     runs-on: ubuntu-20.04
     container:
-      image: ghcr.io/zephyrproject-rtos/ci:v0.24.2
+      image: ghcr.io/zephyrproject-rtos/ci:v0.24.3
     env:
       ZEPHYR_SDK_INSTALL_DIR: /opt/toolchains/zephyr-sdk-0.15.0

@@ -24,7 +24,7 @@ jobs:
           git config --global --add safe.directory ${GITHUB_WORKSPACE}

       - name: checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3

       - name: Run errno.py
         run: |
24  .github/workflows/footprint-tracking.yml  vendored

@@ -13,27 +13,21 @@ on:
       # same commit
       - 'v*'

+concurrency:
+  group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.head_ref || github.ref }}
+  cancel-in-progress: true
+
 jobs:
-  footprint-tracking-cancel:
-    runs-on: ubuntu-20.04
-    if: github.repository == 'zephyrproject-rtos/zephyr'
-    steps:
-      - name: Cancel Previous Runs
-        uses: styfle/cancel-workflow-action@0.6.0
-        with:
-          access_token: ${{ github.token }}
   footprint-tracking:
     runs-on: ubuntu-20.04
     if: github.repository == 'zephyrproject-rtos/zephyr'
-    needs: footprint-tracking-cancel
     container:
-      image: ghcr.io/zephyrproject-rtos/ci:v0.24.2
+      image: ghcr.io/zephyrproject-rtos/ci:v0.24.3
       options: '--entrypoint /bin/bash'
     strategy:
       fail-fast: false
     env:
       ZEPHYR_SDK_INSTALL_DIR: /opt/toolchains/zephyr-sdk-0.15.0
       CLANG_ROOT_DIR: /usr/lib/llvm-12
       ZEPHYR_TOOLCHAIN_VARIANT: zephyr
     steps:
       - name: Apply container owner mismatch workaround
@@ -52,7 +46,7 @@ jobs:
           sudo pip3 install -U setuptools wheel pip gitpython

       - name: checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
         with:
           ref: ${{ github.event.pull_request.head.sha }}
           fetch-depth: 0
@@ -64,10 +58,10 @@ jobs:
           west update 2>&1 1> west.update.log || west update 2>&1 1> west.update2.log

       - name: Configure AWS Credentials
-        uses: aws-actions/configure-aws-credentials@v1
+        uses: aws-actions/configure-aws-credentials@v2
         with:
-          aws-access-key-id: ${{ secrets.FOOTPRINT_AWS_KEY_ID }}
-          aws-secret-access-key: ${{ secrets.FOOTPRINT_AWS_ACCESS_KEY }}
+          aws-access-key-id: ${{ vars.AWS_TESTING_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ secrets.AWS_TESTING_SECRET_ACCESS_KEY }}
           aws-region: us-east-1

       - name: Record Footprint
23  .github/workflows/footprint.yml  vendored

@@ -2,34 +2,23 @@ name: Footprint Delta

 on: pull_request

+concurrency:
+  group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.head_ref || github.ref }}
+  cancel-in-progress: true
+
 jobs:
-  footprint-cancel:
-    runs-on: ubuntu-20.04
-    if: github.repository == 'zephyrproject-rtos/zephyr'
-    steps:
-      - name: Cancel Previous Runs
-        uses: styfle/cancel-workflow-action@0.6.0
-        with:
-          access_token: ${{ github.token }}
   footprint-delta:
     runs-on: ubuntu-20.04
     if: github.repository == 'zephyrproject-rtos/zephyr'
-    needs: footprint-cancel
     container:
-      image: ghcr.io/zephyrproject-rtos/ci:v0.24.2
+      image: ghcr.io/zephyrproject-rtos/ci:v0.24.3
       options: '--entrypoint /bin/bash'
     strategy:
       fail-fast: false
     env:
       ZEPHYR_SDK_INSTALL_DIR: /opt/toolchains/zephyr-sdk-0.15.0
       CLANG_ROOT_DIR: /usr/lib/llvm-12
       ZEPHYR_TOOLCHAIN_VARIANT: zephyr
     steps:
-      - name: Cancel Previous Runs
-        uses: styfle/cancel-workflow-action@0.6.0
-        with:
-          access_token: ${{ github.token }}
-
       - name: Apply container owner mismatch workaround
         run: |
           # FIXME: The owner UID of the GITHUB_WORKSPACE directory may not
@@ -43,7 +32,7 @@ jobs:
           echo "$HOME/.local/bin" >> $GITHUB_PATH

       - name: checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
         with:
           ref: ${{ github.event.pull_request.head.sha }}
           fetch-depth: 0
6  .github/workflows/issue_count.yml  vendored

@@ -42,10 +42,10 @@ jobs:
           path: ${{ env.OUTPUT_FILE_NAME }}

       - name: Configure AWS Credentials
-        uses: aws-actions/configure-aws-credentials@v1
+        uses: aws-actions/configure-aws-credentials@v2
         with:
-          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID_TESTING }}
-          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_TESTING }}
+          aws-access-key-id: ${{ vars.AWS_TESTING_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ secrets.AWS_TESTING_SECRET_ACCESS_KEY }}
           aws-region: us-east-1

       - name: Post Results
2  .github/workflows/license_check.yml  vendored

@@ -15,7 +15,7 @@ jobs:
       with:
         directory-to-scan: 'scan/'
     - name: Artifact Upload
-      uses: actions/upload-artifact@v1
+      uses: actions/upload-artifact@v3
      with:
        name: scancode
        path: ./artifacts
6  .github/workflows/manifest.yml  vendored

@@ -1,16 +1,14 @@
 name: Manifest
 on:
   pull_request_target:
-    paths:
-      - 'west.yml'

 jobs:
   contribs:
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-22.04
     name: Manifest
     steps:
       - name: Checkout the code
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
         with:
           path: zephyrproject/zephyr
           ref: ${{ github.event.pull_request.head.sha }}
5  .github/workflows/release.yml  vendored

@@ -9,13 +9,14 @@ jobs:
   release:
     runs-on: ubuntu-20.04
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
         with:
           fetch-depth: 0

       - name: Get the version
         id: get_version
-        run: echo ::set-output name=VERSION::${GITHUB_REF#refs/tags/}
+        run: |
+          echo "VERSION=${GITHUB_REF#refs/tags/}" >> $GITHUB_OUTPUT

       - name: REUSE Compliance Check
         uses: fsfe/reuse-action@v1
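The ::set-output workflow command on the left of these hunks is deprecated by GitHub; steps now append key=value pairs to the file named by $GITHUB_OUTPUT, as on the right. A minimal sketch of producing and consuming such an output (the step id and echoed value are illustrative; the mechanism matches the diffs above):

jobs:
  example:
    runs-on: ubuntu-22.04
    steps:
      - name: Get the version
        id: get_version
        run: |
          # Replaces: echo ::set-output name=VERSION::...
          echo "VERSION=${GITHUB_REF#refs/tags/}" >> $GITHUB_OUTPUT

      - name: Use the version
        run: echo "Releasing ${{ steps.get_version.outputs.VERSION }}"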
68  .github/workflows/twister.yaml  vendored

@@ -13,24 +13,18 @@ on:
     # Run at 00:00 on Wednesday and Saturday
     - cron: '0 0 * * 3,6'

+concurrency:
+  group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.head_ref || github.ref }}
+  cancel-in-progress: true
+
 jobs:
-  twister-build-cleanup:
-    runs-on: ubuntu-20.04
-    steps:
-      - name: Cancel Previous Runs
-        uses: styfle/cancel-workflow-action@0.6.0
-        with:
-          access_token: ${{ github.token }}
-
   twister-build-prep:
-    runs-on: zephyr_runner
-    needs: twister-build-cleanup
+    runs-on: zephyr-runner-linux-x64-4xlarge
     container:
-      image: ghcr.io/zephyrproject-rtos/ci:v0.24.2
+      image: ghcr.io/zephyrproject-rtos/ci:v0.24.3
       options: '--entrypoint /bin/bash'
       volumes:
-        - /home/runners/zephyrproject:/github/cache/zephyrproject
+        - /repo-cache/zephyrproject:/github/cache/zephyrproject
     outputs:
       subset: ${{ steps.output-services.outputs.subset }}
       size: ${{ steps.output-services.outputs.size }}
@@ -40,7 +34,6 @@ jobs:
       PUSH_MATRIX_SIZE: 15
       DAILY_MATRIX_SIZE: 80
       ZEPHYR_SDK_INSTALL_DIR: /opt/toolchains/zephyr-sdk-0.15.0
-      CLANG_ROOT_DIR: /usr/lib/llvm-12
       TESTS_PER_BUILDER: 700
       COMMIT_RANGE: ${{ github.event.pull_request.base.sha }}..${{ github.event.pull_request.head.sha }}
       BASE_REF: ${{ github.base_ref }}
@@ -53,14 +46,16 @@ jobs:
           # GitHub comes up with a fundamental fix for this problem.
           git config --global --add safe.directory ${GITHUB_WORKSPACE}

-      - name: Cleanup
+      - name: Clone cached Zephyr repository
+        if: github.event_name == 'pull_request_target'
+        continue-on-error: true
         run: |
-          # hotfix, until we have a better way to deal with existing data
-          rm -rf zephyr zephyr-testing
+          git clone --shared /github/cache/zephyrproject/zephyr .
+          git remote set-url origin ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY}

       - name: Checkout
         if: github.event_name == 'pull_request_target'
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
         with:
           ref: ${{ github.event.pull_request.head.sha }}
           fetch-depth: 0
@@ -113,26 +108,25 @@ jobs:
           else
             size=0
           fi
-          echo "::set-output name=subset::${subset}";
-          echo "::set-output name=size::${size}";
-          echo "::set-output name=fullrun::${TWISTER_FULL}";
+          echo "subset=${subset}" >> $GITHUB_OUTPUT
+          echo "size=${size}" >> $GITHUB_OUTPUT
+          echo "fullrun=${TWISTER_FULL}" >> $GITHUB_OUTPUT

   twister-build:
-    runs-on: zephyr_runner
+    runs-on: zephyr-runner-linux-x64-4xlarge
     needs: twister-build-prep
     if: needs.twister-build-prep.outputs.size != 0
     container:
-      image: ghcr.io/zephyrproject-rtos/ci:v0.24.2
+      image: ghcr.io/zephyrproject-rtos/ci:v0.24.3
       options: '--entrypoint /bin/bash'
       volumes:
-        - /home/runners/zephyrproject:/github/cache/zephyrproject
+        - /repo-cache/zephyrproject:/github/cache/zephyrproject
     strategy:
       fail-fast: false
       matrix:
         subset: ${{fromJSON(needs.twister-build-prep.outputs.subset)}}
     env:
       ZEPHYR_SDK_INSTALL_DIR: /opt/toolchains/zephyr-sdk-0.15.0
-      CLANG_ROOT_DIR: /usr/lib/llvm-12
       TWISTER_COMMON: ' --force-color --inline-logs -v -N -M --retry-failed 3 '
       DAILY_OPTIONS: ' -M --build-only --all'
       PR_OPTIONS: ' --clobber-output --integration'
@@ -148,13 +142,14 @@ jobs:
           # GitHub comes up with a fundamental fix for this problem.
           git config --global --add safe.directory ${GITHUB_WORKSPACE}

-      - name: Cleanup
+      - name: Clone cached Zephyr repository
+        continue-on-error: true
         run: |
-          # hotfix, until we have a better way to deal with existing data
-          rm -rf zephyr zephyr-testing
+          git clone --shared /github/cache/zephyrproject/zephyr .
+          git remote set-url origin ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY}

       - name: Checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
         with:
           ref: ${{ github.event.pull_request.head.sha }}
           fetch-depth: 0
@@ -179,7 +174,6 @@ jobs:
       - name: Check Environment
         run: |
           cmake --version
-          ${CLANG_ROOT_DIR}/bin/clang --version
           gcc --version
           ls -la
           echo "github.ref: ${{ github.ref }}"
@@ -193,18 +187,18 @@ jobs:
           string(TIMESTAMP current_date "%Y-%m-%d-%H;%M;%S" UTC)
           string(REPLACE "/" "_" repo ${{github.repository}})
           string(REPLACE "-" "_" repo2 ${repo})
-          message("::set-output name=repo::${repo2}")
+          file(APPEND $ENV{GITHUB_OUTPUT} "repo=${repo2}\n")

       - name: use cache
         id: cache-ccache
-        uses: zephyrproject-rtos/action-s3-cache@v1
+        uses: zephyrproject-rtos/action-s3-cache@v1.2.0
         continue-on-error: true
         with:
           key: ${{ steps.ccache_cache_timestamp.outputs.repo }}-${{ github.ref_name }}-${{github.event_name}}-${{ matrix.subset }}-ccache
           path: /github/home/.ccache
           aws-s3-bucket: ccache.zephyrproject.org
-          aws-access-key-id: ${{ secrets.CCACHE_S3_ACCESS_KEY_ID }}
-          aws-secret-access-key: ${{ secrets.CCACHE_S3_SECRET_ACCESS_KEY }}
+          aws-access-key-id: ${{ vars.AWS_CCACHE_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ secrets.AWS_CCACHE_SECRET_ACCESS_KEY }}
           aws-region: us-east-2

       - name: ccache stats initial
@@ -259,7 +253,7 @@ jobs:

       - name: Upload Unit Test Results
         if: always()
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v3
         with:
           name: Unit Test Results (Subset ${{ matrix.subset }})
           if-no-files-found: ignore
@@ -277,7 +271,7 @@ jobs:

     steps:
       - name: Download Artifacts
-        uses: actions/download-artifact@v2
+        uses: actions/download-artifact@v3
         with:
           path: artifacts

@@ -289,7 +283,7 @@ jobs:

       - name: Upload Unit Test Results in HTML
         if: always()
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v3
         with:
           name: HTML Unit Test Results
           if-no-files-found: ignore
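In the workflow above, twister-build-prep publishes subset and size as job outputs, and twister-build fans out by parsing the JSON-encoded subset list into its build matrix with fromJSON. A reduced sketch of that output-to-matrix wiring (job names and the hardcoded subset list are illustrative; the real workflow computes them from the test plan):

jobs:
  prep:
    runs-on: ubuntu-22.04
    outputs:
      subset: ${{ steps.plan.outputs.subset }}
    steps:
      - id: plan
        # A JSON array written as a plain string output.
        run: echo 'subset=[1,2,3]' >> $GITHUB_OUTPUT

  build:
    needs: prep
    runs-on: ubuntu-22.04
    strategy:
      fail-fast: false
      matrix:
        # fromJSON turns the string output back into a list of matrix values.
        subset: ${{ fromJSON(needs.prep.outputs.subset) }}
    steps:
      - run: echo "building subset ${{ matrix.subset }}"

Gating the fan-out job on the prep job's size output (if: needs.prep.outputs.size != 0 in the real workflow) skips the whole matrix when there is nothing to build.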
12  .github/workflows/twister_tests.yml  vendored

@@ -5,12 +5,18 @@ name: Twister TestSuite

 on:
   push:
+    branches:
+      - main
+      - v*-branch
     paths:
       - 'scripts/pylib/twister/**'
       - 'scripts/twister'
       - 'scripts/tests/twister/**'
       - '.github/workflows/twister_tests.yml'
   pull_request:
+    branches:
+      - main
+      - v*-branch
     paths:
       - 'scripts/pylib/twister/**'
       - 'scripts/twister'
@@ -27,14 +33,14 @@ jobs:
         os: [ubuntu-20.04]
     steps:
       - name: checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
       - name: Set up Python ${{ matrix.python-version }}
-        uses: actions/setup-python@v1
+        uses: actions/setup-python@v4
         with:
           python-version: ${{ matrix.python-version }}
       - name: cache-pip-linux
         if: startsWith(runner.os, 'Linux')
-        uses: actions/cache@v1
+        uses: actions/cache@v3
         with:
           path: ~/.cache/pip
           key: ${{ runner.os }}-pip-${{ matrix.python-version }}
16  .github/workflows/west_cmds.yml  vendored

@@ -5,11 +5,17 @@ name: Zephyr West Command Tests

 on:
   push:
+    branches:
+      - main
+      - v*-branch
     paths:
       - 'scripts/west-commands.yml'
       - 'scripts/west_commands/**'
       - '.github/workflows/west_cmds.yml'
   pull_request:
+    branches:
+      - main
+      - v*-branch
     paths:
       - 'scripts/west-commands.yml'
       - 'scripts/west_commands/**'
@@ -30,14 +36,14 @@ jobs:
             python-version: 3.6
     steps:
       - name: checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
      - name: Set up Python ${{ matrix.python-version }}
-        uses: actions/setup-python@v1
+        uses: actions/setup-python@v4
         with:
           python-version: ${{ matrix.python-version }}
       - name: cache-pip-linux
         if: startsWith(runner.os, 'Linux')
-        uses: actions/cache@v1
+        uses: actions/cache@v3
         with:
           path: ~/.cache/pip
           key: ${{ runner.os }}-pip-${{ matrix.python-version }}
@@ -45,7 +51,7 @@ jobs:
             ${{ runner.os }}-pip-${{ matrix.python-version }}
       - name: cache-pip-mac
         if: startsWith(runner.os, 'macOS')
-        uses: actions/cache@v1
+        uses: actions/cache@v3
         with:
           path: ~/Library/Caches/pip
           # Trailing '-' was just to get a different cache name
@@ -54,7 +60,7 @@ jobs:
             ${{ runner.os }}-pip-${{ matrix.python-version }}-
       - name: cache-pip-win
         if: startsWith(runner.os, 'Windows')
-        uses: actions/cache@v1
+        uses: actions/cache@v3
         with:
           path: ~\AppData\Local\pip\Cache
           key: ${{ runner.os }}-pip-${{ matrix.python-version }}
@@ -391,7 +391,9 @@ zephyr_compile_options(${COMPILER_OPT_AS_LIST})

 # TODO: Include arch compiler options at this point.

-if(NOT CMAKE_C_COMPILER_ID STREQUAL "Clang")
+if(NOT CMAKE_C_COMPILER_ID STREQUAL "Clang" AND
+   NOT CMAKE_C_COMPILER_ID STREQUAL "IntelLLVM" AND
+   NOT CMAKE_C_COMPILER_ID STREQUAL "ARMClang")
   # GCC assumed
   zephyr_cc_option(-fno-reorder-functions)
@@ -18,6 +18,7 @@
 		zephyr,console = &usart1;
 		zephyr,shell-uart = &usart1;
 		zephyr,sram = &sram0;
 		zephyr,flash = &flash0;
+		zephyr,code-partition = &slot1_ns_partition;
 	};
@@ -227,14 +227,14 @@ when you invoke ``west build`` or ``cmake`` in your Zephyr application. For
 example:

 .. zephyr-app-commands::
-   :zephyr-app: samples/drivers/can
+   :zephyr-app: samples/drivers/can/counter
    :tool: all
    :board: nrf52dk_nrf52832
    :shield: dfrobot_can_bus_v2_0
    :goals: build flash

 .. zephyr-app-commands::
-   :zephyr-app: samples/drivers/can
+   :zephyr-app: samples/drivers/can/counter
    :tool: all
    :board: nrf52840dk_nrf52840
    :shield: keyestudio_can_bus_ks0411
@@ -1,12 +1,18 @@
 # SPDX-License-Identifier: Apache-2.0

 if(DEFINED TOOLCHAIN_HOME)
-  set(find_program_clang_args PATHS ${TOOLCHAIN_HOME} ${ONEAPI_PYTHON_PATH} NO_DEFAULT_PATH)
+  set(find_program_clang_args PATHS ${TOOLCHAIN_HOME} ${ONEAPI_LLVM_BIN_PATH} NO_DEFAULT_PATH)
+  set(find_program_binutils_args PATHS ${TOOLCHAIN_HOME} )
 endif()

+find_package(oneApi 2023.0.0 REQUIRED)
+
 find_program(CMAKE_AR llvm-ar ${find_program_clang_args} )
-find_program(CMAKE_NM llvm-nm ${find_program_clang_args} )
+if(ONEAPI_VERSION VERSION_LESS_EQUAL "2023.0.0")
+  find_program(CMAKE_NM nm ${find_program_binutils_args} )
+else()
+  find_program(CMAKE_NM llvm-nm ${find_program_clang_args} )
+endif()
 # In OneApi installation directory on Windows, there is no llvm-objdump
 # binary, so would better use objdump from system environment both
 # on Linux and Windows.
@@ -38,6 +38,8 @@ else()
   elseif(CONFIG_FP_HARDABI)
     list(APPEND TOOLCHAIN_C_FLAGS -mfloat-abi=hard)
   endif()
+else()
+  list(APPEND TOOLCHAIN_C_FLAGS -mfloat-abi=soft)
 endif()
 endif()
@@ -29,6 +29,7 @@ check_set_compiler_property(PROPERTY warning_base
   -Wno-main
   -Wno-unused-but-set-variable
   -Wno-typedef-redefinition
+  -Wno-deprecated-non-prototype
 )

 check_set_compiler_property(APPEND PROPERTY warning_base -Wno-pointer-sign)
@@ -35,8 +35,6 @@ zephyr_linker_section_configure(SECTION initshell
 zephyr_linker_section(NAME log_dynamic GROUP DATA_REGION NOINPUT)
 zephyr_linker_section_configure(SECTION log_dynamic KEEP INPUT ".log_dynamic_*")

-zephyr_iterable_section(NAME _static_thread_data GROUP DATA_REGION ${XIP_ALIGN_WITH_INPUT} SUBALIGN 4)
-
 if(CONFIG_USERSPACE)
   # All kernel objects within are assumed to be either completely
   # initialized at build time, or initialized automatically at runtime
@@ -57,6 +55,7 @@ zephyr_iterable_section(NAME k_pipe GROUP DATA_REGION ${XIP_ALIGN_WITH_INPUT} SU
 zephyr_iterable_section(NAME k_sem GROUP DATA_REGION ${XIP_ALIGN_WITH_INPUT} SUBALIGN 4)
 zephyr_iterable_section(NAME k_queue GROUP DATA_REGION ${XIP_ALIGN_WITH_INPUT} SUBALIGN 4)
 zephyr_iterable_section(NAME k_condvar GROUP DATA_REGION ${XIP_ALIGN_WITH_INPUT} SUBALIGN 4)
+zephyr_iterable_section(NAME k_event GROUP DATA_REGION ${XIP_ALIGN_WITH_INPUT} SUBALIGN 4)

 zephyr_linker_section(NAME _net_buf_pool_area GROUP DATA_REGION NOINPUT ${XIP_ALIGN_WITH_INPUT} SUBALIGN 4)
 zephyr_linker_section_configure(SECTION _net_buf_pool_area
@@ -112,3 +111,22 @@ if(CONFIG_ZTEST_NEW_API)
   zephyr_iterable_section(NAME ztest_test_rule GROUP DATA_REGION ${XIP_ALIGN_WITH_INPUT} SUBALIGN 4)
   zephyr_iterable_section(NAME ztest_expected_result_entry GROUP DATA_REGION ${XIP_ALIGN_WITH_INPUT} SUBALIGN 4)
 endif()
+
+if(CONFIG_BT_MESH_ADV_EXT)
+  zephyr_iterable_section(NAME bt_mesh_ext_adv GROUP DATA_REGION ${XIP_ALIGN_WITH_INPUT} SUBALIGN 4)
+endif()
+
+if(CONFIG_LOG)
+  zephyr_iterable_section(NAME log_mpsc_pbuf GROUP DATA_REGION ${XIP_ALIGN_WITH_INPUT} SUBALIGN 4)
+  zephyr_iterable_section(NAME log_msg_ptr GROUP DATA_REGION ${XIP_ALIGN_WITH_INPUT} SUBALIGN 4)
+endif()
+
+if(CONFIG_PCIE)
+  zephyr_iterable_section(NAME pcie_dev GROUP DATA_REGION ${XIP_ALIGN_WITH_INPUT} SUBALIGN 4)
+endif()
+
+if(CONFIG_USB_DEVICE_STACK)
+  zephyr_iterable_section(NAME usb_cfg_data GROUP DATA_REGION ${XIP_ALIGN_WITH_INPUT} SUBALIGN 4)
+  zephyr_iterable_section(NAME usbd_contex GROUP DATA_REGION ${XIP_ALIGN_WITH_INPUT} SUBALIGN 4)
+  zephyr_iterable_section(NAME usbd_class_node GROUP DATA_REGION ${XIP_ALIGN_WITH_INPUT} SUBALIGN 4)
+endif()
@@ -162,9 +162,6 @@ zephyr_linker_section_configure(SECTION log_strings INPUT ".log_strings*" KEEP S
 zephyr_linker_section(NAME log_const KVMA RAM_REGION GROUP RODATA_REGION NOINPUT ${XIP_ALIGN_WITH_INPUT})
 zephyr_linker_section_configure(SECTION log_const INPUT ".log_const_*" KEEP SORT NAME)

-zephyr_linker_section(NAME log_backends KVMA RAM_REGION GROUP RODATA_REGION NOINPUT ${XIP_ALIGN_WITH_INPUT})
-zephyr_linker_section_configure(SECTION log_backends INPUT ".log_backends.*" KEEP)
-
 zephyr_iterable_section(NAME shell KVMA RAM_REGION GROUP RODATA_REGION SUBALIGN 4)

 zephyr_linker_section(NAME shell_root_cmds KVMA RAM_REGION GROUP RODATA_REGION NOINPUT ${XIP_ALIGN_WITH_INPUT})
@@ -181,3 +178,14 @@ zephyr_linker_section_configure(SECTION zephyr_dbg_info INPUT ".zephyr_dbg_info"
 zephyr_linker_section(NAME device_handles KVMA RAM_REGION GROUP RODATA_REGION NOINPUT ${XIP_ALIGN_WITH_INPUT} ENDALIGN 16)
 zephyr_linker_section_configure(SECTION device_handles INPUT .__device_handles_pass1* KEEP SORT NAME PASS LINKER_DEVICE_HANDLES_PASS1)
 zephyr_linker_section_configure(SECTION device_handles INPUT .__device_handles_pass2* KEEP SORT NAME PASS NOT LINKER_DEVICE_HANDLES_PASS1)
+
+zephyr_iterable_section(NAME _static_thread_data KVMA RAM_REGION GROUP RODATA_REGION SUBALIGN 4)
+
+if (CONFIG_BT_IAS)
+  zephyr_iterable_section(NAME bt_ias_cb KVMA RAM_REGION GROUP RODATA_REGION SUBALIGN 4)
+endif()
+
+if (CONFIG_LOG)
+  zephyr_iterable_section(NAME log_link KVMA RAM_REGION GROUP RODATA_REGION SUBALIGN 4)
+  zephyr_iterable_section(NAME log_backend KVMA RAM_REGION GROUP RODATA_REGION SUBALIGN 4)
+endif()
24  cmake/modules/FindoneApi.cmake  Normal file

@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: Apache-2.0
+#
+# Copyright (c) 2023 Intel Corporation
+#
+# FindoneApi module for locating oneAPI compiler, icx.
+#
+# The module defines the following variables:
+#
+# 'oneApi_FOUND', 'ONEAPI_FOUND'
+# True if the oneApi toolchain/compiler, icx, was found.
+#
+# 'ONEAPI_VERSION'
+# The version of the oneAPI toolchain.
+
+if(CMAKE_C_COMPILER)
+  # Parse the 'clang --version' output to find the installed version.
+  execute_process(COMMAND ${CMAKE_C_COMPILER} --version OUTPUT_VARIABLE ONEAPI_VERSION)
+  string(REGEX REPLACE "[^0-9]*([0-9.]+) .*" "\\1" ONEAPI_VERSION ${ONEAPI_VERSION})
+endif()
+
+find_package_handle_standard_args(oneApi
+  REQUIRED_VARS CMAKE_C_COMPILER
+  VERSION_VAR ONEAPI_VERSION
+)
@@ -514,7 +514,7 @@ function(zephyr_library_cc_option)
     string(MAKE_C_IDENTIFIER check${option} check)
     zephyr_check_compiler_flag(C ${option} ${check})

-    if(${check})
+    if(${${check}})
       zephyr_library_compile_options(${option})
     endif()
   endforeach()
@@ -1014,9 +1014,9 @@ endfunction()
 function(zephyr_check_compiler_flag lang option check)
   # Check if the option is covered by any hardcoded check before doing
   # an automated test.
-  zephyr_check_compiler_flag_hardcoded(${lang} "${option}" check exists)
+  zephyr_check_compiler_flag_hardcoded(${lang} "${option}" _${check} exists)
   if(exists)
-    set(check ${check} PARENT_SCOPE)
+    set(${check} ${_${check}} PARENT_SCOPE)
     return()
   endif()
@@ -1121,11 +1121,11 @@ function(zephyr_check_compiler_flag_hardcoded lang option check exists)
   # because they would produce a warning instead of an error during
   # the test. Exclude them by toolchain-specific blocklist.
   if((${lang} STREQUAL CXX) AND ("${option}" IN_LIST CXX_EXCLUDED_OPTIONS))
-    set(check 0 PARENT_SCOPE)
-    set(exists 1 PARENT_SCOPE)
+    set(${check} 0 PARENT_SCOPE)
+    set(${exists} 1 PARENT_SCOPE)
   else()
     # There does not exist a hardcoded check for this option.
-    set(exists 0 PARENT_SCOPE)
+    set(${exists} 0 PARENT_SCOPE)
   endif()
 endfunction(zephyr_check_compiler_flag_hardcoded)
@@ -1968,7 +1968,7 @@ function(check_set_linker_property)
   zephyr_check_compiler_flag(C "" ${check})
   set(CMAKE_REQUIRED_FLAGS ${SAVED_CMAKE_REQUIRED_FLAGS})

-  if(${check})
+  if(${${check}})
     set_property(TARGET ${LINKER_PROPERTY_TARGET} ${APPEND} PROPERTY ${property} ${option})
   endif()
 endfunction()
@@ -10,7 +10,7 @@ endif()
 string(TOLOWER ${CMAKE_HOST_SYSTEM_NAME} system)
 if(ONEAPI_TOOLCHAIN_PATH)
   set(TOOLCHAIN_HOME ${ONEAPI_TOOLCHAIN_PATH}/compiler/latest/${system}/bin/)
-  set(ONEAPI_PYTHON_PATH ${ONEAPI_TOOLCHAIN_PATH}/intelpython/latest/bin)
+  set(ONEAPI_LLVM_BIN_PATH ${ONEAPI_TOOLCHAIN_PATH}/compiler/latest/${system}/bin-llvm)
 endif()

 set(ONEAPI_TOOLCHAIN_PATH ${ONEAPI_TOOLCHAIN_PATH} CACHE PATH "oneApi install directory")
@@ -43,4 +43,6 @@ elseif(system STREQUAL "windows")
   add_link_options(--target=${triple})
 endif()

+set(TOOLCHAIN_HAS_NEWLIB OFF CACHE BOOL "True if toolchain supports newlib")
+
 message(STATUS "Found toolchain: host (clang/ld)")
@@ -7,3 +7,4 @@ config CAN_SAM
 	default y
 	depends on DT_HAS_ATMEL_SAM_CAN_ENABLED
 	select CAN_MCAN
+	select CACHE_MANAGEMENT
@@ -1018,35 +1018,34 @@ static int mcp2515_init(const struct device *dev)
 	return ret;
 }

-#if DT_NODE_HAS_STATUS(DT_DRV_INST(0), okay)
+#define MCP2515_INIT(inst) \
+	static K_KERNEL_STACK_DEFINE(mcp2515_int_thread_stack_##inst, \
+				     CONFIG_CAN_MCP2515_INT_THREAD_STACK_SIZE); \
+	\
+	static struct mcp2515_data mcp2515_data_##inst = { \
+		.int_thread_stack = mcp2515_int_thread_stack_##inst, \
+		.tx_busy_map = 0U, \
+		.filter_usage = 0U, \
+	}; \
+	\
+	static const struct mcp2515_config mcp2515_config_##inst = { \
+		.bus = SPI_DT_SPEC_INST_GET(inst, SPI_WORD_SET(8), 0), \
+		.int_gpio = GPIO_DT_SPEC_INST_GET(inst, int_gpios), \
+		.int_thread_stack_size = CONFIG_CAN_MCP2515_INT_THREAD_STACK_SIZE, \
+		.int_thread_priority = CONFIG_CAN_MCP2515_INT_THREAD_PRIO, \
+		.tq_sjw = DT_INST_PROP(inst, sjw), \
+		.tq_prop = DT_INST_PROP_OR(inst, prop_seg, 0), \
+		.tq_bs1 = DT_INST_PROP_OR(inst, phase_seg1, 0), \
+		.tq_bs2 = DT_INST_PROP_OR(inst, phase_seg2, 0), \
+		.bus_speed = DT_INST_PROP(inst, bus_speed), \
+		.osc_freq = DT_INST_PROP(inst, osc_freq), \
+		.sample_point = DT_INST_PROP_OR(inst, sample_point, 0), \
+		.phy = DEVICE_DT_GET_OR_NULL(DT_INST_PHANDLE(inst, phys)), \
+		.max_bitrate = DT_INST_CAN_TRANSCEIVER_MAX_BITRATE(inst, 1000000), \
+	}; \
+	\
+	DEVICE_DT_INST_DEFINE(inst, &mcp2515_init, NULL, &mcp2515_data_##inst, \
+			      &mcp2515_config_##inst, POST_KERNEL, CONFIG_CAN_INIT_PRIORITY, \
+			      &can_api_funcs);

-static K_KERNEL_STACK_DEFINE(mcp2515_int_thread_stack,
-			     CONFIG_CAN_MCP2515_INT_THREAD_STACK_SIZE);
-
-static struct mcp2515_data mcp2515_data_1 = {
-	.int_thread_stack = mcp2515_int_thread_stack,
-	.tx_busy_map = 0U,
-	.filter_usage = 0U,
-};
-
-static const struct mcp2515_config mcp2515_config_1 = {
-	.bus = SPI_DT_SPEC_INST_GET(0, SPI_WORD_SET(8), 0),
-	.int_gpio = GPIO_DT_SPEC_INST_GET(0, int_gpios),
-	.int_thread_stack_size = CONFIG_CAN_MCP2515_INT_THREAD_STACK_SIZE,
-	.int_thread_priority = CONFIG_CAN_MCP2515_INT_THREAD_PRIO,
-	.tq_sjw = DT_INST_PROP(0, sjw),
-	.tq_prop = DT_INST_PROP_OR(0, prop_seg, 0),
-	.tq_bs1 = DT_INST_PROP_OR(0, phase_seg1, 0),
-	.tq_bs2 = DT_INST_PROP_OR(0, phase_seg2, 0),
-	.bus_speed = DT_INST_PROP(0, bus_speed),
-	.osc_freq = DT_INST_PROP(0, osc_freq),
-	.sample_point = DT_INST_PROP_OR(0, sample_point, 0),
-	.phy = DEVICE_DT_GET_OR_NULL(DT_INST_PHANDLE(0, phys)),
-	.max_bitrate = DT_INST_CAN_TRANSCEIVER_MAX_BITRATE(0, 1000000),
-};
-
-DEVICE_DT_INST_DEFINE(0, &mcp2515_init, NULL,
-		      &mcp2515_data_1, &mcp2515_config_1, POST_KERNEL,
-		      CONFIG_CAN_INIT_PRIORITY, &can_api_funcs);
-
-#endif /* DT_NODE_HAS_STATUS(DT_DRV_INST(0), okay) */
+DT_INST_FOREACH_STATUS_OKAY(MCP2515_INIT)
@@ -83,8 +83,9 @@ struct can_sja1000_config {
 	.phase_seg1 = DT_PROP_OR(node_id, phase_seg1, 0), \
 	.phase_seg2 = DT_PROP_OR(node_id, phase_seg2, 0), \
 	.sample_point = DT_PROP_OR(node_id, sample_point, 0), \
-	.max_bitrate = DT_CAN_TRANSCEIVER_MAX_BITRATE(node_id, 1000000), .ocr = _ocr, \
-	.cdr = _cdr, .custom = _custom, \
+	.max_bitrate = DT_CAN_TRANSCEIVER_MAX_BITRATE(node_id, 1000000), \
+	.phy = DEVICE_DT_GET_OR_NULL(DT_PHANDLE(node_id, phys)), \
+	.ocr = _ocr, .cdr = _cdr, .custom = _custom, \
 	}

 #define CAN_SJA1000_DT_CONFIG_INST_GET(inst, _custom, _read_reg, _write_reg, _ocr, _cdr) \
@@ -490,7 +490,7 @@ static void uart_console_isr(const struct device *unused, void *user_data)
 	}

 	/* Handle special control characters */
-	if (!isprint(byte)) {
+	if (isprint(byte) == 0) {
 		switch (byte) {
 		case BS:
 		case DEL:
@@ -43,6 +43,19 @@ config INTC_INIT_PRIORITY
 	help
 	  Interrupt controller device initialization priority.

+if MCHP_ECIA_XEC
+
+config XEC_GIRQ_INIT_PRIORITY
+	int "XEX GIRQ Interrupt controller init priority"
+	default 41
+	help
+	  XEC GIRQ Interrupt controller device initialization priority.
+	  The priority value needs to be greater than INTC_INIT_PRIORITY
+	  So that the XEC GIRQ controllers are initialized after the
+	  xec_ecia.
+
+endif
+
 module = INTC
 module-str = intc
 source "subsys/logging/Kconfig.template.log_config"
@@ -572,7 +572,7 @@ static int xec_ecia_init(const struct device *dev)
 									\
 	DEVICE_DT_DEFINE(n, xec_girq_init_##n,				\
 		NULL, &xec_data_girq_##n, &xec_config_girq_##n,		\
-		PRE_KERNEL_1, CONFIG_INTC_INIT_PRIORITY,		\
+		PRE_KERNEL_1, CONFIG_XEC_GIRQ_INIT_PRIORITY,		\
 		NULL);							\
 									\
 	static int xec_girq_init_##n(const struct device *dev)		\
@@ -45,6 +45,8 @@ struct mcux_ftm_capture_data {
|
||||
pwm_capture_callback_handler_t callback;
|
||||
void *user_data;
|
||||
uint32_t first_edge_overflows;
|
||||
uint16_t first_edge_cnt;
|
||||
bool first_edge_overflow;
|
||||
bool pulse_capture;
|
||||
};
|
||||
|
||||
@@ -270,8 +272,8 @@ static int mcux_ftm_disable_capture(const struct device *dev, uint32_t channel)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void mcux_ftm_capture_first_edge(const struct device *dev,
|
||||
uint32_t channel)
|
||||
static void mcux_ftm_capture_first_edge(const struct device *dev, uint32_t channel,
|
||||
uint16_t cnt, bool overflow)
|
||||
{
|
||||
const struct mcux_ftm_config *config = dev->config;
|
||||
struct mcux_ftm_data *data = dev->data;
|
||||
@@ -282,11 +284,17 @@ static void mcux_ftm_capture_first_edge(const struct device *dev,
|
||||
capture = &data->capture[pair];
|
||||
|
||||
FTM_DisableInterrupts(config->base, BIT(PAIR_1ST_CH(pair)));
|
||||
|
||||
capture->first_edge_cnt = cnt;
|
||||
capture->first_edge_overflows = data->overflows;
|
||||
capture->first_edge_overflow = overflow;
|
||||
|
||||
LOG_DBG("pair = %d, 1st cnt = %u, 1st ovf = %d", pair, cnt, overflow);
|
||||
}
|
||||
|
||||
-static void mcux_ftm_capture_second_edge(const struct device *dev,
-					 uint32_t channel)
+static void mcux_ftm_capture_second_edge(const struct device *dev, uint32_t channel,
+					 uint16_t cnt, bool overflow)
 {
 	const struct mcux_ftm_config *config = dev->config;
 	struct mcux_ftm_data *data = dev->data;
@@ -305,13 +313,27 @@ static void mcux_ftm_capture_second_edge(const struct device *dev,
 	first_cnv = config->base->CONTROLS[PAIR_1ST_CH(pair)].CnV;
 	second_cnv = config->base->CONTROLS[PAIR_2ND_CH(pair)].CnV;
 
 	/* Prepare for next capture */
 	if (capture->param.mode == kFTM_Continuous) {
+		if (capture->pulse_capture) {
+			/* Clear both edge flags for pulse capture to capture first edge overflow counter */
+			FTM_ClearStatusFlags(config->base, BIT(PAIR_1ST_CH(pair)) | BIT(PAIR_2ND_CH(pair)));
+		} else {
+			/* Only clear second edge flag for period capture as next first edge is this edge */
+			FTM_ClearStatusFlags(config->base, BIT(PAIR_2ND_CH(pair)));
+		}
 	}
 
 	/* Calculate cycles, check for overflows */
+	if (unlikely(capture->first_edge_overflow && first_cnv > capture->first_edge_cnt)) {
+		/* Compensate for the overflow registered in the same IRQ */
+		capture->first_edge_overflows--;
+	}
+
+	if (unlikely(overflow && second_cnv > cnt)) {
+		/* Compensate for the overflow registered in the same IRQ */
+		second_edge_overflows--;
+	}
+
+	overflows = second_edge_overflows - capture->first_edge_overflows;
+
 	if (overflows > 0) {
 		if (u32_mul_overflow(overflows, config->base->MOD, &cycles)) {
 			LOG_ERR("overflow while calculating cycles");
@@ -328,8 +350,10 @@ static void mcux_ftm_capture_second_edge(const struct device *dev,
 		cycles = second_cnv - first_cnv;
 	}
 
-	LOG_DBG("pair = %d, overflows = %u, cycles = %u", pair, overflows,
-		cycles);
+	LOG_DBG("pair = %d, 1st ovfs = %u, 2nd ovfs = %u, ovfs = %u, 1st cnv = %u, "
+		"2nd cnv = %u, cycles = %u, 2nd cnt = %u, 2nd ovf = %d",
+		pair, capture->first_edge_overflows, second_edge_overflows, overflows, first_cnv,
+		second_cnv, cycles, cnt, overflow);
 
 	if (capture->pulse_capture) {
 		capture->callback(dev, pair, 0, cycles, status,
@@ -340,9 +364,16 @@ static void mcux_ftm_capture_second_edge(const struct device *dev,
 	}
 
 	if (capture->param.mode == kFTM_OneShot) {
 		/* One-shot capture done */
 		FTM_DisableInterrupts(config->base, BIT(PAIR_2ND_CH(pair)));
-	} else {
+	} else if (capture->pulse_capture) {
 		/* Prepare for first edge of next pulse capture */
 		FTM_EnableInterrupts(config->base, BIT(PAIR_1ST_CH(pair)));
+	} else {
+		/* First edge of next period capture is second edge of this capture (this edge) */
+		capture->first_edge_cnt = cnt;
+		capture->first_edge_overflows = second_edge_overflows;
+		capture->first_edge_overflow = false;
 	}
 }
@@ -350,24 +381,28 @@ static void mcux_ftm_isr(const struct device *dev)
 {
 	const struct mcux_ftm_config *config = dev->config;
 	struct mcux_ftm_data *data = dev->data;
+	bool overflow = false;
 	uint32_t flags;
 	uint32_t irqs;
+	uint16_t cnt;
 	uint32_t ch;
 
 	flags = FTM_GetStatusFlags(config->base);
 	irqs = FTM_GetEnabledInterrupts(config->base);
+	cnt = config->base->CNT;
 
 	if (flags & kFTM_TimeOverflowFlag) {
 		data->overflows++;
+		overflow = true;
 		FTM_ClearStatusFlags(config->base, kFTM_TimeOverflowFlag);
 	}
 
 	for (ch = 0; ch < MAX_CHANNELS; ch++) {
 		if ((flags & BIT(ch)) && (irqs & BIT(ch))) {
 			if (ch & 1) {
-				mcux_ftm_capture_second_edge(dev, ch);
+				mcux_ftm_capture_second_edge(dev, ch, cnt, overflow);
 			} else {
-				mcux_ftm_capture_first_edge(dev, ch);
+				mcux_ftm_capture_first_edge(dev, ch, cnt, overflow);
 			}
 		}
 	}
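Taken together, these FTM hunks make capture overflow-aware: the ISR snapshots CNT and the overflow flag once per interrupt, and the second-edge handler corrects each edge's overflow count before converting to cycles. A minimal sketch of the resulting arithmetic, with names borrowed from the driver (an illustration, not the driver code itself; the driver additionally guards the multiply with u32_mul_overflow()):

```c
#include <stdint.h>

/* Ticks between two captured edges of a wrapping 16-bit counter: full wraps
 * seen between the edges (at "mod" ticks each) plus the CnV latch delta.
 * Assumes per-edge overflow counts were already corrected for a wrap landing
 * in the same IRQ as the capture, as the hunks above do.
 */
static uint32_t capture_ticks(uint32_t first_ovfs, uint16_t first_cnv,
			      uint32_t second_ovfs, uint16_t second_cnv,
			      uint32_t mod)
{
	uint32_t overflows = second_ovfs - first_ovfs;

	if (overflows > 0) {
		/* e.g. one wrap, first_cnv = 60000, second_cnv = 100,
		 * mod = 65536: 1 * 65536 + 100 - 60000 = 5636 ticks
		 */
		return overflows * mod + second_cnv - first_cnv;
	}

	return (uint32_t)(second_cnv - first_cnv);
}
```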
@@ -8,6 +8,7 @@ config MCUX_LPTMR_TIMER
 	default y
 	depends on DT_HAS_NXP_KINETIS_LPTMR_ENABLED
 	depends on !COUNTER_MCUX_LPTMR
 	depends on PM
 	select SYSTEM_TIMER_HAS_DISABLE_SUPPORT
 	help
 	  This module implements a kernel device driver for the NXP MCUX Low
@@ -237,4 +237,10 @@ config ESP32_WIFI_NET_ALLOC_SPIRAM
 	  Allocate memory of WiFi and NET stack in SPIRAM, increasing available RAM memory space
 	  for application stack.
 
+config ESP_WIFI_SOFTAP_SUPPORT
+	bool
+	default y
+	help
+	  Hidden option to enable Wi-Fi SoftAP functions in WPA supplicant and RF libraries.
+
 endif # WIFI_ESP32
@@ -706,8 +706,8 @@
 * Example usage:
 *
 * @code{.c}
- *     DT_PROP_HAS_NAME(nx, foos, event)   // 1
- *     DT_PROP_HAS_NAME(nx, foos, failure) // 0
+ *     DT_PROP_HAS_NAME(DT_NODELABEL(nx), foos, event)   // 1
+ *     DT_PROP_HAS_NAME(DT_NODELABEL(nx), foos, failure) // 0
 * @endcode
 *
 * @param node_id node identifier
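The fix matters because the macro expects a node identifier, not a bare node label, so the label has to be wrapped in DT_NODELABEL(). A hedged devicetree fragment consistent with the corrected example (node and property names are illustrative only):

```dts
nx: node {
	foos = <&gpioa 0>, <&gpiob 1>;
	foo-names = "event", "error";
};
```

With this fragment, `DT_PROP_HAS_NAME(DT_NODELABEL(nx), foos, event)` expands to 1 and the `failure` lookup to 0, since only "event" appears in `foo-names`.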
@@ -14,9 +14,4 @@
 	__log_const_end = .;
 } GROUP_ROM_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
 
-SECTION_DATA_PROLOGUE(log_backends_sections,,)
-{
-	__log_backends_start = .;
-	KEEP(*("._log_backend.*"));
-	__log_backends_end = .;
-} GROUP_ROM_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
+ITERABLE_SECTION_ROM(log_backend, 4)
@@ -94,9 +94,6 @@ struct log_backend {
 	bool autostart;
 };
 
-extern const struct log_backend __log_backends_start[];
-extern const struct log_backend __log_backends_end[];
-
 /**
 * @brief Macro for creating a logger backend instance.
 *
@@ -246,7 +243,11 @@ static inline uint8_t log_backend_id_get(const struct log_backend *const backend
 */
 static inline const struct log_backend *log_backend_get(uint32_t idx)
 {
-	return &__log_backends_start[idx];
+	const struct log_backend *backend;
+
+	STRUCT_SECTION_GET(log_backend, idx, &backend);
+
+	return backend;
 }
 
 /**
@@ -256,7 +257,11 @@ static inline const struct log_backend *log_backend_get(uint32_t idx)
 */
 static inline int log_backend_count_get(void)
 {
-	return __log_backends_end - __log_backends_start;
+	int cnt;
+
+	STRUCT_SECTION_COUNT(log_backend, &cnt);
+
+	return cnt;
 }
 
 /**
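These accessors retire the hand-maintained `__log_backends_start`/`__log_backends_end` linker symbols in favor of Zephyr's generic iterable-section macros, matching the `ITERABLE_SECTION_ROM()` change in the linker script above. A hedged sketch of the resulting idiom for walking the registered backends (illustrative, not a specific Zephyr file):

```c
#include <zephyr/kernel.h>
#include <zephyr/logging/log_backend.h>

void dump_backend_names(void)
{
	int cnt;

	/* number of log_backend instances placed in the iterable section */
	STRUCT_SECTION_COUNT(log_backend, &cnt);
	printk("%d backends\n", cnt);

	/* STRUCT_SECTION_FOREACH declares the cursor variable itself */
	STRUCT_SECTION_FOREACH(log_backend, backend) {
		printk("  %s\n", backend->name);
	}
}
```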
@@ -1,5 +1,6 @@
 /*
 * Copyright Runtime.io 2018. All rights reserved.
+ * Copyright (c) 2022 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */
@@ -69,6 +70,19 @@ typedef int zephyr_smp_transport_ud_copy_fn(struct net_buf *dst,
 */
 typedef void zephyr_smp_transport_ud_free_fn(void *ud);
 
+/** @typedef zephyr_smp_transport_query_valid_check_fn
+ * @brief Function for checking if queued data is still valid.
+ *
+ * This function is used to check if queued SMP data is still valid, e.g. on a remote device
+ * disconnecting; this is triggered when ``smp_rx_clear`` is called.
+ *
+ * @param nb net buf containing queued request.
+ * @param arg Argument provided when calling smp_rx_clear() function.
+ *
+ * @return false if data is no longer valid/should be freed, true otherwise.
+ */
+typedef bool zephyr_smp_transport_query_valid_check_fn(struct net_buf *nb, void *arg);
+
 /**
 * @brief Provides Zephyr-specific functionality for sending SMP responses.
 */
@@ -83,6 +97,7 @@ struct zephyr_smp_transport {
 	zephyr_smp_transport_get_mtu_fn *zst_get_mtu;
 	zephyr_smp_transport_ud_copy_fn *zst_ud_copy;
 	zephyr_smp_transport_ud_free_fn *zst_ud_free;
+	zephyr_smp_transport_query_valid_check_fn *zst_query_valid_check;
 
 #ifdef CONFIG_MCUMGR_SMP_REASSEMBLY
 	/* Packet reassembly internal data, API access only */
@@ -96,17 +111,31 @@ struct zephyr_smp_transport {
 /**
 * @brief Initializes a Zephyr SMP transport object.
 *
- * @param zst The transport to construct.
- * @param output_func The transport's send function.
- * @param get_mtu_func The transport's get-MTU function.
- * @param ud_copy_func The transport buffer user_data copy function.
- * @param ud_free_func The transport buffer user_data free function.
+ * @param zst The transport to construct.
+ * @param output_func The transport's send function.
+ * @param get_mtu_func The transport's get-MTU function.
+ * @param ud_copy_func The transport buffer user_data copy function.
+ * @param ud_free_func The transport buffer user_data free function.
+ * @param query_valid_check_func The transport's check function for if data should be cleared.
 */
 void zephyr_smp_transport_init(struct zephyr_smp_transport *zst,
			       zephyr_smp_transport_out_fn *output_func,
			       zephyr_smp_transport_get_mtu_fn *get_mtu_func,
			       zephyr_smp_transport_ud_copy_fn *ud_copy_func,
-			       zephyr_smp_transport_ud_free_fn *ud_free_func);
+			       zephyr_smp_transport_ud_free_fn *ud_free_func,
+			       zephyr_smp_transport_query_valid_check_fn *query_valid_check_func);
+
+/**
+ * @brief Used to remove queued requests for an SMP transport that are no longer valid. A
+ *        ``zephyr_smp_transport_query_valid_check_fn`` function must be registered for this
+ *        to function. If the ``zephyr_smp_transport_query_valid_check_fn`` function returns
+ *        false during a callback, the queried command will be classed as invalid and dropped.
+ *
+ * @param zst The transport to use.
+ * @param arg Argument provided to callback ``zephyr_smp_transport_query_valid_check_fn``
+ *            function.
+ */
+void smp_rx_remove_invalid(struct zephyr_smp_transport *zst, void *arg);
 
 #ifdef __cplusplus
 }
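A transport that wants its receive queue scrubbed on link loss now passes a sixth argument to zephyr_smp_transport_init() and calls smp_rx_remove_invalid() when a link drops. A hedged sketch under assumed names (my_tx_pkt, my_get_mtu, my_valid_check and my_link_down are placeholders, not real API; the user-data comparison is deliberately simplistic):

```c
static int my_tx_pkt(struct net_buf *nb)
{
	/* ... transmit nb over the link ... */
	mcumgr_buf_free(nb);
	return 0;
}

static uint16_t my_get_mtu(const struct net_buf *nb)
{
	ARG_UNUSED(nb);
	return 128;
}

/* Return false for buffers that belong to the link passed via arg. */
static bool my_valid_check(struct net_buf *nb, void *arg)
{
	return net_buf_user_data(nb) != arg;
}

static struct zephyr_smp_transport my_transport;

void my_transport_register(void)
{
	zephyr_smp_transport_init(&my_transport, my_tx_pkt, my_get_mtu,
				  NULL, NULL, my_valid_check);
}

void my_link_down(void *link)
{
	/* drops every queued request that my_valid_check() rejects */
	smp_rx_remove_invalid(&my_transport, link);
}
```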
@@ -199,10 +199,11 @@ struct net_conn_handle;
 * anyway. This saves 12 bytes / context in IPv6.
 */
 __net_socket struct net_context {
-	/** User data.
-	 *
-	 * First member of the structure to let users either have user data
-	 * associated with a context, or put contexts into a FIFO.
+	/** First member of the structure to allow to put contexts into a FIFO.
 	 */
+	void *fifo_reserved;
+
+	/** User data associated with a context.
+	 */
 	void *user_data;
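The split matters because k_fifo stores its link pointer in the first word of whatever item is queued, so a first member that doubles as user data gets clobbered while the item sits in a FIFO. The constraint in isolation:

```c
/* Any item placed in a k_fifo must donate its first word to the queue. */
struct fifo_friendly_item {
	void *fifo_reserved; /* clobbered while the item is queued */
	void *user_data;     /* untouched by the FIFO, safe for users */
};
```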
@@ -1430,6 +1430,10 @@ uint32_t net_if_ipv6_calc_reachable_time(struct net_if_ipv6 *ipv6);
 static inline void net_if_ipv6_set_reachable_time(struct net_if_ipv6 *ipv6)
 {
 #if defined(CONFIG_NET_NATIVE_IPV6)
+	if (ipv6 == NULL) {
+		return;
+	}
+
 	ipv6->reachable_time = net_if_ipv6_calc_reachable_time(ipv6);
 #endif
 }
@@ -379,6 +379,20 @@ do { \
 	_name##_buf32)))
 #endif
 
+/* When the first argument of Z_CBPRINTF_STATIC_PACKAGE_GENERIC() is a
+ * static memory location, some compilers warn if you compare the
+ * location against NULL. ___is_null() is used to kill this warning.
+ *
+ * The warnings would be visible when built with -save-temps=obj,
+ * our standard debugging tip for macro problems.
+ *
+ * https://github.com/zephyrproject-rtos/zephyr/issues/51528
+ */
+static ALWAYS_INLINE bool ___is_null(void *p)
+{
+	return p == NULL;
+}
+
 /** @brief Statically package a formatted string with arguments.
 *
 * @param buf buffer. If null then only length is calculated.
@@ -427,7 +441,7 @@ do { \
 	Z_CBPRINTF_ON_STACK_ALLOC(_ros_pos_buf, _ros_cnt); \
 	uint8_t *_rws_buffer; \
 	Z_CBPRINTF_ON_STACK_ALLOC(_rws_buffer, 2 * _rws_cnt); \
-	size_t _pmax = (buf != NULL) ? _inlen : INT32_MAX; \
+	size_t _pmax = !___is_null(buf) ? _inlen : INT32_MAX; \
 	int _pkg_len = 0; \
 	int _total_len = 0; \
 	int _pkg_offset = _align_offset; \
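The helper exists purely to defeat a compile-time diagnostic: when `buf` expands to the address of a static object, `buf != NULL` is trivially true and GCC-style `-Waddress` warnings fire; routing the test through a function hides the constant address from the front end while producing the same result. A hedged illustration:

```c
#include <stdbool.h>

static char fixed_buf[16];

void example(void)
{
	/* may warn: the address of a static array can never be NULL */
	bool direct = ((void *)fixed_buf != NULL);

	/* same result, but the comparison happens behind a call boundary */
	bool hidden = !___is_null(fixed_buf);

	(void)direct;
	(void)hidden;
}
```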
@@ -4,6 +4,8 @@
 * SPDX-License-Identifier: Apache-2.0
 */
 
+#include <errno.h>
+
 #include <zephyr/kernel.h>
 #include <zephyr/posix/unistd.h>
 
@@ -14,8 +16,12 @@
 */
 unsigned sleep(unsigned int seconds)
 {
-	k_sleep(K_SECONDS(seconds));
-	return 0;
+	int rem;
+
+	rem = k_sleep(K_SECONDS(seconds));
+	__ASSERT_NO_MSG(rem >= 0);
+
+	return rem / MSEC_PER_SEC;
 }
 
 /**
 * @brief Suspend execution for microsecond intervals.
@@ -24,10 +30,19 @@ unsigned sleep(unsigned int seconds)
 */
 int usleep(useconds_t useconds)
 {
-	if (useconds < USEC_PER_MSEC) {
-		k_busy_wait(useconds);
-	} else {
-		k_msleep(useconds / USEC_PER_MSEC);
+	int32_t rem;
+
+	if (useconds >= USEC_PER_SEC) {
+		errno = EINVAL;
+		return -1;
 	}
 
+	rem = k_usleep(useconds);
+	__ASSERT_NO_MSG(rem >= 0);
+	if (rem > 0) {
+		/* sleep was interrupted by a call to k_wakeup() */
+		errno = EINTR;
+		return -1;
+	}
+
+	return 0;
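A hedged usage sketch of the revised semantics: sleep() now reports the unslept remainder in whole seconds, and usleep() follows the POSIX rule that arguments of one second or more may be rejected with EINVAL (and early wakeups surface as EINTR):

```c
#include <errno.h>
#include <zephyr/posix/unistd.h>

void sleep_semantics_demo(void)
{
	/* 0 on a full sleep; otherwise the seconds that were left */
	unsigned int rem = sleep(10);

	if (rem > 0) {
		/* woken early, e.g. via k_wakeup(); >= rem seconds remained */
	}

	/* usleep() arguments must stay below one second */
	if (usleep(1000000) == -1 && errno == EINVAL) {
		/* rejected per POSIX */
	}
}
```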
@@ -20,7 +20,7 @@ or Nucleo-F746ZG board.
 For the NXP TWR-KE18F board:
 
 .. zephyr-app-commands::
-   :zephyr-app: samples/drivers/can
+   :zephyr-app: samples/subsys/canbus/isotp
    :board: twr_ke18f
    :goals: build flash
 
@@ -154,6 +154,12 @@ void main(void)
 		return;
 	}
 
+	ret = can_start(can_dev);
+	if (ret != 0) {
+		printk("CAN: Failed to start device [%d]\n", ret);
+		return;
+	}
+
 	tid = k_thread_create(&rx_8_0_thread_data, rx_8_0_thread_stack,
 			      K_THREAD_STACK_SIZEOF(rx_8_0_thread_stack),
 			      rx_8_0_thread, NULL, NULL, NULL,
 
@@ -18,7 +18,7 @@ CONFIG_MCUMGR_SMP_SHELL=y
 CONFIG_MCUMGR_SMP_REASSEMBLY_BT=y
 CONFIG_MCUMGR_BUF_SIZE=2475
 CONFIG_OS_MGMT_MCUMGR_PARAMS=y
-CONFIG_SYSTEM_WORKQUEUE_STACK_SIZE=4096
+CONFIG_MCUMGR_SMP_WORKQUEUE_STACK_SIZE=4608
 
 # Enable the LittleFS file system.
 CONFIG_FILE_SYSTEM=y
@@ -30,3 +30,6 @@ CONFIG_MCUMGR_CMD_FS_MGMT=y
 # Enable the storage erase command.
 CONFIG_MCUMGR_GRP_ZEPHYR_BASIC=y
 CONFIG_MCUMGR_GRP_BASIC_CMD_STORAGE_ERASE=y
+
+# Network settings
+CONFIG_NET_BUF_USER_DATA_SIZE=8
 
@@ -134,7 +134,7 @@ static int cmd_demo_getopt_ts(const struct shell *sh, size_t argc,
 			shell_print(sh,
				    "Option -%c requires an argument.",
				    state->optopt);
-		} else if (isprint(state->optopt)) {
+		} else if (isprint(state->optopt) != 0) {
 			shell_print(sh,
				    "Unknown option `-%c'.",
				    state->optopt);
@@ -184,7 +184,7 @@ static int cmd_demo_getopt(const struct shell *sh, size_t argc,
 			shell_print(sh,
				    "Option -%c requires an argument.",
				    optopt);
-		} else if (isprint(optopt)) {
+		} else if (isprint(optopt) != 0) {
 			shell_print(sh, "Unknown option `-%c'.",
				    optopt);
 		} else {
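Both getopt handlers, like the similar call sites later in this comparison, stop treating the `int` returned by `isprint()` as a boolean and compare it against zero explicitly. The behavior is identical; the explicit form just satisfies MISRA-style "essentially boolean" checks. The idiom in isolation:

```c
#include <ctype.h>
#include <stdbool.h>

static bool is_printable(int c)
{
	/* ctype predicates return an int that is zero or nonzero, not a
	 * bool; comparing against 0 makes the conversion explicit.
	 */
	return isprint(c) != 0;
}
```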
@@ -7873,7 +7873,7 @@ uint16_t ull_conn_event_counter(struct ll_conn *conn)
 }
 
 void ull_conn_update_parameters(struct ll_conn *conn, uint8_t is_cu_proc, uint8_t win_size,
-				uint16_t win_offset_us, uint16_t interval, uint16_t latency,
+				uint32_t win_offset_us, uint16_t interval, uint16_t latency,
				uint16_t timeout, uint16_t instant)
 {
 	struct lll_conn *lll;
 
@@ -81,7 +81,7 @@ static inline void cpr_active_reset(void)
 uint16_t ull_conn_event_counter(struct ll_conn *conn);
 
 void ull_conn_update_parameters(struct ll_conn *conn, uint8_t is_cu_proc,
-				uint8_t win_size, uint16_t win_offset_us,
+				uint8_t win_size, uint32_t win_offset_us,
				uint16_t interval, uint16_t latency,
				uint16_t timeout, uint16_t instant);
 
@@ -202,13 +202,13 @@ struct proc_ctx {
 		uint8_t error;
 		uint8_t rejected_opcode;
 		uint8_t params_changed;
-		uint16_t instant;
 		uint8_t win_size;
-		uint16_t win_offset_us;
+		uint16_t instant;
 		uint16_t interval_min;
 		uint16_t interval_max;
 		uint16_t latency;
 		uint16_t timeout;
+		uint32_t win_offset_us;
 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
 		uint8_t preferred_periodicity;
 		uint16_t reference_conn_event_count;
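The widening is forced by units: win_offset_us holds a transmit-window offset in microseconds, and a uint16_t tops out at 65,535 µs (about 65.5 ms), while the BLE connection interval from which the offset is derived can legally reach 4 s, i.e. 4,000,000 µs — far beyond 16-bit range; uint32_t covers it comfortably. Moving the field to the end of proc_ctx plausibly also keeps the wider member from forcing alignment padding between the narrower ones, though that is an inference from the layout rather than something stated in the diff.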
@@ -2577,13 +2577,15 @@ static void le_read_buffer_size_complete(struct net_buf *buf)
 	BT_DBG("status 0x%02x", rp->status);
 
 #if defined(CONFIG_BT_CONN)
-	bt_dev.le.acl_mtu = sys_le16_to_cpu(rp->le_max_len);
-	if (!bt_dev.le.acl_mtu) {
+	uint16_t acl_mtu = sys_le16_to_cpu(rp->le_max_len);
+
+	if (!acl_mtu || !rp->le_max_num) {
 		return;
 	}
 
-	BT_DBG("ACL LE buffers: pkts %u mtu %u", rp->le_max_num,
-	       bt_dev.le.acl_mtu);
+	bt_dev.le.acl_mtu = acl_mtu;
+
+	BT_DBG("ACL LE buffers: pkts %u mtu %u", rp->le_max_num, bt_dev.le.acl_mtu);
 
 	k_sem_init(&bt_dev.le.acl_pkts, rp->le_max_num, rp->le_max_num);
 #endif /* CONFIG_BT_CONN */
@@ -2597,25 +2599,26 @@ static void read_buffer_size_v2_complete(struct net_buf *buf)
 	BT_DBG("status %u", rp->status);
 
 #if defined(CONFIG_BT_CONN)
-	bt_dev.le.acl_mtu = sys_le16_to_cpu(rp->acl_max_len);
-	if (!bt_dev.le.acl_mtu) {
-		return;
+	uint16_t acl_mtu = sys_le16_to_cpu(rp->acl_max_len);
+
+	if (acl_mtu && rp->acl_max_num) {
+		bt_dev.le.acl_mtu = acl_mtu;
+		BT_DBG("ACL LE buffers: pkts %u mtu %u", rp->acl_max_num, bt_dev.le.acl_mtu);
+
+		k_sem_init(&bt_dev.le.acl_pkts, rp->acl_max_num, rp->acl_max_num);
 	}
 
-	BT_DBG("ACL LE buffers: pkts %u mtu %u", rp->acl_max_num,
-	       bt_dev.le.acl_mtu);
-
-	k_sem_init(&bt_dev.le.acl_pkts, rp->acl_max_num, rp->acl_max_num);
 #endif /* CONFIG_BT_CONN */
 
-	bt_dev.le.iso_mtu = sys_le16_to_cpu(rp->iso_max_len);
-	if (!bt_dev.le.iso_mtu) {
+	uint16_t iso_mtu = sys_le16_to_cpu(rp->iso_max_len);
+
+	if (!iso_mtu || !rp->iso_max_num) {
 		BT_ERR("ISO buffer size not set");
 		return;
 	}
 
-	BT_DBG("ISO buffers: pkts %u mtu %u", rp->iso_max_num,
-	       bt_dev.le.iso_mtu);
+	bt_dev.le.iso_mtu = iso_mtu;
+
+	BT_DBG("ISO buffers: pkts %u mtu %u", rp->iso_max_num, bt_dev.le.iso_mtu);
 
 	k_sem_init(&bt_dev.le.iso_pkts, rp->iso_max_num, rp->iso_max_num);
 #endif /* CONFIG_BT_ISO */
@@ -2885,6 +2888,7 @@ static int le_init_iso(void)
 		if (err) {
 			return err;
 		}
+
 		read_buffer_size_v2_complete(rsp);
 
 		net_buf_unref(rsp);
@@ -2898,6 +2902,7 @@ static int le_init_iso(void)
 		if (err) {
 			return err;
 		}
+
 		le_read_buffer_size_complete(rsp);
 
 		net_buf_unref(rsp);
@@ -2941,7 +2946,9 @@ static int le_init(void)
 		if (err) {
 			return err;
 		}
+
 		le_read_buffer_size_complete(rsp);
+
 		net_buf_unref(rsp);
 	}
@@ -41,11 +41,9 @@ static const char * const severity_lvls_sorted[] = {
 */
 static const struct log_backend *backend_find(char const *name)
 {
-	const struct log_backend *backend;
 	size_t slen = strlen(name);
 
-	for (int i = 0; i < log_backend_count_get(); i++) {
-		backend = log_backend_get(i);
+	STRUCT_SECTION_FOREACH(log_backend, backend) {
 		if (strncmp(name, backend->name, slen) == 0) {
 			return backend;
 		}
@@ -343,13 +341,7 @@ static int cmd_log_backend_go(const struct shell *shell,
 static int cmd_log_backends_list(const struct shell *shell,
				 size_t argc, char **argv)
 {
-	int backend_count;
-
-	backend_count = log_backend_count_get();
-
-	for (int i = 0; i < backend_count; i++) {
-		const struct log_backend *backend = log_backend_get(i);
-
+	STRUCT_SECTION_FOREACH(log_backend, backend) {
 		shell_fprintf(shell, SHELL_NORMAL,
			      "%s\r\n"
			      "\t- Status: %s\r\n"
@@ -410,9 +402,7 @@ static void backend_name_get(size_t idx, struct shell_static_entry *entry)
 	entry->subcmd = &sub_log_backend;
 	entry->syntax = NULL;
 
-	if (idx < log_backend_count_get()) {
-		const struct log_backend *backend = log_backend_get(idx);
-
+	STRUCT_SECTION_FOREACH(log_backend, backend) {
 		entry->syntax = backend->name;
 	}
 }
@@ -159,11 +159,9 @@ static void z_log_msg_post_finalize(void)
 
 const struct log_backend *log_format_set_all_active_backends(size_t log_type)
 {
-	const struct log_backend *backend;
 	const struct log_backend *failed_backend = NULL;
 
-	for (int i = 0; i < log_backend_count_get(); i++) {
-		backend = log_backend_get(i);
+	STRUCT_SECTION_FOREACH(log_backend, backend) {
 		if (log_backend_is_active(backend)) {
 			int retCode = log_backend_format_set(backend, log_type);
 
@@ -262,16 +260,15 @@ static uint32_t z_log_init(bool blocking, bool can_sleep)
 	}
 
-	__ASSERT_NO_MSG(log_backend_count_get() < LOG_FILTERS_NUM_OF_SLOTS);
-	int i;
 
 	if (atomic_inc(&initialized) != 0) {
 		return 0;
 	}
 
-	/* Assign ids to backends. */
-	for (i = 0; i < log_backend_count_get(); i++) {
-		const struct log_backend *backend = log_backend_get(i);
+	int i = 0;
+
+	/* Assign ids to backends. */
+	STRUCT_SECTION_FOREACH(log_backend, backend) {
 		if (backend->autostart) {
 			log_backend_init(backend);
 
@@ -285,6 +282,8 @@ static uint32_t z_log_init(bool blocking, bool can_sleep)
 		} else {
 			mask |= BIT(i);
 		}
+
+		i++;
 	}
 }
 
@@ -346,8 +345,6 @@ int log_set_timestamp_func(log_timestamp_get_t timestamp_getter, uint32_t freq)
 
 void z_impl_log_panic(void)
 {
-	struct log_backend const *backend;
-
 	if (panic_mode) {
 		return;
 	}
@@ -364,9 +361,7 @@ void z_impl_log_panic(void)
 		}
 	}
 
-	for (int i = 0; i < log_backend_count_get(); i++) {
-		backend = log_backend_get(i);
-
+	STRUCT_SECTION_FOREACH(log_backend, backend) {
 		if (log_backend_is_active(backend)) {
 			log_backend_panic(backend);
 		}
@@ -425,10 +420,7 @@ static bool msg_filter_check(struct log_backend const *backend,
 
 static void msg_process(union log_msg_generic *msg)
 {
-	struct log_backend const *backend;
-
-	for (int i = 0; i < log_backend_count_get(); i++) {
-		backend = log_backend_get(i);
+	STRUCT_SECTION_FOREACH(log_backend, backend) {
 		if (log_backend_is_active(backend) &&
		    msg_filter_check(backend, msg)) {
			log_backend_msg_process(backend, msg);
@@ -440,9 +432,7 @@ void dropped_notify(void)
 {
 	uint32_t dropped = z_log_dropped_read_and_clear();
 
-	for (int i = 0; i < log_backend_count_get(); i++) {
-		struct log_backend const *backend = log_backend_get(i);
-
+	STRUCT_SECTION_FOREACH(log_backend, backend) {
 		if (log_backend_is_active(backend)) {
			log_backend_dropped(backend, dropped);
		}
@@ -638,9 +628,7 @@ int log_mem_get_max_usage(uint32_t *max)
 static void log_backend_notify_all(enum log_backend_evt event,
				   union log_backend_evt_arg *arg)
 {
-	for (int i = 0; i < log_backend_count_get(); i++) {
-		const struct log_backend *backend = log_backend_get(i);
-
+	STRUCT_SECTION_FOREACH(log_backend, backend) {
		log_backend_notify(backend, event, arg);
 	}
 }
@@ -104,12 +104,10 @@ uint32_t z_impl_log_filter_set(struct log_backend const *const backend,
 	uint32_t *filters = z_log_dynamic_filters_get(source_id);
 
 	if (backend == NULL) {
-		struct log_backend const *iter_backend;
 		uint32_t max = 0U;
 		uint32_t current;
 
-		for (int i = 0; i < log_backend_count_get(); i++) {
-			iter_backend = log_backend_get(i);
+		STRUCT_SECTION_FOREACH(log_backend, iter_backend) {
 			current = log_filter_set(iter_backend,
						 domain_id,
						 source_id, level);
@@ -174,14 +172,12 @@ static void backend_filter_set(struct log_backend const *const backend,
 
 const struct log_backend *log_backend_get_by_name(const char *backend_name)
 {
-	const struct log_backend *ptr = __log_backends_start;
-
-	while (ptr < __log_backends_end) {
-		if (strcmp(backend_name, ptr->name) == 0) {
-			return ptr;
+	STRUCT_SECTION_FOREACH(log_backend, backend) {
+		if (strcmp(backend_name, backend->name) == 0) {
+			return backend;
 		}
-		ptr++;
 	}
 
 	return NULL;
 }
@@ -393,7 +393,7 @@ static void hexdump_line_print(const struct log_output *output,
 			unsigned char c = (unsigned char)data[i];
 
 			print_formatted(output, "%c",
-					isprint((int)c) ? c : '.');
+					isprint((int)c) != 0 ? c : '.');
 		} else {
 			print_formatted(output, " ");
 		}
@@ -239,6 +239,15 @@ img_mgmt_find_by_hash(uint8_t *find, struct image_version *ver)
 	return -1;
 }
 
+/*
+ * Resets upload status to defaults (no upload in progress)
+ */
+void img_mgmt_reset_upload(void)
+{
+	memset(&g_img_mgmt_state, 0, sizeof(g_img_mgmt_state));
+	g_img_mgmt_state.area_id = -1;
+}
+
 /**
 * Command handler: image erase
 */
@@ -279,6 +288,8 @@ img_mgmt_erase(struct mgmt_ctxt *ctxt)
 	}
 
 	rc = img_mgmt_impl_erase_slot(slot);
+	img_mgmt_reset_upload();
+
 	if (rc != 0) {
 		img_mgmt_dfu_stopped();
 		return rc;
@@ -362,6 +373,7 @@ img_mgmt_upload(struct mgmt_ctxt *ctxt)
 	int rc;
 	struct img_mgmt_upload_action action;
 	bool last = false;
+	bool reset = false;
 
 	struct zcbor_map_decode_key_val image_upload_decode[] = {
 		ZCBOR_MAP_DECODE_KEY_VAL(image, zcbor_uint32_decode, &req.image),
@@ -460,9 +472,9 @@ img_mgmt_upload(struct mgmt_ctxt *ctxt)
 		} else {
 			/* Write failed, currently not able to recover from this */
 			cmd_status_arg.status = IMG_MGMT_ID_UPLOAD_STATUS_COMPLETE;
-			g_img_mgmt_state.area_id = -1;
 			IMG_MGMT_UPLOAD_ACTION_SET_RC_RSN(&action,
				img_mgmt_err_str_flash_write_failed);
+			reset = true;
 			goto end;
 		}
 
@@ -470,7 +482,7 @@ img_mgmt_upload(struct mgmt_ctxt *ctxt)
 			/* Done */
 			img_mgmt_dfu_pending();
 			cmd_status_arg.status = IMG_MGMT_ID_UPLOAD_STATUS_COMPLETE;
-			g_img_mgmt_state.area_id = -1;
+			reset = true;
 		}
 	}
 end:
@@ -484,7 +496,14 @@ end:
 		return rc;
 	}
 
-	return img_mgmt_upload_good_rsp(ctxt);
+	rc = img_mgmt_upload_good_rsp(ctxt);
+
+	if (reset) {
+		/* Reset the upload state struct back to default */
+		img_mgmt_reset_upload();
+	}
+
+	return rc;
 }
 
 void
@@ -1,6 +1,6 @@
 /*
 * Copyright Runtime.io 2018. All rights reserved.
- * Copyright (c) 2021 Nordic Semiconductor ASA
+ * Copyright (c) 2021-2022 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */
@@ -147,13 +147,15 @@ zephyr_smp_transport_init(struct zephyr_smp_transport *zst,
			  zephyr_smp_transport_out_fn *output_func,
			  zephyr_smp_transport_get_mtu_fn *get_mtu_func,
			  zephyr_smp_transport_ud_copy_fn *ud_copy_func,
-			  zephyr_smp_transport_ud_free_fn *ud_free_func)
+			  zephyr_smp_transport_ud_free_fn *ud_free_func,
+			  zephyr_smp_transport_query_valid_check_fn *query_valid_check_func)
 {
 	*zst = (struct zephyr_smp_transport) {
 		.zst_output = output_func,
 		.zst_get_mtu = get_mtu_func,
 		.zst_ud_copy = ud_copy_func,
 		.zst_ud_free = ud_free_func,
+		.zst_query_valid_check = query_valid_check_func,
 	};
 
 #ifdef CONFIG_MCUMGR_SMP_REASSEMBLY
@@ -164,6 +166,45 @@ zephyr_smp_transport_init(struct zephyr_smp_transport *zst,
 	k_fifo_init(&zst->zst_fifo);
 }
 
+void smp_rx_remove_invalid(struct zephyr_smp_transport *zst, void *arg)
+{
+	struct net_buf *nb;
+	struct k_fifo temp_fifo;
+
+	if (zst->zst_query_valid_check == NULL) {
+		/* No check function registered, abort check */
+		return;
+	}
+
+	/* Cancel current work-queue if ongoing */
+	if (k_work_busy_get(&zst->zst_work) & (K_WORK_RUNNING | K_WORK_QUEUED)) {
+		k_work_cancel(&zst->zst_work);
+	}
+
+	/* Run callback function and remove all buffers that are no longer needed. Store those
+	 * that are in a temporary FIFO
+	 */
+	k_fifo_init(&temp_fifo);
+
+	while ((nb = net_buf_get(&zst->zst_fifo, K_NO_WAIT)) != NULL) {
+		if (zst->zst_query_valid_check(nb, arg)) {
+			zephyr_smp_free_buf(nb, zst);
+		} else {
+			net_buf_put(&temp_fifo, nb);
+		}
+	}
+
+	/* Re-insert the remaining queued operations into the original FIFO */
+	while ((nb = net_buf_get(&temp_fifo, K_NO_WAIT)) != NULL) {
+		net_buf_put(&zst->zst_fifo, nb);
+	}
+
+	/* If at least one entry remains, queue the workqueue for running */
+	if (!k_fifo_is_empty(&zst->zst_fifo)) {
+		k_work_submit_to_queue(&smp_work_queue, &zst->zst_work);
+	}
+}
 
 /**
 * @brief Enqueues an incoming SMP request packet for processing.
 *
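Because k_fifo offers no way to remove an item from the middle of the queue, the function drains everything and re-queues the survivors. The same drain-and-refill pattern in isolation, with main_fifo and keep() as stand-ins:

```c
#include <zephyr/kernel.h>
#include <zephyr/net/buf.h>

void fifo_filter(struct k_fifo *main_fifo, bool (*keep)(struct net_buf *nb))
{
	struct k_fifo scratch;
	struct net_buf *nb;

	k_fifo_init(&scratch);

	/* drain: park survivors, drop the rest */
	while ((nb = net_buf_get(main_fifo, K_NO_WAIT)) != NULL) {
		if (keep(nb)) {
			net_buf_put(&scratch, nb);
		} else {
			net_buf_unref(nb);
		}
	}

	/* refill, preserving the original order */
	while ((nb = net_buf_get(&scratch, K_NO_WAIT)) != NULL) {
		net_buf_put(main_fifo, nb);
	}
}
```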
@@ -30,13 +30,15 @@ config MCUMGR_BUF_USER_DATA_SIZE
 	int "Size of mcumgr buffer user data"
 	default 24 if MCUMGR_SMP_UDP && MCUMGR_SMP_UDP_IPV6
 	default 8 if MCUMGR_SMP_UDP && MCUMGR_SMP_UDP_IPV4
+	default 8 if MCUMGR_SMP_BT
 	default 4
 	help
 	  The size, in bytes, of user data to allocate for each mcumgr buffer.
 
 	  Different mcumgr transports impose different requirements for this
-	  setting. A value of 4 is sufficient for UART, shell, and bluetooth.
-	  For UDP, the userdata must be large enough to hold a IPv4/IPv6 address.
+	  setting. A value of 4 is sufficient for UART and shell, a value of 8
+	  is sufficient for Bluetooth. For UDP, the userdata must be large
+	  enough to hold an IPv4/IPv6 address.
 
 rsource "Kconfig.dummy"
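A hedged prj.conf sketch for an SMP-over-Bluetooth application picking up the enlarged user-data area explicitly; option names follow the Kconfig shown above and the era of this branch, and may differ in later releases:

```
CONFIG_MCUMGR=y
CONFIG_MCUMGR_SMP_BT=y
CONFIG_MCUMGR_BUF_USER_DATA_SIZE=8
```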
@@ -1,5 +1,6 @@
 /*
 * Copyright Runtime.io 2018. All rights reserved.
+ * Copyright (c) 2022 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */
@@ -63,8 +64,13 @@ BUILD_ASSERT((CONFIG_MCUMGR_SMP_BT_CONN_PARAM_CONTROL_TIMEOUT * 4U) >
 
 struct smp_bt_user_data {
 	struct bt_conn *conn;
+	uint8_t id;
 };
 
+/* Verification of user data being able to fit */
+BUILD_ASSERT(sizeof(struct smp_bt_user_data) <= CONFIG_MCUMGR_BUF_USER_DATA_SIZE,
+	     "CONFIG_MCUMGR_BUF_USER_DATA_SIZE not large enough to fit Bluetooth user data");
+
 enum {
 	CONN_PARAM_SMP_REQUESTED = BIT(0),
 };
@@ -74,13 +80,14 @@ struct conn_param_data {
 	struct k_work_delayable dwork;
 	struct k_work_delayable ework;
 	uint8_t state;
+	uint8_t id;
+	struct k_sem smp_notify_sem;
 };
 
+static uint8_t next_id;
 static struct zephyr_smp_transport smp_bt_transport;
 static struct conn_param_data conn_data[CONFIG_BT_MAX_CONN];
 
-K_SEM_DEFINE(smp_notify_sem, 0, 1);
-
 /* SMP service.
 * {8D53DC1D-1DB7-4CD3-868B-8A527460AA84}
 */
@@ -93,20 +100,39 @@ static struct bt_uuid_128 smp_bt_svc_uuid = BT_UUID_INIT_128(
 static struct bt_uuid_128 smp_bt_chr_uuid = BT_UUID_INIT_128(
 	BT_UUID_128_ENCODE(0xda2e7828, 0xfbce, 0x4e01, 0xae9e, 0x261174997c48));
 
-/* SMP Bluetooth notification sent callback */
-static void smp_notify_finished(struct bt_conn *conn, void *user_data)
-{
-	k_sem_give(&smp_notify_sem);
-}
-
 /* Helper function that allocates conn_param_data for a conn. */
 static struct conn_param_data *conn_param_data_alloc(struct bt_conn *conn)
 {
 	for (size_t i = 0; i < ARRAY_SIZE(conn_data); i++) {
 		if (conn_data[i].conn == NULL) {
+			bool valid = false;
+
 			conn_data[i].conn = conn;
-			return &conn_data[i];
+
+			/* Generate an ID for this connection and reset semaphore */
+			while (!valid) {
+				valid = true;
+				conn_data[i].id = next_id;
+				++next_id;
+
+				if (next_id == 0) {
+					/* Avoid use of 0 (invalid ID) */
+					++next_id;
+				}
+
+				for (size_t l = 0; l < ARRAY_SIZE(conn_data); l++) {
+					if (l != i && conn_data[l].conn != NULL &&
+					    conn_data[l].id == conn_data[i].id) {
+						valid = false;
+						break;
+					}
+				}
+			}
+
+			k_sem_reset(&conn_data[i].smp_notify_sem);
+
+			return &conn_data[i];
 		}
 	}
 
 	/* Conn data must exists. */
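The generated ID exists because a bt_conn pointer alone is ambiguous: a new connection can be allocated at the address an old one just vacated, so a queued buffer could appear to match a connection it never belonged to. Pairing the pointer with a per-connection ID (never 0, checked for uniqueness across the table) makes such a stale match detectable. A condensed form of the validity test this enables, mirroring the check used later in this file:

```c
static bool pkt_matches_conn(const struct smp_bt_user_data *ud,
			     const struct bt_conn *conn,
			     const struct conn_param_data *cpd)
{
	/* a reused bt_conn address with a fresh ID fails the second test */
	return ud->conn == conn && ud->id == cpd->id;
}
```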
@@ -123,34 +149,45 @@ static struct conn_param_data *conn_param_data_get(const struct bt_conn *conn)
 		}
 	}
 
-	/* Conn data must exists. */
-	__ASSERT_NO_MSG(false);
 	return NULL;
 }
 
+/* SMP Bluetooth notification sent callback */
+static void smp_notify_finished(struct bt_conn *conn, void *user_data)
+{
+	struct conn_param_data *cpd = conn_param_data_get(conn);
+
+	if (cpd != NULL) {
+		k_sem_give(&cpd->smp_notify_sem);
+	}
+}
+
 /* Sets connection parameters for a given conn. */
 static void conn_param_set(struct bt_conn *conn, struct bt_le_conn_param *param)
 {
 	int ret = 0;
 	struct conn_param_data *cpd = conn_param_data_get(conn);
 
-	ret = bt_conn_le_param_update(conn, param);
-	if (ret && (ret != -EALREADY)) {
-		/* Try again to avoid being stuck with incorrect connection parameters. */
-		(void)k_work_reschedule(&cpd->ework, K_MSEC(RETRY_TIME));
-	} else {
-		(void)k_work_cancel_delayable(&cpd->ework);
+	if (cpd != NULL) {
+		ret = bt_conn_le_param_update(conn, param);
+		if (ret && (ret != -EALREADY)) {
+			/* Try again to avoid being stuck with incorrect connection parameters. */
+			(void)k_work_reschedule(&cpd->ework, K_MSEC(RETRY_TIME));
+		} else {
+			(void)k_work_cancel_delayable(&cpd->ework);
+		}
 	}
 }
 
-
 /* Work handler function for restoring the preferred connection parameters for the connection. */
 static void conn_param_on_pref_restore(struct k_work *work)
 {
 	struct conn_param_data *cpd = CONTAINER_OF(work, struct conn_param_data, dwork);
 
-	conn_param_set(cpd->conn, CONN_PARAM_PREF);
-	cpd->state &= ~CONN_PARAM_SMP_REQUESTED;
+	if (cpd != NULL) {
+		conn_param_set(cpd->conn, CONN_PARAM_PREF);
+		cpd->state &= ~CONN_PARAM_SMP_REQUESTED;
+	}
 }
 
 /* Work handler function for retrying on conn negotiation API error. */
@@ -167,13 +204,15 @@ static void conn_param_smp_enable(struct bt_conn *conn)
 {
 	struct conn_param_data *cpd = conn_param_data_get(conn);
 
-	if (!(cpd->state & CONN_PARAM_SMP_REQUESTED)) {
-		conn_param_set(conn, CONN_PARAM_SMP);
-		cpd->state |= CONN_PARAM_SMP_REQUESTED;
-	}
+	if (cpd != NULL) {
+		if (!(cpd->state & CONN_PARAM_SMP_REQUESTED)) {
+			conn_param_set(conn, CONN_PARAM_SMP);
+			cpd->state |= CONN_PARAM_SMP_REQUESTED;
+		}
 
-	/* SMP characteristic in use; refresh the restore timeout. */
-	(void)k_work_reschedule(&cpd->dwork, K_MSEC(RESTORE_TIME));
+		/* SMP characteristic in use; refresh the restore timeout. */
+		(void)k_work_reschedule(&cpd->dwork, K_MSEC(RESTORE_TIME));
+	}
 }
 
 /**
@@ -184,10 +223,16 @@ static ssize_t smp_bt_chr_write(struct bt_conn *conn,
				const void *buf, uint16_t len, uint16_t offset,
				uint8_t flags)
 {
+	struct conn_param_data *cpd = conn_param_data_get(conn);
 #ifdef CONFIG_MCUMGR_SMP_REASSEMBLY_BT
 	int ret;
 	bool started;
 
+	if (cpd == NULL) {
+		LOG_ERR("Null cpd object for connection %p", (void *)conn);
+		return BT_GATT_ERR(BT_ATT_ERR_INSUFFICIENT_RESOURCES);
+	}
+
 	started = (zephyr_smp_reassembly_expected(&smp_bt_transport) >= 0);
 
 	LOG_DBG("started = %s, buf len = %d", started ? "true" : "false", len);
@@ -211,8 +256,8 @@ static ssize_t smp_bt_chr_write(struct bt_conn *conn,
 			(struct smp_bt_user_data *)zephyr_smp_reassembly_get_ud(&smp_bt_transport);
 
 		if (ud != NULL) {
-			bt_conn_unref(ud->conn);
 			ud->conn = NULL;
+			ud->id = 0;
 		}
 
 		zephyr_smp_reassembly_drop(&smp_bt_transport);
@@ -230,7 +275,8 @@ static ssize_t smp_bt_chr_write(struct bt_conn *conn,
 			conn_param_smp_enable(conn);
 		}
 
-		ud->conn = bt_conn_ref(conn);
+		ud->conn = conn;
+		ud->id = cpd->id;
 	}
 
 	/* No more bytes are expected for this packet */
@@ -244,6 +290,11 @@ static ssize_t smp_bt_chr_write(struct bt_conn *conn,
 	struct smp_bt_user_data *ud;
 	struct net_buf *nb;
 
+	if (cpd == NULL) {
+		LOG_ERR("Null cpd object for connection %p", (void *)conn);
+		return BT_GATT_ERR(BT_ATT_ERR_INSUFFICIENT_RESOURCES);
+	}
+
 	nb = mcumgr_buf_alloc();
 	if (!nb) {
 		LOG_DBG("failed net_buf alloc for SMP packet");
@@ -260,7 +311,8 @@ static ssize_t smp_bt_chr_write(struct bt_conn *conn,
 	net_buf_add_mem(nb, buf, len);
 
 	ud = net_buf_user_data(nb);
-	ud->conn = bt_conn_ref(conn);
+	ud->conn = conn;
+	ud->id = cpd->id;
 
 	if (IS_ENABLED(CONFIG_MCUMGR_SMP_BT_CONN_PARAM_CONTROL)) {
 		conn_param_smp_enable(conn);
@@ -278,8 +330,8 @@ static void smp_bt_ccc_changed(const struct bt_gatt_attr *attr, uint16_t value)
 	if (zephyr_smp_reassembly_expected(&smp_bt_transport) >= 0 && value == 0) {
 		struct smp_bt_user_data *ud = zephyr_smp_reassembly_get_ud(&smp_bt_transport);
 
-		bt_conn_unref(ud->conn);
 		ud->conn = NULL;
+		ud->id = 0;
 
 		zephyr_smp_reassembly_drop(&smp_bt_transport);
 	}
@@ -326,7 +378,7 @@ static struct bt_conn *smp_bt_conn_from_pkt(const struct net_buf *nb)
 		return NULL;
 	}
 
-	return bt_conn_ref(ud->conn);
+	return ud->conn;
 }
 
 /**
@@ -344,7 +396,6 @@ static uint16_t smp_bt_get_mtu(const struct net_buf *nb)
 	}
 
 	mtu = bt_gatt_get_mtu(conn);
-	bt_conn_unref(conn);
 
 	/* Account for the three-byte notification header. */
 	return mtu - 3;
@@ -355,8 +406,8 @@ static void smp_bt_ud_free(void *ud)
 	struct smp_bt_user_data *user_data = ud;
 
 	if (user_data->conn) {
-		bt_conn_unref(user_data->conn);
 		user_data->conn = NULL;
+		user_data->id = 0;
 	}
 }
 
@@ -366,7 +417,8 @@ static int smp_bt_ud_copy(struct net_buf *dst, const struct net_buf *src)
 	struct smp_bt_user_data *dst_ud = net_buf_user_data(dst);
 
 	if (src_ud->conn) {
-		dst_ud->conn = bt_conn_ref(src_ud->conn);
+		dst_ud->conn = src_ud->conn;
+		dst_ud->id = src_ud->id;
 	}
 
 	return 0;
@@ -387,6 +439,9 @@ static int smp_bt_tx_pkt(struct net_buf *nb)
 		.data = nb->data,
 	};
 	bool sent = false;
+	struct bt_conn_info info;
+	struct conn_param_data *cpd;
+	struct smp_bt_user_data *ud;
 
 	conn = smp_bt_conn_from_pkt(nb);
 	if (conn == NULL) {
@@ -394,6 +449,22 @@ static int smp_bt_tx_pkt(struct net_buf *nb)
 		goto cleanup;
 	}
 
+	/* Verify that the device is connected; the necessity for this check is that the remote
+	 * device might have sent a command and disconnected before the command has been processed
+	 * completely. If this happens then the connection details will still be valid due to
+	 * the incremented connection reference count, but the connection has actually been
+	 * dropped; this avoids waiting for a semaphore that will never be given, which would
+	 * otherwise cause a deadlock.
+	 */
+	rc = bt_conn_get_info(conn, &info);
+
+	if (rc != 0 || info.state != BT_CONN_STATE_CONNECTED) {
+		/* Remote device has disconnected */
+		bt_conn_unref(conn);
+		rc = MGMT_ERR_ENOENT;
+		goto cleanup;
+	}
+
 	/* Send data in chunks of the MTU size */
 	mtu_size = smp_bt_get_mtu(nb);
 
@@ -403,18 +474,35 @@ static int smp_bt_tx_pkt(struct net_buf *nb)
 		goto cleanup;
 	}
 
-	k_sem_reset(&smp_notify_sem);
+	cpd = conn_param_data_get(conn);
+	ud = net_buf_user_data(nb);
+
+	if (cpd == NULL || cpd->id == 0 || cpd->id != ud->id) {
+		/* The device that sent this packet has disconnected or is not the same active
+		 * connection, drop the outgoing data
+		 */
+		rc = MGMT_ERR_ENOENT;
+		goto cleanup;
+	}
+
+	k_sem_reset(&cpd->smp_notify_sem);
 
 	while (off < nb->len) {
+		if (cpd->id == 0 || cpd->id != ud->id) {
+			/* The device that sent this packet has disconnected or is not the same
+			 * active connection, drop the outgoing data
+			 */
+			rc = MGMT_ERR_ENOENT;
+			goto cleanup;
+		}
+
 		if ((off + mtu_size) > nb->len) {
 			/* Final packet, limit size */
 			mtu_size = nb->len - off;
 		}
 
 		notify_param.len = mtu_size;
 
 		rc = bt_gatt_notify_cb(conn, &notify_param);
-		k_sem_take(&smp_notify_sem, K_FOREVER);
 
 		if (rc == -ENOMEM) {
 			if (sent == false) {
@@ -443,17 +531,19 @@ static int smp_bt_tx_pkt(struct net_buf *nb)
 			off += mtu_size;
 			notify_param.data = &nb->data[off];
 			sent = true;
+
+			/* Wait for the completion (or disconnect) semaphore before
+			 * continuing, allowing other parts of the system to run.
+			 */
+			k_sem_take(&cpd->smp_notify_sem, K_FOREVER);
 		} else {
 			/* No connection, cannot continue */
 			rc = MGMT_ERR_EUNKNOWN;
 			break;
 		}
 	}
 
 cleanup:
-	if (rc != MGMT_ERR_ENOENT) {
-		bt_conn_unref(conn);
-	}
-
 	smp_bt_ud_free(net_buf_user_data(nb));
 	mcumgr_buf_free(nb);
@@ -474,7 +564,7 @@ int smp_bt_unregister(void)
 static void connected(struct bt_conn *conn, uint8_t err)
 {
 	if (err == 0) {
-		conn_param_data_alloc(conn);
+		(void)conn_param_data_alloc(conn);
 	}
 }
 
@@ -483,17 +573,69 @@ static void disconnected(struct bt_conn *conn, uint8_t reason)
 {
 	struct conn_param_data *cpd = conn_param_data_get(conn);
 
-	/* Cancel work if ongoing. */
-	(void)k_work_cancel_delayable(&cpd->dwork);
-	(void)k_work_cancel_delayable(&cpd->ework);
+	/* Clear all pending requests from this device which have yet to be processed from the
+	 * FIFO (for this specific connection).
+	 */
+	smp_rx_remove_invalid(&smp_bt_transport, (void *)conn);
 
-	/* Clear cpd. */
-	cpd->state = 0;
-	cpd->conn = NULL;
+	/* Force giving the notification semaphore here; this is only needed if there is a pending
+	 * outgoing packet when the device has disconnected, as in this case the notification
	 * callback will not be called and this is needed to prevent a deadlock.
+	 */
+	if (cpd != NULL) {
+		/* Clear cpd. */
+		cpd->id = 0;
+		cpd->conn = NULL;
+
+		if (IS_ENABLED(CONFIG_MCUMGR_SMP_BT_CONN_PARAM_CONTROL)) {
+			/* Cancel work if ongoing. */
+			(void)k_work_cancel_delayable(&cpd->dwork);
+			(void)k_work_cancel_delayable(&cpd->ework);
+
+			/* Clear cpd. */
+			cpd->state = 0;
+		}
+
+		k_sem_give(&cpd->smp_notify_sem);
+	} else {
+		LOG_ERR("Null cpd object for connection %p", (void *)conn);
+	}
 }
 
 static void conn_param_control_init(void)
 {
 	for (size_t i = 0; i < ARRAY_SIZE(conn_data); i++) {
 		k_work_init_delayable(&conn_data[i].dwork, conn_param_on_pref_restore);
 		k_work_init_delayable(&conn_data[i].ework, conn_param_on_error_retry);
 	}
 }
 
+static bool smp_bt_query_valid_check(struct net_buf *nb, void *arg)
+{
+	const struct bt_conn *conn = (struct bt_conn *)arg;
+	struct smp_bt_user_data *ud = net_buf_user_data(nb);
+	struct conn_param_data *cpd;
+
+	if (conn == NULL || ud == NULL) {
+		return false;
+	}
+
+	cpd = conn_param_data_get(conn);
+
+	if (cpd == NULL || (ud->conn == conn && cpd->id != ud->id)) {
+		return false;
+	}
+
+	return true;
+}
+
 static int smp_bt_init(const struct device *dev)
 {
+	uint8_t i = 0;
 	ARG_UNUSED(dev);
 
+	next_id = 1;
+
 	/* Register BT callbacks */
 	static struct bt_conn_cb conn_callbacks = {
 		.connected = connected,
@@ -501,23 +643,18 @@ static void conn_param_control_init(void)
 	};
 	bt_conn_cb_register(&conn_callbacks);
 
-	for (size_t i = 0; i < ARRAY_SIZE(conn_data); i++) {
-		k_work_init_delayable(&conn_data[i].dwork, conn_param_on_pref_restore);
-		k_work_init_delayable(&conn_data[i].ework, conn_param_on_error_retry);
-	}
-}
-
-static int smp_bt_init(const struct device *dev)
-{
-	ARG_UNUSED(dev);
-
 	if (IS_ENABLED(CONFIG_MCUMGR_SMP_BT_CONN_PARAM_CONTROL)) {
 		conn_param_control_init();
 	}
 
+	while (i < CONFIG_BT_MAX_CONN) {
+		k_sem_init(&conn_data[i].smp_notify_sem, 0, 1);
+		++i;
+	}
+
 	zephyr_smp_transport_init(&smp_bt_transport, smp_bt_tx_pkt,
				  smp_bt_get_mtu, smp_bt_ud_copy,
-				  smp_bt_ud_free);
+				  smp_bt_ud_free, smp_bt_query_valid_check);
 	return 0;
 }
@@ -190,7 +190,7 @@ static int smp_dummy_init(const struct device *dev)
 	k_sem_init(&smp_data_ready_sem, 0, 1);
 
 	zephyr_smp_transport_init(&smp_dummy_transport, smp_dummy_tx_pkt_int,
-				  smp_dummy_get_mtu, NULL, NULL);
+				  smp_dummy_get_mtu, NULL, NULL, NULL);
 	dummy_mgumgr_recv_cb = smp_dummy_rx_frag;
 
 	return 0;
 
@@ -191,7 +191,7 @@ static int smp_shell_tx_pkt(struct net_buf *nb)
 int smp_shell_init(void)
 {
 	zephyr_smp_transport_init(&smp_shell_transport, smp_shell_tx_pkt,
-				  smp_shell_get_mtu, NULL, NULL);
+				  smp_shell_get_mtu, NULL, NULL, NULL);
 
 	return 0;
 }
 
@@ -94,7 +94,7 @@ static int smp_uart_init(const struct device *dev)
 	ARG_UNUSED(dev);
 
 	zephyr_smp_transport_init(&smp_uart_transport, smp_uart_tx_pkt,
-				  smp_uart_get_mtu, NULL, NULL);
+				  smp_uart_get_mtu, NULL, NULL, NULL);
 	uart_mcumgr_register(smp_uart_rx_frag);
 
 	return 0;
 
@@ -166,13 +166,13 @@ static int smp_udp_init(const struct device *dev)
 #ifdef CONFIG_MCUMGR_SMP_UDP_IPV4
 	zephyr_smp_transport_init(&configs.ipv4.smp_transport,
				  smp_udp4_tx, smp_udp_get_mtu,
-				  smp_udp_ud_copy, NULL);
+				  smp_udp_ud_copy, NULL, NULL);
 #endif
 
 #ifdef CONFIG_MCUMGR_SMP_UDP_IPV6
 	zephyr_smp_transport_init(&configs.ipv6.smp_transport,
				  smp_udp6_tx, smp_udp_get_mtu,
-				  smp_udp_ud_copy, NULL);
+				  smp_udp_ud_copy, NULL, NULL);
 #endif
 
 	return MGMT_ERR_EOK;
@@ -4211,6 +4211,74 @@ wait_reply:
 #endif
 }
 
+static bool is_pkt_part_of_slab(const struct k_mem_slab *slab, const char *ptr)
+{
+	size_t last_offset = (slab->num_blocks - 1) * slab->block_size;
+	size_t ptr_offset;
+
+	/* Check if pointer fits into slab buffer area. */
+	if ((ptr < slab->buffer) || (ptr > slab->buffer + last_offset)) {
+		return false;
+	}
+
+	/* Check if pointer offset is correct. */
+	ptr_offset = ptr - slab->buffer;
+	if (ptr_offset % slab->block_size != 0) {
+		return false;
+	}
+
+	return true;
+}
+
+struct ctx_pkt_slab_info {
+	const void *ptr;
+	bool pkt_source_found;
+};
+
+static void check_context_pool(struct net_context *context, void *user_data)
+{
+#if defined(CONFIG_NET_CONTEXT_NET_PKT_POOL)
+	if (!net_context_is_used(context)) {
+		return;
+	}
+
+	if (context->tx_slab) {
+		struct ctx_pkt_slab_info *info = user_data;
+		struct k_mem_slab *slab = context->tx_slab();
+
+		if (is_pkt_part_of_slab(slab, info->ptr)) {
+			info->pkt_source_found = true;
+		}
+	}
+#endif /* CONFIG_NET_CONTEXT_NET_PKT_POOL */
+}
+
+static bool is_pkt_ptr_valid(const void *ptr)
+{
+	struct k_mem_slab *rx, *tx;
+
+	net_pkt_get_info(&rx, &tx, NULL, NULL);
+
+	if (is_pkt_part_of_slab(rx, ptr) || is_pkt_part_of_slab(tx, ptr)) {
+		return true;
+	}
+
+	if (IS_ENABLED(CONFIG_NET_CONTEXT_NET_PKT_POOL)) {
+		struct ctx_pkt_slab_info info;
+
+		info.ptr = ptr;
+		info.pkt_source_found = false;
+
+		net_context_foreach(check_context_pool, &info);
+
+		if (info.pkt_source_found) {
+			return true;
+		}
+	}
+
+	return false;
+}
+
 static struct net_pkt *get_net_pkt(const char *ptr_str)
 {
 	uint8_t buf[sizeof(intptr_t)];
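The membership test reduces to a range check plus an alignment check. As a worked example: with block_size = 128 and num_blocks = 4, last_offset is (4 - 1) * 128 = 384, so only buffer+0, buffer+128, buffer+256 and buffer+384 pass; buffer+200 is inside the range but fails the modulo test, and buffer+512 fails the range test. This is what lets the `net pkt` shell command reject a user-supplied pointer that does not name a real packet.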
@@ -4286,6 +4354,14 @@ static int cmd_net_pkt(const struct shell *shell, size_t argc, char *argv[])
 	if (!pkt) {
 		PR_ERROR("Invalid ptr value (%s). "
			 "Example: 0x01020304\n", argv[1]);
 
 		return -ENOEXEC;
 	}
 
+	if (!is_pkt_ptr_valid(pkt)) {
+		PR_ERROR("Pointer is not recognized as net_pkt (%s).\n",
+			 argv[1]);
+
+		return -ENOEXEC;
+	}
@@ -460,6 +460,11 @@ static ssize_t spair_write(void *obj, const void *buffer, size_t count)
 	}
 
 	if (will_block) {
+		if (k_is_in_isr()) {
+			errno = EAGAIN;
+			res = -1;
+			goto out;
+		}
+
 		for (int signaled = false, result = -1; !signaled;
		     result = -1) {
 
@@ -646,6 +651,11 @@ static ssize_t spair_read(void *obj, void *buffer, size_t count)
 	}
 
 	if (will_block) {
+		if (k_is_in_isr()) {
+			errno = EAGAIN;
+			res = -1;
+			goto out;
+		}
+
 		for (int signaled = false, result = -1; !signaled;
		     result = -1) {
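The new guard reflects a kernel invariant: interrupt handlers must never pend on kernel objects such as the condition variable these loops wait on, so a would-block socketpair read or write in ISR context now fails fast exactly as a non-blocking descriptor would. A hedged caller-side sketch (spair_fd and the retry strategy are illustrative):

```c
#include <errno.h>
#include <unistd.h>

/* Hypothetical caller: an ISR-side write that would block reports EAGAIN;
 * defer the data and retry from thread context instead.
 */
void try_write(int spair_fd, const char *data, size_t len)
{
	if (write(spair_fd, data, len) < 0 && errno == EAGAIN) {
		/* queue a k_work item to retry from a thread */
	}
}
```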
@@ -1049,7 +1049,7 @@ static void state_collect(const struct shell *shell)
 			break;
 
 		default:
-			if (isprint((int) data)) {
+			if (isprint((int) data) != 0) {
 				z_flag_history_exit_set(shell, true);
 				z_shell_op_char_insert(shell, data);
 			} else if (z_flag_echo_get(shell)) {
@@ -1555,7 +1555,7 @@ void shell_hexdump_line(const struct shell *shell, unsigned int offset,
 			char c = data[i];
 
 			shell_fprintf(shell, SHELL_NORMAL, "%c",
-				      isprint((int)c) ? c : '.');
+				      isprint((int)c) != 0 ? c : '.');
 		} else {
 			shell_fprintf(shell, SHELL_NORMAL, " ");
 		}
 
@@ -48,7 +48,7 @@ static void uart_isr(const struct device *dev, void *user_data)
 		length = tracing_cmd_buffer_alloc(&cmd);
 	}
 
-	if (!isprint(byte)) {
+	if (isprint(byte) == 0) {
 		if (byte == '\r') {
 			cmd[cur] = '\0';
 			tracing_cmd_handle(cmd, cur);
 
@@ -479,7 +479,7 @@ ZTEST(test_c_lib, test_checktype)
 
 	ptr = buf;
 	for (int i = 0; i < 128; i++) {
-		if (isprint(i)) {
+		if (isprint(i) != 0) {
 			*ptr++ = i;
 		}
 	}
 
@@ -23,11 +23,9 @@ ZTEST(posix_apis, test_posix_clock)
		      NULL);
 	zassert_equal(errno, EINVAL);
 
-	clock_gettime(CLOCK_MONOTONIC, &ts);
-	/* 2 Sec Delay */
-	sleep(SLEEP_SECONDS);
-	usleep(SLEEP_SECONDS * USEC_PER_SEC);
-	clock_gettime(CLOCK_MONOTONIC, &te);
+	zassert_ok(clock_gettime(CLOCK_MONOTONIC, &ts));
+	zassert_ok(k_sleep(K_SECONDS(SLEEP_SECONDS)));
+	zassert_ok(clock_gettime(CLOCK_MONOTONIC, &te));
 
 	if (te.tv_nsec >= ts.tv_nsec) {
 		secs_elapsed = te.tv_sec - ts.tv_sec;
@@ -38,7 +36,7 @@ ZTEST(posix_apis, test_posix_clock)
 	}
 
 	/*TESTPOINT: Check if POSIX clock API test passes*/
-	zassert_equal(secs_elapsed, (2 * SLEEP_SECONDS),
+	zassert_equal(secs_elapsed, SLEEP_SECONDS,
		      "POSIX clock API test failed");
 
 	printk("POSIX clock APIs test done\n");
87	tests/posix/common/src/sleep.c	Normal file
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2022, Meta
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#include <zephyr/posix/unistd.h>
+#include <zephyr/ztest.h>
+
+struct waker_work {
+	k_tid_t tid;
+	struct k_work_delayable dwork;
+};
+static struct waker_work ww;
+
+static void waker_func(struct k_work *work)
+{
+	struct waker_work *ww;
+	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
+
+	ww = CONTAINER_OF(dwork, struct waker_work, dwork);
+	k_wakeup(ww->tid);
+}
+K_WORK_DELAYABLE_DEFINE(waker, waker_func);
+
+ZTEST(posix_apis, test_sleep)
+{
+	uint32_t then;
+	uint32_t now;
+	/* call sleep(10), wakeup after 1s, expect >= 8s left */
+	const uint32_t sleep_min_s = 1;
+	const uint32_t sleep_max_s = 10;
+	const uint32_t sleep_rem_s = 8;
+
+	/* sleeping for 0s should return 0 */
+	zassert_ok(sleep(0));
+
+	/* test that sleeping for 1s sleeps for at least 1s */
+	then = k_uptime_get();
+	zassert_equal(0, sleep(1));
+	now = k_uptime_get();
+	zassert_true((now - then) >= 1 * MSEC_PER_SEC);
+
+	/* test that sleeping for 2s sleeps for at least 2s */
+	then = k_uptime_get();
+	zassert_equal(0, sleep(2));
+	now = k_uptime_get();
+	zassert_true((now - then) >= 2 * MSEC_PER_SEC);
+
+	/* test that sleep reports the remainder */
+	ww.tid = k_current_get();
+	k_work_init_delayable(&ww.dwork, waker_func);
+	zassert_equal(1, k_work_schedule(&ww.dwork, K_SECONDS(sleep_min_s)));
+	zassert_true(sleep(sleep_max_s) >= sleep_rem_s);
+}
+
+ZTEST(posix_apis, test_usleep)
+{
+	uint32_t then;
+	uint32_t now;
+
+	/* test usleep works for small values */
+	/* Note: k_usleep(), an implementation detail, is a cancellation point */
+	zassert_equal(0, usleep(0));
+	zassert_equal(0, usleep(1));
+
+	/* sleep for the spec limit */
+	then = k_uptime_get();
+	zassert_equal(0, usleep(USEC_PER_SEC - 1));
+	now = k_uptime_get();
+	zassert_true(((now - then) * USEC_PER_MSEC) / (USEC_PER_SEC - 1) >= 1);
+
+	/* sleep for exactly the limit threshold */
+	zassert_equal(-1, usleep(USEC_PER_SEC));
+	zassert_equal(errno, EINVAL);
+
+	/* sleep for over the spec limit */
+	zassert_equal(-1, usleep((useconds_t)ULONG_MAX));
+	zassert_equal(errno, EINVAL);
+
+	/* test that sleep reports errno = EINTR when woken up */
+	ww.tid = k_current_get();
+	k_work_init_delayable(&ww.dwork, waker_func);
+	zassert_equal(1, k_work_schedule(&ww.dwork, K_USEC(USEC_PER_SEC / 2)));
+	zassert_equal(-1, usleep(USEC_PER_SEC - 1));
+	zassert_equal(EINTR, errno);
+}
@@ -127,7 +127,10 @@ static void process_and_validate(bool backend2_enable, bool panic)
 	mock_log_frontend_validate(panic);
 
 	if (NO_BACKENDS) {
-		zassert_equal(log_backend_count_get(), 0);
+		int cnt;
+
+		STRUCT_SECTION_COUNT(log_backend, &cnt);
+		zassert_equal(cnt, 0);
 		return;
 	}
 
@@ -8,6 +8,7 @@
 #include <zephyr/tc_util.h>
 #include <stdbool.h>
 #include <zephyr/kernel.h>
+#include <zephyr/toolchain.h>
 #include <zephyr/ztest.h>
 #include <zephyr/logging/log_backend.h>
 #include <zephyr/logging/log_ctrl.h>
@@ -111,7 +112,10 @@ LOG_BACKEND_DEFINE(backend2, backend_api, true, &context2);
 */
 ZTEST(log_backend_init, test_log_backends_initialization)
 {
-	if (log_backend_count_get() != 2) {
+	int cnt;
+
+	STRUCT_SECTION_COUNT(log_backend, &cnt);
+	if (cnt != 2) {
 		/* Other backends should not be enabled. */
 		ztest_test_skip();
 	}
 
@@ -238,10 +238,7 @@ ZTEST(test_log_core_additional, test_log_early_logging)
 	log_init();
 
 	/* deactivate other backends */
-	const struct log_backend *backend;
-
-	for (int i = 0; i < log_backend_count_get(); i++) {
-		backend = log_backend_get(i);
+	STRUCT_SECTION_FOREACH(log_backend, backend) {
 		if (strcmp(backend->name, "test")) {
 			log_backend_deactivate(backend);
 		}
@@ -309,10 +306,7 @@ ZTEST(test_log_core_additional, test_log_timestamping)
 
 	log_init();
 	/* deactivate all other backend */
-	const struct log_backend *backend;
-
-	for (int i = 0; i < log_backend_count_get(); i++) {
-		backend = log_backend_get(i);
+	STRUCT_SECTION_FOREACH(log_backend, backend) {
 		log_backend_deactivate(backend);
 	}
 
@@ -356,18 +350,19 @@ ZTEST(test_log_core_additional, test_log_timestamping)
 #define UART_BACKEND "log_backend_uart"
 ZTEST(test_log_core_additional, test_multiple_backends)
 {
+	int cnt;
+
 	TC_PRINT("Test multiple backends");
 	/* enable both backend1 and backend2 */
 	log_setup(true);
-	zassert_true((log_backend_count_get() >= 2),
+	STRUCT_SECTION_COUNT(log_backend, &cnt);
+	zassert_true((cnt >= 2),
		     "There is no multi backends");
 
 	if (IS_ENABLED(CONFIG_LOG_BACKEND_UART)) {
 		bool have_uart = false;
 		struct log_backend const *backend;
 
-		for (int i = 0; i < log_backend_count_get(); i++) {
-			backend = log_backend_get(i);
+		STRUCT_SECTION_FOREACH(log_backend, backend) {
 			if (strcmp(backend->name, UART_BACKEND) == 0) {
				have_uart = true;
			}