Compare commits
195 Commits
| SHA1 |
|---|
| 030fa9da45 |
| 43370b89c3 |
| 15fa28896a |
| ce3eb90a83 |
| ca24cd6c2d |
| 4fc4dc7b84 |
| fb24b62dc5 |
| 60e7a97328 |
| a1aa463783 |
| 29c1e08cf7 |
| 190f09df52 |
| a166290f1a |
| 21e0870106 |
| ebe3651f3d |
| 1b7c720c7f |
| 58af1b51bd |
| 70f2a4951a |
| 650d10805a |
| 25616b1021 |
| f72519007c |
| 1b2a7ec251 |
| 9d2533fc92 |
| e20b8f3f34 |
| 199d5d5448 |
| 5db2717f06 |
| f3851326da |
| 5a8d05b968 |
| eea42e38f3 |
| 0388a90e7b |
| 4c62d76fb7 |
| 6f8f9b5c7a |
| afbc93287d |
| a28aa01a88 |
| 677a374255 |
| 0389fa740b |
| b02d34b855 |
| 29e3a4865f |
| aaa6d280ce |
| e02a3377e5 |
| 76c30dfa55 |
| c3f512d606 |
| f882abfd13 |
| bc7300fea7 |
| 8da9a76464 |
| 298b8ea788 |
| a9aaf048e8 |
| e2b81b48c4 |
| 45c41bc344 |
| f570a46719 |
| 675a349e1b |
| 16927a6cbb |
| ab353d6b7d |
| 951b055b7f |
| 8e256b3399 |
| 74f2760771 |
| 85e0912291 |
| f2c582c75d |
| c908ee8133 |
| 175e76b302 |
| c520749a71 |
| 584f52d5be |
| d05c3bdf36 |
| 3ab0c9516f |
| df6f0f477f |
| 2dc30ca1fb |
| 5cbda9f1c7 |
| 711506349d |
| 572921a44a |
| 7da64958f0 |
| 49e965fd63 |
| c09b95fafd |
| 88f09f2eac |
| 568c09ce3a |
| 79f6c538c1 |
| 3400e6d9db |
| fbea9e74c2 |
| 4d929827ac |
| 37b3641f00 |
| 3d940f1d1b |
| f0d2a3e2fe |
| 7d405a43b1 |
| 2dbe845f21 |
| 4efa225daa |
| 0ee1955d5b |
| a0eb50be3e |
| 2da9d7577f |
| d5e2a071c1 |
| 633dd420d9 |
| 780b4e08cb |
| 281185e49d |
| cb240b4f4c |
| ff5ee88ac0 |
| c3ef958116 |
| 727806f483 |
| ec6c9d3637 |
| c5e88dbbda |
| c3e4d65dd1 |
| 16207ae32f |
| dbf2ca1b0a |
| 0e204784ee |
| c44406e091 |
| 1da82633b2 |
| ad6636f09c |
| 8f4b366c0f |
| 5f8960f5ef |
| d9200eb55d |
| ccdc1d3777 |
| 197c4ddcbd |
| 7287947535 |
| b0be164419 |
| fab06842d5 |
| 9b4eafc54a |
| 9becb117b2 |
| 87ab3e4d16 |
| 9b8305cc11 |
| 728e5720cc |
| 6c11685863 |
| 0f783a4ce0 |
| 02dba17a59 |
| 860e7307bc |
| 7b087b8ac5 |
| 15f39300c0 |
| f6f69516ac |
| 5f9dd18a87 |
| 7d8639b4a8 |
| 6e723ff755 |
| bff97ed4cc |
| 97e2959452 |
| 91970658ec |
| fc2585af00 |
| f95edd3a85 |
| 2b9ed76734 |
| 5a041bff3d |
| f61664c6f8 |
| e2f05e9328 |
| ea0b53b150 |
| 56664826b2 |
| bbb49dec38 |
| 8211ebf759 |
| 1f3121b6b2 |
| d7820faf7c |
| 5d29d52445 |
| be11187e09 |
| 9044091e21 |
| 170ba8dfcb |
| e3f1b6fc54 |
| 7ac05528ca |
| 64f411f0fb |
| 2e2dd96ae4 |
| a311291294 |
| 5221787303 |
| 63d0c7fcae |
| 8abef50e97 |
| 0306e75a5f |
| 003de78ce0 |
| 9502d500b6 |
| 2a88e08296 |
| e1ee34e55c |
| 2ad1ef651b |
| 089675af45 |
| 03ff0d471e |
| cd96136bcb |
| 567fda57df |
| b14f356c96 |
| 874d77bc75 |
| ec0befb938 |
| 273e90a86f |
| 59dc65a7b4 |
| 8ff8cafc18 |
| ba07347b60 |
| e423902617 |
| 018f836c4d |
| f4466c4760 |
| 9a5cbe3568 |
| 5b7b15fb2d |
| e5a92a1fab |
| 74f0b6443a |
| 6c16b3492b |
| 1831431bab |
| 765f63c6b9 |
| 062306fc0b |
| 8fcf7f1d78 |
| f06b3d922c |
| b75c012c55 |
| 1efe6de3fe |
| 39270ed4a0 |
| 81ffa550ee |
| 8c2965e017 |
| 7aa38b4ac8 |
| 6dd320f791 |
| ecac165d36 |
| 132d90d1bc |
| 58356313ac |
| 99cfd3e4d7 |
| 780588bd33 |
.github/workflows/backport.yml (2 changes)

@@ -9,7 +9,7 @@ on:
 
 jobs:
   backport:
-    runs-on: ubuntu-18.04
+    runs-on: ubuntu-20.04
     name: Backport
     steps:
      - name: Backport
.github/workflows/backport_issue_check.yml (new file, 30 additions)

@@ -0,0 +1,30 @@
+name: Backport Issue Check
+
+on:
+  pull_request_target:
+    branches:
+      - v*-branch
+
+jobs:
+  backport:
+    name: Backport Issue Check
+    runs-on: ubuntu-22.04
+
+    steps:
+      - name: Check out source code
+        uses: actions/checkout@v3
+
+      - name: Install Python dependencies
+        run: |
+          sudo pip3 install -U setuptools wheel pip
+          pip3 install -U pygithub
+
+      - name: Run backport issue checker
+        env:
+          GITHUB_TOKEN: ${{ secrets.ZB_GITHUB_TOKEN }}
+        run: |
+          ./scripts/release/list_backports.py \
+            -o ${{ github.event.repository.owner.login }} \
+            -r ${{ github.event.repository.name }} \
+            -b ${{ github.event.pull_request.base.ref }} \
+            -p ${{ github.event.pull_request.number }}
@@ -8,7 +8,7 @@ on:
 jobs:
   bluetooth-test-results:
     name: "Publish Bluetooth Test Results"
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
     if: github.event.workflow_run.conclusion != 'skipped'
 
     steps:
.github/workflows/bluetooth-tests.yaml (22 changes)

@@ -10,17 +10,13 @@ on:
       - "soc/posix/**"
       - "arch/posix/**"
 
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.head_ref || github.ref }}
+  cancel-in-progress: true
+
 jobs:
-  bluetooth-test-prep:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Cancel Previous Runs
-        uses: styfle/cancel-workflow-action@0.6.0
-        with:
-          access_token: ${{ github.token }}
-  bluetooth-test-build:
-    runs-on: ubuntu-latest
-    needs: bluetooth-test-prep
+  bluetooth-test:
+    runs-on: ubuntu-20.04
     container:
       image: zephyrprojectrtos/ci:v0.18.4
       options: '--entrypoint /bin/bash'

@@ -38,7 +34,7 @@ jobs:
           echo "$HOME/.local/bin" >> $GITHUB_PATH
 
       - name: checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
 
       - name: west setup
         run: |

@@ -55,7 +51,7 @@ jobs:
 
       - name: Upload Test Results
         if: always()
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v3
         with:
           name: bluetooth-test-results
           path: |

@@ -64,7 +60,7 @@ jobs:
 
       - name: Upload Event Details
         if: always()
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v3
         with:
           name: event
           path: |
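A note on the recurring pattern above, repeated across most workflows in this comparison: the dedicated "cancel previous runs" job built on styfle/cancel-workflow-action is dropped in favor of GitHub's native concurrency groups. A minimal sketch of the idiom, with an illustrative workflow and job name (not a file from this diff):

```yaml
# Runs sharing the same group key are serialized; cancel-in-progress aborts
# the superseded run when a newer one starts, so no cancel job is needed.
name: example-ci

on: pull_request

concurrency:
  group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.head_ref || github.ref }}
  cancel-in-progress: true

jobs:
  build:
    runs-on: ubuntu-20.04
    steps:
      - uses: actions/checkout@v3
```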
.github/workflows/clang.yaml (38 changes)

@@ -2,22 +2,18 @@ name: Build with Clang/LLVM
 
 on: pull_request_target
 
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.head_ref || github.ref }}
+  cancel-in-progress: true
+
 jobs:
-  clang-build-prep:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Cancel Previous Runs
-        uses: styfle/cancel-workflow-action@0.6.0
-        with:
-          access_token: ${{ github.token }}
   clang-build:
-    runs-on: zephyr_runner
-    needs: clang-build-prep
+    runs-on: zephyr-runner-linux-x64-4xlarge
     container:
       image: zephyrprojectrtos/ci:v0.18.4
       options: '--entrypoint /bin/bash'
       volumes:
-        - /home/runners/zephyrproject:/github/cache/zephyrproject
+        - /repo-cache/zephyrproject:/github/cache/zephyrproject
     strategy:
       fail-fast: false
       matrix:

@@ -30,12 +26,14 @@ jobs:
     outputs:
       report_needed: ${{ steps.twister.outputs.report_needed }}
     steps:
-      - name: Cleanup
+      - name: Clone cached Zephyr repository
+        continue-on-error: true
         run: |
-          # hotfix, until we have a better way to deal with existing data
-          rm -rf zephyr zephyr-testing
+          git clone --shared /github/cache/zephyrproject/zephyr .
+          git remote set-url origin ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY}
 
       - name: Checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
         with:
           ref: ${{ github.event.pull_request.head.sha }}
           fetch-depth: 0

@@ -72,7 +70,7 @@ jobs:
           string(TIMESTAMP current_date "%Y-%m-%d-%H;%M;%S" UTC)
           string(REPLACE "/" "_" repo ${{github.repository}})
           string(REPLACE "-" "_" repo2 ${repo})
-          message("::set-output name=repo::${repo2}")
+          file(APPEND $ENV{GITHUB_OUTPUT} "repo=${repo2}\n")
       - name: use cache
         id: cache-ccache
         uses: nashif/action-s3-cache@master

@@ -80,8 +78,8 @@ jobs:
          key: ${{ steps.ccache_cache_timestamp.outputs.repo }}-${{ github.ref_name }}-clang-${{ matrix.platform }}-ccache
          path: /github/home/.ccache
          aws-s3-bucket: ccache.zephyrproject.org
-          aws-access-key-id: ${{ secrets.CCACHE_S3_ACCESS_KEY_ID }}
-          aws-secret-access-key: ${{ secrets.CCACHE_S3_SECRET_ACCESS_KEY }}
+          aws-access-key-id: ${{ vars.AWS_CCACHE_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ secrets.AWS_CCACHE_SECRET_ACCESS_KEY }}
          aws-region: us-east-2
 
       - name: ccache stats initial

@@ -100,12 +98,12 @@ jobs:
 
           # We can limit scope to just what has changed
           if [ -s testplan.csv ]; then
-            echo "::set-output name=report_needed::1";
+            echo "report_needed=1" >> $GITHUB_OUTPUT
             # Full twister but with options based on changes
             ./scripts/twister --inline-logs -M -N -v --load-tests testplan.csv --retry-failed 2
           else
             # if nothing is run, skip reporting step
-            echo "::set-output name=report_needed::0";
+            echo "report_needed=0" >> $GITHUB_OUTPUT
           fi
 
       - name: ccache stats post

@@ -114,7 +112,7 @@ jobs:
 
       - name: Upload Unit Test Results
         if: always() && steps.twister.outputs.report_needed != 0
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v3
         with:
           name: Unit Test Results (Subset ${{ matrix.platform }})
           path: twister-out/twister.xml
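The `::set-output` rewrites above follow GitHub's deprecation of workflow commands: step outputs are now declared by appending `key=value` lines to the file named by `$GITHUB_OUTPUT` (or, from CMake-script steps, via `file(APPEND $ENV{GITHUB_OUTPUT} ...)` as in the hunks above). A minimal sketch with illustrative step and output names:

```yaml
name: example-outputs

on: workflow_dispatch

jobs:
  example:
    runs-on: ubuntu-20.04
    steps:
      - name: produce
        id: producer
        run: |
          # old, deprecated form: echo "::set-output name=report_needed::1"
          echo "report_needed=1" >> $GITHUB_OUTPUT

      - name: consume
        if: steps.producer.outputs.report_needed == '1'
        run: echo "report step runs"
```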
.github/workflows/codecov.yaml (40 changes)

@@ -4,22 +4,18 @@ on:
   schedule:
     - cron: '25 */3 * * 1-5'
 
-jobs:
-  codecov-prep:
-    runs-on: ubuntu-latest
-    if: github.repository == 'zephyrproject-rtos/zephyr'
-    steps:
-      - name: Cancel Previous Runs
-        uses: styfle/cancel-workflow-action@0.6.0
-        with:
-          access_token: ${{ github.token }}
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.head_ref || github.ref }}
+  cancel-in-progress: true
+
+jobs:
   codecov:
-    runs-on: zephyr_runner
-    needs: codecov-prep
+    runs-on: zephyr-runner-linux-x64-4xlarge
     container:
       image: zephyrprojectrtos/ci:v0.18.4
       options: '--entrypoint /bin/bash'
       volumes:
+        - /repo-cache/zephyrproject:/github/cache/zephyrproject
     strategy:
       fail-fast: false
       matrix:

@@ -32,8 +28,14 @@ jobs:
         run: |
           echo "$HOME/.local/bin" >> $GITHUB_PATH
 
+      - name: Clone cached Zephyr repository
+        continue-on-error: true
+        run: |
+          git clone --shared /github/cache/zephyrproject/zephyr .
+          git remote set-url origin ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY}
+
       - name: checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
         with:
           fetch-depth: 0

@@ -54,7 +56,7 @@ jobs:
         run: |
           string(REPLACE "/" "_" repo ${{github.repository}})
           string(REPLACE "-" "_" repo2 ${repo})
-          message("::set-output name=repo::${repo2}")
+          file(APPEND $ENV{GITHUB_OUTPUT} "repo=${repo2}\n")
 
       - name: use cache
         id: cache-ccache

@@ -63,8 +65,8 @@ jobs:
          key: ${{ steps.ccache_cache_prop.outputs.repo }}-${{github.event_name}}-${{matrix.platform}}-codecov-ccache
          path: /github/home/.ccache
          aws-s3-bucket: ccache.zephyrproject.org
-          aws-access-key-id: ${{ secrets.CCACHE_S3_ACCESS_KEY_ID }}
-          aws-secret-access-key: ${{ secrets.CCACHE_S3_SECRET_ACCESS_KEY }}
+          aws-access-key-id: ${{ vars.AWS_CCACHE_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ secrets.AWS_CCACHE_SECRET_ACCESS_KEY }}
          aws-region: us-east-2
 
       - name: ccache stats initial

@@ -94,7 +96,7 @@ jobs:
 
       - name: Upload Coverage Results
         if: always()
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v3
         with:
           name: Coverage Data (Subset ${{ matrix.platform }})
           path: coverage/reports/${{ matrix.platform }}.info

@@ -108,7 +110,7 @@ jobs:
 
     steps:
       - name: checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
         with:
           fetch-depth: 0
       - name: Download Artifacts

@@ -144,8 +146,8 @@ jobs:
             set(MERGELIST "${MERGELIST} -a ${f}")
           endif()
         endforeach()
-        message("::set-output name=mergefiles::${MERGELIST}")
-        message("::set-output name=covfiles::${FILELIST}")
+        file(APPEND $ENV{GITHUB_OUTPUT} "mergefiles=${MERGELIST}\n")
+        file(APPEND $ENV{GITHUB_OUTPUT} "covfiles=${FILELIST}\n")
 
       - name: Merge coverage files
         run: |
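The AWS credential hunks in this and the surrounding workflows all make the same split: the access-key ID moves from the `secrets` context to the `vars` context (repository/organization configuration variables), while the secret key remains an encrypted secret. An illustrative sketch of the resulting step shape:

```yaml
name: example-aws

on: workflow_dispatch

jobs:
  configure:
    runs-on: ubuntu-20.04
    steps:
      - name: Configure AWS Credentials
        uses: aws-actions/configure-aws-credentials@v1
        with:
          # non-sensitive identifier: a plain configuration variable
          aws-access-key-id: ${{ vars.AWS_CCACHE_ACCESS_KEY_ID }}
          # sensitive value: an encrypted secret, masked in logs
          aws-secret-access-key: ${{ secrets.AWS_CCACHE_SECRET_ACCESS_KEY }}
          aws-region: us-east-2
```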
.github/workflows/coding_guidelines.yml (6 changes)

@@ -4,17 +4,17 @@ on: pull_request
 
 jobs:
   compliance_job:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
     name: Run coding guidelines checks on patch series (PR)
     steps:
       - name: Checkout the code
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
         with:
           ref: ${{ github.event.pull_request.head.sha }}
           fetch-depth: 0
 
       - name: cache-pip
-        uses: actions/cache@v1
+        uses: actions/cache@v3
         with:
           path: ~/.cache/pip
           key: ${{ runner.os }}-doc-pip
.github/workflows/compliance.yml (12 changes)

@@ -4,11 +4,11 @@ on: pull_request
 
 jobs:
   maintainer_check:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
     name: Check MAINTAINERS file
     steps:
       - name: Checkout the code
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
         with:
           ref: ${{ github.event.pull_request.head.sha }}
           fetch-depth: 0

@@ -20,7 +20,7 @@ jobs:
           python3 ./scripts/get_maintainer.py path CMakeLists.txt
 
   check_compliance:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
     name: Run compliance checks on patch series (PR)
     steps:
       - name: Update PATH for west

@@ -28,13 +28,13 @@ jobs:
           echo "$HOME/.local/bin" >> $GITHUB_PATH
 
       - name: Checkout the code
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
         with:
           ref: ${{ github.event.pull_request.head.sha }}
           fetch-depth: 0
 
       - name: cache-pip
-        uses: actions/cache@v1
+        uses: actions/cache@v3
         with:
           path: ~/.cache/pip
           key: ${{ runner.os }}-doc-pip

@@ -72,7 +72,7 @@ jobs:
           ./scripts/ci/check_compliance.py -m Codeowners -m Devicetree -m Gitlint -m Identity -m Nits -m pylint -m checkpatch -m Kconfig -c origin/${BASE_REF}..
 
       - name: upload-results
-        uses: actions/upload-artifact@master
+        uses: actions/upload-artifact@v3
         continue-on-error: True
         with:
           name: compliance.xml
.github/workflows/daily_test_version.yml (8 changes)

@@ -12,15 +12,15 @@ on:
 
 jobs:
   get_version:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
     if: github.repository == 'zephyrproject-rtos/zephyr'
 
     steps:
       - name: Configure AWS Credentials
         uses: aws-actions/configure-aws-credentials@v1
         with:
-          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID_TESTING }}
-          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_TESTING }}
+          aws-access-key-id: ${{ vars.AWS_TESTING_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ secrets.AWS_TESTING_SECRET_ACCESS_KEY }}
           aws-region: us-east-1
 
       - name: install-pip

@@ -28,7 +28,7 @@ jobs:
         pip3 install gitpython
 
       - name: checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
         with:
           fetch-depth: 0
 
.github/workflows/devicetree_checks.yml (20 changes)

@@ -6,10 +6,14 @@ name: Devicetree script tests
 
 on:
   push:
+    branches:
+      - v2.7-branch
     paths:
       - 'scripts/dts/**'
       - '.github/workflows/devicetree_checks.yml'
   pull_request:
+    branches:
+      - v2.7-branch
     paths:
       - 'scripts/dts/**'
       - '.github/workflows/devicetree_checks.yml'

@@ -21,20 +25,22 @@ jobs:
     strategy:
       matrix:
         python-version: [3.6, 3.7, 3.8]
-        os: [ubuntu-latest, macos-latest, windows-latest]
+        os: [ubuntu-20.04, macos-11, windows-2022]
         exclude:
-          - os: macos-latest
+          - os: macos-11
             python-version: 3.6
+          - os: windows-2022
+            python-version: 3.6
     steps:
       - name: checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
       - name: Set up Python ${{ matrix.python-version }}
-        uses: actions/setup-python@v1
+        uses: actions/setup-python@v4
         with:
           python-version: ${{ matrix.python-version }}
       - name: cache-pip-linux
         if: startsWith(runner.os, 'Linux')
-        uses: actions/cache@v1
+        uses: actions/cache@v3
         with:
           path: ~/.cache/pip
           key: ${{ runner.os }}-pip-${{ matrix.python-version }}

@@ -42,7 +48,7 @@ jobs:
             ${{ runner.os }}-pip-${{ matrix.python-version }}
       - name: cache-pip-mac
         if: startsWith(runner.os, 'macOS')
-        uses: actions/cache@v1
+        uses: actions/cache@v3
         with:
           path: ~/Library/Caches/pip
           # Trailing '-' was just to get a different cache name

@@ -51,7 +57,7 @@ jobs:
             ${{ runner.os }}-pip-${{ matrix.python-version }}-
       - name: cache-pip-win
         if: startsWith(runner.os, 'Windows')
-        uses: actions/cache@v1
+        uses: actions/cache@v3
         with:
           path: ~\AppData\Local\pip\Cache
           key: ${{ runner.os }}-pip-${{ matrix.python-version }}
.github/workflows/doc-build.yml (62 changes)

@@ -5,10 +5,10 @@ name: Documentation Build
 
 on:
   schedule:
-  - cron: '0 */3 * * *'
+    - cron: '0 */3 * * *'
   push:
     tags:
-    - v*
+      - v*
   pull_request:
     paths:
       - 'doc/**'

@@ -34,18 +34,23 @@ env:
 jobs:
   doc-build-html:
     name: "Documentation Build (HTML)"
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
     timeout-minutes: 30
 
+    concurrency:
+      group: doc-build-html-${{ github.ref }}
+      cancel-in-progress: true
+
     steps:
       - name: checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
 
       - name: install-pkgs
         run: |
           sudo apt-get install -y ninja-build doxygen graphviz
 
       - name: cache-pip
-        uses: actions/cache@v1
+        uses: actions/cache@v3
         with:
           path: ~/.cache/pip
           key: pip-${{ hashFiles('scripts/requirements-doc.txt') }}

@@ -69,38 +74,71 @@ jobs:
             DOC_TAG="development"
           fi
 
-          DOC_TAG=${DOC_TAG} SPHINXOPTS="-q -W -j auto" make -C doc html
+          if [[ "${{ github.event_name }}" == "pull_request" ]]; then
+            DOC_TARGET="html-fast"
+          else
+            DOC_TARGET="html"
+          fi
+
+          DOC_TAG=${DOC_TAG} SPHINXOPTS="-q -W" make -C doc ${DOC_TARGET}
+
+      - name: compress-docs
+        run: |
+          tar cfJ html-output.tar.xz --directory=doc/_build html
 
       - name: upload-build
-        uses: actions/upload-artifact@master
+        uses: actions/upload-artifact@v3
         with:
           name: html-output
           path: html-output.tar.xz
 
+      - name: process-pr
+        if: github.event_name == 'pull_request'
+        run: |
+          REPO_NAME="${{ github.event.repository.name }}"
+          PR_NUM="${{ github.event.pull_request.number }}"
+          DOC_URL="https://builds.zephyrproject.io/${REPO_NAME}/pr/${PR_NUM}/docs/"
+
+          echo "${PR_NUM}" > pr_num
+          echo "::notice:: Documentation will be available shortly at: ${DOC_URL}"
+
+      - name: upload-pr-number
+        uses: actions/upload-artifact@v3
+        if: github.event_name == 'pull_request'
+        with:
+          name: pr_num
+          path: pr_num
+
   doc-build-pdf:
     name: "Documentation Build (PDF)"
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
     container: texlive/texlive:latest
     timeout-minutes: 30
+    concurrency:
+      group: doc-build-pdf-${{ github.ref }}
+      cancel-in-progress: true
 
     steps:
       - name: checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
 
       - name: install-pkgs
         run: |
           apt-get update
-          apt-get install -y python3-pip ninja-build doxygen graphviz librsvg2-bin
+          apt-get install -y python3-pip python3-venv ninja-build doxygen graphviz librsvg2-bin
 
       - name: cache-pip
-        uses: actions/cache@v1
+        uses: actions/cache@v3
         with:
           path: ~/.cache/pip
           key: pip-${{ hashFiles('scripts/requirements-doc.txt') }}
 
+      - name: setup-venv
+        run: |
+          python3 -m venv .venv
+          . .venv/bin/activate
+          echo PATH=$PATH >> $GITHUB_ENV
+
       - name: install-pip
         run: |
           pip3 install -U setuptools wheel pip

@@ -123,7 +161,7 @@ jobs:
           DOC_TAG=${DOC_TAG} SPHINXOPTS="-q -j auto" LATEXMKOPTS="-quiet -halt-on-error" make -C doc pdf
 
       - name: upload-build
-        uses: actions/upload-artifact@master
+        uses: actions/upload-artifact@v3
         with:
           name: pdf-output
           path: doc/_build/latex/zephyr.pdf
.github/workflows/doc-publish-pr.yml (new file, 63 additions)

@@ -0,0 +1,63 @@
+# Copyright (c) 2020 Linaro Limited.
+# Copyright (c) 2021 Nordic Semiconductor ASA
+# SPDX-License-Identifier: Apache-2.0
+
+name: Documentation Publish (Pull Request)
+
+on:
+  workflow_run:
+    workflows: ["Documentation Build"]
+    types:
+      - completed
+
+jobs:
+  doc-publish:
+    name: Publish Documentation
+    runs-on: ubuntu-20.04
+    if: |
+      github.event.workflow_run.event == 'pull_request' &&
+      github.event.workflow_run.conclusion == 'success' &&
+      github.repository == 'zephyrproject-rtos/zephyr'
+
+    steps:
+      - name: Download artifacts
+        uses: dawidd6/action-download-artifact@v2
+        with:
+          workflow: doc-build.yml
+          run_id: ${{ github.event.workflow_run.id }}
+
+      - name: Load PR number
+        run: |
+          echo "PR_NUM=$(<pr_num/pr_num)" >> $GITHUB_ENV
+
+      - name: Check PR number
+        id: check-pr
+        uses: carpentries/actions/check-valid-pr@v0.8
+        with:
+          pr: ${{ env.PR_NUM }}
+          sha: ${{ github.event.workflow_run.head_sha }}
+
+      - name: Validate PR number
+        if: steps.check-pr.outputs.VALID != 'true'
+        run: |
+          echo "ABORT: PR number validation failed!"
+          exit 1
+
+      - name: Uncompress HTML docs
+        run: |
+          tar xf html-output/html-output.tar.xz -C html-output
+
+      - name: Configure AWS Credentials
+        uses: aws-actions/configure-aws-credentials@v1
+        with:
+          aws-access-key-id: ${{ vars.AWS_BUILDS_ZEPHYR_PR_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ secrets.AWS_BUILDS_ZEPHYR_PR_SECRET_ACCESS_KEY }}
+          aws-region: us-east-1
+
+      - name: Upload to AWS S3
+        env:
+          HEAD_BRANCH: ${{ github.event.workflow_run.head_branch }}
+        run: |
+          aws s3 sync --quiet html-output/html \
+            s3://builds.zephyrproject.org/${{ github.event.repository.name }}/pr/${PR_NUM}/docs \
+            --delete
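The new workflow above implements the usual privilege split for untrusted pull requests: the unprivileged "Documentation Build" run uploads the HTML and the PR number as artifacts, and this separate `workflow_run` consumer, which holds the publishing credentials, downloads and validates them before publishing. A stripped-down sketch of the consumer side (workflow and job names are illustrative):

```yaml
name: example-publish

on:
  workflow_run:
    workflows: ["Documentation Build"]
    types:
      - completed

jobs:
  publish:
    runs-on: ubuntu-20.04
    # act only on successful pull-request builds
    if: |
      github.event.workflow_run.event == 'pull_request' &&
      github.event.workflow_run.conclusion == 'success'
    steps:
      # fetch artifacts produced by the triggering, unprivileged run
      - uses: dawidd6/action-download-artifact@v2
        with:
          workflow: doc-build.yml
          run_id: ${{ github.event.workflow_run.id }}
```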
.github/workflows/doc-publish.yml (16 changes)

@@ -2,23 +2,21 @@
 # Copyright (c) 2021 Nordic Semiconductor ASA
 # SPDX-License-Identifier: Apache-2.0
 
-name: Publish Documentation
+name: Documentation Publish
 
 on:
   workflow_run:
     workflows: ["Documentation Build"]
     branches:
-    - main
-    - v*
-    tags:
-    - v*
+      - main
+      - v*
     types:
-    - completed
+      - completed
 
 jobs:
   doc-publish:
     name: Publish Documentation
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
     if: ${{ github.event.workflow_run.conclusion == 'success' }}
 
     steps:

@@ -34,8 +32,8 @@ jobs:
       - name: Configure AWS Credentials
         uses: aws-actions/configure-aws-credentials@v1
         with:
-          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
-          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+          aws-access-key-id: ${{ vars.AWS_DOCS_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ secrets.AWS_DOCS_SECRET_ACCESS_KEY }}
           aws-region: us-east-1
 
       - name: Upload to AWS S3
.github/workflows/errno.yml (4 changes)

@@ -6,13 +6,13 @@ on:
 
 jobs:
   check-errno:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
     container:
       image: zephyrprojectrtos/ci:v0.18.4
 
     steps:
       - name: checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
 
       - name: Run errno.py
         run: |
.github/workflows/footprint-tracking.yml (21 changes)

@@ -13,19 +13,14 @@ on:
       # same commit
       - 'v*'
 
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.head_ref || github.ref }}
+  cancel-in-progress: true
+
 jobs:
-  footprint-tracking-cancel:
-    runs-on: ubuntu-latest
-    if: github.repository == 'zephyrproject-rtos/zephyr'
-    steps:
-      - name: Cancel Previous Runs
-        uses: styfle/cancel-workflow-action@0.6.0
-        with:
-          access_token: ${{ github.token }}
   footprint-tracking:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
     if: github.repository == 'zephyrproject-rtos/zephyr'
-    needs: footprint-tracking-cancel
     container:
       image: zephyrprojectrtos/ci:v0.18.4
       options: '--entrypoint /bin/bash'

@@ -44,7 +39,7 @@ jobs:
         sudo pip3 install -U setuptools wheel pip gitpython
 
       - name: checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
         with:
           ref: ${{ github.event.pull_request.head.sha }}
           fetch-depth: 0

@@ -58,8 +53,8 @@ jobs:
       - name: Configure AWS Credentials
         uses: aws-actions/configure-aws-credentials@v1
         with:
-          aws-access-key-id: ${{ secrets.FOOTPRINT_AWS_KEY_ID }}
-          aws-secret-access-key: ${{ secrets.FOOTPRINT_AWS_ACCESS_KEY }}
+          aws-access-key-id: ${{ vars.AWS_TESTING_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ secrets.AWS_TESTING_SECRET_ACCESS_KEY }}
           aws-region: us-east-1
 
       - name: Record Footprint
.github/workflows/footprint.yml (21 changes)

@@ -2,19 +2,14 @@ name: Footprint Delta
 
 on: pull_request
 
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.head_ref || github.ref }}
+  cancel-in-progress: true
+
 jobs:
-  footprint-cancel:
-    runs-on: ubuntu-latest
-    if: github.repository == 'zephyrproject-rtos/zephyr'
-    steps:
-      - name: Cancel Previous Runs
-        uses: styfle/cancel-workflow-action@0.6.0
-        with:
-          access_token: ${{ github.token }}
   footprint-delta:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
     if: github.repository == 'zephyrproject-rtos/zephyr'
-    needs: footprint-cancel
     container:
       image: zephyrprojectrtos/ci:v0.18.4
       options: '--entrypoint /bin/bash'

@@ -25,16 +20,12 @@ jobs:
       CLANG_ROOT_DIR: /usr/lib/llvm-12
       ZEPHYR_TOOLCHAIN_VARIANT: zephyr
     steps:
-      - name: Cancel Previous Runs
-        uses: styfle/cancel-workflow-action@0.6.0
-        with:
-          access_token: ${{ github.token }}
       - name: Update PATH for west
         run: |
           echo "$HOME/.local/bin" >> $GITHUB_PATH
 
       - name: checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
         with:
           ref: ${{ github.event.pull_request.head.sha }}
           fetch-depth: 0
.github/workflows/issue_count.yml (10 changes)

@@ -14,13 +14,13 @@ env:
 jobs:
   track-issues:
     name: "Collect Issue Stats"
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
     if: github.repository == 'zephyrproject-rtos/zephyr'
 
     steps:
       - name: Download configuration file
         run: |
-          wget -q https://raw.githubusercontent.com/$GITHUB_REPOSITORY/master/.github/workflows/issues-report-config.json
+          wget -q https://raw.githubusercontent.com/$GITHUB_REPOSITORY/main/.github/workflows/issues-report-config.json
 
       - name: install-packages
         run: |

@@ -34,7 +34,7 @@ jobs:
           token: ${{ secrets.GITHUB_TOKEN }}
 
       - name: upload-stats
-        uses: actions/upload-artifact@master
+        uses: actions/upload-artifact@v3
         continue-on-error: True
         with:
           name: ${{ env.OUTPUT_FILE_NAME }}

@@ -43,8 +43,8 @@ jobs:
       - name: Configure AWS Credentials
         uses: aws-actions/configure-aws-credentials@v1
         with:
-          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID_TESTING }}
-          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_TESTING }}
+          aws-access-key-id: ${{ vars.AWS_TESTING_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ secrets.AWS_TESTING_SECRET_ACCESS_KEY }}
           aws-region: us-east-1
 
       - name: Post Results
.github/workflows/labeler.yml (4 changes)

@@ -7,6 +7,4 @@ jobs:
     name: Pull Request Labeler
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/labeler@v2.1.1
-        with:
-          repo-token: '${{ secrets.GITHUB_TOKEN }}'
+      - uses: actions/labeler@v4
.github/workflows/license_check.yml (4 changes)

@@ -4,7 +4,7 @@ on: [pull_request]
 
 jobs:
   scancode_job:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
     name: Scan code for licenses
     steps:
       - name: Checkout the code

@@ -15,7 +15,7 @@ jobs:
         with:
           directory-to-scan: 'scan/'
       - name: Artifact Upload
-        uses: actions/upload-artifact@v1
+        uses: actions/upload-artifact@v3
         with:
           name: scancode
           path: ./artifacts
.github/workflows/manifest.yml (4 changes)

@@ -6,11 +6,11 @@ on:
 
 jobs:
   contribs:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-22.04
     name: Manifest
     steps:
       - name: Checkout the code
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
         with:
           path: zephyrproject/zephyr
           ref: ${{ github.event.pull_request.head.sha }}
.github/workflows/release.yml (9 changes)

@@ -7,15 +7,16 @@ on:
 
 jobs:
   release:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
         with:
           fetch-depth: 0
 
       - name: Get the version
         id: get_version
-        run: echo ::set-output name=VERSION::${GITHUB_REF#refs/tags/}
+        run: |
+          echo "VERSION=${GITHUB_REF#refs/tags/}" >> $GITHUB_OUTPUT
 
       - name: REUSE Compliance Check
         uses: fsfe/reuse-action@v1

@@ -23,7 +24,7 @@ jobs:
           args: spdx -o zephyr-${{ steps.get_version.outputs.VERSION }}.spdx
 
       - name: upload-results
-        uses: actions/upload-artifact@master
+        uses: actions/upload-artifact@v3
         continue-on-error: True
         with:
           name: zephyr-${{ steps.get_version.outputs.VERSION }}.spdx
.github/workflows/stale_issue.yml (2 changes)

@@ -6,7 +6,7 @@ on:
 jobs:
   stale:
     name: Find Stale issues and PRs
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
     if: github.repository == 'zephyrproject-rtos/zephyr'
     steps:
       - uses: actions/stale@v3
.github/workflows/twister.yaml (59 changes)

@@ -2,29 +2,27 @@ name: Run tests with twister
 
 on:
   push:
     branches:
       - v2.7-branch
   pull_request_target:
     branches:
       - v2.7-branch
   schedule:
     # Run at 00:00 on Saturday
     - cron: '20 0 * * 6'
 
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.head_ref || github.ref }}
+  cancel-in-progress: true
+
 jobs:
-  twister-build-cleanup:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Cancel Previous Runs
-        uses: styfle/cancel-workflow-action@0.6.0
-        with:
-          access_token: ${{ github.token }}
-
   twister-build-prep:
-    runs-on: zephyr_runner
-    needs: twister-build-cleanup
+    runs-on: zephyr-runner-linux-x64-4xlarge
     container:
       image: zephyrprojectrtos/ci:v0.18.4
       options: '--entrypoint /bin/bash'
       volumes:
-        - /home/runners/zephyrproject:/github/cache/zephyrproject
+        - /repo-cache/zephyrproject:/github/cache/zephyrproject
     outputs:
       subset: ${{ steps.output-services.outputs.subset }}
       size: ${{ steps.output-services.outputs.size }}

@@ -38,14 +36,16 @@ jobs:
       COMMIT_RANGE: ${{ github.event.pull_request.base.sha }}..${{ github.event.pull_request.head.sha }}
       BASE_REF: ${{ github.base_ref }}
     steps:
-      - name: Cleanup
+      - name: Clone cached Zephyr repository
         if: github.event_name == 'pull_request_target'
+        continue-on-error: true
         run: |
-          # hotfix, until we have a better way to deal with existing data
-          rm -rf zephyr zephyr-testing
+          git clone --shared /github/cache/zephyrproject/zephyr .
+          git remote set-url origin ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY}
 
       - name: Checkout
         if: github.event_name == 'pull_request_target'
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
         with:
           ref: ${{ github.event.pull_request.head.sha }}
           fetch-depth: 0

@@ -102,18 +102,18 @@ jobs:
           else
             size=0
           fi
-          echo "::set-output name=subset::${subset}";
-          echo "::set-output name=size::${size}";
+          echo "subset=${subset}" >> $GITHUB_OUTPUT
+          echo "size=${size}" >> $GITHUB_OUTPUT
 
   twister-build:
-    runs-on: zephyr_runner
+    runs-on: zephyr-runner-linux-x64-4xlarge
     needs: twister-build-prep
     if: needs.twister-build-prep.outputs.size != 0
     container:
       image: zephyrprojectrtos/ci:v0.18.4
       options: '--entrypoint /bin/bash'
       volumes:
-        - /home/runners/zephyrproject:/github/cache/zephyrproject
+        - /repo-cache/zephyrproject:/github/cache/zephyrproject
     strategy:
       fail-fast: false
       matrix:

@@ -128,13 +128,14 @@ jobs:
       COMMIT_RANGE: ${{ github.event.pull_request.base.sha }}..${{ github.event.pull_request.head.sha }}
       BASE_REF: ${{ github.base_ref }}
     steps:
-      - name: Cleanup
+      - name: Clone cached Zephyr repository
+        continue-on-error: true
         run: |
-          # hotfix, until we have a better way to deal with existing data
-          rm -rf zephyr zephyr-testing
+          git clone --shared /github/cache/zephyrproject/zephyr .
+          git remote set-url origin ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY}
 
       - name: Checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
         with:
           ref: ${{ github.event.pull_request.head.sha }}
           fetch-depth: 0

@@ -173,7 +174,7 @@ jobs:
           string(TIMESTAMP current_date "%Y-%m-%d-%H;%M;%S" UTC)
           string(REPLACE "/" "_" repo ${{github.repository}})
           string(REPLACE "-" "_" repo2 ${repo})
-          message("::set-output name=repo::${repo2}")
+          file(APPEND $ENV{GITHUB_OUTPUT} "repo=${repo2}\n")
 
       - name: use cache
         id: cache-ccache

@@ -182,8 +183,8 @@ jobs:
          key: ${{ steps.ccache_cache_timestamp.outputs.repo }}-${{ github.ref_name }}-${{github.event_name}}-${{ matrix.subset }}-ccache
          path: /github/home/.ccache
          aws-s3-bucket: ccache.zephyrproject.org
-          aws-access-key-id: ${{ secrets.CCACHE_S3_ACCESS_KEY_ID }}
-          aws-secret-access-key: ${{ secrets.CCACHE_S3_SECRET_ACCESS_KEY }}
+          aws-access-key-id: ${{ vars.AWS_CCACHE_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ secrets.AWS_CCACHE_SECRET_ACCESS_KEY }}
          aws-region: us-east-2
 
       - name: ccache stats initial

@@ -220,7 +221,7 @@ jobs:
 
       - name: Upload Unit Test Results
         if: always()
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v3
         with:
           name: Unit Test Results (Subset ${{ matrix.subset }})
           if-no-files-found: ignore

@@ -231,7 +232,7 @@ jobs:
   twister-test-results:
     name: "Publish Unit Tests Results"
     needs: twister-build
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
     # the build-and-test job might be skipped, we don't need to run this job then
     if: success() || failure()
 
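The "Clone cached Zephyr repository" steps added throughout this comparison seed the workspace from a repository cache bind-mounted into the CI container, then re-point `origin` so the subsequent checkout action only fetches the delta. A minimal illustrative sketch of the step, using the same mount path declared in the container volumes above:

```yaml
name: example-cached-clone

on: pull_request

jobs:
  build:
    runs-on: ubuntu-20.04
    steps:
      - name: Clone cached Zephyr repository
        continue-on-error: true   # fall back to a plain checkout if the cache is absent
        run: |
          # --shared borrows the cached object store instead of copying it
          git clone --shared /github/cache/zephyrproject/zephyr .
          # point origin back at the real repository for the incremental fetch
          git remote set-url origin ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY}

      - name: Checkout
        uses: actions/checkout@v3
```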
.github/workflows/twister_tests.yml (12 changes)

@@ -5,12 +5,16 @@ name: Twister TestSuite
 
 on:
   push:
+    branches:
+      - v2.7-branch
     paths:
       - 'scripts/pylib/twister/**'
       - 'scripts/twister'
       - 'scripts/tests/twister/**'
       - '.github/workflows/twister_tests.yml'
   pull_request:
+    branches:
+      - v2.7-branch
     paths:
       - 'scripts/pylib/twister/**'
       - 'scripts/twister'

@@ -24,17 +28,17 @@ jobs:
     strategy:
       matrix:
         python-version: [3.6, 3.7, 3.8]
-        os: [ubuntu-latest]
+        os: [ubuntu-20.04]
     steps:
       - name: checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
       - name: Set up Python ${{ matrix.python-version }}
-        uses: actions/setup-python@v1
+        uses: actions/setup-python@v4
         with:
           python-version: ${{ matrix.python-version }}
       - name: cache-pip-linux
         if: startsWith(runner.os, 'Linux')
-        uses: actions/cache@v1
+        uses: actions/cache@v3
         with:
           path: ~/.cache/pip
           key: ${{ runner.os }}-pip-${{ matrix.python-version }}
.github/workflows/west_cmds.yml (20 changes)

@@ -5,11 +5,15 @@ name: Zephyr West Command Tests
 
 on:
   push:
+    branches:
+      - v2.7-branch
     paths:
       - 'scripts/west-commands.yml'
       - 'scripts/west_commands/**'
       - '.github/workflows/west_cmds.yml'
   pull_request:
+    branches:
+      - v2.7-branch
     paths:
       - 'scripts/west-commands.yml'
       - 'scripts/west_commands/**'

@@ -22,20 +26,22 @@ jobs:
     strategy:
       matrix:
         python-version: [3.6, 3.7, 3.8]
-        os: [ubuntu-latest, macos-latest, windows-latest]
+        os: [ubuntu-20.04, macos-11, windows-2022]
         exclude:
-          - os: macos-latest
+          - os: macos-11
             python-version: 3.6
+          - os: windows-2022
+            python-version: 3.6
     steps:
       - name: checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
       - name: Set up Python ${{ matrix.python-version }}
-        uses: actions/setup-python@v1
+        uses: actions/setup-python@v4
         with:
           python-version: ${{ matrix.python-version }}
       - name: cache-pip-linux
         if: startsWith(runner.os, 'Linux')
-        uses: actions/cache@v1
+        uses: actions/cache@v3
         with:
           path: ~/.cache/pip
           key: ${{ runner.os }}-pip-${{ matrix.python-version }}

@@ -43,7 +49,7 @@ jobs:
             ${{ runner.os }}-pip-${{ matrix.python-version }}
       - name: cache-pip-mac
         if: startsWith(runner.os, 'macOS')
-        uses: actions/cache@v1
+        uses: actions/cache@v3
         with:
           path: ~/Library/Caches/pip
           # Trailing '-' was just to get a different cache name

@@ -52,7 +58,7 @@ jobs:
             ${{ runner.os }}-pip-${{ matrix.python-version }}-
       - name: cache-pip-win
         if: startsWith(runner.os, 'Windows')
-        uses: actions/cache@v1
+        uses: actions/cache@v3
         with:
           path: ~\AppData\Local\pip\Cache
           key: ${{ runner.os }}-pip-${{ matrix.python-version }}
@@ -627,7 +627,7 @@ if(CONFIG_64BIT)
 endif()
 
 if(CONFIG_TIMEOUT_64BIT)
-  set(SYSCALL_SPLIT_TIMEOUT_ARG --split-type k_timeout_t)
+  set(SYSCALL_SPLIT_TIMEOUT_ARG --split-type k_timeout_t --split-type k_ticks_t)
 endif()
 
 add_custom_command(OUTPUT include/generated/syscall_dispatch.c ${syscall_list_h}
VERSION (2 changes)

@@ -1,5 +1,5 @@
 VERSION_MAJOR = 2
 VERSION_MINOR = 7
-PATCHLEVEL = 2
+PATCHLEVEL = 5
 VERSION_TWEAK = 0
 EXTRAVERSION =
@@ -56,7 +56,7 @@ void arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz,
 	 * arc_cpu_wake_flag will protect arc_cpu_sp that
 	 * only one slave cpu can read it per time
 	 */
-	arc_cpu_sp = Z_THREAD_STACK_BUFFER(stack) + sz;
+	arc_cpu_sp = Z_KERNEL_STACK_BUFFER(stack) + sz;
 
 	arc_cpu_wake_flag = cpu_num;
 
@@ -27,6 +27,7 @@ endif # BOARD_BL5340_DVK_CPUAPP
 
 config BUILD_WITH_TFM
 	default y if BOARD_BL5340_DVK_CPUAPP_NS
+	depends on ZEPHYR_TRUSTED_FIRMWARE_M_MODULE
 
 if BUILD_WITH_TFM
 
@@ -20,6 +20,7 @@ config BOARD
 # force building with TF-M as the Secure Execution Environment.
 config BUILD_WITH_TFM
 	default y if TRUSTED_EXECUTION_NONSECURE
+	depends on ZEPHYR_TRUSTED_FIRMWARE_M_MODULE
 
 
 if GPIO
@@ -4,7 +4,10 @@ type: mcu
 arch: arm
 ram: 4096
 flash: 4096
-simulation: qemu
+# TFM is not supported by default in the Zephyr LTS release.
+# Excluding this board's simulator to avoid CI failures.
+#
+#simulation: qemu
 toolchain:
   - gnuarmemb
   - zephyr
@@ -13,6 +13,7 @@ config BOARD
 
 config BUILD_WITH_TFM
 	default y if BOARD_NRF5340DK_NRF5340_CPUAPP_NS
+	depends on ZEPHYR_TRUSTED_FIRMWARE_M_MODULE
 
 if BUILD_WITH_TFM
 
@@ -13,6 +13,7 @@ config BOARD
 
 config BUILD_WITH_TFM
 	default y if BOARD_NRF9160DK_NRF9160_NS
+	depends on ZEPHYR_TRUSTED_FIRMWARE_M_MODULE
 
 if BUILD_WITH_TFM
 
@@ -186,8 +186,9 @@ To operate bluetooth on Nucleo WB55RG, Cortex-M0 core should be flashed with
 a valid STM32WB Coprocessor binaries (either 'Full stack' or 'HCI Layer').
 These binaries are delivered in STM32WB Cube packages, under
 Projects/STM32WB_Copro_Wireless_Binaries/STM32WB5x/
-To date, interoperability and backward compatibility has been tested and is
-guaranteed up to version 1.5 of STM32Cube package releases.
+For compatibility information with the various versions of these binaries,
+please check `modules/hal/stm32/lib/stm32wb/hci/README <https://github.com/zephyrproject-rtos/hal_stm32/blob/main/lib/stm32wb/hci/README>`__
+in the hal_stm32 repo.
 
 Connections and IOs
 ===================
@@ -518,7 +518,7 @@ function(zephyr_library_cc_option)
     string(MAKE_C_IDENTIFIER check${option} check)
     zephyr_check_compiler_flag(C ${option} ${check})
 
-    if(${check})
+    if(${${check}})
       zephyr_library_compile_options(${option})
     endif()
   endforeach()
@@ -1003,9 +1003,9 @@ endfunction()
 function(zephyr_check_compiler_flag lang option check)
   # Check if the option is covered by any hardcoded check before doing
   # an automated test.
-  zephyr_check_compiler_flag_hardcoded(${lang} "${option}" check exists)
+  zephyr_check_compiler_flag_hardcoded(${lang} "${option}" _${check} exists)
   if(exists)
-    set(check ${check} PARENT_SCOPE)
+    set(${check} ${_${check}} PARENT_SCOPE)
     return()
   endif()
@@ -1110,11 +1110,11 @@ function(zephyr_check_compiler_flag_hardcoded lang option check exists)
   # because they would produce a warning instead of an error during
   # the test. Exclude them by toolchain-specific blocklist.
   if((${lang} STREQUAL CXX) AND ("${option}" IN_LIST CXX_EXCLUDED_OPTIONS))
-    set(check 0 PARENT_SCOPE)
-    set(exists 1 PARENT_SCOPE)
+    set(${check} 0 PARENT_SCOPE)
+    set(${exists} 1 PARENT_SCOPE)
   else()
     # There does not exist a hardcoded check for this option.
-    set(exists 0 PARENT_SCOPE)
+    set(${exists} 0 PARENT_SCOPE)
   endif()
 endfunction(zephyr_check_compiler_flag_hardcoded)
@@ -1862,7 +1862,7 @@ function(check_set_linker_property)
   zephyr_check_compiler_flag(C "" ${check})
   set(CMAKE_REQUIRED_FLAGS ${SAVED_CMAKE_REQUIRED_FLAGS})
 
-  if(${check})
+  if(${${check}})
     set_property(TARGET ${LINKER_PROPERTY_TARGET} ${APPEND} PROPERTY ${property} ${option})
   endif()
 endfunction()
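The CMake hunks above all repair the same idiom: these functions receive the *name* of a result variable, so the callee must write through that name with `set(${check} ... PARENT_SCOPE)`, and the caller must expand twice to read the stored value. A self-contained sketch of the pattern, with illustrative function and variable names:

```cmake
# Sketch of the pass-result-variable-by-name idiom these hunks fix.
function(check_feature result_var)
  # Wrong (pre-fix shape): set(result_var 1 PARENT_SCOPE) would create a
  # variable literally named "result_var" instead of writing through the
  # caller-supplied name.
  set(${result_var} 1 PARENT_SCOPE)
endfunction()

check_feature(HAVE_FEATURE)

set(check HAVE_FEATURE)   # the result variable's name, held in another variable
if(${${check}})           # double expansion: the value of HAVE_FEATURE, i.e. if(1)
  message(STATUS "feature available")
endif()
```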
@@ -1,6 +1,8 @@
 # SPDX-License-Identifier: Apache-2.0
 
-include(${ZEPHYR_BASE}/cmake/toolchain/zephyr/host-tools.cmake)
+if(ZEPHYR_SDK_HOST_TOOLS)
+  include(${ZEPHYR_BASE}/cmake/toolchain/zephyr/host-tools.cmake)
+endif()
 
 # dtc is an optional dependency
 find_program(
@@ -163,12 +163,23 @@ endforeach()
 unset(EXTRA_KCONFIG_OPTIONS)
 get_cmake_property(cache_variable_names CACHE_VARIABLES)
 foreach (name ${cache_variable_names})
-  if("${name}" MATCHES "^CONFIG_")
+  if("${name}" MATCHES "^CLI_CONFIG_")
+    # Variable was set by user in earlier invocation, let's append to extra
+    # config unless a new value has been given.
+    string(REGEX REPLACE "^CLI_" "" org_name ${name})
+    if(NOT DEFINED ${org_name})
+      set(EXTRA_KCONFIG_OPTIONS
+        "${EXTRA_KCONFIG_OPTIONS}\n${org_name}=${${name}}"
+      )
+    endif()
+  elseif("${name}" MATCHES "^CONFIG_")
     # When a cache variable starts with 'CONFIG_', it is assumed to be
     # a Kconfig symbol assignment from the CMake command line.
     set(EXTRA_KCONFIG_OPTIONS
       "${EXTRA_KCONFIG_OPTIONS}\n${name}=${${name}}"
     )
+    set(CLI_${name} "${${name}}")
+    list(APPEND cli_config_list ${name})
   endif()
 endforeach()
@@ -296,21 +307,20 @@ add_custom_target(config-twister DEPENDS ${DOTCONFIG})
 # Remove the CLI Kconfig symbols from the namespace and
 # CMakeCache.txt. If the symbols end up in DOTCONFIG they will be
 # re-introduced to the namespace through 'import_kconfig'.
-foreach (name ${cache_variable_names})
-  if("${name}" MATCHES "^CONFIG_")
-    unset(${name})
-    unset(${name} CACHE)
-  endif()
+foreach (name ${cli_config_list})
+  unset(${name})
+  unset(${name} CACHE)
 endforeach()
 
 # Parse the lines prefixed with CONFIG_ in the .config file from Kconfig
 import_kconfig(CONFIG_ ${DOTCONFIG})
 
-# Re-introduce the CLI Kconfig symbols that survived
-foreach (name ${cache_variable_names})
-  if("${name}" MATCHES "^CONFIG_")
-    if(DEFINED ${name})
-      set(${name} ${${name}} CACHE STRING "")
-    endif()
+# Cache the CLI Kconfig symbols that survived through Kconfig, prefixed with CLI_.
+# Remove those who might have changed compared to earlier runs, if they no longer appears.
+foreach (name ${cli_config_list})
+  if(DEFINED ${name})
+    set(CLI_${name} ${CLI_${name}} CACHE INTERNAL "")
+  else()
+    unset(CLI_${name} CACHE)
   endif()
 endforeach()
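The Kconfig CMake change above keeps `-DCONFIG_*` options given on the CMake command line alive across re-runs: they are mirrored into `CLI_`-prefixed cache entries before the `CONFIG_*` names are scrubbed from the cache, and re-applied on the next configure unless the user supplies a new value. A toy sketch of the mirror-and-scrub half (variable names illustrative, not the upstream code):

```cmake
# Toy model: remember user-supplied CONFIG_* cache entries under CLI_*,
# then remove the originals so the cache stays clean.
set(CONFIG_FOO "y" CACHE STRING "")   # as if passed via -DCONFIG_FOO=y

get_cmake_property(cache_names CACHE_VARIABLES)
foreach(name ${cache_names})
  if("${name}" MATCHES "^CONFIG_")
    set(CLI_${name} "${${name}}" CACHE INTERNAL "")  # mirror the CLI value
    unset(${name})
    unset(${name} CACHE)                             # scrub the original
  endif()
endforeach()

message(STATUS "remembered CLI_CONFIG_FOO=${CLI_CONFIG_FOO}")
```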
@@ -1,8 +0,0 @@
-# Zephyr 0.11 SDK Toolchain
-
-# Copyright (c) 2020 Linaro Limited.
-# SPDX-License-Identifier: Apache-2.0
-
-config TOOLCHAIN_ZEPHYR_0_11
-	def_bool y
-	select HAS_NEWLIB_LIBC_NANO if (ARC || ARM || RISCV)
@@ -1,34 +0,0 @@
-# SPDX-License-Identifier: Apache-2.0
-
-set(TOOLCHAIN_HOME ${ZEPHYR_SDK_INSTALL_DIR})
-
-set(COMPILER gcc)
-set(LINKER ld)
-set(BINTOOLS gnu)
-
-# Find some toolchain that is distributed with this particular SDK
-
-file(GLOB toolchain_paths
-  LIST_DIRECTORIES true
-  ${TOOLCHAIN_HOME}/xtensa/*/*-zephyr-elf
-  ${TOOLCHAIN_HOME}/*-zephyr-elf
-  ${TOOLCHAIN_HOME}/*-zephyr-eabi
-  )
-
-if(toolchain_paths)
-  list(GET toolchain_paths 0 some_toolchain_path)
-
-  get_filename_component(one_toolchain_root "${some_toolchain_path}" DIRECTORY)
-  get_filename_component(one_toolchain "${some_toolchain_path}" NAME)
-
-  set(CROSS_COMPILE_TARGET ${one_toolchain})
-  set(SYSROOT_TARGET ${one_toolchain})
-endif()
-
-if(NOT CROSS_COMPILE_TARGET)
-  message(FATAL_ERROR "Unable to find 'x86_64-zephyr-elf' or any other architecture in ${TOOLCHAIN_HOME}")
-endif()
-
-set(CROSS_COMPILE ${one_toolchain_root}/${CROSS_COMPILE_TARGET}/bin/${CROSS_COMPILE_TARGET}-)
-set(SYSROOT_DIR ${one_toolchain_root}/${SYSROOT_TARGET}/${SYSROOT_TARGET})
-set(TOOLCHAIN_HAS_NEWLIB ON CACHE BOOL "True if toolchain supports newlib")
@@ -1,12 +0,0 @@
-# SPDX-License-Identifier: Apache-2.0
-
-set(HOST_TOOLS_HOME ${ZEPHYR_SDK_INSTALL_DIR}/sysroots/${TOOLCHAIN_ARCH}-pokysdk-linux)
-
-# Path used for searching by the find_*() functions, with appropriate
-# suffixes added. Ensures that the SDK's host tools will be found when
-# we call, e.g. find_program(QEMU qemu-system-x86)
-list(APPEND CMAKE_PREFIX_PATH ${HOST_TOOLS_HOME}/usr)
-
-# TODO: Use find_* somehow for these as well?
-set_ifndef(QEMU_BIOS ${HOST_TOOLS_HOME}/usr/share/qemu)
-set_ifndef(OPENOCD_DEFAULT_PATH ${HOST_TOOLS_HOME}/usr/share/openocd/scripts)
@@ -1,53 +0,0 @@
-# SPDX-License-Identifier: Apache-2.0
-
-set(CROSS_COMPILE_TARGET_arm arm-zephyr-eabi)
-set(CROSS_COMPILE_TARGET_arm64 aarch64-zephyr-elf)
-set(CROSS_COMPILE_TARGET_nios2 nios2-zephyr-elf)
-set(CROSS_COMPILE_TARGET_riscv riscv64-zephyr-elf)
-set(CROSS_COMPILE_TARGET_mips mipsel-zephyr-elf)
-set(CROSS_COMPILE_TARGET_xtensa xtensa-zephyr-elf)
-set(CROSS_COMPILE_TARGET_arc arc-zephyr-elf)
-set(CROSS_COMPILE_TARGET_x86 x86_64-zephyr-elf)
-set(CROSS_COMPILE_TARGET_sparc sparc-zephyr-elf)
-
-set(CROSS_COMPILE_TARGET ${CROSS_COMPILE_TARGET_${ARCH}})
-set(SYSROOT_TARGET ${CROSS_COMPILE_TARGET})
-
-if("${ARCH}" STREQUAL "xtensa")
-  # Xtensa GCC needs a different toolchain per SOC
-  if("${SOC_SERIES}" STREQUAL "cavs_v15")
-    set(SR_XT_TC_SOC intel_apl_adsp)
-  elseif("${SOC_SERIES}" STREQUAL "cavs_v18")
-    set(SR_XT_TC_SOC intel_s1000)
-  elseif("${SOC_SERIES}" STREQUAL "cavs_v20")
-    set(SR_XT_TC_SOC intel_s1000)
-  elseif("${SOC_SERIES}" STREQUAL "cavs_v25")
-    set(SR_XT_TC_SOC intel_s1000)
-  elseif("${SOC_SERIES}" STREQUAL "baytrail_adsp")
-    set(SR_XT_TC_SOC intel_byt_adsp)
-  elseif("${SOC_SERIES}" STREQUAL "broadwell_adsp")
-    set(SR_XT_TC_SOC intel_bdw_adsp)
-  elseif("${SOC_SERIES}" STREQUAL "imx8")
-    set(SR_XT_TC_SOC nxp_imx_adsp)
-  elseif("${SOC_SERIES}" STREQUAL "imx8m")
-    set(SR_XT_TC_SOC nxp_imx8m_adsp)
-  else()
-    message(FATAL_ERROR "Not compiler set for SOC_SERIES ${SOC_SERIES}")
-  endif()
-  set(SYSROOT_DIR ${TOOLCHAIN_HOME}/xtensa/${SR_XT_TC_SOC}/${SYSROOT_TARGET})
-  set(CROSS_COMPILE ${TOOLCHAIN_HOME}/xtensa/${SR_XT_TC_SOC}/${CROSS_COMPILE_TARGET}/bin/${CROSS_COMPILE_TARGET}-)
-else()
-  # Non-Xtensa SDK toolchains follow a simpler convention
-  set(SYSROOT_DIR ${TOOLCHAIN_HOME}/${SYSROOT_TARGET}/${SYSROOT_TARGET})
-  set(CROSS_COMPILE ${TOOLCHAIN_HOME}/${CROSS_COMPILE_TARGET}/bin/${CROSS_COMPILE_TARGET}-)
-endif()
-
-if("${ARCH}" STREQUAL "x86")
-  if(CONFIG_X86_64)
-    list(APPEND TOOLCHAIN_C_FLAGS -m64)
-    list(APPEND TOOLCHAIN_LD_FLAGS -m64)
-  else()
-    list(APPEND TOOLCHAIN_C_FLAGS -m32)
-    list(APPEND TOOLCHAIN_LD_FLAGS -m32)
-  endif()
-endif()
@@ -1,13 +1,7 @@
 # SPDX-License-Identifier: Apache-2.0
 
-if(${SDK_VERSION} VERSION_LESS_EQUAL 0.11.2)
-  # For backward compatibility with 0.11.1 and 0.11.2
-  # we need to source files from Zephyr repo
-  include(${CMAKE_CURRENT_LIST_DIR}/${SDK_MAJOR_MINOR}/generic.cmake)
+include(${ZEPHYR_SDK_INSTALL_DIR}/cmake/zephyr/generic.cmake)
 
-  set(TOOLCHAIN_KCONFIG_DIR ${CMAKE_CURRENT_LIST_DIR}/${SDK_MAJOR_MINOR})
-else()
-  include(${ZEPHYR_SDK_INSTALL_DIR}/cmake/zephyr/generic.cmake)
-  set(TOOLCHAIN_KCONFIG_DIR ${ZEPHYR_SDK_INSTALL_DIR}/cmake/zephyr)
-endif()
+set(TOOLCHAIN_KCONFIG_DIR ${ZEPHYR_SDK_INSTALL_DIR}/cmake/zephyr)
 
 message(STATUS "Found toolchain: zephyr ${SDK_VERSION} (${ZEPHYR_SDK_INSTALL_DIR})")
@@ -1,55 +1,5 @@
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
# This is the minimum required version for Zephyr to work (Old style)
|
||||
set(REQUIRED_SDK_VER 0.11.1)
|
||||
cmake_host_system_information(RESULT TOOLCHAIN_ARCH QUERY OS_PLATFORM)
|
||||
include(${ZEPHYR_SDK_INSTALL_DIR}/cmake/zephyr/host-tools.cmake)
|
||||
|
||||
if(NOT DEFINED ZEPHYR_SDK_INSTALL_DIR)
|
||||
# Until https://github.com/zephyrproject-rtos/zephyr/issues/4912 is
|
||||
# resolved we use ZEPHYR_SDK_INSTALL_DIR to determine whether the user
|
||||
# wants to use the Zephyr SDK or not.
|
||||
return()
|
||||
endif()
|
||||
|
||||
# Cache the Zephyr SDK install dir.
|
||||
set(ZEPHYR_SDK_INSTALL_DIR ${ZEPHYR_SDK_INSTALL_DIR} CACHE PATH "Zephyr SDK install directory")
|
||||
|
||||
if(NOT DEFINED SDK_VERSION)
|
||||
if(ZEPHYR_TOOLCHAIN_VARIANT AND ZEPHYR_SDK_INSTALL_DIR)
|
||||
# Manual detection for Zephyr SDK 0.11.1 and 0.11.2 for backward compatibility.
|
||||
set(sdk_version_path ${ZEPHYR_SDK_INSTALL_DIR}/sdk_version)
|
||||
if(NOT (EXISTS ${sdk_version_path}))
|
||||
message(FATAL_ERROR
|
||||
"The file '${ZEPHYR_SDK_INSTALL_DIR}/sdk_version' was not found. \
|
||||
Is ZEPHYR_SDK_INSTALL_DIR=${ZEPHYR_SDK_INSTALL_DIR} misconfigured?")
|
||||
endif()
|
||||
|
||||
# Read version as published by the SDK
|
||||
file(READ ${sdk_version_path} SDK_VERSION_PRE1)
|
||||
# Remove any pre-release data, for example 0.10.0-beta4 -> 0.10.0
|
||||
string(REGEX REPLACE "-.*" "" SDK_VERSION_PRE2 ${SDK_VERSION_PRE1})
|
||||
# Strip any trailing spaces/newlines from the version string
|
||||
string(STRIP ${SDK_VERSION_PRE2} SDK_VERSION)
|
||||
string(REGEX MATCH "([0-9]*).([0-9]*)" SDK_MAJOR_MINOR ${SDK_VERSION})

string(REGEX MATCH "([0-9]+)\.([0-9]+)\.([0-9]+)" SDK_MAJOR_MINOR_MICRO ${SDK_VERSION})

#at least 0.0.0
if(NOT SDK_MAJOR_MINOR_MICRO)
message(FATAL_ERROR "sdk version: ${SDK_MAJOR_MINOR_MICRO} improper format.
|
||||
Expected format: x.y.z
Check whether the Zephyr SDK was installed correctly.
")
endif()
endif()
endif()

message(STATUS "Using toolchain: zephyr ${SDK_VERSION} (${ZEPHYR_SDK_INSTALL_DIR})")

if(${SDK_VERSION} VERSION_LESS_EQUAL 0.11.2)
# For backward compatibility with 0.11.1 and 0.11.2
# we need to source files from Zephyr repo
include(${CMAKE_CURRENT_LIST_DIR}/${SDK_MAJOR_MINOR}/host-tools.cmake)
else()
include(${ZEPHYR_SDK_INSTALL_DIR}/cmake/zephyr/host-tools.cmake)
endif()
message(STATUS "Found host-tools: zephyr ${SDK_VERSION} (${ZEPHYR_SDK_INSTALL_DIR})")

@@ -1,18 +1,3 @@
# SPDX-License-Identifier: Apache-2.0

if(${SDK_VERSION} VERSION_LESS_EQUAL 0.11.2)
# For backward compatibility with 0.11.1 and 0.11.2
# we need to source files from Zephyr repo
include(${CMAKE_CURRENT_LIST_DIR}/${SDK_MAJOR_MINOR}/target.cmake)
elseif(("${ARCH}" STREQUAL "sparc") AND (${SDK_VERSION} VERSION_LESS 0.12))
# SDK 0.11.3, 0.11.4 does not have SPARC target support.
include(${CMAKE_CURRENT_LIST_DIR}/${SDK_MAJOR_MINOR}/target.cmake)
else()
include(${ZEPHYR_SDK_INSTALL_DIR}/cmake/zephyr/target.cmake)

# Workaround, FIXME: Waiting for new SDK.
if("${ARCH}" STREQUAL "xtensa")
set(SYSROOT_DIR ${TOOLCHAIN_HOME}/xtensa/${SOC_TOOLCHAIN_NAME}/${SYSROOT_TARGET})
set(CROSS_COMPILE ${TOOLCHAIN_HOME}/xtensa/${SOC_TOOLCHAIN_NAME}/${CROSS_COMPILE_TARGET}/bin/${CROSS_COMPILE_TARGET}-)
endif()
endif()
include(${ZEPHYR_SDK_INSTALL_DIR}/cmake/zephyr/target.cmake)

@@ -90,10 +90,6 @@ if(NOT DEFINED ZEPHYR_TOOLCHAIN_VARIANT)
if (NOT Zephyr-sdk_CONSIDERED_VERSIONS)
set(error_msg "ZEPHYR_TOOLCHAIN_VARIANT not specified and no Zephyr SDK is installed.\n")
string(APPEND error_msg "Please set ZEPHYR_TOOLCHAIN_VARIANT to the toolchain to use or install the Zephyr SDK.")

if(NOT ZEPHYR_TOOLCHAIN_VARIANT AND NOT ZEPHYR_SDK_INSTALL_DIR)
set(error_note "Note: If you are using Zephyr SDK 0.11.1 or 0.11.2, remember to set ZEPHYR_SDK_INSTALL_DIR and ZEPHYR_TOOLCHAIN_VARIANT")
endif()
else()
# Note: When CMake minimum version becomes >= 3.17, change this loop into:
# foreach(version config IN ZIP_LISTS Zephyr-sdk_CONSIDERED_VERSIONS Zephyr-sdk_CONSIDERED_CONFIGS)
@@ -116,11 +112,17 @@ if(NOT DEFINED ZEPHYR_TOOLCHAIN_VARIANT)
message(FATAL_ERROR "${error_msg}
The Zephyr SDK can be downloaded from:
https://github.com/zephyrproject-rtos/sdk-ng/releases/download/v${TOOLCHAIN_ZEPHYR_MINIMUM_REQUIRED_VERSION}/zephyr-sdk-${TOOLCHAIN_ZEPHYR_MINIMUM_REQUIRED_VERSION}-setup.run
${error_note}
")

endif()

if(DEFINED ZEPHYR_SDK_INSTALL_DIR)
# Cache the Zephyr SDK install dir.
set(ZEPHYR_SDK_INSTALL_DIR ${ZEPHYR_SDK_INSTALL_DIR} CACHE PATH "Zephyr SDK install directory")
# Use the Zephyr SDK host-tools.
set(ZEPHYR_SDK_HOST_TOOLS TRUE)
endif()

if(CMAKE_SCRIPT_MODE_FILE)
if("${FORMAT}" STREQUAL "json")
set(json "{\"ZEPHYR_TOOLCHAIN_VARIANT\" : \"${ZEPHYR_TOOLCHAIN_VARIANT}\", ")

@@ -2,24 +2,377 @@

.. _zephyr_2.7:

.. _zephyr_2.7.1:
.. _zephyr_2.7.5:

Zephyr 2.7.1
Zephyr 2.7.5
####################

This is an LTS maintenance release with fixes.

Issues Fixed
************

These GitHub issues were addressed since the previous 2.7.4 tagged
release:

.. comment List derived from GitHub Issue query: ...
* :github:`issuenumber` - issue title

* :github:`41111` - utils: tmcvt: fix integer overflow after 6.4 days with ``gettimeofday()`` and ``z_tmcvt()``
* :github:`51663` - tests: kernel: increase coverage for kernel and mmu tests
* :github:`53124` - cmake: fix argument passing in ``zephyr_check_compiler_flag()`` cmake function
* :github:`53315` - net: tcp: fix possible underflow in ``tcp_flags()``.
* :github:`53981` - scripts: fixes for ``gen_syscalls`` and ``gen_app_partitions``
* :github:`53983` - init: correct early init time calls to ``k_current_get()`` when TLS is enabled
* :github:`54140` - net: fix BUS FAULT when running nmap towards echo_async sample
* :github:`54325` - coredump: support out-of-tree coredump backend definition
* :github:`54386` - kernel: correct SMP scheduling with more than 2 CPUs
* :github:`54527` - tests: kernel: remove faulty test from tests/kernel/poll
* :github:`55019` - bluetooth: initialize backport of #54905 failed
* :github:`55068` - net: ipv6: validate arguments in ``net_if_ipv6_set_reachable_time()``
* :github:`55069` - net: core: ``net pkt`` shell command missing input validation
* :github:`55323` - logging: fix userspace runtime filtering
* :github:`55490` - cxx: fix compile error in C++ project for bad flags ``-Wno-pointer-sign`` and ``-Werror=implicit-int``
* :github:`56071` - security: MbedTLS: update to v2.28.3
* :github:`56729` - posix: SCHED_RR valid thread priorities
* :github:`57210` - drivers: pcie: endpoint: pcie_ep_iproc: correct use of optional devicetree binding
* :github:`57419` - tests: dma: support 64-bit addressing in tests
* :github:`57710` - posix: support building eventfd on arm-clang

mbedTLS
*******

Moving mbedTLS to the 2.28.x series (2.28.3 precisely). This is an LTS release
that will be supported with bug fixes and security fixes until the end of 2024.

Detailed information can be found in:
https://github.com/Mbed-TLS/mbedtls/releases/tag/v2.28.3
https://github.com/zephyrproject-rtos/zephyr/issues/56071

This version is incompatible with TF-M, and because of this TF-M is no longer
supported in Zephyr LTS. If TF-M is required, it can be manually added back by
changing the mbedTLS revision in ``west.yaml`` to the previous one
(5765cb7f75a9973ae9232d438e361a9d7bbc49e7). This should be carefully assessed
by a security expert to ensure that the known vulnerabilities in that version
don't affect the product.
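A minimal sketch of that manifest change (illustrative only; it assumes the
standard ``west`` manifest layout, and the project entry in your tree may
differ)::

   manifest:
     projects:
       - name: mbedtls
         revision: 5765cb7f75a9973ae9232d438e361a9d7bbc49e7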

Vulnerabilities addressed in this update:

* MBEDTLS_AESNI_C, which is enabled by default, was silently ignored on
  builds that couldn't compile the GCC-style assembly implementation
  (most notably builds with Visual Studio), leaving them vulnerable to
  timing side-channel attacks. There is now an intrinsics-based AES-NI
  implementation as a fallback for when the assembly one cannot be used.

* Fix potential heap buffer overread and overwrite in DTLS if
  MBEDTLS_SSL_DTLS_CONNECTION_ID is enabled and
  MBEDTLS_SSL_CID_IN_LEN_MAX > 2 * MBEDTLS_SSL_CID_OUT_LEN_MAX.

* An adversary with access to precise enough information about memory
  accesses (typically, an untrusted operating system attacking a secure
  enclave) could recover an RSA private key after observing the victim
  performing a single private-key operation if the window size used for the
  exponentiation was 3 or smaller. Found and reported by Zili KOU,
  Wenjian HE, Sharad Sinha, and Wei ZHANG. See "Cache Side-channel Attacks
  and Defenses of the Sliding Window Algorithm in TEEs" - Design, Automation
  and Test in Europe 2023.

* Zeroize dynamically-allocated buffers used by the PSA Crypto key storage
  module before freeing them. These buffers contain secret key material, and
  could thus potentially leak the key through freed heap.

* Fix a potential heap buffer overread in TLS 1.2 server-side when
  MBEDTLS_USE_PSA_CRYPTO is enabled, an opaque key (created with
  mbedtls_pk_setup_opaque()) is provisioned, and a static ECDH ciphersuite
  is selected. This may result in an application crash or potentially an
  information leak.

* Fix a buffer overread in DTLS ClientHello parsing in servers with
  MBEDTLS_SSL_DTLS_CLIENT_PORT_REUSE enabled. An unauthenticated client
  or a man-in-the-middle could cause a DTLS server to read up to 255 bytes
  after the end of the SSL input buffer. The buffer overread only happens
  when MBEDTLS_SSL_IN_CONTENT_LEN is less than a threshold that depends on
  the exact configuration: 258 bytes if using mbedtls_ssl_cookie_check(),
  and possibly up to 571 bytes with a custom cookie check function.
  Reported by the Cybeats PSI Team.

* Zeroize several intermediate variables used to calculate the expected
  value when verifying a MAC or AEAD tag. This hardens the library in
  case the value leaks through a memory disclosure vulnerability. For
  example, a memory disclosure vulnerability could have allowed a
  man-in-the-middle to inject fake ciphertext into a DTLS connection.

* In psa_cipher_generate_iv() and psa_cipher_encrypt(), do not read back
  from the output buffer. This fixes a potential policy bypass or decryption
  oracle vulnerability if the output buffer is in memory that is shared with
  an untrusted application.

* Fix a double-free that happened after mbedtls_ssl_set_session() or
  mbedtls_ssl_get_session() failed with MBEDTLS_ERR_SSL_ALLOC_FAILED
  (out of memory). After that, calling mbedtls_ssl_session_free()
  and mbedtls_ssl_free() would cause an internal session buffer to
  be free()'d twice.

* Fix a bias in the generation of finite-field Diffie-Hellman-Merkle (DHM)
  private keys and of blinding values for DHM and elliptic curves (ECP)
  computations.

* Fix a potential side channel vulnerability in ECDSA ephemeral key generation.
  An adversary who is capable of very precise timing measurements could
  learn partial information about the leading bits of the nonce used for the
  signature, allowing the recovery of the private key after observing a
  large number of signature operations. This completes a partial fix in
  Mbed TLS 2.20.0.

Security Vulnerability Related
******************************

The following security vulnerabilities (CVEs) were addressed in this
release:

* (N/A)
* CVE-2023-0397: `Zephyr project bug tracker GHSA-wc2h-h868-q7hj
  <https://github.com/zephyrproject-rtos/zephyr/security/advisories/GHSA-wc2h-h868-q7hj>`_

* CVE-2023-0779: `Zephyr project bug tracker GHSA-9xj8-6989-r549
  <https://github.com/zephyrproject-rtos/zephyr/security/advisories/GHSA-9xj8-6989-r549>`_

More detailed information can be found in:
https://docs.zephyrproject.org/latest/security/vulnerabilities.html

.. _zephyr_2.7.4:

Zephyr 2.7.4
####################

This is an LTS maintenance release with fixes.

Issues Fixed
************

These GitHub issues were addressed since the previous 2.7.3 tagged
release:

.. comment List derived from GitHub Issue query: ...
* :github:`issuenumber` - issue title

* :github:`25417` - net: socket: socketpair: check for ISR context
* :github:`41012` - irq_enable() doesn’t support enabling NVIC IRQ number more than 127
* :github:`44070` - west spdx TypeError: 'NoneType' object is not iterable
* :github:`46072` - subsys/hawkBit: Debug log error in hawkbit example "CONFIG_LOG_STRDUP_MAX_STRING"
* :github:`48056` - Possible null pointer dereference after k_mutex_lock times out
* :github:`49102` - hawkbit - dns name randomly not resolved
* :github:`49139` - can't run west or DT tests on windows / py 3.6
* :github:`49564` - Newer versions of pylink are not supported in latest zephyr 2.7 release
* :github:`49569` - Backport cmake string cache fix to v2.7 branch
* :github:`50221` - tests: debug: test case subsys/debug/coredump failed on acrn_ehl_crb on branch v2.7
* :github:`50467` - Possible memory corruption on ARC when userspace is enabled
* :github:`50468` - Incorrect Z_THREAD_STACK_BUFFER in arch_start_cpu for Xtensa
* :github:`50961` - drivers: counter: Update counter_set_channel_alarm documentation
* :github:`51714` - Bluetooth: Application with buffer that cannot unref it in disconnect handler leads to advertising issues
* :github:`51776` - POSIX API is not portable across arches
* :github:`52247` - mgmt: mcumgr: image upload, then image erase, then image upload does not restart upload from start
* :github:`52517` - lib: posix: sleep() does not return the number of seconds left if interrupted
* :github:`52518` - lib: posix: usleep() does not follow the POSIX spec
* :github:`52542` - lib: posix: make sleep() and usleep() standards-compliant
* :github:`52591` - mcumgr user data size out of sync with net buffer user data size
* :github:`52829` - kernel/sched: Fix SMP race on pend
* :github:`53088` - Unable to change initialization priority of logging subsys

Security Vulnerability Related
******************************

The following security vulnerabilities (CVEs) were addressed in this
release:

* CVE-2022-2741: `Zephyr project bug tracker GHSA-hx5v-j59q-c3j8
  <https://github.com/zephyrproject-rtos/zephyr/security/advisories/GHSA-hx5v-j59q-c3j8>`_

* CVE-2022-1841: `Zephyr project bug tracker GHSA-5c3j-p8cr-2pgh
  <https://github.com/zephyrproject-rtos/zephyr/security/advisories/GHSA-5c3j-p8cr-2pgh>`_

More detailed information can be found in:
https://docs.zephyrproject.org/latest/security/vulnerabilities.html

.. _zephyr_2.7.3:

Zephyr 2.7.3
####################

This is an LTS maintenance release with fixes.

Issues Fixed
************

These GitHub issues were addressed since the previous 2.7.2 tagged
release:

.. comment List derived from GitHub Issue query: ...
* :github:`issuenumber` - issue title

* :github:`39882` - Bluetooth Host qualification on 2.7 branch
* :github:`41074` - can_mcan_send sends corrupted CAN frames with a byte-by-byte memcpy implementation
* :github:`43479` - Bluetooth: Controller: Fix per adv scheduling issue
* :github:`43694` - drivers: spi: stm32 spi with dma must enable cs after periph
* :github:`44089` - logging: shell backend: null-deref when logs are dropped
* :github:`45341` - Add new EHL SKUs for IBECC
* :github:`45529` - GdbStub get_mem_region bugfix
* :github:`46621` - drivers: i2c: Infinite recursion in driver unregister function
* :github:`46698` - sm351 driver faults when using global thread
* :github:`46706` - add missing checks for segment number
* :github:`46757` - Bluetooth: Controller: Missing validation of unsupported PHY when performing PHY update
* :github:`46807` - lib: posix: semaphore: use consistent timebase in sem_timedwait
* :github:`46822` - L2CAP disconnected packet timing in ecred reconf function
* :github:`46994` - Incorrect Xtensa toolchain path resolution
* :github:`47356` - cpp: global static object initialisation may fail for MMU and MPU platforms
* :github:`47609` - posix: pthread: descriptor leak with pthread_join
* :github:`47955` - drivers: can: various RTR fixes
* :github:`48249` - boards: nucleo_wb55rg: documentation BLE binary compatibility issue
* :github:`48271` - net: Possible net_pkt leak in ipv6 multicast forwarding

Security Vulnerability Related
******************************

The following security vulnerabilities (CVEs) were addressed in this
release:

* CVE-2022-2741: Under embargo until 2022-10-14

* CVE-2022-1042: `Zephyr project bug tracker GHSA-j7v7-w73r-mm5x
  <https://github.com/zephyrproject-rtos/zephyr/security/advisories/GHSA-j7v7-w73r-mm5x>`_

* CVE-2022-1041: `Zephyr project bug tracker GHSA-p449-9hv9-pj38
  <https://github.com/zephyrproject-rtos/zephyr/security/advisories/GHSA-p449-9hv9-pj38>`_

* CVE-2021-3966: `Zephyr project bug tracker GHSA-hfxq-3w6x-fv2m
  <https://github.com/zephyrproject-rtos/zephyr/security/advisories/GHSA-hfxq-3w6x-fv2m>`_

More detailed information can be found in:
https://docs.zephyrproject.org/latest/security/vulnerabilities.html

.. _zephyr_2.7.2:

Zephyr 2.7.2
####################

This is an LTS maintenance release with fixes.

Issues Fixed
************

These GitHub issues were addressed since the previous 2.7.1 tagged
release:

.. comment List derived from GitHub Issue query: ...
* :github:`issuenumber` - issue title

* :github:`23419` - posix: clock: No thread safety clock_getime / clock_settime
* :github:`30367` - TCP2 does not send our MSS to peer
* :github:`37389` - nucleo_g0b1re: Swapping image in mcuboot results in hard fault and softbricks the device
* :github:`38268` - Multiple defects in "Multi Producer Single Consumer Packet Buffer" library
* :github:`38576` - net shell: self-connecting to TCP might lead to a crash
* :github:`39184` - HawkBit hash mismatch
* :github:`39242` - net: sockets: Zephyr Fatal in dns_resolve_cb if dns request was attempted in offline state
* :github:`39399` - linker: Missing align __itcm_load_start / __dtcm_data_load_start linker symbols
* :github:`39608` - stm32: lpuart: 9600 baudrate doesn't work
* :github:`39609` - spi: slave: division by zero in timeout calculation
* :github:`39660` - poll() not notified when a TLS/TCP connection is closed without TLS close_notify
* :github:`39687` - sensor: qdec_nrfx: PM callback has incorrect signature
* :github:`39774` - modem: uart mux reading optimization never used
* :github:`39882` - Bluetooth Host qualification on 2.7 branch
* :github:`40163` - Use correct clock frequency for systick+DWT
* :github:`40464` - Dereferencing NULL with getsockname() on TI Simplelink Platform
* :github:`40578` - MODBUS RS-485 transceiver support broken on several platforms due to DE race condition
* :github:`40614` - poll: the code judgment condition is always true
* :github:`40640` - drivers: usb_dc_native_posix: segfault when using composite USB device
* :github:`40730` - More power supply modes on STM32H7XX
* :github:`40775` - stm32: multi-threading broken after #40173
* :github:`40795` - Timer signal thread execution loop break SMP on ARM64
* :github:`40925` - mesh_badge not working reel_board_v2
* :github:`40985` - net: icmpv6: Add support for Route Info option in Router Advertisement
* :github:`41026` - LoRa: sx126x: DIO1 interrupt left enabled in sleep mode
* :github:`41077` - console: gsm_mux: could not send more than 128 bytes of data on dlci
* :github:`41089` - power modes for STM32H7
* :github:`41095` - libc: newlib: 'gettimeofday' causes stack overflow on non-POSIX builds
* :github:`41237` - drivers: ieee802154_dw1000: use dedicated workqueue
* :github:`41240` - logging can get messed up when messages are dropped
* :github:`41284` - pthread_cond_wait return value incorrect
* :github:`41339` - stm32, Unable to read UART while checking from Framing error.
* :github:`41488` - Stall logging on nrf52840
* :github:`41499` - drivers: iwdg: stm32: WDT_OPT_PAUSE_HALTED_BY_DBG might not work
* :github:`41503` - including net/socket.h fails with redefinition of struct zsock_timeval (sometimes :-) )
* :github:`41529` - documentation: generate Doxygen tag file
* :github:`41536` - Backport STM32 SMPS Support to v2.7.0
* :github:`41582` - stm32h7: CSI as PLL source is broken
* :github:`41683` - http_client: Unreliable rsp->body_start pointer
* :github:`41915` - regression: Build fails after switching logging to V2
* :github:`41942` - k_delayable_work being used as k_work in work's handler
* :github:`41952` - Log timestamp overflows when using LOGv2
* :github:`42164` - tests/bluetooth/tester broken after switch to logging v2
* :github:`42271` - drivers: can: m_can: The can_set_bitrate() function doesn't work.
* :github:`42299` - spi: nRF HAL driver asserts when PM is used
* :github:`42373` - add k_spin_lock() to doxygen prior to v3.0 release
* :github:`42581` - include: drivers: clock_control: stm32 incorrect DT_PROP is used for xtpre
* :github:`42615` - Bluetooth: Controller: Missing ticks slot offset calculation in Periodic Advertising event scheduling
* :github:`42622` - pm: pm_device structure bigger than necessary when PM_DEVICE_RUNTIME not set
* :github:`42631` - Unable to identify owner of net_mgmt_lock easily
* :github:`42825` - MQTT client disconnection (EAGAIN) on publish with big payload
* :github:`42862` - Bluetooth: L2CAP: Security check on l2cap request is wrong
* :github:`43117` - Not possible to create more than one shield.
* :github:`43130` - STM32WL ADC idles / doesn't work
* :github:`43176` - net/icmpv4: client possible to ddos itself when there's an error for the broadcasted packet
* :github:`43177` - net: shell: errno not cleared before calling the strtol
* :github:`43178` - net: ip: route: log_strdup misuse
* :github:`43179` - net: tcp: forever loop in tcp_resend_data
* :github:`43180` - net: tcp: possible deadlock in tcp_conn_unref()
* :github:`43181` - net: sockets: net_pkt leak in accept
* :github:`43182` - net: arp: ARP retransmission source address selection
* :github:`43183` - net: mqtt: setsockopt leak on failure
* :github:`43184` - arm: Wrong macro used for z_interrupt_stacks declaration in stack.h
* :github:`43185` - arm: cortex-m: uninitialised ptr_esf in get_esf() in fault.c
* :github:`43470` - wifi: esp_at: race condition on mutex's leading to deadlock
* :github:`43490` - net: sockets: userspace accept() crashes with NULL addr/addrlen pointer
* :github:`43548` - gen_relocate_app truncates files on incremental builds
* :github:`43572` - stm32: wrong clock the LSI freq for stm32l0x mcus
* :github:`43580` - hl7800: tcp stack freezes on slow response from modem
* :github:`43807` - Test "cpp.libcxx.newlib.exception" failed on platforms which use zephyr.bin to run tests.
* :github:`43839` - Bluetooth: controller: missing NULL assign to df_cfg in ll_adv_set
* :github:`43853` - X86 MSI messages always get to BSP core (need a fix to be backported)
* :github:`43858` - mcumgr seems to lock up when it receives command for group that does not exist
* :github:`44107` - The SMP nsim boards are started incorrectly when launching on real HW
* :github:`44310` - net: gptp: type mismatch calculation error in gptp_mi
* :github:`44336` - nucleo_wb55rg: stm32cubeprogrammer runner is missing for twister tests
* :github:`44337` - twister: Miss sn option to stm32cubeprogrammer runner
* :github:`44352` - stm32l5x boards missing the openocd runner
* :github:`44497` - Add guide for disabling MSD on JLink OB devices and link to from smp_svr page
* :github:`44531` - bl654_usb without mcuboot maximum image size is not limited
* :github:`44886` - Unable to boot Zephyr on FVP_BaseR_AEMv8R
* :github:`44902` - x86: FPU registers are not initialised for userspace (eager FPU sharing)
* :github:`45869` - doc: update requirements
* :github:`45870` - drivers: virt_ivshmem: Allow multiple instances of ivShMem devices
* :github:`45871` - ci: split Bluetooth workflow
* :github:`45872` - ci: make git credentials non-persistent
* :github:`45873` - soc: esp32: use PYTHON_EXECUTABLE from build system

Security Vulnerability Related
******************************

The following security vulnerabilities (CVEs) were addressed in this
release:

* CVE-2021-3966: `Zephyr project bug tracker GHSA-hfxq-3w6x-fv2m
  <https://github.com/zephyrproject-rtos/zephyr/security/advisories/GHSA-hfxq-3w6x-fv2m>`_

More detailed information can be found in:
https://docs.zephyrproject.org/latest/security/vulnerabilities.html

.. _zephyr_2.7.1:

Zephyr 2.7.1
####################

This is an LTS maintenance release with fixes.

Issues Fixed
************

@@ -11,6 +11,9 @@
#include "can_loopback.h"

#include <logging/log.h>

#include "can_utils.h"

LOG_MODULE_DECLARE(can_driver, CONFIG_CAN_LOG_LEVEL);

K_KERNEL_STACK_DEFINE(tx_thread_stack,
@@ -41,13 +44,6 @@ static void dispatch_frame(const struct zcan_frame *frame,
filter->rx_cb(&frame_tmp, filter->cb_arg);
}

static inline int check_filter_match(const struct zcan_frame *frame,
const struct zcan_filter *filter)
{
return ((filter->id & filter->id_mask) ==
(frame->id & filter->id_mask));
}
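/* Worked example: with filter->id = 0x123 and filter->id_mask = 0x7F0, the
 * masked filter id is 0x120, so the check accepts any frame for which
 * (frame->id & 0x7F0) == 0x120, i.e. standard ids 0x120 through 0x12F.
 */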

void tx_thread(void *data_arg, void *arg2, void *arg3)
{
ARG_UNUSED(arg2);
@@ -63,7 +59,7 @@ void tx_thread(void *data_arg, void *arg2, void *arg3)
for (int i = 0; i < CONFIG_CAN_MAX_FILTER; i++) {
filter = &data->filters[i];
if (filter->rx_cb &&
check_filter_match(&frame.frame, &filter->filter)) {
can_utils_filter_match(&frame.frame, &filter->filter) != 0) {
dispatch_frame(&frame.frame, filter);
}
}

@@ -23,6 +23,34 @@ LOG_MODULE_DECLARE(can_driver, CONFIG_CAN_LOG_LEVEL);
#define MCAN_MAX_DLC CAN_MAX_DLC
#endif

static void memcpy32_volatile(volatile void *dst_, const volatile void *src_,
size_t len)
{
volatile uint32_t *dst = dst_;
const volatile uint32_t *src = src_;

__ASSERT(len % 4 == 0, "len must be a multiple of 4!");
len /= sizeof(uint32_t);

while (len--) {
*dst = *src;
++dst;
++src;
}
}
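/* Callers must pass a len that is a multiple of 4 (enforced by the __ASSERT
 * above); the call sites below round CAN payload lengths up with ROUND_UP()
 * accordingly, since the copy is performed strictly in 32-bit words.
 */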

static void memset32_volatile(volatile void *dst_, uint32_t val, size_t len)
{
volatile uint32_t *dst = dst_;

__ASSERT(len % 4 == 0, "len must be a multiple of 4!");
len /= sizeof(uint32_t);

while (len--) {
*dst++ = val;
}
}

static int can_exit_sleep_mode(struct can_mcan_reg *can)
{
uint32_t start_time;
@@ -389,12 +417,7 @@ int can_mcan_init(const struct device *dev, const struct can_mcan_config *cfg,
}

/* No memset because only aligned ptr are allowed */
for (uint32_t *ptr = (uint32_t *)msg_ram;
ptr < (uint32_t *)msg_ram +
sizeof(struct can_mcan_msg_sram) / sizeof(uint32_t);
ptr++) {
*ptr = 0;
}
memset32_volatile(msg_ram, 0, sizeof(struct can_mcan_msg_sram));

return 0;
}
@@ -486,15 +509,17 @@ static void can_mcan_get_message(struct can_mcan_data *data,
uint32_t get_idx, filt_idx;
struct zcan_frame frame;
can_rx_callback_t cb;
volatile uint32_t *src, *dst, *end;
int data_length;
void *cb_arg;
struct can_mcan_rx_fifo_hdr hdr;
bool rtr_filter_mask;
bool rtr_filter;

while ((*fifo_status_reg & CAN_MCAN_RXF0S_F0FL)) {
get_idx = (*fifo_status_reg & CAN_MCAN_RXF0S_F0GI) >>
CAN_MCAN_RXF0S_F0GI_POS;
hdr = fifo[get_idx].hdr;
memcpy32_volatile(&hdr, &fifo[get_idx].hdr,
sizeof(struct can_mcan_rx_fifo_hdr));

if (hdr.xtd) {
frame.id = hdr.ext_id;
@@ -514,24 +539,25 @@ static void can_mcan_get_message(struct can_mcan_data *data,

filt_idx = hdr.fidx;

/* Check if RTR must match */
if ((hdr.xtd && data->ext_filt_rtr_mask & (1U << filt_idx) &&
((data->ext_filt_rtr >> filt_idx) & 1U) != frame.rtr) ||
(data->std_filt_rtr_mask & (1U << filt_idx) &&
((data->std_filt_rtr >> filt_idx) & 1U) != frame.rtr)) {
if (hdr.xtd != 0) {
rtr_filter_mask = (data->ext_filt_rtr_mask & BIT(filt_idx)) != 0;
rtr_filter = (data->ext_filt_rtr & BIT(filt_idx)) != 0;
} else {
rtr_filter_mask = (data->std_filt_rtr_mask & BIT(filt_idx)) != 0;
rtr_filter = (data->std_filt_rtr & BIT(filt_idx)) != 0;
}

if (rtr_filter_mask && (rtr_filter != frame.rtr)) {
/* RTR bit does not match filter RTR mask and bit, drop frame */
*fifo_ack_reg = get_idx;
continue;
}

data_length = can_dlc_to_bytes(frame.dlc);
if (data_length <= sizeof(frame.data)) {
/* data needs to be written in 32 bit blocks!*/
for (src = fifo[get_idx].data_32,
dst = frame.data_32,
end = dst + CAN_DIV_CEIL(data_length, sizeof(uint32_t));
dst < end;
src++, dst++) {
*dst = *src;
}
memcpy32_volatile(frame.data_32, fifo[get_idx].data_32,
ROUND_UP(data_length, sizeof(uint32_t)));

if (frame.id_type == CAN_STANDARD_IDENTIFIER) {
LOG_DBG("Frame on filter %d, ID: 0x%x",
@@ -647,8 +673,6 @@ int can_mcan_send(const struct can_mcan_config *cfg,
uint32_t put_idx;
int ret;
struct can_mcan_mm mm;
volatile uint32_t *dst, *end;
const uint32_t *src;

LOG_DBG("Sending %d bytes. Id: 0x%x, ID type: %s %s %s %s",
data_length, frame->id,
@@ -696,15 +720,9 @@ int can_mcan_send(const struct can_mcan_config *cfg,
tx_hdr.ext_id = frame->id;
}

msg_ram->tx_buffer[put_idx].hdr = tx_hdr;

for (src = frame->data_32,
dst = msg_ram->tx_buffer[put_idx].data_32,
end = dst + CAN_DIV_CEIL(data_length, sizeof(uint32_t));
dst < end;
src++, dst++) {
*dst = *src;
}
memcpy32_volatile(&msg_ram->tx_buffer[put_idx].hdr, &tx_hdr, sizeof(tx_hdr));
memcpy32_volatile(msg_ram->tx_buffer[put_idx].data_32, frame->data_32,
ROUND_UP(data_length, 4));

data->tx_fin_cb[put_idx] = callback;
data->tx_fin_cb_arg[put_idx] = callback_arg;
@@ -761,7 +779,8 @@ int can_mcan_attach_std(struct can_mcan_data *data,
filter_element.sfce = filter_nr & 0x01 ? CAN_MCAN_FCE_FIFO1 :
CAN_MCAN_FCE_FIFO0;

msg_ram->std_filt[filter_nr] = filter_element;
memcpy32_volatile(&msg_ram->std_filt[filter_nr], &filter_element,
sizeof(struct can_mcan_std_filter));

k_mutex_unlock(&data->inst_mutex);

@@ -820,7 +839,8 @@ static int can_mcan_attach_ext(struct can_mcan_data *data,
filter_element.efce = filter_nr & 0x01 ? CAN_MCAN_FCE_FIFO1 :
CAN_MCAN_FCE_FIFO0;

msg_ram->ext_filt[filter_nr] = filter_element;
memcpy32_volatile(&msg_ram->ext_filt[filter_nr], &filter_element,
sizeof(struct can_mcan_ext_filter));

k_mutex_unlock(&data->inst_mutex);

@@ -874,9 +894,6 @@ int can_mcan_attach_isr(struct can_mcan_data *data,
void can_mcan_detach(struct can_mcan_data *data,
struct can_mcan_msg_sram *msg_ram, int filter_nr)
{
const struct can_mcan_ext_filter ext_filter = {0};
const struct can_mcan_std_filter std_filter = {0};

k_mutex_lock(&data->inst_mutex, K_FOREVER);
if (filter_nr >= NUM_STD_FILTER_DATA) {
filter_nr -= NUM_STD_FILTER_DATA;
@@ -885,10 +902,12 @@ void can_mcan_detach(struct can_mcan_data *data,
return;
}

msg_ram->ext_filt[filter_nr] = ext_filter;
memset32_volatile(&msg_ram->ext_filt[filter_nr], 0,
sizeof(struct can_mcan_ext_filter));
data->rx_cb_ext[filter_nr] = NULL;
} else {
msg_ram->std_filt[filter_nr] = std_filter;
memset32_volatile(&msg_ram->std_filt[filter_nr], 0,
sizeof(struct can_mcan_std_filter));
data->rx_cb_std[filter_nr] = NULL;
}

@@ -48,7 +48,7 @@ struct can_mcan_rx_fifo_hdr {
volatile uint32_t res : 2; /* Reserved */
volatile uint32_t fidx : 7; /* Filter Index */
volatile uint32_t anmf : 1; /* Accepted non-matching frame */
} __packed;
} __packed __aligned(4);

struct can_mcan_rx_fifo {
struct can_mcan_rx_fifo_hdr hdr;
@@ -56,7 +56,7 @@ struct can_mcan_rx_fifo {
volatile uint8_t data[64];
volatile uint32_t data_32[16];
};
} __packed;
} __packed __aligned(4);

struct can_mcan_mm {
volatile uint8_t idx : 5;
@@ -84,7 +84,7 @@ struct can_mcan_tx_buffer_hdr {
volatile uint8_t res2 : 1; /* Reserved */
volatile uint8_t efc : 1; /* Event FIFO control (Store Tx events) */
struct can_mcan_mm mm; /* Message marker */
} __packed;
} __packed __aligned(4);

struct can_mcan_tx_buffer {
struct can_mcan_tx_buffer_hdr hdr;
@@ -92,7 +92,7 @@ struct can_mcan_tx_buffer {
volatile uint8_t data[64];
volatile uint32_t data_32[16];
};
} __packed;
} __packed __aligned(4);

#define CAN_MCAN_TE_TX 0x1 /* TX event */
#define CAN_MCAN_TE_TXC 0x2 /* TX event in spite of cancellation */
@@ -109,7 +109,7 @@ struct can_mcan_tx_event_fifo {
volatile uint8_t fdf : 1; /* FD Format */
volatile uint8_t et : 2; /* Event type */
struct can_mcan_mm mm; /* Message marker */
} __packed;
} __packed __aligned(4);

#define CAN_MCAN_FCE_DISABLE 0x0
#define CAN_MCAN_FCE_FIFO0 0x1
@@ -130,7 +130,7 @@ struct can_mcan_std_filter {
volatile uint32_t id1 : 11;
volatile uint32_t sfce : 3; /* Filter config */
volatile uint32_t sft : 2; /* Filter type */
} __packed;
} __packed __aligned(4);

#define CAN_MCAN_EFT_RANGE_XIDAM 0x0
#define CAN_MCAN_EFT_DUAL 0x1
@@ -143,7 +143,7 @@ struct can_mcan_ext_filter {
volatile uint32_t id2 : 29; /* ID2 for dual or range, mask otherwise */
volatile uint32_t res : 1;
volatile uint32_t eft : 2; /* Filter type */
} __packed;
} __packed __aligned(4);

struct can_mcan_msg_sram {
volatile struct can_mcan_std_filter std_filt[NUM_STD_FILTER_ELEMENTS];
@@ -153,7 +153,7 @@ struct can_mcan_msg_sram {
volatile struct can_mcan_rx_fifo rx_buffer[NUM_RX_BUF_ELEMENTS];
volatile struct can_mcan_tx_event_fifo tx_event_fifo[NUM_TX_BUF_ELEMENTS];
volatile struct can_mcan_tx_buffer tx_buffer[NUM_TX_BUF_ELEMENTS];
} __packed;
} __packed __aligned(4);
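/* The element types above are all __packed __aligned(4) so that every
 * message RAM element starts on a word boundary, which is what lets
 * memcpy32_volatile() and memset32_volatile() access them purely in
 * 32-bit units.
 */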

struct can_mcan_data {
struct k_mutex inst_mutex;

@@ -253,13 +253,11 @@ static void mcux_flexcan_copy_zfilter_to_mbconfig(const struct zcan_filter *src,
if (src->id_type == CAN_STANDARD_IDENTIFIER) {
dest->format = kFLEXCAN_FrameFormatStandard;
dest->id = FLEXCAN_ID_STD(src->id);
*mask = FLEXCAN_RX_MB_STD_MASK(src->id_mask,
src->rtr & src->rtr_mask, 1);
*mask = FLEXCAN_RX_MB_STD_MASK(src->id_mask, src->rtr_mask, 1);
} else {
dest->format = kFLEXCAN_FrameFormatExtend;
dest->id = FLEXCAN_ID_EXT(src->id);
*mask = FLEXCAN_RX_MB_EXT_MASK(src->id_mask,
src->rtr & src->rtr_mask, 1);
*mask = FLEXCAN_RX_MB_EXT_MASK(src->id_mask, src->rtr_mask, 1);
}

if ((src->rtr & src->rtr_mask) == CAN_DATAFRAME) {
@@ -646,6 +644,7 @@ static inline void mcux_flexcan_transfer_rx_idle(const struct device *dev,
static FLEXCAN_CALLBACK(mcux_flexcan_transfer_callback)
{
struct mcux_flexcan_data *data = (struct mcux_flexcan_data *)userData;
const struct mcux_flexcan_config *config = data->dev->config;

switch (status) {
case kStatus_FLEXCAN_UnHandled:
@@ -654,6 +653,7 @@ static FLEXCAN_CALLBACK(mcux_flexcan_transfer_callback)
mcux_flexcan_transfer_error_status(data->dev, (uint64_t)result);
break;
case kStatus_FLEXCAN_TxSwitchToRx:
FLEXCAN_TransferAbortReceive(config->base, &data->handle, (uint64_t)result);
__fallthrough;
case kStatus_FLEXCAN_TxIdle:
/* The result field is a MB value which is limited to 32bit value */

@@ -302,6 +302,12 @@ int edac_ibecc_init(const struct device *dev)
case PCIE_ID(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_SKU11):
__fallthrough;
case PCIE_ID(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_SKU12):
__fallthrough;
case PCIE_ID(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_SKU13):
__fallthrough;
case PCIE_ID(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_SKU14):
__fallthrough;
case PCIE_ID(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_SKU15):
break;
default:
LOG_ERR("PCI Probe failed");

@@ -18,6 +18,9 @@
#define PCI_DEVICE_ID_SKU10 0x452e
#define PCI_DEVICE_ID_SKU11 0x4532
#define PCI_DEVICE_ID_SKU12 0x4518
#define PCI_DEVICE_ID_SKU13 0x451a
#define PCI_DEVICE_ID_SKU14 0x4534
#define PCI_DEVICE_ID_SKU15 0x4536

/* TODO: Move to correct place NMI registers */

@@ -71,7 +71,7 @@ static inline int z_vrfy_i2c_slave_driver_register(const struct device *dev)
static inline int z_vrfy_i2c_slave_driver_unregister(const struct device *dev)
{
Z_OOPS(Z_SYSCALL_OBJ(dev, K_OBJ_DRIVER_I2C));
return z_vrfy_i2c_slave_driver_unregister(dev);
return z_impl_i2c_slave_driver_unregister(dev);
}
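/* The verification wrapper must forward to the z_impl_ handler; invoking the
 * z_vrfy_ symbol from its own body would recurse without bound.
 */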
#include <syscalls/i2c_slave_driver_unregister_mrsh.c>

@@ -467,7 +467,7 @@ err_out:

static struct iproc_pcie_ep_ctx iproc_pcie_ep_ctx_0;

static struct iproc_pcie_ep_config iproc_pcie_ep_config_0 = {
static const struct iproc_pcie_ep_config iproc_pcie_ep_config_0 = {
.id = 0,
.base = (struct iproc_pcie_reg *)DT_INST_REG_ADDR(0),
.reg_size = DT_INST_REG_SIZE(0),
@@ -475,19 +475,21 @@ static struct iproc_pcie_ep_config iproc_pcie_ep_config_0 = {
.map_low_size = DT_INST_REG_SIZE_BY_NAME(0, map_lowmem),
.map_high_base = DT_INST_REG_ADDR_BY_NAME(0, map_highmem),
.map_high_size = DT_INST_REG_SIZE_BY_NAME(0, map_highmem),
#if DT_INST_NODE_HAS_PROP(0, dmas)
.pl330_dev = DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_IDX(0, 0)),
.pl330_tx_chan_id = DT_INST_DMAS_CELL_BY_NAME(0, txdma, channel),
.pl330_rx_chan_id = DT_INST_DMAS_CELL_BY_NAME(0, rxdma, channel),
#endif
};

static struct pcie_ep_driver_api iproc_pcie_ep_api = {
static const struct pcie_ep_driver_api iproc_pcie_ep_api = {
.conf_read = iproc_pcie_conf_read,
.conf_write = iproc_pcie_conf_write,
.map_addr = iproc_pcie_map_addr,
.unmap_addr = iproc_pcie_unmap_addr,
.raise_irq = iproc_pcie_raise_irq,
.register_reset_cb = iproc_pcie_register_reset_cb,
.dma_xfer = iproc_pcie_pl330_dma_xfer,
.dma_xfer = DT_INST_NODE_HAS_PROP(0, dmas) ? iproc_pcie_pl330_dma_xfer : NULL,
};

DEVICE_DT_INST_DEFINE(0, &iproc_pcie_ep_init, NULL,

@@ -209,9 +209,9 @@ static int sm351lt_init(const struct device *dev)
}

#if defined(CONFIG_SM351LT_TRIGGER)
#if defined(CONFIG_SM351LT_TRIGGER_OWN_THREAD)
data->dev = dev;

#if defined(CONFIG_SM351LT_TRIGGER_OWN_THREAD)
k_sem_init(&data->gpio_sem, 0, K_SEM_MAX_LIMIT);

k_thread_create(&data->thread, data->thread_stack,

@@ -718,11 +718,11 @@ static int transceive_dma(const struct device *dev,
/* Set buffers info */
spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1);

LL_SPI_Enable(spi);

/* This is turned off in spi_stm32_complete(). */
spi_stm32_cs_control(dev, true);

LL_SPI_Enable(spi);

while (data->ctx.rx_len > 0 || data->ctx.tx_len > 0) {
size_t dma_len;

@@ -126,6 +126,31 @@ struct coredump_mem_hdr_t {
uintptr_t end;
} __packed;

typedef void (*coredump_backend_start_t)(void);
typedef void (*coredump_backend_end_t)(void);
typedef void (*coredump_backend_buffer_output_t)(uint8_t *buf, size_t buflen);
typedef int (*coredump_backend_query_t)(enum coredump_query_id query_id,
void *arg);
typedef int (*coredump_backend_cmd_t)(enum coredump_cmd_id cmd_id,
void *arg);

struct coredump_backend_api {
/* Signal to backend of the start of coredump. */
coredump_backend_start_t start;

/* Signal to backend of the end of coredump. */
coredump_backend_end_t end;

/* Raw buffer output */
coredump_backend_buffer_output_t buffer_output;

/* Perform query on backend */
coredump_backend_query_t query;

/* Perform command on backend */
coredump_backend_cmd_t cmd;
};
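/*
 * Minimal sketch of a backend written against this API. The function names
 * and the exported variable below are illustrative assumptions, not names
 * mandated by this header; error handling assumes <errno.h>.
 */
static void my_backend_start(void) { /* prepare the output channel */ }
static void my_backend_end(void) { /* flush and close the output channel */ }
static void my_backend_output(uint8_t *buf, size_t buflen)
{
	/* emit buflen bytes from buf, e.g. over a UART or into flash */
}
static int my_backend_query(enum coredump_query_id query_id, void *arg)
{
	return -ENOTSUP; /* no queries supported in this sketch */
}
static int my_backend_cmd(enum coredump_cmd_id cmd_id, void *arg)
{
	return -ENOTSUP; /* no commands supported in this sketch */
}

struct coredump_backend_api my_coredump_backend = {
	.start = my_backend_start,
	.end = my_backend_end,
	.buffer_output = my_backend_output,
	.query = my_backend_query,
	.cmd = my_backend_cmd,
};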

void coredump(unsigned int reason, const z_arch_esf_t *esf,
struct k_thread *thread);
void coredump_memory_dump(uintptr_t start_addr, uintptr_t end_addr);

@@ -385,6 +385,7 @@ static inline int z_impl_counter_get_value(const struct device *dev,
* interrupts or requested channel).
* @retval -EINVAL if alarm settings are invalid.
* @retval -ETIME if absolute alarm was set too late.
* @retval -EBUSY if alarm is already active.
*/
__syscall int counter_set_channel_alarm(const struct device *dev,
uint8_t chan_id,

@@ -162,6 +162,11 @@ struct z_kernel {
#if defined(CONFIG_THREAD_MONITOR)
struct k_thread *threads; /* singly linked list of ALL threads */
#endif

#if defined(CONFIG_SMP) && defined(CONFIG_SCHED_IPI_SUPPORTED)
/* Need to signal an IPI at the next scheduling point */
bool pending_ipi;
#endif
};

typedef struct z_kernel _kernel_t;

@@ -302,10 +302,8 @@ static inline char z_log_minimal_level_to_char(int level)
} \
\
bool is_user_context = k_is_user_context(); \
uint32_t filters = IS_ENABLED(CONFIG_LOG_RUNTIME_FILTERING) ? \
(_dsource)->filters : 0;\
if (IS_ENABLED(CONFIG_LOG_RUNTIME_FILTERING) && !is_user_context && \
_level > Z_LOG_RUNTIME_FILTER(filters)) { \
_level > Z_LOG_RUNTIME_FILTER((_dsource)->filters)) { \
break; \
} \
if (IS_ENABLED(CONFIG_LOG2)) { \
@@ -347,8 +345,6 @@ static inline char z_log_minimal_level_to_char(int level)
break; \
} \
bool is_user_context = k_is_user_context(); \
uint32_t filters = IS_ENABLED(CONFIG_LOG_RUNTIME_FILTERING) ? \
(_dsource)->filters : 0;\
\
if (IS_ENABLED(CONFIG_LOG_MINIMAL)) { \
Z_LOG_TO_PRINTK(_level, "%s", _str); \
@@ -357,7 +353,7 @@ static inline char z_log_minimal_level_to_char(int level)
break; \
} \
if (IS_ENABLED(CONFIG_LOG_RUNTIME_FILTERING) && !is_user_context && \
_level > Z_LOG_RUNTIME_FILTER(filters)) { \
_level > Z_LOG_RUNTIME_FILTER((_dsource)->filters)) { \
break; \
} \
if (IS_ENABLED(CONFIG_LOG2)) { \

@@ -199,10 +199,11 @@ struct net_conn_handle;
* anyway. This saves 12 bytes / context in IPv6.
*/
__net_socket struct net_context {
/** User data.
*
* First member of the structure to let users either have user data
* associated with a context, or put contexts into a FIFO.
/** First member of the structure to allow to put contexts into a FIFO.
*/
void *fifo_reserved;

/** User data associated with a context.
*/
void *user_data;

@@ -1368,6 +1368,10 @@ uint32_t net_if_ipv6_calc_reachable_time(struct net_if_ipv6 *ipv6);
static inline void net_if_ipv6_set_reachable_time(struct net_if_ipv6 *ipv6)
{
#if defined(CONFIG_NET_NATIVE_IPV6)
if (ipv6 == NULL) {
return;
}

ipv6->reachable_time = net_if_ipv6_calc_reachable_time(ipv6);
#endif
}

@@ -7,6 +7,9 @@
#ifndef ZEPHYR_INCLUDE_TIME_UNITS_H_
#define ZEPHYR_INCLUDE_TIME_UNITS_H_

#include <sys/util.h>
#include <toolchain.h>

#ifdef __cplusplus
extern "C" {
#endif
@@ -56,6 +59,21 @@ static TIME_CONSTEXPR inline int sys_clock_hw_cycles_per_sec(void)
#endif
}

/** @internal
* Macro determines if fast conversion algorithm can be used. It checks if
* maximum timeout represented in source frequency domain and multiplied by
* target frequency fits in 64 bits.
*
* @param from_hz Source frequency.
* @param to_hz Target frequency.
*
* @retval true Use faster algorithm.
* @retval false Use algorithm preventing overflow of intermediate value.
*/
#define Z_TMCVT_USE_FAST_ALGO(from_hz, to_hz) \
((ceiling_fraction(CONFIG_SYS_CLOCK_MAX_TIMEOUT_DAYS * 24ULL * 3600ULL * from_hz, \
UINT32_MAX) * to_hz) <= UINT32_MAX)
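/* Worked example (illustrative numbers): with the default
 * CONFIG_SYS_CLOCK_MAX_TIMEOUT_DAYS=365, from_hz=32768 and to_hz=1000000,
 * the maximum timeout is 365*24*3600*32768 ~= 1.03e12 ticks;
 * ceiling_fraction() of that over UINT32_MAX is 241, and 241*1000000 fits
 * comfortably under UINT32_MAX, so the fast algorithm is selected.
 */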

/* Time converter generator gadget. Selects from one of three
* conversion algorithms: ones that take advantage when the
* frequencies are an integer ratio (in either direction), or a full
@@ -123,8 +141,18 @@ static TIME_CONSTEXPR ALWAYS_INLINE uint64_t z_tmcvt(uint64_t t, uint32_t from_h
} else {
if (result32) {
return (uint32_t)((t * to_hz + off) / from_hz);
} else if (const_hz && Z_TMCVT_USE_FAST_ALGO(from_hz, to_hz)) {
/* Faster algorithm but source is first multiplied by target frequency
* and it can overflow even though final result would not overflow.
* Kconfig option shall prevent use of this algorithm when there is a
* risk of overflow.
*/
return ((t * to_hz + off) / from_hz);
} else {
return (t * to_hz + off) / from_hz;
/* Slower algorithm but input is first divided before being multiplied
* which prevents overflow of intermediate value.
*/
return (t / from_hz) * to_hz + ((t % from_hz) * to_hz + off) / from_hz;
}
}
}
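/* Worked example for the overflow-safe branch: t = 327680 ticks at
 * from_hz = 32768 converted to to_hz = 1000 with off = 0 gives
 * (327680 / 32768) * 1000 + ((327680 % 32768) * 1000 + 0) / 32768
 * = 10 * 1000 + 0 = 10000, i.e. 10 seconds in milliseconds, without ever
 * forming the full t * to_hz product.
 */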

@@ -613,6 +613,17 @@ config TIMEOUT_64BIT
availability of absolute timeout values (which require the
extra precision).

config SYS_CLOCK_MAX_TIMEOUT_DAYS
int "Max timeout (in days) used in conversions"
default 365
help
Value is used in the time conversion static inline function to determine
at compile time which algorithm to use. One algorithm is faster, takes
less code but may overflow if multiplication of source and target
frequency exceeds 64 bits. Second algorithm prevents that. Faster
algorithm is selected for conversion if maximum timeout represented in
source frequency domain multiplied by target frequency fits in 64 bits.

config XIP
bool "Execute in place"
help

@@ -161,15 +161,21 @@ int z_impl_k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout)

key = k_spin_lock(&lock);

struct k_thread *waiter = z_waitq_head(&mutex->wait_q);
/*
* Check if mutex was unlocked after this thread was unpended.
* If so, skip adjusting owner's priority down.
*/
if (likely(mutex->owner != NULL)) {
struct k_thread *waiter = z_waitq_head(&mutex->wait_q);

new_prio = (waiter != NULL) ?
new_prio_for_inheritance(waiter->base.prio, mutex->owner_orig_prio) :
mutex->owner_orig_prio;
new_prio = (waiter != NULL) ?
new_prio_for_inheritance(waiter->base.prio, mutex->owner_orig_prio) :
mutex->owner_orig_prio;

LOG_DBG("adjusting prio down on mutex %p", mutex);
LOG_DBG("adjusting prio down on mutex %p", mutex);

resched = adjust_owner_prio(mutex, new_prio) || resched;
resched = adjust_owner_prio(mutex, new_prio) || resched;
}

if (resched) {
z_reschedule(&lock, key);

@@ -576,6 +576,9 @@ static void triggered_work_expiration_handler(struct _timeout *timeout)
k_work_submit_to_queue(twork->workq, &twork->work);
}

extern int z_work_submit_to_queue(struct k_work_q *queue,
struct k_work *work);

static int signal_triggered_work(struct k_poll_event *event, uint32_t status)
{
struct z_poller *poller = event->poller;
@@ -587,7 +590,7 @@ static int signal_triggered_work(struct k_poll_event *event, uint32_t status)

z_abort_timeout(&twork->timeout);
twork->poll_result = 0;
k_work_submit_to_queue(work_q, &twork->work);
z_work_submit_to_queue(work_q, &twork->work);
}

return 0;

@@ -219,6 +219,25 @@ static ALWAYS_INLINE void dequeue_thread(void *pq,
}
}

static void signal_pending_ipi(void)
{
/* Synchronization note: you might think we need to lock these
* two steps, but an IPI is idempotent. It's OK if we do it
* twice. All we require is that if a CPU sees the flag true,
* it is guaranteed to send the IPI, and if a core sets
* pending_ipi, the IPI will be sent the next time through
* this code.
*/
#if defined(CONFIG_SMP) && defined(CONFIG_SCHED_IPI_SUPPORTED)
if (CONFIG_MP_NUM_CPUS > 1) {
if (_kernel.pending_ipi) {
_kernel.pending_ipi = false;
arch_sched_ipi();
}
}
#endif
}

#ifdef CONFIG_SMP
/* Called out of z_swap() when CONFIG_SMP. The current thread can
* never live in the run queue until we are inexorably on the context
@@ -231,6 +250,7 @@ void z_requeue_current(struct k_thread *curr)
if (z_is_thread_queued(curr)) {
_priq_run_add(&_kernel.ready_q.runq, curr);
}
signal_pending_ipi();
}
#endif

@@ -481,6 +501,15 @@ static bool thread_active_elsewhere(struct k_thread *thread)
return false;
}

static void flag_ipi(void)
{
#if defined(CONFIG_SMP) && defined(CONFIG_SCHED_IPI_SUPPORTED)
if (CONFIG_MP_NUM_CPUS > 1) {
_kernel.pending_ipi = true;
}
#endif
}

static void ready_thread(struct k_thread *thread)
{
#ifdef CONFIG_KERNEL_COHERENCE
@@ -495,9 +524,7 @@ static void ready_thread(struct k_thread *thread)

queue_thread(&_kernel.ready_q.runq, thread);
update_cache(0);
#if defined(CONFIG_SMP) && defined(CONFIG_SCHED_IPI_SUPPORTED)
arch_sched_ipi();
#endif
flag_ipi();
}
}

@@ -626,17 +653,13 @@ static void add_thread_timeout(struct k_thread *thread, k_timeout_t timeout)
}
}

static void pend(struct k_thread *thread, _wait_q_t *wait_q,
k_timeout_t timeout)
static void pend_locked(struct k_thread *thread, _wait_q_t *wait_q,
k_timeout_t timeout)
{
#ifdef CONFIG_KERNEL_COHERENCE
__ASSERT_NO_MSG(wait_q == NULL || arch_mem_coherent(wait_q));
#endif

LOCKED(&sched_spinlock) {
add_to_waitq_locked(thread, wait_q);
}

add_to_waitq_locked(thread, wait_q);
add_thread_timeout(thread, timeout);
}

@@ -644,7 +667,9 @@ void z_pend_thread(struct k_thread *thread, _wait_q_t *wait_q,
k_timeout_t timeout)
{
__ASSERT_NO_MSG(thread == _current || is_thread_dummy(thread));
pend(thread, wait_q, timeout);
LOCKED(&sched_spinlock) {
pend_locked(thread, wait_q, timeout);
}
}

static inline void unpend_thread_no_timeout(struct k_thread *thread)
@@ -686,7 +711,12 @@ void z_thread_timeout(struct _timeout *timeout)

int z_pend_curr_irqlock(uint32_t key, _wait_q_t *wait_q, k_timeout_t timeout)
{
pend(_current, wait_q, timeout);
/* This is a legacy API for pre-switch architectures and isn't
* correctly synchronized for multi-cpu use
*/
__ASSERT_NO_MSG(!IS_ENABLED(CONFIG_SMP));

pend_locked(_current, wait_q, timeout);

#if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
pending_current = _current;
@@ -709,8 +739,20 @@ int z_pend_curr(struct k_spinlock *lock, k_spinlock_key_t key,
#if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
pending_current = _current;
#endif
pend(_current, wait_q, timeout);
return z_swap(lock, key);
__ASSERT_NO_MSG(sizeof(sched_spinlock) == 0 || lock != &sched_spinlock);

/* We do a "lock swap" prior to calling z_swap(), such that
* the caller's lock gets released as desired. But we ensure
* that we hold the scheduler lock and leave local interrupts
* masked until we reach the context switch. z_swap() itself
* has similar code; the duplication is because it's a legacy
* API that doesn't expect to be called with scheduler lock
* held.
*/
(void) k_spin_lock(&sched_spinlock);
pend_locked(_current, wait_q, timeout);
k_spin_release(lock);
return z_swap(&sched_spinlock, key);
}

struct k_thread *z_unpend1_no_timeout(_wait_q_t *wait_q)
@@ -784,9 +826,7 @@ void z_thread_priority_set(struct k_thread *thread, int prio)
{
bool need_sched = z_set_prio(thread, prio);

#if defined(CONFIG_SMP) && defined(CONFIG_SCHED_IPI_SUPPORTED)
arch_sched_ipi();
#endif
flag_ipi();

if (need_sched && _current->base.sched_locked == 0U) {
z_reschedule_unlocked();
@@ -826,6 +866,7 @@ void z_reschedule(struct k_spinlock *lock, k_spinlock_key_t key)
z_swap(lock, key);
} else {
k_spin_unlock(lock, key);
signal_pending_ipi();
}
}

@@ -835,6 +876,7 @@ void z_reschedule_irqlock(uint32_t key)
z_swap_irqlock(key);
} else {
irq_unlock(key);
signal_pending_ipi();
}
}

@@ -868,7 +910,16 @@ void k_sched_unlock(void)
struct k_thread *z_swap_next_thread(void)
{
#ifdef CONFIG_SMP
return next_up();
struct k_thread *ret = next_up();

if (ret == _current) {
/* When not swapping, have to signal IPIs here. In
* the context switch case it must happen later, after
* _current gets requeued.
*/
signal_pending_ipi();
}
return ret;
#else
return _kernel.ready_q.cache;
#endif
@@ -935,6 +986,7 @@ void *z_get_next_switch_handle(void *interrupted)
new_thread->switch_handle = NULL;
}
}
signal_pending_ipi();
return ret;
#else
_current->switch_handle = interrupted;
@@ -1331,9 +1383,7 @@ void z_impl_k_wakeup(k_tid_t thread)
z_mark_thread_as_not_suspended(thread);
z_ready_thread(thread);

#if defined(CONFIG_SMP) && defined(CONFIG_SCHED_IPI_SUPPORTED)
arch_sched_ipi();
#endif
flag_ipi();

if (!arch_is_in_isr()) {
z_reschedule_unlocked();
@@ -1520,6 +1570,9 @@ void z_thread_abort(struct k_thread *thread)
/* It's running somewhere else, flag and poke */
thread->base.thread_state |= _THREAD_ABORTING;

/* We're going to spin, so need a true synchronous IPI
* here, not deferred!
*/
#ifdef CONFIG_SCHED_IPI_SUPPORTED
arch_sched_ipi();
#endif

@@ -1011,7 +1011,7 @@ void z_thread_mark_switched_in(void)
#ifdef CONFIG_THREAD_RUNTIME_STATS
struct k_thread *thread;

thread = k_current_get();
thread = z_current_get();
#ifdef CONFIG_THREAD_RUNTIME_STATS_USE_TIMING_FUNCTIONS
thread->rt_stats.last_switched_in = timing_counter_get();
#else
@@ -1033,7 +1033,7 @@ void z_thread_mark_switched_out(void)
uint64_t diff;
struct k_thread *thread;

thread = k_current_get();
thread = z_current_get();

if (unlikely(thread->rt_stats.last_switched_in == 0)) {
/* Has not run before */

@@ -68,8 +68,14 @@ static int32_t next_timeout(void)
{
struct _timeout *to = first();
int32_t ticks_elapsed = elapsed();
int32_t ret = to == NULL ? MAX_WAIT
: CLAMP(to->dticks - ticks_elapsed, 0, MAX_WAIT);
int32_t ret;

if ((to == NULL) ||
((int64_t)(to->dticks - ticks_elapsed) > (int64_t)INT_MAX)) {
ret = MAX_WAIT;
} else {
ret = MAX(0, to->dticks - ticks_elapsed);
}

#ifdef CONFIG_TIMESLICING
if (_current_cpu->slice_ticks && _current_cpu->slice_ticks < ret) {
@@ -238,6 +244,18 @@ void sys_clock_announce(int32_t ticks)

k_spinlock_key_t key = k_spin_lock(&timeout_lock);

/* We release the lock around the callbacks below, so on SMP
* systems someone might be already running the loop. Don't
* race (which will cause paralllel execution of "sequential"
|
||||
* timeouts and confuse apps), just increment the tick count
|
||||
* and return.
|
||||
*/
|
||||
if (IS_ENABLED(CONFIG_SMP) && (announce_remaining != 0)) {
|
||||
announce_remaining += ticks;
|
||||
k_spin_unlock(&timeout_lock, key);
|
||||
return;
|
||||
}
|
||||
|
||||
announce_remaining = ticks;
|
||||
|
||||
while (first() != NULL && first()->dticks <= announce_remaining) {
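Why the early-return guard works, as a hedged sketch stripped to its essentials (it borrows the names above but omits the drain loop):

/* A nonzero announce_remaining means another CPU is already draining
 * timeouts, so a second caller just banks its ticks under the lock and
 * leaves; the active CPU consumes the banked ticks. Illustrative only.
 */
void announce_sketch(int32_t ticks)
{
	k_spinlock_key_t key = k_spin_lock(&timeout_lock);

	if (announce_remaining != 0) {	/* loop running elsewhere */
		announce_remaining += ticks;
		k_spin_unlock(&timeout_lock, key);
		return;
	}
	announce_remaining = ticks;
	/* ... drain expired timeouts, dropping the lock around each
	 * callback ...
	 */
	k_spin_unlock(&timeout_lock, key);
}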
@@ -245,13 +263,13 @@ void sys_clock_announce(int32_t ticks)
int dt = t->dticks;

curr_tick += dt;
announce_remaining -= dt;
t->dticks = 0;
remove_timeout(t);

k_spin_unlock(&timeout_lock, key);
t->fn(t);
key = k_spin_lock(&timeout_lock);
announce_remaining -= dt;
}

if (first() != NULL) {
@@ -271,7 +289,7 @@ int64_t sys_clock_tick_get(void)
uint64_t t = 0U;

LOCKED(&timeout_lock) {
t = curr_tick + sys_clock_elapsed();
t = curr_tick + elapsed();
}
return t;
}

@@ -355,26 +355,45 @@ static int submit_to_queue_locked(struct k_work *work,
return ret;
}

int k_work_submit_to_queue(struct k_work_q *queue,
struct k_work *work)
/* Submit work to a queue but do not yield the current thread.
 *
 * Intended for internal use.
 *
 * See also submit_to_queue_locked().
 *
 * @param queuep pointer to a queue reference.
 * @param work the work structure to be submitted
 *
 * @retval see submit_to_queue_locked()
 */
int z_work_submit_to_queue(struct k_work_q *queue,
struct k_work *work)
{
__ASSERT_NO_MSG(work != NULL);

k_spinlock_key_t key = k_spin_lock(&lock);

SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, submit_to_queue, queue, work);

int ret = submit_to_queue_locked(work, &queue);

k_spin_unlock(&lock, key);

/* If we changed the queue contents (as indicated by a positive ret)
 * the queue thread may now be ready, but we missed the reschedule
 * point because the lock was held. If this is being invoked by a
 * preemptible thread then yield.
return ret;
}

int k_work_submit_to_queue(struct k_work_q *queue,
struct k_work *work)
{
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, submit_to_queue, queue, work);

int ret = z_work_submit_to_queue(queue, work);

/* submit_to_queue_locked() won't reschedule on its own
 * (really it should, otherwise this process will result in
 * spurious calls to z_swap() due to the race), so do it here
 * if the queue state changed.
 */
if ((ret > 0) && (k_is_preempt_thread() != 0)) {
k_yield();
if (ret > 0) {
z_reschedule_unlocked();
}

SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, submit_to_queue, queue, work, ret);
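A hedged usage sketch of the split above: the public entry point keeps the tracing and the reschedule, while internal callers can use z_work_submit_to_queue() to avoid yielding (my_queue and my_work are illustrative and must have been set up with k_work_queue_start()/k_work_init() elsewhere):

static struct k_work_q my_queue;	/* illustrative queue */
static struct k_work my_work;		/* illustrative work item */

static void submit_example(void)
{
	int ret = k_work_submit_to_queue(&my_queue, &my_work);

	if (ret < 0) {
		/* error from submit_to_queue_locked() */
	}
	/* ret > 0: the queue state changed, and the wrapper above has
	 * already provided the reschedule point via z_reschedule_unlocked().
	 */
}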
@@ -586,6 +605,7 @@ static void work_queue_main(void *workq_ptr, void *p2, void *p3)
struct k_work *work = NULL;
k_work_handler_t handler = NULL;
k_spinlock_key_t key = k_spin_lock(&lock);
bool yield;

/* Check for and prepare any new work. */
node = sys_slist_get(&queue->pending);
@@ -644,34 +664,30 @@ static void work_queue_main(void *workq_ptr, void *p2, void *p3)

k_spin_unlock(&lock, key);

if (work != NULL) {
bool yield;
__ASSERT_NO_MSG(handler != NULL);
handler(work);

__ASSERT_NO_MSG(handler != NULL);
handler(work);
/* Mark the work item as no longer running and deal
 * with any cancellation issued while it was running.
 * Clear the BUSY flag and optionally yield to prevent
 * starving other threads.
 */
key = k_spin_lock(&lock);

/* Mark the work item as no longer running and deal
 * with any cancellation issued while it was running.
 * Clear the BUSY flag and optionally yield to prevent
 * starving other threads.
 */
key = k_spin_lock(&lock);
flag_clear(&work->flags, K_WORK_RUNNING_BIT);
if (flag_test(&work->flags, K_WORK_CANCELING_BIT)) {
finalize_cancel_locked(work);
}

flag_clear(&work->flags, K_WORK_RUNNING_BIT);
if (flag_test(&work->flags, K_WORK_CANCELING_BIT)) {
finalize_cancel_locked(work);
}
flag_clear(&queue->flags, K_WORK_QUEUE_BUSY_BIT);
yield = !flag_test(&queue->flags, K_WORK_QUEUE_NO_YIELD_BIT);
k_spin_unlock(&lock, key);

flag_clear(&queue->flags, K_WORK_QUEUE_BUSY_BIT);
yield = !flag_test(&queue->flags, K_WORK_QUEUE_NO_YIELD_BIT);
k_spin_unlock(&lock, key);

/* Optionally yield to prevent the work queue from
 * starving other threads.
 */
if (yield) {
k_yield();
}
/* Optionally yield to prevent the work queue from
 * starving other threads.
 */
if (yield) {
k_yield();
}
}
}

@@ -94,7 +94,7 @@ void free(void *ptr)
(void) sys_mutex_unlock(&z_malloc_heap_mutex);
}

SYS_INIT(malloc_prepare, APPLICATION, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
SYS_INIT(malloc_prepare, POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
#else /* No malloc arena */
void *malloc(size_t size)
{

@@ -133,7 +133,7 @@ static int malloc_prepare(const struct device *unused)
return 0;
}

SYS_INIT(malloc_prepare, APPLICATION, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
SYS_INIT(malloc_prepare, POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);

/* Current offset from HEAP_BASE of unused memory */
LIBC_BSS static size_t heap_sz;

@@ -19,6 +19,7 @@ config POSIX_API
config PTHREAD_IPC
bool "POSIX pthread IPC API"
default y if POSIX_API
depends on POSIX_CLOCK
help
This enables a mostly-standards-compliant implementation of
the pthread mutex, condition variable and barrier IPC
@@ -111,6 +112,8 @@ config APP_LINK_WITH_POSIX_SUBSYS
config EVENTFD
bool "Enable support for eventfd"
depends on !ARCH_POSIX
select POLL
default y if POSIX_API
help
Enable support for event file descriptors, eventfd. An eventfd can
be used as an event wait/notify mechanism together with POSIX calls

@@ -27,7 +27,6 @@ static struct k_spinlock rt_clock_base_lock;
 */
int z_impl_clock_gettime(clockid_t clock_id, struct timespec *ts)
{
uint64_t elapsed_nsecs;
struct timespec base;
k_spinlock_key_t key;

@@ -48,9 +47,13 @@ int z_impl_clock_gettime(clockid_t clock_id, struct timespec *ts)
return -1;
}

elapsed_nsecs = k_ticks_to_ns_floor64(k_uptime_ticks());
ts->tv_sec = (int32_t) (elapsed_nsecs / NSEC_PER_SEC);
ts->tv_nsec = (int32_t) (elapsed_nsecs % NSEC_PER_SEC);
uint64_t ticks = k_uptime_ticks();
uint64_t elapsed_secs = ticks / CONFIG_SYS_CLOCK_TICKS_PER_SEC;
uint64_t nremainder = ticks - elapsed_secs * CONFIG_SYS_CLOCK_TICKS_PER_SEC;

ts->tv_sec = (time_t) elapsed_secs;
/* For ns, a 32 bit conversion can be used since it's smaller than 1 sec. */
ts->tv_nsec = (int32_t) k_ticks_to_ns_floor32(nremainder);

ts->tv_sec += base.tv_sec;
ts->tv_nsec += base.tv_nsec;

@@ -15,12 +15,10 @@
#define PTHREAD_INIT_FLAGS PTHREAD_CANCEL_ENABLE
#define PTHREAD_CANCELED ((void *) -1)

#define LOWEST_POSIX_THREAD_PRIORITY 1

PTHREAD_MUTEX_DEFINE(pthread_key_lock);

static const pthread_attr_t init_pthread_attrs = {
.priority = LOWEST_POSIX_THREAD_PRIORITY,
.priority = 0,
.stack = NULL,
.stacksize = 0,
.flags = PTHREAD_INIT_FLAGS,
@@ -54,9 +52,11 @@ static uint32_t zephyr_to_posix_priority(int32_t z_prio, int *policy)
if (z_prio < 0) {
*policy = SCHED_FIFO;
prio = -1 * (z_prio + 1);
__ASSERT_NO_MSG(prio < CONFIG_NUM_COOP_PRIORITIES);
} else {
*policy = SCHED_RR;
prio = (CONFIG_NUM_PREEMPT_PRIORITIES - z_prio);
prio = (CONFIG_NUM_PREEMPT_PRIORITIES - z_prio - 1);
__ASSERT_NO_MSG(prio < CONFIG_NUM_PREEMPT_PRIORITIES);
}

return prio;
@@ -68,9 +68,11 @@ static int32_t posix_to_zephyr_priority(uint32_t priority, int policy)

if (policy == SCHED_FIFO) {
/* Zephyr COOP priority starts from -1 */
__ASSERT_NO_MSG(priority < CONFIG_NUM_COOP_PRIORITIES);
prio = -1 * (priority + 1);
} else {
prio = (CONFIG_NUM_PREEMPT_PRIORITIES - priority);
__ASSERT_NO_MSG(priority < CONFIG_NUM_PREEMPT_PRIORITIES);
prio = (CONFIG_NUM_PREEMPT_PRIORITIES - priority - 1);
}

return prio;
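A hedged worked example of the corrected round-trip mapping above, assuming the illustrative config value CONFIG_NUM_PREEMPT_PRIORITIES == 16:

/*   zephyr_to_posix: z_prio 0  (highest preemptive) -> 16 - 0  - 1 = 15
 *                    z_prio 15 (lowest)             -> 16 - 15 - 1 = 0
 *   posix_to_zephyr: priority 15 -> 16 - 15 - 1 = 0, the exact inverse.
 *
 * Without the "- 1", z_prio 0 mapped to 16, one past the valid POSIX
 * range [0, 15]; the new __ASSERT_NO_MSG() checks would trip on that.
 */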
@@ -150,7 +152,7 @@ int pthread_create(pthread_t *newthread, const pthread_attr_t *attr,
for (pthread_num = 0;
pthread_num < CONFIG_MAX_PTHREAD_COUNT; pthread_num++) {
thread = &posix_thread_pool[pthread_num];
if (thread->state == PTHREAD_TERMINATED) {
if (thread->state == PTHREAD_EXITED || thread->state == PTHREAD_TERMINATED) {
thread->state = PTHREAD_JOINABLE;
break;
}

@@ -7,13 +7,9 @@
#include <kernel.h>
#include <posix/posix_sched.h>

static bool valid_posix_policy(int policy)
static inline bool valid_posix_policy(int policy)
{
if (policy != SCHED_FIFO && policy != SCHED_RR) {
return false;
}

return true;
return policy == SCHED_FIFO || policy == SCHED_RR;
}

/**
@@ -23,25 +19,12 @@ static bool valid_posix_policy(int policy)
 */
int sched_get_priority_min(int policy)
{
if (valid_posix_policy(policy) == false) {
if (!valid_posix_policy(policy)) {
errno = EINVAL;
return -1;
}

if (IS_ENABLED(CONFIG_COOP_ENABLED)) {
if (policy == SCHED_FIFO) {
return 0;
}
}

if (IS_ENABLED(CONFIG_PREEMPT_ENABLED)) {
if (policy == SCHED_RR) {
return 0;
}
}

errno = EINVAL;
return -1;
return 0;
}

/**
@@ -51,25 +34,10 @@ int sched_get_priority_min(int policy)
 */
int sched_get_priority_max(int policy)
{
if (valid_posix_policy(policy) == false) {
errno = EINVAL;
return -1;
}

if (IS_ENABLED(CONFIG_COOP_ENABLED)) {
if (policy == SCHED_FIFO) {
/* Posix COOP priority starts from 0
 * whereas zephyr starts from -1
 */
return (CONFIG_NUM_COOP_PRIORITIES - 1);
}

}

if (IS_ENABLED(CONFIG_PREEMPT_ENABLED)) {
if (policy == SCHED_RR) {
return CONFIG_NUM_PREEMPT_PRIORITIES;
}
if (IS_ENABLED(CONFIG_COOP_ENABLED) && policy == SCHED_FIFO) {
return CONFIG_NUM_COOP_PRIORITIES - 1;
} else if (IS_ENABLED(CONFIG_PREEMPT_ENABLED) && policy == SCHED_RR) {
return CONFIG_NUM_PREEMPT_PRIORITIES - 1;
}

errno = EINVAL;

@@ -91,6 +91,7 @@ int sem_post(sem_t *semaphore)
int sem_timedwait(sem_t *semaphore, struct timespec *abstime)
{
int32_t timeout;
struct timespec current;
int64_t current_ms, abstime_ms;

__ASSERT(abstime, "abstime pointer NULL");
@@ -100,8 +101,12 @@ int sem_timedwait(sem_t *semaphore, struct timespec *abstime)
return -1;
}

current_ms = (int64_t)k_uptime_get();
if (clock_gettime(CLOCK_REALTIME, &current) < 0) {
return -1;
}

abstime_ms = (int64_t)_ts_to_ms(abstime);
current_ms = (int64_t)_ts_to_ms(&current);

if (abstime_ms <= current_ms) {
timeout = 0;
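A hedged usage sketch matching the fix above: abstime is now compared against CLOCK_REALTIME, so callers should build the deadline from the same clock (the two-second budget and wrapper name are illustrative):

static int take_with_deadline(sem_t *sem)
{
	struct timespec abstime;

	if (clock_gettime(CLOCK_REALTIME, &abstime) < 0) {
		return -1;
	}
	abstime.tv_sec += 2;	/* illustrative two-second budget */

	return sem_timedwait(sem, &abstime);	/* ETIMEDOUT etc. in errno */
}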

@@ -4,6 +4,8 @@
 * SPDX-License-Identifier: Apache-2.0
 */

#include <errno.h>

#include <kernel.h>
#include <posix/unistd.h>

@@ -14,8 +16,12 @@
 */
unsigned sleep(unsigned int seconds)
{
k_sleep(K_SECONDS(seconds));
return 0;
int rem;

rem = k_sleep(K_SECONDS(seconds));
__ASSERT_NO_MSG(rem >= 0);

return rem / MSEC_PER_SEC;
}
/**
 * @brief Suspend execution for microsecond intervals.
@@ -24,10 +30,19 @@ unsigned sleep(unsigned int seconds)
 */
int usleep(useconds_t useconds)
{
if (useconds < USEC_PER_MSEC) {
k_busy_wait(useconds);
} else {
k_msleep(useconds / USEC_PER_MSEC);
int32_t rem;

if (useconds >= USEC_PER_SEC) {
errno = EINVAL;
return -1;
}

rem = k_usleep(useconds);
__ASSERT_NO_MSG(rem >= 0);
if (rem > 0) {
/* sleep was interrupted by a call to k_wakeup() */
errno = EINTR;
return -1;
}

return 0;
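A hedged usage sketch for the new usleep() contract above: requests of one second or more now fail with EINVAL (per POSIX), and EINTR means the sleep was cut short by k_wakeup(). The wrapper name is illustrative:

static int short_delay(useconds_t us)
{
	if (usleep(us) == 0) {
		return 0;	/* full delay elapsed */
	}
	if (errno == EINVAL) {
		return -1;	/* >= 1 s: use sleep() or nanosleep() instead */
	}
	return 1;		/* EINTR: woken early by k_wakeup() */
}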

@@ -24,6 +24,7 @@ config TFM_BOARD

menuconfig BUILD_WITH_TFM
bool "Build with TF-M as the Secure Execution Environment"
depends on ZEPHYR_TRUSTED_FIRMWARE_M_MODULE
depends on TRUSTED_EXECUTION_NONSECURE
depends on TFM_BOARD != ""
depends on ARM_TRUSTZONE_M

@@ -12,4 +12,5 @@ CONFIG_TEST_RANDOM_GENERATOR=y

# Use Portable threads
CONFIG_PTHREAD_IPC=y
CONFIG_POSIX_CLOCK=y
CONFIG_NET_SOCKETS_POSIX_NAMES=y

@@ -19,3 +19,6 @@ CONFIG_STATS_NAMES=n

# Disable Logging for footprint reduction
CONFIG_LOG=n

# Network settings
CONFIG_NET_BUF_USER_DATA_SIZE=8

@@ -16,3 +16,6 @@ CONFIG_SYSTEM_WORKQUEUE_STACK_SIZE=2304

# Enable file system commands
CONFIG_MCUMGR_CMD_FS_MGMT=y

# Network settings
CONFIG_NET_BUF_USER_DATA_SIZE=8

@@ -8,6 +8,7 @@ tests:
platform_allow: mps2_an521_ns lpcxpresso55s69_ns nrf5340dk_nrf5340_cpuapp_ns
nrf9160dk_nrf9160_ns nucleo_l552ze_q_ns v2m_musca_s1_ns stm32l562e_dk_ns
bl5340_dvk_cpuapp_ns
filter: CONFIG_ZEPHYR_TRUSTED_FIRMWARE_M_MODULE
harness: console
harness_config:
type: multi_line

@@ -5,6 +5,7 @@ common:
tags: psa
platform_allow: mps2_an521_ns v2m_musca_s1_ns
nrf5340dk_nrf5340_cpuapp_ns nrf9160dk_nrf9160_ns bl5340_dvk_cpuapp_ns
filter: CONFIG_ZEPHYR_TRUSTED_FIRMWARE_M_MODULE
harness: console
harness_config:
type: multi_line
@@ -22,3 +23,4 @@ common:
tests:
sample.tfm.protected_storage:
tags: tfm
filter: CONFIG_ZEPHYR_TRUSTED_FIRMWARE_M_MODULE

@@ -8,6 +8,7 @@ tests:
platform_allow: mps2_an521_ns lpcxpresso55s69_ns
nrf5340dk_nrf5340_cpuapp_ns nrf9160dk_nrf9160_ns nucleo_l552ze_q_ns
stm32l562e_dk_ns v2m_musca_s1_ns v2m_musca_b1_ns bl5340_dvk_cpuapp_ns
filter: CONFIG_ZEPHYR_TRUSTED_FIRMWARE_M_MODULE
harness: console
harness_config:
type: multi_line
@@ -21,6 +22,7 @@ tests:
platform_allow: mps2_an521_ns
extra_configs:
- CONFIG_TFM_BL2=n
filter: CONFIG_ZEPHYR_TRUSTED_FIRMWARE_M_MODULE
harness: console
harness_config:
type: multi_line

@@ -3,6 +3,7 @@ common:
platform_allow: mps2_an521_ns
nrf5340dk_nrf5340_cpuapp_ns nrf9160dk_nrf9160_ns
v2m_musca_s1_ns
filter: CONFIG_ZEPHYR_TRUSTED_FIRMWARE_M_MODULE
harness: console
harness_config:
type: multi_line
@@ -16,5 +17,7 @@ tests:
sample.tfm.psa_protected_storage_test:
extra_args: "CONFIG_TFM_PSA_TEST_PROTECTED_STORAGE=y"
timeout: 100
filter: CONFIG_ZEPHYR_TRUSTED_FIRMWARE_M_MODULE
sample.tfm.psa_internal_trusted_storage_test:
extra_args: "CONFIG_TFM_PSA_TEST_INTERNAL_TRUSTED_STORAGE=y"
filter: CONFIG_ZEPHYR_TRUSTED_FIRMWARE_M_MODULE

@@ -3,6 +3,7 @@ common:
platform_allow: lpcxpresso55s69_ns
nrf5340dk_nrf5340_cpuapp_ns nrf9160dk_nrf9160_ns
v2m_musca_s1_ns
filter: CONFIG_ZEPHYR_TRUSTED_FIRMWARE_M_MODULE
harness: console
harness_config:
type: multi_line
@@ -18,3 +19,4 @@ tests:
sample.tfm.tfm_regression:
extra_args: ""
timeout: 200
filter: CONFIG_ZEPHYR_TRUSTED_FIRMWARE_M_MODULE

@@ -15,3 +15,5 @@ tests:
sample.kernel.memory_protection.shared_mem:
filter: CONFIG_ARCH_HAS_USERSPACE
platform_exclude: twr_ke18f
extra_configs:
- CONFIG_TEST_HW_STACK_PROTECTION=n

@@ -122,7 +122,7 @@ class GdbStub(abc.ABC):

def get_mem_region(addr):
for r in self.mem_regions:
if r['start'] <= addr <= r['end']:
if r['start'] <= addr < r['end']:
return r

return None

@@ -2769,18 +2769,6 @@ class _BindingLoader(Loader):
# Add legacy '!include foo.yaml' handling
_BindingLoader.add_constructor("!include", _binding_include)

# Use OrderedDict instead of plain dict for YAML mappings, to preserve
# insertion order on Python 3.5 and earlier (plain dicts only preserve
# insertion order on Python 3.6+). This makes testing easier and avoids
# surprises.
#
# Adapted from
# https://stackoverflow.com/questions/5121931/in-python-how-can-you-load-yaml-mappings-as-ordereddicts.
# Hopefully this API stays stable.
_BindingLoader.add_constructor(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
lambda loader, node: OrderedDict(loader.construct_pairs(node)))

#
# "Default" binding for properties which are defined by the spec.
#

@@ -5,7 +5,7 @@ envlist=py3
deps =
setuptools-scm
pytest
types-PyYAML
types-PyYAML==6.0.7
mypy
setenv =
TOXTEMPDIR={envtmpdir}

@@ -58,7 +58,7 @@ data_template = """
"""

library_data_template = """
*{0}:*(.data .data.*)
*{0}:*(.data .data.* .sdata .sdata.*)
"""

bss_template = """
@@ -67,7 +67,7 @@ bss_template = """
"""

library_bss_template = """
*{0}:*(.bss .bss.* COMMON COMMON.*)
*{0}:*(.bss .bss.* .sbss .sbss.* COMMON COMMON.*)
"""

footer_template = """

@@ -55,8 +55,8 @@ const _k_syscall_handler_t _k_syscall_table[K_SYSCALL_LIMIT] = {
};
"""

list_template = """
/* auto-generated by gen_syscalls.py, don't edit */
list_template = """/* auto-generated by gen_syscalls.py, don't edit */

#ifndef ZEPHYR_SYSCALL_LIST_H
#define ZEPHYR_SYSCALL_LIST_H

@@ -82,17 +82,6 @@ syscall_template = """

#include <linker/sections.h>

#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
#pragma GCC diagnostic push
#endif

#ifdef __GNUC__
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#if !defined(__XCC__)
#pragma GCC diagnostic ignored "-Warray-bounds"
#endif
#endif

#ifdef __cplusplus
extern "C" {
#endif
@@ -103,10 +92,6 @@ extern "C" {
}
#endif

#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
#pragma GCC diagnostic pop
#endif

#endif
#endif /* include guard */
"""
@@ -153,25 +138,13 @@ def need_split(argtype):
# Note: "lo" and "hi" are named in little endian conventions,
# but it doesn't matter as long as they are consistently
# generated.
def union_decl(type):
return "union { struct { uintptr_t lo, hi; } split; %s val; }" % type
def union_decl(type, split):
middle = "struct { uintptr_t lo, hi; } split" if split else "uintptr_t x"
return "union { %s; %s val; }" % (middle, type)

def wrapper_defs(func_name, func_type, args):
ret64 = need_split(func_type)
mrsh_args = [] # List of rvalue expressions for the marshalled invocation
split_args = []
nsplit = 0
for argtype, argname in args:
if need_split(argtype):
split_args.append((argtype, argname))
mrsh_args.append("parm%d.split.lo" % nsplit)
mrsh_args.append("parm%d.split.hi" % nsplit)
nsplit += 1
else:
mrsh_args.append("*(uintptr_t *)&" + argname)

if ret64:
mrsh_args.append("(uintptr_t)&ret64")

decl_arglist = ", ".join([" ".join(argrec) for argrec in args]) or "void"

@@ -184,10 +157,24 @@ def wrapper_defs(func_name, func_type, args):
wrap += ("\t" + "uint64_t ret64;\n") if ret64 else ""
wrap += "\t" + "if (z_syscall_trap()) {\n"

for parmnum, rec in enumerate(split_args):
(argtype, argname) = rec
wrap += "\t\t%s parm%d;\n" % (union_decl(argtype), parmnum)
wrap += "\t\t" + "parm%d.val = %s;\n" % (parmnum, argname)
valist_args = []
for argnum, (argtype, argname) in enumerate(args):
split = need_split(argtype)
wrap += "\t\t%s parm%d" % (union_decl(argtype, split), argnum)
if argtype != "va_list":
wrap += " = { .val = %s };\n" % argname
else:
# va_list objects are ... peculiar.
wrap += ";\n" + "\t\t" + "va_copy(parm%d.val, %s);\n" % (argnum, argname)
valist_args.append("parm%d.val" % argnum)
if split:
mrsh_args.append("parm%d.split.lo" % argnum)
mrsh_args.append("parm%d.split.hi" % argnum)
else:
mrsh_args.append("parm%d.x" % argnum)

if ret64:
mrsh_args.append("(uintptr_t)&ret64")

if len(mrsh_args) > 6:
wrap += "\t\t" + "uintptr_t more[] = {\n"
@@ -200,21 +187,23 @@ def wrapper_defs(func_name, func_type, args):
% (len(mrsh_args),
", ".join(mrsh_args + [syscall_id])))

# Coverity does not understand syscall mechanism
# and will already complain when any function argument
# is not of exact size as uintptr_t. So tell Coverity
# to ignore this particular rule here.
wrap += "\t\t/* coverity[OVERRUN] */\n"

if ret64:
wrap += "\t\t" + "(void)%s;\n" % invoke
wrap += "\t\t" + "return (%s)ret64;\n" % func_type
invoke = "\t\t" + "(void) %s;\n" % invoke
retcode = "\t\t" + "return (%s) ret64;\n" % func_type
elif func_type == "void":
wrap += "\t\t" + "%s;\n" % invoke
wrap += "\t\t" + "return;\n"
invoke = "\t\t" + "(void) %s;\n" % invoke
retcode = "\t\t" + "return;\n"
elif valist_args:
invoke = "\t\t" + "%s retval = %s;\n" % (func_type, invoke)
retcode = "\t\t" + "return retval;\n"
else:
wrap += "\t\t" + "return (%s) %s;\n" % (func_type, invoke)
invoke = "\t\t" + "return (%s) %s;\n" % (func_type, invoke)
retcode = ""

wrap += invoke
for argname in valist_args:
wrap += "\t\t" + "va_end(%s);\n" % argname
wrap += retcode
wrap += "\t" + "}\n"
wrap += "#endif\n"

@@ -244,16 +233,11 @@ def marshall_defs(func_name, func_type, args):
mrsh_name = "z_mrsh_" + func_name

nmrsh = 0 # number of marshalled uintptr_t parameter
vrfy_parms = [] # list of (arg_num, mrsh_or_parm_num, bool_is_split)
split_parms = [] # list of a (arg_num, mrsh_num) for each split
for i, (argtype, _) in enumerate(args):
if need_split(argtype):
vrfy_parms.append((i, len(split_parms), True))
split_parms.append((i, nmrsh))
nmrsh += 2
else:
vrfy_parms.append((i, nmrsh, False))
nmrsh += 1
vrfy_parms = [] # list of (argtype, bool_is_split)
for (argtype, _) in args:
split = need_split(argtype)
vrfy_parms.append((argtype, split))
nmrsh += 2 if split else 1

# Final argument for a 64 bit return value?
if need_split(func_type):
@@ -275,25 +259,22 @@ def marshall_defs(func_name, func_type, args):

if nmrsh > 6:
mrsh += ("\tZ_OOPS(Z_SYSCALL_MEMORY_READ(more, "
+ str(nmrsh - 6) + " * sizeof(uintptr_t)));\n")
+ str(nmrsh - 5) + " * sizeof(uintptr_t)));\n")

for i, split_rec in enumerate(split_parms):
arg_num, mrsh_num = split_rec
arg_type = args[arg_num][0]
mrsh += "\t%s parm%d;\n" % (union_decl(arg_type), i)
mrsh += "\t" + "parm%d.split.lo = %s;\n" % (i, mrsh_rval(mrsh_num,
nmrsh))
mrsh += "\t" + "parm%d.split.hi = %s;\n" % (i, mrsh_rval(mrsh_num + 1,
nmrsh))
# Finally, invoke the verify function
out_args = []
for i, argn, is_split in vrfy_parms:
if is_split:
out_args.append("parm%d.val" % argn)
argnum = 0
for i, (argtype, split) in enumerate(vrfy_parms):
mrsh += "\t%s parm%d;\n" % (union_decl(argtype, split), i)
if split:
mrsh += "\t" + "parm%d.split.lo = %s;\n" % (i, mrsh_rval(argnum, nmrsh))
argnum += 1
mrsh += "\t" + "parm%d.split.hi = %s;\n" % (i, mrsh_rval(argnum, nmrsh))
else:
out_args.append("*(%s*)&%s" % (args[i][0], mrsh_rval(argn, nmrsh)))
mrsh += "\t" + "parm%d.x = %s;\n" % (i, mrsh_rval(argnum, nmrsh))
argnum += 1

vrfy_call = "z_vrfy_%s(%s)\n" % (func_name, ", ".join(out_args))
# Finally, invoke the verify function
out_args = ", ".join(["parm%d.val" % i for i in range(len(args))])
vrfy_call = "z_vrfy_%s(%s)" % (func_name, out_args)

if func_type == "void":
mrsh += "\t" + "%s;\n" % vrfy_call
@@ -436,19 +417,10 @@ def main():
mrsh_fn = os.path.join(args.base_output, fn + "_mrsh.c")

with open(mrsh_fn, "w") as fp:
fp.write("/* auto-generated by gen_syscalls.py, don't edit */\n")
fp.write("#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)\n")
fp.write("#pragma GCC diagnostic push\n")
fp.write("#endif\n")
fp.write("#ifdef __GNUC__\n")
fp.write("#pragma GCC diagnostic ignored \"-Wstrict-aliasing\"\n")
fp.write("#endif\n")
fp.write("/* auto-generated by gen_syscalls.py, don't edit */\n\n")
fp.write(mrsh_includes[fn] + "\n")
fp.write("\n")
fp.write(mrsh_defs[fn] + "\n")
fp.write("#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)\n")
fp.write("#pragma GCC diagnostic pop\n")
fp.write("#endif\n")

if __name__ == "__main__":
main()

@@ -26,17 +26,17 @@ def parse_args():
parser.add_argument('-a', '--all', dest='all',
help='Show all bugs squashed', action='store_true')
parser.add_argument('-t', '--token', dest='tokenfile',
help='File containing GitHub token', metavar='FILE')
parser.add_argument('-b', '--begin', dest='begin', help='begin date (YYYY-mm-dd)',
metavar='date', type=valid_date_type, required=True)
help='File containing GitHub token (alternatively, use GITHUB_TOKEN env variable)', metavar='FILE')
parser.add_argument('-s', '--start', dest='start', help='start date (YYYY-mm-dd)',
metavar='START_DATE', type=valid_date_type, required=True)
parser.add_argument('-e', '--end', dest='end', help='end date (YYYY-mm-dd)',
metavar='date', type=valid_date_type, required=True)
metavar='END_DATE', type=valid_date_type, required=True)

args = parser.parse_args()

if args.end < args.begin:
if args.end < args.start:
raise ValueError(
'end date {} is before begin date {}'.format(args.end, args.begin))
'end date {} is before start date {}'.format(args.end, args.start))

if args.tokenfile:
with open(args.tokenfile, 'r') as file:
@@ -53,12 +53,12 @@ def parse_args():


class BugBashTally(object):
def __init__(self, gh, begin_date, end_date):
def __init__(self, gh, start_date, end_date):
"""Create a BugBashTally object with the provided Github object,
begin datetime object, and end datetime object"""
start datetime object, and end datetime object"""
self._gh = gh
self._repo = gh.get_repo('zephyrproject-rtos/zephyr')
self._begin_date = begin_date
self._start_date = start_date
self._end_date = end_date

self._issues = []
@@ -122,12 +122,12 @@ class BugBashTally(object):

cutoff = self._end_date + timedelta(1)
issues = self._repo.get_issues(state='closed', labels=[
'bug'], since=self._begin_date)
'bug'], since=self._start_date)

for i in issues:
# the PyGithub API and v3 REST API do not facilitate 'until'
# or 'end date' :-/
if i.closed_at < self._begin_date or i.closed_at > cutoff:
if i.closed_at < self._start_date or i.closed_at > cutoff:
continue

ipr = i.pull_request
@@ -167,7 +167,7 @@ def print_top_ten(top_ten):

def main():
args = parse_args()
bbt = BugBashTally(Github(args.token), args.begin, args.end)
bbt = BugBashTally(Github(args.token), args.start, args.end)
if args.all:
# print one issue per line
issues = bbt.get_issues()

341 scripts/release/list_backports.py Executable file
@@ -0,0 +1,341 @@
#!/usr/bin/env python3
# Copyright (c) 2022, Meta
#
# SPDX-License-Identifier: Apache-2.0

"""Query issues in a release branch

This script searches for issues referenced via pull-requests in a release
branch in order to simplify tracking changes such as automated backports,
manual backports, security fixes, and stability fixes.

A formatted report is printed to standard output either in JSON or
reStructuredText.

Since an issue is required for all changes to release branches, merged PRs
must have at least one instance of the phrase "Fixes #1234" in the body. This
script will throw an error if a PR has been made without an associated issue.

Usage:
./scripts/release/list_backports.py \
-t ~/.ghtoken \
-b v2.7-branch \
-s 2021-12-15 -e 2022-04-22 \
-P 45074 -P 45868 -P 44918 -P 41234 -P 41174 \
-j | jq . | tee /tmp/backports.json

GITHUB_TOKEN="<secret>" \
./scripts/release/list_backports.py \
-b v3.0-branch \
-p 43381 \
-j | jq . | tee /tmp/backports.json
"""

import argparse
from datetime import datetime, timedelta
import io
import json
import logging
import os
import re
import sys

# Requires PyGithub
from github import Github


# https://gist.github.com/monkut/e60eea811ef085a6540f
def valid_date_type(arg_date_str):
"""custom argparse *date* type for user date values given from the
command line"""
try:
return datetime.strptime(arg_date_str, "%Y-%m-%d")
except ValueError:
msg = "Given Date ({0}) not valid! Expected format, YYYY-MM-DD!".format(arg_date_str)
raise argparse.ArgumentTypeError(msg)


def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--token', dest='tokenfile',
help='File containing GitHub token (alternatively, use GITHUB_TOKEN env variable)', metavar='FILE')
parser.add_argument('-b', '--base', dest='base',
help='branch (base) for PRs (e.g. v2.7-branch)', metavar='BRANCH', required=True)
parser.add_argument('-j', '--json', dest='json', action='store_true',
help='print output in JSON rather than RST')
parser.add_argument('-s', '--start', dest='start', help='start date (YYYY-mm-dd)',
metavar='START_DATE', type=valid_date_type)
parser.add_argument('-e', '--end', dest='end', help='end date (YYYY-mm-dd)',
metavar='END_DATE', type=valid_date_type)
parser.add_argument("-o", "--org", default="zephyrproject-rtos",
help="Github organisation")
parser.add_argument('-p', '--include-pull', dest='includes',
help='include pull request (can be specified multiple times)',
metavar='PR', type=int, action='append', default=[])
parser.add_argument('-P', '--exclude-pull', dest='excludes',
help='exclude pull request (can be specified multiple times, helpful for version bumps and release notes)',
metavar='PR', type=int, action='append', default=[])
parser.add_argument("-r", "--repo", default="zephyr",
help="Github repository")

args = parser.parse_args()

if args.includes:
if getattr(args, 'start'):
logging.error(
'the --start argument should not be used with --include-pull')
return None
if getattr(args, 'end'):
logging.error(
'the --end argument should not be used with --include-pull')
return None
else:
if not getattr(args, 'start'):
logging.error(
'if --include-pr PR is not used, --start START_DATE is required')
return None

if not getattr(args, 'end'):
setattr(args, 'end', datetime.now())

if args.end < args.start:
logging.error(
f'end date {args.end} is before start date {args.start}')
return None

if args.tokenfile:
with open(args.tokenfile, 'r') as file:
token = file.read()
token = token.strip()
else:
if 'GITHUB_TOKEN' not in os.environ:
raise ValueError('No credentials specified')
token = os.environ['GITHUB_TOKEN']

setattr(args, 'token', token)

return args


class Backport(object):
def __init__(self, repo, base, pulls):
self._base = base
self._repo = repo
self._issues = []
self._pulls = pulls

self._pulls_without_an_issue = []
self._pulls_with_invalid_issues = {}

@staticmethod
def by_date_range(repo, base, start_date, end_date, excludes):
"""Create a Backport object with the provided repo,
base, start datetime object, end datetime object, and
list of excluded PRs"""

pulls = []

unfiltered_pulls = repo.get_pulls(
base=base, state='closed')
for p in unfiltered_pulls:
if not p.merged:
# only consider merged backports
continue

if p.closed_at < start_date or p.closed_at >= end_date + timedelta(1):
# only concerned with PRs within time window
continue

if p.number in excludes:
# skip PRs that have been explicitly excluded
continue

pulls.append(p)

# paginated_list.sort() does not exist
pulls = sorted(pulls, key=lambda x: x.number)

return Backport(repo, base, pulls)

@staticmethod
def by_included_prs(repo, base, includes):
"""Create a Backport object with the provided repo,
base, and list of included PRs"""

pulls = []

for i in includes:
try:
p = repo.get_pull(i)
except Exception:
p = None

if not p:
logging.error(f'{i} is not a valid pull request')
return None

if p.base.ref != base:
logging.error(
f'{i} is not a valid pull request for base {base} ({p.base.label})')
return None

pulls.append(p)

# paginated_list.sort() does not exist
pulls = sorted(pulls, key=lambda x: x.number)

return Backport(repo, base, pulls)

@staticmethod
def sanitize_title(title):
# TODO: sanitize titles such that they are suitable for both JSON and ReStructured Text
# could also automatically fix titles like "Automated backport of PR #1234"
return title

def print(self):
for i in self.get_issues():
title = Backport.sanitize_title(i.title)
# * :github:`38972` - logging: Cleaning references to tracing in logging
print(f'* :github:`{i.number}` - {title}')

def print_json(self):
issue_objects = []
for i in self.get_issues():
obj = {}
obj['id'] = i.number
obj['title'] = Backport.sanitize_title(i.title)
obj['url'] = f'https://github.com/{self._repo.organization.login}/{self._repo.name}/pull/{i.number}'
issue_objects.append(obj)

print(json.dumps(issue_objects))

def get_pulls(self):
return self._pulls

def get_issues(self):
"""Return GitHub issues fixed in the provided date window"""
if self._issues:
return self._issues

issue_map = {}
self._pulls_without_an_issue = []
self._pulls_with_invalid_issues = {}

for p in self._pulls:
# check for issues in this pr
issues_for_this_pr = {}
with io.StringIO(p.body) as buf:
for line in buf.readlines():
line = line.strip()
match = re.search(r"^Fixes[:]?\s*#([1-9][0-9]*).*", line)
if not match:
match = re.search(
rf"^Fixes[:]?\s*https://github\.com/{self._repo.organization.login}/{self._repo.name}/issues/([1-9][0-9]*).*", line)
if not match:
continue
issue_number = int(match[1])
issue = self._repo.get_issue(issue_number)
if not issue:
if not self._pulls_with_invalid_issues[p.number]:
self._pulls_with_invalid_issues[p.number] = [
issue_number]
else:
self._pulls_with_invalid_issues[p.number].append(
issue_number)
logging.error(
f'https://github.com/{self._repo.organization.login}/{self._repo.name}/pull/{p.number} references invalid issue number {issue_number}')
continue
issues_for_this_pr[issue_number] = issue

# report prs missing issues later
if len(issues_for_this_pr) == 0:
logging.error(
f'https://github.com/{self._repo.organization.login}/{self._repo.name}/pull/{p.number} does not have an associated issue')
self._pulls_without_an_issue.append(p)
continue

# FIXME: when we have upgraded to python3.9+, use "issue_map | issues_for_this_pr"
issue_map = {**issue_map, **issues_for_this_pr}

issues = list(issue_map.values())

# paginated_list.sort() does not exist
issues = sorted(issues, key=lambda x: x.number)

self._issues = issues

return self._issues

def get_pulls_without_issues(self):
if self._pulls_without_an_issue:
return self._pulls_without_an_issue

self.get_issues()

return self._pulls_without_an_issue

def get_pulls_with_invalid_issues(self):
if self._pulls_with_invalid_issues:
return self._pulls_with_invalid_issues

self.get_issues()

return self._pulls_with_invalid_issues


def main():
args = parse_args()

if not args:
return os.EX_DATAERR

try:
gh = Github(args.token)
except Exception:
logging.error('failed to authenticate with GitHub')
return os.EX_DATAERR

try:
repo = gh.get_repo(args.org + '/' + args.repo)
except Exception:
logging.error('failed to obtain Github repository')
return os.EX_DATAERR

bp = None
if args.includes:
bp = Backport.by_included_prs(repo, args.base, set(args.includes))
else:
bp = Backport.by_date_range(repo, args.base,
args.start, args.end, set(args.excludes))

if not bp:
return os.EX_DATAERR

pulls_with_invalid_issues = bp.get_pulls_with_invalid_issues()
if pulls_with_invalid_issues:
logging.error('The following PRs link to invalid issues:')
for (p, lst) in pulls_with_invalid_issues:
logging.error(
f'\nhttps://github.com/{repo.organization.login}/{repo.name}/pull/{p.number}: {lst}')
return os.EX_DATAERR

pulls_without_issues = bp.get_pulls_without_issues()
if pulls_without_issues:
logging.error(
'Please ensure the body of each PR to a release branch contains "Fixes #1234"')
logging.error('The following PRs are lacking associated issues:')
for p in pulls_without_issues:
logging.error(
f'https://github.com/{repo.organization.login}/{repo.name}/pull/{p.number}')
return os.EX_DATAERR

if args.json:
bp.print_json()
else:
bp.print()

return os.EX_OK


if __name__ == '__main__':
sys.exit(main())
@@ -147,10 +147,10 @@ def parse_args():
def main():
parse_args()

token = os.environ.get('GH_TOKEN', None)
token = os.environ.get('GITHUB_TOKEN', None)
if not token:
sys.exit("""Github token not set in environment,
set the env. variable GH_TOKEN please and retry.""")
set the env. variable GITHUB_TOKEN please and retry.""")

i = Issues(args.org, args.repo, token)

@@ -213,5 +213,6 @@ set the env. variable GH_TOKEN please and retry.""")
f.write("* :github:`{}` - {}\n".format(
item['number'], item['title']))


if __name__ == '__main__':
main()

@@ -16,6 +16,7 @@ import tempfile
from runners.core import ZephyrBinaryRunner, RunnerCaps

try:
import pylink
from pylink.library import Library
MISSING_REQUIREMENTS = False
except ImportError:
@@ -141,16 +142,23 @@ class JLinkBinaryRunner(ZephyrBinaryRunner):
# to load the shared library distributed with the tools, which
# provides an API call for getting the version.
if not hasattr(self, '_jlink_version'):
# pylink 0.14.0/0.14.1 exposes JLink SDK DLL (libjlinkarm) in
# JLINK_SDK_STARTS_WITH, while other versions use JLINK_SDK_NAME
if pylink.__version__ in ('0.14.0', '0.14.1'):
sdk = Library.JLINK_SDK_STARTS_WITH
else:
sdk = Library.JLINK_SDK_NAME

plat = sys.platform
if plat.startswith('win32'):
libname = Library.get_appropriate_windows_sdk_name() + '.dll'
elif plat.startswith('linux'):
libname = Library.JLINK_SDK_NAME + '.so'
libname = sdk + '.so'
elif plat.startswith('darwin'):
libname = Library.JLINK_SDK_NAME + '.dylib'
libname = sdk + '.dylib'
else:
self.logger.warning(f'unknown platform {plat}; assuming UNIX')
libname = Library.JLINK_SDK_NAME + '.so'
libname = sdk + '.so'

lib = Library(dllpath=os.fspath(Path(self.commander).parent /
libname))

@@ -125,7 +125,7 @@ Created: {datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")}

# write other license info, if any
if len(doc.customLicenseIDs) > 0:
for lic in list(doc.customLicenseIDs).sort():
for lic in sorted(list(doc.customLicenseIDs)):
writeOtherLicenseSPDX(f, lic)

# Open SPDX document file for writing, write the document, and calculate

@@ -24,6 +24,224 @@
#define __FPU_PRESENT CONFIG_CPU_HAS_FPU
#define __MPU_PRESENT CONFIG_CPU_HAS_ARM_MPU

#define __CM4_REV 0x0201 /*!< Core Revision r2p1 */

#define __VTOR_PRESENT 1 /*!< Set to 1 if VTOR is present */
#define __NVIC_PRIO_BITS 3 /*!< Number of Bits used for Priority Levels */
#define __Vendor_SysTickConfig 0 /*!< 0 use default SysTick HW */
#define __FPU_DP 0 /*!< Set to 1 if FPU is double precision */
#define __ICACHE_PRESENT 0 /*!< Set to 1 if I-Cache is present */
#define __DCACHE_PRESENT 0 /*!< Set to 1 if D-Cache is present */
#define __DTCM_PRESENT 0 /*!< Set to 1 if DTCM is present */

/** @brief ARM Cortex-M4 NVIC Interrupt Numbers
 * CM4 NVIC implements 16 internal interrupt sources. CMSIS macros use
 * negative numbers [-15, -1]. Lower numerical value indicates higher
 * priority.
 * -15 = Reset Vector invoked on POR or any CPU reset.
 * -14 = NMI
 * -13 = Hard Fault. At POR or CPU reset all faults map to Hard Fault.
 * -12 = Memory Management Fault. If enabled Hard Faults caused by access
 *       violations, no address match, or MPU mismatch.
 * -11 = Bus Fault. If enabled pre-fetch, AHB access faults.
 * -10 = Usage Fault. If enabled Undefined instructions, illegal state
 *       transition (Thumb -> ARM mode), unaligned, etc.
 * -9 through -6 are not implemented (reserved).
 * -5 System call via SVC instruction.
 * -4 Debug Monitor.
 * -3 not implemented (reserved).
 * -2 PendSV for system service.
 * -1 SysTick NVIC system timer.
 * Numbers >= 0 are external peripheral interrupts.
 */
typedef enum {
/* ========== ARM Cortex-M4 Specific Interrupt Numbers ============ */

Reset_IRQn = -15, /*!< POR/CPU Reset Vector */
NonMaskableInt_IRQn = -14, /*!< NMI */
HardFault_IRQn = -13, /*!< Hard Faults */
MemoryManagement_IRQn = -12, /*!< Memory Management faults */
BusFault_IRQn = -11, /*!< Bus Access faults */
UsageFault_IRQn = -10, /*!< Usage/instruction faults */
SVCall_IRQn = -5, /*!< SVC */
DebugMonitor_IRQn = -4, /*!< Debug Monitor */
PendSV_IRQn = -2, /*!< PendSV */
SysTick_IRQn = -1, /*!< SysTick */

/* ============== MEC172x Specific Interrupt Numbers ============ */

GIRQ08_IRQn = 0, /*!< GPIO 0140 - 0176 */
GIRQ09_IRQn = 1, /*!< GPIO 0100 - 0136 */
GIRQ10_IRQn = 2, /*!< GPIO 0040 - 0076 */
GIRQ11_IRQn = 3, /*!< GPIO 0000 - 0036 */
GIRQ12_IRQn = 4, /*!< GPIO 0200 - 0236 */
GIRQ13_IRQn = 5, /*!< SMBus Aggregated */
GIRQ14_IRQn = 6, /*!< DMA Aggregated */
GIRQ15_IRQn = 7,
GIRQ16_IRQn = 8,
GIRQ17_IRQn = 9,
GIRQ18_IRQn = 10,
GIRQ19_IRQn = 11,
GIRQ20_IRQn = 12,
GIRQ21_IRQn = 13,
/* GIRQ22(peripheral clock wake) is not connected to NVIC */
GIRQ23_IRQn = 14,
GIRQ24_IRQn = 15,
GIRQ25_IRQn = 16,
GIRQ26_IRQn = 17, /*!< GPIO 0240 - 0276 */
/* Reserved 18-19 */
/* GIRQ's 8 - 12, 24 - 26 no direct connections */
I2C_SMB_0_IRQn = 20, /*!< GIRQ13 b[0] */
I2C_SMB_1_IRQn = 21, /*!< GIRQ13 b[1] */
I2C_SMB_2_IRQn = 22, /*!< GIRQ13 b[2] */
I2C_SMB_3_IRQn = 23, /*!< GIRQ13 b[3] */
DMA0_IRQn = 24, /*!< GIRQ14 b[0] */
DMA1_IRQn = 25, /*!< GIRQ14 b[1] */
DMA2_IRQn = 26, /*!< GIRQ14 b[2] */
DMA3_IRQn = 27, /*!< GIRQ14 b[3] */
DMA4_IRQn = 28, /*!< GIRQ14 b[4] */
DMA5_IRQn = 29, /*!< GIRQ14 b[5] */
DMA6_IRQn = 30, /*!< GIRQ14 b[6] */
DMA7_IRQn = 31, /*!< GIRQ14 b[7] */
DMA8_IRQn = 32, /*!< GIRQ14 b[8] */
DMA9_IRQn = 33, /*!< GIRQ14 b[9] */
DMA10_IRQn = 34, /*!< GIRQ14 b[10] */
DMA11_IRQn = 35, /*!< GIRQ14 b[11] */
DMA12_IRQn = 36, /*!< GIRQ14 b[12] */
DMA13_IRQn = 37, /*!< GIRQ14 b[13] */
DMA14_IRQn = 38, /*!< GIRQ14 b[14] */
DMA15_IRQn = 39, /*!< GIRQ14 b[15] */
UART0_IRQn = 40, /*!< GIRQ15 b[0] */
UART1_IRQn = 41, /*!< GIRQ15 b[1] */
EMI0_IRQn = 42, /*!< GIRQ15 b[2] */
EMI1_IRQn = 43, /*!< GIRQ15 b[3] */
EMI2_IRQn = 44, /*!< GIRQ15 b[4] */
ACPI_EC0_IBF_IRQn = 45, /*!< GIRQ15 b[5] */
ACPI_EC0_OBE_IRQn = 46, /*!< GIRQ15 b[6] */
ACPI_EC1_IBF_IRQn = 47, /*!< GIRQ15 b[7] */
ACPI_EC1_OBE_IRQn = 48, /*!< GIRQ15 b[8] */
ACPI_EC2_IBF_IRQn = 49, /*!< GIRQ15 b[9] */
ACPI_EC2_OBE_IRQn = 50, /*!< GIRQ15 b[10] */
ACPI_EC3_IBF_IRQn = 51, /*!< GIRQ15 b[11] */
ACPI_EC3_OBE_IRQn = 52, /*!< GIRQ15 b[12] */
ACPI_EC4_IBF_IRQn = 53, /*!< GIRQ15 b[13] */
ACPI_EC4_OBE_IRQn = 54, /*!< GIRQ15 b[14] */
ACPI_PM1_CTL_IRQn = 55, /*!< GIRQ15 b[15] */
ACPI_PM1_EN_IRQn = 56, /*!< GIRQ15 b[16] */
ACPI_PM1_STS_IRQn = 57, /*!< GIRQ15 b[17] */
KBC_OBE_IRQn = 58, /*!< GIRQ15 b[18] */
KBC_IBF_IRQn = 59, /*!< GIRQ15 b[19] */
MBOX_IRQn = 60, /*!< GIRQ15 b[20] */
/* reserved 61 */
P80BD_0_IRQn = 62, /*!< GIRQ15 b[22] */
/* reserved 63-64 */
PKE_IRQn = 65, /*!< GIRQ16 b[0] */
/* reserved 66 */
RNG_IRQn = 67, /*!< GIRQ16 b[2] */
AESH_IRQn = 68, /*!< GIRQ16 b[3] */
/* reserved 69 */
PECI_IRQn = 70, /*!< GIRQ17 b[0] */
TACH_0_IRQn = 71, /*!< GIRQ17 b[1] */
TACH_1_IRQn = 72, /*!< GIRQ17 b[2] */
TACH_2_IRQn = 73, /*!< GIRQ17 b[3] */
RPMFAN_0_FAIL_IRQn = 74, /*!< GIRQ17 b[20] */
RPMFAN_0_STALL_IRQn = 75, /*!< GIRQ17 b[21] */
RPMFAN_1_FAIL_IRQn = 76, /*!< GIRQ17 b[22] */
RPMFAN_1_STALL_IRQn = 77, /*!< GIRQ17 b[23] */
ADC_SNGL_IRQn = 78, /*!< GIRQ17 b[8] */
ADC_RPT_IRQn = 79, /*!< GIRQ17 b[9] */
RCID_0_IRQn = 80, /*!< GIRQ17 b[10] */
RCID_1_IRQn = 81, /*!< GIRQ17 b[11] */
RCID_2_IRQn = 82, /*!< GIRQ17 b[12] */
LED_0_IRQn = 83, /*!< GIRQ17 b[13] */
LED_1_IRQn = 84, /*!< GIRQ17 b[14] */
LED_2_IRQn = 85, /*!< GIRQ17 b[15] */
LED_3_IRQn = 86, /*!< GIRQ17 b[16] */
PHOT_IRQn = 87, /*!< GIRQ17 b[17] */
/* reserved 88-89 */
SPIP_0_IRQn = 90, /*!< GIRQ18 b[0] */
QMSPI_0_IRQn = 91, /*!< GIRQ18 b[1] */
GPSPI_0_TXBE_IRQn = 92, /*!< GIRQ18 b[2] */
GPSPI_0_RXBF_IRQn = 93, /*!< GIRQ18 b[3] */
GPSPI_1_TXBE_IRQn = 94, /*!< GIRQ18 b[4] */
GPSPI_1_RXBF_IRQn = 95, /*!< GIRQ18 b[5] */
BCL_0_ERR_IRQn = 96, /*!< GIRQ18 b[7] */
BCL_0_BCLR_IRQn = 97, /*!< GIRQ18 b[6] */
/* reserved 98-99 */
PS2_0_ACT_IRQn = 100, /*!< GIRQ18 b[10] */
/* reserved 101-102 */
ESPI_PC_IRQn = 103, /*!< GIRQ19 b[0] */
ESPI_BM1_IRQn = 104, /*!< GIRQ19 b[1] */
ESPI_BM2_IRQn = 105, /*!< GIRQ19 b[2] */
ESPI_LTR_IRQn = 106, /*!< GIRQ19 b[3] */
ESPI_OOB_UP_IRQn = 107, /*!< GIRQ19 b[4] */
ESPI_OOB_DN_IRQn = 108, /*!< GIRQ19 b[5] */
ESPI_FLASH_IRQn = 109, /*!< GIRQ19 b[6] */
ESPI_RESET_IRQn = 110, /*!< GIRQ19 b[7] */
RTMR_IRQn = 111, /*!< GIRQ23 b[10] */
HTMR_0_IRQn = 112, /*!< GIRQ23 b[16] */
HTMR_1_IRQn = 113, /*!< GIRQ23 b[17] */
WK_IRQn = 114, /*!< GIRQ21 b[3] */
WKSUB_IRQn = 115, /*!< GIRQ21 b[4] */
WKSEC_IRQn = 116, /*!< GIRQ21 b[5] */
WKSUBSEC_IRQn = 117, /*!< GIRQ21 b[6] */
WKSYSPWR_IRQn = 118, /*!< GIRQ21 b[7] */
RTC_IRQn = 119, /*!< GIRQ21 b[8] */
RTC_ALARM_IRQn = 120, /*!< GIRQ21 b[9] */
VCI_OVRD_IN_IRQn = 121, /*!< GIRQ21 b[10] */
VCI_IN0_IRQn = 122, /*!< GIRQ21 b[11] */
VCI_IN1_IRQn = 123, /*!< GIRQ21 b[12] */
VCI_IN2_IRQn = 124, /*!< GIRQ21 b[13] */
VCI_IN3_IRQn = 125, /*!< GIRQ21 b[14] */
VCI_IN4_IRQn = 126, /*!< GIRQ21 b[15] */
/* reserved 127-128 */
PS2_0A_WAKE_IRQn = 129, /*!< GIRQ21 b[18] */
PS2_0B_WAKE_IRQn = 130, /*!< GIRQ21 b[19] */
/* reserved 131-134 */
KEYSCAN_IRQn = 135, /*!< GIRQ21 b[25] */
B16TMR_0_IRQn = 136, /*!< GIRQ23 b[0] */
B16TMR_1_IRQn = 137, /*!< GIRQ23 b[1] */
B16TMR_2_IRQn = 138, /*!< GIRQ23 b[2] */
B16TMR_3_IRQn = 139, /*!< GIRQ23 b[3] */
B32TMR_0_IRQn = 140, /*!< GIRQ23 b[4] */
B32TMR_1_IRQn = 141, /*!< GIRQ23 b[5] */
CTMR_0_IRQn = 142, /*!< GIRQ23 b[6] */
CTMR_1_IRQn = 143, /*!< GIRQ23 b[7] */
CTMR_2_IRQn = 144, /*!< GIRQ23 b[8] */
CTMR_3_IRQn = 145, /*!< GIRQ23 b[9] */
CCT_IRQn = 146, /*!< GIRQ18 b[20] */
CCT_CAP0_IRQn = 147, /*!< GIRQ18 b[21] */
CCT_CAP1_IRQn = 148, /*!< GIRQ18 b[22] */
CCT_CAP2_IRQn = 149, /*!< GIRQ18 b[23] */
CCT_CAP3_IRQn = 150, /*!< GIRQ18 b[24] */
CCT_CAP4_IRQn = 151, /*!< GIRQ18 b[25] */
CCT_CAP5_IRQn = 152, /*!< GIRQ18 b[26] */
CCT_CMP0_IRQn = 153, /*!< GIRQ18 b[27] */
CCT_CMP1_IRQn = 154, /*!< GIRQ18 b[28] */
EEPROMC_IRQn = 155, /*!< GIRQ18 b[13] */
ESPI_VWIRE_IRQn = 156, /*!< GIRQ19 b[8] */
/* reserved 157 */
I2C_SMB_4_IRQn = 158, /*!< GIRQ13 b[4] */
TACH_3_IRQn = 159, /*!< GIRQ17 b[4] */
/* reserved 160-165 */
SAF_DONE_IRQn = 166, /*!< GIRQ19 b[9] */
SAF_ERR_IRQn = 167, /*!< GIRQ19 b[10] */
/* reserved 168 */
SAF_CACHE_IRQn = 169, /*!< GIRQ19 b[11] */
/* reserved 170 */
WDT_0_IRQn = 171, /*!< GIRQ21 b[2] */
GLUE_IRQn = 172, /*!< GIRQ21 b[26] */
OTP_RDY_IRQn = 173, /*!< GIRQ20 b[3] */
CLK32K_MON_IRQn = 174, /*!< GIRQ20 b[9] */
ACPI_EC0_IRQn = 175, /* ACPI EC OBE and IBF combined into one */
ACPI_EC1_IRQn = 176, /* No GIRQ connection. Status in ACPI blocks */
ACPI_EC2_IRQn = 177, /* Code uses level bits and NVIC bits */
ACPI_EC3_IRQn = 178,
ACPI_EC4_IRQn = 179,
ACPI_PM1_IRQn = 180,
MAX_IRQn
} IRQn_Type;
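A hedged usage sketch: a driver hooking one of the direct NVIC lines defined above, e.g. UART0_IRQn (40, routed from GIRQ15 b[0]). The priority value and ISR name are illustrative, not from this diff:

static void my_uart_isr(const void *arg)
{
	/* ... service the UART ... */
}

static void wire_up_uart0(void)
{
	IRQ_CONNECT(UART0_IRQn, 2, my_uart_isr, NULL, 0);
	irq_enable(UART0_IRQn);
}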

#include <sys/util.h>

/* chip specific register defines */

@@ -205,12 +205,12 @@ void arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz,

sr.cpu = cpu_num;
sr.fn = fn;
sr.stack_top = Z_THREAD_STACK_BUFFER(stack) + sz;
sr.stack_top = Z_KERNEL_STACK_BUFFER(stack) + sz;
sr.arg = arg;
sr.vecbase = vb;
sr.alive = &alive_flag;

appcpu_top = Z_THREAD_STACK_BUFFER(stack) + sz;
appcpu_top = Z_KERNEL_STACK_BUFFER(stack) + sz;

start_rec = &sr;