Compare commits: main...v2.7-audit (234 commits)

Commit SHA1s:
07c3d4529a, f77c7bb2fe, 7b6cdcbed7, 878d4338bb, 9043b65acf, 071def1bf1,
922cde06dc, 7229c12721, 88608b2e78, 77efdc73ca, f30ce73f67, bdc5f2c7da,
29155bdd6c, 5b627ad8b3, 839fa857c8, 64336f467c, 37f1423213, 4071c6143c,
64816d53c0, ac487cf5a4, 08619905cd, aba70ce903, 671153b94d, f953e929d8,
829f63f93f, d03fa8d8e9, 40cf447b5a, 3d6f386cca, 747db471bb, ec69bda0d4,
04379cbe09, 019e0d9573, da3a04a17b, bbc6f78c7c, 259c805c3c, c3d715c6ee,
f5884c7db4, 672934d206, 9c0f9e6214, 844843ce47, cf873c6eae, 5b6fef09da,
0075f17e1f, 3c1d1a11e9, 63afaf742a, 7eadb9c5eb, 8e6e745c76, bea9a92819,
dceab47f74, 5d02614e34, 5b7cc3bffa, 4c26541842, 0a15b510f4, d21b0e8ffe,
11dddc5364, a984ca8b70, 4a8da1624d, 18b6921b1b, efa7239352, 3e68e6b550,
16dc0c550f, 7a3ca740e7, f0488b4956, 3472b3b212, 093a9ea724, 820aa10ac9,
711f44a1fc, d09cb7f1da, 06e25ea826, 3ff5e8b7ce, 22fdbf18d5, b652faeb91,
c87097cad4, f3c9c0ae19, 835451e36f, f6ce289342, f5d4fb40b5, c90e0c1197,
53d9e942cf, dd435b011f, 6a851f1f48, 68a6a3e5c3, dff331a40c, 96a7b32e85,
6f11b2d7f2, 5166ff9fb1, 16e3655739, b757110019, 61730131c7, 5455bf4ba7,
c65645cc39, cef9cbeb60, ac0477f17c, cb4ed62d98, df0c972787, 3f826560aa,
21008182be, 5e578e5967, 88487a2dee, f2cae5145d, 9366238a33, 33f30745a5,
275dc8275f, ae757fb704, 5e1dc921b0, 5ccffff5b0, dd460a9410, 7e5ac1bfe6,
f215361e1c, 7e7d71ebda, 6939183a58, 6f045c1166, d9eb21aa1e, 2b8079aa38,
81136cbc79, 42276f5bbd, 36bec3599d, 587ad45686, 169b85c1e3, a9aba82522,
e7df33e8b7, 29b52a81c9, af4c3bc983, 257df9a236, b60b5b97a2, 8a97c83040,
42144217be, 4ba168dc4d, d760c5e322, 7b880f11f8, 592c6b1db2, f52dce1ee3,
7f3562bfe4, fcc69bf015, c140604510, d229d45ddd, 8790789c5f, 7bc1deeeae,
6c4fc0226d, 31dbdca2ab, 3bceb73861, 79bf23c5ac, 7fec8b280d, 706104bdf8,
a39340a1f8, 7a2b9586fa, 708ba30959, 903b5d78d8, 2df7257bec, 9f903eeb50,
886d04860f, 311aeef4d5, 8dacd0f873, 2d0d093627, 0689e106c6, 0d81d97bb0,
cb2ea25e14, 50d357d77e, 9877834c95, ea2bbb8b9e, b0667a80b1, e2ae4ec78c,
1f09c9269d, aa8c3fa22e, 5a5eaa3c58, efd77e0958, b3affe6b94, 5969c3b941,
6db7778c81, bd6523195c, 701c560901, b24bbad815, a7baa3628d, 7aee51ea82,
7c62429a75, 82f3165b79, e858321f83, d2c5f05b1b, 708951ecd2, ad2b77e7f8,
eb85f9a47e, 9ad64e8809, cf112e2a06, 0ad4b4438a, f7d0ae5e6c, a084ec5483,
c3fac651ee, 30eadf758a, 1a42926317, 30b24920e8, 41654b0dba, 17c5a7c89e,
0d4db6b952, e24df5272a, a3e8f83e6b, e8929c3360, 7597eef8b3, 9ff7cb60fc,
71420c6b76, 519f412ce8, c0e44d9462, 47f4ddafdd, 1a15d367e2, 7ac8c2f51b,
4c52fb9fd1, cb657057b3, 69996900c8, 70979b9047, e601ca8e11, 369d5d038f,
27a2271093, f4a03dfa32, 192cad6cda, 1e2c698e95, 0c0a990c4b, dbf08a18c3,
6c4d190493, 5d0100e12c, b11983d71a, 8691e3e0d2, c1fa585917, 8a9c1e7721,
8a0dc430b2, e6638715d9, 5b78f62138, 01be872f01, 317749e1e8, 7d3606a74a,
08917e0708, b868419ac7, 16efab0493, 916dbab23a, 62680344a6, d8ee47459c
@@ -1,35 +0,0 @@
steps:
  - command:
      - .buildkite/run.sh
    env:
      ZEPHYR_TOOLCHAIN_VARIANT: "zephyr"
      ZEPHYR_SDK_INSTALL_DIR: "/opt/toolchains/zephyr-sdk-0.13.1"
    parallelism: 475
    timeout_in_minutes: 210
    retry:
      manual: true
    plugins:
      - docker#v3.5.0:
          image: "zephyrprojectrtos/ci:v0.18.4"
          propagate-environment: true
          volumes:
            - "/var/lib/buildkite-agent/git-mirrors:/var/lib/buildkite-agent/git-mirrors"
            - "/var/lib/buildkite-agent/zephyr-module-cache:/var/lib/buildkite-agent/zephyr-module-cache"
            - "/var/lib/buildkite-agent/zephyr-ccache:/root/.ccache"
          workdir: "/workdir/zephyr"
    agents:
      - "queue=default"

  - wait: ~
    continue_on_failure: true

  - plugins:
      - junit-annotate#v1.7.0:
          artifacts: twister-*.xml

  - command:
      - .buildkite/mergejunit.sh

notify:
  - email: "builds+int+399+7809482394022958124@lists.zephyrproject.org"
    if: build.state != "passed"
@@ -1,8 +0,0 @@
#!/bin/bash
# Copyright (c) 2020 Linaro Limited
#
# SPDX-License-Identifier: Apache-2.0

# report disk usage:
echo "--- $0 disk usage"
df -h
@@ -1,44 +0,0 @@
#!/bin/bash
# Copyright (c) 2020 Linaro Limited
#
# SPDX-License-Identifier: Apache-2.0

# Save off where we started so we can go back there
WORKDIR=${PWD}

echo "--- $0 disk usage"
df -h
du -hs /var/lib/buildkite-agent/*
docker images -a
docker system df -v

if [ -n "${BUILDKITE_PULL_REQUEST_BASE_BRANCH}" ]; then
    git fetch -v origin ${BUILDKITE_PULL_REQUEST_BASE_BRANCH}
    git checkout FETCH_HEAD
    git config --local user.email "builds@zephyrproject.org"
    git config --local user.name "Zephyr CI"
    git merge --no-edit "${BUILDKITE_COMMIT}" || {
        local merge_result=$?
        echo "Merge failed: ${merge_result}"
        git merge --abort
        exit $merge_result
    }
fi

mkdir -p /var/lib/buildkite-agent/zephyr-ccache/

# create cache dirs, no-op if they already exist
mkdir -p /var/lib/buildkite-agent/zephyr-module-cache/modules
mkdir -p /var/lib/buildkite-agent/zephyr-module-cache/tools
mkdir -p /var/lib/buildkite-agent/zephyr-module-cache/bootloader

# Clean cache - if it already exists
cd /var/lib/buildkite-agent/zephyr-module-cache
find -type f -not -path "*/.git/*" -not -name ".git" -delete

# Remove any stale locks
find -name index.lock -delete

# return from where we started so we can find pipeline files from
# git repo
cd ${WORKDIR}
@@ -1,19 +0,0 @@
#!/bin/bash
# Copyright (c) 2021 Linaro Limited
#
# SPDX-License-Identifier: Apache-2.0
set -eE

buildkite-agent artifact download twister-*.xml .

xmls=""

for f in twister-*xml; do [ -s ${f} ] && xmls+="${f} "; done

if [ "${xmls}" ]; then
    junitparser merge ${xmls} junit.xml
    buildkite-agent artifact upload junit.xml
    junit2html junit.xml
    buildkite-agent artifact upload junit.xml.html
    buildkite-agent annotate --style "info" "Read the <a href=\"artifact://junit.xml.html\">JUnit test report</a>"
fi
@@ -1,31 +0,0 @@
steps:
  - command:
      - .buildkite/run.sh
    env:
      ZEPHYR_TOOLCHAIN_VARIANT: "zephyr"
      ZEPHYR_SDK_INSTALL_DIR: "/opt/toolchains/zephyr-sdk-0.13.1"
    parallelism: 20
    timeout_in_minutes: 180
    retry:
      manual: true
    plugins:
      - docker#v3.5.0:
          image: "zephyrprojectrtos/ci:v0.18.4"
          propagate-environment: true
          volumes:
            - "/var/lib/buildkite-agent/git-mirrors:/var/lib/buildkite-agent/git-mirrors"
            - "/var/lib/buildkite-agent/zephyr-module-cache:/var/lib/buildkite-agent/zephyr-module-cache"
            - "/var/lib/buildkite-agent/zephyr-ccache:/root/.ccache"
          workdir: "/workdir/zephyr"
    agents:
      - "queue=default"

  - wait: ~
    continue_on_failure: true

  - plugins:
      - junit-annotate#v1.7.0:
          artifacts: twister-*.xml

  - command:
      - .buildkite/mergejunit.sh
@@ -1,78 +0,0 @@
#!/bin/bash
# Copyright (c) 2020 Linaro Limited
#
# SPDX-License-Identifier: Apache-2.0
set -eE

function cleanup()
{
    # Rename twister junit xml for use with junit-annotate-buildkite-plugin
    # create dummy file if twister did nothing
    if [ ! -f twister-out/twister.xml ]; then
        touch twister-out/twister.xml
    fi
    mv twister-out/twister.xml twister-${BUILDKITE_JOB_ID}.xml
    buildkite-agent artifact upload twister-${BUILDKITE_JOB_ID}.xml

    # Upload test_file to get list of tests that are build/run
    if [ -f test_file.txt ]; then
        buildkite-agent artifact upload test_file.txt
    fi

    # ccache stats
    echo "--- ccache stats at finish"
    ccache -s

    # Cleanup on exit
    rm -fr *

    # disk usage
    echo "--- disk usage at finish"
    df -h
}

trap cleanup ERR

echo "--- run $0"

git log -n 5 --oneline --decorate --abbrev=12

# Setup module cache
cd /workdir
ln -s /var/lib/buildkite-agent/zephyr-module-cache/modules
ln -s /var/lib/buildkite-agent/zephyr-module-cache/tools
ln -s /var/lib/buildkite-agent/zephyr-module-cache/bootloader
cd /workdir/zephyr

export JOB_NUM=$((${BUILDKITE_PARALLEL_JOB}+1))

# ccache stats
echo ""
echo "--- ccache stats at start"
ccache -s

if [ -n "${DAILY_BUILD}" ]; then
    TWISTER_OPTIONS=" --inline-logs -M -N --build-only --all --retry-failed 3 -v "
    echo "--- DAILY BUILD"
    west init -l .
    west update 1> west.update.log || west update 1> west.update-2.log
    west forall -c 'git reset --hard HEAD'
    source zephyr-env.sh
    ./scripts/twister --subset ${JOB_NUM}/${BUILDKITE_PARALLEL_JOB_COUNT} ${TWISTER_OPTIONS}
else
    if [ -n "${BUILDKITE_PULL_REQUEST_BASE_BRANCH}" ]; then
        ./scripts/ci/run_ci.sh -c -b ${BUILDKITE_PULL_REQUEST_BASE_BRANCH} -r origin \
            -m ${JOB_NUM} -M ${BUILDKITE_PARALLEL_JOB_COUNT} -p ${BUILDKITE_PULL_REQUEST}
    else
        ./scripts/ci/run_ci.sh -c -b ${BUILDKITE_BRANCH} -r origin \
            -m ${JOB_NUM} -M ${BUILDKITE_PARALLEL_JOB_COUNT};
    fi
fi

TWISTER_EXIT_STATUS=$?

cleanup

exit ${TWISTER_EXIT_STATUS}
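The deleted run.sh maps Buildkite's parallel-job variables onto twister subsets: Buildkite numbers parallel jobs from zero, while twister's --subset index is one-based, which is what the +1 above accounts for. A minimal sketch of that mapping, runnable outside the agent with illustrative stand-in values (the two BUILDKITE_* variables are normally exported by the agent itself):

```bash
#!/bin/bash
# Stand-ins for what the Buildkite agent exports; values are illustrative only.
: "${BUILDKITE_PARALLEL_JOB:=0}"          # zero-based index of this parallel job
: "${BUILDKITE_PARALLEL_JOB_COUNT:=20}"   # total number of parallel jobs

# twister subsets are one-based, hence the +1 used by run.sh.
JOB_NUM=$((BUILDKITE_PARALLEL_JOB + 1))

echo "would run: ./scripts/twister --subset ${JOB_NUM}/${BUILDKITE_PARALLEL_JOB_COUNT}"
```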
29  .github/workflows/bluetooth-tests-publish.yaml  (vendored, new file)
@@ -0,0 +1,29 @@
name: Publish Bluetooth Tests Results

on:
  workflow_run:
    workflows: ["Bluetooth Tests"]
    types:
      - completed
jobs:
  bluetooth-test-results:
    name: "Publish Bluetooth Test Results"
    runs-on: ubuntu-latest
    if: github.event.workflow_run.conclusion != 'skipped'

    steps:
      - name: Download artifacts
        uses: dawidd6/action-download-artifact@v2
        with:
          workflow: bluetooth-tests.yaml
          run_id: ${{ github.event.workflow_run.id }}

      - name: Publish Bluetooth Test Results
        uses: EnricoMi/publish-unit-test-result-action@v1
        with:
          check_name: Bluetooth Test Results
          comment_mode: off
          commit: ${{ github.event.workflow_run.head_sha }}
          event_file: event/event.json
          event_name: ${{ github.event.workflow_run.event }}
          files: "bluetooth-test-results/**/bsim_results.xml"
@@ -1,7 +1,7 @@
name: Bluetooth Tests

on:
  pull_request_target:
  pull_request:
    paths:
      - "west.yml"
      - "subsys/bluetooth/**"
@@ -11,16 +11,16 @@ on:
      - "arch/posix/**"

jobs:
  bsim-build-cancel:
  bluetooth-test-prep:
    runs-on: ubuntu-latest
    steps:
      - name: Cancel Previous Runs
        uses: styfle/cancel-workflow-action@0.6.0
        with:
          access_token: ${{ github.token }}
  bsim-build:
  bluetooth-test-build:
    runs-on: ubuntu-latest
    needs: bsim-build-cancel
    needs: bluetooth-test-prep
    container:
      image: zephyrprojectrtos/ci:v0.18.4
      options: '--entrypoint /bin/bash'
@@ -39,18 +39,15 @@ jobs:

      - name: checkout
        uses: actions/checkout@v2
        with:
          ref: ${{ github.event.pull_request.head.sha }}
          fetch-depth: 0

      - name: west setup
        run: |
          west init -l . || true
          west update
          west config --global update.narrow true
          west update 2>&1 1> west.update.log || west update 2>&1 1> west.update2.log

      - name: Run Bluetooth Tests with BSIM
        run: |
          #source zephyr-env.sh
          export ZEPHYR_BASE=${PWD}
          WORK_DIR=${ZEPHYR_BASE}/bsim_bt_out tests/bluetooth/bsim_bt/compile.sh
          RESULTS_FILE=${ZEPHYR_BASE}/${bsim_bt_test_results_file} \
@@ -60,26 +57,15 @@ jobs:
        if: always()
        uses: actions/upload-artifact@v2
        with:
          name: Bluetooth Test Results
          path: ./bsim_bt_out/bsim_results.xml
          name: bluetooth-test-results
          path: |
            ./bsim_bt_out/bsim_results.xml
            ${{ github.event_path }}

  publish-test-results:
    name: "Publish Unit Tests Results"
    needs: bsim-build
    runs-on: ubuntu-20.04
    # the build-and-test job might be skipped, we don't need to run this job then
    if: success() || failure()

    steps:
      - name: Download Artifacts
        uses: actions/download-artifact@v2
      - name: Upload Event Details
        if: always()
        uses: actions/upload-artifact@v2
        with:
          path: artifacts

      - name: Publish Unit Test Results
        uses: EnricoMi/publish-unit-test-result-action@v1.12
        with:
          check_name: Bluetooth Test Results
          github_token: ${{ secrets.GITHUB_TOKEN }}
          files: "**/bsim_results.xml"
          comment_on_pr: false
          name: event
          path: |
            ${{ github.event_path }}
91  .github/workflows/clang.yaml  (vendored)
@@ -3,7 +3,7 @@ name: Build with Clang/LLVM
on: pull_request_target

jobs:
  clang-build-cancel:
  clang-build-prep:
    runs-on: ubuntu-latest
    steps:
      - name: Cancel Previous Runs
@@ -11,19 +11,21 @@ jobs:
        with:
          access_token: ${{ github.token }}
  clang-build:
    runs-on: ubuntu-latest
    needs: clang-build-cancel
    runs-on: zephyr_runner
    needs: clang-build-prep
    container:
      image: zephyrprojectrtos/ci:v0.18.4
      options: '--entrypoint /bin/bash'
    strategy:
      fail-fast: false
      matrix:
        subset: [1, 2, 3, 4, 5]
        platform: ["native_posix"]
    env:
      ZEPHYR_SDK_INSTALL_DIR: /opt/toolchains/zephyr-sdk-0.13.1
      CLANG_ROOT_DIR: /usr/lib/llvm-12
      MATRIX_SIZE: 5
      COMMIT_RANGE: ${{ github.event.pull_request.base.sha }}..${{ github.event.pull_request.head.sha }}
    outputs:
      report_needed: ${{ steps.twister.outputs.report_needed }}
    steps:
      - name: Cancel Previous Runs
        uses: styfle/cancel-workflow-action@0.6.0
@@ -38,11 +40,17 @@ jobs:
        with:
          ref: ${{ github.event.pull_request.head.sha }}
          fetch-depth: 0
          persist-credentials: false

      - name: west setup
        run: |
          west init -l . || true
          west update
          # In some cases modules are left in a state where they can't be
          # updated (i.e. when we cancel a job and the builder is killed),
          # So first retry to update, if that does not work, remove all modules
          # and start over. (Workaround until we implement more robust module
          # west caching).
          west update 2>&1 1> west.log || west update 2>&1 1> west2.log || ( rm -rf ../modules && west update)

      - name: Check Environment
        run: |
@@ -50,41 +58,86 @@ jobs:
          ${CLANG_ROOT_DIR}/bin/clang --version
          gcc --version
          ls -la
      - name: Prepare ccache timestamp/data
        id: ccache_cache_timestamp
        shell: cmake -P {0}
        run: |
          string(TIMESTAMP current_date "%Y-%m-%d-%H;%M;%S" UTC)
          string(REPLACE "/" "_" repo ${{github.repository}})
          string(REPLACE "-" "_" repo2 ${repo})
          message("::set-output name=repo::${repo2}")
      - name: use cache
        id: cache-ccache
        uses: nashif/action-s3-cache@master
        with:
          key: ${{ steps.ccache_cache_timestamp.outputs.repo }}-${{ github.ref_name }}-clang-${{ matrix.platform }}-ccache
          path: /github/home/.ccache
          aws-s3-bucket: ccache.zephyrproject.org
          aws-access-key-id: ${{ secrets.CCACHE_S3_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.CCACHE_S3_SECRET_ACCESS_KEY }}
          aws-region: us-east-2

      - name: ccache stats initial
        run: |
          test -d github/home/.ccache && mv github/home/.ccache /github/home/.ccache
          ccache -M 10G -s

      - name: Run Tests with Twister
        id: twister
        run: |
          #source zephyr-env.sh
          git config --global user.email "bot@zephyrproject.org"
          git config --global user.name "Zephyr Builder"
          export ZEPHYR_BASE=${PWD}
          export ZEPHYR_TOOLCHAIN_VARIANT=llvm
          ./scripts/twister --inline-logs -M -N -v -p native_posix --subset ${{matrix.subset}}/${MATRIX_SIZE} --retry-failed 3
          # check if we need to run a full twister or not based on files changed
          SC=$(./scripts/ci/what_changed.py --commits ${COMMIT_RANGE})
          # Get twister arguments based on the files changed
          ./scripts/ci/get_twister_opt.py --commits ${COMMIT_RANGE}
          if [ "$SC" = "full" ]; then
            # Full twister
            echo "::set-output name=report_needed::1";
            ./scripts/twister --inline-logs -M -N -v -p ${{ matrix.platform }} --retry-failed 2
          else
            # We can limit scope to just what has changed
            if [ -s modified_tests.args ]; then
              # we are working with one platform at a time
              sed -i '/--all/d' modified_tests.args
              echo "::set-output name=report_needed::1";
              # Full twister but with options based on changes
              ./scripts/twister --inline-logs -M -N -v -p ${{ matrix.platform }} +modified_tests.args --retry-failed 2
            else
              # if nothing is run, skip reporting step
              echo "::set-output name=report_needed::0";
            fi
          fi

      - name: ccache stats post
        run: |
          ccache -s

      - name: Upload Unit Test Results
        if: always()
        if: always() && steps.twister.outputs.report_needed != 0
        uses: actions/upload-artifact@v2
        with:
          name: Unit Test Results (Subset ${{ matrix.subset }})
          name: Unit Test Results (Subset ${{ matrix.platform }})
          path: twister-out/twister.xml

  publish-test-results:
  clang-build-results:
    name: "Publish Unit Tests Results"
    needs: clang-build
    runs-on: ubuntu-20.04
    # the build-and-test job might be skipped, we don't need to run this job then
    if: success() || failure()

    if: (success() || failure() ) && needs.clang-build.outputs.report_needed != 0
    steps:
      - name: Download Artifacts
        uses: actions/download-artifact@v2
        with:
          path: artifacts

      - name: Display structure of downloaded files
        run: ls -R

      - name: Publish Unit Test Results
        uses: EnricoMi/publish-unit-test-result-action@v1.12
        uses: EnricoMi/publish-unit-test-result-action@v1
        if: always()
        with:
          check_name: Unit Test Results
          github_token: ${{ secrets.GITHUB_TOKEN }}
          files: "**/twister.xml"
          comment_on_pr: false
          comment_mode: off

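The `+modified_tests.args` argument in the changed-scope branch relies on twister reading extra options from a file (an argparse "fromfile" prefix), so the scope produced by get_twister_opt.py is spliced straight into the command line. A sketch of that expansion with hypothetical file contents; the real options are whatever get_twister_opt.py emits for the commit range:

```bash
#!/bin/bash
# Hypothetical contents of modified_tests.args; the real file is generated
# by ./scripts/ci/get_twister_opt.py from the files touched in the PR.
cat > modified_tests.args <<'EOF'
-T tests/kernel/sched
-T tests/bluetooth/init
EOF

# With the '+' fromfile prefix, this invocation:
#   ./scripts/twister -p native_posix +modified_tests.args
# behaves roughly like:
#   ./scripts/twister -p native_posix -T tests/kernel/sched -T tests/bluetooth/init
cat modified_tests.args
```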
164  .github/workflows/codecov.yaml  (vendored, new file)
@@ -0,0 +1,164 @@
name: Code Coverage with codecov

on:
  schedule:
    - cron: '25 */3 * * 1-5'

jobs:
  codecov-prep:
    runs-on: ubuntu-latest
    if: github.repository == 'zephyrproject-rtos/zephyr'
    steps:
      - name: Cancel Previous Runs
        uses: styfle/cancel-workflow-action@0.6.0
        with:
          access_token: ${{ github.token }}

  codecov:
    runs-on: zephyr_runner
    needs: codecov-prep
    container:
      image: zephyrprojectrtos/ci:v0.18.4
      options: '--entrypoint /bin/bash'
    strategy:
      fail-fast: false
      matrix:
        platform: ["native_posix", "qemu_x86", "unit_testing"]
    env:
      ZEPHYR_SDK_INSTALL_DIR: /opt/toolchains/zephyr-sdk-0.13.1
      CLANG_ROOT_DIR: /usr/lib/llvm-12
    steps:
      - name: Update PATH for west
        run: |
          echo "$HOME/.local/bin" >> $GITHUB_PATH

      - name: checkout
        uses: actions/checkout@v2
        with:
          fetch-depth: 0

      - name: west setup
        run: |
          west init -l . || true
          west update 1> west.update.log || west update 1> west.update-2.log

      - name: Check Environment
        run: |
          cmake --version
          ${CLANG_ROOT_DIR}/bin/clang --version
          gcc --version
          ls -la
      - name: Prepare ccache keys
        id: ccache_cache_prop
        shell: cmake -P {0}
        run: |
          string(REPLACE "/" "_" repo ${{github.repository}})
          string(REPLACE "-" "_" repo2 ${repo})
          message("::set-output name=repo::${repo2}")

      - name: use cache
        id: cache-ccache
        uses: nashif/action-s3-cache@master
        with:
          key: ${{ steps.ccache_cache_prop.outputs.repo }}-${{github.event_name}}-${{matrix.platform}}-codecov-ccache
          path: /github/home/.ccache
          aws-s3-bucket: ccache.zephyrproject.org
          aws-access-key-id: ${{ secrets.CCACHE_S3_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.CCACHE_S3_SECRET_ACCESS_KEY }}
          aws-region: us-east-2

      - name: ccache stats initial
        run: |
          test -d github/home/.ccache && mv github/home/.ccache /github/home/.ccache
          ccache -M 10G -s

      - name: Run Tests with Twister (Push)
        continue-on-error: true
        run: |
          export ZEPHYR_BASE=${PWD}
          export ZEPHYR_TOOLCHAIN_VARIANT=zephyr
          mkdir -p coverage/reports
          ./scripts/twister -N -v --filter runnable -p ${{ matrix.platform }} --coverage -T tests

      - name: Generate Coverage Report
        run: |
          mv twister-out/coverage.info lcov.pre.info
          lcov -q --remove lcov.pre.info mylib.c --remove lcov.pre.info tests/\* \
            --remove lcov.pre.info samples/\* --remove lcov.pre.info ext/\* \
            --remove lcov.pre.info *generated* \
            -o coverage/reports/${{ matrix.platform }}.info --rc lcov_branch_coverage=1

      - name: ccache stats post
        run: |
          ccache -s

      - name: Upload Coverage Results
        if: always()
        uses: actions/upload-artifact@v2
        with:
          name: Coverage Data (Subset ${{ matrix.platform }})
          path: coverage/reports/${{ matrix.platform }}.info

  codecov-results:
    name: "Publish Coverage Results"
    needs: codecov
    runs-on: ubuntu-latest
    # the codecov job might be skipped, we don't need to run this job then
    if: success() || failure()

    steps:
      - name: checkout
        uses: actions/checkout@v2
        with:
          fetch-depth: 0
      - name: Download Artifacts
        uses: actions/download-artifact@v2
        with:
          path: coverage/reports

      - name: Move coverage files
        run: |
          mv ./coverage/reports/*/*.info ./coverage/reports
          ls -la ./coverage/reports

      - name: Generate list of coverage files
        id: get-coverage-files
        shell: cmake -P {0}
        run: |
          file(GLOB INPUT_FILES_LIST "coverage/reports/*.info")
          set(MERGELIST "")
          set(FILELIST "")
          foreach(ITEM ${INPUT_FILES_LIST})
            get_filename_component(f ${ITEM} NAME)
            if(FILELIST STREQUAL "")
              set(FILELIST "${f}")
            else()
              set(FILELIST "${FILELIST},${f}")
            endif()
          endforeach()
          foreach(ITEM ${INPUT_FILES_LIST})
            get_filename_component(f ${ITEM} NAME)
            if(MERGELIST STREQUAL "")
              set(MERGELIST "-a ${f}")
            else()
              set(MERGELIST "${MERGELIST} -a ${f}")
            endif()
          endforeach()
          message("::set-output name=mergefiles::${MERGELIST}")
          message("::set-output name=covfiles::${FILELIST}")

      - name: Merge coverage files
        run: |
          sudo apt-get install -y lcov
          cd ./coverage/reports
          lcov ${{ steps.get-coverage-files.outputs.mergefiles }} -o merged.info --rc lcov_branch_coverage=1

      - name: Upload coverage to Codecov
        if: always()
        uses: codecov/codecov-action@v2
        with:
          directory: ./coverage/reports
          env_vars: OS,PYTHON
          fail_ci_if_error: false
          verbose: true
          files: merged.info
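The CMake step above only assembles an argument string; the merge itself is a single lcov call. A worked example of what the generated `mergefiles` output expands to when the matrix produced coverage for its three platforms (file names follow the `${{ matrix.platform }}.info` pattern used by the upload step; the exact set depends on which jobs succeeded):

```bash
#!/bin/bash
cd ./coverage/reports
# Expansion of ${{ steps.get-coverage-files.outputs.mergefiles }} for the
# native_posix, qemu_x86 and unit_testing matrix entries:
lcov -a native_posix.info -a qemu_x86.info -a unit_testing.info \
     -o merged.info --rc lcov_branch_coverage=1
```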
6  .github/workflows/compliance.yml  (vendored)
@@ -1,4 +1,4 @@
name: Compliance
name: Compliance Checks

on: pull_request

@@ -19,7 +19,7 @@ jobs:
        run: |
          python3 ./scripts/get_maintainer.py path CMakeLists.txt

  compliance_job:
  check_compliance:
    runs-on: ubuntu-latest
    name: Run compliance checks on patch series (PR)
    steps:
@@ -57,7 +57,7 @@ jobs:
          # debug
          git log --pretty=oneline | head -n 10
          west init -l . || true
          west update
          west update 2>&1 1> west.update.log || west update 2>&1 1> west.update2.log

      - name: Run Compliance Tests
        continue-on-error: true
14  .github/workflows/conflict.yml  (vendored)
@@ -1,14 +0,0 @@
name: Conflict Finder

on:
  push:
    branches-ignore:
      - '**'
jobs:
  conflict:
    runs-on: ubuntu-latest
    steps:
      - uses: mschilde/auto-label-merge-conflicts@master
        with:
          CONFLICT_LABEL_NAME: "has conflicts"
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
38  .github/workflows/daily_test_version.yml  (vendored)
@@ -1,38 +0,0 @@
# Copyright (c) 2020 Intel Corp.
# SPDX-License-Identifier: Apache-2.0

name: Publish commit for daily testing

on:
  schedule:
    - cron: '50 22 * * *'
  push:
    branches:
      - refs/tags/*

jobs:
  get_version:
    runs-on: ubuntu-latest
    if: github.repository == 'zephyrproject-rtos/zephyr'

    steps:
      - name: Configure AWS Credentials
        uses: aws-actions/configure-aws-credentials@v1
        with:
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID_TESTING }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_TESTING }}
          aws-region: us-east-1

      - name: install-pip
        run: |
          pip3 install gitpython

      - name: checkout
        uses: actions/checkout@v2
        with:
          fetch-depth: 0

      - name: Upload to AWS S3
        run: |
          python3 scripts/ci/version_mgr.py --update .
          aws s3 cp versions.json s3://testing.zephyrproject.org/daily_tests/versions.json
3  .github/workflows/devicetree_checks.yml  (vendored)
@@ -22,6 +22,9 @@ jobs:
      matrix:
        python-version: [3.6, 3.7, 3.8]
        os: [ubuntu-latest, macos-latest, windows-latest]
        exclude:
          - os: macos-latest
            python-version: 3.6
    steps:
      - name: checkout
        uses: actions/checkout@v2
3  .github/workflows/footprint-tracking.yml  (vendored)
@@ -52,7 +52,8 @@ jobs:
      - name: west setup
        run: |
          west init -l . || true
          west update
          west config --global update.narrow true
          west update 2>&1 1> west.update.log || west update 2>&1 1> west.update2.log

      - name: Configure AWS Credentials
        uses: aws-actions/configure-aws-credentials@v1
3  .github/workflows/footprint.yml  (vendored)
@@ -42,7 +42,8 @@ jobs:
      - name: west setup
        run: |
          west init -l . || true
          west update
          west config --global update.narrow true
          west update 2>&1 1> west.update.log || west update 2>&1 1> west.update.log

      - name: Detect Changes in Footprint
        env:
53  .github/workflows/issue_count.yml  (vendored)
@@ -1,53 +0,0 @@
name: Issue Tracker

on:
  schedule:
    - cron: '*/10 * * * *'

env:
  OUTPUT_FILE_NAME: IssuesReport.md
  COMMITTER_EMAIL: actions@github.com
  COMMITTER_NAME: github-actions
  COMMITTER_USERNAME: github-actions

jobs:
  track-issues:
    name: "Collect Issue Stats"
    runs-on: ubuntu-latest
    if: github.repository == 'zephyrproject-rtos/zephyr'

    steps:
      - name: Download configuration file
        run: |
          wget -q https://raw.githubusercontent.com/$GITHUB_REPOSITORY/master/.github/workflows/issues-report-config.json

      - name: install-packages
        run: |
          sudo apt-get install discount

      - uses: brcrista/summarize-issues@v3
        with:
          title: 'Issues Report for ${{ github.repository }}'
          configPath: 'issues-report-config.json'
          outputPath: ${{ env.OUTPUT_FILE_NAME }}
          token: ${{ secrets.GITHUB_TOKEN }}

      - name: upload-stats
        uses: actions/upload-artifact@master
        continue-on-error: True
        with:
          name: ${{ env.OUTPUT_FILE_NAME }}
          path: ${{ env.OUTPUT_FILE_NAME }}

      - name: Configure AWS Credentials
        uses: aws-actions/configure-aws-credentials@v1
        with:
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID_TESTING }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_TESTING }}
          aws-region: us-east-1

      - name: Post Results
        run: |
          mkd2html IssuesReport.md IssuesReport.html
          aws s3 cp --quiet IssuesReport.html s3://testing.zephyrproject.org/issues/$GITHUB_REPOSITORY/index.html
37  .github/workflows/issues-report-config.json  (vendored)
@@ -1,37 +0,0 @@
[
  {
    "section": "High Priority Bugs",
    "labels": ["bug", "priority: high"],
    "threshold": 0
  },
  {
    "section": "Medium Priority Bugs",
    "labels": ["bug", "priority: medium"],
    "threshold": 20
  },
  {
    "section": "Low Priority Bugs",
    "labels": ["bug", "priority: low"],
    "threshold": 100
  },
  {
    "section": "Enhancements",
    "labels": ["Enhancement"],
    "threshold": 500
  },
  {
    "section": "Features",
    "labels": ["Feature"],
    "threshold": 100
  },
  {
    "section": "Questions",
    "labels": ["question"],
    "threshold": 100
  },
  {
    "section": "Static Analysis",
    "labels": ["Coverity"],
    "threshold": 100
  }
]
1  .github/workflows/manifest.yml  (vendored)
@@ -15,6 +15,7 @@ jobs:
          path: zephyrproject/zephyr
          ref: ${{ github.event.pull_request.head.sha }}
          fetch-depth: 0
          persist-credentials: false

      - name: Manifest
        uses: zephyrproject-rtos/action-manifest@2f1ad2908599d4fe747f886f9d733dd7eebae4ef
23  .github/workflows/stale_issue.yml  (vendored)
@@ -1,23 +0,0 @@
name: "Close stale pull requests/issues"
on:
  schedule:
    - cron: "16 00 * * *"

jobs:
  stale:
    name: Find Stale issues and PRs
    runs-on: ubuntu-latest
    if: github.repository == 'zephyrproject-rtos/zephyr'
    steps:
      - uses: actions/stale@v3
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
          stale-pr-message: 'This pull request has been marked as stale because it has been open (more than) 60 days with no activity. Remove the stale label or add a comment saying that you would like to have the label removed otherwise this pull request will automatically be closed in 14 days. Note, that you can always re-open a closed pull request at any time.'
          stale-issue-message: 'This issue has been marked as stale because it has been open (more than) 60 days with no activity. Remove the stale label or add a comment saying that you would like to have the label removed otherwise this issue will automatically be closed in 14 days. Note, that you can always re-open a closed issue at any time.'
          days-before-stale: 60
          days-before-close: 14
          stale-issue-label: 'Stale'
          stale-pr-label: 'Stale'
          exempt-pr-labels: 'Blocked,In progress'
          exempt-issue-labels: 'In progress,Enhancement,Feature,Feature Request,RFC,Meta'
          operations-per-run: 400
231  .github/workflows/twister.yaml  (vendored, new file)
@@ -0,0 +1,231 @@
name: Run tests with twister

on:
  push:
    branches:
      - v2.7-auditable-branch
  pull_request_target:
    branches:
      - v2.7-auditable-branch
  schedule:
    # Run at 00:00 on Saturday
    - cron: '0 8 * * 6'

jobs:
  twister-build-prep:
    runs-on: zephyr_runner
    container:
      image: zephyrprojectrtos/ci:v0.18.4
      options: '--entrypoint /bin/bash'
    outputs:
      subset: ${{ steps.output-services.outputs.subset }}
      size: ${{ steps.output-services.outputs.size }}
    env:
      MATRIX_SIZE: 10
      DAILY_MATRIX_SIZE: 120
      ZEPHYR_SDK_INSTALL_DIR: /opt/toolchains/zephyr-sdk-0.13.1
      CLANG_ROOT_DIR: /usr/lib/llvm-12
      TESTS_PER_BUILDER: 700
      COMMIT_RANGE: ${{ github.event.pull_request.base.sha }}..${{ github.event.pull_request.head.sha }}
    steps:
      - name: Cancel Previous Runs
        uses: styfle/cancel-workflow-action@0.6.0
        with:
          access_token: ${{ github.token }}

      - name: checkout
        if: github.event_name == 'pull_request_target'
        uses: actions/checkout@v2
        with:
          ref: ${{ github.event.pull_request.head.sha }}
          fetch-depth: 0
          persist-credentials: false

      - name: west setup
        if: github.event_name == 'pull_request_target'
        run: |
          west init -l . || true
          west config --global update.narrow true
          west update 2>&1 1> west.update.log || west update 2>&1 1> west.update.log
          west forall -c 'git reset --hard HEAD'

      - name: Generate Test Plan with Twister
        if: github.event_name == 'pull_request_target'
        id: test-plan
        run: |
          sudo apt-get install -y bc
          git config --global user.email "bot@zephyrproject.org"
          git config --global user.name "Zephyr Bot"
          export ZEPHYR_BASE=${PWD}
          export ZEPHYR_TOOLCHAIN_VARIANT=zephyr
          git log -n 500 --oneline | grep -q "run twister using github action" || (
            echo "Your branch is not up to date, you need to rebase on top of latest HEAD of main branch"
            exit 1
          )
          ./scripts/ci/run_ci.sh -S -c -b ${{github.base_ref}} -r origin \
            -p ${{github.event.pull_request.number}} -R ${COMMIT_RANGE}
          # remove all tests to be skipped
          grep -v skipped test_file.txt > no_skipped.txt
          # get number of tests
          lines=$(wc -l < no_skipped.txt)
          if [ "$lines" = 1 ]; then
            # no tests, so we need 0 nodes
            nodes=0
          else
            nodes=$(echo "${lines} / ${TESTS_PER_BUILDER}" | bc)
            if [ "${nodes}" = 0 ]; then
              # for less than TESTS_PER_BUILDER, we take at least 1 node
              nodes=1
            fi
          fi
          echo "::set-output name=calculated_matrix_size::${nodes}";
          rm test_file.txt no_skipped.txt

      - name: Determine matrix size
        id: output-services
        run: |
          if [ "${{github.event_name}}" = "pull_request_target" ]; then
            if [ -n "${{steps.test-plan.outputs.calculated_matrix_size}}" ]; then
              subset="[$(seq -s',' 1 ${{steps.test-plan.outputs.calculated_matrix_size}})]"
            else
              subset="[$(seq -s',' 1 ${MATRIX_SIZE})]"
            fi
            size=${{ steps.test-plan.outputs.calculated_matrix_size }}
          elif [ "${{github.event_name}}" = "push" ]; then
            subset="[$(seq -s',' 1 ${MATRIX_SIZE})]"
            size=${MATRIX_SIZE}
          else
            subset="[$(seq -s',' 1 ${DAILY_MATRIX_SIZE})]"
            size=${DAILY_MATRIX_SIZE}
          fi
          echo "::set-output name=subset::${subset}";
          echo "::set-output name=size::${size}";

  twister-build:
    runs-on: zephyr_runner
    needs: twister-build-prep
    if: needs.twister-build-prep.outputs.size != 0
    container:
      image: zephyrprojectrtos/ci:v0.18.4
      options: '--entrypoint /bin/bash'
    strategy:
      fail-fast: false
      matrix:
        subset: ${{fromJSON(needs.twister-build-prep.outputs.subset)}}
    env:
      ZEPHYR_SDK_INSTALL_DIR: /opt/toolchains/zephyr-sdk-0.13.1
      CLANG_ROOT_DIR: /usr/lib/llvm-12
      DAILY_OPTIONS: ' --inline-logs -M -N --build-only --all --retry-failed 3 -v '
      COMMIT_RANGE: ${{ github.event.pull_request.base.sha }}..${{ github.event.pull_request.head.sha }}
    steps:
      - name: Update PATH for west
        run: |
          echo "$HOME/.local/bin" >> $GITHUB_PATH

      - name: checkout
        uses: actions/checkout@v2
        with:
          ref: ${{ github.event.pull_request.head.sha }}
          fetch-depth: 0
          persist-credentials: false

      - name: west setup
        run: |
          west init -l . || true
          west config --global update.narrow true
          west update 2>&1 1> west.update.log || west update 2>&1 1> west.update.log
          west forall -c 'git reset --hard HEAD'

      - name: Check Environment
        run: |
          cmake --version
          ${CLANG_ROOT_DIR}/bin/clang --version
          gcc --version
          ls -la
          echo "github.ref: ${{ github.ref }}"
          echo "github.base_ref: ${{ github.base_ref }}"
          echo "github.ref_name: ${{ github.ref_name }}"

      - name: Prepare ccache timestamp/data
        id: ccache_cache_timestamp
        shell: cmake -P {0}
        run: |
          string(TIMESTAMP current_date "%Y-%m-%d-%H;%M;%S" UTC)
          string(REPLACE "/" "_" repo ${{github.repository}})
          string(REPLACE "-" "_" repo2 ${repo})
          message("::set-output name=repo::${repo2}")

      - name: use cache
        id: cache-ccache
        uses: nashif/action-s3-cache@master
        with:
          key: ${{ steps.ccache_cache_timestamp.outputs.repo }}-${{ github.ref_name }}-${{github.event_name}}-${{ matrix.subset }}-ccache
          path: /github/home/.ccache
          aws-s3-bucket: ccache.zephyrproject.org
          aws-access-key-id: ${{ secrets.CCACHE_S3_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.CCACHE_S3_SECRET_ACCESS_KEY }}
          aws-region: us-east-2

      - name: ccache stats initial
        run: |
          test -d github/home/.ccache && mv github/home/.ccache /github/home/.ccache
          ccache -M 10G -s

      - if: github.event_name == 'push'
        name: Run Tests with Twister (Push)
        run: |
          export ZEPHYR_BASE=${PWD}
          export ZEPHYR_TOOLCHAIN_VARIANT=zephyr
          ./scripts/ci/run_ci.sh -c -b main -r origin -m ${{matrix.subset}} \
            -M ${{ strategy.job-total }}

      - if: github.event_name == 'pull_request_target'
        name: Run Tests with Twister (Pull Request)
        run: |
          git config --global user.email "bot@zephyrproject.org"
          git config --global user.name "Zephyr Builder"
          export ZEPHYR_BASE=${PWD}
          export ZEPHYR_TOOLCHAIN_VARIANT=zephyr
          ./scripts/ci/run_ci.sh -c -b ${{github.base_ref}} -r origin \
            -m ${{matrix.subset}} -M ${{ strategy.job-total }} \
            -p ${{github.event.pull_request.number}} -R ${COMMIT_RANGE}

      - if: github.event_name == 'schedule'
        name: Run Tests with Twister (Daily)
        run: |
          export ZEPHYR_BASE=${PWD}
          export ZEPHYR_TOOLCHAIN_VARIANT=zephyr
          ./scripts/twister --subset ${{matrix.subset}}/${{ strategy.job-total }} ${DAILY_OPTIONS}

      - name: ccache stats post
        run: |
          ccache -s

      - name: Upload Unit Test Results
        if: always()
        uses: actions/upload-artifact@v2
        with:
          name: Unit Test Results (Subset ${{ matrix.subset }})
          path: twister-out/twister.xml

  twister-test-results:
    name: "Publish Unit Tests Results"
    needs: twister-build
    runs-on: ubuntu-latest
    # the build-and-test job might be skipped, we don't need to run this job then
    if: success() || failure()

    steps:
      - name: Download Artifacts
        uses: actions/download-artifact@v2
        with:
          path: artifacts

      - name: Publish Unit Test Results
        uses: EnricoMi/publish-unit-test-result-action@v1
        with:
          check_name: Unit Test Results
          github_token: ${{ secrets.GITHUB_TOKEN }}
          files: "**/twister.xml"
          comment_mode: off
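The twister-build-prep job above sizes the build matrix from the generated test plan: the node count is the number of non-skipped tests divided by TESTS_PER_BUILDER (integer division via bc), clamped to at least one node when any tests remain, and the subset output is a JSON-style list that fromJSON() later feeds into the matrix. A condensed, runnable sketch of that arithmetic with an illustrative test count:

```bash
#!/bin/bash
TESTS_PER_BUILDER=700
lines=1500          # illustrative number of non-skipped test cases

if [ "${lines}" = 1 ]; then
    nodes=0                                     # nothing to build
else
    nodes=$(( lines / TESTS_PER_BUILDER ))      # same result as the bc pipeline
    [ "${nodes}" -eq 0 ] && nodes=1             # at least one builder if tests exist
fi

subset="[$(seq -s',' 1 ${nodes})]"
echo "matrix size: ${nodes}, subset: ${subset}"  # -> matrix size: 2, subset: [1,2]
```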
3  .github/workflows/west_cmds.yml  (vendored)
@@ -23,6 +23,9 @@ jobs:
      matrix:
        python-version: [3.6, 3.7, 3.8]
        os: [ubuntu-latest, macos-latest, windows-latest]
        exclude:
          - os: macos-latest
            python-version: 3.6
    steps:
      - name: checkout
        uses: actions/checkout@v2
@@ -168,21 +168,29 @@ if(CONFIG_CPLUSPLUS)
  # Kconfig choice ensures only one of these CONFIG_STD_CPP* is set.
  if(CONFIG_STD_CPP98)
    set(STD_CPP_DIALECT_FLAGS $<TARGET_PROPERTY:compiler-cpp,dialect_cpp98>)
    list(APPEND CMAKE_CXX_COMPILE_FEATURES ${compile_features_cpp98})
  elseif(CONFIG_STD_CPP11)
    set(STD_CPP_DIALECT_FLAGS $<TARGET_PROPERTY:compiler-cpp,dialect_cpp11>) # Default in kconfig
    list(APPEND CMAKE_CXX_COMPILE_FEATURES ${compile_features_cpp11})
  elseif(CONFIG_STD_CPP14)
    set(STD_CPP_DIALECT_FLAGS $<TARGET_PROPERTY:compiler-cpp,dialect_cpp14>)
    list(APPEND CMAKE_CXX_COMPILE_FEATURES ${compile_features_cpp14})
  elseif(CONFIG_STD_CPP17)
    set(STD_CPP_DIALECT_FLAGS $<TARGET_PROPERTY:compiler-cpp,dialect_cpp17>)
    list(APPEND CMAKE_CXX_COMPILE_FEATURES ${compile_features_cpp17})
  elseif(CONFIG_STD_CPP2A)
    set(STD_CPP_DIALECT_FLAGS $<TARGET_PROPERTY:compiler-cpp,dialect_cpp2a>)
    list(APPEND CMAKE_CXX_COMPILE_FEATURES ${compile_features_cpp20})
  elseif(CONFIG_STD_CPP20)
    set(STD_CPP_DIALECT_FLAGS $<TARGET_PROPERTY:compiler-cpp,dialect_cpp20>)
    list(APPEND CMAKE_CXX_COMPILE_FEATURES ${compile_features_cpp20})
  elseif(CONFIG_STD_CPP2B)
    set(STD_CPP_DIALECT_FLAGS $<TARGET_PROPERTY:compiler-cpp,dialect_cpp2b>)
    list(APPEND CMAKE_CXX_COMPILE_FEATURES ${compile_features_cpp20})
  else()
    assert(0 "Unreachable code. Expected C++ standard to have been chosen. See Kconfig.zephyr.")
  endif()
  set(CMAKE_CXX_COMPILE_FEATURES ${CMAKE_CXX_COMPILE_FEATURES} PARENT_SCOPE)

  zephyr_compile_options($<$<COMPILE_LANGUAGE:CXX>:${STD_CPP_DIALECT_FLAGS}>)
endif()

@@ -979,6 +987,7 @@ set_ifndef(CSTD c99)
zephyr_compile_options(
  $<$<COMPILE_LANGUAGE:C>:$<TARGET_PROPERTY:compiler,cstd>${CSTD}>
)
set(CMAKE_C_COMPILE_FEATURES ${compile_features_${CSTD}} PARENT_SCOPE)

# @Intent: Configure linker scripts, i.e. generate linker scripts with variables substituted
toolchain_ld_configure_files()
@@ -15,7 +15,6 @@

/.github/                @nashif
/.github/workflows/      @galak @nashif
/.buildkite/             @galak
/MAINTAINERS.yml         @ioannisg @MaureenHelm
/arch/arc/               @abrodkin @ruuddw @evgeniy-paltsev
/arch/arm/               @MaureenHelm @galak @ioannisg

@@ -1632,7 +1632,6 @@ CI:
    - galak
  files:
    - .github/
    - .buildkite/
    - scripts/ci/
    - .checkpatch.conf
    - scripts/gitlint/
2  VERSION
@@ -2,4 +2,4 @@ VERSION_MAJOR = 2
VERSION_MINOR = 7
PATCHLEVEL = 0
VERSION_TWEAK = 0
EXTRAVERSION = rc3
EXTRAVERSION =
@@ -136,6 +136,7 @@ config NUM_IRQS
config RGF_NUM_BANKS
	int "Number of General Purpose Register Banks"
	depends on ARC_FIRQ
	depends on NUM_IRQ_PRIO_LEVELS > 1
	range 1 2
	default 2
	help
@@ -145,10 +146,15 @@ config RGF_NUM_BANKS
	  If fast interrupts are supported but there is only 1
	  register bank, the fast interrupt handler must save
	  and restore general purpose registers.
	  NOTE: it's required to have more than one interrupt priority level
	  to use second register bank - otherwise all interrupts will use
	  same register bank. Such configuration isn't supported in software
	  and it is not beneficial from the performance point of view.

config ARC_FIRQ
	bool "FIRQ enable"
	depends on ISA_ARCV2
	depends on NUM_IRQ_PRIO_LEVELS > 1
	default y
	help
	  Fast interrupts are supported (FIRQ). If FIRQ enabled, for interrupts
@@ -156,6 +162,10 @@ config ARC_FIRQ
	  other regs will be saved according to the number of register bank;
	  If FIRQ is disabled, the handle of interrupts with highest priority
	  will be same with other interrupts.
	  NOTE: we don't allow the configuration with FIRQ enabled and only one
	  interrupt priority level (so all interrupts are FIRQ). Such
	  configuration isn't supported in software and it is not beneficial
	  from the performance point of view.

config ARC_FIRQ_STACK
	bool "Enable separate firq stack"
@@ -35,7 +35,7 @@ int arch_mem_domain_max_partitions_get(void)
/*
 * Validate the given buffer is user accessible or not
 */
int arch_buffer_validate(void *addr, size_t size, int write)
int arch_buffer_validate(const void *addr, size_t size, bool write)
{
	return arc_core_mpu_buffer_validate(addr, size, write);
}

@@ -207,7 +207,7 @@ int arc_core_mpu_get_max_domain_partition_regions(void)
/**
 * @brief validate the given buffer is user accessible or not
 */
int arc_core_mpu_buffer_validate(void *addr, size_t size, int write)
int arc_core_mpu_buffer_validate(const void *addr, size_t size, bool write)
{
	/*
	 * For ARC MPU, smaller region number takes priority.

@@ -779,7 +779,7 @@ int arc_core_mpu_get_max_domain_partition_regions(void)
/**
 * @brief validate the given buffer is user accessible or not
 */
int arc_core_mpu_buffer_validate(void *addr, size_t size, int write)
int arc_core_mpu_buffer_validate(const void *addr, size_t size, bool write)
{
	int r_index;
	int key = arch_irq_lock();
@@ -88,7 +88,7 @@ void z_arm_irq_priority_set(unsigned int irq, unsigned int prio, uint32_t flags)
	__ASSERT(prio <= (BIT(NUM_IRQ_PRIO_BITS) - 1),
		 "invalid priority %d for %d irq! values must be less than %lu\n",
		 prio - _IRQ_PRIO_OFFSET, irq,
		 BIT(NUM_IRQ_PRIO_BITS) - (_IRQ_PRIO_OFFSET));
		 (unsigned long)BIT(NUM_IRQ_PRIO_BITS) - (_IRQ_PRIO_OFFSET));
	NVIC_SetPriority((IRQn_Type)irq, prio);
}

@@ -278,7 +278,7 @@ void z_arm_configure_dynamic_mpu_regions(struct k_thread *thread)

		__ASSERT((uintptr_t)&z_priv_stacks_ram_start <= guard_start,
			 "Guard start: (0x%lx) below privilege stacks boundary: (%p)",
			 guard_start, &z_priv_stacks_ram_start);
			 guard_start, z_priv_stacks_ram_start);
	} else
#endif /* CONFIG_USERSPACE */
	{
@@ -324,7 +324,7 @@ int arch_mem_domain_max_partitions_get(void)
	return ARM_CORE_MPU_MAX_DOMAIN_PARTITIONS_GET(available_regions);
}

int arch_buffer_validate(void *addr, size_t size, int write)
int arch_buffer_validate(const void *addr, size_t size, bool write)
{
	return arm_core_mpu_buffer_validate(addr, size, write);
}
@@ -261,7 +261,7 @@ int arm_core_mpu_get_max_available_dyn_regions(void);
 * spans multiple enabled MPU regions (even if these regions all
 * permit user access).
 */
int arm_core_mpu_buffer_validate(void *addr, size_t size, int write);
int arm_core_mpu_buffer_validate(const void *addr, size_t size, bool write);

#endif /* CONFIG_ARM_MPU */

@@ -253,7 +253,7 @@ int arm_core_mpu_get_max_available_dyn_regions(void)
 *
 * Presumes the background mapping is NOT user accessible.
 */
int arm_core_mpu_buffer_validate(void *addr, size_t size, int write)
int arm_core_mpu_buffer_validate(const void *addr, size_t size, bool write)
{
	return mpu_buffer_validate(addr, size, write);
}

@@ -169,7 +169,7 @@ static inline int is_user_accessible_region(uint32_t r_index, int write)
 * This internal function validates whether a given memory buffer
 * is user accessible or not.
 */
static inline int mpu_buffer_validate(void *addr, size_t size, int write)
static inline int mpu_buffer_validate(const void *addr, size_t size, bool write)
{
	int32_t r_index;
	int rc = -EPERM;

@@ -270,7 +270,7 @@ static inline int is_enabled_region(uint32_t index)
 * in case the fast address range check fails.
 *
 */
static inline int mpu_buffer_validate(void *addr, size_t size, int write)
static inline int mpu_buffer_validate(const void *addr, size_t size, bool write)
{
	uint32_t _addr = (uint32_t)addr;
	uint32_t _size = (uint32_t)size;

@@ -536,7 +536,7 @@ static inline int is_user_accessible_region(uint32_t r_index, int write)
/**
 * @brief validate the given buffer is user accessible or not
 */
int arm_core_mpu_buffer_validate(void *addr, size_t size, int write)
int arm_core_mpu_buffer_validate(const void *addr, size_t size, bool write)
{
	uint8_t r_index;

@@ -65,7 +65,7 @@
		"pop {r0-r3}\n\t" \
		load_lr "\n\t" \
		::); \
	} while (0)
	} while (false)

/**
 * @brief Macro for "sandwiching" a function call (@p name) in two other calls
@@ -50,7 +50,7 @@ strlen_done:
	ret

/*
 * int arch_buffer_validate(void *addr, size_t size, int write)
 * int arch_buffer_validate(const void *addr, size_t size, bool write)
 */

GTEXT(arch_buffer_validate)

@@ -53,15 +53,15 @@ void z_irq_do_offload(void);
#if ALT_CPU_ICACHE_SIZE > 0
void z_nios2_icache_flush_all(void);
#else
#define z_nios2_icache_flush_all() do { } while (0)
#define z_nios2_icache_flush_all() do { } while (false)
#endif

#if ALT_CPU_DCACHE_SIZE > 0
void z_nios2_dcache_flush_all(void);
void z_nios2_dcache_flush_no_writeback(void *start, uint32_t len);
#else
#define z_nios2_dcache_flush_all() do { } while (0)
#define z_nios2_dcache_flush_no_writeback(x, y) do { } while (0)
#define z_nios2_dcache_flush_all() do { } while (false)
#define z_nios2_dcache_flush_no_writeback(x, y) do { } while (false)
#endif

#endif /* _ASMLANGUAGE */

@@ -328,7 +328,7 @@ void z_riscv_pmp_add_dynamic(struct k_thread *thread,
	}
}

int arch_buffer_validate(void *addr, size_t size, int write)
int arch_buffer_validate(const void *addr, size_t size, bool write)
{
	uint32_t index, i;
	ulong_t pmp_type, pmp_addr_start, pmp_addr_stop;
@@ -15,7 +15,7 @@ static bool check_sum(struct acpi_sdt *t)
{
	uint8_t sum = 0U, *p = (uint8_t *)t;

	for (int i = 0; i < t->length; i++) {
	for (uint32_t i = 0; i < t->length; i++) {
		sum += p[i];
	}

@@ -26,7 +26,7 @@ static void find_rsdp(void)
{
	uint8_t *bda_seg, *zero_page_base;
	uint64_t *search;
	uintptr_t search_phys, rsdp_phys = 0U;
	uintptr_t search_phys, rsdp_phys;
	size_t search_length, rsdp_length;

	if (is_rsdp_searched) {
@@ -49,7 +49,7 @@ static void find_rsdp(void)
	 * first megabyte and are directly accessible.
	 */
	bda_seg = 0x040e + zero_page_base;
	search_phys = (long)(((int)*(uint16_t *)bda_seg) << 4);
	search_phys = ((uintptr_t)*(uint16_t *)bda_seg) << 4;

	/* Unmap after use */
	z_phys_unmap(zero_page_base, 4096);
@@ -57,14 +57,14 @@ static void find_rsdp(void)
	/* Might be nothing there, check before we inspect.
	 * Note that EBDA usually is in 0x80000 to 0x100000.
	 */
	if ((POINTER_TO_UINT(search_phys) >= 0x80000UL) &&
	    (POINTER_TO_UINT(search_phys) < 0x100000UL)) {
	if ((search_phys >= 0x80000UL) &&
	    (search_phys < 0x100000UL)) {
		search_length = 1024;
		z_phys_map((uint8_t **)&search, search_phys, search_length, 0);

		for (int i = 0; i < 1024/8; i++) {
		for (size_t i = 0; i < (1024/8); i++) {
			if (search[i] == ACPI_RSDP_SIGNATURE) {
				rsdp_phys = search_phys + i * 8;
				rsdp_phys = search_phys + (i * 8);
				rsdp = (void *)&search[i];
				goto found;
			}
@@ -80,10 +80,9 @@ static void find_rsdp(void)
	search_length = 128 * 1024;
	z_phys_map((uint8_t **)&search, search_phys, search_length, 0);

	rsdp_phys = 0U;
	for (int i = 0; i < 128*1024/8; i++) {
	for (size_t i = 0; i < ((128*1024)/8); i++) {
		if (search[i] == ACPI_RSDP_SIGNATURE) {
			rsdp_phys = search_phys + i * 8;
			rsdp_phys = search_phys + (i * 8);
			rsdp = (void *)&search[i];
			goto found;
		}
@@ -133,11 +132,11 @@ void *z_acpi_find_table(uint32_t signature)

	find_rsdp();

	if (!rsdp) {
	if (rsdp == NULL) {
		return NULL;
	}

	if (rsdp->rsdt_ptr) {
	if (rsdp->rsdt_ptr != 0U) {
		z_phys_map((uint8_t **)&rsdt, rsdp->rsdt_ptr, sizeof(*rsdt), 0);
		tbl_found = false;

@@ -150,11 +149,11 @@ void *z_acpi_find_table(uint32_t signature)
		uint32_t *end = (uint32_t *)((char *)rsdt + rsdt->sdt.length);

		for (uint32_t *tp = &rsdt->table_ptrs[0]; tp < end; tp++) {
			t_phys = (long)*tp;
			t_phys = (uintptr_t)*tp;
			z_phys_map(&mapped_tbl, t_phys, sizeof(*t), 0);
			t = (void *)mapped_tbl;

			if (t->signature == signature && check_sum(t)) {
			if ((t->signature == signature) && check_sum(t)) {
				tbl_found = true;
				break;
			}
@@ -174,7 +173,7 @@ void *z_acpi_find_table(uint32_t signature)
		return NULL;
	}

	if (rsdp->xsdt_ptr) {
	if (rsdp->xsdt_ptr != 0ULL) {
		z_phys_map((uint8_t **)&xsdt, rsdp->xsdt_ptr, sizeof(*xsdt), 0);

		tbl_found = false;
@@ -187,11 +186,11 @@ void *z_acpi_find_table(uint32_t signature)
		uint64_t *end = (uint64_t *)((char *)xsdt + xsdt->sdt.length);

		for (uint64_t *tp = &xsdt->table_ptrs[0]; tp < end; tp++) {
			t_phys = (long)*tp;
			t_phys = (uintptr_t)*tp;
			z_phys_map(&mapped_tbl, t_phys, sizeof(*t), 0);
			t = (void *)mapped_tbl;

			if (t->signature == signature && check_sum(t)) {
			if ((t->signature == signature) && check_sum(t)) {
				tbl_found = true;
				break;
			}
@@ -229,7 +228,7 @@ struct acpi_cpu *z_acpi_get_cpu(int n)
	uintptr_t base = POINTER_TO_UINT(madt);
	uintptr_t offset;

	if (!madt) {
	if (madt == NULL) {
		return NULL;
	}
@@ -21,8 +21,8 @@
|
||||
* together.
|
||||
*/
|
||||
static mm_reg_t mmio;
|
||||
#define IN(reg) (sys_read32(mmio + reg * 4) & 0xff)
|
||||
#define OUT(reg, val) sys_write32((val) & 0xff, mmio + reg * 4)
|
||||
#define IN(reg) (sys_read32(mmio + ((reg) * 4U)) & 0xffU)
|
||||
#define OUT(reg, val) sys_write32((uint32_t)(val) & 0xffU, mmio + ((reg) * 4U))
|
||||
#elif defined(X86_SOC_EARLY_SERIAL_MMIO8_ADDR)
|
||||
/* Still other devices use a MMIO region containing packed byte
|
||||
* registers
|
||||
@@ -49,21 +49,21 @@ static mm_reg_t mmio;
|
||||
#define REG_BRDH 0x01 /* Baud rate divisor (MSB) */
|
||||
|
||||
#define IER_DISABLE 0x00
|
||||
#define LCR_8N1 (BIT(0) | BIT(1))
|
||||
#define LCR_DLAB_SELECT BIT(7)
|
||||
#define MCR_DTR BIT(0)
|
||||
#define MCR_RTS BIT(1)
|
||||
#define LSR_THRE BIT(5)
|
||||
#define LCR_8N1 (BIT32(0) | BIT32(1))
|
||||
#define LCR_DLAB_SELECT BIT32(7)
|
||||
#define MCR_DTR BIT32(0)
|
||||
#define MCR_RTS BIT32(1)
|
||||
#define LSR_THRE BIT32(5)
|
||||
|
||||
#define FCR_FIFO BIT(0) /* enable XMIT and RCVR FIFO */
|
||||
#define FCR_RCVRCLR BIT(1) /* clear RCVR FIFO */
|
||||
#define FCR_XMITCLR BIT(2) /* clear XMIT FIFO */
|
||||
#define FCR_FIFO_1 0 /* 1 byte in RCVR FIFO */
|
||||
#define FCR_FIFO_1 0x00U /* 1 byte in RCVR FIFO */
|
||||
|
||||
static bool early_serial_init_done;
|
||||
static uint32_t suppressed_chars;
|
||||
|
||||
static void serout(int c)
|
||||
static void serout(uint8_t c)
|
||||
{
|
||||
while ((IN(REG_LSR) & LSR_THRE) == 0) {
|
||||
}
|
||||
@@ -77,10 +77,10 @@ int arch_printk_char_out(int c)
|
||||
return c;
|
||||
}
|
||||
|
||||
if (c == '\n') {
|
||||
serout('\r');
|
||||
if (c == (int)'\n') {
|
||||
serout((uint8_t)'\r');
|
||||
}
|
||||
serout(c);
|
||||
serout((uint8_t)c);
|
||||
return c;
|
||||
}
|
||||
|
||||
@@ -100,8 +100,8 @@ void z_x86_early_serial_init(void)
|
||||
|
||||
OUT(REG_IER, IER_DISABLE); /* Disable interrupts */
|
||||
OUT(REG_LCR, LCR_DLAB_SELECT); /* DLAB select */
|
||||
OUT(REG_BRDL, 1); /* Baud divisor = 1 */
|
||||
OUT(REG_BRDH, 0);
|
||||
OUT(REG_BRDL, 1U); /* Baud divisor = 1 */
|
||||
OUT(REG_BRDH, 0U);
|
||||
OUT(REG_LCR, LCR_8N1); /* LCR = 8n1 + DLAB off */
|
||||
OUT(REG_MCR, MCR_DTR | MCR_RTS);
|
||||
|
||||
|
||||
@@ -57,10 +57,10 @@ bool z_x86_check_stack_bounds(uintptr_t addr, size_t size, uint16_t cs)
|
||||
{
|
||||
uintptr_t start, end;
|
||||
|
||||
if (_current == NULL || arch_is_in_isr()) {
|
||||
if ((_current == NULL) || arch_is_in_isr()) {
|
||||
/* We were servicing an interrupt or in early boot environment
|
||||
* and are supposed to be on the interrupt stack */
|
||||
int cpu_id;
|
||||
uint8_t cpu_id;
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
cpu_id = arch_curr_cpu()->id;
|
||||
@@ -71,8 +71,8 @@ bool z_x86_check_stack_bounds(uintptr_t addr, size_t size, uint16_t cs)
|
||||
z_interrupt_stacks[cpu_id]);
|
||||
end = start + CONFIG_ISR_STACK_SIZE;
|
||||
#ifdef CONFIG_USERSPACE
|
||||
} else if ((cs & 0x3U) == 0U &&
|
||||
(_current->base.user_options & K_USER) != 0) {
|
||||
} else if (((cs & 0x3U) == 0U) &&
|
||||
((_current->base.user_options & K_USER) != 0)) {
|
||||
/* The low two bits of the CS register is the privilege
|
||||
* level. It will be 0 in supervisor mode and 3 in user mode
|
||||
* corresponding to ring 0 / ring 3.
|
||||
@@ -90,7 +90,7 @@ bool z_x86_check_stack_bounds(uintptr_t addr, size_t size, uint16_t cs)
|
||||
_current->stack_info.size);
|
||||
}
|
||||
|
||||
return (addr <= start) || (addr + size > end);
|
||||
return (addr <= start) || ((addr + size) > end);
|
||||
}
|
||||
#endif
|
||||
|
||||
@@ -158,7 +158,7 @@ static inline uintptr_t get_cr3(const z_arch_esf_t *esf)
|
||||
/* If the interrupted thread was in user mode, we did a page table
|
||||
* switch when we took the exception via z_x86_trampoline_to_kernel
|
||||
*/
|
||||
if ((esf->cs & 0x3) != 0) {
|
||||
if ((esf->cs & 0x3U) != 0) {
|
||||
return _current->arch.ptables;
|
||||
}
|
||||
#else
|
||||
@@ -307,8 +307,8 @@ static void dump_page_fault(z_arch_esf_t *esf)
|
||||
LOG_ERR("Linear address not present in page tables");
|
||||
}
|
||||
LOG_ERR("Access violation: %s thread not allowed to %s",
|
||||
(err & PF_US) != 0U ? "user" : "supervisor",
|
||||
(err & PF_ID) != 0U ? "execute" : ((err & PF_WR) != 0U ?
|
||||
((err & PF_US) != 0U) ? "user" : "supervisor",
|
||||
((err & PF_ID) != 0U) ? "execute" : (((err & PF_WR) != 0U) ?
|
||||
"write" :
|
||||
"read"));
|
||||
if ((err & PF_PK) != 0) {
|
||||
@@ -356,7 +356,7 @@ FUNC_NORETURN void z_x86_unhandled_cpu_exception(uintptr_t vector,
|
||||
#else
|
||||
ARG_UNUSED(vector);
|
||||
#endif
|
||||
z_x86_fatal_error(K_ERR_CPU_EXCEPTION, esf);
|
||||
z_x86_fatal_error((unsigned int)K_ERR_CPU_EXCEPTION, esf);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_USERSPACE
|
||||
@@ -413,18 +413,16 @@ void z_x86_page_fault_handler(z_arch_esf_t *esf)
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_USERSPACE
|
||||
int i;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(exceptions); i++) {
|
||||
for (size_t i = 0; i < ARRAY_SIZE(exceptions); i++) {
|
||||
#ifdef CONFIG_X86_64
|
||||
if ((void *)esf->rip >= exceptions[i].start &&
|
||||
(void *)esf->rip < exceptions[i].end) {
|
||||
if (((void *)esf->rip >= exceptions[i].start) &&
|
||||
((void *)esf->rip < exceptions[i].end)) {
|
||||
esf->rip = (uint64_t)(exceptions[i].fixup);
|
||||
return;
|
||||
}
|
||||
#else
|
||||
if ((void *)esf->eip >= exceptions[i].start &&
|
||||
(void *)esf->eip < exceptions[i].end) {
|
||||
if (((void *)esf->eip >= exceptions[i].start) &&
|
||||
((void *)esf->eip < exceptions[i].end)) {
|
||||
esf->eip = (unsigned int)(exceptions[i].fixup);
|
||||
return;
|
||||
}
|
||||
@@ -435,21 +433,21 @@ void z_x86_page_fault_handler(z_arch_esf_t *esf)
|
||||
dump_page_fault(esf);
|
||||
#endif
|
||||
#ifdef CONFIG_THREAD_STACK_INFO
|
||||
if (z_x86_check_stack_bounds(esf_get_sp(esf), 0, esf->cs)) {
|
||||
z_x86_fatal_error(K_ERR_STACK_CHK_FAIL, esf);
|
||||
if (z_x86_check_stack_bounds(esf_get_sp(esf), 0, (uint16_t)esf->cs)) {
|
||||
z_x86_fatal_error((unsigned int)K_ERR_STACK_CHK_FAIL, esf);
|
||||
}
|
||||
#endif
|
||||
z_x86_fatal_error(K_ERR_CPU_EXCEPTION, esf);
|
||||
z_x86_fatal_error((unsigned int)K_ERR_CPU_EXCEPTION, esf);
|
||||
CODE_UNREACHABLE;
|
||||
}
|
||||
|
||||
__pinned_func
|
||||
void z_x86_do_kernel_oops(const z_arch_esf_t *esf)
|
||||
{
|
||||
uintptr_t reason;
|
||||
unsigned int reason;
|
||||
|
||||
#ifdef CONFIG_X86_64
|
||||
reason = esf->rax;
|
||||
reason = (unsigned int)esf->rax;
|
||||
#else
|
||||
uintptr_t *stack_ptr = (uintptr_t *)esf->esp;
|
||||
|
||||
@@ -460,9 +458,9 @@ void z_x86_do_kernel_oops(const z_arch_esf_t *esf)
|
||||
/* User mode is only allowed to induce oopses and stack check
|
||||
* failures via this software interrupt
|
||||
*/
|
||||
if ((esf->cs & 0x3) != 0 && !(reason == K_ERR_KERNEL_OOPS ||
|
||||
reason == K_ERR_STACK_CHK_FAIL)) {
|
||||
reason = K_ERR_KERNEL_OOPS;
|
||||
if (((esf->cs & 0x3U) != 0) && !((reason == (unsigned int)K_ERR_KERNEL_OOPS) ||
|
||||
(reason == (unsigned int)K_ERR_STACK_CHK_FAIL))) {
|
||||
reason = (unsigned int)K_ERR_KERNEL_OOPS;
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
@@ -42,18 +42,18 @@ void z_x86_spurious_irq(const z_arch_esf_t *esf)
|
||||
}
|
||||
|
||||
__pinned_func
|
||||
void arch_syscall_oops(void *ssf)
|
||||
void arch_syscall_oops(void *ssf_ptr)
|
||||
{
|
||||
struct _x86_syscall_stack_frame *ssf_ptr =
|
||||
(struct _x86_syscall_stack_frame *)ssf;
|
||||
struct _x86_syscall_stack_frame *ssf =
|
||||
(struct _x86_syscall_stack_frame *)ssf_ptr;
|
||||
z_arch_esf_t oops = {
|
||||
.eip = ssf_ptr->eip,
|
||||
.cs = ssf_ptr->cs,
|
||||
.eflags = ssf_ptr->eflags
|
||||
.eip = ssf->eip,
|
||||
.cs = ssf->cs,
|
||||
.eflags = ssf->eflags
|
||||
};
|
||||
|
||||
if (oops.cs == USER_CODE_SEG) {
|
||||
oops.esp = ssf_ptr->esp;
|
||||
oops.esp = ssf->esp;
|
||||
}
|
||||
|
||||
z_x86_fatal_error(K_ERR_KERNEL_OOPS, &oops);
|
||||
|
||||
@@ -79,7 +79,7 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
|
||||
void *swap_entry;
|
||||
struct _x86_initial_frame *initial_frame;
|
||||
|
||||
#if CONFIG_X86_STACK_PROTECTION
|
||||
#ifdef CONFIG_X86_STACK_PROTECTION
|
||||
z_x86_set_stack_guard(stack);
|
||||
#endif
|
||||
|
||||
|
||||
@@ -128,7 +128,7 @@ struct x86_cpuboot x86_cpuboot[] = {
|
||||
void arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz,
|
||||
arch_cpustart_t fn, void *arg)
|
||||
{
|
||||
uint8_t vector = ((unsigned long) x86_ap_start) >> 12;
|
||||
uint8_t vector = (uint8_t)(((uintptr_t)x86_ap_start) >> 12);
|
||||
uint8_t apic_id;
|
||||
|
||||
if (IS_ENABLED(CONFIG_ACPI)) {
|
||||
@@ -143,8 +143,8 @@ void arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz,
|
||||
|
||||
apic_id = x86_cpu_loapics[cpu_num];
|
||||
|
||||
x86_cpuboot[cpu_num].sp = (uint64_t) Z_KERNEL_STACK_BUFFER(stack) + sz;
|
||||
x86_cpuboot[cpu_num].stack_size = sz;
|
||||
x86_cpuboot[cpu_num].sp = (uint64_t) Z_KERNEL_STACK_BUFFER(stack) + (size_t)sz;
|
||||
x86_cpuboot[cpu_num].stack_size = (size_t)sz;
|
||||
x86_cpuboot[cpu_num].fn = fn;
|
||||
x86_cpuboot[cpu_num].arg = arg;
|
||||
|
||||
@@ -188,6 +188,6 @@ FUNC_NORETURN void z_x86_cpu_init(struct x86_cpuboot *cpuboot)
|
||||
#endif
|
||||
|
||||
/* Enter kernel, never return */
|
||||
cpuboot->ready++;
|
||||
cpuboot->ready += 1;
|
||||
cpuboot->fn(cpuboot->arg);
|
||||
}
|
||||
|
||||
@@ -15,6 +15,8 @@ LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
|
||||
*/
|
||||
__weak bool z_x86_do_kernel_nmi(const z_arch_esf_t *esf)
|
||||
{
|
||||
ARG_UNUSED(esf);
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -46,6 +48,6 @@ void arch_syscall_oops(void *ssf_ptr)
|
||||
|
||||
LOG_ERR("Bad system call from RIP 0x%lx", ssf->rip);
|
||||
|
||||
z_x86_fatal_error(K_ERR_KERNEL_OOPS, NULL);
|
||||
z_x86_fatal_error((unsigned int)K_ERR_KERNEL_OOPS, NULL);
|
||||
}
|
||||
#endif /* CONFIG_USERSPACE */
|
||||
|
||||
@@ -16,7 +16,7 @@
|
||||
|
||||
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
|
||||
|
||||
unsigned char _irq_to_interrupt_vector[CONFIG_MAX_IRQ_LINES];
|
||||
uint8_t _irq_to_interrupt_vector[CONFIG_MAX_IRQ_LINES];
|
||||
|
||||
/*
|
||||
* The low-level interrupt code consults these arrays to dispatch IRQs, so
|
||||
@@ -26,40 +26,43 @@ unsigned char _irq_to_interrupt_vector[CONFIG_MAX_IRQ_LINES];
|
||||
|
||||
#define NR_IRQ_VECTORS (IV_NR_VECTORS - IV_IRQS) /* # vectors free for IRQs */
|
||||
|
||||
void (*x86_irq_funcs[NR_IRQ_VECTORS])(const void *);
|
||||
void (*x86_irq_funcs[NR_IRQ_VECTORS])(const void *arg);
|
||||
const void *x86_irq_args[NR_IRQ_VECTORS];
|
||||
|
||||
static void irq_spurious(const void *arg)
|
||||
{
|
||||
LOG_ERR("Spurious interrupt, vector %d\n", (uint32_t)(uint64_t)arg);
|
||||
z_fatal_error(K_ERR_SPURIOUS_IRQ, NULL);
|
||||
z_fatal_error((unsigned int)K_ERR_SPURIOUS_IRQ, NULL);
|
||||
}
|
||||
|
||||
void x86_64_irq_init(void)
|
||||
{
|
||||
for (int i = 0; i < NR_IRQ_VECTORS; i++) {
|
||||
for (unsigned int i = 0; i < NR_IRQ_VECTORS; i++) {
|
||||
x86_irq_funcs[i] = irq_spurious;
|
||||
x86_irq_args[i] = (const void *)(long)(i + IV_IRQS);
|
||||
x86_irq_args[i] = (const void *)((uintptr_t)i + IV_IRQS);
|
||||
}
|
||||
}
|
||||
|
||||
int z_x86_allocate_vector(unsigned int priority, int prev_vector)
|
||||
{
|
||||
const int VECTORS_PER_PRIORITY = 16;
|
||||
const int MAX_PRIORITY = 13;
|
||||
const unsigned int VECTORS_PER_PRIORITY = 16;
|
||||
const unsigned int MAX_PRIORITY = 13;
|
||||
int vector = prev_vector;
|
||||
int i;
|
||||
|
||||
if (priority >= MAX_PRIORITY) {
|
||||
priority = MAX_PRIORITY;
|
||||
}
|
||||
|
||||
if (vector == -1) {
|
||||
vector = (priority * VECTORS_PER_PRIORITY) + IV_IRQS;
|
||||
const unsigned int uvector = (priority * VECTORS_PER_PRIORITY) + IV_IRQS;
|
||||
|
||||
vector = (int)uvector;
|
||||
}
|
||||
|
||||
for (i = 0; i < VECTORS_PER_PRIORITY; ++i, ++vector) {
|
||||
if (prev_vector != 1 && vector == prev_vector) {
|
||||
const int end_vector = vector + (int) VECTORS_PER_PRIORITY;
|
||||
|
||||
for (; vector < end_vector; ++vector) {
|
||||
if ((prev_vector != 1) && (vector == prev_vector)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
@@ -72,7 +75,7 @@ int z_x86_allocate_vector(unsigned int priority, int prev_vector)
|
||||
continue;
|
||||
}
|
||||
|
||||
if (x86_irq_funcs[vector - IV_IRQS] == irq_spurious) {
|
||||
if (x86_irq_funcs[(unsigned int)vector - IV_IRQS] == irq_spurious) {
|
||||
return vector;
|
||||
}
|
||||
}
|
||||
@@ -98,8 +101,8 @@ void z_x86_irq_connect_on_vector(unsigned int irq,
|
||||
*/
|
||||
|
||||
int arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
|
||||
void (*func)(const void *arg),
|
||||
const void *arg, uint32_t flags)
|
||||
void (*routine)(const void *parameter),
|
||||
const void *parameter, uint32_t flags)
|
||||
{
|
||||
uint32_t key;
|
||||
int vector;
|
||||
@@ -110,7 +113,7 @@ int arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
|
||||
|
||||
vector = z_x86_allocate_vector(priority, -1);
|
||||
if (vector >= 0) {
|
||||
z_x86_irq_connect_on_vector(irq, vector, func, arg, flags);
|
||||
z_x86_irq_connect_on_vector(irq, (uint8_t)vector, routine, parameter, flags);
|
||||
}
|
||||
|
||||
irq_unlock(key);
|
||||
|
||||
@@ -10,7 +10,7 @@
|
||||
#include <offsets_short.h>
|
||||
#include <x86_mmu.h>
|
||||
|
||||
extern void x86_sse_init(struct k_thread *); /* in locore.S */
|
||||
extern void x86_sse_init(struct k_thread *thread); /* in locore.S */
|
||||
|
||||
/* FIXME: This exists to make space for a "return address" at the top
|
||||
* of the stack. Obviously this is unused at runtime, but is required
|
||||
@@ -32,8 +32,10 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
|
||||
void *switch_entry;
|
||||
struct x86_initial_frame *iframe;
|
||||
|
||||
#if CONFIG_X86_STACK_PROTECTION
|
||||
#ifdef CONFIG_X86_STACK_PROTECTION
|
||||
z_x86_set_stack_guard(stack);
|
||||
#else
|
||||
ARG_UNUSED(stack);
|
||||
#endif
|
||||
#ifdef CONFIG_USERSPACE
|
||||
switch_entry = z_x86_userspace_prepare_thread(thread);
|
||||
@@ -44,17 +46,17 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
|
||||
#endif
|
||||
iframe = Z_STACK_PTR_TO_FRAME(struct x86_initial_frame, stack_ptr);
|
||||
iframe->rip = 0U;
|
||||
thread->callee_saved.rsp = (long) iframe;
|
||||
thread->callee_saved.rip = (long) switch_entry;
|
||||
thread->callee_saved.rsp = (uint64_t) iframe;
|
||||
thread->callee_saved.rip = (uint64_t) switch_entry;
|
||||
thread->callee_saved.rflags = EFLAGS_INITIAL;
|
||||
|
||||
/* Parameters to entry point, which is populated in
|
||||
* thread->callee_saved.rip
|
||||
*/
|
||||
thread->arch.rdi = (long) entry;
|
||||
thread->arch.rsi = (long) p1;
|
||||
thread->arch.rdx = (long) p2;
|
||||
thread->arch.rcx = (long) p3;
|
||||
thread->arch.rdi = (uint64_t) entry;
|
||||
thread->arch.rsi = (uint64_t) p1;
|
||||
thread->arch.rdx = (uint64_t) p2;
|
||||
thread->arch.rcx = (uint64_t) p3;
|
||||
|
||||
x86_sse_init(thread);
|
||||
|
||||
@@ -65,11 +67,16 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
|
||||
int arch_float_disable(struct k_thread *thread)
|
||||
{
|
||||
/* x86-64 always has FP/SSE enabled so cannot be disabled */
|
||||
ARG_UNUSED(thread);
|
||||
|
||||
return -ENOTSUP;
|
||||
}
|
||||
|
||||
int arch_float_enable(struct k_thread *thread, unsigned int options)
|
||||
{
|
||||
/* x86-64 always has FP/SSE enabled so nothing to do here */
|
||||
ARG_UNUSED(thread);
|
||||
ARG_UNUSED(options);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -37,19 +37,20 @@ static void pcie_mm_init(void)
|
||||
struct acpi_mcfg *m = z_acpi_find_table(ACPI_MCFG_SIGNATURE);
|
||||
|
||||
if (m != NULL) {
|
||||
int n = (m->sdt.length - sizeof(*m)) / sizeof(m->pci_segs[0]);
|
||||
size_t n = (m->sdt.length - sizeof(*m)) / sizeof(m->pci_segs[0]);
|
||||
|
||||
for (int i = 0; i < n && i < MAX_PCI_BUS_SEGMENTS; i++) {
|
||||
for (size_t i = 0; (i < n) && (i < MAX_PCI_BUS_SEGMENTS); i++) {
|
||||
size_t size;
|
||||
uintptr_t phys_addr;
|
||||
|
||||
bus_segs[i].start_bus = m->pci_segs[i].start_bus;
|
||||
bus_segs[i].n_buses = 1 + m->pci_segs[i].end_bus
|
||||
bus_segs[i].n_buses = (uint32_t)1 + m->pci_segs[i].end_bus
|
||||
- m->pci_segs[i].start_bus;
|
||||
|
||||
phys_addr = m->pci_segs[i].base_addr;
|
||||
/* 32 devices & 8 functions per bus, 4k per device */
|
||||
size = bus_segs[i].n_buses * (32 * 8 * 4096);
|
||||
size = bus_segs[i].n_buses;
|
||||
size *= 32 * 8 * 4096;
|
||||
|
||||
device_map((mm_reg_t *)&bus_segs[i].mmio, phys_addr,
|
||||
size, K_MEM_CACHE_NONE);
|
||||
@@ -63,10 +64,11 @@ static void pcie_mm_init(void)
|
||||
static inline void pcie_mm_conf(pcie_bdf_t bdf, unsigned int reg,
|
||||
bool write, uint32_t *data)
|
||||
{
|
||||
for (int i = 0; i < ARRAY_SIZE(bus_segs); i++) {
|
||||
int off = PCIE_BDF_TO_BUS(bdf) - bus_segs[i].start_bus;
|
||||
for (size_t i = 0; i < ARRAY_SIZE(bus_segs); i++) {
|
||||
/* Wrapping is deliberate and will be filtered by conditional below */
|
||||
uint32_t off = PCIE_BDF_TO_BUS(bdf) - bus_segs[i].start_bus;
|
||||
|
||||
if (off >= 0 && off < bus_segs[i].n_buses) {
|
||||
if (off < bus_segs[i].n_buses) {
|
||||
bdf = PCIE_BDF(off,
|
||||
PCIE_BDF_TO_DEV(bdf),
|
||||
PCIE_BDF_TO_FUNC(bdf));
|
||||
@@ -187,6 +189,8 @@ uint32_t pcie_msi_map(unsigned int irq,
|
||||
ARG_UNUSED(irq);
|
||||
#if defined(CONFIG_INTEL_VTD_ICTL)
|
||||
#if !defined(CONFIG_PCIE_MSI_X)
|
||||
ARG_UNUSED(vector);
|
||||
|
||||
if (vector != NULL) {
|
||||
map = vtd_remap_msi(vtd, vector);
|
||||
} else
|
||||
@@ -195,6 +199,8 @@ uint32_t pcie_msi_map(unsigned int irq,
|
||||
map = vtd_remap_msi(vtd, vector);
|
||||
} else
|
||||
#endif
|
||||
#else
|
||||
ARG_UNUSED(vector);
|
||||
#endif
|
||||
{
|
||||
map = 0xFEE00000U; /* standard delivery to BSP local APIC */
|
||||
|
||||
@@ -45,7 +45,7 @@ FUNC_NORETURN void z_x86_prep_c(void *arg)
|
||||
ARG_UNUSED(info);
|
||||
#endif
|
||||
|
||||
#if CONFIG_X86_STACK_PROTECTION
|
||||
#ifdef CONFIG_X86_STACK_PROTECTION
|
||||
for (int i = 0; i < CONFIG_MP_NUM_CPUS; i++) {
|
||||
z_x86_set_stack_guard(z_interrupt_stacks[i]);
|
||||
}
|
||||
|
||||
@@ -177,7 +177,7 @@ static const struct paging_level paging_levels[] = {
|
||||
}
|
||||
};
|
||||
|
||||
#define NUM_LEVELS ARRAY_SIZE(paging_levels)
|
||||
#define NUM_LEVELS ((unsigned int)ARRAY_SIZE(paging_levels))
|
||||
#define PTE_LEVEL (NUM_LEVELS - 1)
|
||||
#define PDE_LEVEL (NUM_LEVELS - 2)
|
||||
|
||||
@@ -203,14 +203,14 @@ static const struct paging_level paging_levels[] = {
|
||||
#endif /* !CONFIG_X86_64 && !CONFIG_X86_PAE */
|
||||
|
||||
/* Memory range covered by an instance of various table types */
|
||||
#define PT_AREA ((uintptr_t)(CONFIG_MMU_PAGE_SIZE * NUM_PT_ENTRIES))
|
||||
#define PT_AREA ((uintptr_t)CONFIG_MMU_PAGE_SIZE * NUM_PT_ENTRIES)
|
||||
#define PD_AREA (PT_AREA * NUM_PD_ENTRIES)
|
||||
#ifdef CONFIG_X86_64
|
||||
#define PDPT_AREA (PD_AREA * NUM_PDPT_ENTRIES)
|
||||
#endif
|
||||
|
||||
#define VM_ADDR CONFIG_KERNEL_VM_BASE
|
||||
#define VM_SIZE CONFIG_KERNEL_VM_SIZE
|
||||
#define VM_ADDR ((uintptr_t)CONFIG_KERNEL_VM_BASE)
|
||||
#define VM_SIZE ((uintptr_t)CONFIG_KERNEL_VM_SIZE)
|
||||
|
||||
/* Define a range [PT_START, PT_END) which is the memory range
|
||||
* covered by all the page tables needed for the address space
|
||||
@@ -257,7 +257,7 @@ static const struct paging_level paging_levels[] = {
|
||||
#endif /* CONFIG_X86_64 */
|
||||
|
||||
#define INITIAL_PTABLE_PAGES \
|
||||
(NUM_TABLE_PAGES + CONFIG_X86_EXTRA_PAGE_TABLE_PAGES)
|
||||
(NUM_TABLE_PAGES + (uintptr_t)CONFIG_X86_EXTRA_PAGE_TABLE_PAGES)
|
||||
|
||||
#ifdef CONFIG_X86_PAE
|
||||
/* Toplevel PDPT wasn't included as it is not a page in size */
|
||||
@@ -265,7 +265,7 @@ static const struct paging_level paging_levels[] = {
|
||||
((INITIAL_PTABLE_PAGES * CONFIG_MMU_PAGE_SIZE) + 0x20)
|
||||
#else
|
||||
#define INITIAL_PTABLE_SIZE \
|
||||
(INITIAL_PTABLE_PAGES * CONFIG_MMU_PAGE_SIZE)
|
||||
(INITIAL_PTABLE_PAGES * (uintptr_t)CONFIG_MMU_PAGE_SIZE)
|
||||
#endif
|
||||
|
||||
/* "dummy" pagetables for the first-phase build. The real page tables
|
||||
@@ -283,48 +283,48 @@ static __used char dummy_pagetables[INITIAL_PTABLE_SIZE];
|
||||
* the provided virtual address
|
||||
*/
|
||||
__pinned_func
|
||||
static inline int get_index(void *virt, int level)
|
||||
static inline uintptr_t get_index(void *virt, unsigned int level)
|
||||
{
|
||||
return (((uintptr_t)virt >> paging_levels[level].shift) %
|
||||
paging_levels[level].entries);
|
||||
}
|
||||
|
||||
__pinned_func
|
||||
static inline pentry_t *get_entry_ptr(pentry_t *ptables, void *virt, int level)
|
||||
static inline pentry_t *get_entry_ptr(pentry_t *ptables, void *virt, unsigned int level)
|
||||
{
|
||||
return &ptables[get_index(virt, level)];
|
||||
}
|
||||
|
||||
__pinned_func
|
||||
static inline pentry_t get_entry(pentry_t *ptables, void *virt, int level)
|
||||
static inline pentry_t get_entry(pentry_t *ptables, void *virt, unsigned int level)
|
||||
{
|
||||
return ptables[get_index(virt, level)];
|
||||
}
|
||||
|
||||
/* Get the physical memory address associated with this table entry */
|
||||
__pinned_func
|
||||
static inline uintptr_t get_entry_phys(pentry_t entry, int level)
|
||||
static inline uintptr_t get_entry_phys(pentry_t entry, unsigned int level)
|
||||
{
|
||||
return entry & paging_levels[level].mask;
|
||||
}
|
||||
|
||||
/* Return the virtual address of a linked table stored in the provided entry */
|
||||
__pinned_func
|
||||
static inline pentry_t *next_table(pentry_t entry, int level)
|
||||
static inline pentry_t *next_table(pentry_t entry, unsigned int level)
|
||||
{
|
||||
return z_mem_virt_addr(get_entry_phys(entry, level));
|
||||
}
|
||||
|
||||
/* Number of table entries at this level */
|
||||
__pinned_func
|
||||
static inline size_t get_num_entries(int level)
|
||||
static inline size_t get_num_entries(unsigned int level)
|
||||
{
|
||||
return paging_levels[level].entries;
|
||||
}
|
||||
|
||||
/* 4K for everything except PAE PDPTs */
|
||||
__pinned_func
|
||||
static inline size_t table_size(int level)
|
||||
static inline size_t table_size(unsigned int level)
|
||||
{
|
||||
return get_num_entries(level) * sizeof(pentry_t);
|
||||
}
|
||||
@@ -333,7 +333,7 @@ static inline size_t table_size(int level)
|
||||
* that an entry within the table covers
|
||||
*/
|
||||
__pinned_func
|
||||
static inline size_t get_entry_scope(int level)
|
||||
static inline size_t get_entry_scope(unsigned int level)
|
||||
{
|
||||
return (1UL << paging_levels[level].shift);
|
||||
}
|
||||
@@ -342,7 +342,7 @@ static inline size_t get_entry_scope(int level)
|
||||
* that this entire table covers
|
||||
*/
|
||||
__pinned_func
|
||||
static inline size_t get_table_scope(int level)
|
||||
static inline size_t get_table_scope(unsigned int level)
|
||||
{
|
||||
return get_entry_scope(level) * get_num_entries(level);
|
||||
}
|
||||
@@ -351,7 +351,7 @@ static inline size_t get_table_scope(int level)
|
||||
* stored in any other bits
|
||||
*/
|
||||
__pinned_func
|
||||
static inline bool is_leaf(int level, pentry_t entry)
|
||||
static inline bool is_leaf(unsigned int level, pentry_t entry)
|
||||
{
|
||||
if (level == PTE_LEVEL) {
|
||||
/* Always true for PTE */
|
||||
@@ -363,15 +363,15 @@ static inline bool is_leaf(int level, pentry_t entry)
|
||||
|
||||
/* This does NOT (by design) un-flip KPTI PTEs, it's just the raw PTE value */
|
||||
__pinned_func
|
||||
static inline void pentry_get(int *paging_level, pentry_t *val,
|
||||
static inline void pentry_get(unsigned int *paging_level, pentry_t *val,
|
||||
pentry_t *ptables, void *virt)
|
||||
{
|
||||
pentry_t *table = ptables;
|
||||
|
||||
for (int level = 0; level < NUM_LEVELS; level++) {
|
||||
for (unsigned int level = 0; level < NUM_LEVELS; level++) {
|
||||
pentry_t entry = get_entry(table, virt, level);
|
||||
|
||||
if ((entry & MMU_P) == 0 || is_leaf(level, entry)) {
|
||||
if (((entry & MMU_P) == 0) || is_leaf(level, entry)) {
|
||||
*val = entry;
|
||||
if (paging_level != NULL) {
|
||||
*paging_level = level;
|
||||
@@ -398,7 +398,7 @@ static inline void tlb_flush_page(void *addr)
|
||||
__pinned_func
|
||||
static inline bool is_flipped_pte(pentry_t pte)
|
||||
{
|
||||
return (pte & MMU_P) == 0 && (pte & PTE_ZERO) != 0;
|
||||
return ((pte & MMU_P) == 0) && ((pte & PTE_ZERO) != 0);
|
||||
}
|
||||
#endif
|
||||
|
||||
@@ -449,6 +449,8 @@ static inline void assert_addr_aligned(uintptr_t addr)
|
||||
#if __ASSERT_ON
|
||||
__ASSERT((addr & (CONFIG_MMU_PAGE_SIZE - 1)) == 0U,
|
||||
"unaligned address 0x%" PRIxPTR, addr);
|
||||
#else
|
||||
ARG_UNUSED(addr);
|
||||
#endif
|
||||
}
|
||||
|
||||
@@ -465,6 +467,8 @@ static inline void assert_region_page_aligned(void *addr, size_t size)
|
||||
#if __ASSERT_ON
|
||||
__ASSERT((size & (CONFIG_MMU_PAGE_SIZE - 1)) == 0U,
|
||||
"unaligned size %zu", size);
|
||||
#else
|
||||
ARG_UNUSED(size);
|
||||
#endif
|
||||
}
|
||||
|
||||
@@ -477,18 +481,18 @@ static inline void assert_region_page_aligned(void *addr, size_t size)
|
||||
#define COLOR_PAGE_TABLES 1
|
||||
|
||||
#if COLOR_PAGE_TABLES
|
||||
#define ANSI_DEFAULT "\x1B[0m"
|
||||
#define ANSI_RED "\x1B[1;31m"
|
||||
#define ANSI_GREEN "\x1B[1;32m"
|
||||
#define ANSI_YELLOW "\x1B[1;33m"
|
||||
#define ANSI_BLUE "\x1B[1;34m"
|
||||
#define ANSI_MAGENTA "\x1B[1;35m"
|
||||
#define ANSI_CYAN "\x1B[1;36m"
|
||||
#define ANSI_GREY "\x1B[1;90m"
|
||||
#define ANSI_DEFAULT "\x1B" "[0m"
|
||||
#define ANSI_RED "\x1B" "[1;31m"
|
||||
#define ANSI_GREEN "\x1B" "[1;32m"
|
||||
#define ANSI_YELLOW "\x1B" "[1;33m"
|
||||
#define ANSI_BLUE "\x1B" "[1;34m"
|
||||
#define ANSI_MAGENTA "\x1B" "[1;35m"
|
||||
#define ANSI_CYAN "\x1B" "[1;36m"
|
||||
#define ANSI_GREY "\x1B" "[1;90m"
|
||||
|
||||
#define COLOR(x) printk(_CONCAT(ANSI_, x))
|
||||
#else
|
||||
#define COLOR(x) do { } while (0)
|
||||
#define COLOR(x) do { } while (false)
|
||||
#endif
|
||||
|
||||
__pinned_func
|
||||
@@ -521,7 +525,7 @@ static char get_entry_code(pentry_t value)
|
||||
|
||||
if ((value & MMU_US) != 0U) {
|
||||
/* Uppercase indicates user mode access */
|
||||
ret = toupper(ret);
|
||||
ret = (char)toupper((int)(unsigned char)ret);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -529,12 +533,12 @@ static char get_entry_code(pentry_t value)
|
||||
}
|
||||
|
||||
__pinned_func
|
||||
static void print_entries(pentry_t entries_array[], uint8_t *base, int level,
|
||||
static void print_entries(pentry_t entries_array[], uint8_t *base, unsigned int level,
|
||||
size_t count)
|
||||
{
|
||||
int column = 0;
|
||||
|
||||
for (int i = 0; i < count; i++) {
|
||||
for (size_t i = 0; i < count; i++) {
|
||||
pentry_t entry = entries_array[i];
|
||||
|
||||
uintptr_t phys = get_entry_phys(entry, level);
|
||||
@@ -546,7 +550,7 @@ static void print_entries(pentry_t entries_array[], uint8_t *base, int level,
|
||||
if (phys == virt) {
|
||||
/* Identity mappings */
|
||||
COLOR(YELLOW);
|
||||
} else if (phys + Z_MEM_VM_OFFSET == virt) {
|
||||
} else if ((phys + Z_MEM_VM_OFFSET) == virt) {
|
||||
/* Permanent RAM mappings */
|
||||
COLOR(GREEN);
|
||||
} else {
|
||||
@@ -602,7 +606,7 @@ static void print_entries(pentry_t entries_array[], uint8_t *base, int level,
|
||||
}
|
||||
|
||||
__pinned_func
|
||||
static void dump_ptables(pentry_t *table, uint8_t *base, int level)
|
||||
static void dump_ptables(pentry_t *table, uint8_t *base, unsigned int level)
|
||||
{
|
||||
const struct paging_level *info = &paging_levels[level];
|
||||
|
||||
@@ -630,12 +634,12 @@ static void dump_ptables(pentry_t *table, uint8_t *base, int level)
|
||||
}
|
||||
|
||||
/* Dump all linked child tables */
|
||||
for (int j = 0; j < info->entries; j++) {
|
||||
for (size_t j = 0; j < info->entries; j++) {
|
||||
pentry_t entry = table[j];
|
||||
pentry_t *next;
|
||||
|
||||
if ((entry & MMU_P) == 0U ||
|
||||
(entry & MMU_PS) != 0U) {
|
||||
if (((entry & MMU_P) == 0U) ||
|
||||
((entry & MMU_PS) != 0U)) {
|
||||
/* Not present or big page, skip */
|
||||
continue;
|
||||
}
|
||||
@@ -672,7 +676,10 @@ SYS_INIT(dump_kernel_tables, APPLICATION, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
|
||||
__pinned_func
|
||||
static void str_append(char **buf, size_t *size, const char *str)
|
||||
{
|
||||
int ret = snprintk(*buf, *size, "%s", str);
|
||||
/*? snprintk has int return type, but negative values are not tested
|
||||
* and not currently returned from implementation
|
||||
*/
|
||||
size_t ret = (size_t)snprintk(*buf, *size, "%s", str);
|
||||
|
||||
if (ret >= *size) {
|
||||
/* Truncated */
|
||||
@@ -685,7 +692,7 @@ static void str_append(char **buf, size_t *size, const char *str)
|
||||
}
|
||||
|
||||
__pinned_func
|
||||
static void dump_entry(int level, void *virt, pentry_t entry)
|
||||
static void dump_entry(unsigned int level, void *virt, pentry_t entry)
|
||||
{
|
||||
const struct paging_level *info = &paging_levels[level];
|
||||
char buf[24] = { 0 };
|
||||
@@ -697,7 +704,7 @@ static void dump_entry(int level, void *virt, pentry_t entry)
|
||||
if ((entry & MMU_##bit) != 0U) { \
|
||||
str_append(&pos, &sz, #bit " "); \
|
||||
} \
|
||||
} while (0)
|
||||
} while (false)
|
||||
|
||||
DUMP_BIT(RW);
|
||||
DUMP_BIT(US);
|
||||
@@ -715,7 +722,7 @@ static void dump_entry(int level, void *virt, pentry_t entry)
|
||||
}
|
||||
|
||||
__pinned_func
|
||||
void z_x86_pentry_get(int *paging_level, pentry_t *val, pentry_t *ptables,
|
||||
void z_x86_pentry_get(unsigned int *paging_level, pentry_t *val, pentry_t *ptables,
|
||||
void *virt)
|
||||
{
|
||||
pentry_get(paging_level, val, ptables, virt);
|
||||
@@ -729,7 +736,7 @@ __pinned_func
|
||||
void z_x86_dump_mmu_flags(pentry_t *ptables, void *virt)
|
||||
{
|
||||
pentry_t entry = 0;
|
||||
int level = 0;
|
||||
unsigned int level = 0;
|
||||
|
||||
pentry_get(&level, &entry, ptables, virt);
|
||||
|
||||
@@ -775,16 +782,19 @@ static inline pentry_t reset_pte(pentry_t old_val)
|
||||
*/
|
||||
__pinned_func
|
||||
static inline pentry_t pte_finalize_value(pentry_t val, bool user_table,
|
||||
int level)
|
||||
unsigned int level)
|
||||
{
|
||||
#ifdef CONFIG_X86_KPTI
|
||||
static const uintptr_t shared_phys_addr =
|
||||
Z_MEM_PHYS_ADDR(POINTER_TO_UINT(&z_shared_kernel_page_start));
|
||||
|
||||
if (user_table && (val & MMU_US) == 0 && (val & MMU_P) != 0 &&
|
||||
get_entry_phys(val, level) != shared_phys_addr) {
|
||||
if (user_table && ((val & MMU_US) == 0) && ((val & MMU_P) != 0) &&
|
||||
(get_entry_phys(val, level) != shared_phys_addr)) {
|
||||
val = ~val;
|
||||
}
|
||||
#else
|
||||
ARG_UNUSED(user_table);
|
||||
ARG_UNUSED(level);
|
||||
#endif
|
||||
return val;
|
||||
}
|
||||
@@ -798,7 +808,7 @@ static inline pentry_t pte_finalize_value(pentry_t val, bool user_table,
|
||||
__pinned_func
|
||||
static inline pentry_t atomic_pte_get(const pentry_t *target)
|
||||
{
|
||||
return (pentry_t)atomic_ptr_get((atomic_ptr_t *)target);
|
||||
return (pentry_t)atomic_ptr_get((const atomic_ptr_t *)target);
|
||||
}
|
||||
|
||||
__pinned_func
|
||||
@@ -843,23 +853,23 @@ static inline bool atomic_pte_cas(pentry_t *target, pentry_t old_value,
|
||||
* page tables need nearly all pages that don't have the US bit to also
|
||||
* not be Present.
|
||||
*/
|
||||
#define OPTION_USER BIT(0)
|
||||
#define OPTION_USER BIT32(0)
|
||||
|
||||
/* Indicates that the operation requires TLBs to be flushed as we are altering
|
||||
* existing mappings. Not needed for establishing new mappings
|
||||
*/
|
||||
#define OPTION_FLUSH BIT(1)
|
||||
#define OPTION_FLUSH BIT32(1)
|
||||
|
||||
/* Indicates that each PTE's permission bits should be restored to their
|
||||
* original state when the memory was mapped. All other bits in the PTE are
|
||||
* preserved.
|
||||
*/
|
||||
#define OPTION_RESET BIT(2)
|
||||
#define OPTION_RESET BIT32(2)
|
||||
|
||||
/* Indicates that the mapping will need to be cleared entirely. This is
|
||||
* mainly used for unmapping the memory region.
|
||||
*/
|
||||
#define OPTION_CLEAR BIT(3)
|
||||
#define OPTION_CLEAR BIT32(3)
|
||||
|
||||
/**
|
||||
* Atomically update bits in a page table entry
|
||||
@@ -954,12 +964,8 @@ static void page_map_set(pentry_t *ptables, void *virt, pentry_t entry_val,
|
||||
pentry_t *table = ptables;
|
||||
bool flush = (options & OPTION_FLUSH) != 0U;
|
||||
|
||||
for (int level = 0; level < NUM_LEVELS; level++) {
|
||||
int index;
|
||||
pentry_t *entryp;
|
||||
|
||||
index = get_index(virt, level);
|
||||
entryp = &table[index];
|
||||
for (unsigned int level = 0; level < NUM_LEVELS; level++) {
|
||||
pentry_t *entryp = &table[get_index(virt, level)];
|
||||
|
||||
/* Check if we're a PTE */
|
||||
if (level == PTE_LEVEL) {
|
||||
@@ -1072,8 +1078,8 @@ __pinned_func
|
||||
static void range_map(void *virt, uintptr_t phys, size_t size,
|
||||
pentry_t entry_flags, pentry_t mask, uint32_t options)
|
||||
{
|
||||
LOG_DBG("%s: %p -> %p (%zu) flags " PRI_ENTRY " mask "
|
||||
PRI_ENTRY " opt 0x%x", __func__, (void *)phys, virt, size,
|
||||
LOG_DBG("%s: 0x%" PRIxPTR " -> %p (%zu) flags " PRI_ENTRY " mask "
|
||||
PRI_ENTRY " opt 0x%x", __func__, phys, virt, size,
|
||||
entry_flags, mask, options);
|
||||
|
||||
#ifdef CONFIG_X86_64
|
||||
@@ -1178,7 +1184,7 @@ void arch_mem_map(void *virt, uintptr_t phys, size_t size, uint32_t flags)
|
||||
/* unmap region addr..addr+size, reset entries and flush TLB */
|
||||
void arch_mem_unmap(void *addr, size_t size)
|
||||
{
|
||||
range_map_unlocked((void *)addr, 0, size, 0, 0,
|
||||
range_map_unlocked(addr, 0, size, 0, 0,
|
||||
OPTION_FLUSH | OPTION_CLEAR);
|
||||
}
|
||||
|
||||
@@ -1237,7 +1243,7 @@ void z_x86_mmu_init(void)
|
||||
#endif
|
||||
}
|
||||
|
||||
#if CONFIG_X86_STACK_PROTECTION
|
||||
#ifdef CONFIG_X86_STACK_PROTECTION
|
||||
__pinned_func
|
||||
void z_x86_set_stack_guard(k_thread_stack_t *stack)
|
||||
{
|
||||
@@ -1258,9 +1264,9 @@ void z_x86_set_stack_guard(k_thread_stack_t *stack)
|
||||
__pinned_func
|
||||
static bool page_validate(pentry_t *ptables, uint8_t *addr, bool write)
|
||||
{
|
||||
pentry_t *table = (pentry_t *)ptables;
|
||||
pentry_t *table = ptables;
|
||||
|
||||
for (int level = 0; level < NUM_LEVELS; level++) {
|
||||
for (unsigned int level = 0; level < NUM_LEVELS; level++) {
|
||||
pentry_t entry = get_entry(table, addr, level);
|
||||
|
||||
if (is_leaf(level, entry)) {
|
||||
@@ -1303,7 +1309,7 @@ static inline void bcb_fence(void)
|
||||
}
|
||||
|
||||
__pinned_func
|
||||
int arch_buffer_validate(void *addr, size_t size, int write)
|
||||
int arch_buffer_validate(const void *addr, size_t size, bool write)
|
||||
{
|
||||
pentry_t *ptables = z_x86_thread_page_tables_get(_current);
|
||||
uint8_t *virt;
|
||||
@@ -1311,7 +1317,7 @@ int arch_buffer_validate(void *addr, size_t size, int write)
|
||||
int ret = 0;
|
||||
|
||||
/* addr/size arbitrary, fix this up into an aligned region */
|
||||
k_mem_region_align((uintptr_t *)&virt, &aligned_size,
|
||||
(void)k_mem_region_align((uintptr_t *)&virt, &aligned_size,
|
||||
(uintptr_t)addr, size, CONFIG_MMU_PAGE_SIZE);
|
||||
|
||||
for (size_t offset = 0; offset < aligned_size;
|
||||
@@ -1526,9 +1532,9 @@ static void *page_pool_get(void)
|
||||
|
||||
/* Debugging function to show how many pages are free in the pool */
|
||||
__pinned_func
|
||||
static inline unsigned int pages_free(void)
|
||||
static inline size_t pages_free(void)
|
||||
{
|
||||
return (page_pos - page_pool) / CONFIG_MMU_PAGE_SIZE;
|
||||
return (size_t)(page_pos - page_pool) / CONFIG_MMU_PAGE_SIZE;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -1548,11 +1554,11 @@ static inline unsigned int pages_free(void)
|
||||
* @retval -ENOMEM Insufficient page pool memory
|
||||
*/
|
||||
__pinned_func
|
||||
static int copy_page_table(pentry_t *dst, pentry_t *src, int level)
|
||||
static int copy_page_table(pentry_t *dst, pentry_t *src, unsigned int level)
|
||||
{
|
||||
if (level == PTE_LEVEL) {
|
||||
/* Base case: leaf page table */
|
||||
for (int i = 0; i < get_num_entries(level); i++) {
|
||||
for (size_t i = 0; i < get_num_entries(level); i++) {
|
||||
dst[i] = pte_finalize_value(reset_pte(src[i]), true,
|
||||
PTE_LEVEL);
|
||||
}
|
||||
@@ -1560,7 +1566,7 @@ static int copy_page_table(pentry_t *dst, pentry_t *src, int level)
|
||||
/* Recursive case: allocate sub-structures as needed and
|
||||
* make recursive calls on them
|
||||
*/
|
||||
for (int i = 0; i < get_num_entries(level); i++) {
|
||||
for (size_t i = 0; i < get_num_entries(level); i++) {
|
||||
pentry_t *child_dst;
|
||||
int ret;
|
||||
|
||||
@@ -1647,8 +1653,8 @@ static inline void apply_region(pentry_t *ptables, void *start,
|
||||
__pinned_func
|
||||
static void set_stack_perms(struct k_thread *thread, pentry_t *ptables)
|
||||
{
|
||||
LOG_DBG("update stack for thread %p's ptables at %p: %p (size %zu)",
|
||||
thread, ptables, (void *)thread->stack_info.start,
|
||||
LOG_DBG("update stack for thread %p's ptables at %p: 0x%" PRIxPTR " (size %zu)",
|
||||
thread, ptables, thread->stack_info.start,
|
||||
thread->stack_info.size);
|
||||
apply_region(ptables, (void *)thread->stack_info.start,
|
||||
thread->stack_info.size,
|
||||
@@ -1792,8 +1798,8 @@ void arch_mem_domain_thread_add(struct k_thread *thread)
|
||||
}
|
||||
|
||||
thread->arch.ptables = z_mem_phys_addr(domain->arch.ptables);
|
||||
LOG_DBG("set thread %p page tables to %p", thread,
|
||||
(void *)thread->arch.ptables);
|
||||
LOG_DBG("set thread %p page tables to 0x%" PRIxPTR, thread,
|
||||
thread->arch.ptables);
|
||||
|
||||
/* Check if we're doing a migration from a different memory domain
|
||||
* and have to remove permissions from its old domain.
|
||||
@@ -1921,7 +1927,8 @@ void arch_reserved_pages_update(void)
|
||||
int arch_page_phys_get(void *virt, uintptr_t *phys)
|
||||
{
|
||||
pentry_t pte = 0;
|
||||
int level, ret;
|
||||
unsigned int level;
|
||||
int ret;
|
||||
|
||||
__ASSERT(POINTER_TO_UINT(virt) % CONFIG_MMU_PAGE_SIZE == 0U,
|
||||
"unaligned address %p to %s", virt, __func__);
|
||||
@@ -1930,7 +1937,7 @@ int arch_page_phys_get(void *virt, uintptr_t *phys)
|
||||
|
||||
if ((pte & MMU_P) != 0) {
|
||||
if (phys != NULL) {
|
||||
*phys = (uintptr_t)get_entry_phys(pte, PTE_LEVEL);
|
||||
*phys = get_entry_phys(pte, PTE_LEVEL);
|
||||
}
|
||||
ret = 0;
|
||||
} else {
|
||||
@@ -2053,7 +2060,7 @@ __pinned_func
|
||||
enum arch_page_location arch_page_location_get(void *addr, uintptr_t *location)
|
||||
{
|
||||
pentry_t pte;
|
||||
int level;
|
||||
unsigned int level;
|
||||
|
||||
/* TODO: since we only have to query the current set of page tables,
|
||||
* could optimize this with recursive page table mapping
|
||||
@@ -2080,7 +2087,7 @@ __pinned_func
|
||||
bool z_x86_kpti_is_access_ok(void *addr, pentry_t *ptables)
|
||||
{
|
||||
pentry_t pte;
|
||||
int level;
|
||||
unsigned int level;
|
||||
|
||||
pentry_get(&level, &pte, ptables, addr);
|
||||
|
||||
|
||||
@@ -89,7 +89,7 @@ void z_x86_dump_mmu_flags(pentry_t *ptables, void *virt);
|
||||
* @param ptables Toplevel pointer to page tables
|
||||
* @param virt Virtual address to lookup
|
||||
*/
|
||||
void z_x86_pentry_get(int *paging_level, pentry_t *val, pentry_t *ptables,
|
||||
void z_x86_pentry_get(unsigned int *paging_level, pentry_t *val, pentry_t *ptables,
|
||||
void *virt);
|
||||
|
||||
/**
|
||||
@@ -209,14 +209,16 @@ extern pentry_t z_x86_kernel_ptables[];
|
||||
static inline pentry_t *z_x86_thread_page_tables_get(struct k_thread *thread)
|
||||
{
|
||||
#if defined(CONFIG_USERSPACE) && !defined(CONFIG_X86_COMMON_PAGE_TABLE)
|
||||
if (!IS_ENABLED(CONFIG_X86_KPTI) ||
|
||||
(thread->base.user_options & K_USER) != 0U) {
|
||||
if (!(IS_ENABLED(CONFIG_X86_KPTI)) ||
|
||||
((thread->base.user_options & K_USER) != 0U)) {
|
||||
/* If KPTI is enabled, supervisor threads always use
|
||||
* the kernel's page tables and not the page tables associated
|
||||
* with their memory domain.
|
||||
*/
|
||||
return z_mem_virt_addr(thread->arch.ptables);
|
||||
}
|
||||
#else
|
||||
ARG_UNUSED(thread);
|
||||
#endif
|
||||
return z_x86_kernel_ptables;
|
||||
}
|
||||
|
||||
@@ -10,10 +10,10 @@
|
||||
|
||||
#define __abi __attribute__((ms_abi))
|
||||
|
||||
typedef uintptr_t __abi (*efi_fn1_t)(void *);
|
||||
typedef uintptr_t __abi (*efi_fn2_t)(void *, void *);
|
||||
typedef uintptr_t __abi (*efi_fn3_t)(void *, void *, void *);
|
||||
typedef uintptr_t __abi (*efi_fn4_t)(void *, void *, void *, void *);
|
||||
typedef uintptr_t __abi (*efi_fn1_t)(void *arg1);
|
||||
typedef uintptr_t __abi (*efi_fn2_t)(void *arg1, void *arg2);
|
||||
typedef uintptr_t __abi (*efi_fn3_t)(void *arg1, void *arg2, void *arg3);
|
||||
typedef uintptr_t __abi (*efi_fn4_t)(void *arg1, void *arg2, void *arg3, void *arg4);
|
||||
|
||||
struct efi_simple_text_output {
|
||||
efi_fn2_t Reset;
|
||||
|
||||
@@ -5,6 +5,8 @@
|
||||
*/
|
||||
#include <stdarg.h>
|
||||
#include <stdbool.h>
|
||||
#include <stddef.h>
|
||||
#include <stdint.h>
|
||||
|
||||
/* Tiny, but not-as-primitive-as-it-looks implementation of something
|
||||
* like s/n/printf(). Handles %d, %x, %p, %c and %s only, allows a
|
||||
@@ -15,21 +17,21 @@
|
||||
|
||||
struct _pfr {
|
||||
char *buf;
|
||||
int len;
|
||||
int idx;
|
||||
size_t len;
|
||||
size_t idx;
|
||||
};
|
||||
|
||||
/* Set this function pointer to something that generates output */
|
||||
static void (*z_putchar)(int c);
|
||||
|
||||
static void pc(struct _pfr *r, int c)
|
||||
static void pc(struct _pfr *r, char c)
|
||||
{
|
||||
if (r->buf) {
|
||||
if (r->buf != NULL) {
|
||||
if (r->idx <= r->len) {
|
||||
r->buf[r->idx] = c;
|
||||
}
|
||||
} else {
|
||||
z_putchar(c);
|
||||
z_putchar((int)c);
|
||||
}
|
||||
r->idx++;
|
||||
}
|
||||
@@ -41,30 +43,34 @@ static void prdec(struct _pfr *r, long v)
|
||||
v = -v;
|
||||
}
|
||||
|
||||
char digs[11 * sizeof(long)/4];
|
||||
int i = sizeof(digs) - 1;
|
||||
char digs[11U * sizeof(long) / 4];
|
||||
size_t i = sizeof(digs) - 1;
|
||||
|
||||
digs[i--] = 0;
|
||||
while (v || i == 9) {
|
||||
digs[i--] = '0' + (v % 10);
|
||||
digs[i] = '\0';
|
||||
--i;
|
||||
while ((v != 0) || (i == 9)) {
|
||||
digs[i] = '0' + (v % 10);
|
||||
--i;
|
||||
v /= 10;
|
||||
}
|
||||
|
||||
while (digs[++i]) {
|
||||
++i;
|
||||
while (digs[i] != '\0') {
|
||||
pc(r, digs[i]);
|
||||
++i;
|
||||
}
|
||||
}
|
||||
|
||||
static void endrec(struct _pfr *r)
|
||||
{
|
||||
if (r->buf && r->idx < r->len) {
|
||||
r->buf[r->idx] = 0;
|
||||
if ((r->buf != NULL) && (r->idx < r->len)) {
|
||||
r->buf[r->idx] = '\0';
|
||||
}
|
||||
}
|
||||
|
||||
static int vpf(struct _pfr *r, const char *f, va_list ap)
|
||||
static size_t vpf(struct _pfr *r, const char *f, va_list ap)
|
||||
{
|
||||
for (/**/; *f; f++) {
|
||||
for (/**/; *f != '\0'; f++) {
|
||||
bool islong = false;
|
||||
|
||||
if (*f != '%') {
|
||||
@@ -78,30 +84,32 @@ static int vpf(struct _pfr *r, const char *f, va_list ap)
|
||||
}
|
||||
|
||||
/* Ignore (but accept) field width and precision values */
|
||||
while (f[1] >= '0' && f[1] <= '9') {
|
||||
while ((f[1] >= '0') && (f[1] <= '9')) {
|
||||
f++;
|
||||
}
|
||||
if (f[1] == '.') {
|
||||
f++;
|
||||
}
|
||||
while (f[1] >= '0' && f[1] <= '9') {
|
||||
while ((f[1] >= '0') && (f[1] <= '9')) {
|
||||
f++;
|
||||
}
|
||||
|
||||
switch (*(++f)) {
|
||||
case 0:
|
||||
case '\0':
|
||||
return r->idx;
|
||||
case '%':
|
||||
pc(r, '%');
|
||||
break;
|
||||
case 'c':
|
||||
pc(r, va_arg(ap, int));
|
||||
pc(r, (char)va_arg(ap, int));
|
||||
break;
|
||||
case 's': {
|
||||
char *s = va_arg(ap, char *);
|
||||
|
||||
while (*s)
|
||||
pc(r, *s++);
|
||||
while (*s != '\0') {
|
||||
pc(r, *s);
|
||||
++s;
|
||||
}
|
||||
break;
|
||||
}
|
||||
case 'p':
|
||||
@@ -109,15 +117,21 @@ static int vpf(struct _pfr *r, const char *f, va_list ap)
|
||||
pc(r, 'x'); /* fall through... */
|
||||
islong = sizeof(long) > 4;
|
||||
case 'x': {
|
||||
int sig = 0;
|
||||
bool sig = false;
|
||||
unsigned long v = islong ? va_arg(ap, unsigned long)
|
||||
: va_arg(ap, unsigned int);
|
||||
for (int i = 2*sizeof(long) - 1; i >= 0; i--) {
|
||||
int d = (v >> (i*4)) & 0xf;
|
||||
size_t i = 2 * sizeof(v);
|
||||
|
||||
sig += !!d;
|
||||
if (sig || i == 0)
|
||||
while (i > 0) {
|
||||
--i;
|
||||
uint8_t d = (uint8_t)((v >> (i * 4)) & 0x0f);
|
||||
|
||||
if (d != 0) {
|
||||
sig = true;
|
||||
}
|
||||
if (sig || (i == 0)) {
|
||||
pc(r, "0123456789abcdef"[d]);
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
@@ -136,12 +150,12 @@ static int vpf(struct _pfr *r, const char *f, va_list ap)
|
||||
#define CALL_VPF(rec) \
|
||||
va_list ap; \
|
||||
va_start(ap, f); \
|
||||
ret = vpf(&r, f, ap); \
|
||||
ret = (int)vpf(&r, f, ap); \
|
||||
va_end(ap);
|
||||
|
||||
static inline int snprintf(char *buf, unsigned long len, const char *f, ...)
|
||||
static inline int snprintf(char *buf, size_t len, const char *f, ...)
|
||||
{
|
||||
int ret = 0;
|
||||
int ret;
|
||||
struct _pfr r = { .buf = buf, .len = len };
|
||||
|
||||
CALL_VPF(&r);
|
||||
@@ -150,7 +164,7 @@ static inline int snprintf(char *buf, unsigned long len, const char *f, ...)
|
||||
|
||||
static inline int sprintf(char *buf, const char *f, ...)
|
||||
{
|
||||
int ret = 0;
|
||||
int ret;
|
||||
struct _pfr r = { .buf = buf, .len = 0x7fffffff };
|
||||
|
||||
CALL_VPF(&r);
|
||||
@@ -159,7 +173,7 @@ static inline int sprintf(char *buf, const char *f, ...)
|
||||
|
||||
static inline int printf(const char *f, ...)
|
||||
{
|
||||
int ret = 0;
|
||||
int ret;
|
||||
struct _pfr r = {0};
|
||||
|
||||
CALL_VPF(&r);
|
||||
|
||||
@@ -18,7 +18,7 @@
|
||||
* stuff after.
|
||||
*/
|
||||
static __attribute__((section(".runtime_data_end")))
|
||||
uint64_t runtime_data_end[1] = { 0x1111aa8888aa1111L };
|
||||
uint64_t runtime_data_end[1] = { 0x1111aa8888aa1111ULL };
|
||||
|
||||
#define EXT_DATA_START ((void *) &runtime_data_end[1])
|
||||
|
||||
@@ -29,13 +29,14 @@ static void efi_putchar(int c)
|
||||
static uint16_t efibuf[PUTCHAR_BUFSZ + 1];
|
||||
static int n;
|
||||
|
||||
if (c == '\n') {
|
||||
efi_putchar('\r');
|
||||
if (c == (int)'\n') {
|
||||
efi_putchar((int)'\r');
|
||||
}
|
||||
|
||||
efibuf[n++] = c;
|
||||
efibuf[n] = (uint16_t)c;
|
||||
++n;
|
||||
|
||||
if (c == '\n' || n == PUTCHAR_BUFSZ) {
|
||||
if ((c == (int)'\n') || (n == PUTCHAR_BUFSZ)) {
|
||||
efibuf[n] = 0U;
|
||||
efi->ConOut->OutputString(efi->ConOut, efibuf);
|
||||
n = 0;
|
||||
@@ -57,7 +58,7 @@ static void disable_hpet(void)
|
||||
{
|
||||
uint64_t *hpet = (uint64_t *)0xfed00000L;
|
||||
|
||||
hpet[32] &= ~4;
|
||||
hpet[32] &= ~4ULL;
|
||||
}
|
||||
|
||||
/* FIXME: if you check the generated code, "ms_abi" calls like this
|
||||
@@ -67,29 +68,31 @@ static void disable_hpet(void)
|
||||
*/
|
||||
uintptr_t __abi efi_entry(void *img_handle, struct efi_system_table *sys_tab)
|
||||
{
|
||||
(void)img_handle;
|
||||
|
||||
efi = sys_tab;
|
||||
z_putchar = efi_putchar;
|
||||
printf("*** Zephyr EFI Loader ***\n");
|
||||
|
||||
for (int i = 0; i < sizeof(zefi_zsegs)/sizeof(zefi_zsegs[0]); i++) {
|
||||
int bytes = zefi_zsegs[i].sz;
|
||||
for (size_t i = 0; i < (sizeof(zefi_zsegs)/sizeof(zefi_zsegs[0])); i++) {
|
||||
uint32_t bytes = zefi_zsegs[i].sz;
|
||||
uint8_t *dst = (uint8_t *)zefi_zsegs[i].addr;
|
||||
|
||||
printf("Zeroing %d bytes of memory at %p\n", bytes, dst);
|
||||
for (int j = 0; j < bytes; j++) {
|
||||
printf("Zeroing %u bytes of memory at %p\n", bytes, dst);
|
||||
for (uint32_t j = 0; j < bytes; j++) {
|
||||
dst[j] = 0U;
|
||||
}
|
||||
}
|
||||
|
||||
for (int i = 0; i < sizeof(zefi_dsegs)/sizeof(zefi_dsegs[0]); i++) {
|
||||
int bytes = zefi_dsegs[i].sz;
|
||||
int off = zefi_dsegs[i].off;
|
||||
for (size_t i = 0; i < (sizeof(zefi_dsegs)/sizeof(zefi_dsegs[0])); i++) {
|
||||
uint32_t bytes = zefi_dsegs[i].sz;
|
||||
uint32_t off = zefi_dsegs[i].off;
|
||||
uint8_t *dst = (uint8_t *)zefi_dsegs[i].addr;
|
||||
uint8_t *src = &((uint8_t *)EXT_DATA_START)[off];
|
||||
|
||||
printf("Copying %d data bytes to %p from image offset %d\n",
|
||||
printf("Copying %u data bytes to %p from image offset %u\n",
|
||||
bytes, dst, zefi_dsegs[i].off);
|
||||
for (int j = 0; j < bytes; j++) {
|
||||
for (uint32_t j = 0; j < bytes; j++) {
|
||||
dst[j] = src[j];
|
||||
}
|
||||
|
||||
@@ -101,7 +104,7 @@ uintptr_t __abi efi_entry(void *img_handle, struct efi_system_table *sys_tab)
|
||||
* starts, because the very first thing it does is
|
||||
* install its own page table that disallows writes.
|
||||
*/
|
||||
if (((long)dst & 0xfff) == 0 && dst < (uint8_t *)0x100000L) {
|
||||
if ((((uintptr_t)dst & 0xfff) == 0) && ((uintptr_t)dst < 0x100000ULL)) {
|
||||
for (int i = 0; i < 8; i++) {
|
||||
dst[i] = 0x90; /* 0x90 == 1-byte NOP */
|
||||
}
|
||||
@@ -120,7 +123,7 @@ uintptr_t __abi efi_entry(void *img_handle, struct efi_system_table *sys_tab)
|
||||
* to drain before we start banging on the same UART from the
|
||||
* OS.
|
||||
*/
|
||||
for (volatile int i = 0; i < 50000000; i++) {
|
||||
for (volatile int i = 0; i < 50000000; i += 1) {
|
||||
}
|
||||
|
||||
__asm__ volatile("cli; jmp *%0" :: "r"(code));
|
||||
|
||||
@@ -14,6 +14,13 @@ config QEMU_TARGET
|
||||
Mark all QEMU targets with this variable for checking whether we are
|
||||
running in an emulated environment.
|
||||
|
||||
config NET_DRIVERS
|
||||
bool
|
||||
default y if QEMU_TARGET && NETWORKING
|
||||
help
|
||||
When building for a QEMU target, NET_DRIVERS will be enabled by default
|
||||
to allow for easy use of SLIP or PPP
|
||||
|
||||
# Note: $BOARD_DIR might be a glob pattern
|
||||
|
||||
choice
|
||||
|
||||
@@ -10,7 +10,6 @@
|
||||
-prop=nsim_isa_vec64=1
|
||||
-dcache=65536,64,2,a
|
||||
-dcache_feature=2
|
||||
-dcache_uncached_region
|
||||
-dcache_mem_cycles=2
|
||||
-icache=65536,64,4,a
|
||||
-icache_feature=2
|
||||
|
||||
@@ -10,7 +10,6 @@
|
||||
-prop=nsim_isa_vec64=1
|
||||
-dcache=65536,64,2,a
|
||||
-dcache_feature=2
|
||||
-dcache_uncached_region
|
||||
-dcache_mem_cycles=2
|
||||
-icache=65536,64,4,a
|
||||
-icache_feature=2
|
||||
|
||||
@@ -11,7 +11,6 @@ CONFIG_HW_STACK_PROTECTION=y
|
||||
# enable peripherals
|
||||
CONFIG_GPIO=y
|
||||
CONFIG_SERIAL=y
|
||||
CONFIG_CONSOLE=y
|
||||
|
||||
# enable sam-ba bootloader on legacy mode
|
||||
CONFIG_BOOTLOADER_BOSSA=y
|
||||
|
||||
@@ -255,9 +255,9 @@ Debugging
|
||||
STM32U5 is not currently supported in openocd. As a temporary workaround,
|
||||
users can use the `STMicroelectronics customized version of OpenOCD`_ to debug the
|
||||
B_U585I_IOT02A Discovery kit.
|
||||
For this you need to fetch this repo and build openocd following the instructions
|
||||
provided in the README of the project. Then, build zephyr project indicating the
|
||||
openocd location in west build command.
|
||||
For this you need to fetch this repo, checkout branch "openocd-cubeide-r3" and
|
||||
build openocd following the instructions provided in the README of the project.
|
||||
Then, build zephyr project indicating the openocd location in west build command.
|
||||
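A rough sketch of that fetch-and-checkout step as console commands (the clone URL is the one behind the link above and is shown only as a placeholder; ``openocd-st`` is an arbitrary directory name, and the build itself should follow the project's README):

.. code-block:: console

   $ git clone <STMicroelectronics OpenOCD repository URL> openocd-st
   $ cd openocd-st
   $ git checkout openocd-cubeide-r3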
|
||||
Here is an example for the :ref:`blinky-sample` application.
|
||||
|
||||
@@ -269,24 +269,18 @@ Here is an example for the :ref:`blinky-sample` application.
|
||||
|
||||
Then, indicate openocd as the chosen runner in flash and debug commands:
|
||||
|
||||
.. zephyr-app-commands::
|
||||
:zephyr-app: samples/basic/blinky
|
||||
:board: b_u585i_iot02a
|
||||
:gen-args: -r openocd
|
||||
:goals: flash
|
||||
|
||||
.. zephyr-app-commands::
|
||||
:zephyr-app: samples/basic/blinky
|
||||
:board: nucleo_u575zi_q
|
||||
:gen-args: -r openocd
|
||||
:goals: debug
|
||||
.. code-block:: console
|
||||
|
||||
$ west flash -r openocd
|
||||
$ west debug -r openocd
|
||||
|
||||
|
||||
.. _B U585I IOT02A Discovery kit website:
|
||||
https://www.st.com/en/evaluation-tools/b-u585i-iot02a.html
|
||||
|
||||
.. _B U585I IOT02A board User Manual:
|
||||
https://www.st.com/resource/en/user_manual/dm00698410.pdf
|
||||
https://www.st.com/resource/en/user_manual/um2839-discovery-kit-for-iot-node-with-stm32u5-series-stmicroelectronics.pdf
|
||||
|
||||
.. _STM32U585 on www.st.com:
|
||||
https://www.st.com/en/microcontrollers-microprocessors/stm32u575-585.html
|
||||
|
||||
BIN boards/arm/bl5340_dvk/doc/img/bl5340_dvk_top.png (new binary file, 59 KiB, not shown)
@@ -43,8 +43,8 @@ This development kit has the following features:
|
||||
* :abbr:`USB (Universal Serial Bus)`
|
||||
* :abbr:`WDT (Watchdog Timer)`
|
||||
|
||||
.. figure:: img/bl5340_dvk_front.jpg
|
||||
:width: 800px
|
||||
.. figure:: img/bl5340_dvk_top.png
|
||||
:width: 340px
|
||||
:align: center
|
||||
:alt: BL5340 DVK
|
||||
|
||||
|
||||
@@ -7,7 +7,6 @@
|
||||
CONFIG_SOC_LPC54114_M0=y
|
||||
CONFIG_SOC_SERIES_LPC54XXX=y
|
||||
CONFIG_BOARD_LPCXPRESSO54114_M0=y
|
||||
CONFIG_CONSOLE=y
|
||||
CONFIG_USE_SEGGER_RTT=y
|
||||
CONFIG_SERIAL=n
|
||||
CONFIG_CORTEX_M_SYSTICK=y
|
||||
|
||||
@@ -13,9 +13,6 @@ CONFIG_HW_STACK_PROTECTION=y
|
||||
# enable GPIO
|
||||
CONFIG_GPIO=y
|
||||
|
||||
# enable console
|
||||
CONFIG_CONSOLE=y
|
||||
|
||||
# additional board options
|
||||
CONFIG_GPIO_AS_PINRESET=y
|
||||
CONFIG_NFCT_PINS_AS_GPIOS=y
|
||||
|
||||
@@ -12,4 +12,10 @@ config SPI_STM32_INTERRUPT
|
||||
default y
|
||||
depends on SPI
|
||||
|
||||
# FIXME: LSE not working as LPTIM clock source. Use LSI instead.
|
||||
choice STM32_LPTIM_CLOCK
|
||||
default STM32_LPTIM_CLOCK_LSI
|
||||
depends on STM32_LPTIM_TIMER
|
||||
endchoice
|
||||
|
||||
endif # BOARD_NUCLEO_L073RZ
|
||||
|
||||
@@ -240,9 +240,9 @@ Debugging
|
||||
STM32U5 is not currently supported in openocd. As a temporary workaround,
|
||||
users can use the `STMicroelectronics customized version of OpenOCD`_ to debug the
|
||||
Nucleo U575ZI Q.
|
||||
For this you need to fetch this repo and build openocd following the instructions
|
||||
provided in the README of the project. Then, build zephyr project indicating the
|
||||
openocd location in west build command.
|
||||
For this you need to fetch this repo, checkout branch "openocd-cubeide-r3" and
|
||||
build openocd following the instructions provided in the README of the project.
|
||||
Then, build zephyr project indicating the openocd location in west build command.
|
||||
|
||||
Here is an example for the :ref:`blinky-sample` application.
|
||||
|
||||
@@ -254,17 +254,11 @@ Here is an example for the :ref:`blinky-sample` application.
|
||||
|
||||
Then, indicate openocd as the chosen runner in flash and debug commands:
|
||||
|
||||
.. zephyr-app-commands::
|
||||
:zephyr-app: samples/basic/blinky
|
||||
:board: nucleo_u575zi_q
|
||||
:gen-args: -r openocd
|
||||
:goals: flash
|
||||
|
||||
.. zephyr-app-commands::
|
||||
:zephyr-app: samples/basic/blinky
|
||||
:board: nucleo_u575zi_q
|
||||
:gen-args: -r openocd
|
||||
:goals: debug
|
||||
.. code-block:: console
|
||||
|
||||
$ west flash -r openocd
|
||||
$ west debug -r openocd
|
||||
|
||||
|
||||
.. _STM32 Nucleo-144 board User Manual:
|
||||
|
||||
@@ -12,6 +12,3 @@ CONFIG_HW_STACK_PROTECTION=y
|
||||
|
||||
# Enable GPIO
|
||||
CONFIG_GPIO=y
|
||||
|
||||
# Enable console
|
||||
CONFIG_CONSOLE=y
|
||||
|
||||
@@ -70,14 +70,14 @@ void posix_irq_priority_set(unsigned int irq, unsigned int prio,
|
||||
} \
|
||||
static inline int name##_body(void)
|
||||
|
||||
#define ARCH_ISR_DIRECT_HEADER() do { } while (0)
|
||||
#define ARCH_ISR_DIRECT_FOOTER(a) do { } while (0)
|
||||
#define ARCH_ISR_DIRECT_HEADER() do { } while (false)
|
||||
#define ARCH_ISR_DIRECT_FOOTER(a) do { } while (false)
|
||||
|
||||
#ifdef CONFIG_PM
|
||||
extern void posix_irq_check_idle_exit(void);
|
||||
#define ARCH_ISR_DIRECT_PM() posix_irq_check_idle_exit()
|
||||
#else
|
||||
#define ARCH_ISR_DIRECT_PM() do { } while (0)
|
||||
#define ARCH_ISR_DIRECT_PM() do { } while (false)
|
||||
#endif
|
||||
|
||||
#ifdef __cplusplus
|
||||
|
||||
@@ -70,14 +70,14 @@ void posix_irq_priority_set(unsigned int irq, unsigned int prio,
|
||||
} \
|
||||
static inline int name##_body(void)
|
||||
|
||||
#define ARCH_ISR_DIRECT_HEADER() do { } while (0)
|
||||
#define ARCH_ISR_DIRECT_FOOTER(a) do { } while (0)
|
||||
#define ARCH_ISR_DIRECT_HEADER() do { } while (false)
|
||||
#define ARCH_ISR_DIRECT_FOOTER(a) do { } while (false)
|
||||
|
||||
#ifdef CONFIG_PM
|
||||
extern void posix_irq_check_idle_exit(void);
|
||||
#define ARCH_ISR_DIRECT_PM() posix_irq_check_idle_exit()
|
||||
#else
|
||||
#define ARCH_ISR_DIRECT_PM() do { } while (0)
|
||||
#define ARCH_ISR_DIRECT_PM() do { } while (false)
|
||||
#endif
|
||||
|
||||
#ifdef __cplusplus
|
||||
|
||||
BIN
boards/x86/acrn/doc/ACRN-Hybrid.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 113 KiB |
@@ -5,6 +5,28 @@ Zephyr's is capable of running as a guest under the x86 ACRN
|
||||
hypervisor (see https://projectacrn.org/). The process for getting
|
||||
this to work is somewhat involved, however.
|
||||
|
||||
ACRN hypervisor supports a hybrid scenario where Zephyr runs in a so-
|
||||
called "pre-launched" mode. This means Zephyr will access the ACRN
|
||||
hypervisor directly without involving the SOS VM. This is the most
|
||||
practical user scenario in the real world because Zephyr's real-time
|
||||
and safety capabilities can be assured without influence from other
|
||||
VMs. The following figure from ACRN's official documentation shows
|
||||
how a hybrid scenario works:
|
||||
|
||||
.. figure:: ACRN-Hybrid.png
|
||||
:align: center
|
||||
:alt: ACRN Hybrid User Scenario
|
||||
:figclass: align-center
|
||||
:width: 80%
|
||||
|
||||
ACRN Hybrid User Scenario
|
||||
|
||||
In this tutorial, we will show you how to build a minimal running instance of Zephyr
|
||||
and ACRN hypervisor to demonstrate that it works successfully. To learn more about
|
||||
other features of ACRN, such as building and using the SOS VM or other guest VMs,
|
||||
please refer to the Getting Started Guide for ACRN:
|
||||
https://projectacrn.github.io/latest/tutorials/using_hybrid_mode_on_nuc.html
|
||||
|
||||
Build your Zephyr App
|
||||
*********************
|
||||
|
||||
@@ -15,6 +37,10 @@ normally would, selecting an appropriate board:
|
||||
|
||||
west build -b acrn_ehl_crb samples/hello_world
|
||||
|
||||
In this tutorial, we will use the Intel Elkhart Lake Reference Board
|
||||
(`EHL`_ CRB) since it is one of the suggested platform for this
|
||||
type of scenario. Use ``acrn_ehl_crb`` as the target board parameter.
|
||||
|
||||
Note the kconfig output in ``build/zephyr/.config``; you will need to
reference it to configure ACRN later.
|
||||
|
||||
@@ -31,6 +57,9 @@ First you need the source code, clone from:
|
||||
|
||||
git clone https://github.com/projectacrn/acrn-hypervisor
|
||||
|
||||
We suggest that you use versions v2.5.1 or later of the ACRN hypervisor
|
||||
as they have better support for SMP in Zephyr.
|
||||
|
||||
Like Zephyr, ACRN favors build-time configuration management instead
|
||||
of runtime probing or control. Unlike Zephyr, ACRN has single large
|
||||
configuration files instead of small easily-merged configuration
|
||||
@@ -102,10 +131,51 @@ many CPUs in the ``<cpu_affinity>`` tag. For example:
|
||||
|
||||
.. code-block:: xml
|
||||
|
||||
<cpu_affinity>
|
||||
<pcpu_id>0</pcpu_id>
|
||||
<pcpu_id>1</pcpu_id>
|
||||
</cpu_affinity>
|
||||
<vm id="0">
|
||||
<vm_type>SAFETY_VM</vm_type>
|
||||
<name>ACRN PRE-LAUNCHED VM0</name>
|
||||
<guest_flags>
|
||||
<guest_flag>0</guest_flag>
|
||||
</guest_flags>
|
||||
<cpu_affinity>
|
||||
<pcpu_id>0</pcpu_id>
|
||||
<pcpu_id>1</pcpu_id>
|
||||
</cpu_affinity>
|
||||
...
|
||||
<clos>
|
||||
<vcpu_clos>0</vcpu_clos>
|
||||
<vcpu_clos>0</vcpu_clos>
|
||||
</clos>
|
||||
...
|
||||
</vm>
|
||||
|
||||
To use SMP, we have to change the pcpu_id of VM0 to 0 and 1.
|
||||
This configures ACRN to run Zephyr on CPU0 and CPU1. The ACRN hypervisor
|
||||
and Zephyr application will not boot successfully without this change.
|
||||
If you plan to run Zephyr with one CPU only, you can skip it.
|
||||
|
||||
Since Zephyr is using CPU0 and CPU1, we also have to change
|
||||
VM1's configuration so it runs on CPU2 and CPU3. If your ACRN set up has
|
||||
additional VMs, you should change their configurations as well.
|
||||
|
||||
.. code-block:: xml
|
||||
|
||||
<vm id="1">
|
||||
<vm_type>SOS_VM</vm_type>
|
||||
<name>ACRN SOS VM</name>
|
||||
<guest_flags>
|
||||
<guest_flag>0</guest_flag>
|
||||
</guest_flags>
|
||||
<cpu_affinity>
|
||||
<pcpu_id>2</pcpu_id>
|
||||
<pcpu_id>3</pcpu_id>
|
||||
</cpu_affinity>
|
||||
<clos>
|
||||
<vcpu_clos>0</vcpu_clos>
|
||||
<vcpu_clos>0</vcpu_clos>
|
||||
</clos>
|
||||
...
|
||||
</vm>
|
||||
|
||||
Note that these indexes are physical CPUs on the host. When
|
||||
configuring multiple guests, you probably don't want to overlap these
|
||||
@@ -228,3 +298,6 @@ command:
|
||||
----- Entering VM 0 Shell -----
|
||||
*** Booting Zephyr OS build v2.6.0-rc1-324-g1a03783861ad ***
|
||||
Hello World! acrn
|
||||
|
||||
|
||||
.. _EHL: https://www.intel.com/content/www/us/en/products/docs/processors/embedded/enhanced-for-iot-platform-brief.html
|
||||
|
||||
@@ -489,10 +489,7 @@ zephyr_boilerplate_watch(CONF_FILE)
|
||||
|
||||
if(DTC_OVERLAY_FILE)
|
||||
# DTC_OVERLAY_FILE has either been specified on the cmake CLI or is already
|
||||
# in the CMakeCache.txt. This has precedence over the environment
|
||||
# variable DTC_OVERLAY_FILE
|
||||
elseif(DEFINED ENV{DTC_OVERLAY_FILE})
|
||||
set(DTC_OVERLAY_FILE $ENV{DTC_OVERLAY_FILE})
|
||||
# in the CMakeCache.txt.
|
||||
elseif(APP_BOARD_DTS)
|
||||
set(DTC_OVERLAY_FILE ${APP_BOARD_DTS})
|
||||
elseif(EXISTS ${APPLICATION_SOURCE_DIR}/${BOARD}.overlay)
|
||||
|
||||
26
cmake/compiler/compiler_features.cmake
Normal file
@@ -0,0 +1,26 @@
|
||||
set(c23id c2x gnu2x)
|
||||
set(c17id c17 c18 gnu17 gnu18 "iso9899:2017" "iso9899:2018")
|
||||
set(c11id c11 gnu11 "iso9899:2011")
|
||||
set(c99id c99 gnu99 "iso9899:1999")
|
||||
set(c90id c89 c90 gnu89 gnu90 "iso9899:1990" "iso9899:199409")
|
||||
|
||||
set(compile_features_list)
|
||||
|
||||
# For each id value above, a compile_features_${idval} variable with a list of
# supported `c_std_XX` values is created for easy lookup.
# For example, the settings
# - `compile_features_c99` will contain `c_std_90;c_std_99`
# - `compile_features_iso9899:2011` will contain `c_std_90;c_std_99;c_std_11`
# that can then be used to set CMAKE_C_COMPILE_FEATURES accordingly.
|
||||
foreach(standard 90 99 11 17 23)
|
||||
list(APPEND compile_features_list c_std_${standard})
|
||||
foreach(id ${c${standard}id})
|
||||
set(compile_features_${id} ${compile_features_list})
|
||||
endforeach()
|
||||
endforeach()
|
||||
|
||||
set(compile_features_cpp98 cxx_std_98)
|
||||
set(compile_features_cpp11 cxx_std_11 ${compile_features_cpp98})
|
||||
set(compile_features_cpp14 cxx_std_14 ${compile_features_cpp11})
|
||||
set(compile_features_cpp17 cxx_std_17 ${compile_features_cpp14})
|
||||
set(compile_features_cpp20 cxx_std_20 ${compile_features_cpp17})
|
||||
@@ -24,6 +24,11 @@ set(TOOLCHAIN_SIGNATURE ${CMAKE_C_COMPILER_MD5_SUM})
|
||||
string(MD5 COMPILER_SIGNATURE ${CMAKE_C_COMPILER}_${CMAKE_C_COMPILER_ID}_${CMAKE_C_COMPILER_VERSION})
|
||||
set(TOOLCHAIN_SIGNATURE ${TOOLCHAIN_SIGNATURE}_${COMPILER_SIGNATURE})
|
||||
|
||||
# Load the compile features file which will provide compile features lists for
|
||||
# various C / CXX language dialects that can then be exported based on current
|
||||
# Zephyr Kconfig settings or the CSTD global property.
|
||||
include(${CMAKE_CURRENT_LIST_DIR}/compiler/compiler_features.cmake)
|
||||
|
||||
# Loading of templates is strictly not needed as they do not set any
# properties.
# They purely provide an overview as well as a starting point for supporting
|
||||
|
||||
95
doc/_static/css/custom.css
vendored
@@ -124,64 +124,27 @@ hr,
|
||||
border-color: var(--admonition-attention-title-background-color) !important;
|
||||
color: var(--admonition-attention-color) !important;
|
||||
}
|
||||
.rst-content dl:not(.docutils).class dt,
|
||||
.rst-content dl:not(.docutils).function dt,
|
||||
.rst-content dl:not(.docutils).method dt,
|
||||
.rst-content dl:not(.docutils).attribute dt {
|
||||
width: 100% !important;
|
||||
}
|
||||
.rst-content dl:not(.docutils).class > dt,
|
||||
.rst-content dl:not(.docutils).function > dt,
|
||||
.rst-content dl:not(.docutils).method > dt,
|
||||
.rst-content dl:not(.docutils).attribute > dt {
|
||||
font-size: 100% !important;
|
||||
font-weight: normal !important;
|
||||
margin-bottom: 16px !important;
|
||||
padding: 6px 8px !important;
|
||||
}
|
||||
.rst-content dl:not(.docutils) tt.descclassname,
|
||||
.rst-content dl:not(.docutils) code.descclassname {
|
||||
color: var(--highlight-type2-color) !important;
|
||||
font-weight: normal !important;
|
||||
}
|
||||
.rst-content dl:not(.docutils) tt.descname,
|
||||
.rst-content dl:not(.docutils) code.descname {
|
||||
color: var(--highlight-function-color) !important;
|
||||
font-weight: normal !important;
|
||||
}
|
||||
.rst-content dl:not(.docutils) .sig-paren,
|
||||
.rst-content dl:not(.docutils) .optional {
|
||||
color: var(--highlight-operator-color) !important;
|
||||
font-weight: normal !important;
|
||||
padding: 0 2px !important;
|
||||
}
|
||||
.rst-content dl:not(.docutils) .optional {
|
||||
font-style: italic !important;
|
||||
}
|
||||
.rst-content dl:not(.docutils) .sig-param,
|
||||
.rst-content dl:not(.docutils).class dt > em,
|
||||
.rst-content dl:not(.docutils).function dt > em,
|
||||
.rst-content dl:not(.docutils).method dt > em {
|
||||
color: var(--code-literal-color) !important;
|
||||
|
||||
.rst-content dt.sig .k {
|
||||
color: var(--highlight-keyword2-color) !important;
|
||||
font-style: normal !important;
|
||||
padding: 0 4px !important;
|
||||
}
|
||||
.rst-content dl:not(.docutils) .sig-param,
|
||||
.rst-content dl:not(.docutils).class dt > code,
|
||||
.rst-content dl:not(.docutils).function dt > code,
|
||||
.rst-content dl:not(.docutils).method dt > code {
|
||||
padding: 0 4px !important;
|
||||
}
|
||||
.rst-content dl:not(.docutils) .sig-param,
|
||||
.rst-content dl:not(.docutils).class dt > .optional ~ em,
|
||||
.rst-content dl:not(.docutils).function dt > .optional ~ em,
|
||||
.rst-content dl:not(.docutils).method dt > .optional ~ em {
|
||||
color: var(--highlight-number-color) !important;
|
||||
font-style: italic !important;
|
||||
}
|
||||
.rst-content dl:not(.docutils).class dt > em.property {
|
||||
|
||||
.rst-content dt.sig .kt {
|
||||
color: var(--highlight-keyword-color) !important;
|
||||
font-style: normal !important;
|
||||
}
|
||||
|
||||
.rst-content dt.sig .sig-name .n {
|
||||
color: var(--highlight-function-color) !important;
|
||||
}
|
||||
|
||||
.rst-content dt.sig .k,
|
||||
.rst-content dt.sig .kt,
|
||||
.rst-content dt.sig .n {
|
||||
font-weight: normal !important;
|
||||
}
|
||||
|
||||
.rst-content dl:not(.docutils) dt a.headerlink {
|
||||
color: var(--link-color) !important;
|
||||
}
|
||||
@@ -878,16 +841,8 @@ kbd, .kbd {
|
||||
|
||||
/* Breathe tweaks */
|
||||
|
||||
.rst-content dl.group>dt, .rst-content dl.group>dd>p {
|
||||
display:none !important;
|
||||
}
|
||||
|
||||
.rst-content dl.group {
|
||||
margin: 0 0 1rem 0;
|
||||
}
|
||||
|
||||
.rst-content dl.group>dd {
|
||||
margin-left: 0 !important;
|
||||
.rst-content .section > dl > dd {
|
||||
margin-left: 0;
|
||||
}
|
||||
|
||||
.rst-content p.breathe-sectiondef-title {
|
||||
@@ -895,24 +850,12 @@ kbd, .kbd {
|
||||
color: var(--link-color);
|
||||
}
|
||||
|
||||
.rst-content div.breathe-sectiondef {
|
||||
padding-left: 0 !important;
|
||||
}
|
||||
|
||||
.rst-content dl:not(.docutils) dl:not(.rst-other-versions) dt {
|
||||
background: var(--admonition-note-background-color) !important;
|
||||
border-top: none !important;
|
||||
border-left: none !important;
|
||||
}
|
||||
|
||||
.rst-content dl:not(.docutils).c.var .pre {
|
||||
padding-right: 4px;
|
||||
}
|
||||
|
||||
.rst-content dl:not(.docutils).c.struct .property {
|
||||
padding-right: 4px !important;
|
||||
}
|
||||
|
||||
/* Misc tweaks */
|
||||
|
||||
.rst-columns {
|
||||
|
||||
@@ -8,14 +8,14 @@ external organization. Several of these are available.
|
||||
|
||||
.. _toolchain_gnuarmemb:
|
||||
|
||||
GNU ARM Embedded
|
||||
GNU Arm Embedded
|
||||
****************
|
||||
|
||||
.. warning::
|
||||
|
||||
Do not install the toolchain into a path with spaces.
|
||||
|
||||
#. Download and install a `GNU ARM Embedded`_ build for your operating system
|
||||
#. Download and install a `GNU Arm Embedded`_ build for your operating system
|
||||
and extract it on your file system.
|
||||
|
||||
.. note::
|
||||
@@ -61,6 +61,57 @@ GNU ARM Embedded
|
||||
- Set :envvar:`ZEPHYR_TOOLCHAIN_VARIANT` to ``gnuarmemb``.
|
||||
- Set :envvar:`GNUARMEMB_TOOLCHAIN_PATH` to the brew installation directory (something like ``/usr/local``)
|
||||
|
||||
.. _toolchain_armclang:
|
||||
|
||||
Arm Compiler 6
|
||||
**************
|
||||
|
||||
#. Download and install a development suite containing the `Arm Compiler 6`_
|
||||
for your operating system.
|
||||
|
||||
#. :ref:`Set these environment variables <env_vars>`:
|
||||
|
||||
- Set :envvar:`ZEPHYR_TOOLCHAIN_VARIANT` to ``armclang``.
|
||||
- Set :envvar:`ARMCLANG_TOOLCHAIN_PATH` to the toolchain installation
|
||||
directory.
|
||||
|
||||
#. The Arm Compiler 6 needs the :envvar:`ARMLMD_LICENSE_FILE` environment
|
||||
variable to point to your license file or server.
|
||||
|
||||
For example:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# Linux, macOS, license file:
|
||||
export ARMLMD_LICENSE_FILE=/<path>/license_armds.dat
|
||||
# Linux, macOS, license server:
|
||||
export ARMLMD_LICENSE_FILE=8224@myserver
|
||||
|
||||
# Windows, license file:
|
||||
> set ARMLMD_LICENSE_FILE=c:\<path>\license_armds.dat
|
||||
# Windows, license server:
|
||||
> set ARMLMD_LICENSE_FILE=8224@myserver
|
||||
|
||||
#. If the Arm Compiler 6 was installed as part of an Arm Development Studio, then
|
||||
you must set the :envvar:`ARM_PRODUCT_DEF` to point to the product definition file:
|
||||
See also: `Product and toolkit configuration <https://developer.arm.com/tools-and-software/software-development-tools/license-management/resources/product-and-toolkit-configuration>`_.
|
||||
For example if the Arm Development Studio is installed in:
|
||||
``/opt/armds-2020-1`` with a Gold license, then set :envvar:`ARM_PRODUCT_DEF`
|
||||
to point to ``/opt/armds-2020-1/gold.elmap``.
|
||||
|
||||
.. note::
|
||||
|
||||
The Arm Compiler 6 uses ``armlink`` for linking. This is incompatible
|
||||
with Zephyr's linker script template, which works with GNU ld. Zephyr's
|
||||
Arm Compiler 6 support uses Zephyr's CMake linker script generator, which
|
||||
supports generating scatter files. Basic scatter file support is in
|
||||
place, but there are still areas covered in ld templates which are not
|
||||
fully supported by the CMake linker script generator.
|
||||
|
||||
Some Zephyr subsystems or modules may also contain C or assembly code
|
||||
that relies on GNU intrinsics and have not yet been updated to work fully
|
||||
with ``armclang``.
|
||||
|
||||
Intel oneAPI Toolkit
|
||||
*********************
|
||||
|
||||
@@ -159,5 +210,6 @@ You can build toolchains from source code using crosstool-NG.
|
||||
$ echo $XTOOLS_TOOLCHAIN_PATH
|
||||
/Volumes/CrossToolNGNew/build/output/
|
||||
|
||||
.. _GNU ARM Embedded: https://developer.arm.com/open-source/gnu-toolchain/gnu-rm
|
||||
.. _GNU Arm Embedded: https://developer.arm.com/open-source/gnu-toolchain/gnu-rm
|
||||
.. _crosstool-ng site: http://crosstool-ng.org
|
||||
.. _Arm Compiler 6: https://developer.arm.com/tools-and-software/embedded/arm-compiler/downloads/version-6
|
||||
|
||||
@@ -41,7 +41,7 @@ Legend:
|
||||
+---------------------------------------------------------------------+------------+-------------+--------+------------+
|
||||
| Hardware floating point unit (FPU) | Y | Y | N | TBD |
|
||||
+---------------------------------------------------------------------+------------+-------------+--------+------------+
|
||||
| Symmetric multiprocessing (SMP) support, switch-based | N/A | Y | TBD | WIP |
|
||||
| Symmetric multiprocessing (SMP) support, switch-based | N/A | Y | TBD | Y |
|
||||
+---------------------------------------------------------------------+------------+-------------+--------+------------+
|
||||
| Hardware-assisted stack checking | Y | Y | TBD | N |
|
||||
+---------------------------------------------------------------------+------------+-------------+--------+------------+
|
||||
|
||||
@@ -36,11 +36,21 @@ In order to use MCUboot with Zephyr you need to take the following into account:
|
||||
|
||||
1. You will need to define the flash partitions required by MCUboot; see
|
||||
:ref:`flash_map_api` for details.
|
||||
2. Your application's :file:`.conf` file needs to enable the
|
||||
2. You will have to specify your flash partition as the chosen code partition
|
||||
|
||||
.. code-block:: devicetree
|
||||
|
||||
/ {
|
||||
chosen {
|
||||
zephyr,code-partition = &slot0_partition;
|
||||
};
|
||||
};
|
||||
|
||||
3. Your application's :file:`.conf` file needs to enable the
|
||||
:kconfig:`CONFIG_BOOTLOADER_MCUBOOT` Kconfig option in order for Zephyr to
|
||||
be built in an MCUboot-compatible manner
|
||||
3. You need to build and flash MCUboot itself on your device
|
||||
4. You might need to take precautions to avoid mass erasing the flash and also
|
||||
4. You need to build and flash MCUboot itself on your device
|
||||
5. You might need to take precautions to avoid mass erasing the flash and also
|
||||
to flash the Zephyr application image at the correct offset (right after the
|
||||
bootloader)
|
||||
|
||||
|
||||
@@ -210,7 +210,6 @@ Here are some ways to set it:
|
||||
(``-DDTC_OVERLAY_FILE="file1.overlay;file2.overlay"``)
|
||||
#. with the CMake ``set()`` command in the application ``CMakeLists.txt``,
|
||||
before including zephyr's :file:`boilerplate.cmake` file
|
||||
#. using a ``DTC_OVERLAY_FILE`` environment variable (deprecated)
|
||||
#. create a ``boards/<BOARD>_<revision>.overlay`` file in the application
|
||||
folder for the current board revision. This requires that the board supports
|
||||
multiple revisions, see :ref:`porting_board_revisions`.
|
||||
|
||||
@@ -254,6 +254,8 @@ channels (e.g. ADC or DAC channels) for conversion.
|
||||
|
||||
.. doxygengroup:: devicetree-io-channels
|
||||
|
||||
.. _devicetree-pinctrl-api:
|
||||
|
||||
Pinctrl (pin control)
|
||||
=====================
|
||||
|
||||
|
||||
@@ -202,10 +202,11 @@ in an asynchronous manner.
|
||||
.. note::
|
||||
A message queue can be used to transfer large data items, if desired.
|
||||
However, this can increase interrupt latency as interrupts are locked
|
||||
while a data item is written or read. It is usually preferable to transfer
|
||||
large data items by exchanging a pointer to the data item, rather than the
|
||||
data item itself. The kernel's memory map and memory pool object types
|
||||
can be helpful for data transfers of this sort.
|
||||
while a data item is written or read. The time to write or read a data item
|
||||
increases linearly with its size since the item is copied in its entirety
|
||||
to or from the buffer in memory. For this reason, it is usually preferable
|
||||
to transfer large data items by exchanging a pointer to the data item,
|
||||
rather than the data item itself.
|
||||
|
||||
A synchronous transfer can be achieved by using the kernel's mailbox
|
||||
object type.
|
||||
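For illustration, here is a minimal sketch (not part of the original documentation;
names and sizes are made up) of exchanging a pointer through a message queue so
that each put or get copies only ``sizeof(void *)`` bytes:

.. code-block:: c

   #include <zephyr.h>

   struct large_item {
       uint8_t payload[1024];
   };

   static struct large_item items[4];

   /* each queue entry is just a pointer, so puts and gets stay cheap */
   K_MSGQ_DEFINE(ptr_msgq, sizeof(struct large_item *), 4, 4);

   void producer(void)
   {
       struct large_item *item = &items[0];

       /* fill item->payload ... */
       k_msgq_put(&ptr_msgq, &item, K_FOREVER);
   }

   void consumer(void)
   {
       struct large_item *item;

       k_msgq_get(&ptr_msgq, &item, K_FOREVER);
       /* use item->payload without copying the whole buffer */
   }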
|
||||
@@ -164,8 +164,7 @@ Use a pipe to send streams of data between threads.
|
||||
.. note::
|
||||
A pipe can be used to transfer long streams of data if desired. However,
|
||||
it is often preferable to send pointers to large data items to avoid
|
||||
copying the data. The kernel's memory map and memory pool object types
|
||||
can be helpful for data transfers of this sort.
|
||||
copying the data.
|
||||
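As a minimal sketch (illustrative names and sizes, not taken from the Zephyr tree),
a pipe carrying a small data stream might look like this:

.. code-block:: c

   #include <zephyr.h>

   K_PIPE_DEFINE(stream_pipe, 256, 4);

   int stream_send(const uint8_t *chunk, size_t len)
   {
       size_t written;

       /* block until the whole chunk has been accepted by the pipe */
       return k_pipe_put(&stream_pipe, (void *)chunk, len, &written,
                         len, K_FOREVER);
   }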
|
||||
Configuration Options
|
||||
*********************
|
||||
|
||||
@@ -32,12 +32,12 @@ A stack must be initialized before it can be used. This sets its queue to empty.
|
||||
A data value can be **added** to a stack by a thread or an ISR.
|
||||
The value is given directly to a waiting thread, if one exists;
|
||||
otherwise the value is added to the LIFO's queue.
|
||||
The kernel does *not* detect attempts to add a data value to a stack
|
||||
that has already reached its maximum quantity of queued values.
|
||||
|
||||
.. note::
|
||||
Adding a data value to a stack that is already full will result in
|
||||
array overflow, and lead to unpredictable behavior.
|
||||
If :kconfig:`CONFIG_NO_RUNTIME_CHECKS` is enabled, the kernel will *not* detect
|
||||
and prevent attempts to add a data value to a stack that has already reached
|
||||
its maximum quantity of queued values. Adding a data value to a stack that is
|
||||
already full will result in array overflow, and lead to unpredictable behavior.
|
||||
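A minimal sketch (the capacity check and names are illustrative) of guarding
pushes in the application, since the kernel may not detect the overflow itself:

.. code-block:: c

   #include <zephyr.h>
   #include <errno.h>

   #define MY_STACK_ENTRIES 8

   K_STACK_DEFINE(my_stack, MY_STACK_ENTRIES);

   static uint32_t pushed;   /* single-producer counter, sketch only */

   int push_checked(stack_data_t value)
   {
       if (pushed >= MY_STACK_ENTRIES) {
           return -ENOMEM;   /* would overflow the backing array */
       }
       pushed++;
       k_stack_push(&my_stack, value);
       return 0;
   }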
|
||||
A data value may be **removed** from a stack by a thread.
|
||||
If the stack's queue is empty a thread may choose to wait for it to be given.
|
||||
|
||||
@@ -113,7 +113,7 @@ an extra 72 bytes of stack space where the callee-saved FP context can
|
||||
be saved.
|
||||
|
||||
`Lazy Stacking
|
||||
<http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dai0298a/DAFGGBJD.html>`_
|
||||
<https://developer.arm.com/documentation/dai0298/a>`_
|
||||
is currently enabled in Zephyr applications on ARM Cortex-M
|
||||
architecture, minimizing interrupt latency, when the floating
|
||||
point context is active.
|
||||
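For illustration, a sketch (stack size and priority are arbitrary, and it assumes
a configuration with :kconfig:`CONFIG_FPU_SHARING` enabled) of tagging a thread as
an FPU user so the kernel manages its floating point context:

.. code-block:: c

   #include <zephyr.h>

   #define FPU_THREAD_STACK 2048

   void fpu_thread_fn(void *a, void *b, void *c)
   {
       ARG_UNUSED(a);
       ARG_UNUSED(b);
       ARG_UNUSED(c);

       volatile float acc = 0.0f;

       for (int i = 0; i < 100; i++) {
           acc += (float)i * 0.5f;
       }
   }

   /* K_FP_REGS marks the thread as using the floating point registers */
   K_THREAD_DEFINE(fpu_thread, FPU_THREAD_STACK, fpu_thread_fn,
                   NULL, NULL, NULL, 5, K_FP_REGS, 0);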
|
||||
@@ -39,6 +39,15 @@ The kernel's scheduler selects the highest priority ready thread
|
||||
to be the current thread. When multiple ready threads of the same priority
|
||||
exist, the scheduler chooses the one that has been waiting longest.
|
||||
|
||||
A thread's relative priority is primarily determined by its static priority.
|
||||
However, when both earliest-deadline-first scheduling is enabled
|
||||
(:kconfig:`CONFIG_SCHED_DEADLINE`) and a choice of threads have equal
|
||||
static priority, then the thread with the earlier deadline is considered
|
||||
to have the higher priority. Thus, when earliest-deadline-first scheduling is
|
||||
enabled, two threads are only considered to have the same priority when both
|
||||
their static priorities and deadlines are equal. The routine
|
||||
:c:func:`k_thread_deadline_set` is used to set a thread's deadline.
|
||||
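As a minimal sketch (the 2 ms budget is made up), a thread can set its own
deadline, expressed in hardware cycles from the current time, before starting a
time-critical job:

.. code-block:: c

   #include <zephyr.h>

   void process_frame(void)
   {
       /* only meaningful with CONFIG_SCHED_DEADLINE=y */
       k_thread_deadline_set(k_current_get(), k_ms_to_cyc_ceil32(2));

       /* ... time-critical work ... */
   }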
|
||||
.. note::
|
||||
Execution of ISRs takes precedence over thread execution,
|
||||
so the execution of the current thread may be replaced by an ISR
|
||||
|
||||
@@ -37,6 +37,13 @@ Any number of threads may wait on an unavailable semaphore simultaneously.
|
||||
When the semaphore is given, it is taken by the highest priority thread
|
||||
that has waited longest.
|
||||
|
||||
.. note::
|
||||
You may initialize a "full" semaphore (count equal to limit) to limit the number
|
||||
of threads able to execute the critical section at the same time. You may also
|
||||
initialize an empty semaphore (count equal to 0, with a limit greater than 0)
|
||||
to create a gate through which no waiting thread may pass until the semaphore
|
||||
is incremented. All standard use cases of the common semaphore are supported.
|
||||
|
||||
.. note::
|
||||
The kernel does allow an ISR to take a semaphore, however the ISR must
|
||||
not attempt to wait if the semaphore is unavailable.
|
||||
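The notes above can be illustrated with a small sketch (counts and names are
arbitrary): a "full" semaphore bounds concurrency, an "empty" one gates waiting
threads, and an ISR only takes with ``K_NO_WAIT``:

.. code-block:: c

   #include <zephyr.h>

   K_SEM_DEFINE(slots, 3, 3);   /* full: at most three threads in the section */
   K_SEM_DEFINE(gate, 0, 1);    /* empty: nobody passes until it is given */

   void worker(void)
   {
       /* some other thread calls k_sem_give(&gate) to open the gate */
       k_sem_take(&gate, K_FOREVER);

       k_sem_take(&slots, K_FOREVER);
       /* ... at most three threads execute here concurrently ... */
       k_sem_give(&slots);
   }

   void my_isr(const void *arg)
   {
       ARG_UNUSED(arg);

       /* an ISR must never wait, so it polls with K_NO_WAIT */
       if (k_sem_take(&slots, K_NO_WAIT) == 0) {
           /* resource acquired */
           k_sem_give(&slots);
       }
   }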
|
||||
@@ -245,6 +245,10 @@ A thread's initial priority value can be altered up or down after the thread
|
||||
has been started. Thus it is possible for a preemptible thread to become
|
||||
a cooperative thread, and vice versa, by changing its priority.
|
||||
|
||||
.. note::
|
||||
The scheduler does not make heuristic decisions to re-prioritize threads.
|
||||
Thread priorities are set and changed only at the application's request.
|
||||
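A sketch of the priority change described above (the priority values are
arbitrary):

.. code-block:: c

   #include <zephyr.h>

   void make_cooperative(k_tid_t tid)
   {
       /* preemptible priorities are >= 0, cooperative ones are < 0 */
       k_thread_priority_set(tid, K_PRIO_COOP(2));
   }

   void make_preemptible(k_tid_t tid)
   {
       k_thread_priority_set(tid, K_PRIO_PREEMPT(5));
   }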
|
||||
The kernel supports a virtually unlimited number of thread priority levels.
|
||||
The configuration options :kconfig:`CONFIG_NUM_COOP_PRIORITIES` and
|
||||
:kconfig:`CONFIG_NUM_PREEMPT_PRIORITIES` specify the number of priority
|
||||
@@ -269,9 +273,10 @@ When enabled (see :kconfig:`CONFIG_NUM_METAIRQ_PRIORITIES`), there is a special
|
||||
subclass of cooperative priorities at the highest (numerically lowest)
|
||||
end of the priority space: meta-IRQ threads. These are scheduled
|
||||
according to their normal priority, but also have the special ability
|
||||
to preempt all other threads (and other meta-irq threads) at lower
|
||||
to preempt all other threads (and other meta-IRQ threads) at lower
|
||||
priorities, even if those threads are cooperative and/or have taken a
|
||||
scheduler lock.
|
||||
scheduler lock. Meta-IRQ threads are still threads, however,
|
||||
and can still be interrupted by any hardware interrupt.
|
||||
|
||||
This behavior makes the act of unblocking a meta-IRQ thread (by any
|
||||
means, e.g. creating it, calling k_sem_give(), etc.) into the
|
||||
@@ -284,7 +289,7 @@ run before the current CPU returns into application code.
|
||||
|
||||
Unlike similar features in other OSes, meta-IRQ threads are true
|
||||
threads and run on their own stack (which must be allocated normally),
|
||||
not the per-CPU interrupt stack. Design work to enable the use of the
|
||||
not the per-CPU interrupt stack. Design work to enable the use of the
|
||||
IRQ stack on supported architectures is pending.
|
||||
|
||||
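As an illustrative sketch (assuming :kconfig:`CONFIG_NUM_METAIRQ_PRIORITIES` is at
least 1; the stack size and wake-up mechanism are made up), a thread created at
the numerically lowest cooperative priority behaves as a meta-IRQ thread:

.. code-block:: c

   #include <zephyr.h>

   #define METAIRQ_STACK_SIZE 1024

   K_SEM_DEFINE(metairq_wake, 0, 1);

   void metairq_fn(void *a, void *b, void *c)
   {
       ARG_UNUSED(a);
       ARG_UNUSED(b);
       ARG_UNUSED(c);

       for (;;) {
           /* sleep until an ISR gives the semaphore, then run ahead of
            * every other thread, even cooperative ones
            */
           k_sem_take(&metairq_wake, K_FOREVER);
           /* ... short, urgent processing ... */
       }
   }

   K_THREAD_DEFINE(metairq_thread, METAIRQ_STACK_SIZE, metairq_fn,
                   NULL, NULL, NULL, K_HIGHEST_THREAD_PRIO, 0, 0);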
Note that because this breaks the promise made to cooperative
|
||||
|
||||
61
doc/reference/usb/hid.rst
Normal file
@@ -0,0 +1,61 @@
|
||||
.. _usb_device_hid:
|
||||
|
||||
USB Human Interface Devices (HID) support
|
||||
#########################################
|
||||
|
||||
Since the USB HID specification is not only used by the USB subsystem, the USB HID API
|
||||
is split into two header files :zephyr_file:`include/usb/class/hid.h`
|
||||
and :zephyr_file:`include/usb/class/usb_hid.h`. The second includes a specific
|
||||
part for HID support in the USB device stack.
|
||||
|
||||
HID Item helpers
|
||||
****************
|
||||
|
||||
HID item helper macros can be used to compose a HID Report Descriptor.
|
||||
The names correspond to those used in the USB HID Specification.
|
||||
|
||||
Example of a HID Report Descriptor:
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
static const uint8_t hid_report_desc[] = {
|
||||
HID_USAGE_PAGE(HID_USAGE_GEN_DESKTOP),
|
||||
HID_USAGE(HID_USAGE_GEN_DESKTOP_UNDEFINED),
|
||||
HID_COLLECTION(HID_COLLECTION_APPLICATION),
|
||||
HID_LOGICAL_MIN8(0),
|
||||
/* logical maximum 255 */
|
||||
HID_LOGICAL_MAX16(0xFF, 0x00),
|
||||
HID_REPORT_ID(1),
|
||||
HID_REPORT_SIZE(8),
|
||||
HID_REPORT_COUNT(1),
|
||||
HID_USAGE(HID_USAGE_GEN_DESKTOP_UNDEFINED),
|
||||
/* HID_INPUT (Data, Variable, Absolute) */
|
||||
HID_INPUT(0x02),
|
||||
HID_END_COLLECTION,
|
||||
};
|
||||
|
||||
|
||||
HID items reference
|
||||
*******************
|
||||
|
||||
.. doxygengroup:: usb_hid_items
|
||||
|
||||
HID types reference
|
||||
*******************
|
||||
|
||||
.. doxygengroup:: usb_hid_types
|
||||
|
||||
HID Mouse and Keyboard report descriptors
|
||||
*****************************************
|
||||
|
||||
The pre-defined Mouse and Keyboard report descriptors can be used by
|
||||
a HID device implementation or simply as examples.
|
||||
|
||||
.. doxygengroup:: usb_hid_mk_report_desc
|
||||
|
||||
HID Class Device API reference
|
||||
******************************
|
||||
|
||||
USB HID devices like mouse, keyboard, or any other specific device use this API.
|
||||
|
||||
.. doxygengroup:: usb_hid_device_api
|
||||
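For illustration, a minimal sketch of bringing up a HID device and queuing one
report (the ``HID_0`` device label, the report layout, and the absence of
callbacks are simplifications, not a complete application):

.. code-block:: c

   #include <zephyr.h>
   #include <device.h>
   #include <usb/usb_device.h>
   #include <usb/class/usb_hid.h>

   /* pre-defined mouse report descriptor with two buttons */
   static const uint8_t hid_report_desc[] = HID_MOUSE_REPORT_DESC(2);

   void hid_start(void)
   {
       const struct device *hid_dev = device_get_binding("HID_0");
       uint8_t report[4] = { 0 };   /* buttons, dx, dy, wheel */

       usb_hid_register_device(hid_dev, hid_report_desc,
                               sizeof(hid_report_desc), NULL);
       usb_hid_init(hid_dev);
       usb_enable(NULL);

       /* queue one (empty) report on the interrupt IN endpoint */
       hid_int_ep_write(hid_dev, report, sizeof(report), NULL);
   }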
@@ -1,252 +1,13 @@
|
||||
.. _usb_api:
|
||||
|
||||
USB device stack
|
||||
################
|
||||
USB device support
|
||||
##################
|
||||
|
||||
.. contents::
|
||||
:depth: 2
|
||||
:local:
|
||||
:backlinks: top
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
|
||||
USB Vendor and Product identifiers
|
||||
**********************************
|
||||
|
||||
The USB Vendor ID for the Zephyr project is 0x2FE3. The default USB Product
|
||||
ID for the Zephyr project is 0x100. The USB bcdDevice Device Release Number
|
||||
represents the Zephyr kernel major and minor versions as a binary coded
|
||||
decimal value. When a vendor integrates the Zephyr USB subsystem into a
|
||||
product, the vendor must use the USB Vendor and Product ID assigned to them.
|
||||
A vendor integrating the Zephyr USB subsystem in a product must not use the
|
||||
Vendor ID of the Zephyr project.
|
||||
|
||||
The USB maintainer, if one is assigned, or otherwise the Zephyr Technical
|
||||
Steering Committee, may allocate other USB Product IDs based on well-motivated
|
||||
and documented requests.
|
||||
|
||||
Each USB sample has its own unique Product ID.
|
||||
When adding a new sample, add a new entry in :file:`samples/subsys/usb/usb_pid.Kconfig`
|
||||
and a Kconfig file inside your sample subdirectory.
|
||||
The following Product IDs are currently used:
|
||||
|
||||
* :kconfig:`CONFIG_USB_PID_CDC_ACM_SAMPLE`
|
||||
* :kconfig:`CONFIG_USB_PID_CDC_ACM_COMPOSITE_SAMPLE`
|
||||
* :kconfig:`CONFIG_USB_PID_HID_CDC_SAMPLE`
|
||||
* :kconfig:`CONFIG_USB_PID_CONSOLE_SAMPLE`
|
||||
* :kconfig:`CONFIG_USB_PID_DFU_SAMPLE`
|
||||
* :kconfig:`CONFIG_USB_PID_HID_SAMPLE`
|
||||
* :kconfig:`CONFIG_USB_PID_HID_MOUSE_SAMPLE`
|
||||
* :kconfig:`CONFIG_USB_PID_MASS_SAMPLE`
|
||||
* :kconfig:`CONFIG_USB_PID_TESTUSB_SAMPLE`
|
||||
* :kconfig:`CONFIG_USB_PID_WEBUSB_SAMPLE`
|
||||
* :kconfig:`CONFIG_USB_PID_BLE_HCI_H4_SAMPLE`
|
||||
|
||||
USB device controller drivers
|
||||
*****************************
|
||||
|
||||
The Device Controller Driver Layer implements the low level control routines
|
||||
to deal directly with the hardware. All device controller drivers should
|
||||
implement the APIs described in file usb_dc.h. This allows the integration of
|
||||
new USB device controllers to be done without changing the upper layers.
|
||||
|
||||
USB Device Controller API
|
||||
=========================
|
||||
|
||||
.. doxygengroup:: _usb_device_controller_api
|
||||
|
||||
USB device core layer
|
||||
*********************
|
||||
|
||||
The USB Device core layer is a hardware independent interface between USB
|
||||
device controller driver and USB device class drivers or customer applications.
|
||||
It's a port of the LPCUSB device stack. It provides the following
|
||||
functionalities:
|
||||
|
||||
* Responds to standard device requests and returns standard descriptors,
|
||||
essentially handling 'Chapter 9' processing, specifically the standard
|
||||
device requests in table 9-3 from the universal serial bus specification
|
||||
revision 2.0.
|
||||
* Provides a programming interface to be used by USB device classes or
|
||||
customer applications. The APIs are described in the usb_device.h file.
|
||||
* Uses the APIs provided by the device controller drivers to interact with
|
||||
the USB device controller.
|
||||
|
||||
|
||||
USB Device Core Layer API
|
||||
=========================
|
||||
|
||||
There are two ways to transmit data, using the 'low' level read/write API or
|
||||
the 'high' level transfer API.
|
||||
|
||||
Low level API
|
||||
To transmit data to the host, the class driver should call usb_write().
|
||||
Upon completion the registered endpoint callback will be called. Before
|
||||
sending another packet the class driver should wait for the completion of
|
||||
the previous write. When data is received, the registered endpoint callback
|
||||
is called. usb_read() should be used for retrieving the received data.
|
||||
For CDC ACM sample driver this happens via the OUT bulk endpoint handler
|
||||
(cdc_acm_bulk_out) mentioned in the endpoint array (cdc_acm_ep_data).
|
||||
|
||||
High level API
|
||||
The usb_transfer method can be used to transfer data to/from the host. The
|
||||
transfer API will automatically split the data transmission into one or more
|
||||
USB transaction(s), depending on the endpoint's max packet size. The class driver does
|
||||
not have to implement endpoint callback and should set this callback to the
|
||||
generic usb_transfer_ep_callback.
|
||||
|
||||
.. doxygengroup:: _usb_device_core_api
|
||||
|
||||
USB device class drivers
|
||||
************************
|
||||
|
||||
Zephyr USB Device Stack supports many standard classes, such as HID, MSC
|
||||
Ethernet over USB, DFU, Bluetooth.
|
||||
|
||||
Implementing non standard USB class
|
||||
===================================
|
||||
|
||||
Configuration of USB Device is done in the stack layer.
|
||||
|
||||
The following structures and callbacks need to be defined:
|
||||
|
||||
* Part of USB Descriptor table
|
||||
* USB Endpoint configuration table
|
||||
* USB Device configuration structure
|
||||
* Endpoint callbacks
|
||||
* Optionally class, vendor and custom handlers
|
||||
|
||||
For example, for USB loopback application:
|
||||
|
||||
.. literalinclude:: ../../../subsys/usb/class/loopback.c
|
||||
:language: c
|
||||
:start-after: usb.rst config structure start
|
||||
:end-before: usb.rst config structure end
|
||||
:linenos:
|
||||
|
||||
Endpoint configuration:
|
||||
|
||||
.. literalinclude:: ../../../subsys/usb/class/loopback.c
|
||||
:language: c
|
||||
:start-after: usb.rst endpoint configuration start
|
||||
:end-before: usb.rst endpoint configuration end
|
||||
:linenos:
|
||||
|
||||
USB Device configuration structure:
|
||||
|
||||
.. literalinclude:: ../../../subsys/usb/class/loopback.c
|
||||
:language: c
|
||||
:start-after: usb.rst device config data start
|
||||
:end-before: usb.rst device config data end
|
||||
:linenos:
|
||||
|
||||
|
||||
The vendor device requests are forwarded by the USB stack core driver to the
|
||||
class driver through the registered vendor handler.
|
||||
|
||||
For the loopback class driver, :c:func:`loopback_vendor_handler` processes
|
||||
the vendor requests:
|
||||
|
||||
.. literalinclude:: ../../../subsys/usb/class/loopback.c
|
||||
:language: c
|
||||
:start-after: usb.rst vendor handler start
|
||||
:end-before: usb.rst vendor handler end
|
||||
:linenos:
|
||||
|
||||
The class driver waits for the :makevar:`USB_DC_CONFIGURED` device status code
|
||||
before transmitting any data.
|
||||
|
||||
.. _testing_USB_native_posix:
|
||||
|
||||
Testing USB over USB/IP in native_posix
|
||||
***************************************
|
||||
|
||||
Virtual USB controller implemented through USB/IP might be used to test USB
|
||||
Device stack. Follow general build procedure to build USB sample for
|
||||
the native_posix configuration.
|
||||
|
||||
Run built sample with:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
west build -t run
|
||||
|
||||
In a terminal window, run the following command to list USB devices:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ usbip list -r localhost
|
||||
Exportable USB devices
|
||||
======================
|
||||
- 127.0.0.1
|
||||
1-1: unknown vendor : unknown product (2fe3:0100)
|
||||
: /sys/devices/pci0000:00/0000:00:01.2/usb1/1-1
|
||||
: (Defined at Interface level) (00/00/00)
|
||||
: 0 - Vendor Specific Class / unknown subclass / unknown protocol (ff/00/00)
|
||||
|
||||
In a terminal window, run the following command to attach USB device:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ sudo usbip attach -r localhost -b 1-1
|
||||
|
||||
The USB device should be connected to your Linux host, and verified with the following commands:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ sudo usbip port
|
||||
Imported USB devices
|
||||
====================
|
||||
Port 00: <Port in Use> at Full Speed(12Mbps)
|
||||
unknown vendor : unknown product (2fe3:0100)
|
||||
7-1 -> usbip://localhost:3240/1-1
|
||||
-> remote bus/dev 001/002
|
||||
$ lsusb -d 2fe3:0100
|
||||
Bus 007 Device 004: ID 2fe3:0100
|
||||
|
||||
USB Human Interface Devices (HID) support
|
||||
*****************************************
|
||||
|
||||
HID Item helpers
|
||||
================
|
||||
|
||||
HID item helper macros can be used to compose a HID Report Descriptor.
|
||||
The names correspond to those used in the USB HID Specification.
|
||||
|
||||
Example of a HID Report Descriptor:
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
static const uint8_t hid_report_desc[] = {
|
||||
HID_USAGE_PAGE(HID_USAGE_GEN_DESKTOP),
|
||||
HID_USAGE(HID_USAGE_GEN_DESKTOP_UNDEFINED),
|
||||
HID_COLLECTION(HID_COLLECTION_APPLICATION),
|
||||
HID_LOGICAL_MIN8(0),
|
||||
/* logical maximum 255 */
|
||||
HID_LOGICAL_MAX16(0xFF, 0x00),
|
||||
HID_REPORT_ID(1),
|
||||
HID_REPORT_SIZE(8),
|
||||
HID_REPORT_COUNT(1),
|
||||
HID_USAGE(HID_USAGE_GEN_DESKTOP_UNDEFINED),
|
||||
/* HID_INPUT (Data, Variable, Absolute) */
|
||||
HID_INPUT(0x02),
|
||||
HID_END_COLLECTION,
|
||||
};
|
||||
|
||||
|
||||
.. doxygengroup:: usb_hid_items
|
||||
|
||||
.. doxygengroup:: usb_hid_types
|
||||
|
||||
HID Mouse and Keyboard report descriptors
|
||||
=========================================
|
||||
|
||||
The pre-defined Mouse and Keyboard report descriptors can be used by
|
||||
a HID device implementation or simply as examples.
|
||||
|
||||
.. doxygengroup:: usb_hid_mk_report_desc
|
||||
|
||||
HID Class Device API
|
||||
********************
|
||||
|
||||
USB HID devices like mouse, keyboard, or any other specific device use this API.
|
||||
|
||||
.. doxygengroup:: usb_hid_device_api
|
||||
udc.rst
|
||||
uds.rst
|
||||
uds_testing.rst
|
||||
hid.rst
|
||||
uds_cdc_acm.rst
|
||||
|
||||
17
doc/reference/usb/udc.rst
Normal file
@@ -0,0 +1,17 @@
|
||||
.. _udc_api:
|
||||
|
||||
USB device controller driver API
|
||||
################################
|
||||
|
||||
The USB Device Controller Driver Layer implements the low level control routines
|
||||
to deal directly with the hardware. All device controller drivers should
|
||||
implement the APIs described in :zephyr_file:`include/drivers/usb/usb_dc.h`.
|
||||
This allows the integration of new USB device controllers to be done without
|
||||
changing the upper layers.
|
||||
With this API it is not possible to support more than one controller
|
||||
instance at runtime.
|
||||
|
||||
API reference
|
||||
*************
|
||||
|
||||
.. doxygengroup:: _usb_device_controller_api
|
||||
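As an illustrative sketch (the function bodies are placeholders, not a working
driver), two of the ``usb_dc.h`` entry points a new controller driver would
provide:

.. code-block:: c

   #include <drivers/usb/usb_dc.h>

   int usb_dc_attach(void)
   {
       /* enable the controller hardware and its interrupts */
       return 0;
   }

   int usb_dc_ep_configure(const struct usb_dc_ep_cfg_data *cfg)
   {
       /* program endpoint cfg->ep_addr with cfg->ep_mps and cfg->ep_type */
       return 0;
   }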
148
doc/reference/usb/uds.rst
Normal file
@@ -0,0 +1,148 @@
|
||||
.. _usb_device_stack:
|
||||
|
||||
USB device stack
|
||||
################
|
||||
|
||||
The USB device stack is a hardware independent interface between USB
|
||||
device controller driver and USB device class drivers or customer applications.
|
||||
It is a port of the LPCUSB device stack and has been modified and expanded
|
||||
over time. It provides the following functionalities:
|
||||
|
||||
* Uses the APIs provided by the device controller drivers to interact with
|
||||
the USB device controller.
|
||||
* Responds to standard device requests and returns standard descriptors,
|
||||
essentially handling 'Chapter 9' processing, specifically the standard
|
||||
device requests in table 9-3 from the universal serial bus specification
|
||||
revision 2.0.
|
||||
* Provides a programming interface to be used by USB device classes or
|
||||
customer applications. The APIs is described in
|
||||
:zephyr_file:`include/usb/usb_device.h`
|
||||
|
||||
The device stack has few limitations with which it is not possible to support
|
||||
more than one controller instance at runtime, and only one USB device
|
||||
configuration is supported.
|
||||
|
||||
Supported USB classes:
|
||||
|
||||
* USB Audio (experimental)
|
||||
* USB CDC ACM
|
||||
* USB CDC ECM
|
||||
* USB CDC EEM
|
||||
* RNDIS
|
||||
* USB MSC
|
||||
* USB DFU
|
||||
* Bluetooth HCI over USB
|
||||
* USB HID class
|
||||
|
||||
:ref:`List<usb-samples>` of samples for different purposes.
|
||||
CDC ACM and HID samples have configuration overlays for composite configuration.
|
||||
|
||||
Implementing a non-standard USB class
|
||||
*************************************
|
||||
|
||||
The configuration of USB Device is done in the stack layer.
|
||||
|
||||
The following structures and callbacks need to be defined:
|
||||
|
||||
* Part of USB Descriptor table
|
||||
* USB Endpoint configuration table
|
||||
* USB Device configuration structure
|
||||
* Endpoint callbacks
|
||||
* Optionally class, vendor and custom handlers
|
||||
|
||||
For example, for the USB loopback application:
|
||||
|
||||
.. literalinclude:: ../../../subsys/usb/class/loopback.c
|
||||
:language: c
|
||||
:start-after: usb.rst config structure start
|
||||
:end-before: usb.rst config structure end
|
||||
:linenos:
|
||||
|
||||
Endpoint configuration:
|
||||
|
||||
.. literalinclude:: ../../../subsys/usb/class/loopback.c
|
||||
:language: c
|
||||
:start-after: usb.rst endpoint configuration start
|
||||
:end-before: usb.rst endpoint configuration end
|
||||
:linenos:
|
||||
|
||||
USB Device configuration structure:
|
||||
|
||||
.. literalinclude:: ../../../subsys/usb/class/loopback.c
|
||||
:language: c
|
||||
:start-after: usb.rst device config data start
|
||||
:end-before: usb.rst device config data end
|
||||
:linenos:
|
||||
|
||||
|
||||
The vendor device requests are forwarded by the USB stack core driver to the
|
||||
class driver through the registered vendor handler.
|
||||
|
||||
For the loopback class driver, :c:func:`loopback_vendor_handler` processes
|
||||
the vendor requests:
|
||||
|
||||
.. literalinclude:: ../../../subsys/usb/class/loopback.c
|
||||
:language: c
|
||||
:start-after: usb.rst vendor handler start
|
||||
:end-before: usb.rst vendor handler end
|
||||
:linenos:
|
||||
|
||||
The class driver waits for the :makevar:`USB_DC_CONFIGURED` device status code
|
||||
before transmitting any data.
|
||||
|
||||
.. _testing_USB_native_posix:
|
||||
|
||||
USB Vendor and Product identifiers
|
||||
**********************************
|
||||
|
||||
The USB Vendor ID for the Zephyr project is ``0x2FE3``.
|
||||
This USB Vendor ID must not be used when a vendor
|
||||
integrates Zephyr USB device support into its own product.
|
||||
|
||||
Each USB sample has its own unique Product ID.
|
||||
The USB maintainer, if one is assigned, or otherwise the Zephyr Technical
|
||||
Steering Committee, may allocate other USB Product IDs based on well-motivated
|
||||
and documented requests.
|
||||
|
||||
When adding a new sample, add a new entry in :file:`samples/subsys/usb/usb_pid.Kconfig`
|
||||
and a Kconfig file inside your sample subdirectory.
|
||||
The following Product IDs are currently used:
|
||||
|
||||
* :kconfig:`CONFIG_USB_PID_CDC_ACM_SAMPLE`
|
||||
* :kconfig:`CONFIG_USB_PID_CDC_ACM_COMPOSITE_SAMPLE`
|
||||
* :kconfig:`CONFIG_USB_PID_HID_CDC_SAMPLE`
|
||||
* :kconfig:`CONFIG_USB_PID_CONSOLE_SAMPLE`
|
||||
* :kconfig:`CONFIG_USB_PID_DFU_SAMPLE`
|
||||
* :kconfig:`CONFIG_USB_PID_HID_SAMPLE`
|
||||
* :kconfig:`CONFIG_USB_PID_HID_MOUSE_SAMPLE`
|
||||
* :kconfig:`CONFIG_USB_PID_MASS_SAMPLE`
|
||||
* :kconfig:`CONFIG_USB_PID_TESTUSB_SAMPLE`
|
||||
* :kconfig:`CONFIG_USB_PID_WEBUSB_SAMPLE`
|
||||
* :kconfig:`CONFIG_USB_PID_BLE_HCI_H4_SAMPLE`
|
||||
|
||||
The USB device descriptor field ``bcdDevice`` (Device Release Number) represents
|
||||
the Zephyr kernel major and minor versions as a binary coded decimal value.
|
||||
|
||||
API reference
|
||||
*************
|
||||
|
||||
There are two ways to transmit data, using the 'low' level read/write API or
|
||||
the 'high' level transfer API.
|
||||
|
||||
Low level API
|
||||
To transmit data to the host, the class driver should call usb_write().
|
||||
Upon completion the registered endpoint callback will be called. Before
|
||||
sending another packet the class driver should wait for the completion of
|
||||
the previous write. When data is received, the registered endpoint callback
|
||||
is called. usb_read() should be used for retrieving the received data.
|
||||
For CDC ACM sample driver this happens via the OUT bulk endpoint handler
|
||||
(cdc_acm_bulk_out) mentioned in the endpoint array (cdc_acm_ep_data).
|
||||
|
||||
High level API
|
||||
The usb_transfer method can be used to transfer data to/from the host. The
|
||||
transfer API will automatically split the data transmission into one or more
|
||||
USB transaction(s), depending on the endpoint's max packet size. The class driver does
|
||||
not have to implement endpoint callback and should set this callback to the
|
||||
generic usb_transfer_ep_callback.
|
||||
|
||||
.. doxygengroup:: _usb_device_core_api
|
||||
108
doc/reference/usb/uds_cdc_acm.rst
Normal file
@@ -0,0 +1,108 @@
|
||||
.. _usb_device_cdc_acm:
|
||||
|
||||
USB device stack CDC ACM support
|
||||
################################
|
||||
|
||||
The CDC ACM class is used as backend for different subsystems in Zephyr.
|
||||
However, its configuration may not be easy for the inexperienced user.
|
||||
Below is a description of the different use cases and some pitfalls.
|
||||
|
||||
The interface for CDC ACM users is the :ref:`uart_api` driver API.
|
||||
But there are two important behavioral differences compared to a real UART controller:
|
||||
|
||||
* Data transfer is only possible after the USB device stack has been initialized and started,
|
||||
until then any data is discarded
|
||||
* If the device is connected to the host, it still needs an application
|
||||
on the host side which requests the data
|
||||
|
||||
The devicetree compatible property for CDC ACM UART is
|
||||
:dtcompatible:`zephyr,cdc-acm-uart`.
|
||||
CDC ACM support is automatically selected when USB device support is enabled
|
||||
and a compatible node in the devicetree sources is present. If necessary,
|
||||
CDC ACM support can be explicitly disabled by :kconfig:`CONFIG_USB_CDC_ACM`.
|
||||
About four CDC ACM UART instances can be defined and used,
|
||||
limited by the maximum number of supported endpoints on the controller.
|
||||
|
||||
The CDC ACM UART node is supposed to be a child of a USB device controller node.
|
||||
Since the designation of the controller nodes varies from vendor to vendor,
|
||||
and our samples and application should be as generic as possible,
|
||||
the default USB device controller is usually assigned a ``zephyr_udc0``
|
||||
node label. Often, CDC ACM UART is described in a devicetree overlay file
|
||||
and looks like this:
|
||||
|
||||
.. code-block:: devicetree
|
||||
|
||||
&zephyr_udc0 {
|
||||
cdc_acm_uart0: cdc_acm_uart0 {
|
||||
compatible = "zephyr,cdc-acm-uart";
|
||||
label = "CDC_ACM_0";
|
||||
};
|
||||
};
|
||||
|
||||
Samples :ref:`usb_cdc-acm` and :ref:`usb_hid-cdc` have similar overlay files.
|
||||
And since no special properties are present, it may seem overkill to use
|
||||
devicetree to describe CDC ACM UART. The motivation behind using devicetree
|
||||
is the easy interchangeability of a real UART controller and CDC ACM UART
|
||||
in applications.
|
||||
|
||||
Console over CDC ACM UART
|
||||
*************************
|
||||
|
||||
With the CDC ACM UART node from above and ``zephyr,console`` property of the
|
||||
chosen node, we can describe that CDC ACM UART is to be used with the console.
|
||||
A similar overlay file is used by :ref:`cdc-acm-console`.
|
||||
If USB device support is enabled in the application, as in the console sample,
|
||||
:kconfig:`CONFIG_USB_UART_CONSOLE` must be enabled,
|
||||
which does nothing but change the initialization time of the console driver.
|
||||
|
||||
.. code-block:: devicetree
|
||||
|
||||
/ {
|
||||
chosen {
|
||||
zephyr,console = &cdc_acm_uart0;
|
||||
};
|
||||
};
|
||||
|
||||
&zephyr_udc0 {
|
||||
cdc_acm_uart0: cdc_acm_uart0 {
|
||||
compatible = "zephyr,cdc-acm-uart";
|
||||
label = "CDC_ACM_0";
|
||||
};
|
||||
};
|
||||
|
||||
Before the application uses the console, it is recommended to wait for
|
||||
the DTR signal:
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
const struct device *dev = DEVICE_DT_GET(DT_CHOSEN(zephyr_console));
|
||||
uint32_t dtr = 0;
|
||||
|
||||
if (usb_enable(NULL)) {
|
||||
return;
|
||||
}
|
||||
|
||||
while (!dtr) {
|
||||
uart_line_ctrl_get(dev, UART_LINE_CTRL_DTR, &dtr);
|
||||
k_sleep(K_MSEC(100));
|
||||
}
|
||||
|
||||
printk("nuqneH\n");
|
||||
|
||||
CDC ACM UART as backend
|
||||
***********************
|
||||
|
||||
As for the console sample, it is possible to configure CDC ACM UART as
|
||||
backend for other subsystems by setting :ref:`devicetree-chosen-nodes`
|
||||
properties.
|
||||
|
||||
A list of a few Zephyr-specific chosen properties that can be used to select
|
||||
CDC ACM UART as backend for a subsystem or application:
|
||||
|
||||
* ``zephyr,bt-c2h-uart`` used in Bluetooth,
|
||||
for example see :ref:`bluetooth-hci-uart-sample`
|
||||
* ``zephyr,ot-uart`` used in OpenThread,
|
||||
for example see :ref:`coprocessor-sample`
|
||||
* ``zephyr,shell-uart`` used by shell for serial backend,
|
||||
for example see :zephyr_file:`samples/subsys/shell/shell_module`
|
||||
* ``zephyr,uart-mcumgr`` used by :ref:`smp_svr_sample`
|
||||
50
doc/reference/usb/uds_testing.rst
Normal file
@@ -0,0 +1,50 @@
|
||||
.. _usb_device_testing:
|
||||
|
||||
Testing USB device support
|
||||
##########################
|
||||
|
||||
Testing over USB/IP in native_posix
|
||||
***********************************
|
||||
|
||||
A virtual USB controller implemented through USBIP might be used to test the USB
|
||||
Device stack. Follow the general build procedure to build the USB sample for
|
||||
the native_posix configuration.
|
||||
|
||||
Run the built sample with:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
west build -t run
|
||||
|
||||
In a terminal window, run the following command to list USB devices:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ usbip list -r localhost
|
||||
Exportable USB devices
|
||||
======================
|
||||
- 127.0.0.1
|
||||
1-1: unknown vendor : unknown product (2fe3:0100)
|
||||
: /sys/devices/pci0000:00/0000:00:01.2/usb1/1-1
|
||||
: (Defined at Interface level) (00/00/00)
|
||||
: 0 - Vendor Specific Class / unknown subclass / unknown protocol (ff/00/00)
|
||||
|
||||
In a terminal window, run the following command to attach the USB device:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ sudo usbip attach -r localhost -b 1-1
|
||||
|
||||
The USB device should be connected to your Linux host, and verified with the following commands:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ sudo usbip port
|
||||
Imported USB devices
|
||||
====================
|
||||
Port 00: <Port in Use> at Full Speed(12Mbps)
|
||||
unknown vendor : unknown product (2fe3:0100)
|
||||
7-1 -> usbip://localhost:3240/1-1
|
||||
-> remote bus/dev 001/002
|
||||
$ lsusb -d 2fe3:0100
|
||||
Bus 007 Device 004: ID 2fe3:0100
|
||||
@@ -4,9 +4,9 @@ Memory Protection Design
|
||||
########################
|
||||
|
||||
Zephyr's memory protection design is geared towards microcontrollers with MPU
|
||||
(Memory Protection Unit) hardware. We do support some architectures which have
|
||||
a paged MMU (Memory Management Unit), but in that case the MMU is used like
|
||||
an MPU with an identity page table.
|
||||
(Memory Protection Unit) hardware. We do support some architectures, such as x86,
|
||||
which have a paged MMU (Memory Management Unit), but in that case the MMU is
|
||||
used like an MPU with an identity page table.
|
||||
|
||||
All of the discussion below will be using MPU terminology; systems with MMUs
|
||||
can be considered to have an MPU with an unlimited number of programmable
|
||||
@@ -46,7 +46,7 @@ text/ro-data, this is sufficient for the boot time configuration.
|
||||
Hardware Stack Overflow
|
||||
***********************
|
||||
|
||||
``CONFIG_HW_STACK_PROTECTION`` is an optional feature which detects stack
|
||||
:kconfig:`CONFIG_HW_STACK_PROTECTION` is an optional feature which detects stack
|
||||
buffer overflows when the system is running in supervisor mode. This
|
||||
catches issues when the entire stack buffer has overflowed, but not
individual stack frames; use compiler-assisted :kconfig:`CONFIG_STACK_CANARIES`
|
||||
|
||||
File diff suppressed because it is too large
@@ -784,6 +784,24 @@ Possible overflow in mempool
|
||||
CVE-2021
|
||||
========
|
||||
|
||||
CVE-2021-3319
|
||||
-------------
|
||||
|
||||
DOS: Incorrect 802154 Frame Validation for Omitted Source / Dest Addresses
|
||||
|
||||
Improper processing of omitted source and destination addresses in
|
||||
ieee802154 frame validation (ieee802154_validate_frame)
|
||||
|
||||
This has been fixed in main for v2.5.0
|
||||
|
||||
- `CVE-2021-3319 <http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-3319>`_
|
||||
|
||||
- `Zephyr project bug tracker GHSA-94jg-2p6q-5364
|
||||
<https://github.com/zephyrproject-rtos/zephyr/security/advisories/GHSA-94jg-2p6q-5364>`_
|
||||
|
||||
- `PR31908 fix for main
|
||||
<https://github.com/zephyrproject-rtos/zephyr/pull/31908>`_
|
||||
|
||||
CVE-2021-3320
|
||||
-------------------
|
||||
Mismatch between validation and handling of 802154 ACK frames, where
|
||||
@@ -938,6 +956,36 @@ This has been fixed in main for v2.6.0
|
||||
- `PR 33418 fix for 1.14.2
|
||||
<https://github.com/zephyrproject-rtos/zephyr/pull/33418>`_
|
||||
|
||||
CVE-2021-3436
|
||||
-------------
|
||||
|
||||
Bluetooth: Possible to overwrite an existing bond during keys
|
||||
distribution phase when the identity address of the bond is known
|
||||
|
||||
During the distribution of the identity address information we don’t
|
||||
check for an existing bond with the same identity address. This means
|
||||
that a duplicate entry will be created in RAM while the newest entry
|
||||
will overwrite the existing one in persistent storage.
|
||||
|
||||
This has been fixed in main for v2.6.0
|
||||
|
||||
- `CVE-2021-3436 <http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-3436>`_
|
||||
|
||||
- `Zephyr project bug tracker GHSA-j76f-35mc-4h63
|
||||
<https://github.com/zephyrproject-rtos/zephyr/security/advisories/GHSA-j76f-35mc-4h63>`_
|
||||
|
||||
- `PR 33266 fix for main
|
||||
<https://github.com/zephyrproject-rtos/zephyr/pull/33266>`_
|
||||
|
||||
- `PR 33432 fix for 2.5
|
||||
<https://github.com/zephyrproject-rtos/zephyr/pull/33432>`_
|
||||
|
||||
- `PR 33433 fix for 2.4
|
||||
<https://github.com/zephyrproject-rtos/zephyr/pull/33433>`_
|
||||
|
||||
- `PR 33718 fix for 1.14.2
|
||||
<https://github.com/zephyrproject-rtos/zephyr/pull/33718>`_
|
||||
|
||||
CVE-2021-3454
|
||||
-------------
|
||||
|
||||
@@ -989,7 +1037,70 @@ This has been fixed in main for v2.6.0
|
||||
- `PR 36105 fix for 2.4
|
||||
<https://github.com/zephyrproject-rtos/zephyr/pull/36105>`_
|
||||
|
||||
CVE-2021-3510
|
||||
-------------
|
||||
|
||||
Zephyr JSON decoder incorrectly decodes array of array
|
||||
|
||||
When using JSON_OBJ_DESCR_ARRAY_ARRAY, the subarray has the token
type JSON_TOK_LIST_START, but then assigns to the object part of the
union. arr_parse then takes the offset of the array-object (which has
nothing to do with the list), treats it as relative to the parent
object, and stores the length of the subarray there.
|
||||
|
||||
This has been fixed in main for v2.7.0
|
||||
|
||||
- `CVE-2021-3510 <http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-3510>`_
|
||||
|
||||
- `Zephyr project bug tracker GHSA-289f-7mw3-2qf4
|
||||
<https://github.com/zephyrproject-rtos/zephyr/security/advisories/GHSA-289f-7mw3-2qf4>`_
|
||||
|
||||
- `PR 36340 fix for main
|
||||
<https://github.com/zephyrproject-rtos/zephyr/pull/36340>`_
|
||||
|
||||
- `PR 37816 fix for 2.6
|
||||
<https://github.com/zephyrproject-rtos/zephyr/pull/37816>`_
|
||||
|
||||
CVE-2021-3581
|
||||
-------------
|
||||
|
||||
Under embargo until 2021/09/04
|
||||
HCI data not properly checked leads to memory overflow in the Bluetooth stack
|
||||
|
||||
In the process of setting SCAN_RSP through the HCI command, the Zephyr
|
||||
Bluetooth protocol stack did not effectively check the length of the
|
||||
incoming HCI data. Causes memory overflow, and then the data in the
|
||||
memory is overwritten, and may even cause arbitrary code execution.
|
||||
|
||||
This has been fixed in main for v2.6.0
|
||||
|
||||
- `CVE-2021-3581 <http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-3581>`_
|
||||
|
||||
- `Zephyr project bug tracker GHSA-8q65-5gqf-fmw5
|
||||
<https://github.com/zephyrproject-rtos/zephyr/security/advisories/GHSA-8q65-5gqf-fmw5>`_
|
||||
|
||||
- `PR 35935 fix for main
|
||||
<https://github.com/zephyrproject-rtos/zephyr/pull/35935>`_
|
||||
|
||||
- `PR 35984 fix for 2.5
|
||||
<https://github.com/zephyrproject-rtos/zephyr/pull/35984>`_
|
||||
|
||||
- `PR 35985 fix for 2.4
|
||||
<https://github.com/zephyrproject-rtos/zephyr/pull/35985>`_
|
||||
|
||||
- `PR 35985 fix for 1.14
|
||||
<https://github.com/zephyrproject-rtos/zephyr/pull/35985>`_
|
||||
|
||||
CVE-2021-3625
|
||||
-------------
|
||||
|
||||
Buffer overflow in Zephyr USB DFU DNLOAD
|
||||
|
||||
This has been fixed in main for v2.6.0
|
||||
|
||||
- `CVE-2021-3625 <http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-3625>`_
|
||||
|
||||
- `Zephyr project bug tracker GHSA-c3gr-hgvr-f363
|
||||
<https://github.com/zephyrproject-rtos/zephyr/security/advisories/GHSA-c3gr-hgvr-f363>`_
|
||||
|
||||
- `PR 36694 fix for main
|
||||
<https://github.com/zephyrproject-rtos/zephyr/pull/36694>`_
|
||||
|
||||
@@ -49,8 +49,8 @@ add_subdirectory_ifdef(CONFIG_PM_CPU_OPS pm_cpu_ops)
|
||||
|
||||
add_subdirectory_ifdef(CONFIG_FLASH_HAS_DRIVER_ENABLED flash)
|
||||
add_subdirectory_ifdef(CONFIG_SERIAL_HAS_DRIVER serial)
|
||||
add_subdirectory_ifdef(CONFIG_BT bluetooth)
|
||||
add_subdirectory_ifdef(CONFIG_NETWORKING net)
|
||||
add_subdirectory_ifdef(CONFIG_BT_DRIVERS bluetooth)
|
||||
add_subdirectory_ifdef(CONFIG_NET_DRIVERS net)
|
||||
add_subdirectory_ifdef(CONFIG_NET_L2_ETHERNET ethernet)
|
||||
add_subdirectory_ifdef(CONFIG_ENTROPY_HAS_DRIVER entropy)
|
||||
add_subdirectory_ifdef(CONFIG_SYS_CLOCK_EXISTS timer)
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.