Compare commits


6 Commits

Author SHA1 Message Date
Anas Nashif
77908ed995 doc: read version from Makefile
Change-Id: I6edb5ee53cdff1c687e97663c93ddaa3b09a9288
Signed-off-by: Anas Nashif <anas.nashif@intel.com>
2017-11-19 10:06:21 -05:00
Anas Nashif
1d6259edc9 doc: add a new template variable
Signify whether the documentation is for a release or the
development version from master.

Signed-off-by: Anas Nashif <anas.nashif@intel.com>
2017-11-19 09:59:15 -05:00
Anas Nashif
12784125b9 doc: support official website theme
Jira: ZEP-1512
Signed-off-by: Anas Nashif <anas.nashif@intel.com>
2017-05-19 21:25:00 -04:00
Anas Nashif
bc038e5e78 doc: use ReadTheDocs.org theme if available
Instead of including the rtd theme in zephyr, use an installed instance;
if nothing is installed, use the default zephyr theme.

Change-Id: Ife4bd878e3f879cf59ecf2bc5d186a531a3bf1b6
Signed-off-by: Anas Nashif <anas.nashif@intel.com>
2017-05-19 21:25:00 -04:00
Anas Nashif
68cf168798 doc: do not put version in breadcrumbs for daily docs
Signed-off-by: Anas Nashif <anas.nashif@intel.com>
2017-05-19 21:25:00 -04:00
David B. Kinder
69bddff823 doc: identify release version in generated docs
It's not obvious which kernel release version you're reading about in the
documentation.  Add the version info in the breadcrumb header (instead
of "Home / Docs / Subsystems /" show as
"Home / Docs / 1.8 / Subsystems /").

(Depends on docs-theme PR-9, but can be merged now with no ill-effect)

Signed-off-by: David B. Kinder <david.b.kinder@intel.com>
2017-05-19 21:25:00 -04:00
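Taken together, the documentation commits above (theme fallback, release-vs-development flag, version in the breadcrumbs) all revolve around a handful of Sphinx conf.py settings. The following is a minimal editorial sketch of how those pieces typically fit together; the names RELEASE, is_release and docs_title are illustrative assumptions, not necessarily what the Zephyr conf.py uses.

# Illustrative Sphinx conf.py fragment combining the ideas in the commits above.
# Variable names (RELEASE, is_release, docs_title) are assumptions for this sketch.
import os

# doc: read version from Makefile -- simplified here to an environment variable.
version = os.environ.get("KERNELVERSION", "1.8")

# doc: use ReadTheDocs.org theme if available, otherwise fall back to the zephyr theme.
try:
    import sphinx_rtd_theme
    html_theme = "sphinx_rtd_theme"
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
except ImportError:
    html_theme = "zephyr"              # assumed name of the in-tree theme
    html_theme_path = ["./themes"]     # assumed location; illustrative only

# doc: add a new template variable / identify release version in generated docs.
is_release = os.environ.get("RELEASE", "") == "1"
html_context = {
    "is_release": is_release,                                          # release vs. development build
    "docs_title": ("Docs / %s" % version) if is_release else "Docs",   # breadcrumb text
}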
13479 changed files with 746445 additions and 1263691 deletions

View File

@@ -1,20 +1,18 @@
--mailback
--no-tree
--emacs
--summary-file
--show-types
--max-line-length=80
--min-conf-desc-length=1
--typedefsfile=scripts/checkpatch/typedefsfile
--ignore BRACES
--ignore PRINTK_WITHOUT_KERN_LEVEL
--ignore SPLIT_STRING
--ignore VOLATILE
--ignore CONFIG_EXPERIMENTAL
--ignore PREFER_KERNEL_TYPES
--ignore AVOID_EXTERNS
--ignore NETWORKING_BLOCK_COMMENT_STYLE
--ignore DATE_TIME
--ignore MINMAX
--ignore CONST_STRUCT
--ignore FILE_PATH_CHANGES
--exclude ext
--exclude net/ip/contiki

View File

@@ -1,151 +0,0 @@
# SPDX-License-Identifier: GPL-2.0
#
# clang-format configuration file. Intended for clang-format >= 4.
#
# For more information, see:
#
# Documentation/process/clang-format.rst
# https://clang.llvm.org/docs/ClangFormat.html
# https://clang.llvm.org/docs/ClangFormatStyleOptions.html
#
---
AccessModifierOffset: -4
AlignAfterOpenBracket: Align
AlignConsecutiveAssignments: false
AlignConsecutiveDeclarations: false
#AlignEscapedNewlines: Left # Unknown to clang-format-4.0
AlignOperands: true
AlignTrailingComments: false
AllowAllParametersOfDeclarationOnNextLine: false
AllowShortBlocksOnASingleLine: false
AllowShortCaseLabelsOnASingleLine: false
AllowShortFunctionsOnASingleLine: None
AllowShortIfStatementsOnASingleLine: false
AllowShortLoopsOnASingleLine: false
AlwaysBreakAfterDefinitionReturnType: None
AlwaysBreakAfterReturnType: None
AlwaysBreakBeforeMultilineStrings: false
AlwaysBreakTemplateDeclarations: false
BinPackArguments: true
BinPackParameters: true
BraceWrapping:
AfterClass: false
AfterControlStatement: false
AfterEnum: false
AfterFunction: true
AfterNamespace: true
AfterObjCDeclaration: false
AfterStruct: false
AfterUnion: false
#AfterExternBlock: false # Unknown to clang-format-5.0
BeforeCatch: false
BeforeElse: false
IndentBraces: false
#SplitEmptyFunction: true # Unknown to clang-format-4.0
#SplitEmptyRecord: true # Unknown to clang-format-4.0
#SplitEmptyNamespace: true # Unknown to clang-format-4.0
BreakBeforeBinaryOperators: None
BreakBeforeBraces: Custom
#BreakBeforeInheritanceComma: false # Unknown to clang-format-4.0
BreakBeforeTernaryOperators: false
BreakConstructorInitializersBeforeComma: false
#BreakConstructorInitializers: BeforeComma # Unknown to clang-format-4.0
BreakAfterJavaFieldAnnotations: false
BreakStringLiterals: false
ColumnLimit: 80
CommentPragmas: '^ IWYU pragma:'
#CompactNamespaces: false # Unknown to clang-format-4.0
ConstructorInitializerAllOnOneLineOrOnePerLine: false
ConstructorInitializerIndentWidth: 8
ContinuationIndentWidth: 8
Cpp11BracedListStyle: false
DerivePointerAlignment: false
DisableFormat: false
ExperimentalAutoDetectBinPacking: false
#FixNamespaceComments: false # Unknown to clang-format-4.0
# Taken from:
# git grep -h '^#define [^[:space:]]*for_each[^[:space:]]*(' include/ \
# | sed "s,^#define \([^[:space:]]*for_each[^[:space:]]*\)(.*$, - '\1'," \
# | sort | uniq
ForEachMacros:
- 'FOR_EACH'
- 'for_each_linux_bus'
- 'for_each_linux_driver'
- 'metal_bitmap_for_each_clear_bit'
- 'metal_bitmap_for_each_set_bit'
- 'metal_for_each_page_size_down'
- 'metal_for_each_page_size_up'
- 'metal_list_for_each'
- 'RB_FOR_EACH'
- 'RB_FOR_EACH_CONTAINER'
- 'SYS_DLIST_FOR_EACH_CONTAINER'
- 'SYS_DLIST_FOR_EACH_CONTAINER_SAFE'
- 'SYS_DLIST_FOR_EACH_NODE'
- 'SYS_DLIST_FOR_EACH_NODE_SAFE'
- 'SYS_SFLIST_FOR_EACH_CONTAINER'
- 'SYS_SFLIST_FOR_EACH_CONTAINER_SAFE'
- 'SYS_SFLIST_FOR_EACH_NODE'
- 'SYS_SFLIST_FOR_EACH_NODE_SAFE'
- 'SYS_SLIST_FOR_EACH_CONTAINER'
- 'SYS_SLIST_FOR_EACH_CONTAINER_SAFE'
- 'SYS_SLIST_FOR_EACH_NODE'
- 'SYS_SLIST_FOR_EACH_NODE_SAFE'
- '_WAIT_Q_FOR_EACH'
- 'Z_GENLIST_FOR_EACH_CONTAINER'
- 'Z_GENLIST_FOR_EACH_CONTAINER_SAFE'
- 'Z_GENLIST_FOR_EACH_NODE'
- 'Z_GENLIST_FOR_EACH_NODE_SAFE'
#IncludeBlocks: Preserve # Unknown to clang-format-5.0
IncludeCategories:
- Regex: '.*'
Priority: 1
IncludeIsMainRegex: '(Test)?$'
IndentCaseLabels: false
#IndentPPDirectives: None # Unknown to clang-format-5.0
IndentWidth: 8
IndentWrappedFunctionNames: false
JavaScriptQuotes: Leave
JavaScriptWrapImports: true
KeepEmptyLinesAtTheStartOfBlocks: false
MacroBlockBegin: ''
MacroBlockEnd: ''
MaxEmptyLinesToKeep: 1
NamespaceIndentation: Inner
#ObjCBinPackProtocolList: Auto # Unknown to clang-format-5.0
ObjCBlockIndentWidth: 8
ObjCSpaceAfterProperty: true
ObjCSpaceBeforeProtocolList: true
# Taken from git's rules
#PenaltyBreakAssignment: 10 # Unknown to clang-format-4.0
PenaltyBreakBeforeFirstCallParameter: 30
PenaltyBreakComment: 10
PenaltyBreakFirstLessLess: 0
PenaltyBreakString: 10
PenaltyExcessCharacter: 100
PenaltyReturnTypeOnItsOwnLine: 60
PointerAlignment: Right
ReflowComments: false
SortIncludes: false
#SortUsingDeclarations: false # Unknown to clang-format-4.0
SpaceAfterCStyleCast: false
SpaceAfterTemplateKeyword: true
SpaceBeforeAssignmentOperators: true
#SpaceBeforeCtorInitializerColon: true # Unknown to clang-format-5.0
#SpaceBeforeInheritanceColon: true # Unknown to clang-format-5.0
SpaceBeforeParens: ControlStatements
#SpaceBeforeRangeBasedForLoopColon: true # Unknown to clang-format-5.0
SpaceInEmptyParentheses: false
SpacesBeforeTrailingComments: 1
SpacesInAngles: false
SpacesInContainerLiterals: false
SpacesInCStyleCastParentheses: false
SpacesInParentheses: false
SpacesInSquareBrackets: false
Standard: Cpp03
TabWidth: 8
UseTab: Always
...
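The ForEachMacros list above is documented as being generated with the git grep / sed / sort pipeline quoted in the comment. The same scan can be expressed in Python; this is a rough editorial sketch, not a script from the Zephyr tree, and it mirrors the quoted (case-sensitive) pipeline.

# Rough Python equivalent of the "git grep | sed | sort | uniq" pipeline documented
# in the ForEachMacros comment above: collect for_each-style macro names from headers.
# Add re.IGNORECASE to also pick up the uppercase *_FOR_EACH_* entries in the list.
import os
import re

pattern = re.compile(r"^#define (\S*for_each\S*)\(")
names = set()
for root, _dirs, files in os.walk("include/"):
    for name in files:
        if not name.endswith(".h"):
            continue
        with open(os.path.join(root, name), errors="ignore") as f:
            for line in f:
                m = pattern.match(line)
                if m:
                    names.add(m.group(1))
for macro in sorted(names):
    print("  - '%s'" % macro)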

View File

@@ -1,31 +0,0 @@
codecov:
notify:
require_ci_to_pass: yes
coverage:
precision: 2
round: down
range: "70...100"
status:
project: yes
patch: yes
changes: no
#ignore:
# - "tests/**/*"
# - "samples/**/*"
# - "ext/hal/**/*"
parsers:
gcov:
branch_detection:
conditional: yes
loop: yes
method: no
macro: no
comment:
layout: "reach, diff, flags, files, footer"
behavior: default
require_changes: no
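The range, precision and round settings above control how Codecov displays and colors a coverage percentage (red near 70%, green at 100%, two decimal places, rounded down). The snippet below is a toy illustration of what those three knobs mean, not Codecov's actual implementation.

# Toy illustration of the "precision: 2, round: down, range: 70...100" settings above.
import math

def display_coverage(covered_lines, total_lines, precision=2, low=70.0, high=100.0):
    pct = 100.0 * covered_lines / total_lines
    pct = math.floor(pct * 10 ** precision) / 10 ** precision   # round: down
    # Position within the 70...100 range drives the red-to-green color gradient.
    position = min(max((pct - low) / (high - low), 0.0), 1.0)
    return pct, position

print(display_coverage(8731, 10842))   # -> (80.52, 0.35...)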

View File

@@ -1,56 +0,0 @@
# EditorConfig: https://editorconfig.org/
# top-most EditorConfig file
root = true
# All (Defaults)
[*]
charset = utf-8
end_of_line = lf
insert_final_newline = true
trim_trailing_whitespace = true
# C
[*.{c,h}]
indent_style = tab
indent_size = 8
# Python
[*.py]
indent_style = space
indent_size = 4
# Perl
[*.pl]
indent_style = tab
indent_size = 8
# YAML
[*.yml]
indent_style = space
indent_size = 2
# Shell Script
[*.sh]
indent_style = space
indent_size = 4
# Windows Command Script
[*.cmd]
end_of_line = crlf
indent_style = tab
indent_size = 8
# Valgrind Suppression File
[*.supp]
indent_style = space
indent_size = 3
# CMake
[{CMakeLists.txt,*.cmake}]
indent_style = space
indent_size = 2
# Makefile
[Makefile]
indent_style = tab
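Editors resolve these settings per file by walking up from the file to the nearest root = true .editorconfig. A small sketch of the same lookup using the editorconfig-core-py package ("pip install EditorConfig"); the file path is hypothetical and only meant to show the query.

# Sketch: query the effective EditorConfig settings for a file, the same way an
# editor plugin would. Assumes the "EditorConfig" (editorconfig-core-py) package.
import os
from editorconfig import get_properties, EditorConfigError

try:
    props = get_properties(os.path.abspath("drivers/serial/uart_sample.c"))  # hypothetical path
except EditorConfigError as err:
    raise SystemExit(err)

print(props.get("indent_style"), props.get("indent_size"))   # expected: tab 8 for *.c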

View File

@@ -1,39 +0,0 @@
---
name: Bug report
about: Create a report to help us improve Zephyr
title: ''
labels: bug
assignees: ''
---
**Describe the bug**
A clear and concise description of what the bug is.
What have you tried to diagnose or work around this issue?
**To Reproduce**
Steps to reproduce the behavior:
1. mkdir build; cd build
2. cmake -DBOARD=board\_xyz
3. make
4. See error
**Expected behavior**
A clear and concise description of what you expected to happen.
**Impact**
What impact does this issue have on your progress (e.g., annoyance, showstopper)?
**Screenshots or console output**
If applicable, add a screenshot (drag-and-drop an image), or console logs
(cut-and-paste text and put a code fence (\`\`\`) before and after) to help
explain the issue.
**Environment (please complete the following information):**
- OS: (e.g. Linux, MacOS, Windows)
- Toolchain (e.g Zephyr SDK, ...)
- Commit SHA or Version used
**Additional context**
Add any other context about the problem here.

View File

@@ -1,20 +0,0 @@
---
name: Enhancement
about: Suggest enhancements to existing features
title: ''
labels: enhancement
assignees: ''
---
**Is your enhancement proposal related to a problem? Please describe.**
A clear and concise description of what the problem is.
**Describe the solution you'd like**
A clear and concise description of what you want to happen.
**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.
**Additional context**
Add any other context or graphics (drag-and-drop an image) about the feature request here.

View File

@@ -1,20 +0,0 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: feature request
assignees: ''
---
**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is.
**Describe the solution you'd like**
A clear and concise description of what you want to happen.
**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.
**Additional context**
Add any other context or graphics (drag-and-drop an image) about the feature request here.

View File

@@ -1,51 +0,0 @@
---
name: RFC / Proposal
about: Submit an RFC / Proposal
title: ''
labels: RFC
assignees: ''
---
## Introduction
This section targets end users, TSC members, maintainers and anyone else that might
need a quick explanation of your proposed change.
### Problem description
Why do we want this change and what problem are we trying to address?
### Proposed change
A brief summary of the proposed change - the 10,000 ft view on what it will
change once this change is implemented.
## Detailed RFC
In this section of the document the target audience is the dev team. Upon
reading this section each engineer should have a rather clear picture of what
needs to be done in order to implement the described feature.
### Proposed change (Detailed)
This section is freeform - you should describe your change in as much detail
as possible. Please also make sure to include any context or background info here.
For example, do we have existing components which can be reused or altered?
By reading this section, each team member should be able to know what exactly
you're planning to change and how.
### Dependencies
Highlight how the change may affect the rest of the project (new components,
modifications in other areas), or other teams/projects.
### Concerns and Unresolved Questions
List any concerns, unknowns, and generally unresolved questions etc.
## Alternatives
List any alternatives considered, and the reasons for choosing this option
over them.

39
.gitignore vendored
View File

@@ -7,45 +7,24 @@
*.swp
*.swo
*~
build*/
!doc/guides/build
cscope.*
.dir
/*.patch
# The .cache directory will be used to cache toolchain capabilities if
# no suitable out-of-tree directory is found.
.cache
outdir
outdir-*
scripts/basic/fixdep
scripts/gen_idt/gen_idt
scripts/gen_offset_header/gen_offset_header
scripts/kconfig/conf
scripts/kconfig/mconf
scripts/kconfig/zconf.hash.c
scripts/kconfig/zconf.lex.c
scripts/kconfig/zconf.tab.c
doc/_build
doc/doxygen
doc/xml
doc/html
doc/boards
doc/samples
doc/latex
doc/themes/zephyr-docs-theme
sanity-out*
bsim_bt_out
sanity-out/
scripts/grub
doc/reference/kconfig/*.rst
doc/doc.warnings
.*project
.settings
.envrc
.vscode
hide-defaults-note
# Tag files
GPATH
GRTAGS
GTAGS
TAGS
doc/reference/kconfig/CONFIG_*
doc/reference/kconfig/index.rst
tags
.idea

View File

@@ -1,57 +0,0 @@
# All these sections are optional, edit this file as you like.
[general]
ignore=title-trailing-punctuation, T3, title-max-length, T1, body-hard-tab, B3, B1
# verbosity should be a value between 1 and 3; the commandline -v flags take precedence over this
verbosity = 3
# By default gitlint will ignore merge commits. Set to 'false' to disable.
ignore-merge-commits=true
# Enable debug mode (prints more output). Disabled by default
debug = false
# Set the extra-path where gitlint will search for user defined rules
# See http://jorisroovers.github.io/gitlint/user_defined_rules for details
extra-path=scripts/gitlint
[title-max-length-no-revert]
line-length=75
[body-min-line-count]
min-line-count=1
[body-max-line-count]
max-line-count=200
[title-starts-with-subsystem]
regex = ^(?!subsys:)(([^:]+):)(\s([^:]+):)*\s(.+)$
[title-must-not-contain-word]
# Comma-separated list of words that should not occur in the title. Matching is case
# insensitive. It's fine if the keyword occurs as part of a larger word (so "WIPING"
# will not cause a violation, but "WIP: my title" will).
words=wip
[title-match-regex]
# python like regex (https://docs.python.org/2/library/re.html) that the
# commit-msg title must be matched to.
# Note that the regex can contradict with other rules if not used correctly
# (e.g. title-must-not-contain-word).
#regex=^US[0-9]*
[max-line-length-with-exceptions]
# B1 = body-max-line-length
line-length=72
[body-min-length]
min-length=3
[body-is-missing]
# Whether to ignore this rule on merge commits (which typically only have a title)
# default = True
ignore-merge-commits=false
[body-changed-file-mention]
# List of files that need to be explicitly mentioned in the body when they are changed
# This is useful for when developers often erroneously edit certain files or git submodules.
# By specifying this rule, developers can only change the file when they explicitly reference
# it in the commit message.
#files=gitlint/rules.py,README.md
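The [title-starts-with-subsystem] regex above enforces commit titles of the form "area: [subarea:] description" while rejecting titles that start with the generic "subsys:" prefix. It can be checked against sample titles directly with Python's re module; the sample titles below are made up for illustration.

# Quick check of the title-starts-with-subsystem regex defined above.
import re

title_re = re.compile(r"^(?!subsys:)(([^:]+):)(\s([^:]+):)*\s(.+)$")

for title in ("drivers: serial: fix overrun handling",   # accepted
              "doc: read version from Makefile",          # accepted
              "fix a bug",                                 # no subsystem prefix, rejected
              "subsys: fix a bug"):                        # generic "subsys:" prefix, rejected
    print("OK  " if title_re.match(title) else "FAIL", title)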

4
.gitreview Normal file
View File

@@ -0,0 +1,4 @@
[gerrit]
host=gerrit.zephyrproject.org
port=29418
project=zephyr.git

View File

@@ -3,7 +3,7 @@
#
# FIXME: all these should match the relative filename
#
^(?P<filename>([\-:\\/\w\.])+[/\\]doc[/\\]reference[/\\]bluetooth.rst):(?P<lineno>[0-9]+): WARNING: Invalid definition: Expected identifier in nested name. \[error at [0-9]+]$
^(?P<filename>[-._/\w]+/doc/api/bluetooth.rst):(?P<lineno>[0-9]+): WARNING: Invalid definition: Expected identifier in nested name. \[error at [0-9]+]$
^[ \t]*$
^[ \t]*\^$
^(?P=filename):(?P=lineno): WARNING: Invalid definition: Expected identifier in nested name. \[error at [0-9]+]$
@@ -15,7 +15,7 @@
#
# bt_gatt_discover_params unnamed struct definition
#
^(?P<filename>([\-:\\/\w\.])+[/\\]doc[/\\]reference[/\\]bluetooth.rst):(?P<lineno>[0-9]+): WARNING: Invalid definition: Expected identifier in nested name. \[error at [0-9]+]
^(?P<filename>[-._/\w]+/doc/api/bluetooth.rst):(?P<lineno>[0-9]+): WARNING: Invalid definition: Expected identifier in nested name. \[error at [0-9]+]
^[ \t]*
^[ \t]*\^
^(?P=filename):(?P=lineno): WARNING: Invalid definition: Expected identifier in nested name. \[error at [0-9]+]
@@ -27,7 +27,7 @@
#
# Bluetooth GATT unnamed struct definition
#
^(?P<filename>([\-:\\/\w\.])+[/\\]doc[/\\]reference[/\\]bluetooth.rst):(?P<lineno>[0-9]+): WARNING: Invalid definition: Expected identifier in nested name. \[error at [0-9]+]
^(?P<filename>[-._/\w]+/doc/api/bluetooth.rst):(?P<lineno>[0-9]+): WARNING: Invalid definition: Expected identifier in nested name. \[error at [0-9]+]
^[ \t]*
^[ \t]*\^
^(?P=filename):(?P=lineno): WARNING: Invalid definition: Expected identifier in nested name. \[error at [0-9]+]
@@ -36,15 +36,3 @@
^(?P=filename):(?P=lineno): WARNING: Invalid definition: Expected end of definition. \[error at [0-9]+]
^.*bt_gatt_read_params.__unnamed__.*
^[- \t]*\^
#
# Bluetooth mesh unnamed struct definition
#
^(?P<filename>([\-:\\/\w\.])+[/\\]doc[/\\]reference[/\\]bluetooth.rst):(?P<lineno>[0-9]+): WARNING: Invalid definition: Expected identifier in nested name. \[error at [0-9]+]
^[ \t]*
^[ \t]*\^
^(?P=filename):(?P=lineno): WARNING: Invalid definition: Expected identifier in nested name. \[error at [0-9]+]
^[ \t]*
^[ \t]*\^
^(?P=filename):(?P=lineno): WARNING: Invalid definition: Expected end of definition. \[error at [0-9]+]
^.*bt_mesh_model.__unnamed__.*
^[- \t]*\^

View File

@@ -1,15 +0,0 @@
#
# Display
#
#
# include
#
^(?P<filename>([\-:\\/\w\.])+[/\\]doc[/\\]reference[/\\]display_api.rst):(?P<lineno>[0-9]+): WARNING: Invalid definition: Expected identifier in nested name. \[error at [0-9]+]
^[ \t]*
^[ \t]*\^
^(?P=filename):(?P=lineno): WARNING: Invalid definition: Expected identifier in nested name. \[error at [0-9]+]
^[ \t]*
^[ \t]*\^
^(?P=filename):(?P=lineno): WARNING: Invalid definition: Expected end of definition. \[error at [0-9]+]
^.*mb_image.__unnamed__
^[- \t]*\^

View File

@@ -1,6 +0,0 @@
#
^(?P<filename>([\-:\\/\w\.])+[/\\]doc[/\\]reference[/\\]file_system[/\\]index.rst):(?P<lineno>[0-9]+): WARNING: Duplicate declaration.
#
^(?P<filename>([\-:\\/\w\.])+[/\\]doc[/\\]reference[/\\]peripherals[/\\]dma.rst):(?P<lineno>[0-9]+): WARNING: Duplicate declaration.
#
^(?P<filename>([\-:\\/\w\.])+[/\\]doc[/\\]reference[/\\]peripherals[/\\]sensor.rst):(?P<lineno>[0-9]+): WARNING: Duplicate declaration.

View File

@@ -0,0 +1,18 @@
#
# KERNELVERSION is not defined in local builds; that warning is harmless,
# so it can be ignored
#
^.*/Kconfig.zephyr:[0-9]+: warning: The symbol KERNELVERSION references the non-existent environment variable KERNELVERSION.*
#
# Documentation generation, early message
#
^cd .* && doxygen doc/doxygen.config
^srctree=.* SRCARCH=\w+ python scripts/genrest/genrest.py .*$
# This cuts the sphinx-build command line; it has to be a separate pattern because
# the KERNELVERSION line removed above sits in the middle, so a full regex won't match
^sphinx-build -t \w+ -b html -d [-._/\w]+ -q \. .*
#
# Documentation generation, footer message
#
^[ \t]*
^Build finished. The HTML pages are in [-._/\w]+
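Each non-comment line in these .known-issues files is treated as a regular expression that whitelists known-noisy build output (consecutive expressions can describe a multi-line block). The KERNELVERSION pattern above, for instance, can be exercised directly with Python's re module; the sample warning line is made up for illustration.

# Exercising the KERNELVERSION filter pattern above on a made-up warning line.
import re

pat = re.compile(r"^.*/Kconfig.zephyr:[0-9]+: warning: The symbol KERNELVERSION "
                 r"references the non-existent environment variable KERNELVERSION.*")

line = ("/home/user/zephyr/Kconfig.zephyr:42: warning: The symbol KERNELVERSION "
        "references the non-existent environment variable KERNELVERSION")
print(bool(pat.match(line)))   # True -> the line would be filtered out as a known issue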

View File

@@ -1,15 +0,0 @@
#
# Display
#
#
# include
#
^(?P<filename>([\-:\\/\w\.])+[/\\]doc[/\\]reference[/\\]misc_api.rst):(?P<lineno>[0-9]+): WARNING: Invalid definition: Expected identifier in nested name. \[error at [0-9]+]
^[ \t]*
^[ \t]*\^
^(?P=filename):(?P=lineno): WARNING: Invalid definition: Expected identifier in nested name. \[error at [0-9]+]
^[ \t]*
^[ \t]*\^
^(?P=filename):(?P=lineno): WARNING: Invalid definition: Expected end of definition. \[error at [0-9]+]
^.*json_obj_descr.__unnamed__
^[- \t]*\^

View File

@@ -1,70 +0,0 @@
#
# Networking
#
#
# include/net/net_ip.h warnings
#
^(?P<filename>([\-:\\/\w\.])+[/\\]doc[/\\]reference[/\\]networking.rst):(?P<lineno>[0-9]+): WARNING: Invalid definition: Expected identifier in nested name. \[error at [0-9]+]
^[ \t]*
^[ \t]*\^
^(?P=filename):(?P=lineno): WARNING: Invalid definition: Expected identifier in nested name. \[error at [0-9]+]
^[ \t]*
^[ \t]*\^
^(?P=filename):(?P=lineno): WARNING: Invalid definition: Expected identifier in nested name. \[error at [0-9]+]
^[ \t]*
^[ \t]*\^
^(?P=filename):(?P=lineno): WARNING: Invalid definition: Expected end of definition. \[error at [0-9]+]
^.*in[_6]+addr.in[46]_u
^[- \t]*\^
#
# include/net/net_mgmt.h
#
^(?P<filename>([\-:\\/\w\.])+[/\\]doc[/\\]reference[/\\]networking.rst):(?P<lineno>[0-9]+): WARNING: Invalid definition: Expected identifier in nested name. \[error at [0-9]+]
^[ \t]*
^[ \t]*\^
^(?P=filename):(?P=lineno): WARNING: Invalid definition: Expected identifier in nested name. \[error at [0-9]+]
^[ \t]*
^[ \t]*\^
^(?P=filename):(?P=lineno): WARNING: Invalid definition: Expected end of definition. \[error at [0-9]+]
^.*net_mgmt_event_callback.__unnamed__
^[- \t]*\^
#
# include/net/buf.h
#
^(?P<filename>([\-:\\/\w\.])+[/\\]doc[/\\]reference[/\\]networking.rst):(?P<lineno>[0-9]+): WARNING: Invalid definition: Expected identifier in nested name. \[error at [0-9]+]
^[ \t]*
^[ \t]*\^
^(?P=filename):(?P=lineno): WARNING: Invalid definition: Expected identifier in nested name. \[error at [0-9]+]
^[ \t]*
^[ \t]*\^
^(?P=filename):(?P=lineno): WARNING: Invalid definition: Expected end of definition. \[error at [0-9]+]
^.*net_buf.__unnamed__
^[- \t]*\^
#
# include/net/ieee802154.h
#
^(?P<filename>([\-:\\/\w\.])+[/\\]doc[/\\]reference[/\\]networking.rst):(?P<lineno>[0-9]+): WARNING: Invalid definition: Expected identifier in nested name. \[error at [0-9]+]
^[ \t]*
^[ \t]*\^
^(?P=filename):(?P=lineno): WARNING: Invalid definition: Expected identifier in nested name. \[error at [0-9]+]
^[ \t]*
^[ \t]*\^
^(?P=filename):(?P=lineno): WARNING: Invalid definition: Expected end of definition. \[error at [0-9]+]
^.*ieee802154_req_params.__unnamed__
^[- \t]*\^
#
# include/net/net_context.h
#
^(?P<filename>([\-:\\/\w\.])+[/\\]doc[/\\]reference[/\\]networking[/\\]net_context.rst):(?P<lineno>[0-9]+): WARNING: Invalid definition: Expected end of definition. \[error at [0-9]+]
^.*net_context.options
^[- \t]*\^
#
# include/net/net_stats.h
#
^(?P<filename>([\-:\\/\w\.])+[/\\]doc[/\\]reference[/\\]networking[/\\]net_stats.rst):(?P<lineno>[0-9]+): WARNING: Invalid definition: Expected end of definition. \[error at [0-9]+]
^.*net_stats_tc.[a-z]+
^[- \t]*\^
#
# stray duplicate definition warnings
#
^(?P<filename>([\-:\\/\w\.])+[/\\]doc[/\\]reference[/\\]networking[/\\]net_if.rst):(?P<lineno>[0-9]+): WARNING: Duplicate declaration.

View File

@@ -0,0 +1,12 @@
#
# Sensor value unnamed struct definition
#
^(?P<filename>[-._/\w]+/doc/api/io_interfaces.rst):(?P<lineno>[0-9]+): WARNING: Invalid definition: Expected identifier in nested name. \[error at [0-9]+]
^[ \t]*
^[ \t]*\^
^(?P=filename):(?P=lineno): WARNING: Invalid definition: Expected identifier in nested name. \[error at [0-9]+]
^[ \t]*
^[ \t]*\^
^(?P=filename):(?P=lineno): WARNING: Invalid definition: Expected end of definition. \[error at [0-9]+]
^.*sensor_value.__unnamed__.*
^[- \t]*\^

View File

@@ -1,7 +1,7 @@
#
# UART unnamed struct definition
#doc/api/peripherals/uart.rst
^(?P<filename>([\-:\\/\w\.])+[/\\]doc[/\\]reference[/\\]peripherals[/\\]uart.rst):(?P<lineno>[0-9]+): WARNING: Invalid definition: Expected identifier in nested name. \[error at [0-9]+]
#
^(?P<filename>[-._/\w]+/doc/api/io_interfaces.rst):(?P<lineno>[0-9]+): WARNING: Invalid definition: Expected identifier in nested name. \[error at [0-9]+]
^[ \t]*
^[ \t]*\^
^(?P=filename):(?P=lineno): WARNING: Invalid definition: Expected identifier in nested name. \[error at [0-9]+]
@@ -13,19 +13,3 @@
^(?P=filename):(?P=lineno): WARNING: Invalid definition: Expected end of definition. \[error at [0-9]+]
^.*uart_device_config.__unnamed__.*
^[- \t]*\^
#
^(?P<filename>([\-:\\/\w\.])+[/\\]doc[/\\]reference[/\\]peripherals[/\\]uart.rst):(?P<lineno>[0-9]+): WARNING: Invalid definition: Expected identifier in nested name. \[error at [0-9]+]
^[ \t]*
^[ \t]*\^
^(?P=filename):(?P=lineno): WARNING: Invalid definition: Expected identifier in nested name. \[error at [0-9]+]
^[ \t]*
^[ \t]*\^
^(?P=filename):(?P=lineno): WARNING: Invalid definition: Expected identifier in nested name. \[error at [0-9]+]
^[ \t]*
^[ \t]*\^
^(?P=filename):(?P=lineno): WARNING: Invalid definition: Expected identifier in nested name. \[error at [0-9]+]
^[ \t]*
^[ \t]*\^
^(?P=filename):(?P=lineno): WARNING: Invalid definition: Expected end of definition. \[error at [0-9]+]
^.*uart_event.data
^[- \t]*\^

View File

@@ -1,6 +1,4 @@
#
# When filtering output of the build process, ignore lines that don't
# provide any information that helps the invoker tell if there was an
# error.
# Make blurbs
#
^make: (Entering|Leaving) directory .*

View File

@@ -1,11 +0,0 @@
#
# When executing test cases, ignore the following messages as they are
# not to be considered hard errors.
#
# Block line when test case cannot run in the HW due to server or connection issues
#
^BLCK0/[-a-z0-9:]+ (.+)#test @[^/]+/[^:]+:[^:]+: evaluation blocked(.*)$
#
# Block line when there is an issue with the YKUSH serial connection
#
^BLCK0/[-a-z0-9:]+ (.+)#test @[^/]+/(?P<board>[^:]+):[^:]+: exception: 400: (?P=board): Cannot find YKUSH serial '[A-Z0-9]+'$

View File

@@ -1,18 +0,0 @@
#
# When executing test cases under TCF, ignore the following messages
# as they are not to be considered hard errors.
#
# TCF is run under make for taking advantage of the jobserver; when
# the testcase execution fails, make will complain, which we can
# ignore ('sommersault' was the old name of the target).
#
^/tmp/tcf-[a-zA-Z0-9]+.mk:[0-9]+: recipe for target ('tcf-jobserver-run'|'sommersault') failed$
#
# More of the same
#
^make: \*\*\* \[(tcf-jobserver-run|sommersault)\] Error 1$
#
# TCF's summary line. We don't need to consider it to determine if the
# run failed or passed.
#
^[A-Z]+0/\S+:\s+\S+\s+@\S+: [0-9]+ tests \([0-9]+ passed, [0-9]+ failed, [0-9]+ blocked, [0-9]+ skipped\).*$

View File

@@ -1,4 +0,0 @@
#
# Skip line when test case is eliminated due to filters
#
^SKIP0/\S+\s+\S+: No targets can be used \(all [0-9]+ selected from [0-9]+ available eliminated by testcase filtering\)$

View File

@@ -12,21 +12,3 @@ Carles Cufi <carles.cufi@nordicsemi.no> <carles.cufi@nordicsemi.no>
Kuo-Lang Tseng <kuo-lang.tseng@intel.com> <kuo-lang.tseng@intel.com>
Gerardo Aceves <gerardo.aceves@intel.com> <gerardo.aceves@intel.com>
Evan Couzens <evanx.couzens@intel.com> <evanx.couzens@intel.com>
Lei Liu <lei.a.liu@intel.com> <lei.a.liu@intel.com>
Douglas Su <d0u9.su@outlook.com> <d0u9.su@outlook.com>
Keren Siman-Tov <keren.siman-tov@intel.com> <keren.siman-tov@intel.com>
Naga Raja Rao Tulasi <tulasi.r@tcs.com> <tulasi.r@tcs.com>
Felipe Neves <ryukokki.felipe@gmail.com> <ryukokki.felipe@gmail.com>
Amir Kaplan <amir.kaplan@intel.com> <amir.kaplan@intel.com>
Anas Nashif <anas.nashif@intel.com> <anas.nashif@intel.com>
Ruud Derwig <Ruud.Derwig@synopsys.com> <Ruud.Derwig@synopsys.com>
Flavio Arieta Netto <flavio@exati.com.br>
Nishikant Nayak <nishikantax.nayak@intel.com>
Justin Watson <jwatson5@gmail.com>
Johann Fischer <j.fischer@phytec.de>
Jun Li <jun.r.li@intel.com>
Xiaorui Hu <xiaorui.hu@linaro.org>
Yannis Damigos <giannis.damigos@gmail.com> <ydamigos@iccs.gr>
Vinayak Kariappa Chettimada <vinayak.kariappa.chettimada@nordicsemi.no> <vinayak.kariappa.chettimada@nordicsemi.no> <vich@nordicsemi.no> <vinayak.kariappa@gmail.com>
Sean Nyekjaer <sean@geanix.com> <sean.nyekjaer@prevas.dk>
Sean Nyekjaer <sean@geanix.com> <sean@nyekjaer.dk>

View File

@@ -1,75 +0,0 @@
language: c
compiler: gcc
env:
global:
- ZEPHYR_SDK_INSTALL_DIR=/opt/sdk/zephyr-sdk-0.10.3
- ZEPHYR_TOOLCHAIN_VARIANT=zephyr
- MATRIX_BUILDS="5"
matrix:
- MATRIX_BUILD="1"
- MATRIX_BUILD="2"
- MATRIX_BUILD="3"
- MATRIX_BUILD="4"
- MATRIX_BUILD="5"
build:
cache: false
cache_dir_list:
- ${SHIPPABLE_BUILD_DIR}/ccache
pre_ci_boot:
image_name: zephyrprojectrtos/ci
image_tag: v0.9.1
pull: true
options: "-e HOME=/home/buildslave --privileged=true --tty --net=bridge --user buildslave"
ci:
- export CCACHE_DIR=${SHIPPABLE_BUILD_DIR}/ccache/.ccache
- >
if [ "$IS_PULL_REQUEST" = "true" ]; then
./scripts/ci/run_ci.sh -c -b ${PULL_REQUEST_BASE_BRANCH} -r origin -m ${MATRIX_BUILD} -M ${MATRIX_BUILDS} -p ${PULL_REQUEST};
else
./scripts/ci/run_ci.sh -c -b ${BRANCH} -r origin -m ${MATRIX_BUILD} -M ${MATRIX_BUILDS};
fi;
- ccache -s
on_failure:
- >
if [ "$IS_PULL_REQUEST" = "true" ]; then
./scripts/ci/run_ci.sh -f -b ${PULL_REQUEST_BASE_BRANCH} -r origin -m ${MATRIX_BUILD} -M ${MATRIX_BUILDS} -p ${PULL_REQUEST};
else
./scripts/ci/run_ci.sh -f -b ${BRANCH} -r origin -m ${MATRIX_BUILD} -M ${MATRIX_BUILDS};
fi;
on_success:
- >
if [ "$IS_PULL_REQUEST" = "true" ]; then
./scripts/ci/run_ci.sh -s -b ${PULL_REQUEST_BASE_BRANCH} -r origin -m ${MATRIX_BUILD} -M ${MATRIX_BUILDS} -p ${PULL_REQUEST};
else
./scripts/ci/run_ci.sh -s -b ${BRANCH} -r origin -m ${MATRIX_BUILD} -M ${MATRIX_BUILDS};
fi;
branches:
only:
- master
- v*-branch
- topic-*
integrations:
notifications:
- integrationName: slack_integration
type: slack
recipients:
- "#ci"
branches:
only:
- master
on_success: never
on_failure: never
- integrationName: email
type: email
recipients:
- builds@zephyrproject.org
branches:
only:
- master
on_success: never
on_failure: always
on_pull_request: never
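The MATRIX_BUILD / MATRIX_BUILDS pair above splits the test run into five shards, one per CI container. The actual selection logic lives in scripts/ci/run_ci.sh (not shown in this diff); the sketch below only illustrates the general "take every Nth item" sharding idea under that assumption.

# Illustration of splitting a test list across MATRIX_BUILDS shards; the real
# selection logic lives in scripts/ci/run_ci.sh and may differ from this sketch.
import os

def shard(items, matrix_build, matrix_builds):
    # 1-based shard index, as in MATRIX_BUILD="1".."5" above.
    return [item for i, item in enumerate(items) if i % matrix_builds == matrix_build - 1]

tests = ["tests/kernel", "tests/net", "tests/drivers/spi", "tests/posix", "tests/crypto"]
build = int(os.environ.get("MATRIX_BUILD", "1"))
builds = int(os.environ.get("MATRIX_BUILDS", "5"))
print(shard(tests, build, builds))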

View File

@@ -1,80 +0,0 @@
indent_with_tabs = 2 # 1=indent to level only, 2=indent with tabs
input_tab_size = 8 # original tab size
output_tab_size = 8 # new tab size
indent_columns = output_tab_size
indent_label = 1 # pos: absolute col, neg: relative column
indent_switch_case = 0 # number
#
# inter-symbol newlines
#
nl_enum_brace = remove # "enum {" vs "enum \n {"
nl_union_brace = remove # "union {" vs "union \n {"
nl_struct_brace = remove # "struct {" vs "struct \n {"
nl_do_brace = remove # "do {" vs "do \n {"
nl_if_brace = remove # "if () {" vs "if () \n {"
nl_for_brace = remove # "for () {" vs "for () \n {"
nl_else_brace = remove # "else {" vs "else \n {"
nl_while_brace = remove # "while () {" vs "while () \n {"
nl_switch_brace = remove # "switch () {" vs "switch () \n {"
nl_brace_while = remove # "} while" vs "} \n while" - cuddle while
nl_brace_else = remove # "} \n else" vs "} else"
nl_func_var_def_blk = 1
nl_fcall_brace = remove # "list_for_each() {" vs "list_for_each()\n{"
nl_fdef_brace = add # "int foo() {" vs "int foo()\n{"
#
# Source code modifications
#
mod_paren_on_return = ignore # "return 1;" vs "return (1);"
mod_full_brace_if = add # "if() { } else { }" vs "if() else"
#
# inter-character spacing options
#
sp_sizeof_paren = remove # "sizeof (int)" vs "sizeof(int)"
sp_before_sparen = force # "if (" vs "if("
sp_after_sparen = force # "if () {" vs "if (){"
sp_inside_braces = add # "{ 1 }" vs "{1}"
sp_inside_braces_struct = add # "{ 1 }" vs "{1}"
sp_inside_braces_enum = add # "{ 1 }" vs "{1}"
sp_assign = add
sp_arith = add
sp_bool = add
sp_compare = add
sp_assign = add
sp_after_comma = add
sp_func_def_paren = remove # "int foo (){" vs "int foo(){"
sp_func_call_paren = remove # "foo (" vs "foo("
sp_func_proto_paren = remove # "int foo ();" vs "int foo();"
sp_inside_fparen = remove # "func( arg )" vs "func(arg)"
sp_else_brace = add # ignore/add/remove/force
sp_before_ptr_star = add # ignore/add/remove/force
sp_after_ptr_star = remove # ignore/add/remove/force
sp_between_ptr_star = remove # ignore/add/remove/force
sp_inside_paren = remove # remove spaces inside parens
sp_paren_paren = remove # remove spaces between nested parens
sp_inside_sparen = remove # remove spaces inside parens for if, while and the like
sp_brace_else = add # ignore/add/remove/force
sp_before_nl_cont = ignore
sp_cmt_cpp_start = add
sp_brace_typedef = add # }typedefd_name -> } typedefd_name
cmt_sp_after_star_cont = 1
#
# Aligning stuff
#
align_with_tabs = FALSE # use tabs to align
align_on_tabstop = TRUE # align on tabstops
align_enum_equ_span = 4 # '=' in enum definition
align_struct_init_span = 0 # align stuff in a structure init '= { }'
align_right_cmt_span = 3
align_nl_cont = TRUE
sp_pp_concat = ignore # ignore/add/remove/force

File diff suppressed because it is too large

View File

@@ -1,411 +0,0 @@
# CODEOWNERS for autoreview assigning in github
# https://help.github.com/en/articles/about-code-owners#codeowners-syntax
# Order is important; for each modified file, the last matching
# pattern takes the most precedence.
# That is, with the last pattern being
# *.rst @dbkinder
# if only .rst files are being modified, only dbkinder is
# automatically requested for review, but you can manually
# add others as needed.
# Do not use wildcard on all source yet
# * @galak @nashif
/.known-issues/ @inakypg @nashif
/arch/arc/ @vonhust @ruuddw
/arch/arm/ @MaureenHelm @galak @ioannisg
/arch/arm/core/cortex_m/cmse/ @ioannisg
/arch/arm/include/cortex_m/cmse.h @ioannisg
/arch/arm/core/cortex_r/ @MaureenHelm @galak @ioannisg @bbolen
/arch/common/ @andrewboie @ioannisg @andyross
/soc/arc/snps_*/ @vonhust @ruuddw
/soc/nios2/ @nashif @wentongwu
/soc/arm/ @MaureenHelm @galak @ioannisg
/soc/arm/arm/mps2/ @fvincenzo
/soc/arm/atmel_sam/sam3x/ @ioannisg
/soc/arm/atmel_sam/sam4s/ @fallrisk
/soc/arm/nxp*/ @MaureenHelm
/soc/arm/nordic_nrf/ @ioannisg
/soc/arm/st_stm32/ @erwango
/soc/arm/st_stm32/stm32f4/ @rsalveti @idlethread
/soc/arm/st_stm32/stm32mp1/ @arnop2
/soc/arm/ti_simplelink/cc13x2_cc26x2/ @bwitherspoon
/soc/arm/ti_simplelink/cc32xx/ @vanti
/soc/arm/ti_simplelink/msp432p4xx/ @Mani-Sadhasivam
/soc/xtensa/intel_s1000/ @sathishkuttan @dcpleung
/arch/x86/ @andrewboie
/arch/nios2/ @andrewboie @wentongwu
/arch/posix/ @aescolar
/arch/riscv/ @kgugala @pgielda @nategraff-sifive
/soc/posix/ @aescolar
/soc/riscv/ @kgugala @pgielda @nategraff-sifive
/soc/riscv/openisa*/ @MaureenHelm
/soc/x86/ @andrewboie
/arch/xtensa/ @andrewboie @dcpleung @andyross
/soc/xtensa/ @andrewboie @dcpleung @andyross
/boards/arc/ @vonhust @ruuddw
/boards/arm/ @MaureenHelm @galak
/boards/arm/96b_argonkey/ @avisconti
/boards/arm/96b_avenger96/ @Mani-Sadhasivam
/boards/arm/96b_carbon/ @rsalveti @idlethread
/boards/arm/96b_meerkat96/ @Mani-Sadhasivam
/boards/arm/96b_nitrogen/ @idlethread
/boards/arm/96b_neonkey/ @Mani-Sadhasivam
/boards/arm/96b_stm32_sensor_mez/ @Mani-Sadhasivam
/boards/arm/96b_wistrio/ @Mani-Sadhasivam
/boards/arm/arduino_due/ @ioannisg
/boards/arm/cc1352r1_launchxl/ @bwitherspoon
/boards/arm/cc26x2r1_launchxl/ @bwitherspoon
/boards/arm/cc3220sf_launchxl/ @vanti
/boards/arm/disco_l475_iot1/ @erwango
/boards/arm/frdm*/ @MaureenHelm
/boards/arm/frdm*/doc/ @MaureenHelm @MeganHansen
/boards/arm/hexiwear*/ @MaureenHelm
/boards/arm/hexiwear*/doc/ @MaureenHelm @MeganHansen
/boards/arm/lpcxpresso*/ @MaureenHelm
/boards/arm/lpcxpresso*/doc/ @MaureenHelm @MeganHansen
/boards/arm/mimxrt*/ @MaureenHelm
/boards/arm/mimxrt*/doc/ @MaureenHelm @MeganHansen
/boards/arm/mps2_an385/ @fvincenzo
/boards/arm/msp_exp432p401r_launchxl/ @Mani-Sadhasivam
/boards/arm/nrf*/ @carlescufi @lemrey @ioannisg
/boards/arm/nucleo*/ @erwango
/boards/arm/nucleo_f401re/ @rsalveti @idlethread
/boards/arm/qemu_cortex_m*/ @ioannisg
/boards/arm/sam4s_xplained/ @fallrisk
/boards/arm/v2m_beetle/ @fvincenzo
/boards/arm/olimexino_stm32/ @ydamigos
/boards/arm/sensortile_box/ @avisconti
/boards/arm/steval_fcu001v1/ @Navin-Sankar
/boards/arm/stm32l1_disco/ @karlp
/boards/arm/stm32*_disco/ @erwango
/boards/arm/stm32f3_disco/ @ydamigos
/boards/arm/stm32*_eval/ @erwango
/boards/common/ @mbolivar
/boards/nios2/ @wentongwu
/boards/nios2/altera_max10/ @wentongwu
/boards/arm/stm32_min_dev/ @cbsiddharth
/boards/posix/ @aescolar
/boards/riscv/ @kgugala @pgielda @nategraff-sifive
/boards/riscv/rv32m1_vega/ @MaureenHelm
/boards/shields/ @erwango
/boards/x86/ @andrewboie @nashif
/boards/xtensa/ @nashif @dcpleung
/boards/xtensa/intel_s1000_crb/ @sathishkuttan @dcpleung
/boards/xtensa/odroid_go/ @ydamigos
# All cmake related files
/cmake/ @SebastianBoe @nashif
/CMakeLists.txt @SebastianBoe @nashif
/doc/ @dbkinder
/doc/guides/coccinelle.rst @himanshujha199640 @JuliaLawall
/doc/CMakeLists.txt @carlescufi
/doc/scripts/ @carlescufi
/doc/guides/bluetooth/ @joerchan @jhedberg @Vudentz
/doc/reference/bluetooth/ @joerchan @jhedberg @Vudentz
/drivers/*/*cc13xx_cc26xx* @bwitherspoon
/drivers/*/*mcux* @MaureenHelm
/drivers/*/*stm32* @erwango
/drivers/*/*native_posix* @aescolar
/drivers/adc/ @anangl
/drivers/adc/adc_stm32.c @cybertale
/drivers/bluetooth/ @joerchan @jhedberg @Vudentz
/drivers/can/ @alexanderwachter
/drivers/can/*mcp2515* @karstenkoenig
/drivers/clock_control/*nrf* @nordic-krch
/drivers/counter/ @nordic-krch
/drivers/counter/counter_cmos.c @andrewboie
/drivers/display/ @vanwinkeljan
/drivers/display/display_framebuf.c @andrewboie
/drivers/dma/*sam0* @Sizurka
/drivers/dma/dma_stm32* @cybertale
/drivers/eeprom/ @henrikbrixandersen
/drivers/entropy/*rv32m1* @MaureenHelm
/drivers/espi/ @albertofloyd @franciscomunoz @scottwcpg
/drivers/ps2/ @albertofloyd @franciscomunoz @scottwcpg
/drivers/kscan/ @albertofloyd @franciscomunoz @scottwcpg
/drivers/ethernet/ @jukkar @tbursztyka @pfalcon
/drivers/flash/ @nashif @nvlsianpu
/drivers/flash/*native_posix* @vanwinkeljan @aescolar
/drivers/flash/*nrf* @nvlsianpu
/drivers/flash/*spi_nor* @pabigot
/drivers/flash/*stm32* @superna9999
/drivers/gpio/ @mnkp @pabigot
/drivers/gpio/*ht16k33* @henrikbrixandersen
/drivers/gpio/*stm32* @rsalveti @idlethread
/drivers/hwinfo/ @alexanderwachter
/drivers/i2s/i2s_ll_stm32* @avisconti
/drivers/ieee802154/ @jukkar @tbursztyka
/drivers/interrupt_controller/ @andrewboie
/drivers/ipm/ipm_mhu* @karl-zh
/drivers/ipm/Kconfig.nrfx @masz-nordic @ioannisg
/drivers/ipm/Kconfig.nrfx_ipc_channel @masz-nordic @ioannisg
/drivers/ipm/ipm_nrfx_ipc.c @masz-nordic @ioannisg
/drivers/ipm/ipm_nrfx_ipc.h @masz-nordic @ioannisg
/drivers/ipm/ipm_stm32_ipcc.c @arnop2
/drivers/*/vexriscv_litex.c @mateusz-holenko @kgugala @pgielda
/drivers/led/ @Mani-Sadhasivam
/drivers/led_strip/ @mbolivar
/drivers/modem/ @mike-scott
/drivers/pcie/ @andrewboie
/drivers/pinmux/stm32/ @rsalveti @idlethread
/drivers/pinmux/*hsdk* @iriszzw
/drivers/sensor/ @MaureenHelm
/drivers/sensor/ams_iAQcore/ @alexanderwachter
/drivers/sensor/ens210/ @alexanderwachter
/drivers/sensor/hts*/ @avisconti
/drivers/sensor/lis*/ @avisconti
/drivers/sensor/lps*/ @avisconti
/drivers/sensor/lsm*/ @avisconti
/drivers/sensor/st*/ @avisconti
/drivers/serial/uart_altera_jtag_hal.c @wentongwu
/drivers/serial/*ns16550* @andrewboie
/drivers/serial/Kconfig.litex @mateusz-holenko @kgugala @pgielda
/drivers/serial/uart_liteuart.c @mateusz-holenko @kgugala @pgielda
/drivers/serial/Kconfig.rtt @carlescufi @pkral78
/drivers/serial/uart_rtt.c @carlescufi @pkral78
/drivers/serial/Kconfig.xlnx @wjliang
/drivers/serial/uart_xlnx_ps.c @wjliang
/drivers/net/ @jukkar @tbursztyka
/drivers/ptp_clock/ @jukkar
/drivers/pwm/pwm_shell.c @henrikbrixandersen
/drivers/spi/ @tbursztyka
/drivers/spi/spi_ll_stm32.* @superna9999
/drivers/spi/spi_rv32m1_lpspi* @karstenkoenig
/drivers/timer/apic_timer.c @andrewboie
/drivers/timer/cortex_m_systick.c @ioannisg
/drivers/timer/altera_avalon_timer_hal.c @wentongwu
/drivers/timer/riscv_machine_timer.c @nategraff-sifive @kgugala @pgielda
/drivers/timer/litex_timer.c @mateusz-holenko @kgugala @pgielda
/drivers/timer/xlnx_psttc_timer.c @wjliang
/drivers/timer/cc13x2_cc26x2_rtc_timer.c @vanti
/drivers/usb/ @jfischer-phytec-iot @finikorg
/drivers/usb/device/usb_dc_stm32.c @ydamigos @loicpoulain
/drivers/video/ @loicpoulain
/drivers/i2c/i2c_ll_stm32* @ldts @ydamigos
/drivers/i2c/i2c_rv32m1_lpi2c* @henrikbrixandersen
/drivers/i2c/*sam0* @Sizurka
/drivers/i2c/i2c_dw* @dcpleung
/drivers/*/*xec* @franciscomunoz @albertofloyd @scottwcpg
/drivers/wifi/ @jukkar @tbursztyka @pfalcon
/drivers/wifi/eswifi/ @loicpoulain
/dts/arc/ @vonhust @ruuddw @iriszzw
/dts/arm/atmel/samr21.dtsi @benpicco
/dts/arm/atmel/sam*5*.dtsi @benpicco
/dts/arm/st/ @erwango
/dts/arm/ti/cc13?2* @bwitherspoon
/dts/arm/ti/cc26?2* @bwitherspoon
/dts/arm/ti/cc3235* @vanti
/dts/arm/nordic/ @ioannisg @carlescufi
/dts/arm/nxp/ @MaureenHelm
/dts/arm/microchip/ @franciscomunoz @albertofloyd @scottwcpg
/dts/riscv/microsemi-miv.dtsi @galak
/dts/riscv/rv32m1* @MaureenHelm
/dts/riscv/riscv32-fe310.dtsi @nategraff-sifive
/dts/riscv/riscv32-litex-vexriscv.dtsi @mateusz-holenko @kgugala @pgielda
/dts/arm/armv7-r.dtsi @bbolen
/dts/arm/xilinx/ @bbolen
/dts/xtensa/xtensa.dtsi @ydamigos
/dts/bindings/ @galak
/dts/bindings/can/ @alexanderwachter
/dts/bindings/iio/adc/st*stm32-adc.yaml @cybertale
/dts/bindings/serial/ns16550.yaml @andrewboie
/dts/bindings/*/nordic* @anangl
/dts/bindings/*/nxp* @MaureenHelm
/dts/bindings/*/openisa* @MaureenHelm
/dts/bindings/*/st* @erwango
/dts/bindings/sensor/ams* @alexanderwachter
/dts/bindings/*/sifive* @mateusz-holenko @kgugala @pgielda @nategraff-sifive
/dts/bindings/*/litex* @mateusz-holenko @kgugala @pgielda
/dts/bindings/*/vexriscv* @mateusz-holenko @kgugala @pgielda
/dts/posix/ @aescolar @vanwinkeljan
/dts/bindings/sensor/*bme680* @BoschSensortec
/dts/bindings/sensor/st* @avisconti
/ext/hal/cmsis/ @MaureenHelm @galak @stephanosio
/ext/lib/crypto/tinycrypt/ @ceolin
/include/ @nashif @carlescufi @galak @MaureenHelm
/include/drivers/adc.h @anangl
/include/drivers/can.h @alexanderwachter
/include/drivers/counter.h @nordic-krch
/include/drivers/display.h @vanwinkeljan
/include/drivers/espi.h @albertofloyd @franciscomunoz @scottwcpg
/include/drivers/bluetooth/ @joerchan @jhedberg @Vudentz
/include/drivers/flash.h @nashif @carlescufi @galak @MaureenHelm @nvlsianpu
/include/drivers/led/ht16k33.h @henrikbrixandersen
/include/drivers/interrupt_controller/ @andrewboie
/include/drivers/pcie/ @andrewboie
/include/drivers/hwinfo.h @alexanderwachter
/include/drivers/led.h @Mani-Sadhasivam
/include/drivers/led_strip.h @mbolivar
/include/drivers/sensor.h @MaureenHelm
/include/drivers/spi.h @tbursztyka
/include/app_memory/ @andrewboie
/include/arch/arc/ @vonhust @ruuddw
/include/arch/arc/arch.h @andrewboie
/include/arch/arc/v2/irq.h @andrewboie
/include/arch/arm/ @MaureenHelm @galak @ioannisg
/include/arch/arm/irq.h @andrewboie
/include/arch/nios2/ @andrewboie
/include/arch/nios2/arch.h @andrewboie
/include/arch/posix/ @aescolar
/include/arch/riscv/ @nategraff-sifive @kgugala @pgielda
/include/arch/x86/ @andrewboie @wentongwu
/include/arch/common/ @andrewboie @andyross @nashif
/include/arch/xtensa/ @andrewboie
/include/sys/atomic.h @andrewboie @andyross
/include/bluetooth/ @joerchan @jhedberg @Vudentz
/include/cache.h @andrewboie @andyross
/include/device.h @wentongwu @nashif
/include/display/ @vanwinkeljan
/include/dt-bindings/clock/kinetis_mcg.h @henrikbrixandersen
/include/dt-bindings/clock/kinetis_scg.h @henrikbrixandersen
/include/dt-bindings/dma/stm32_dma.h @cybertale
/include/dt-bindings/pcie/ @andrewboie
/include/dt-bindings/usb/usb.h @galak @finikorg
/include/fs/ @nashif @wentongwu
/include/init.h @andrewboie @andyross
/include/irq.h @andrewboie @andyross
/include/irq_offload.h @andrewboie @andyross
/include/kernel.h @andrewboie @andyross
/include/kernel_version.h @andrewboie @andyross
/include/linker/app_smem*.ld @andrewboie
/include/linker/ @andrewboie @andyross
/include/logging/ @nordic-krch
/include/misc/ @andrewboie @andyross
/include/net/ @jukkar @tbursztyka @pfalcon
/include/net/buf.h @jukkar @jhedberg @tbursztyka @pfalcon
/include/posix/ @pfalcon
/include/power/power.h @wentongwu @nashif
/include/ptp_clock.h @jukkar
/include/shared_irq.h @andrewboie @andyross
/include/shell/ @jakub-uC @nordic-krch
/include/sw_isr_table.h @andrewboie @andyross
/include/sys_clock.h @andrewboie @andyross
/include/sys/sys_io.h @andrewboie @andyross
/include/toolchain.h @andrewboie @andyross @nashif
/include/toolchain/ @andrewboie @andyross
/include/zephyr.h @andrewboie @andyross
/kernel/ @andrewboie @andyross
/lib/gui/ @vanwinkeljan
/lib/os/ @andrewboie @andyross
/lib/posix/ @pfalcon
/lib/cmsis_rtos_v2/ @nashif
/lib/cmsis_rtos_v1/ @nashif
/lib/libc/ @nashif @andrewboie
/modules/ @nashif
/kernel/device.c @andrewboie @andyross @nashif
/kernel/idle.c @andrewboie @andyross @nashif
/samples/ @nashif
/samples/basic/minimal/ @carlescufi
/samples/basic/servo_motor/*microbit* @jhe
/lib/updatehub/ @chtavares592 @otavio
/samples/bluetooth/ @jhedberg @Vudentz @joerchan
/samples/boards/intel_s1000_crb/ @sathishkuttan @dcpleung @nashif
/samples/display/ @vanwinkeljan
/samples/drivers/CAN/ @alexanderwachter
/samples/drivers/ht16k33/ @henrikbrixandersen
/samples/gui/ @vanwinkeljan
/samples/net/ @jukkar @tbursztyka @pfalcon
/samples/net/dns_resolve/ @jukkar @tbursztyka @pfalcon
/samples/net/lwm2m_client/ @mike-scott
/samples/net/mqtt_publisher/ @jukkar @tbursztyka
/samples/net/sockets/coap_*/ @rveerama1
/samples/net/sockets/ @jukkar @tbursztyka @pfalcon
/samples/net/updatehub/ @chtavares592 @otavio
/samples/sensor/ @MaureenHelm
/samples/shields/ @avisconti
/samples/subsys/logging/ @nordic-krch @jakub-uC
/samples/subsys/shell/ @jakub-uC @nordic-krch
/samples/subsys/usb/ @jfischer-phytec-iot @finikorg
/samples/subsys/power/ @wentongwu @pabigot
/scripts/coccicheck @himanshujha199640 @JuliaLawall
/scripts/coccinelle/ @himanshujha199640 @JuliaLawall
/scripts/kconfig/ @ulfalizer
/scripts/elf_helper.py @andrewboie
/scripts/sanity_chk/expr_parser.py @nashif
/scripts/gen_app_partitions.py @andrewboie
/scripts/dts/ @ulfalizer @galak
/scripts/release/ @nashif
/arch/x86/gen_gdt.py @andrewboie
/arch/x86/gen_idt.py @andrewboie
/scripts/gen_kobject_list.py @andrewboie
/scripts/gen_priv_stacks.py @andrewboie @agross-oss @ioannisg
/scripts/gen_syscall_header.py @andrewboie
/scripts/gen_syscalls.py @andrewboie
/scripts/process_gperf.py @andrewboie
/scripts/gen_relocate_app.py @wentongwu
/scripts/sanity_chk/ @nashif
/scripts/sanitycheck @nashif
/scripts/series-push-hook.sh @erwango
/scripts/west_commands/ @mbolivar
/scripts/west-commands.yml @mbolivar
/scripts/zephyr_module.py @tejlmand
/scripts/valgrind.supp @aescolar
/subsys/bluetooth/ @joerchan @jhedberg @Vudentz
/subsys/bluetooth/controller/ @carlescufi @cvinayak @thoh-ot
/subsys/bluetooth/mesh/ @jhedberg @trond-snekvik @joerchan @Vudentz
/subsys/cpp/ @pabigot @vanwinkeljan
/subsys/debug/ @nashif
/subsys/debug/asan.c @vanwinkeljan @aescolar
/subsys/disk/disk_access_spi_sdhc.c @JunYangNXP
/subsys/disk/disk_access_sdhc.h @JunYangNXP
/subsys/disk/disk_access_usdhc.c @JunYangNXP
/subsys/fb/ @jfischer-phytec-iot
/subsys/fs/ @nashif
/subsys/fs/fcb/ @nvlsianpu
/subsys/fs/fuse_fs_access.c @vanwinkeljan
/subsys/fs/littlefs_fs.c @pabigot
/subsys/fs/nvs/ @Laczen
/subsys/logging/ @nordic-krch
/subsys/logging/log_backend_net.c @nordic-krch @jukkar
/subsys/mgmt/ @carlescufi @nvlsianpu
/subsys/net/buf.c @jukkar @jhedberg @tbursztyka @pfalcon
/subsys/net/ip/ @jukkar @tbursztyka @pfalcon
/subsys/net/lib/ @jukkar @tbursztyka @pfalcon
/subsys/net/lib/dns/ @jukkar @tbursztyka @pfalcon
/subsys/net/lib/lwm2m/ @mike-scott
/subsys/net/lib/config/ @jukkar @tbursztyka
/subsys/net/lib/mqtt/ @jukkar @tbursztyka @rlubos
/subsys/net/lib/openthread/ @rlubos
/subsys/net/lib/coap/ @rveerama1
/subsys/net/lib/sockets/ @jukkar @tbursztyka @pfalcon
/subsys/net/lib/tls_credentials/ @rlubos
/subsys/net/l2/ @jukkar @tbursztyka
/subsys/net/l2/canbus/ @alexanderwachter @jukkar
/subsys/power/ @wentongwu @pabigot
/subsys/random/ @dleach02
/subsys/settings/ @nvlsianpu
/subsys/shell/ @jakub-uC @nordic-krch
/subsys/storage/ @nvlsianpu
/subsys/testsuite/ @nashif
/subsys/usb/ @jfischer-phytec-iot @finikorg
/tests/ @nashif
/tests/application_development/libcxx/ @pabigot
/tests/arch/arm/ @ioannisg
/tests/boards/native_posix/ @aescolar
/tests/boards/intel_s1000_crb/ @dcpleung @sathishkuttan
/tests/bluetooth/ @joerchan @jhedberg @Vudentz
/tests/posix/ @pfalcon
/tests/crypto/ @ceolin
/tests/crypto/mbedtls/ @nashif @ceolin
/tests/drivers/can/ @alexanderwachter
/tests/drivers/flash_simulator/ @nvlsianpu
/tests/drivers/hwinfo/ @alexanderwachter
/tests/drivers/spi/ @tbursztyka
/tests/drivers/uart/uart_async_api/ @Mierunski
/tests/kernel/ @andrewboie @andyross @nashif
/tests/lib/ @nashif
/tests/net/ @jukkar @tbursztyka @pfalcon
/tests/net/buf/ @jukkar @jhedberg @tbursztyka @pfalcon
/tests/net/lib/ @jukkar @tbursztyka @pfalcon
/tests/net/lib/http_header_fields/ @jukkar @tbursztyka
/tests/net/lib/mqtt_packet/ @jukkar @tbursztyka
/tests/net/lib/coap/ @rveerama1
/tests/net/socket/ @jukkar @tbursztyka @pfalcon
/tests/subsys/fs/ @nashif @wentongwu
/tests/subsys/settings/ @nvlsianpu
/tests/subsys/shell/ @jakub-uC @nordic-krch
# Get all docs reviewed
*.rst @dbkinder
*posix*.rst @dbkinder @aescolar
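The header comment of this CODEOWNERS file notes that, for each modified file, the last matching pattern takes precedence. Below is a simplified Python sketch of that lookup; real CODEOWNERS matching follows gitignore-style rules that fnmatch does not fully cover, so treat it as an approximation, and the rule subset shown is just a hand-picked sample from the file above.

# Approximate "last matching pattern wins" lookup for a CODEOWNERS-style file.
from fnmatch import fnmatch

rules = [
    ("/doc/", ["@dbkinder"]),
    ("/drivers/sensor/", ["@MaureenHelm"]),
    ("/drivers/sensor/hts*/", ["@avisconti"]),
    ("*.rst", ["@dbkinder"]),
]

def owners(path, rules):
    result = []
    for pattern, who in rules:
        if pattern.endswith("/"):
            # directory pattern: anything underneath it matches
            if fnmatch(path, pattern.lstrip("/") + "*"):
                result = who
        elif "/" not in pattern:
            # bare glob like "*.rst": match against the file name only
            if fnmatch(path.rsplit("/", 1)[-1], pattern):
                result = who
        elif fnmatch(path, pattern.lstrip("/")):
            result = who
    return result          # last match wins

print(owners("drivers/sensor/hts221/hts221.c", rules))   # -> ['@avisconti']
print(owners("doc/guides/beyond-GSG.rst", rules))          # -> ['@dbkinder']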

View File

@@ -1,78 +0,0 @@
# Contributor Covenant Code of Conduct
## Our Pledge
In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to making participation in our project and
our community a harassment-free experience for everyone, regardless of age, body
size, disability, ethnicity, sex characteristics, gender identity and expression,
level of experience, education, socio-economic status, nationality, personal
appearance, race, religion, or sexual identity and orientation.
## Our Standards
Examples of behavior that contributes to creating a positive environment
include:
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery and unwelcome sexual attention or
advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic
address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable
behavior and are expected to take appropriate and fair corrective action in
response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.
## Scope
This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community. Examples of
representing a project or community include using an official project e-mail
address, posting via an official social media account, or acting as an appointed
representative at an online or offline event. Representation of a project may be
further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting the project team at conduct@zephyrproject.org.
Reports will be received by Kate Stewart (Linux Foundation) and Amy Occhialino
(Intel). All complaints will be reviewed and investigated, and will result in a
response that is deemed necessary and appropriate to the circumstances. The
project team is obligated to maintain confidentiality with regard to the
reporter of an incident. Further details of specific enforcement policies may
be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good
faith may face temporary or permanent repercussions as determined by other
members of the project's leadership.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
[homepage]: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see
https://www.contributor-covenant.org/faq

View File

@@ -1,39 +0,0 @@
Contribution Guidelines
#######################
As an open-source project, we welcome and encourage the community to submit
patches directly to the project. In our collaborative open source environment,
standards and methods for submitting changes help reduce the chaos that can result
from an active development community.
This document briefly summarizes the full `Contribution
Guidelines <http://docs.zephyrproject.org/latest/contribute/index.html>`_
documentation.
* Zephyr uses the permissive open source `Apache 2.0 license`_
that allows you to freely use, modify, distribute and sell your own products
that include Apache 2.0 licensed software.
* There are some imported or reused components of the Zephyr project that
use other licensing and are clearly identified.
* The Developer Certificate of Origin (DCO) process is followed to
ensure developers are following licensing criteria for their
contributions, and documented with a ``Signed-off-by`` line in commits.
* Zephyr development workflow is supported on Linux, macOS, and Windows
(with a few exceptions).
* Source code for the project is maintained in the GitHub repo:
https://github.com/zephyrproject-rtos/zephyr
* Issue and feature tracking is done using GitHub issues in this repo.
* A Continuous Integration (CI) system runs on every Pull Request (PR)
to verify several aspects of the PR including Git commit formatting,
Coding Style, sanity check builds, and documentation builds.
* The `Zephyr devel mailing list`_ is a great place to engage with the
community, ask questions, discuss issues, and help each other.
.. _Zephyr devel mailing list: https://lists.zephyrproject.org/g/devel

122
Kbuild Normal file
View File

@@ -0,0 +1,122 @@
# vim: filetype=make
ifneq ($(strip $(CONFIG_MAX_NUM_TASK_IRQS)),)
ifneq (${CONFIG_MAX_NUM_TASK_IRQS},0)
TASK_IRQS=y
endif
endif
ifneq ("$(wildcard $(MDEF_FILE))","")
MDEF_FILE_PATH=$(strip $(MDEF_FILE))
else
ifneq ($(MDEF_FILE),)
MDEF_FILE_PATH=$(strip $(PROJECT_BASE)/$(MDEF_FILE))
endif
endif
define filechk_prj.mdef
(echo "% WARNING. THIS FILE IS AUTO-GENERATED. DO NOT MODIFY!"; \
echo; \
echo "% CONFIG NUM_COMMAND_PACKETS NUM_TIMER_PACKETS NUM_TASK_PRIORITIES"; \
echo "% ============================================================="; \
echo " CONFIG ${CONFIG_NUM_COMMAND_PACKETS} ${CONFIG_NUM_TIMER_PACKETS} ${CONFIG_NUM_TASK_PRIORITIES}"; \
echo; \
echo "% TASKGROUP NAME";\
echo "% ==============";\
echo " TASKGROUP EXE";\
echo " TASKGROUP SYS";\
echo " TASKGROUP FPU";\
echo " TASKGROUP SSE";\
echo; \
if test "$(TASK_IRQS)" = "y"; then \
echo "% Task IRQ objects";\
echo "% EVENT NAME HANDLER"; \
echo "% ======================================="; \
i=0; \
while [ $$i -lt $(CONFIG_MAX_NUM_TASK_IRQS) ]; do \
echo " EVENT _TaskIrqEvt$$i NULL"; \
i=$$(($$i+1));\
done; \
fi; \
if test -e "$(MDEF_FILE_PATH)"; then \
cat $(MDEF_FILE_PATH); \
fi;)
endef
misc/generated/sysgen/prj.mdef: $(MDEF_FILE_PATH) \
include/config/auto.conf FORCE
$(call filechk,prj.mdef)
misc/generated/sysgen/kernel_main.c: misc/generated/sysgen/prj.mdef \
kernel/microkernel/include/micro_private_types.h \
kernel/microkernel/include/kernel_main.h
$(Q)$(PYTHON) $(srctree)/scripts/sysgen \
$(CURDIR)/misc/generated/sysgen/prj.mdef \
$(CURDIR)/misc/generated/sysgen/
define filechk_configs.c
(echo "/* file is auto-generated, do not modify ! */"; \
echo; \
echo "#include <toolchain.h>"; \
echo; \
echo "GEN_ABS_SYM_BEGIN (_ConfigAbsSyms)"; \
echo; \
cat $(CURDIR)/include/generated/autoconf.h | sed \
's/".*"/1/' | awk \
'/#define/{printf "GEN_ABSOLUTE_SYM(%s, %s);\n", $$2, $$3}'; \
echo; \
echo "GEN_ABS_SYM_END";)
endef
misc/generated/configs.c: include/config/auto.conf FORCE
$(call filechk,configs.c)
targets := misc/generated/configs.c
targets += include/generated/offsets.h
always := misc/generated/configs.c
always += include/generated/offsets.h
ifeq ($(CONFIG_MICROKERNEL),y)
targets += misc/generated/sysgen/kernel_main.c
always += misc/generated/sysgen/kernel_main.c
endif
define rule_cc_o_c_1
$(call echo-cmd,cc_o_c_1) $(cmd_cc_o_c_1);
endef
OFFSETS_INCLUDE = $(strip \
-include $(CURDIR)/include/generated/autoconf.h \
-I $(srctree)/include \
-I $(CURDIR)/include/generated \
-I $(srctree)/kernel/microkernel/include \
-I $(srctree)/kernel/nanokernel/include \
-I $(srctree)/lib/libc/minimal/include \
-I $(srctree)/arch/${ARCH}/include )
cmd_cc_o_c_1 = $(CC) $(KBUILD_CFLAGS) $(OFFSETS_INCLUDE) -c -o $@ $<
arch/$(ARCH)/core/offsets/offsets.o: arch/$(ARCH)/core/offsets/offsets.c $(KCONFIG_CONFIG)
$(Q)mkdir -p $(dir $@)
$(call if_changed,cc_o_c_1)
define offsetchk
$(Q)set -e; \
$(kecho) ' CHK $@'; \
mkdir -p $(dir $@); \
$(GENOFFSET_H) -i $(1) -o $@.tmp; \
if [ -r $@ ] && cmp -s $@ $@.tmp; then \
rm -f $@.tmp; \
else \
$(kecho) ' UPD $@'; \
mv -f $@.tmp $@; \
fi
endef
include/generated/offsets.h: arch/$(ARCH)/core/offsets/offsets.o \
include/config/auto.conf FORCE
$(call offsetchk,arch/$(ARCH)/core/offsets/offsets.o)
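The filechk_configs.c rule above turns every #define in include/generated/autoconf.h into a GEN_ABSOLUTE_SYM() entry, collapsing quoted string values to 1 on the way. The same sed/awk transformation is re-expressed below as a short Python sketch; the autoconf.h path is taken from the rule above and the function name is illustrative.

# Python rendering of the sed/awk pipeline in filechk_configs.c above:
# every "#define NAME VALUE" in autoconf.h becomes GEN_ABSOLUTE_SYM(NAME, VALUE),
# with quoted string values collapsed to 1 first.
import re

def gen_config_syms(autoconf_path="include/generated/autoconf.h"):
    out = ["/* file is auto-generated, do not modify ! */", "",
           "#include <toolchain.h>", "", "GEN_ABS_SYM_BEGIN (_ConfigAbsSyms)", ""]
    with open(autoconf_path) as f:
        for line in f:
            line = re.sub(r'".*"', "1", line)          # sed 's/".*"/1/'
            fields = line.split()
            if len(fields) >= 3 and fields[0] == "#define":
                out.append("GEN_ABSOLUTE_SYM(%s, %s);" % (fields[1], fields[2]))
    out += ["", "GEN_ABS_SYM_END"]
    return "\n".join(out)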

18
Kconfig
View File

@@ -1,8 +1,20 @@
# General configuration options
# Kconfig - general configuration options
#
# Copyright (c) 2014-2015 Wind River Systems, Inc.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
mainmenu "Zephyr Kernel Configuration"
source "Kconfig.zephyr"


@@ -1,421 +1,49 @@
# General configuration options
# Kconfig - general configuration options
#
# Copyright (c) 2014-2015 Wind River Systems, Inc.
# Copyright (c) 2016 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
menu "Modules"
source "$(CMAKE_BINARY_DIR)/Kconfig.modules"
source "modules/Kconfig"
endmenu
# Include these first so that any properties (e.g. defaults) below can be
# overridden in *.defconfig files (by defining symbols in multiple locations).
# After merging all the symbol definitions, Kconfig picks the first property
# (e.g. the first default) with a satisfied condition.
#
# Board defaults should be parsed before SoC defaults, because boards usually
# overrides SoC values.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# Note: $ARCH and $BOARD_DIR might be glob patterns.
source "$(BOARD_DIR)/Kconfig.defconfig"
source "$(SOC_DIR)/$(ARCH)/*/Kconfig.defconfig"
source "boards/Kconfig"
source "$(SOC_DIR)/Kconfig"
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
config KERNELVERSION
string
option env="KERNELVERSION"
source "arch/Kconfig"
source "kernel/Kconfig"
source "dts/Kconfig"
source "drivers/Kconfig"
source "net/Kconfig"
source "misc/Kconfig"
source "lib/Kconfig"
source "subsys/Kconfig"
source "fs/Kconfig"
source "ext/Kconfig"
menu "Build and Link Features"
menu "Linker Options"
choice
prompt "Linker Orphan Section Handling"
default LINKER_ORPHAN_SECTION_WARN
config LINKER_ORPHAN_SECTION_PLACE
bool "Place"
help
Linker puts orphan sections in place without warnings
or errors.
config LINKER_ORPHAN_SECTION_WARN
bool "Warn"
help
Linker places the orphan sections in output and issues
warning about those sections.
config LINKER_ORPHAN_SECTION_ERROR
bool "Error"
help
Linker exits with error when an orphan section is found.
endchoice
config CODE_DATA_RELOCATION
bool "Relocate code/data sections"
depends on ARM
help
When selected, this will relocate the .text, data and .bss sections from
the specified files and place them in the required memory region. The
files should be specified in the CMakeLists.txt file with
the CMake API zephyr_code_relocate().
config HAS_FLASH_LOAD_OFFSET
bool
help
This option is selected by targets having a FLASH_LOAD_OFFSET
and FLASH_LOAD_SIZE.
config USE_CODE_PARTITION
bool "link into code-partition"
depends on HAS_FLASH_LOAD_OFFSET
help
When selected, the application will be linked into the chosen code partition.
# Workaround for not being able to have commas in macro arguments
DT_CHOSEN_Z_CODE_PARTITION := zephyr,code-partition
config FLASH_LOAD_OFFSET
hex "Kernel load offset"
default $(dt_chosen_reg_addr_hex,$(DT_CHOSEN_Z_CODE_PARTITION)) if USE_CODE_PARTITION
default 0
depends on HAS_FLASH_LOAD_OFFSET
help
This option specifies the byte offset from the beginning of flash that
the kernel should be loaded into. Changing this value from zero will
affect the Zephyr image's link, and will decrease the total amount of
flash available for use by application code.
If unsure, leave at the default value 0.
config FLASH_LOAD_SIZE
hex "Kernel load size"
default $(dt_chosen_reg_size_hex,$(DT_CHOSEN_Z_CODE_PARTITION)) if USE_CODE_PARTITION
default 0
depends on HAS_FLASH_LOAD_OFFSET
help
If non-zero, this option specifies the size, in bytes, of the flash
area that the Zephyr image will be allowed to occupy. If zero, the
image will be able to occupy from the FLASH_LOAD_OFFSET to the end of
the device.
If unsure, leave at the default value 0.
config TEXT_SECTION_OFFSET
hex
prompt "TEXT section offset" if !BOOTLOADER_MCUBOOT
default 0x200 if BOOTLOADER_MCUBOOT
default 0
help
If the application is built for chain-loading by a bootloader, this
variable must be set to a value that leaves sufficient
space between the beginning of the image and the start of the .text
section to store an image header or any other metadata.
In the particular case of the MCUboot bootloader this reserves enough
space to store the image header, which should also meet vector table
alignment requirements on most ARM targets, although some targets
may require smaller or larger values.
config HAVE_CUSTOM_LINKER_SCRIPT
bool "Custom linker scripts provided"
help
Set this option if you have a custom linker script, which needs to
be defined in CUSTOM_LINKER_SCRIPT.
config CUSTOM_LINKER_SCRIPT
string "Path to custom linker script"
depends on HAVE_CUSTOM_LINKER_SCRIPT
help
Path to the linker script to be used instead of the one defined by the
board.
The linker script must be based on a version provided by Zephyr since
the kernel can expect a certain layout/certain regions.
This is useful when an application needs to add sections into the
linker script and avoid having to change the script provided by
Zephyr.
config CUSTOM_RODATA_LD
bool "(DEPRECATED) Include custom-rodata.ld"
help
Note: This is deprecated, use the CMake function zephyr_linker_sources() instead.
Include a customized linker script fragment for inserting additional
data and linker directives into the rodata section.
config CUSTOM_RWDATA_LD
bool "(DEPRECATED) Include custom-rwdata.ld"
help
Note: This is deprecated, use the CMake function zephyr_linker_sources() instead.
Include a customized linker script fragment for inserting additional
data and linker directives into the data section.
config CUSTOM_SECTIONS_LD
bool "(DEPRECATED) Include custom-sections.ld"
help
Note: This is deprecated, use the CMake function zephyr_linker_sources() instead.
Include a customized linker script fragment for inserting additional
arbitrary sections.
config KERNEL_ENTRY
string "Kernel entry symbol"
default "__start"
help
Code entry symbol, to be set at linking phase.
config LINKER_SORT_BY_ALIGNMENT
bool "Sort input sections by alignment"
default y
help
This turns on the linker flag to sort sections by alignment
in decreasing size of symbols. This helps to minimize
padding between symbols.
endmenu
menu "Compiler Options"
config NATIVE_APPLICATION
bool "Build as a native host application"
help
Build as a native application that can run on the host, using
resources and libraries provided by the host.
choice
prompt "Optimization level"
default NO_OPTIMIZATIONS if COVERAGE
default DEBUG_OPTIMIZATIONS if DEBUG
default SIZE_OPTIMIZATIONS
help
Note that these flags shall only control the compiler
optimization level, and that no extra debug code shall be
conditionally compiled based on them.
config SIZE_OPTIMIZATIONS
bool "Optimize for size"
help
Compiler optimizations will be set to -Os independently of other
options.
config SPEED_OPTIMIZATIONS
bool "Optimize for speed"
help
Compiler optimizations will be set to -O2 independently of other
options.
config DEBUG_OPTIMIZATIONS
bool "Optimize debugging experience"
help
Compiler optimizations will be set to -Og independently of other
options.
config NO_OPTIMIZATIONS
bool "Optimize nothing"
help
Compiler optimizations will be set to -O0 independently of other
options.
endchoice
config COMPILER_OPT
string "Custom compiler options"
help
This option is a free-form string that is passed to the compiler
when building all parts of a project (i.e. kernel).
The compiler options specified by this string supplement the
predefined set of compiler flags supplied by the build system,
and can be used to change compiler optimization, warning and error
messages, and so on.
endmenu
menu "Build Options"
config KERNEL_BIN_NAME
string "The kernel binary name"
default "zephyr"
help
This option sets the name of the generated kernel binary.
config OUTPUT_STAT
bool "Create a statistics file"
default y
help
Create a stat file using readelf -e <elf>
config OUTPUT_DISASSEMBLY
bool "Create a disassembly file"
default y
help
Create an .lst file with the assembly listing of the firmware.
config OUTPUT_PRINT_MEMORY_USAGE
bool "Print memory usage to stdout"
default y
help
If the toolchain supports it, this option will pass
--print-memory-usage to the linker when it is doing its first
linker pass. Note that the memory regions are symbolic concepts
defined by the linker scripts and do not necessarily map
directly to the real physical address space. Also note that
some platforms do two passes of the linker, so the results do not
match the final elf file exactly. See also rom_report,
ram_report and
https://sourceware.org/binutils/docs/ld/MEMORY.html
config BUILD_OUTPUT_HEX
bool "Build a binary in HEX format"
help
Build a binary in HEX format. This will build a zephyr.hex file needed
by some platforms.
config BUILD_OUTPUT_BIN
bool "Build a binary in BIN format"
default y
help
Build a binary in BIN format. This will build a zephyr.bin file needed
by some platforms.
config BUILD_OUTPUT_EXE
bool "Build a binary in ELF format with .exe extension"
help
Build a binary in ELF format that can run in the host system. This
will build a zephyr.exe file.
config BUILD_OUTPUT_S19
bool "Build a binary in S19 format"
help
Build a binary in S19 format. This will build a zephyr.s19 file needed
by some platforms.
config BUILD_NO_GAP_FILL
bool "Don't fill gaps in generated hex/bin/s19 files."
depends on BUILD_OUTPUT_HEX || BUILD_OUTPUT_BIN || BUILD_OUTPUT_S19
config BUILD_OUTPUT_STRIPPED
bool "Build a stripped binary"
help
Build a stripped binary. This will build a zephyr.stripped file needed
by some platforms.
config APPLICATION_DEFINED_SYSCALL
bool "Scan application folder for any syscall definition"
help
Scan additional folders inside application source folder
for application defined syscalls.
endmenu
endmenu
menu "Boot Options"
config IS_BOOTLOADER
bool "Act as a bootloader"
depends on XIP
depends on ARM
help
This option indicates that Zephyr will act as a bootloader to execute
a separate Zephyr image payload.
config BOOTLOADER_SRAM_SIZE
int "SRAM reserved for bootloader"
default 16
depends on !XIP || IS_BOOTLOADER
depends on ARM || XTENSA
help
This option specifies the amount of SRAM (measured in kB) reserved for
a bootloader image, when either:
- the Zephyr image itself is to act as the bootloader, or
- Zephyr is a !XIP image, which implicitly assumes existence of a
bootloader that loads the Zephyr !XIP image onto SRAM.
config BOOTLOADER_MCUBOOT
bool "MCUboot bootloader support"
select USE_CODE_PARTITION
help
This option signifies that the target uses MCUboot as a bootloader,
or in other words that the image is to be chain-loaded by MCUboot.
This sets several required build system and Device Tree options in
order for the image generated to be bootable using the MCUboot open
source bootloader. Currently this includes:
* Setting TEXT_SECTION_OFFSET to a default value that allows space
for the MCUboot image header
* Activating SW_VECTOR_RELAY on Cortex-M0 (or Armv8-M baseline)
targets with no built-in vector relocation mechanisms
* Including dts/common/mcuboot.overlay when building the Device
Tree in order to place and link the image at the slot0 offset
config BOOTLOADER_ESP_IDF
bool "ESP-IDF bootloader support"
depends on SOC_ESP32
help
This option will trigger the compilation of the ESP-IDF bootloader
inside the build folder.
At flash time, the bootloader will be flashed together with the zephyr image.
config BOOTLOADER_KEXEC
bool "Boot using Linux kexec() system call"
depends on X86
help
This option signifies that Linux boots the kernel using kexec system call
and utility. This method is used to boot the kernel over the network.
config BOOTLOADER_CONTEXT_RESTORE
bool "Boot loader has context restore support"
default y
depends on SYS_POWER_DEEP_SLEEP_STATES && BOOTLOADER_CONTEXT_RESTORE_SUPPORTED
help
This option signifies that the target has a bootloader
that restores CPU context upon resuming from deep sleep
power state.
config REBOOT
bool "Reboot functionality"
select SYSTEM_CLOCK_DISABLE
help
Enable the sys_reboot() API. Enabling this can drag in other subsystems
needed to perform a "safe" reboot (e.g. SYSTEM_CLOCK_DISABLE, to stop the
system clock before issuing a reset).
config MISRA_SANE
bool "MISRA standards compliance features"
help
Causes the source code to build in "MISRA" mode, which
disallows some otherwise-permitted features of the C
standard for safety reasons. Specifically, variable-length
arrays are not permitted (and gcc will enforce this).
endmenu
menu "Compatibility"
config COMPAT_INCLUDES
bool "Suppress warnings when using header shims"
default y
help
Suppress any warnings from the pre-processor when including
deprecated header files.
endmenu
source "usb/Kconfig"
#
# The following are for Kconfig files for default values only.
# These should be parsed at the end.
#
# Board defaults should be parsed before SoC defaults
# because board usually overrides them.
#
source "boards/*/Kconfig.defconfig"
source "arch/*/soc/*/Kconfig.defconfig"

280
MAINTAINERS Normal file

@@ -0,0 +1,280 @@
Originally from the Linux Kernel.
# Licensed under the terms of the GNU GPL License version 2
Descriptions of section entries:
P: Person (obsolete)
M: Mail patches to: FullName <address@domain>
R: Designated reviewer: FullName <address@domain>
These reviewers should be CCed on patches.
L: Mailing list that is relevant to this area
W: Web-page with status/info
Q: Patchwork web based patch tracking system site
T: SCM tree type and location.
Type is one of: git, hg, quilt, stgit, topgit
S: Status, one of the following:
Supported: Someone is actually paid to look after this.
Maintained: Someone actually looks after it.
Odd Fixes: It has a maintainer but they don't have time to do
much other than throw the odd patch in. See below..
Orphan: No current maintainer [but maybe you could take the
role as you write your new code].
Obsolete: Old code. Something tagged obsolete generally means
it has been replaced by a better system and you
should be using that.
F: Files and directories with wildcard patterns.
A trailing slash includes all files and subdirectory files.
F: drivers/net/ all files in and below drivers/net
F: drivers/net/* all files in drivers/net, but not below
F: */net/* all files in "any top level directory"/net
One pattern per line. Multiple F: lines acceptable.
N: Files and directories with regex patterns.
N: [^a-z]tegra all files whose path contains the word tegra
One pattern per line. Multiple N: lines acceptable.
scripts/get_maintainer.pl has different behavior for files that
match F: pattern and matches of N: patterns. By default,
get_maintainer will not look at git log history when an F: pattern
match occurs. When an N: match occurs, git log history is used
to also notify the people that have git commit signatures.
X: Files and directories that are NOT maintained, same rules as F:
Files exclusions are tested before file matches.
Can be useful for excluding a specific subdirectory, for instance:
F: net/
X: net/ipv6/
matches all files in and below net excluding net/ipv6/
K: Keyword perl extended regex pattern to match content in a
patch or file. For instance:
K: of_get_profile
matches patches or files that contain "of_get_profile"
K: \b(printk|pr_(info|err))\b
matches patches or files that contain one or more of the words
printk, pr_info or pr_err
One regex pattern per line. Multiple K: lines acceptable.
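As a purely hypothetical illustration (names and paths invented for this example), an entry using these fields might look like:
EXAMPLE SUBSYSTEM
M: Jane Doe <jane.doe@example.com>
S: Maintained
F: drivers/example/
F: include/example.h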
Note: For the hard of thinking, this list is meant to remain in alphabetical
order. If you could add yourselves to it in alphabetical order that would be
so much easier [Ed]
Maintainers List (try to look for most precise areas first)
-----------------------------------
ARC ARCHITECTURE
M: Ruud Derwig <Ruud.Derwig@synopsys.com>
M: Chuck Jordan <Chuck.Jordan@synopsys.com>
M: Benjamin Walsh <benjamin.walsh@windriver.com>
S: Supported
F: arch/arc/
F: include/arch/arc/
ARM ARCHITECTURE
M: Maureen Helm <maureen.helm@nxp.com>
S: Supported
F: arch/arm/
F: include/arch/arm/
BLUETOOTH
M: Johan Hedberg <johan.hedberg@intel.com>
M: Luiz Augusto von Dentz <luiz.dentz@gmail.com>
M: Szymon Janc <szymon.janc@gmail.com>
S: Supported
W: https://www.zephyrproject.org/doc/subsystems/bluetooth/bluetooth.html
F: net/bluetooth/
F: include/bluetooth/
F: drivers/bluetooth/
F: samples/bluetooth/
F: tests/bluetooth/
DOCUMENTATION
M: Kinder, David <david.b.kinder@intel.com>
M: Perez-Gonzalez, Inaky <inaky.perez-gonzalez@intel.com>
S: Supported
F: doc/
FILE SYSTEM
M: Ramesh Thomas <ramesh.thomas@intel.com>
M: Kuo-Lang Tseng <kuo-lang.tseng@intel.com>
S: Supported
F: ext/fs/
F: fs/
F: include/fs/
F: include/fs.h
F: samples/fs/
FLASH DRIVER
M: Baohong Liu <baohong.liu@intel.com>
M: Kuo-Lang Tseng <kuo-lang.tseng@intel.com>
S: Supported
F: drivers/flash/
INTERRUPTS
M: Andrew Boie <andrew.p.boie@intel.com>
S: Supported
F: drivers/interrupt_controller/
F: arch/arc/core/
F: arch/arm/core/
F: arch/nios2/core/
F: arch/x86/core/
F: include/irq.h
F: include/arch/x86/arch.h
F: include/arch/arm/cortex_m/irq.h
F: include/arch/nios2/arch.h
F: include/arch/arc/arch.h
F: include/arch/arc/v2/irq.h
F: include/drivers/loapic.h
F: include/drivers/ioapic.h
F: include/drivers/mvic.h
KERNEL CORE
M: Benjamin Walsh <benjamin.walsh@windriver.com>
M: Allan Stephens <allan.stephens@windriver.com>
S: Supported
F: kernel/
F: include/nanokernel.h
F: include/microkernel.h
F: include/microkernel/
F: include/misc/
F: include/toolchain/
F: include/atomic.h
F: include/cache.h
F: include/init.h
F: include/irq.h
F: include/irq_offload.h
F: include/kernel_version.h
F: include/linker-defs.h
F: include/linker-tool-gcc.h
F: include/linker-tool.h
F: include/section_tags.h
F: include/sections.h
F: include/shared_irq.h
F: include/sw_isr_table.h
F: include/sys_clock.h
F: include/sys_io.h
F: include/toolchain.h
F: include/zephyr.h
KNOWN ISSUES
M: Anas Nashif <anas.nashif@intel.com>
M: Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
M: Javier B Perez <javier.b.perez.hernandez@intel.com>
F: .known-issues/
MAINTAINERS
M: Javier B Perez <javier.b.perez.hernandez@intel.com>
M: Anas Nashif <anas.nashif@intel.com>
M: Perez-Gonzalez, Inaky <inaky.perez-gonzalez@intel.com>
S: Supported
F: MAINTAINERS
MBEDTLS
M: Sergio Rodriguez <sergio.sf.rodriguez@intel.com>
M: Jithu Joseph <jithu.joseph@intel.com>
M: Kuo-Lang Tseng <kuo-lang.tseng@intel.com>
S: Supported
F: ext/lib/crypto/mbedtls/
F: samples/net/mbedtls_sslclient/
F: tests/crypto/test_mbedtls/
NETWORKING
M: Jukka Rissanen <jukka.rissanen@linux.intel.com>
S: Supported
W: https://www.zephyrproject.org/doc/subsystems/networking/networking.html
F: net/ip/
F: include/net/
F: samples/net/
F: tests/net/
NETWORK APPLICATIONS
M: Flavio Santes <flavio.santes@intel.com>
S: Supported
F: samples/net/dns_client/
F: samples/net/nats_clients/
F: samples/net/paho_mqtt_clients/
NETWORK BUFFERS
M: Johan Hedberg <johan.hedberg@intel.com>
M: Jukka Rissanen <jukka.rissanen@linux.intel.com>
S: Supported
W: https://www.zephyrproject.org/doc/subsystems/networking/buffers.html
F: net/buf.c
F: include/net/buf.h
F: tests/net/buf/
NIOS II
M: Andrew Boie <andrew.p.boie@intel.com>
S: Supported
F: arch/nios2/
F: include/arch/nios2/
F: drivers/serial/uart_altera_jtag.c
F: drivers/timer/altera_avalon_timer.c
F: tests/kernel/test_intmath/
NORDIC MDK
M: Carles Cufi <carles.cufi@nordicsemi.no>
S: Supported
F: ext/hal/nordic/mdk/
POWER MANAGEMENT
M: Ramesh Thomas <ramesh.thomas@intel.com>
M: Kuo-Lang Tseng <kuo-lang.tseng@intel.com>
S: Supported
F: arch/x86/core/crt0.S
F: include/device.h
F: include/init.h
F: include/power.h
F: kernel/microkernel/k_idle.c
F: kernel/nanokernel/device.c
F: samples/power/
QMSI
M: Anas Nashif <anas.nashif@intel.com>
S: Supported
F: ext/hal/qmsi/
QMSI DRIVERS
M: Sergio Rodriguez <sergio.sf.rodriguez@intel.com>
M: Baohong Liu <baohong.liu@intel.com>
M: Kuo-Lang Tseng <kuo-lang.tseng@intel.com>
S: Supported
F: drivers/*/*qmsi*
F: drivers/*/*/*qmsi*
SANITYCHECK
M: Andrew Boie <andrew.p.boie@intel.com>
S: Supported
F: scripts/sanitycheck
F: scripts/expr_parser.py
F: scripts/sanity_chk/
SENSOR DRIVERS
M: Bogdan Davidoaia <bogdan.m.davidoaia@intel.com>
M: Laurentiu Palcu <laurentiu.palcu@intel.com>
M: Murtaza Alexandru <alexandru.murtaza@intel.com>
M: Vlad Dogaru <vlad.dogaru@intel.com>
S: Supported
W: https://www.zephyrproject.org/doc/subsystems/sensor.html
F: include/sensor.h
F: drivers/sensor/
F: samples/sensor/
TINYCRYPT
M: Constanza Heath <constanza.m.heath@intel.com>
M: Flavio Santes <flavio.santes@intel.com>
S: Supported
F: ext/lib/crypto/tinycrypt/
F: tests/crypto/
X86 ARCH
M: Benjamin Walsh <benjamin.walsh@windriver.com>
M: Allan Stephens <allan.stephens@windriver.com>
S: Supported
F: arch/x86/
F: include/arch/x86/
THE REST
M: Anas Nashif <anas.nashif@intel.com>
L: devel@lists.zephyrproject.com
T: git https://gerrit.zephyrproject.org/r/a/zephyr
S: Buried alive in reporters
F: *
F: */

1289
Makefile

File diff suppressed because it is too large

140
Makefile.inc Normal file

@@ -0,0 +1,140 @@
# vim: filetype=make
#
ARCH ?= x86
UNAME := $(shell uname)
ifeq (MINGW, $(findstring MINGW, $(UNAME)))
DQUOTE = '
# '
PROJECT_BASE ?= $(shell sh -c "pwd -W")
else
DQUOTE = "
# "
PROJECT_BASE ?= $(CURDIR)
endif
O ?= $(PROJECT_BASE)/outdir
# Turn O into an absolute path; we call the main Kbuild with $(MAKE) -C,
# which changes the working directory, so relative paths don't work right.
# Need to create the directory first to make realpath happy
ifneq ($(MAKECMDGOALS),help)
$(shell mkdir -p $(O))
override O := $(realpath $(O))
endif
export ARCH MDEF_FILE QEMU_EXTRA_FLAGS PROJECT_BASE
KERNEL_TYPE ?= micro
override CONF_FILE := $(strip $(subst $(DQUOTE),,$(CONF_FILE)))
ifdef BOARD
KBUILD_DEFCONFIG_PATH=$(wildcard $(ZEPHYR_BASE)/boards/*/$(BOARD)_defconfig)
ifeq ($(KBUILD_DEFCONFIG_PATH),)
$(error Board $(BOARD) not found!)
endif
else
$(error BOARD is not defined!)
endif
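This file is meant to be included from an application's own Makefile, which sets BOARD (and optionally KERNEL_TYPE and CONF_FILE) before the include; a minimal sketch, with illustrative values:
# Hypothetical application Makefile using this fragment
BOARD ?= qemu_x86
KERNEL_TYPE ?= nano
CONF_FILE ?= prj.conf
include $(ZEPHYR_BASE)/Makefile.inc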
SOURCE_DIR ?= $(PROJECT_BASE)/src/
override SOURCE_DIR := $(abspath $(SOURCE_DIR))
override SOURCE_DIR := $(subst \,/,$(SOURCE_DIR))
override SOURCE_DIR_PARENT := $(patsubst %, %/.., $(SOURCE_DIR))
override SOURCE_DIR_PARENT := $(abspath $(SOURCE_DIR_PARENT))
override SOURCE_DIR_PARENT := $(subst \,/,$(SOURCE_DIR_PARENT))
export SOURCE_DIR SOURCE_DIR_PARENT
ifeq ("$(origin V)", "command line")
KBUILD_VERBOSE = $(V)
endif
ifndef KBUILD_VERBOSE
KBUILD_VERBOSE = 0
endif
ifeq ($(KBUILD_VERBOSE),1)
Q =
S =
else
Q = @
S = -s
endif
export CFLAGS
zephyrmake = +$(MAKE) -C $(ZEPHYR_BASE) O=$(1) \
PROJECT=$(PROJECT_BASE) SOURCE_DIR=$(DQUOTE)$(SOURCE_DIR)$(DQUOTE) $(2)
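As a sketch with hypothetical paths, a call such as $(call zephyrmake,$(O),flash) expands to roughly:
make -C $(ZEPHYR_BASE) O=/path/to/app/outdir \
	PROJECT=/path/to/app SOURCE_DIR="/path/to/app/src/" flash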
BOARDCONFIG = $(O)/.board_$(BOARD)
DOTCONFIG = $(O)/.config
all: $(DOTCONFIG)
$(Q)$(call zephyrmake,$(O),$@)
ifeq ($(findstring qemu_,$(BOARD)),)
qemu:
@echo "Emulation not available for this platform"
qemugdb: qemu
else
qemu: $(DOTCONFIG)
$(Q)$(call zephyrmake,$(O),$@)
qemugdb: $(DOTCONFIG)
$(Q)$(call zephyrmake,$(O),$@)
endif
debug: $(DOTCONFIG)
$(Q)$(call zephyrmake,$(O),$@)
flash: $(DOTCONFIG)
$(Q)$(call zephyrmake,$(O),$@)
ifeq ($(MAKECMDGOALS),debugserver)
-include $(ZEPHYR_BASE)/boards/$(BOARD)/Makefile.board
-include $(ZEPHYR_BASE)/scripts/Makefile.toolchain.$(ZEPHYR_GCC_VARIANT)
BOARD_NAME = $(BOARD)
export BOARD_NAME
endif
debugserver: FORCE
$(Q)$(CONFIG_SHELL) $(ZEPHYR_BASE)/scripts/support/$(FLASH_SCRIPT) debugserver
initconfig: $(DOTCONFIG)
$(BOARDCONFIG):
@rm -f $(O)/.board_*
@touch $@
ram_report: initconfig
$(Q)$(call zephyrmake,$(O),$@)
rom_report: initconfig
$(Q)$(call zephyrmake,$(O),$@)
menuconfig: initconfig
$(Q)$(call zephyrmake,$(O),$@)
help:
$(Q)$(MAKE) -s -C $(ZEPHYR_BASE) $@
# Catch all
%:
$(Q)$(call zephyrmake,$(O),$@)
KERNEL_CONFIG = $(ZEPHYR_BASE)/kernel/configs/$(KERNEL_TYPE).config
$(DOTCONFIG): $(BOARDCONFIG) $(KBUILD_DEFCONFIG_PATH) $(CONF_FILE)
$(Q)$(CONFIG_SHELL) $(ZEPHYR_BASE)/scripts/kconfig/merge_config.sh \
-q -m -O $(O) $(KBUILD_DEFCONFIG_PATH) $(KERNEL_CONFIG) $(CONF_FILE)
$(Q)$(MAKE) $(S) -C $(ZEPHYR_BASE) O=$(O) PROJECT=$(PROJECT_BASE) oldnoconfig
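With illustrative values (BOARD=qemu_x86, KERNEL_TYPE=micro, CONF_FILE=prj.conf; <board dir> stands for whatever directory the wildcard above resolves), the merge step ends up invoking something like:
scripts/kconfig/merge_config.sh -q -m -O $(O) \
	$(ZEPHYR_BASE)/boards/<board dir>/qemu_x86_defconfig \
	$(ZEPHYR_BASE)/kernel/configs/micro.config prj.conf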
pristine:
$(Q)rm -rf $(O)
PHONY += FORCE initconfig
FORCE:
.PHONY: $(PHONY)


@@ -1,87 +0,0 @@
.. raw:: html
<a href="https://www.zephyrproject.org">
<p align="center">
<img src="doc/images/Zephyr-Project.png">
</p>
</a>
<a href="https://bestpractices.coreinfrastructure.org/projects/74"><img
src="https://bestpractices.coreinfrastructure.org/projects/74/badge"></a>
<img
src="https://api.shippable.com/projects/58ffb2b8baa5e307002e1d79/badge?branch=master">
The Zephyr Project is a scalable real-time operating system (RTOS) supporting
multiple hardware architectures, optimized for resource constrained devices,
and built with security in mind.
The Zephyr OS is based on a small-footprint kernel designed for use on
resource-constrained systems: from simple embedded environmental sensors and
LED wearables to sophisticated smart watches and IoT wireless gateways.
The Zephyr kernel supports multiple architectures, including ARM Cortex-M,
Intel x86, ARC, Nios II, Tensilica Xtensa, and RISC-V, and a large number of
`supported boards`_.
.. below included in doc/introduction/introduction.rst
.. start_include_here
Getting Started
***************
Welcome to Zephyr! See the `Introduction to Zephyr`_ for a high-level overview,
and the documentation's `Getting Started Guide`_ to start developing.
Community Support
*****************
Community support is provided via mailing lists and Slack; see the Resources
below for details.
.. _project-resources:
Resources
*********
Here's a quick summary of resources to help you find your way around:
* **Help**: `Asking for Help Tips`_
* **Documentation**: http://docs.zephyrproject.org (`Getting Started Guide`_)
* **Source Code**: https://github.com/zephyrproject-rtos/zephyr is the main
repository; https://elixir.bootlin.com/zephyr/latest/source contains a
searchable index
* **Releases**: https://zephyrproject.org/developers/#downloads
* **Samples and example code**: see `Sample and Demo Code Examples`_
* **Mailing Lists**: users@lists.zephyrproject.org and
devel@lists.zephyrproject.org are the main user and developer mailing lists,
respectively. You can join the developer's list and search its archives at
`Zephyr Development mailing list`_. The other `Zephyr mailing list
subgroups`_ have their own archives and sign-up pages.
* **Nightly CI Build Status**: https://lists.zephyrproject.org/g/builds
The builds@lists.zephyrproject.org mailing list archives the CI
(shippable) nightly build results.
* **Chat**: Zephyr's Slack workspace is https://zephyrproject.slack.com. Use
this `Slack Invite`_ to register.
* **Contributing**: see the `Contribution Guide`_
* **Wiki**: `Zephyr GitHub wiki`_
* **Issues**: https://github.com/zephyrproject-rtos/zephyr/issues
* **Security Issues**: Email vulnerabilities@zephyrproject.org to report
security issues; also see our `Security`_ documentation. Security issues are
tracked separately at https://zephyrprojectsec.atlassian.net.
* **Zephyr Project Website**: https://zephyrproject.org
.. _Slack Invite: https://tinyurl.com/y5glwylp
.. _supported boards: http://docs.zephyrproject.org/latest/boards/index.html
.. _Zephyr Documentation: http://docs.zephyrproject.org
.. _Introduction to Zephyr: http://docs.zephyrproject.org/latest/introduction/index.html
.. _Getting Started Guide: http://docs.zephyrproject.org/latest/getting_started/index.html
.. _Contribution Guide: http://docs.zephyrproject.org/latest/contribute/index.html
.. _Zephyr GitHub wiki: https://github.com/zephyrproject-rtos/zephyr/wiki
.. _Zephyr Development mailing list: https://lists.zephyrproject.org/g/devel
.. _Zephyr mailing list subgroups: https://lists.zephyrproject.org/g/main/subgroups
.. _Sample and Demo Code Examples: http://docs.zephyrproject.org/latest/samples/index.html
.. _Security: http://docs.zephyrproject.org/latest/security/index.html
.. _Asking for Help Tips: https://docs.zephyrproject.org/latest/guides/getting-help.html


@@ -1,5 +0,0 @@
VERSION_MAJOR = 2
VERSION_MINOR = 1
PATCHLEVEL = 0
VERSION_TWEAK = 0
EXTRAVERSION =


@@ -1,11 +0,0 @@
# SPDX-License-Identifier: Apache-2.0
add_definitions(-D__ZEPHYR_SUPERVISOR__)
include_directories(
${ZEPHYR_BASE}/kernel/include
${ZEPHYR_BASE}/arch/${ARCH}/include
)
add_subdirectory(common)
add_subdirectory(${ARCH_DIR}/${ARCH} arch/${ARCH})


@@ -1,502 +1,64 @@
# General architecture configuration options
# Kconfig - general architecture configuration options
#
# Copyright (c) 2014-2015 Wind River Systems, Inc.
# Copyright (c) 2015 Intel Corporation
# Copyright (c) 2016 Cadence Design Systems, Inc.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Include these first so that any properties (e.g. defaults) below can be
# overridden (by defining symbols in multiple locations)
# Note: $ARCH might be a glob pattern
source "$(ARCH_DIR)/$(ARCH)/Kconfig"
choice ARCH_CHOICE
choice
prompt "Architecture"
default X86
config ARC
bool "ARC architecture"
select HAS_DTS
config ARM
bool "ARM architecture"
select HAS_DTS
select ATOMIC_OPERATIONS_BUILTIN
config X86
bool "x86 architecture"
select NANOKERNEL_TICKLESS_IDLE_SUPPORTED
select ATOMIC_OPERATIONS_BUILTIN
select HAS_DTS
config NIOS2
bool "Nios II Gen 2 architecture"
select ATOMIC_OPERATIONS_C
select HAS_DTS
config RISCV
bool "RISCV architecture"
select HAS_DTS
config XTENSA
bool "Xtensa architecture"
select HAS_DTS
select USE_SWITCH
select USE_SWITCH_SUPPORTED
config ARCH_POSIX
bool "POSIX (native) architecture"
select ATOMIC_OPERATIONS_BUILTIN
select ARCH_HAS_CUSTOM_SWAP_TO_MAIN
select ARCH_HAS_CUSTOM_BUSY_WAIT
select ARCH_HAS_THREAD_ABORT
select NATIVE_APPLICATION
select HAS_COVERAGE_SUPPORT
endchoice
menu "General Architecture Options"
module = ARCH
module-str = arch
source "subsys/logging/Kconfig.template.log_config"
module = MPU
module-str = mpu
source "subsys/logging/Kconfig.template.log_config"
config BIG_ENDIAN
bool
help
This option tells the build system that the target system is big-endian.
Little-endian architecture is the default and should leave this option
unselected. This option is selected by arch/$ARCH/Kconfig,
soc/**/Kconfig, or boards/**/Kconfig and the user should generally avoid
modifying it. The option is used to select linker script OUTPUT_FORMAT
and command line option for gen_isr_tables.py.
config 64BIT
bool
help
This option tells the build system that the target system is
using a 64-bit address space, meaning that pointer and long types
are 64 bits wide. This option is selected by arch/$ARCH/Kconfig,
soc/**/Kconfig, or boards/**/Kconfig and the user should generally
avoid modifying it.
if ARC || ARM || NIOS2 || X86
# Workaround for not being able to have commas in macro arguments
DT_CHOSEN_Z_SRAM := zephyr,sram
config SRAM_SIZE
int "SRAM Size in kB"
default $(dt_chosen_reg_size_int,$(DT_CHOSEN_Z_SRAM),0,K)
help
This option specifies the size of the SRAM in kB. It is normally set by
the board's defconfig file and the user should generally avoid modifying
it via the menu configuration.
config SRAM_BASE_ADDRESS
hex "SRAM Base Address"
default $(dt_chosen_reg_addr_hex,$(DT_CHOSEN_Z_SRAM))
help
This option specifies the base address of the SRAM on the board. It is
normally set by the board's defconfig file and the user should generally
avoid modifying it via the menu configuration.
# Workaround for not being able to have commas in macro arguments
DT_CHOSEN_Z_FLASH := zephyr,flash
config FLASH_SIZE
int "Flash Size in kB"
default $(dt_chosen_reg_size_int,$(DT_CHOSEN_Z_FLASH),0,K) if (XIP && ARM) || !ARM
help
This option specifies the size of the flash in kB. It is normally set by
the board's defconfig file and the user should generally avoid modifying
it via the menu configuration.
config FLASH_BASE_ADDRESS
hex "Flash Base Address"
default $(dt_chosen_reg_addr_hex,$(DT_CHOSEN_Z_FLASH)) if (XIP && ARM) || !ARM
help
This option specifies the base address of the flash on the board. It is
normally set by the board's defconfig file and the user should generally
avoid modifying it via the menu configuration.
endif # ARM || ARC || NIOS2 || X86
if ARCH_HAS_TRUSTED_EXECUTION
config TRUSTED_EXECUTION_SECURE
bool "Trusted Execution: Secure firmware image"
help
Select this option to enable building a Secure firmware
image for a platform that supports Trusted Execution. A
Secure firmware image will execute in Secure state. It may
allow the CPU to execute in Non-Secure (Normal) state.
Therefore, a Secure firmware image shall be able to
configure security attributions of CPU resources (memory
areas, peripherals, interrupts, etc.) as well as to handle
faults, related to security violations. It may optionally
allow certain functions to be called from the Non-Secure
(Normal) domain.
config TRUSTED_EXECUTION_NONSECURE
depends on !TRUSTED_EXECUTION_SECURE
bool "Trusted Execution: Non-Secure firmware image"
help
Select this option to enable building a Non-Secure
firmware image for a platform that supports Trusted
Execution. A Non-Secure firmware image will execute
in Non-Secure (Normal) state. Therefore, it shall not
access CPU resources (memory areas, peripherals,
interrupts etc.) belonging to the Secure domain.
endif # ARCH_HAS_TRUSTED_EXECUTION
config HW_STACK_PROTECTION
bool "Hardware Stack Protection"
depends on ARCH_HAS_STACK_PROTECTION
help
Select this option to enable hardware-based platform features to
catch stack overflows when the system is running in privileged
mode. If CONFIG_USERSPACE is not enabled, the system is always
running in privileged mode.
Note that this does not necessarily prevent corruption, and assertions
about the overall system state cannot be made when a fault is
triggered.
config USERSPACE
bool "User mode threads"
depends on ARCH_HAS_USERSPACE
help
When enabled, threads may be created or dropped down to user mode,
which has significantly restricted permissions and must interact
with the kernel via system calls. See Zephyr documentation for more
details about this feature.
If a user thread overflows its stack, this will be caught and the
kernel itself will be shielded from harm. Enabling this option
may or may not catch stack overflows when the system is in
privileged mode or handling a system call; to ensure these are always
caught, enable CONFIG_HW_STACK_PROTECTION.
config PRIVILEGED_STACK_SIZE
int "Size of privileged stack"
default 1024
depends on ARCH_HAS_USERSPACE
help
This option sets the privileged stack region size that will be used
in addition to the user mode thread stack. During normal execution,
this region will be inaccessible from user mode. During system calls,
this region will be utilized by the system call.
config PRIVILEGED_STACK_TEXT_AREA
int "Privileged stacks text area"
default 512 if COVERAGE_GCOV
default 256
depends on ARCH_HAS_USERSPACE
help
Stack text area size for privileged stacks.
config KOBJECT_TEXT_AREA
int "Size if kobject text area"
default 512 if COVERAGE_GCOV
default 512 if NO_OPTIMIZATIONS
default 256
depends on ARCH_HAS_USERSPACE
help
Size of kernel object text area. Used in linker script.
config STACK_GROWS_UP
bool "Stack grows towards higher memory addresses"
help
Select this option if the architecture has upward growing thread
stacks. This is not common.
config MAX_THREAD_BYTES
int "Bytes to use when tracking object thread permissions"
default 2
depends on USERSPACE
help
Every kernel object will have an associated bitfield to store
thread permissions for that object. This controls the size of the
bitfield (in bytes) and imposes a limit on how many threads can
be created in the system.
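A worked example of the limit this implies, assuming the default value and one permission bit per thread:
# MAX_THREAD_BYTES = 2  ->  2 * 8 = 16 permission bits per kernel object,
# so at most 16 threads can be tracked for access to any given object.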
config DYNAMIC_OBJECTS
bool "Allow kernel objects to be allocated at runtime"
depends on USERSPACE
help
Enabling this option allows for kernel objects to be requested from
the calling thread's resource pool, at a slight cost in performance
due to the supplemental run-time tables required to validate such
objects.
Objects allocated in this way can be freed with a supervisor-only
API call, or when the number of references to that object drops to
zero.
if ARCH_HAS_NOCACHE_MEMORY_SUPPORT
config NOCACHE_MEMORY
bool "Support for uncached memory"
help
Add a "nocache" read-write memory section that is configured to
not be cached. This memory section can be used to perform DMA
transfers when cache coherence issues are not optimal or can not
be solved using cache maintenance operations.
endif # ARCH_HAS_NOCACHE_MEMORY_SUPPORT
menu "Interrupt Configuration"
#
# Interrupt related configs
#
config DYNAMIC_INTERRUPTS
bool "Enable installation of IRQs at runtime"
help
Enable installation of interrupts at runtime, which will move some
interrupt-related data structures to RAM instead of ROM, and
on some architectures increase code size.
config GEN_ISR_TABLES
bool "Use generated IRQ tables"
help
This option controls whether a platform uses the gen_isr_tables
script to generate its interrupt tables. This mechanism will create
an appropriate hardware vector table and/or software IRQ table.
config GEN_IRQ_VECTOR_TABLE
bool "Generate an interrupt vector table"
default y
depends on GEN_ISR_TABLES
help
This option controls whether a platform using gen_isr_tables
needs an interrupt vector table created. Only disable this if the
platform does not use a vector table at all, or requires the vector
table to be in a format that is not an array of function pointers
indexed by IRQ line. In the latter case, the vector table must be
supplied by the application or architecture code.
config GEN_SW_ISR_TABLE
bool "Generate a software ISR table"
default y
depends on GEN_ISR_TABLES
help
This option controls whether a platform using gen_isr_tables
needs a software ISR table created. This is an array of struct
_isr_table_entry containing the interrupt service routine and supplied
parameter.
config ARCH_SW_ISR_TABLE_ALIGN
int "Alignment size of a software ISR table"
default 0
depends on GEN_SW_ISR_TABLE
help
This option controls the alignment size of the generated
_sw_isr_table. Some architectures need the software ISR table
to be aligned to an architecture-specific size. The default
size is 0 for no alignment.
config GEN_IRQ_START_VECTOR
int
default 0
depends on GEN_ISR_TABLES
help
On some architectures, part of the vector table may be reserved for
system exceptions and is declared separately from the tables
created by gen_isr_tables.py. When creating these tables, this value
will be subtracted from CONFIG_NUM_IRQS to properly size them.
This is a hidden option which needs to be set per architecture and
left alone.
config IRQ_OFFLOAD
bool "Enable IRQ offload"
depends on TEST
help
Enable irq_offload() API which allows functions to be synchronously
run in interrupt context. Only useful for test cases that need
to validate the correctness of kernel objects in IRQ context.
endmenu # Interrupt configuration
endmenu
#
# Architecture Capabilities
#
config ARCH_HAS_TRUSTED_EXECUTION
bool
config ARCH_HAS_STACK_PROTECTION
bool
config ARCH_HAS_USERSPACE
bool
config ARCH_HAS_EXECUTABLE_PAGE_BIT
bool
config ARCH_HAS_NOCACHE_MEMORY_SUPPORT
bool
config ARCH_HAS_RAMFUNC_SUPPORT
bool
config ARCH_HAS_NESTED_EXCEPTION_DETECTION
bool
#
# Other architecture related options
#
config ARCH_HAS_THREAD_ABORT
bool
#
# Hidden PM feature configs which are to be selected by
# individual SoC.
#
config HAS_SYS_POWER_STATE_SLEEP_1
config SYS_POWER_LOW_POWER_STATE_SUPPORTED
# Hidden
bool
default n
help
This option signifies that the target supports the SYS_POWER_STATE_SLEEP_1
configuration option.
This option signifies that the target supports the SYS_POWER_LOW_POWER_STATE
configuration option.
config HAS_SYS_POWER_STATE_SLEEP_2
config SYS_POWER_DEEP_SLEEP_SUPPORTED
# Hidden
bool
default n
help
This option signifies that the target supports the SYS_POWER_STATE_SLEEP_2
configuration option.
config HAS_SYS_POWER_STATE_SLEEP_3
# Hidden
bool
help
This option signifies that the target supports the SYS_POWER_STATE_SLEEP_3
configuration option.
config HAS_SYS_POWER_STATE_DEEP_SLEEP_1
# Hidden
bool
help
This option signifies that the target supports the SYS_POWER_STATE_DEEP_SLEEP_1
configuration option.
config HAS_SYS_POWER_STATE_DEEP_SLEEP_2
# Hidden
bool
help
This option signifies that the target supports the SYS_POWER_STATE_DEEP_SLEEP_2
configuration option.
config HAS_SYS_POWER_STATE_DEEP_SLEEP_3
# Hidden
bool
help
This option signifies that the target supports the SYS_POWER_STATE_DEEP_SLEEP_3
configuration option.
config BOOTLOADER_CONTEXT_RESTORE_SUPPORTED
# Hidden
bool
help
This option signifies that the target has options of bootloaders
that support context restore upon resume from deep sleep
# End hidden CPU family configs
#
config CPU_HAS_TEE
bool
help
This option is enabled when the CPU has support for Trusted
Execution Environment (e.g. when it has a security attribution
unit).
config CPU_HAS_FPU
bool
help
This option is enabled when the CPU has hardware floating point
unit.
config CPU_HAS_MPU
bool
help
This option is enabled when the CPU has a Memory Protection Unit (MPU).
config MEMORY_PROTECTION
bool
help
This option is enabled when Memory Protection features are supported.
Memory protection support is currently available on ARC, ARM, and x86
architectures.
config MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT
bool
help
This option is enabled when the MPU requires a power of two alignment
and size for MPU regions.
config MPU_REQUIRES_NON_OVERLAPPING_REGIONS
bool
help
This option is enabled when the MPU requires the active (i.e. enabled)
MPU regions to be non-overlapping with each other.
config MPU_GAP_FILLING
bool "Force MPU to be filling in background memory regions"
depends on MPU_REQUIRES_NON_OVERLAPPING_REGIONS
default y if !USERSPACE
help
This Kconfig option instructs the MPU driver to enforce
a full kernel SRAM partitioning, when it programs the
dynamic MPU regions (user thread stack, PRIV stack guard
and application memory domains) during context-switch. We
allow this to be a configurable option, in order to be able
to switch the option off and have an increased number of MPU
regions available for application memory domain programming.
Notes:
An increased number of MPU regions should only be required,
when building with USERSPACE support. As a result, when we
build without USERSPACE support, gap filling should always
be required.
When the option is switched off, access to memory areas not
covered by explicit MPU regions is restricted to privileged
code on an ARCH-specific basis. Refer to ARCH-specific
documentation for more information on how this option is
used.
menuconfig FLOAT
bool "Floating point"
depends on CPU_HAS_FPU
depends on ARM || X86 || ARC
help
This option allows threads to use the floating point registers.
By default, only a single thread may use the registers.
Disabling this option means that any thread that uses a
floating point register will get a fatal exception.
if FLOAT
config FP_SHARING
bool "Floating point register sharing"
help
This option allows multiple threads to use the floating point
registers.
endif # FLOAT
This option signifies that the target supports the SYS_POWER_DEEP_SLEEP
configuration option.
#
# End hidden PM feature configs
#
@@ -504,34 +66,39 @@ endif # FLOAT
config ARCH
string
help
System architecture string.
System architecture string.
config SOC
string
help
SoC name which can be found under soc/<arch>/<soc name>.
This option holds the directory name used by the build system to locate
the correct linker and header files for the SoC.
SoC name which can be found under arch/<arch>/soc/<soc name>.
This option holds the directory name used by the build system to locate
the correct linker and header files for the SoC. This option will go away
once all SoCs are using family/series structure.
config SOC_SERIES
string
help
SoC series name which can be found under soc/<arch>/<family>/<series>.
This option holds the directory name used by the build system to locate
the correct linker and header files.
SoC series name which can be found under arch/<arch>/soc/<family>/<series>.
This option holds the directory name used by the build system to locate
the correct linker and header files.
config SOC_FAMILY
string
help
SoC family name which can be found under soc/<arch>/<family>.
This option holds the directory name used by the build system to locate
the correct linker and header files.
SoC family name which can be found under arch/<arch>/soc/<family>.
This option holds the directory name used by the build system to locate
the correct linker and header files.
config BOARD
string
help
This option holds the name of the board and is used to locate the files
related to the board in the source tree (under boards/).
The Board is the first location where we search for a linker.ld file,
if not found we look for the linker file in
soc/<arch>/<family>/<series>
This option holds the name of the board and is used to located the files
related to the board in the source tree (under boards/).
The Board is the first location where we search for a linker.ld file,
if not found we look for the linker file in
arch/<arch>/soc/<family>/<series>
source "arch/*/Kconfig"
source "boards/Kconfig"

1
arch/Makefile Normal file

@@ -0,0 +1 @@
obj-y += $(ARCH)/


@@ -1,15 +0,0 @@
# SPDX-License-Identifier: Apache-2.0
# Enable debug support in mdb
# Dwarf version 2 can be recognized by mdb
# The default dwarf version in gdb is not recognized by mdb
zephyr_cc_option(-g3 -gdwarf-2)
# Without this (poorly named) option, compiler may generate undefined
# references to abort().
# See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63691
zephyr_cc_option(-fno-delete-null-pointer-checks)
zephyr_cc_option_ifdef(CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS -munaligned-access)
add_subdirectory(core)

6
arch/arc/Kbuild Normal file

@@ -0,0 +1,6 @@
subdir-ccflags-y +=-I$(srctree)/include/drivers
subdir-ccflags-y +=-I$(srctree)/drivers
subdir-asflags-y += $(subdir-ccflags-y)
obj-y += soc/$(SOC_PATH)/
obj-y += core/


@@ -1,7 +1,28 @@
# ARC options
# ARC EM4 options
#
# Copyright (c) 2014 Wind River Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
choice
prompt "ARC SoC Selection"
depends on ARC
source "arch/arc/soc/*/Kconfig.soc"
endchoice
# Copyright (c) 2014, 2019 Wind River Systems, Inc.
# SPDX-License-Identifier: Apache-2.0
menu "ARC Options"
depends on ARC
@@ -9,70 +30,53 @@ menu "ARC Options"
config ARCH
default "arc"
choice
prompt "ARC core family"
default CPU_ARCEM
config ARCH_DEFCONFIG
string
default "arch/arc/defconfig"
config CPU_ARCEM
bool "ARC EM cores"
menu "ARC EM4 processor options"
config CPU_ARCEM4
bool
default y
select CPU_ARCV2
select ATOMIC_OPERATIONS_C
help
This option signifies the use of an ARC EM CPU
This option signifies the use of an ARC EM4 CPU
config CPU_ARCHS
bool "ARC HS cores"
select CPU_ARCV2
select ATOMIC_OPERATIONS_BUILTIN
help
This option signifies the use of an ARC HS CPU
endchoice
config CPU_EM4
bool
help
If y, the SoC uses an ARC EM4 CPU
config CPU_EM4_DMIPS
bool
help
If y, the SoC uses an ARC EM4 DMIPS CPU
config CPU_EM4_FPUS
bool
help
If y, the SoC uses an ARC EM4 DMIPS CPU with the single-precision
floating-point extension
config CPU_EM4_FPUDA
bool
help
If y, the SoC uses an ARC EM4 DMIPS CPU with single-precision
floating-point and double assist instructions
config CPU_EM6
bool
help
If y, the SoC uses an ARC EM6 CPU
config FP_FPU_DA
bool
endmenu
menu "ARCv2 Family Options"
config CPU_ARCV2
config CPU_ARCV2
bool
select ARCH_HAS_STACK_PROTECTION if ARC_HAS_STACK_CHECKING || ARC_MPU
select ARCH_HAS_USERSPACE if ARC_MPU
select USE_SWITCH
select USE_SWITCH_SUPPORTED
default y
select NANOKERNEL_TICKLESS_IDLE_SUPPORTED
help
This option signifies the use of a CPU of the ARCv2 family.
config NUM_IRQ_PRIO_LEVELS
int "Number of supported interrupt priority levels"
config NSIM
prompt "Running on the MetaWare nSIM simulator"
bool
default n
help
For running on nSIM simulator.
a) Uses non-XIP to run in RAM.
b) Linked at address 0x4000 with 0x4000 of RAM so that it works with
a pc_size of 16 (default).
config DATA_ENDIANNESS_LITTLE
bool
default y
help
This is driven by the processor implementation, since it is fixed in
hardware. The BSP should set this value to 'n' if the data is
implemented as big endian.
config NUM_IRQ_PRIO_LEVELS
int
prompt "Number of supported interrupt priority levels"
range 1 16
help
Interrupt priorities available will be 0 to NUM_IRQ_PRIO_LEVELS-1.
@@ -80,8 +84,25 @@ config NUM_IRQ_PRIO_LEVELS
The BSP must provide a valid default for proper operation.
config NUM_IRQS
int "Upper limit of interrupt numbers/IDs used"
config NUM_REGULAR_IRQ_PRIO_LEVELS
int
prompt "Number of supported regular interrupt priority levels"
range 1 16
help
This represents the number of Regular Interrupt priorities, which
does NOT include the Fast (FIRQ) priority.
The Regular Interrupt priorities available will be
(NUM_IRQ_PRIO_LEVELS - NUM_REGULAR_IRQ_PRIO_LEVELS) to
(NUM_REGULAR_IRQ_PRIO_LEVELS - 1).
The maximum value is NUM_IRQ_PRIO_LEVELS.
The BSP must provide a valid default for proper operation.
config NUM_IRQS
int
prompt "Upper limit of interrupt numbers/IDs used"
range 17 256
help
Interrupts available will be 0 to NUM_IRQS-1.
@@ -91,247 +112,181 @@ config NUM_IRQS
The BSP must provide a valid default. This drives the size of the
vector table.
config RGF_NUM_BANKS
int "Number of General Purpose Register Banks"
config FIRQ_STACK_SIZE
int
prompt "Size of stack for FIRQs (in bytes)"
depends on CPU_ARCV2
range 1 2
default 2
help
The ARC CPU can be configured to have more than one register
bank. If fast interrupts are supported (FIRQ), the 2nd
register bank, in the set, will be used by FIRQ interrupts.
If fast interrupts are supported but there is only 1
register bank, the fast interrupt handler must save
and restore general purpose registers.
config ARC_FIRQ
bool "FIRQ enable"
default y
help
Fast interrupts (FIRQ) are supported. If FIRQ is enabled, for interrupts
with the highest priority, status32 and pc are saved in aux registers,
and other registers are saved according to the number of register banks.
If FIRQ is disabled, interrupts with the highest priority are handled
the same way as other interrupts.
config ARC_FIRQ_STACK
bool "Enable separate firq stack"
depends on ARC_FIRQ && RGF_NUM_BANKS > 1
default n
help
Use a separate stack for FIRQ handling. When the fast irq is also a direct
irq, this gives the minimal interrupt latency.
config ARC_FIRQ_STACK_SIZE
int "FIRQ stack size"
depends on ARC_FIRQ_STACK
default 1024
help
The size of the FIRQ stack.
FIRQs and regular IRQs have different stacks so that a FIRQ can start
running without doing stack switching in software.
config ARC_HAS_STACK_CHECKING
bool "ARC has STACK_CHECKING"
default y
config ARC_STACK_CHECKING
bool "Enable Stack Checking"
depends on CPU_ARCV2
default n
help
ARC is configured with STACK_CHECKING which is a mechanism for
checking stack accesses and raising an exception when a stack
overflow or underflow is detected.
ARCv2 has a special feature that allows checking for stack overflows. This
enables code that uses this debug feature.
config ARC_CONNECT
bool "ARC has ARC connect"
select SCHED_IPI_SUPPORTED
help
ARC is configured with ARC Connect, which is hardware for connecting
multiple cores.
config ARC_STACK_CHECKING
bool
help
Use ARC STACK_CHECKING to do stack protection
config ARC_STACK_PROTECTION
bool
default y if HW_STACK_PROTECTION
select ARC_STACK_CHECKING if ARC_HAS_STACK_CHECKING
select MPU_STACK_GUARD if (!ARC_STACK_CHECKING && ARC_MPU)
select THREAD_STACK_INFO
help
This option enables either:
- The ARC stack checking, or
- the MPU-based stack guard
to cause a system fatal error
if the bounds of the current process stack are overflowed.
The two stack guard options are mutually exclusive. The
selection of the ARC stack checking is
prioritized over the MPU-based stack guard.
config ARC_USE_UNALIGNED_MEM_ACCESS
bool "Enable unaligned access in HW"
default n if CPU_ARCEM
default y if CPU_ARCHS
depends on (CPU_ARCEM && !ARC_HAS_SECURE) || CPU_ARCHS
help
ARC EM cores without SecureShield 2+2 mode support might be configured
to support unaligned memory access, which is then disabled by default.
This enables unaligned access in hardware and makes software use it.
config FAULT_DUMP
int "Fault dump level"
config FAULT_DUMP
int
prompt "Fault dump level"
default 2
range 0 2
help
Different levels for display information when a fault occurs.
Different levels for display information when a fault occurs.
2: The default. Display specific and verbose information. Consumes
2: The default. Display specific and verbose information. Consumes
the most memory (long strings).
1: Display general and short information. Consumes less memory
1: Display general and short information. Consumes less memory
(short strings).
0: Off.
0: Off.
config XIP
default y if !UART_NSIM
config IRQ_OFFLOAD
bool "Enable IRQ offload"
default n
help
Enable irq_offload() API which allows functions to be synchronously
run in interrupt context. Uses one entry in the IDT. Mainly useful
for test cases.
config GEN_ISR_TABLES
config XIP
default n if NSIM
default y
config GEN_IRQ_START_VECTOR
default 16
config HARVARD
bool "Harvard Architecture"
prompt "Harvard Architecture"
bool
default n
help
The ARC CPU can be configured to have two busses;
one for instruction fetching and another that serves as a data bus.
config CODE_DENSITY
bool "Code Density Option"
config ICCM_SIZE
int "ICCM Size in kB"
help
Enable code density option to get better code density
This option specifies the size of the ICCM in kB. It is normally set by
the board's defconfig file and the user should generally avoid modifying
it via the menu configuration.
config ARC_HAS_ACCL_REGS
bool "Reg Pair ACCL:ACCH (FPU and/or MPY > 6)"
default y if FLOAT
config ICCM_BASE_ADDRESS
hex "ICCM Base Address"
help
Depending on the configuration, the CPU can contain an accumulator reg-pair
(also referred to as r58:r59). These can also be used by gcc as GPRs, so the
kernel needs to save/restore them per process.
This option specifies the base address of the ICCM on the board. It is
normally set by the board's defconfig file and the user should generally
avoid modifying it via the menu configuration.
config ARC_HAS_SECURE
bool "ARC has SecureShield"
select CPU_HAS_TEE
select ARCH_HAS_TRUSTED_EXECUTION
config DCCM_SIZE
int "DCCM Size in kB"
help
This option is enabled when ARC core supports secure mode
This option specifies the size of the DCCM in kB. It is normally set by
the board's defconfig file and the user should generally avoid modifying
it via the menu configuration.
config SJLI_TABLE_SIZE
int "SJLI table size"
depends on ARC_SECURE_FIRMWARE
default 8
config DCCM_BASE_ADDRESS
hex "DCCM Base Address"
help
The size of the sjli (Secure Jump and Link Indexed) table. Code
in normal mode calls secure services in secure mode through the
sjli instruction.
This option specifies the base address of the DCCM on the board. It is
normally set by the board's defconfig file and the user should generally
avoid modifying it via the menu configuration.
config ARC_SECURE_FIRMWARE
prompt "Generate Secure Firmware"
config SRAM_SIZE
int "SRAM Size in kB"
help
This option specifies the size of the SRAM in kB. It is normally set by
the board's defconfig file and the user should generally avoid modifying
it via the menu configuration.
config SRAM_BASE_ADDRESS
hex "SRAM Base Address"
help
This option specifies the base address of the SRAM on the board. It is
normally set by the board's defconfig file and the user should generally
avoid modifying it via the menu configuration.
config FLASH_SIZE
int "Flash Size in kB"
help
This option specifies the size of the flash in kB. It is normally set by
the board's defconfig file and the user should generally avoid modifying
it via the menu configuration.
config FLASH_BASE_ADDRESS
hex "Flash Base Address"
help
This option specifies the base address of the flash on the board. It is
normally set by the board's defconfig file and the user should generally
avoid modifying it via the menu configuration.
config SW_ISR_TABLE
	bool
	prompt "Enable software interrupt handler table"
	default y
	help
	  Enable an interrupt handler table implemented in software. This
	  table, unlike ISRs connected directly in the vector table, allows
	  a parameter to be passed to the interrupt handlers. Also, invoking
	  the exception/interrupt exit stub is done automatically.
	  This has to be enabled for dynamically connecting interrupt handlers
	  at runtime (SW_ISR_TABLE_DYNAMIC).
config ARC_NORMAL_FIRMWARE
	prompt "Generate Normal Firmware"
	bool
	depends on !ARC_SECURE_FIRMWARE
	depends on ARC_HAS_SECURE
	default y if TRUSTED_EXECUTION_NONSECURE
	help
	  This option indicates that we are building a Zephyr image that
	  is intended to execute in normal mode. Execution of this
	  image is triggered by secure firmware that executes in secure
	  mode. The option is only applicable to ARC processors that
	  implement the SecureShield.
	  This option enables Zephyr to include code that executes in
	  normal mode only, as well as to exclude code that is
	  designed to execute only in secure mode.
	  Code executing in normal mode has no access to secure
	  resources of the ARC processors, and, therefore, it shall avoid
	  accessing them.
config SW_ISR_TABLE_DYNAMIC
	bool
	prompt "Allow installing interrupt handlers at runtime"
	depends on SW_ISR_TABLE
	help
	  This option enables irq_connect_dynamic(). It moves the ISR table to
	  SRAM so that it is writable. This has the side-effect of removing
	  write-protection on the ISR table.
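For illustration only, a hedged sketch of installing a handler at runtime once this option is enabled. The IRQ number, priority and handler below are hypothetical, and the irq_connect_dynamic() and irq_enable() signatures are assumed from their normal Zephyr usage:

#include <irq.h>

#define EXAMPLE_IRQ_LINE 20	/* hypothetical interrupt line */

static void example_isr(void *arg)
{
	(void)arg;
	/* service the interrupt */
}

void install_example_handler(void)
{
	/* Writes into the SRAM-resident ISR table set up by this option */
	irq_connect_dynamic(EXAMPLE_IRQ_LINE, 1, example_isr, NULL, 0);
	irq_enable(EXAMPLE_IRQ_LINE);
}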
menu "ARC MPU Options"
depends on CPU_HAS_MPU
config ARC_MPU_ENABLE
bool "Enable MPU"
select ARC_MPU
help
Enable the ARC Memory Protection Unit (MPU).
source "arch/arc/core/mpu/Kconfig"
endmenu
config CACHE_LINE_SIZE_DETECT
bool "Detect d-cache line size at runtime"
help
This option enables querying the d-cache build register to find
	  the d-cache line size, at the expense of additional memory and code
	  and a slightly increased boot time.
If the CPU's d-cache line size is known in advance, disable this
option and manually enter the value for CACHE_LINE_SIZE.
config CACHE_LINE_SIZE
int "Cache line size" if !CACHE_LINE_SIZE_DETECT
default 32
help
Size in bytes of a CPU d-cache line.
Detect automatically at runtime by selecting CACHE_LINE_SIZE_DETECT.
config ARCH_CACHE_FLUSH_DETECT
bool
config CACHE_FLUSHING
bool "Enable d-cache flushing mechanism"
help
This links in the sys_cache_flush() function, which provides a
way to flush multiple lines of the d-cache.
If the d-cache is present, set this to y.
If the d-cache is NOT present, set this to n.
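A minimal usage sketch, assuming the sys_cache_flush() prototype shown in cache.c later in this comparison; the buffer is a made-up example:

#include <kernel.h>
#include <cache.h>

static u8_t tx_buffer[256];

void flush_before_dma(void)
{
	/* Write the d-cache lines covering tx_buffer back to main memory */
	sys_cache_flush((vaddr_t)tx_buffer, sizeof(tx_buffer));
}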
config ARC_EXCEPTION_STACK_SIZE
int "ARC exception handling stack size"
default 768
help
Size in bytes of the exception handling stack, which sits at the top of
	  the interrupt stack to keep the memory footprint small, since exceptions
	  are infrequent. To limit the impact on interrupt handling, especially
	  nested interrupts, it should not be too large.
endmenu
config ARC_EXCEPTION_DEBUG
	bool "Unhandled exception debugging information"
	depends on PRINTK || LOG
	default n
	help
	  Print human-readable information about exception vectors, cause codes,
	  and parameters, at a cost of code/data size for the human-readable
	  strings.
config IRQ_VECTOR_TABLE_CUSTOM
bool
prompt "Projects provide a custom static IRQ part of vector table"
depends on !SW_ISR_TABLE
default n
help
Projects, not the BSP, provide the IRQ part of the vector table.
This is the table of interrupt handlers with the best potential
performance, but it is the least flexible.
The ISRs are installed directly in the vector table, thus are
directly called by the CPU when an interrupt is taken. This adds
the least overhead when handling an interrupt.
Downsides:
- ISRs cannot have a parameter
- ISRs cannot be connected at runtime
- ISRs must notify the kernel manually by invoking _ExcExit() when
they are about to return.
config IRQ_VECTOR_TABLE_BSP
bool
# omit prompt to signify a "hidden" option
depends on SW_ISR_TABLE || !IRQ_VECTOR_TABLE_CUSTOM
default y
help
Not user-selectable, helps build system logic.
config ARCH_HAS_TASK_ABORT
bool
# omit prompt to signify a "hidden" option
default n
config ARCH_HAS_NANO_FIBER_ABORT
bool
# omit prompt to signify a "hidden" option
default n
endmenu
source "arch/arc/soc/*/Kconfig"
endmenu

15
arch/arc/Makefile Normal file
View File

@@ -0,0 +1,15 @@
cflags-y += $(call cc-option,-ffunction-sections,) $(call cc-option,-fdata-sections,)
cflags-$(CONFIG_ARC_STACK_CHECKING) = $(call cc-option,-fomit-frame-pointer)
cflags-$(CONFIG_LTO) = $(call cc-option,-flto,)
include $(srctree)/arch/$(ARCH)/soc/$(SOC_PATH)/Makefile
KBUILD_CFLAGS += $(cflags-y)
KBUILD_CXXFLAGS += $(cflags-y)
soc-cxxflags ?= $(soc-cflags)
soc-aflags ?= $(soc-cflags)
KBUILD_CFLAGS += $(soc-cflags)
KBUILD_CXXFLAGS += $(soc-cxxflags)
KBUILD_AFLAGS += $(soc-aflags)

View File

@@ -1,32 +0,0 @@
# SPDX-License-Identifier: Apache-2.0
zephyr_library()
zephyr_library_sources(
thread.c
thread_entry_wrapper.S
cpu_idle.S
fatal.c
fault.c
fault_s.S
irq_manage.c
timestamp.c
isr_wrapper.S
regular_irq.S
switch.S
prep_c.c
reset.S
vector_table.c
)
zephyr_library_sources_ifdef(CONFIG_CACHE_FLUSHING cache.c)
zephyr_library_sources_ifdef(CONFIG_ARC_FIRQ fast_irq.S)
zephyr_library_sources_if_kconfig(irq_offload.c)
zephyr_library_sources_ifdef(CONFIG_USERSPACE userspace.S)
zephyr_library_sources_ifdef(CONFIG_ARC_CONNECT arc_connect.c)
zephyr_library_sources_ifdef(CONFIG_ARC_CONNECT arc_smp.c)
add_subdirectory_ifdef(CONFIG_ARC_CORE_MPU mpu)
add_subdirectory_ifdef(CONFIG_ARC_SECURE_FIRMWARE secureshield)

21
arch/arc/core/Makefile Normal file
View File

@@ -0,0 +1,21 @@
ccflags-y += -I$(srctree)/kernel/nanokernel/include
ccflags-y += -I$(srctree)/arch/$(ARCH)/include
ccflags-y += -I$(srctree)/kernel/microkernel/include
obj-y += thread.o thread_entry_wrapper.o \
cpu_idle.o fast_irq.o fatal.o fault.o \
fault_s.o irq_manage.o \
isr_wrapper.o regular_irq.o swap_macros.h swap.o \
sys_fatal_error_handler.o
obj-y += prep_c.o \
reset.o \
vector_table.o
obj-$(CONFIG_IRQ_OFFLOAD) += irq_offload.o
# Some ARC cores, like the EM4, lack the atomic LLOCK/SCOND instructions
# and can't use these routines.
obj-$(CONFIG_ATOMIC_OPERATIONS_CUSTOM) += atomic.o
obj-$(CONFIG_IRQ_VECTOR_TABLE_BSP) += irq_vector_table.o
obj-$(CONFIG_SW_ISR_TABLE) += sw_isr_table.o

View File

@@ -1,449 +0,0 @@
/*
* Copyright (c) 2019 Synopsys.
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief ARCv2 ARC CONNECT driver
*
*/
#include <kernel.h>
#include <arch/cpu.h>
#include <spinlock.h>
static struct k_spinlock arc_connect_spinlock;
#define LOCKED(lck) for (k_spinlock_key_t __i = {}, \
__key = k_spin_lock(lck); \
!__i.key; \
k_spin_unlock(lck, __key), __i.key = 1)
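The LOCKED() macro above is the usual for-loop trick for a scoped critical section: the body runs exactly once, with the spinlock taken before it and released in the loop's increment expression. Roughly, a use of it expands as in the following sketch (illustrative only):

/* LOCKED(&arc_connect_spinlock) { <body> } behaves approximately like: */
{
	k_spinlock_key_t __i = {};
	k_spinlock_key_t __key = k_spin_lock(&arc_connect_spinlock);

	while (!__i.key) {	/* true exactly once */
		/* <body> runs here with the spinlock held */
		k_spin_unlock(&arc_connect_spinlock, __key);
		__i.key = 1;
	}
}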
/* Generate an inter-core interrupt to the target core */
void z_arc_connect_ici_generate(u32_t core)
{
LOCKED(&arc_connect_spinlock) {
z_arc_connect_cmd(ARC_CONNECT_CMD_INTRPT_GENERATE_IRQ, core);
}
}
/* Acknowledge the inter-core interrupt raised by core */
void z_arc_connect_ici_ack(u32_t core)
{
LOCKED(&arc_connect_spinlock) {
z_arc_connect_cmd(ARC_CONNECT_CMD_INTRPT_GENERATE_ACK, core);
}
}
/* Read inter-core interrupt status */
u32_t z_arc_connect_ici_read_status(u32_t core)
{
u32_t ret = 0;
LOCKED(&arc_connect_spinlock) {
z_arc_connect_cmd(ARC_CONNECT_CMD_INTRPT_READ_STATUS, core);
ret = z_arc_connect_cmd_readback();
}
return ret;
}
/* Check the source of inter-core interrupt */
u32_t z_arc_connect_ici_check_src(void)
{
u32_t ret = 0;
LOCKED(&arc_connect_spinlock) {
z_arc_connect_cmd(ARC_CONNECT_CMD_INTRPT_CHECK_SOURCE, 0);
ret = z_arc_connect_cmd_readback();
}
return ret;
}
/* Clear the inter-core interrupt */
void z_arc_connect_ici_clear(void)
{
u32_t cpu, c;
LOCKED(&arc_connect_spinlock) {
z_arc_connect_cmd(ARC_CONNECT_CMD_INTRPT_CHECK_SOURCE, 0);
cpu = z_arc_connect_cmd_readback(); /* 1,2,4,8... */
/*
 * In rare cases, multiple concurrent ICIs sent to the same target can
 * possibly be coalesced by MCIP into 1 asserted IRQ, so @cpu can be
 * "vectored" (multiple bits set) as opposed to the typical single bit
*/
while (cpu) {
c = find_lsb_set(cpu) - 1;
z_arc_connect_cmd(
ARC_CONNECT_CMD_INTRPT_GENERATE_ACK, c);
cpu &= ~(1U << c);
}
}
}
/* Reset the cores in core_mask */
void z_arc_connect_debug_reset(u32_t core_mask)
{
LOCKED(&arc_connect_spinlock) {
z_arc_connect_cmd_data(ARC_CONNECT_CMD_DEBUG_RESET,
0, core_mask);
}
}
/* Halt the cores in core_mask */
void z_arc_connect_debug_halt(u32_t core_mask)
{
LOCKED(&arc_connect_spinlock) {
z_arc_connect_cmd_data(ARC_CONNECT_CMD_DEBUG_HALT,
0, core_mask);
}
}
/* Run the cores in core_mask */
void z_arc_connect_debug_run(u32_t core_mask)
{
LOCKED(&arc_connect_spinlock) {
z_arc_connect_cmd_data(ARC_CONNECT_CMD_DEBUG_RUN,
0, core_mask);
}
}
/* Set core mask */
void z_arc_connect_debug_mask_set(u32_t core_mask, u32_t mask)
{
LOCKED(&arc_connect_spinlock) {
z_arc_connect_cmd_data(ARC_CONNECT_CMD_DEBUG_SET_MASK,
mask, core_mask);
}
}
/* Read core mask */
u32_t z_arc_connect_debug_mask_read(u32_t core_mask)
{
u32_t ret = 0;
LOCKED(&arc_connect_spinlock) {
z_arc_connect_cmd_data(ARC_CONNECT_CMD_DEBUG_READ_MASK,
0, core_mask);
ret = z_arc_connect_cmd_readback();
}
return ret;
}
/*
* Select cores that should be halted if the core issuing the command is halted
*/
void z_arc_connect_debug_select_set(u32_t core_mask)
{
LOCKED(&arc_connect_spinlock) {
z_arc_connect_cmd_data(ARC_CONNECT_CMD_DEBUG_SET_SELECT,
0, core_mask);
}
}
/* Read the select value */
u32_t z_arc_connect_debug_select_read(void)
{
u32_t ret = 0;
LOCKED(&arc_connect_spinlock) {
z_arc_connect_cmd(ARC_CONNECT_CMD_DEBUG_READ_SELECT, 0);
ret = z_arc_connect_cmd_readback();
}
return ret;
}
/* Read the status, halt or run of all cores in the system */
u32_t z_arc_connect_debug_en_read(void)
{
u32_t ret = 0;
LOCKED(&arc_connect_spinlock) {
z_arc_connect_cmd(ARC_CONNECT_CMD_DEBUG_READ_EN, 0);
ret = z_arc_connect_cmd_readback();
}
return ret;
}
/* Read the last command sent */
u32_t z_arc_connect_debug_cmd_read(void)
{
u32_t ret = 0;
LOCKED(&arc_connect_spinlock) {
z_arc_connect_cmd(ARC_CONNECT_CMD_DEBUG_READ_CMD, 0);
ret = z_arc_connect_cmd_readback();
}
return ret;
}
/* Read the value of internal MCD_CORE register */
u32_t z_arc_connect_debug_core_read(void)
{
u32_t ret = 0;
LOCKED(&arc_connect_spinlock) {
z_arc_connect_cmd(ARC_CONNECT_CMD_DEBUG_READ_CORE, 0);
ret = z_arc_connect_cmd_readback();
}
return ret;
}
/* Clear global free running counter */
void z_arc_connect_gfrc_clear(void)
{
LOCKED(&arc_connect_spinlock) {
z_arc_connect_cmd(ARC_CONNECT_CMD_GFRC_CLEAR, 0);
}
}
/* Read total 64 bits of global free running counter */
u64_t z_arc_connect_gfrc_read(void)
{
u32_t low;
u32_t high;
u32_t key;
/*
 * each core has its own ARC connect interface, i.e.,
 * CMD/READBACK. So several concurrent commands to ARC
 * connect are OK if they are trying to access different
 * sub-components. For GFRC, the HW allows simultaneous access to the
 * counters, so an IRQ lock is enough.
*/
key = arch_irq_lock();
z_arc_connect_cmd(ARC_CONNECT_CMD_GFRC_READ_LO, 0);
low = z_arc_connect_cmd_readback();
z_arc_connect_cmd(ARC_CONNECT_CMD_GFRC_READ_HI, 0);
high = z_arc_connect_cmd_readback();
arch_irq_unlock(key);
return (((u64_t)high) << 32) | low;
}
/* Enable global free running counter */
void z_arc_connect_gfrc_enable(void)
{
LOCKED(&arc_connect_spinlock) {
z_arc_connect_cmd(ARC_CONNECT_CMD_GFRC_ENABLE, 0);
}
}
/* Disable global free running counter */
void z_arc_connect_gfrc_disable(void)
{
LOCKED(&arc_connect_spinlock) {
z_arc_connect_cmd(ARC_CONNECT_CMD_GFRC_DISABLE, 0);
}
}
/* Set the relevant cores to halt the global free running counter */
void z_arc_connect_gfrc_core_set(u32_t core_mask)
{
LOCKED(&arc_connect_spinlock) {
z_arc_connect_cmd_data(ARC_CONNECT_CMD_GFRC_SET_CORE,
0, core_mask);
}
}
/* Read the halt status of the global free running counter */
u32_t z_arc_connect_gfrc_halt_read(void)
{
u32_t ret = 0;
LOCKED(&arc_connect_spinlock) {
z_arc_connect_cmd(ARC_CONNECT_CMD_GFRC_READ_HALT, 0);
ret = z_arc_connect_cmd_readback();
}
return ret;
}
/* Read the internal CORE register */
u32_t z_arc_connect_gfrc_core_read(void)
{
u32_t ret = 0;
LOCKED(&arc_connect_spinlock) {
z_arc_connect_cmd(ARC_CONNECT_CMD_GFRC_READ_CORE, 0);
ret = z_arc_connect_cmd_readback();
}
return ret;
}
/* Enable interrupt distribute unit */
void z_arc_connect_idu_enable(void)
{
LOCKED(&arc_connect_spinlock) {
z_arc_connect_cmd(ARC_CONNECT_CMD_IDU_ENABLE, 0);
}
}
/* Disable interrupt distribute unit */
void z_arc_connect_idu_disable(void)
{
LOCKED(&arc_connect_spinlock) {
z_arc_connect_cmd(ARC_CONNECT_CMD_IDU_DISABLE, 0);
}
}
/* Read enable status of interrupt distribute unit */
u32_t z_arc_connect_idu_read_enable(void)
{
u32_t ret = 0;
LOCKED(&arc_connect_spinlock) {
z_arc_connect_cmd(ARC_CONNECT_CMD_IDU_READ_ENABLE, 0);
ret = z_arc_connect_cmd_readback();
}
return ret;
}
/*
* Set the triggering mode and distribution mode for the specified common
* interrupt
*/
void z_arc_connect_idu_set_mode(u32_t irq_num,
u16_t trigger_mode, u16_t distri_mode)
{
LOCKED(&arc_connect_spinlock) {
z_arc_connect_cmd_data(ARC_CONNECT_CMD_IDU_SET_MODE,
irq_num, (distri_mode | (trigger_mode << 4)));
}
}
/* Read the internal MODE register of the specified common interrupt */
u32_t z_arc_connect_idu_read_mode(u32_t irq_num)
{
u32_t ret = 0;
LOCKED(&arc_connect_spinlock) {
z_arc_connect_cmd(ARC_CONNECT_CMD_IDU_READ_MODE, irq_num);
ret = z_arc_connect_cmd_readback();
}
return ret;
}
/*
* Set the target cores to receive the specified common interrupt
* when it is triggered
*/
void z_arc_connect_idu_set_dest(u32_t irq_num, u32_t core_mask)
{
LOCKED(&arc_connect_spinlock) {
z_arc_connect_cmd_data(ARC_CONNECT_CMD_IDU_SET_DEST,
irq_num, core_mask);
}
}
/* Read the internal DEST register of the specified common interrupt */
u32_t z_arc_connect_idu_read_dest(u32_t irq_num)
{
u32_t ret = 0;
LOCKED(&arc_connect_spinlock) {
z_arc_connect_cmd(ARC_CONNECT_CMD_IDU_READ_DEST, irq_num);
ret = z_arc_connect_cmd_readback();
}
return ret;
}
/* Assert the specified common interrupt */
void z_arc_connect_idu_gen_cirq(u32_t irq_num)
{
LOCKED(&arc_connect_spinlock) {
z_arc_connect_cmd(ARC_CONNECT_CMD_IDU_GEN_CIRQ, irq_num);
}
}
/* Acknowledge the specified common interrupt */
void z_arc_connect_idu_ack_cirq(u32_t irq_num)
{
LOCKED(&arc_connect_spinlock) {
z_arc_connect_cmd(ARC_CONNECT_CMD_IDU_ACK_CIRQ, irq_num);
}
}
/* Read the internal STATUS register of the specified common interrupt */
u32_t z_arc_connect_idu_check_status(u32_t irq_num)
{
u32_t ret = 0;
LOCKED(&arc_connect_spinlock) {
z_arc_connect_cmd(ARC_CONNECT_CMD_IDU_CHECK_STATUS, irq_num);
ret = z_arc_connect_cmd_readback();
}
return ret;
}
/* Read the internal SOURCE register of the specified common interrupt */
u32_t z_arc_connect_idu_check_source(u32_t irq_num)
{
u32_t ret = 0;
LOCKED(&arc_connect_spinlock) {
z_arc_connect_cmd(ARC_CONNECT_CMD_IDU_CHECK_SOURCE, irq_num);
ret = z_arc_connect_cmd_readback();
}
return ret;
}
/* Mask or unmask the specified common interrupt */
void z_arc_connect_idu_set_mask(u32_t irq_num, u32_t mask)
{
LOCKED(&arc_connect_spinlock) {
z_arc_connect_cmd_data(ARC_CONNECT_CMD_IDU_SET_MASK,
irq_num, mask);
}
}
/* Read the internal MASK register of the specified common interrupt */
u32_t z_arc_connect_idu_read_mask(u32_t irq_num)
{
u32_t ret = 0;
LOCKED(&arc_connect_spinlock) {
z_arc_connect_cmd(ARC_CONNECT_CMD_IDU_READ_MASK, irq_num);
ret = z_arc_connect_cmd_readback();
}
return ret;
}
/*
* Check if it is the first-acknowledging core to the common interrupt
* if IDU is programmed in the first-acknowledged mode
*/
u32_t z_arc_connect_idu_check_first(u32_t irq_num)
{
u32_t ret = 0;
LOCKED(&arc_connect_spinlock) {
z_arc_connect_cmd(ARC_CONNECT_CMD_IDU_CHECK_FIRST, irq_num);
ret = z_arc_connect_cmd_readback();
}
return ret;
}

View File

@@ -1,185 +0,0 @@
/*
* Copyright (c) 2019 Synopsys.
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief codes required for ARC multicore and Zephyr smp support
*
*/
#include <device.h>
#include <kernel.h>
#include <kernel_structs.h>
#include <ksched.h>
#include <soc.h>
#include <init.h>
#ifndef IRQ_ICI
#define IRQ_ICI 19
#endif
#define ARCV2_ICI_IRQ_PRIORITY 1
volatile struct {
void (*fn)(int, void*);
void *arg;
} arc_cpu_init[CONFIG_MP_NUM_CPUS];
/*
 * arc_cpu_wake_flag is used to sync up the master core and slave cores.
 * A slave core will spin on arc_cpu_wake_flag until the master core sets
 * it to that slave core's id. Then, the slave core clears it to notify
 * the master core that it has woken up.
*
*/
volatile u32_t arc_cpu_wake_flag;
volatile char *arc_cpu_sp;
/*
 * _curr_cpu records the _cpu_t struct of each cpu
 * for efficient use in assembly
*/
volatile _cpu_t *_curr_cpu[CONFIG_MP_NUM_CPUS];
/* Called from Zephyr initialization */
void arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz,
void (*fn)(int, void *), void *arg)
{
_curr_cpu[cpu_num] = &(_kernel.cpus[cpu_num]);
arc_cpu_init[cpu_num].fn = fn;
arc_cpu_init[cpu_num].arg = arg;
/* set the initial sp of the target cpu through arc_cpu_sp;
	 * arc_cpu_wake_flag protects arc_cpu_sp so that
	 * only one slave cpu can read it at a time
*/
arc_cpu_sp = Z_THREAD_STACK_BUFFER(stack) + sz;
arc_cpu_wake_flag = cpu_num;
/* wait for the slave cpu to start */
while (arc_cpu_wake_flag != 0) {
;
}
}
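For clarity, the slave side of this handshake lives in the ARC start-up assembly, which is not part of this file; an illustrative C reconstruction of what it does, under the assumptions stated in the comments above, is:

/* Illustrative only: C equivalent of the slave core's start-up spin loop */
void slave_side_of_handshake(int my_cpu_num)
{
	/* spin until the master publishes this core's id */
	while (arc_cpu_wake_flag != my_cpu_num) {
		;
	}

	/* arc_cpu_sp now holds this core's initial stack pointer; the real
	 * code loads it into sp in assembly before touching the stack
	 */
	arc_cpu_wake_flag = 0;	/* tell the master this core is awake */

	z_arc_slave_start(my_cpu_num);
}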
/* the C entry of slave cores */
void z_arc_slave_start(int cpu_num)
{
void (*fn)(int, void*);
#ifdef CONFIG_SMP
z_icache_setup();
z_irq_setup();
z_arc_connect_ici_clear();
z_irq_priority_set(IRQ_ICI, ARCV2_ICI_IRQ_PRIORITY, 0);
irq_enable(IRQ_ICI);
#endif
/* call the function set by arch_start_cpu */
fn = arc_cpu_init[cpu_num].fn;
fn(cpu_num, arc_cpu_init[cpu_num].arg);
}
#ifdef CONFIG_SMP
static void sched_ipi_handler(void *unused)
{
ARG_UNUSED(unused);
z_arc_connect_ici_clear();
z_sched_ipi();
}
/**
 * @brief Check whether a thread switch needs to happen in ISR context
 *
 * @details u64_t is used so the compiler returns the value in (r0, r1):
 * r0 holds the new thread and r1 the old thread. If r0 == 0, no thread
 * switch is needed.
*/
u64_t z_arc_smp_switch_in_isr(void)
{
u64_t ret = 0;
u32_t new_thread;
u32_t old_thread;
old_thread = (u32_t)_current;
new_thread = (u32_t)z_get_next_ready_thread();
if (new_thread != old_thread) {
#ifdef CONFIG_TIMESLICING
z_reset_time_slice();
#endif
_current_cpu->swap_ok = 0;
((struct k_thread *)new_thread)->base.cpu =
arch_curr_cpu()->id;
_current = (struct k_thread *) new_thread;
ret = new_thread | ((u64_t)(old_thread) << 32);
}
return ret;
}
/* arch implementation of sched_ipi */
void arch_sched_ipi(void)
{
u32_t i;
/* broadcast sched_ipi request to other cores
* if the target is current core, hardware will ignore it
*/
for (i = 0; i < CONFIG_MP_NUM_CPUS; i++) {
z_arc_connect_ici_generate(i);
}
}
static int arc_smp_init(struct device *dev)
{
ARG_UNUSED(dev);
struct arc_connect_bcr bcr;
/* necessary master core init */
_kernel.cpus[0].id = 0;
_kernel.cpus[0].irq_stack = Z_THREAD_STACK_BUFFER(_interrupt_stack)
+ CONFIG_ISR_STACK_SIZE;
_curr_cpu[0] = &(_kernel.cpus[0]);
bcr.val = z_arc_v2_aux_reg_read(_ARC_V2_CONNECT_BCR);
if (bcr.ipi) {
/* register the ICI interrupt; only the master core needs to register it, once */
z_arc_connect_ici_clear();
IRQ_CONNECT(IRQ_ICI, ARCV2_ICI_IRQ_PRIORITY,
sched_ipi_handler, NULL, 0);
irq_enable(IRQ_ICI);
} else {
__ASSERT(0,
"ARC connect has no inter-core interrupt\n");
return -ENODEV;
}
if (bcr.gfrc) {
/* global free running count init */
z_arc_connect_gfrc_enable();
/* when all cores halt, gfrc halt */
z_arc_connect_gfrc_core_set((1 << CONFIG_MP_NUM_CPUS) - 1);
z_arc_connect_gfrc_clear();
} else {
__ASSERT(0,
"ARC connect has no global free running counter\n");
return -ENODEV;
}
return 0;
}
SYS_INIT(arc_smp_init, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
#endif

434
arch/arc/core/atomic.S Normal file
View File

@@ -0,0 +1,434 @@
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
* @brief ARC atomic operations library
*
* This library provides routines to perform a number of atomic operations
 * on a memory location: add, subtract, increment, decrement, bitwise OR,
 * bitwise XOR, bitwise AND, bitwise NAND, set, clear and compare-and-swap.
 *
 * This requires the processor to support the LLOCK and SCOND instructions,
 * which are not supported on ARC EM family processors.
*/
#define _ASMLANGUAGE
#include <toolchain.h>
#include <sections.h>
/* exports */
GTEXT(atomic_set)
GTEXT(atomic_get)
GTEXT(atomic_add)
GTEXT(atomic_nand)
GTEXT(atomic_and)
GTEXT(atomic_or)
GTEXT(atomic_xor)
GTEXT(atomic_clear)
GTEXT(atomic_dec)
GTEXT(atomic_inc)
GTEXT(atomic_sub)
GTEXT(atomic_cas)
.section .TEXT._Atomic, "ax"
.balign 2
/**
*
* @brief Atomically clear a memory location
*
* This routine atomically clears the contents of <target> and returns the old
* value that was in <target>.
*
* This routine can be used from both task and interrupt level.
*
* @return Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_clear
* (
* atomic_t *target /@ memory location to clear @/
* )
*/
SECTION_SUBSEC_FUNC(TEXT, atomic_clear_set, atomic_clear)
mov_s r1, 0
/* fall through into atomic_set */
/**
*
* @brief Atomically set a memory location
*
* This routine atomically sets the contents of <target> to <value> and returns
* the old value that was in <target>.
*
* This routine can be used from both task and interrupt level.
*
* @return Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_set
* (
* atomic_t *target, /@ memory location to set @/
* atomic_val_t value /@ set with this value @/
* )
*
*/
SECTION_SUBSEC_FUNC(TEXT, atomic_clear_set, atomic_set)
ex r1, [r0] /* swap new value with old value */
j_s.d [blink]
mov_s r0, r1 /* return old value */
/**
*
* @brief Get the value of a shared memory atomically
*
* This routine atomically retrieves the value in *target
*
* atomic_val_t atomic_get
* (
* atomic_t *target /@ address of atom to be retrieved @/
* )
*
* RETURN: value read from address target.
*
*/
SECTION_FUNC(TEXT, atomic_get)
ld_s r0, [r0, 0]
j_s [blink]
/**
*
* @brief Atomically increment a memory location
*
* This routine atomically increments the value in <target>. The operation is
* done using unsigned integer arithmetic. Various CPU architectures may impose
* restrictions with regards to the alignment and cache attributes of the
* atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* @return Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_inc
* (
* atomic_t *target, /@ memory location to increment @/
* )
*
*/
SECTION_SUBSEC_FUNC(TEXT, atomic_inc_add, atomic_inc)
mov_s r1, 1
/* fall through into atomic_add */
/**
*
* @brief Atomically add a value to a memory location
*
* This routine atomically adds the contents of <target> and <value>, placing
* the result in <target>. The operation is done using signed integer arithmetic.
* Various CPU architectures may impose restrictions with regards to the
* alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* @return Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_add
* (
* atomic_t *target, /@ memory location to add to @/
* atomic_val_t value /@ value to add @/
* )
*/
SECTION_SUBSEC_FUNC(TEXT, atomic_inc_add, atomic_add)
llock r2, [r0] /* load old value and mark exclusive access */
add_s r3, r1, r2
scond r3, [r0] /* try to store new value */
/* STATUS32.Z = 1 if successful */
bne_s atomic_add /* if store is not successful, retry */
j_s.d [blink]
mov_s r0, r2 /* return old value */
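In C-like pseudocode, the LLOCK/SCOND retry loop above behaves roughly as sketched below; llock() and scond() stand in for the instructions and are not real C functions:

/* Illustrative equivalent of atomic_add's retry loop */
atomic_val_t atomic_add_sketch(atomic_t *target, atomic_val_t value)
{
	atomic_val_t old;

	do {
		old = llock(target);		/* load old value, mark exclusive */
	} while (!scond(target, old + value));	/* store fails if exclusivity was lost */

	return old;				/* the old value, as above */
}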
/**
*
* @brief Atomically decrement a memory location
*
* This routine atomically decrements the value in <target>. The operation is
* done using unsigned integer arithmetic. Various CPU architectures may impose
* restrictions with regards to the alignment and cache attributes of the
* atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* @return Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_dec
* (
* atomic_t *target, /@ memory location to decrement @/
* )
*
*/
SECTION_SUBSEC_FUNC(TEXT, atomic_dec_sub, atomic_dec)
mov_s r1, 1
/* fall through into atomic_sub */
/**
*
* @brief Atomically subtract a value from a memory location
*
* This routine atomically subtracts <value> from the contents of <target>,
* placing the result in <target>. The operation is done using signed integer
* arithmetic. Various CPU architectures may impose restrictions with regards to
* the alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* @return Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_sub
* (
* atomic_t *target, /@ memory location to subtract from @/
* atomic_val_t value /@ value to subtract @/
* )
*
*/
SECTION_SUBSEC_FUNC(TEXT, atomic_dec_sub, atomic_sub)
llock r2, [r0] /* load old value and mark exclusive access */
sub r3, r2, r1
scond r3, [r0] /* try to store new value */
/* STATUS32.Z = 1 if successful */
bne_s atomic_sub /* if store is not successful, retry */
j_s.d [blink]
mov_s r0, r2 /* return old value */
/**
*
* @brief Atomically perform a bitwise NAND on a memory location
*
* This routine atomically performs a bitwise NAND operation of the contents of
* <target> and <value>, placing the result in <target>.
* Various CPU architectures may impose restrictions with regards to the
* alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* @return Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_nand
* (
* atomic_t *target, /@ memory location to NAND @/
* atomic_val_t value /@ NAND with this value @/
* )
*
*/
SECTION_FUNC(TEXT, atomic_nand)
llock r2, [r0] /* load old value and mark exclusive access */
and r3, r1, r2
not r3, r3
scond r3, [r0] /* try to store new value */
/* STATUS32.Z = 1 if successful */
bne_s atomic_nand /* if store is not successful, retry */
j_s.d [blink]
mov_s r0, r2 /* return old value */
/**
*
* @brief Atomically perform a bitwise AND on a memory location
*
* This routine atomically performs a bitwise AND operation of the contents of
* <target> and <value>, placing the result in <target>.
* Various CPU architectures may impose restrictions with regards to the
* alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* @return Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_and
* (
* atomic_t *target, /@ memory location to AND @/
* atomic_val_t value /@ AND with this value @/
* )
*
*/
SECTION_FUNC(TEXT, atomic_and)
llock r2, [r0] /* load old value and mark exclusive access */
and r3, r1, r2
scond r3, [r0] /* try to store new value */
/* STATUS32.Z = 1 if successful */
bne_s atomic_and /* if store is not successful, retry */
j_s.d [blink]
mov_s r0, r2 /* return old value */
/**
*
* @brief Atomically perform a bitwise OR on memory location
*
* This routine atomically performs a bitwise OR operation of the contents of
* <target> and <value>, placing the result in <target>.
* Various CPU architectures may impose restrictions with regards to the
* alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* @return Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_or
* (
* atomic_t *target, /@ memory location to OR @/
* atomic_val_t value /@ OR with this value @/
* )
*
*/
SECTION_FUNC(TEXT, atomic_or)
llock r2, [r0] /* load old value and mark exclusive access */
or r3, r1, r2
scond r3, [r0] /* try to store new value */
/* STATUS32.Z = 1 if successful */
bne_s atomic_or /* if store is not successful, retry */
j_s.d [blink]
mov_s r0, r2 /* return old value */
/**
*
* @brief Atomically perform a bitwise XOR on a memory location
*
* This routine atomically performs a bitwise XOR operation of the contents of
* <target> and <value>, placing the result in <target>.
* Various CPU architectures may impose restrictions with regards to the
* alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* @return Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_xor
* (
* atomic_t *target, /@ memory location to XOR @/
* atomic_val_t value /@ XOR with this value @/
* )
*
*/
SECTION_FUNC(TEXT, atomic_xor)
llock r2, [r0] /* load old value and mark exclusive access */
xor r3, r1, r2
scond r3, [r0] /* try to store new value */
/* STATUS32.Z = 1 if successful */
bne_s atomic_xor /* if store is not successful, retry */
j_s.d [blink]
mov_s r0, r2 /* return old value */
/**
*
* @brief Atomically compare-and-swap the contents of a memory location
*
 * This routine performs an atomic compare-and-swap, testing that the contents of
* <target> contains <oldValue>, and if it does, setting the value of <target>
* to <newValue>. Various CPU architectures may impose restrictions with regards
* to the alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* @return 1 if the swap is actually executed, 0 otherwise.
*
* ERRNO: N/A
*
* int atomic_cas
* (
* atomic_t *target, /@ memory location to compare-and-swap @/
* atomic_val_t oldValue, /@ compare to this value @/
* atomic_val_t newValue, /@ swap with this value @/
* )
*
*/
SECTION_FUNC(TEXT, atomic_cas)
llock r3, [r0] /* load old value and mark exclusive access */
cmp_s r1, r3
bne_s nanoAtomicCas_fail
scond r2, [r0] /* try to store new value */
/* STATUS32.Z = 1 if successful */
bne_s atomic_cas /* if store is not successful, retry */
j_s.d [blink]
mov_s r0, 1 /* return TRUE */
/* failed comparison */
nanoAtomicCas_fail:
scond r1, [r0] /* write old value to clear the access lock */
j_s.d [blink]
mov_s r0, 0 /* return FALSE */
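The compare-and-swap can be sketched the same way; again, llock()/scond() are stand-ins for the instructions, not a real API:

/* Illustrative equivalent of atomic_cas */
int atomic_cas_sketch(atomic_t *target, atomic_val_t old_value,
		      atomic_val_t new_value)
{
	while (1) {
		atomic_val_t cur = llock(target);	/* load, mark exclusive */

		if (cur != old_value) {
			/* the assembly issues a dummy scond here just to
			 * release the exclusive reservation
			 */
			return 0;			/* no swap performed */
		}
		if (scond(target, new_value)) {
			return 1;			/* swap performed */
		}
		/* exclusivity lost: retry */
	}
}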

View File

@@ -1,171 +0,0 @@
/* cache.c - d-cache support for ARC CPUs */
/*
* Copyright (c) 2016 Synopsys, Inc. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief d-cache manipulation
*
* This module contains functions for manipulation of the d-cache.
*/
#include <kernel.h>
#include <arch/cpu.h>
#include <sys/util.h>
#include <toolchain.h>
#include <cache.h>
#include <linker/linker-defs.h>
#include <arch/arc/v2/aux_regs.h>
#include <kernel_internal.h>
#include <sys/__assert.h>
#include <init.h>
#include <stdbool.h>
#if (CONFIG_CACHE_LINE_SIZE == 0) && !defined(CONFIG_CACHE_LINE_SIZE_DETECT)
#error Cannot use this implementation with a cache line size of 0
#endif
#if defined(CONFIG_CACHE_LINE_SIZE_DETECT)
#define DCACHE_LINE_SIZE sys_cache_line_size
#else
#define DCACHE_LINE_SIZE CONFIG_CACHE_LINE_SIZE
#endif
#define DC_CTRL_DC_ENABLE 0x0 /* enable d-cache */
#define DC_CTRL_DC_DISABLE 0x1 /* disable d-cache */
#define DC_CTRL_INVALID_ONLY 0x0 /* invalid d-cache only */
#define DC_CTRL_INVALID_FLUSH 0x40 /* invalid and flush d-cache */
#define DC_CTRL_ENABLE_FLUSH_LOCKED 0x80 /* locked d-cache can be flushed */
#define DC_CTRL_DISABLE_FLUSH_LOCKED 0x0 /* locked d-cache cannot be flushed */
#define DC_CTRL_FLUSH_STATUS 0x100 /* flush status */
#define DC_CTRL_DIRECT_ACCESS 0x0 /* direct access mode */
#define DC_CTRL_INDIRECT_ACCESS 0x20 /* indirect access mode */
#define DC_CTRL_OP_SUCCEEDED 0x4 /* d-cache operation succeeded */
static bool dcache_available(void)
{
unsigned long val = z_arc_v2_aux_reg_read(_ARC_V2_D_CACHE_BUILD);
val &= 0xff; /* extract version */
return (val == 0) ? false : true;
}
static void dcache_dc_ctrl(u32_t dcache_en_mask)
{
if (dcache_available()) {
z_arc_v2_aux_reg_write(_ARC_V2_DC_CTRL, dcache_en_mask);
}
}
static void dcache_enable(void)
{
dcache_dc_ctrl(DC_CTRL_DC_ENABLE);
}
/**
*
* @brief Flush multiple d-cache lines to memory
*
* No alignment is required for either <start_addr> or <size>, but since
* dcache_flush_mlines() iterates on the d-cache lines, a cache line
* alignment for both is optimal.
*
* The d-cache line size is specified either via the CONFIG_CACHE_LINE_SIZE
* kconfig option or it is detected at runtime.
*
* @param start_addr the pointer to start the multi-line flush
* @param size the number of bytes that are to be flushed
*
* @return N/A
*/
static void dcache_flush_mlines(u32_t start_addr, u32_t size)
{
u32_t end_addr;
unsigned int key;
if (!dcache_available() || (size == 0U)) {
return;
}
end_addr = start_addr + size - 1;
start_addr &= (u32_t)(~(DCACHE_LINE_SIZE - 1));
key = irq_lock(); /* --enter critical section-- */
do {
z_arc_v2_aux_reg_write(_ARC_V2_DC_FLDL, start_addr);
__asm__ volatile("nop_s");
__asm__ volatile("nop_s");
__asm__ volatile("nop_s");
/* wait for flush completion */
do {
if ((z_arc_v2_aux_reg_read(_ARC_V2_DC_CTRL) &
DC_CTRL_FLUSH_STATUS) == 0) {
break;
}
} while (1);
start_addr += DCACHE_LINE_SIZE;
} while (start_addr <= end_addr);
irq_unlock(key); /* --exit critical section-- */
}
/**
*
* @brief Flush d-cache lines to main memory
*
 * No alignment is required for either <start_addr> or <size>, but since
* sys_cache_flush() iterates on the d-cache lines, a d-cache line alignment for
* both is optimal.
*
* The d-cache line size is specified either via the CONFIG_CACHE_LINE_SIZE
* kconfig option or it is detected at runtime.
*
* @param start_addr the pointer to start the multi-line flush
* @param size the number of bytes that are to be flushed
*
* @return N/A
*/
void sys_cache_flush(vaddr_t start_addr, size_t size)
{
dcache_flush_mlines((u32_t)start_addr, (u32_t)size);
}
#if defined(CONFIG_CACHE_LINE_SIZE_DETECT)
size_t sys_cache_line_size;
static void init_dcache_line_size(void)
{
u32_t val;
val = z_arc_v2_aux_reg_read(_ARC_V2_D_CACHE_BUILD);
__ASSERT((val&0xff) != 0U, "d-cache is not present");
val = ((val>>16) & 0xf) + 1;
val *= 16U;
sys_cache_line_size = (size_t) val;
}
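As a worked example of the formula above, a BSize field value of 1 read from the d-cache build register yields (1 + 1) * 16 = 32 bytes, which matches the default CACHE_LINE_SIZE of 32 in the Kconfig earlier in this comparison.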
#endif
static int init_dcache(struct device *unused)
{
ARG_UNUSED(unused);
dcache_enable();
#if defined(CONFIG_CACHE_LINE_SIZE_DETECT)
init_dcache_line_size();
#endif
return 0;
}
SYS_INIT(init_dcache, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);

View File

@@ -1,7 +1,17 @@
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
@@ -11,20 +21,36 @@
* CPU power management routines.
*/
#include <kernel_structs.h>
#include <offsets_short.h>
#define _ASMLANGUAGE
#include <nano_private.h>
#include <offsets.h>
#include <toolchain.h>
#include <linker/sections.h>
#include <sections.h>
#include <arch/cpu.h>
GTEXT(arch_cpu_idle)
GTEXT(arch_cpu_atomic_idle)
GDATA(z_arc_cpu_sleep_mode)
GTEXT(nano_cpu_idle)
GTEXT(nano_cpu_atomic_idle)
GDATA(nano_cpu_sleep_mode)
SECTION_VAR(BSS, z_arc_cpu_sleep_mode)
.balign 4
#if defined(CONFIG_NANOKERNEL) && defined(CONFIG_TICKLESS_IDLE)
GTEXT(_power_save_idle)
#endif
SECTION_VAR(BSS, nano_cpu_sleep_mode)
.word 0
#if defined(CONFIG_NANOKERNEL) && defined(CONFIG_TICKLESS_IDLE)
.macro enter_tickless_idle
/* interrupts are locked when entering here */
push_s blink
jl _power_save_idle
pop_s blink
.endm
#else
#define enter_tickless_idle
#endif
/*
* @brief Put the CPU in low-power mode
*
@@ -33,29 +59,18 @@ SECTION_VAR(BSS, z_arc_cpu_sleep_mode)
 * void nano_cpu_idle(void)
*/
SECTION_FUNC(TEXT, arch_cpu_idle)
SECTION_FUNC(TEXT, nano_cpu_idle)
#ifdef CONFIG_TRACING
#ifdef CONFIG_KERNEL_EVENT_LOGGER_SLEEP
push_s blink
jl sys_trace_idle
jl _sys_k_event_logger_enter_sleep
pop_s blink
#endif
enter_tickless_idle
ld r1, [z_arc_cpu_sleep_mode]
ld r1, [nano_cpu_sleep_mode]
or r1, r1, (1 << 4) /* set IRQ-enabled bit */
/*
 * It has been found that (in nsim_hs_smp), when the cpu
 * is sleeping, it does not respond to an inter-processor interrupt
 * even though the interrupt is pending and interrupts are enabled.
 * Here is a workaround:
*/
#if !defined(CONFIG_SOC_NSIM) && !defined(CONFIG_SMP)
sleep r1
#else
seti r1
_z_arc_idle_loop:
b _z_arc_idle_loop
#endif
j_s [blink]
nop
@@ -64,17 +79,19 @@ _z_arc_idle_loop:
*
* This function exits with interrupts restored to <key>.
*
* void arch_cpu_atomic_idle(unsigned int key)
* void nano_cpu_atomic_idle(unsigned int key)
*/
SECTION_FUNC(TEXT, arch_cpu_atomic_idle)
SECTION_FUNC(TEXT, nano_cpu_atomic_idle)
#ifdef CONFIG_TRACING
#ifdef CONFIG_KERNEL_EVENT_LOGGER_SLEEP
push_s blink
jl sys_trace_idle
jl _sys_k_event_logger_enter_sleep
pop_s blink
#endif
ld r1, [z_arc_cpu_sleep_mode]
enter_tickless_idle
ld r1, [nano_cpu_sleep_mode]
or r1, r1, (1 << 4) /* set IRQ-enabled bit */
sleep r1
j_s.d [blink]

View File

@@ -1,7 +1,17 @@
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
@@ -13,15 +23,21 @@
* See isr_wrapper.S for details.
*/
#include <kernel_structs.h>
#include <offsets_short.h>
#define _ASMLANGUAGE
#include <nano_private.h>
#include <offsets.h>
#include <toolchain.h>
#include <linker/sections.h>
#include <arch/cpu.h>
#include <swap_macros.h>
#include "swap_macros.h"
GTEXT(_firq_enter)
GTEXT(_firq_exit)
GTEXT(_firq_stack_setup)
GDATA(_firq_stack)
SECTION_VAR(NOINIT, _firq_stack)
.space CONFIG_FIRQ_STACK_SIZE
/**
*
@@ -46,27 +62,20 @@ GTEXT(_firq_exit)
*/
SECTION_FUNC(TEXT, _firq_enter)
/*
* ATTENTION:
* If CONFIG_RGF_NUM_BANKS>1, firq uses a 2nd register bank so GPRs do
* not need to be saved.
* If CONFIG_RGF_NUM_BANKS==1, firq must use the stack to save registers.
* This has already been done by _isr_wrapper.
*/
/*
* ATTENTION:
* firq uses a 2nd register bank so GPRs do not need to be saved.
*/
#ifdef CONFIG_ARC_STACK_CHECKING
#ifdef CONFIG_ARC_SECURE_FIRMWARE
lr r2, [_ARC_V2_SEC_STAT]
bclr r2, r2, _ARC_V2_SEC_STAT_SSC_BIT
sflag r2
#else
/* disable stack checking */
lr r2, [_ARC_V2_STATUS32]
bclr r2, r2, _ARC_V2_STATUS32_SC_BIT
kflag r2
#endif
#endif
#if CONFIG_RGF_NUM_BANKS != 1
#ifndef CONFIG_FIRQ_NO_LPCC
/*
 * Save LP_START/LP_COUNT/LP_END because the called handler might use them.
* Save these in callee saved registers to avoid using memory.
@@ -77,59 +86,8 @@ SECTION_FUNC(TEXT, _firq_enter)
lr r25, [_ARC_V2_LP_END]
#endif
/* check whether irq stack is used */
_check_and_inc_int_nest_counter r0, r1
bne.d firq_nest
mov_s r0, sp
_get_curr_cpu_irq_stack sp
#if CONFIG_RGF_NUM_BANKS != 1
b firq_nest_1
firq_nest:
/*
* because firq and rirq share the same interrupt stack,
* switch back to original register bank to get correct sp.
* to get better firq latency, an approach is to prepare
* separate interrupt stack for firq and do not do thread
* switch in firq.
*/
lr r1, [_ARC_V2_STATUS32]
and r1, r1, ~_ARC_V2_STATUS32_RB(7)
kflag r1
/* here use _ARC_V2_USER_SP and ilink to exchange sp
* save original value of _ARC_V2_USER_SP and ilink into
* the stack of interrupted context first, then restore them later
*/
push ilink
PUSHAX ilink, _ARC_V2_USER_SP
/* sp here is the sp of interrupted context */
sr sp, [_ARC_V2_USER_SP]
/* here, bank 0 sp must go back to the value before push and
* PUSHAX as we will switch to bank1, the pop and POPAX later will
* change bank1's sp, not bank0's sp
*/
add sp, sp, 8
/* switch back to banked reg, only ilink can be used */
lr ilink, [_ARC_V2_STATUS32]
or ilink, ilink, _ARC_V2_STATUS32_RB(1)
kflag ilink
lr sp, [_ARC_V2_USER_SP]
POPAX ilink, _ARC_V2_USER_SP
pop ilink
firq_nest_1:
#else
firq_nest:
#endif
push_s r0
j @_isr_demux
/**
*
* @brief Work to be done exiting a FIRQ
@@ -139,69 +97,50 @@ firq_nest:
SECTION_FUNC(TEXT, _firq_exit)
#if CONFIG_RGF_NUM_BANKS != 1
#ifndef CONFIG_FIRQ_NO_LPCC
/* restore lp_count, lp_start, lp_end from r23-r25 */
mov lp_count,r23
sr r24, [_ARC_V2_LP_START]
sr r25, [_ARC_V2_LP_END]
#endif
_dec_int_nest_counter r0, r1
_check_nest_int_by_irq_act r0, r1
mov_s r1, _nanokernel
ld_s r2, [r1, __tNANO_current_OFFSET]
jne _firq_no_reschedule
#if CONFIG_NUM_IRQ_PRIO_LEVELS > 1
/* check if we're a nested interrupt: if so, let the interrupted
* interrupt handle the reschedule */
#ifdef CONFIG_STACK_SENTINEL
bl z_check_stack_sentinel
#endif
lr r3, [_ARC_V2_AUX_IRQ_ACT]
#ifdef CONFIG_PREEMPT_ENABLED
#ifdef CONFIG_SMP
bl z_arc_smp_switch_in_isr
/* r0 points to new thread, r1 points to old thread */
brne r0, 0, _firq_reschedule
#else
mov_s r1, _kernel
ld_s r2, [r1, _kernel_offset_to_current]
/* Check if the current thread (in r2) is the cached thread */
ld_s r0, [r1, _kernel_offset_to_ready_q_cache]
brne r0, r2, _firq_reschedule
#endif
/* fall to no rescheduling */
#endif /* CONFIG_PREEMPT_ENABLED */
.balign 4
_firq_no_reschedule:
pop sp
/*
* Keeping this code block close to those that use it allows using brxx
* instruction instead of a pair of cmp and bxx
/* the OS on ARCv2 always runs in kernel mode, so assume bit31 [U] in
* AUX_IRQ_ACT is always 0: if the contents of AUX_IRQ_ACT is not 1, it
* means that another bit is set so an interrupt was interrupted.
*/
#if CONFIG_RGF_NUM_BANKS == 1
_pop_irq_stack_frame
#endif
breq r3, 1, _check_if_current_is_the_task
rtie
#ifdef CONFIG_PREEMPT_ENABLED
#endif
.balign 4
_check_if_current_is_the_task:
ld_s r0, [r2, __tTCS_flags_OFFSET]
and.f r0, r0, PREEMPTIBLE
bnz _check_if_a_fiber_is_ready
rtie
.balign 4
_check_if_a_fiber_is_ready:
ld_s r0, [r1, __tNANO_fiber_OFFSET] /* incoming fiber in r0 */
brne r0, 0, _firq_reschedule
rtie
.balign 4
_firq_reschedule:
pop sp
#if CONFIG_RGF_NUM_BANKS != 1
#ifdef CONFIG_SMP
/*
* save r0, r1 in irq stack for a while, as they will be changed by register
* bank switch
*/
_get_curr_cpu_irq_stack r2
st r0, [r2, -4]
st r1, [r2, -8]
#endif
/*
* We know there is no interrupted interrupt of lower priority at this
* point, so when switching back to register bank 0, it will contain the
@@ -222,38 +161,29 @@ _firq_reschedule:
*/
lr r0, [_ARC_V2_STATUS32_P0]
st_s r0, [sp, ___isf_t_status32_OFFSET]
st_s r0, [sp, __tISF_status32_OFFSET]
st ilink, [sp, ___isf_t_pc_OFFSET] /* ilink into pc */
#ifdef CONFIG_SMP
/*
* load r0, r1 from irq stack
*/
_get_curr_cpu_irq_stack r2
ld r0, [r2, -4]
ld r1, [r2, -8]
#endif
#endif
st ilink, [sp, __tISF_pc_OFFSET] /* ilink into pc */
mov_s r1, _nanokernel
ld_s r2, [r1, __tNANO_current_OFFSET]
#ifdef CONFIG_SMP
mov_s r2, r1
#else
mov_s r1, _kernel
ld_s r2, [r1, _kernel_offset_to_current]
#endif
_save_callee_saved_regs
st _CAUSE_FIRQ, [r2, _thread_offset_to_relinquish_cause]
st _CAUSE_FIRQ, [r2, __tTCS_relinquish_cause_OFFSET]
#ifdef CONFIG_SMP
mov_s r2, r0
#else
ld_s r2, [r1, _kernel_offset_to_ready_q_cache]
st_s r2, [r1, _kernel_offset_to_current]
#endif
ld_s r2, [r1, __tNANO_fiber_OFFSET]
st_s r2, [r1, __tNANO_current_OFFSET]
ld_s r3, [r2, __tTCS_link_OFFSET]
st_s r3, [r1, __tNANO_fiber_OFFSET]
#ifdef CONFIG_ARC_STACK_CHECKING
_load_stack_check_regs
/* Use stack top and down registers from restored context */
add r3, r2, __tTCS_NOFLOAT_SIZEOF
sr r3, [_ARC_V2_KSTACK_TOP]
ld_s r3, [r2, __tTCS_stack_top_OFFSET]
sr r3, [_ARC_V2_KSTACK_BASE]
#endif
/*
* _load_callee_saved_regs expects incoming thread in r2.
@@ -261,39 +191,38 @@ _firq_reschedule:
*/
_load_callee_saved_regs
#if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)
push_s r2
mov_s r0, r2
bl configure_mpu_thread
pop_s r2
#endif
#if defined(CONFIG_USERSPACE)
/*
* see comments in regular_irq.S
*/
lr r0, [_ARC_V2_AUX_IRQ_ACT]
bclr r0, r0, 31
sr r0, [_ARC_V2_AUX_IRQ_ACT]
#endif
ld r3, [r2, _thread_offset_to_relinquish_cause]
ld_s r3, [r2, __tTCS_relinquish_cause_OFFSET]
breq r3, _CAUSE_RIRQ, _firq_return_from_rirq
nop_s
nop
breq r3, _CAUSE_FIRQ, _firq_return_from_firq
nop_s
nop
/* fall through */
.balign 4
_firq_return_from_coop:
ld_s r3, [r2, __tTCS_intlock_key_OFFSET]
st 0, [r2, __tTCS_intlock_key_OFFSET]
/* pc into ilink */
pop_s r0
mov_s ilink, r0
mov ilink, r0
pop_s r0 /* status32 into r0 */
/*
* There are only two interrupt lock states: locked and unlocked. When
* entering _Swap(), they are always locked, so the IE bit is unset in
* status32. If the incoming thread had them locked recursively, it
* means that the IE bit should stay unset. The only time the bit
* has to change is if they were not locked recursively.
*/
and.f r3, r3, (1 << 4)
or.nz r0, r0, _ARC_V2_STATUS32_IE
sr r0, [_ARC_V2_STATUS32_P0]
ld_s r0, [r2, __tTCS_return_value_OFFSET]
rtie
.balign 4
@@ -306,7 +235,37 @@ _firq_return_from_firq:
sr ilink, [_ARC_V2_STATUS32_P0]
ld ilink, [sp, -8] /* pc into ilink */
/* fall through to rtie instruction */
.balign 4
_firq_no_reschedule:
/* LP registers are already restored, just switch back to bank 0 */
rtie
#endif /* CONFIG_PREEMPT_ENABLED */
/**
*
* @brief Install the FIRQ stack in register bank 1
*
* @return N/A
*/
SECTION_FUNC(TEXT, _firq_stack_setup)
lr r0, [_ARC_V2_STATUS32]
and r0, r0, ~_ARC_V2_STATUS32_RB(7)
or r0, r0, _ARC_V2_STATUS32_RB(1)
kflag r0
mov sp, _firq_stack
add sp, sp, CONFIG_FIRQ_STACK_SIZE
/*
* We have to reload r0 here, because it is bank1 r0 which contains
* garbage, not bank0 r0 containing the previous value of status32.
*/
lr r0, [_ARC_V2_STATUS32]
and r0, r0, ~_ARC_V2_STATUS32_RB(7)
kflag r0
j_s [blink]

View File

@@ -1,7 +1,17 @@
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
@@ -12,33 +22,74 @@
* ARCv2 CPUs.
*/
#include <kernel.h>
#include <offsets_short.h>
#include <nano_private.h>
#include <offsets.h>
#include <toolchain.h>
#include <arch/cpu.h>
#include <logging/log.h>
LOG_MODULE_DECLARE(os);
void z_arc_fatal_error(unsigned int reason, const z_arch_esf_t *esf)
#ifdef CONFIG_PRINTK
#include <misc/printk.h>
#define PR_EXC(...) printk(__VA_ARGS__)
#else
#define PR_EXC(...)
#endif /* CONFIG_PRINTK */
const NANO_ESF _default_esf = {
0xdeaddead, /* placeholder */
};
/**
*
* @brief Nanokernel fatal error handler
*
* This routine is called when fatal error conditions are detected by software
* and is responsible only for reporting the error. Once reported, it then
* invokes the user provided routine _SysFatalErrorHandler() which is
* responsible for implementing the error handling policy.
*
* The caller is expected to always provide a usable ESF. In the event that the
* fatal error does not have a hardware generated ESF, the caller should either
* create its own or use a pointer to the global default ESF <_default_esf>.
*
* @return This function does not return.
*/
FUNC_NORETURN void _NanoFatalErrorHandler(unsigned int reason,
const NANO_ESF *pEsf)
{
if (reason == K_ERR_CPU_EXCEPTION) {
LOG_ERR("Faulting instruction address = 0x%lx",
z_arc_v2_aux_reg_read(_ARC_V2_ERET));
switch (reason) {
case _NANO_ERR_INVALID_TASK_EXIT:
PR_EXC("***** Invalid Exit Software Error! *****\n");
break;
#if defined(CONFIG_STACK_CANARIES)
case _NANO_ERR_STACK_CHK_FAIL:
PR_EXC("***** Stack Check Fail! *****\n");
break;
#endif
case _NANO_ERR_ALLOCATION_FAIL:
PR_EXC("**** Kernel Allocation Failure! ****\n");
break;
default:
PR_EXC("**** Unknown Fatal Error %d! ****\n", reason);
break;
}
PR_EXC("Current thread ID = 0x%x\n"
"Faulting instruction address = 0x%x\n",
sys_thread_self_get(),
_arc_v2_aux_reg_read(_ARC_V2_ERET));
z_fatal_error(reason, esf);
}
FUNC_NORETURN void arch_syscall_oops(void *ssf_ptr)
{
z_arc_fatal_error(K_ERR_KERNEL_OOPS, ssf_ptr);
CODE_UNREACHABLE;
}
FUNC_NORETURN void arch_system_halt(unsigned int reason)
{
ARG_UNUSED(reason);
__asm__("brk");
CODE_UNREACHABLE;
/*
* Now that the error has been reported, call the user implemented
* policy
* to respond to the error. The decisions as to what responses are
* appropriate to the various errors are something the customer must
* decide.
*/
_SysFatalErrorHandler(reason, pEsf);
for (;;)
;
}
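For context, the user-provided policy routine mentioned above receives the same (reason, ESF) pair as this handler; a minimal sketch of such a policy, assuming that signature and the declarations visible in this file, could be as simple as halting the system:

/* Hypothetical minimal error-handling policy; real systems supply their own */
FUNC_NORETURN void _SysFatalErrorHandler(unsigned int reason,
					 const NANO_ESF *pEsf)
{
	ARG_UNUSED(reason);
	ARG_UNUSED(pEsf);

	for (;;) {
		/* halt: spin forever */
	}
}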

View File

@@ -1,7 +1,17 @@
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
@@ -12,423 +22,68 @@
*/
#include <toolchain.h>
#include <linker/sections.h>
#include <inttypes.h>
#include <sections.h>
#include <kernel.h>
#include <kernel_internal.h>
#include <kernel_structs.h>
#include <exc_handle.h>
#include <logging/log.h>
LOG_MODULE_DECLARE(os);
#include <nanokernel.h>
#include <nano_private.h>
#ifdef CONFIG_USERSPACE
Z_EXC_DECLARE(z_arc_user_string_nlen);
static const struct z_exc_handle exceptions[] = {
Z_EXC_HANDLE(z_arc_user_string_nlen)
};
#endif
#if defined(CONFIG_MPU_STACK_GUARD)
#define IS_MPU_GUARD_VIOLATION(guard_start, fault_addr, stack_ptr) \
((fault_addr >= guard_start) && \
(fault_addr < (guard_start + STACK_GUARD_SIZE)) && \
(stack_ptr <= (guard_start + STACK_GUARD_SIZE)))
/**
* @brief Assess occurrence of current thread's stack corruption
*
* This function performs an assessment whether a memory fault (on a
* given memory address) is the result of stack memory corruption of
* the current thread.
*
* Thread stack corruption for supervisor threads or user threads in
* privilege mode (when User Space is supported) is reported upon an
* attempt to access the stack guard area (if MPU Stack Guard feature
* is supported). Additionally the current thread stack pointer
* must be pointing inside or below the guard area.
*
* Thread stack corruption for user threads in user mode is reported,
* if the current stack pointer is pointing below the start of the current
* thread's stack.
*
* Notes:
* - we assume a fully descending stack,
* - we assume a stacking error has occurred,
* - the function shall be called when handling MPU privilege violation
*
* If stack corruption is detected, the function returns the lowest
* allowed address where the Stack Pointer can safely point to, to
 * prevent errors when un-stacking the corrupted stack frame
* upon exception return.
*
* @param fault_addr memory address on which memory access violation
* has been reported.
* @param sp stack pointer when exception comes out
*
* @return The lowest allowed stack frame pointer, if error is a
* thread stack corruption, otherwise return 0.
*/
static u32_t z_check_thread_stack_fail(const u32_t fault_addr, u32_t sp)
{
const struct k_thread *thread = _current;
if (!thread) {
return 0;
}
#if defined(CONFIG_USERSPACE)
if (thread->arch.priv_stack_start) {
/* User thread */
if (z_arc_v2_aux_reg_read(_ARC_V2_ERSTATUS)
& _ARC_V2_STATUS32_U) {
/* Thread's user stack corruption */
#ifdef CONFIG_ARC_HAS_SECURE
sp = z_arc_v2_aux_reg_read(_ARC_V2_SEC_U_SP);
#ifdef CONFIG_PRINTK
#include <misc/printk.h>
#define PR_EXC(...) printk(__VA_ARGS__)
#else
sp = z_arc_v2_aux_reg_read(_ARC_V2_USER_SP);
#endif
if (sp <= (u32_t)thread->stack_obj) {
return (u32_t)thread->stack_obj;
}
} else {
/* User thread in privilege mode */
if (IS_MPU_GUARD_VIOLATION(
thread->arch.priv_stack_start - STACK_GUARD_SIZE,
fault_addr, sp)) {
/* Thread's privilege stack corruption */
return thread->arch.priv_stack_start;
}
}
} else {
/* Supervisor thread */
if (IS_MPU_GUARD_VIOLATION((u32_t)thread->stack_obj,
fault_addr, sp)) {
/* Supervisor thread stack corruption */
return (u32_t)thread->stack_obj + STACK_GUARD_SIZE;
}
}
#else /* CONFIG_USERSPACE */
if (IS_MPU_GUARD_VIOLATION(thread->stack_info.start,
fault_addr, sp)) {
/* Thread stack corruption */
return thread->stack_info.start + STACK_GUARD_SIZE;
}
#endif /* CONFIG_USERSPACE */
return 0;
}
#define PR_EXC(...)
#endif /* CONFIG_PRINTK */
#if (CONFIG_FAULT_DUMP > 0)
#define FAULT_DUMP(esf, fault) _FaultDump(esf, fault)
#else
#define FAULT_DUMP(esf, fault) \
do { \
(void) esf; \
(void) fault; \
} while ((0))
#endif
#ifdef CONFIG_ARC_EXCEPTION_DEBUG
/* For EV_ProtV, the numbering/semantics of the parameter are consistent across
 * several codes, although not all combinations will be reported.
#if (CONFIG_FAULT_DUMP > 0)
/*
* @brief Dump information regarding fault (FAULT_DUMP > 0)
*
 * These codes and parameters do not have associated names in
* the technical manual, just switch on the values in Table 6-5
* Dump information regarding the fault when CONFIG_FAULT_DUMP is set to 1
* (short form).
*
* @return N/A
*/
static const char *get_protv_access_err(u32_t parameter)
void _FaultDump(const NANO_ESF *esf, int fault)
{
switch (parameter) {
case 0x1:
return "code protection scheme";
case 0x2:
return "stack checking scheme";
case 0x4:
return "MPU";
case 0x8:
return "MMU";
case 0x10:
return "NVM";
case 0x24:
return "Secure MPU";
case 0x44:
return "Secure MPU with SID mismatch";
default:
return "unknown";
}
}
ARG_UNUSED(esf);
#ifdef CONFIG_PRINTK
uint32_t exc_addr = _arc_v2_aux_reg_read(_ARC_V2_EFA);
uint32_t ecr = _arc_v2_aux_reg_read(_ARC_V2_ECR);
static void dump_protv_exception(u32_t cause, u32_t parameter)
{
switch (cause) {
case 0x0:
LOG_ERR("Instruction fetch violation (%s)",
get_protv_access_err(parameter));
break;
case 0x1:
LOG_ERR("Memory read protection violation (%s)",
get_protv_access_err(parameter));
break;
case 0x2:
LOG_ERR("Memory write protection violation (%s)",
get_protv_access_err(parameter));
break;
case 0x3:
LOG_ERR("Memory read-modify-write violation (%s)",
get_protv_access_err(parameter));
break;
case 0x10:
LOG_ERR("Normal vector table in secure memory");
break;
case 0x11:
LOG_ERR("NS handler code located in S memory");
break;
case 0x12:
LOG_ERR("NSC Table Range Violation");
break;
default:
LOG_ERR("unknown");
break;
}
PR_EXC("Exception vector: 0x%x, cause code: 0x%x, parameter 0x%x\n",
_ARC_V2_ECR_VECTOR(ecr),
_ARC_V2_ECR_CODE(ecr),
_ARC_V2_ECR_PARAMETER(ecr));
PR_EXC("Address 0x%x\n", exc_addr);
#endif
}
static void dump_machine_check_exception(u32_t cause, u32_t parameter)
{
switch (cause) {
case 0x0:
LOG_ERR("double fault");
break;
case 0x1:
LOG_ERR("overlapping TLB entries");
break;
case 0x2:
LOG_ERR("fatal TLB error");
break;
case 0x3:
LOG_ERR("fatal cache error");
break;
case 0x4:
LOG_ERR("internal memory error on instruction fetch");
break;
case 0x5:
LOG_ERR("internal memory error on data fetch");
break;
case 0x6:
LOG_ERR("illegal overlapping MPU entries");
if (parameter == 0x1) {
LOG_ERR(" - jump and branch target");
}
break;
case 0x10:
LOG_ERR("secure vector table not located in secure memory");
break;
case 0x11:
LOG_ERR("NSC jump table not located in secure memory");
break;
case 0x12:
LOG_ERR("secure handler code not located in secure memory");
break;
case 0x13:
LOG_ERR("NSC target address not located in secure memory");
break;
case 0x80:
LOG_ERR("uncorrectable ECC or parity error in vector memory");
break;
default:
LOG_ERR("unknown");
break;
}
}
static void dump_privilege_exception(u32_t cause, u32_t parameter)
{
switch (cause) {
case 0x0:
LOG_ERR("Privilege violation");
break;
case 0x1:
LOG_ERR("disabled extension");
break;
case 0x2:
LOG_ERR("action point hit");
break;
case 0x10:
switch (parameter) {
case 0x1:
LOG_ERR("N to S return using incorrect return mechanism");
break;
case 0x2:
LOG_ERR("N to S return with incorrect operating mode");
break;
case 0x3:
LOG_ERR("IRQ/exception return fetch from wrong mode");
break;
case 0x4:
LOG_ERR("attempt to halt secure processor in NS mode");
break;
case 0x20:
LOG_ERR("attempt to access secure resource from normal mode");
break;
case 0x40:
LOG_ERR("SID violation on resource access (APEX/UAUX/key NVM)");
break;
default:
LOG_ERR("unknown");
break;
}
break;
case 0x13:
switch (parameter) {
case 0x20:
LOG_ERR("attempt to access secure APEX feature from NS mode");
break;
case 0x40:
LOG_ERR("SID violation on access to APEX feature");
break;
default:
LOG_ERR("unknown");
break;
}
break;
default:
LOG_ERR("unknown");
break;
}
}
static void dump_exception_info(u32_t vector, u32_t cause, u32_t parameter)
{
if (vector >= 0x10 && vector <= 0xFF) {
LOG_ERR("interrupt %u", vector);
return;
}
/* Names are exactly as they appear in Designware ARCv2 ISA
* Programmer's reference manual for easy searching
*/
switch (vector) {
case ARC_EV_RESET:
LOG_ERR("Reset");
break;
case ARC_EV_MEM_ERROR:
LOG_ERR("Memory Error");
break;
case ARC_EV_INS_ERROR:
LOG_ERR("Instruction Error");
break;
case ARC_EV_MACHINE_CHECK:
LOG_ERR("EV_MachineCheck");
dump_machine_check_exception(cause, parameter);
break;
case ARC_EV_TLB_MISS_I:
LOG_ERR("EV_TLBMissI");
break;
case ARC_EV_TLB_MISS_D:
LOG_ERR("EV_TLBMissD");
break;
case ARC_EV_PROT_V:
LOG_ERR("EV_ProtV");
dump_protv_exception(cause, parameter);
break;
case ARC_EV_PRIVILEGE_V:
LOG_ERR("EV_PrivilegeV");
dump_privilege_exception(cause, parameter);
break;
case ARC_EV_SWI:
LOG_ERR("EV_SWI");
break;
case ARC_EV_TRAP:
LOG_ERR("EV_Trap");
break;
case ARC_EV_EXTENSION:
LOG_ERR("EV_Extension");
break;
case ARC_EV_DIV_ZERO:
LOG_ERR("EV_DivZero");
break;
case ARC_EV_DC_ERROR:
LOG_ERR("EV_DCError");
break;
case ARC_EV_MISALIGNED:
LOG_ERR("EV_Misaligned");
break;
case ARC_EV_VEC_UNIT:
LOG_ERR("EV_VecUnit");
break;
default:
LOG_ERR("unknown");
break;
}
}
#endif /* CONFIG_ARC_EXCEPTION_DEBUG */
#endif /* CONFIG_FAULT_DUMP */
/*
* @brief Fault handler
*
* This routine is called when fatal error conditions are detected by hardware
* and is responsible only for reporting the error. Once reported, it then
* invokes the user provided routine k_sys_fatal_error_handler() which is
* invokes the user provided routine _SysFatalErrorHandler() which is
* responsible for implementing the error handling policy.
*
* @return This function does not return.
*/
void _Fault(z_arch_esf_t *esf, u32_t old_sp)
void _Fault(void)
{
u32_t vector, cause, parameter;
u32_t exc_addr = z_arc_v2_aux_reg_read(_ARC_V2_EFA);
u32_t ecr = z_arc_v2_aux_reg_read(_ARC_V2_ECR);
uint32_t ecr = _arc_v2_aux_reg_read(_ARC_V2_ECR);
#ifdef CONFIG_USERSPACE
for (int i = 0; i < ARRAY_SIZE(exceptions); i++) {
u32_t start = (u32_t)exceptions[i].start;
u32_t end = (u32_t)exceptions[i].end;
FAULT_DUMP(&_default_esf, ecr);
if (esf->pc >= start && esf->pc < end) {
esf->pc = (u32_t)(exceptions[i].fixup);
return;
}
}
#endif
vector = Z_ARC_V2_ECR_VECTOR(ecr);
cause = Z_ARC_V2_ECR_CODE(ecr);
parameter = Z_ARC_V2_ECR_PARAMETER(ecr);
/* exception raised by kernel */
if (vector == ARC_EV_TRAP && parameter == _TRAP_S_CALL_RUNTIME_EXCEPT) {
/*
* in user mode software-triggered system fatal exceptions only allow
* K_ERR_KERNEL_OOPS and K_ERR_STACK_CHK_FAIL
*/
#ifdef CONFIG_USERSPACE
if ((esf->status32 & _ARC_V2_STATUS32_U) &&
esf->r0 != K_ERR_STACK_CHK_FAIL) {
esf->r0 = K_ERR_KERNEL_OOPS;
}
#endif
z_arc_fatal_error(esf->r0, esf);
return;
}
LOG_ERR("***** Exception vector: 0x%x, cause code: 0x%x, parameter 0x%x",
vector, cause, parameter);
LOG_ERR("Address 0x%x", exc_addr);
#ifdef CONFIG_ARC_EXCEPTION_DEBUG
dump_exception_info(vector, cause, parameter);
#endif
#ifdef CONFIG_ARC_STACK_CHECKING
/* Vector 6 = EV_ProtV. Regardless of cause, parameter 2 means stack
* check violation
* stack check and mpu violation can come out together, then
* parameter = 0x2 | [0x4 | 0x8 | 0x1]
*/
if (vector == ARC_EV_PROT_V && parameter & 0x2) {
z_arc_fatal_error(K_ERR_STACK_CHK_FAIL, esf);
return;
}
#endif
#ifdef CONFIG_MPU_STACK_GUARD
if (vector == ARC_EV_PROT_V && ((parameter == 0x4) ||
(parameter == 0x24))) {
if (z_check_thread_stack_fail(exc_addr, old_sp)) {
z_arc_fatal_error(K_ERR_STACK_CHK_FAIL, esf);
return;
}
}
#endif
z_arc_fatal_error(K_ERR_CPU_EXCEPTION, esf);
_SysFatalErrorHandler(_NANO_ERR_HW_EXCEPTION, &_default_esf);
}
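For context, a sketch of how a software-raised fatal error typically reaches the trap branch above: the reason code travels in r0 and trap_s selects the runtime-exception path. This mirrors the trap_s pattern used elsewhere in this tree; treat the helper below as an assumption, not the exact kernel routine.
static inline void example_raise_fatal(unsigned int reason)
{
	register unsigned int r0 __asm__("r0") = reason;

	/* the reason shows up as esf->r0 inside _Fault() above */
	__asm__ volatile("trap_s %[id]"
			 :
			 : [id] "i"(_TRAP_S_CALL_RUNTIME_EXCEPT), "r"(r0)
			 : "memory");
}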


@@ -1,8 +1,17 @@
/*
* Copyright (c) 2014 Wind River Systems, Inc.
* Copyright (c) 2018 Synopsys.
*
* SPDX-License-Identifier: Apache-2.0
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
@@ -12,14 +21,15 @@
* Fault handlers for ARCv2 processors.
*/
#define _ASMLANGUAGE
#include <toolchain.h>
#include <linker/sections.h>
#include <sections.h>
#include <arch/cpu.h>
#include <swap_macros.h>
#include <syscall.h>
#include "swap_macros.h"
GTEXT(_Fault)
GTEXT(z_do_kernel_oops)
GTEXT(__reset)
GTEXT(__memory_error)
GTEXT(__instruction_error)
@@ -34,20 +44,18 @@ GTEXT(__ev_extension)
GTEXT(__ev_div_zero)
GTEXT(__ev_dc_error)
GTEXT(__ev_maligned)
#ifdef CONFIG_IRQ_OFFLOAD
GTEXT(z_irq_do_offload);
#endif
GDATA(_firq_stack)
SECTION_VAR(BSS, saved_stack_pointer)
.word 0
/*
* The exception handling will use top part of interrupt stack to
* get smaller memory footprint, because exception is not frequent.
* To reduce the impact on interrupt handling, especially nested interrupt
* the top part of interrupt stack cannot be too large, so add a check
 * here
 */
#if CONFIG_NUM_IRQ_PRIO_LEVELS == 1
#error "NUM_IRQ_PRIO_LEVELS==1 is not supported."
/* The code below sets bit 1 in AUX_IRQ_ACT and thus requires
* priority 0 and 1 at a minimum. Supporting only 1 priority
* requires a change to this file but also changes to make
* FIRQ optional.
*/
#if CONFIG_ARC_EXCEPTION_STACK_SIZE > (CONFIG_ISR_STACK_SIZE >> 1)
#error "interrupt stack size is too small"
#endif
/*
@@ -62,299 +70,157 @@ SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_tlb_miss_d)
SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_prot_v)
SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_privilege_v)
SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_swi)
#ifndef CONFIG_IRQ_OFFLOAD
SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_trap)
#endif
SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_extension)
SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_div_zero)
SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_dc_error)
SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_maligned)
_exc_entry:
/*
* re-use the top part of interrupt stack as exception
* stack. If this top part is used by interrupt handling,
* and exception is raised, then here it's guaranteed that
* exception handling has necessary stack to use
*/
mov_s ilink, sp
_get_curr_cpu_irq_stack sp
sub sp, sp, (CONFIG_ISR_STACK_SIZE - CONFIG_ARC_EXCEPTION_STACK_SIZE)
/*
* save caller saved registers
* this stack frame is set up in exception stack,
* not in the original sp (thread stack or interrupt stack).
* Because the exception may be raised by stack checking or
* mpu protect violation related to stack. If this stack frame
* is setup in original sp, double exception may be raised during
* _create_irq_stack_frame, which is unrecoverable.
*/
_create_irq_stack_frame
#ifdef CONFIG_ARC_HAS_SECURE
/* ERSEC_STAT is IOW/RAZ in normal mode */
lr r0,[_ARC_V2_ERSEC_STAT]
st_s r0, [sp, ___isf_t_sec_stat_OFFSET]
#endif
lr r0,[_ARC_V2_ERSTATUS]
st_s r0, [sp, ___isf_t_status32_OFFSET]
lr r0,[_ARC_V2_ERET]
st_s r0, [sp, ___isf_t_pc_OFFSET] /* eret into pc */
/* sp is parameter of _Fault */
mov_s r0, sp
/* ilink is the thread's original sp */
mov_s r1, ilink
jl _Fault
_exc_return:
/* the exception cause must be fixed in exception handler when exception returns
* directly, or exception will be repeated.
*
* If thread switch is raised in exception handler, the context of old thread will
* not be saved, i.e., it cannot be recovered, because we don't know where the
* exception comes out, thread context?irq_context?nest irq context?
*/
#ifdef CONFIG_PREEMPT_ENABLED
#ifdef CONFIG_SMP
bl z_arc_smp_switch_in_isr
breq r0, 0, _exc_return_from_exc
mov_s r2, r0
#else
mov_s r1, _kernel
ld_s r2, [r1, _kernel_offset_to_current]
/* check if the current thread needs to be rescheduled */
ld_s r0, [r1, _kernel_offset_to_ready_q_cache]
breq r0, r2, _exc_return_from_exc
ld_s r2, [r1, _kernel_offset_to_ready_q_cache]
st_s r2, [r1, _kernel_offset_to_current]
#endif
#ifdef CONFIG_ARC_SECURE_FIRMWARE
/*
* sync up the ERSEC_STAT.ERM and SEC_STAT.IRM.
 * use a fake interrupt return to simulate an exception return.
* ERM and IRM record which mode the cpu should return, 1: secure
* 0: normal
*/
lr r3,[_ARC_V2_ERSEC_STAT]
btst r3, 31
bset.nz r3, r3, _ARC_V2_SEC_STAT_IRM_BIT
bclr.z r3, r3, _ARC_V2_SEC_STAT_IRM_BIT
sflag r3
#endif
/* clear AE bit to forget this was an exception, and go to
* register bank0 (if exception is raised in firq with 2 reg
* banks, then we may be bank1)
*/
#if defined(CONFIG_ARC_FIRQ) && CONFIG_RGF_NUM_BANKS != 1
/* save r2 in ilink because of the possible following reg
* bank switch
*/
mov_s ilink, r2
#endif
lr r3, [_ARC_V2_STATUS32]
and r3,r3,(~(_ARC_V2_STATUS32_AE | _ARC_V2_STATUS32_RB(7)))
kflag r3
/* pretend lowest priority interrupt happened to use common handler
* if exception is raised in irq, i.e., _ARC_V2_AUX_IRQ_ACT !=0,
* ignore irq handling, we cannot return to irq handling which may
* raise exception again. The ignored interrupts will be re-triggered
 * if not cleared, or re-triggered by interrupt sources, or just missed
 */
/*
 * Before invoking exception handler, the kernel switches to an exception
 * stack, which is really the FIRQ stack, to save the faulting thread's
 * registers. It can use the FIRQ stack because it knows it is unused
 * since it is safe to assume that if an exception has happened in FIRQ
 * handler, the problem is fatal and all the kernel can do is just print
 * a diagnostic message and halt.
 */
#ifdef CONFIG_ARC_SECURE_FIRMWARE
mov_s r3, (1 << (ARC_N_IRQ_START_LEVEL - 1))
#else
mov_s r3, (1 << (CONFIG_NUM_IRQ_PRIO_LEVELS - 1))
#endif
#ifdef CONFIG_ARC_NORMAL_FIRMWARE
#ifdef CONFIG_ARC_STACK_CHECKING
push_s r2
mov_s r0, _ARC_V2_AUX_IRQ_ACT
mov_s r1, r3
mov_s r6, ARC_S_CALL_AUX_WRITE
sjli SJLI_CALL_ARC_SECURE
/* disable stack checking */
lr r2, [_ARC_V2_STATUS32]
bclr r2, r2, _ARC_V2_STATUS32_SC_BIT
kflag r2
pop_s r2
#else
sr r3, [_ARC_V2_AUX_IRQ_ACT]
#endif
#if defined(CONFIG_ARC_FIRQ) && CONFIG_RGF_NUM_BANKS != 1
mov r2, ilink
#endif
st sp, [saved_stack_pointer]
mov_s sp, _firq_stack
add sp, sp, CONFIG_FIRQ_STACK_SIZE
/* Assumption: r2 has current thread */
b _rirq_common_interrupt_swap
#endif
_exc_return_from_exc:
ld_s r0, [sp, ___isf_t_pc_OFFSET]
sr r0, [_ARC_V2_ERET]
_pop_irq_stack_frame
mov_s sp, ilink
rtie
SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_trap)
/* get the id of trap_s */
lr ilink, [_ARC_V2_ECR]
and ilink, ilink, 0x3f
#ifdef CONFIG_USERSPACE
cmp ilink, _TRAP_S_CALL_SYSTEM_CALL
bne _do_non_syscall_trap
/* do sys_call */
mov_s ilink, K_SYSCALL_LIMIT
cmp r6, ilink
blt valid_syscall_id
mov_s r0, r6
mov_s r6, K_SYSCALL_BAD
valid_syscall_id:
/* create a sys call frame
* caller regs (r0 - 12) are saved in _create_irq_stack_frame
* ok to use them later
*/
_create_irq_stack_frame
#ifdef CONFIG_ARC_SECURE_FIRMWARE
/* ERSEC_STAT is IOW/RAZ in normal mode */
lr r0, [_ARC_V2_ERSEC_STAT]
st_s r0, [sp, ___isf_t_sec_stat_OFFSET]
#endif
lr r0,[_ARC_V2_ERET]
st_s r0, [sp, ___isf_t_pc_OFFSET] /* eret into pc */
lr r0,[_ARC_V2_ERSTATUS]
st_s r0, [sp, ___isf_t_status32_OFFSET]
bclr r0, r0, _ARC_V2_STATUS32_U_BIT
sr r0, [_ARC_V2_ERSTATUS]
mov_s r0, _arc_do_syscall
sr r0, [_ARC_V2_ERET]
rtie
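A user-side sketch of what enters this path, assuming the calling convention implied above (syscall id in r6, first argument and return value in r0, trap id _TRAP_S_CALL_SYSTEM_CALL); the helper name is invented:
static inline u32_t example_syscall1(u32_t arg1, u32_t call_id)
{
	register u32_t r0 __asm__("r0") = arg1;
	register u32_t r6 __asm__("r6") = call_id;

	__asm__ volatile("trap_s %[id]"
			 : "+r"(r0)
			 : [id] "i"(_TRAP_S_CALL_SYSTEM_CALL), "r"(r6)
			 : "memory");
	return r0;
}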
_do_non_syscall_trap:
#endif /* CONFIG_USERSPACE */
#ifdef CONFIG_IRQ_OFFLOAD
/*
* IRQ_OFFLOAD is to simulate interrupt handling through exception,
* so its entry is different with normal exception handling, it is
* handled in isr stack
*/
cmp ilink, _TRAP_S_SCALL_IRQ_OFFLOAD
bne _exc_entry
/* save caller saved registers */
_create_irq_stack_frame
#ifdef CONFIG_ARC_HAS_SECURE
lr r0,[_ARC_V2_ERSEC_STAT]
st_s r0, [sp, ___isf_t_sec_stat_OFFSET]
#endif
lr r0,[_ARC_V2_ERSTATUS]
st_s r0, [sp, ___isf_t_status32_OFFSET]
st_s r0, [sp, __tISF_status32_OFFSET]
lr r0,[_ARC_V2_ERET]
st_s r0, [sp, ___isf_t_pc_OFFSET] /* eret into pc */
st_s r0, [sp, __tISF_pc_OFFSET] /* eret into pc */
jl _Fault
/* check whether irq stack is used */
_check_and_inc_int_nest_counter r0, r1
/* if _Fault returns, restore the registers */
_pop_irq_stack_frame
bne.d exc_nest_handle
mov_s r0, sp
/* now restore the stack */
ld sp,[saved_stack_pointer]
rtie
_get_curr_cpu_irq_stack sp
exc_nest_handle:
push_s r0
#ifdef CONFIG_IRQ_OFFLOAD
GTEXT(_irq_do_offload);
jl z_irq_do_offload
pop sp
_dec_int_nest_counter r0, r1
lr r0, [_ARC_V2_AUX_IRQ_ACT]
and r0, r0, 0xffff
cmp r0, 0
bne _exc_return_from_exc
#ifdef CONFIG_PREEMPT_ENABLED
#ifdef CONFIG_SMP
bl z_arc_smp_switch_in_isr
breq r0, 0, _exc_return_from_irqoffload_trap
mov_s r2, r1
_save_callee_saved_regs
st _CAUSE_RIRQ, [r2, _thread_offset_to_relinquish_cause]
mov_s r2, r0
#else
mov_s r1, _kernel
ld_s r2, [r1, _kernel_offset_to_current]
/* check if the current thread needs to be rescheduled */
ld_s r0, [r1, _kernel_offset_to_ready_q_cache]
breq r0, r2, _exc_return_from_irqoffload_trap
#endif
#ifdef CONFIG_ARC_SECURE_FIRMWARE
SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_trap)
/*
* sync up the ERSEC_STAT.ERM and SEC_STAT.IRM.
 * use a fake interrupt return to simulate an exception return.
 * ERM and IRM record which mode the cpu should return, 1: secure
 * 0: normal
 */
/*
 * Before invoking exception handler, the kernel switches to an exception
 * stack, which is really the FIRQ stack, to save the faulting thread's
 * registers. It can use the FIRQ stack because it knows it is unused
 * since it is safe to assume that if an exception has happened in FIRQ
 * handler, the problem is fatal and all the kernel can do is just print
 * a diagnostic message and halt.
*/
lr r3,[_ARC_V2_ERSEC_STAT]
btst r3, 31
bset.nz r3, r3, _ARC_V2_SEC_STAT_IRM_BIT
bclr.z r3, r3, _ARC_V2_SEC_STAT_IRM_BIT
sflag r3
/* save _ARC_V2_SEC_STAT */
and r3, r3, 0xff
push_s r3
#ifdef CONFIG_ARC_STACK_CHECKING
push_s r2
/* disable stack checking */
lr r2, [_ARC_V2_STATUS32]
bclr r2, r2, _ARC_V2_STATUS32_SC_BIT
kflag r2
pop_s r2
#endif
#ifndef CONFIG_MICROKERNEL
st sp, [saved_stack_pointer]
mov_s sp, _firq_stack
add sp, sp, CONFIG_FIRQ_STACK_SIZE
#endif
/* save caller saved registers */
_create_irq_stack_frame
lr r0,[_ARC_V2_ERSTATUS]
st_s r0, [sp, __tISF_status32_OFFSET]
lr r0,[_ARC_V2_ERET]
st_s r0, [sp, __tISF_pc_OFFSET] /* eret into pc */
jl _irq_do_offload
#ifdef CONFIG_MICROKERNEL
mov_s r1, _nanokernel
ld_s r2, [r1, __tNANO_current_OFFSET]
#if CONFIG_NUM_IRQ_PRIO_LEVELS > 1
/* check if we're a nested interrupt: if so, let the
* interrupted interrupt handle the reschedule
*/
lr r3, [_ARC_V2_AUX_IRQ_ACT]
/* the OS on ARCv2 always runs in kernel mode, so assume bit31 [U] in
* AUX_IRQ_ACT is always 0: if the contents of AUX_IRQ_ACT is 0, it
* means trap was taken from outside an interrupt handler.
* But if it was inside, let that handler do the swap.
*/
breq r3, 0, _trap_check_for_swap
_trap_return:
_pop_irq_stack_frame
rtie
#endif
.balign 4
_trap_check_for_swap:
ld_s r0, [r2, __tTCS_flags_OFFSET]
and.f r0, r0, PREEMPTIBLE
bnz _e_check_if_a_fiber_is_ready
b _trap_return
.balign 4
_e_check_if_a_fiber_is_ready:
ld_s r0, [r1, __tNANO_fiber_OFFSET] /* incoming fiber in r0 */
brne r0, 0, _trap_reschedule
b _trap_return
.balign 4
_trap_reschedule:
_save_callee_saved_regs
st _CAUSE_RIRQ, [r2, _thread_offset_to_relinquish_cause]
st _CAUSE_RIRQ, [r2, __tTCS_relinquish_cause_OFFSET]
/* note: Ok to use _CAUSE_RIRQ since everything is saved */
mov_s r2, r0
#ifndef CONFIG_SMP
st_s r2, [r1, _kernel_offset_to_current]
#endif
ld_s r2, [r1, __tNANO_fiber_OFFSET]
st_s r2, [r1, __tNANO_current_OFFSET]
ld_s r3, [r2, __tTCS_link_OFFSET]
st_s r3, [r1, __tNANO_fiber_OFFSET]
/* clear AE bit to forget this was an exception */
lr r3, [_ARC_V2_STATUS32]
and r3,r3,(~_ARC_V2_STATUS32_AE)
kflag r3
/* pretend lowest priority interrupt happened to use common handler */
/* pretend priority 1 int happened to use common handler */
lr r3, [_ARC_V2_AUX_IRQ_ACT]
#ifdef CONFIG_ARC_SECURE_FIRMWARE
or r3, r3, (1 << (ARC_N_IRQ_START_LEVEL - 1))
#else
or r3, r3, (1 << (CONFIG_NUM_IRQ_PRIO_LEVELS - 1))
#endif
#ifdef CONFIG_ARC_NORMAL_FIRMWARE
push_s r2
mov_s r0, _ARC_V2_AUX_IRQ_ACT
mov_s r1, r3
mov_s r6, ARC_S_CALL_AUX_WRITE
sjli SJLI_CALL_ARC_SECURE
pop_s r2
#else
or r3,r3,2
sr r3, [_ARC_V2_AUX_IRQ_ACT]
#endif
/* Assumption: r2 has current thread */
b _rirq_common_interrupt_swap
#else
/* Nanokernel-only just returns from exception */
/* if _Fault returns, restore the registers */
_pop_irq_stack_frame
/* now restore the stack */
ld sp,[saved_stack_pointer]
rtie
#endif
_exc_return_from_irqoffload_trap:
_pop_irq_stack_frame
rtie
#endif /* CONFIG_IRQ_OFFLOAD */
b _exc_entry


@@ -1,7 +1,17 @@
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
@@ -12,76 +22,21 @@
* Interrupt management:
*
* - enabling/disabling
* - dynamic ISR connecting/replacing
*
* SW_ISR_TABLE_DYNAMIC has to be enabled for connecting ISRs at runtime.
*
* An IRQ number passed to the @a irq parameters found in this file is a
* number from 16 to last IRQ number on the platform.
*/
#include <kernel.h>
#include <nanokernel.h>
#include <arch/cpu.h>
#include <sys/__assert.h>
#include <misc/__assert.h>
#include <toolchain.h>
#include <linker/sections.h>
#include <sections.h>
#include <sw_isr_table.h>
#include <irq.h>
#include <sys/printk.h>
/*
* storage space for the interrupt stack of fast_irq
*/
#if defined(CONFIG_ARC_FIRQ_STACK)
#if defined(CONFIG_SMP)
K_THREAD_STACK_ARRAY_DEFINE(_firq_interrupt_stack, CONFIG_MP_NUM_CPUS,
CONFIG_ARC_FIRQ_STACK_SIZE);
#else
K_THREAD_STACK_DEFINE(_firq_interrupt_stack, CONFIG_ARC_FIRQ_STACK_SIZE);
#endif
/*
* @brief Set the stack pointer for firq handling
*
* @return N/A
*/
void z_arc_firq_stack_set(void)
{
#ifdef CONFIG_SMP
char *firq_sp = Z_THREAD_STACK_BUFFER(
_firq_interrupt_stack[z_arc_v2_core_id()]) +
CONFIG_ARC_FIRQ_STACK_SIZE;
#else
char *firq_sp = Z_THREAD_STACK_BUFFER(_firq_interrupt_stack) +
CONFIG_ARC_FIRQ_STACK_SIZE;
#endif
/* the z_arc_firq_stack_set must be called when irq disabled, as
* it can be called not only in the init phase but also other places
*/
unsigned int key = irq_lock();
__asm__ volatile (
/* only ilink will not be banked, so use ilink as channel
* between 2 banks
*/
"mov ilink, %0 \n\t"
"lr %0, [%1] \n\t"
"or %0, %0, %2 \n\t"
"kflag %0 \n\t"
"mov sp, ilink \n\t"
/* switch back to bank0, use ilink to avoid the pollution of
* bank1's gp regs.
*/
"lr ilink, [%1] \n\t"
"and ilink, ilink, %3 \n\t"
"kflag ilink \n\t"
:
: "r"(firq_sp), "i"(_ARC_V2_STATUS32),
"i"(_ARC_V2_STATUS32_RB(1)),
"i"(~_ARC_V2_STATUS32_RB(7))
);
irq_unlock(key);
}
#endif
/*
* @brief Enable an interrupt line
@@ -93,11 +48,11 @@ void z_arc_firq_stack_set(void)
* @return N/A
*/
void arch_irq_enable(unsigned int irq)
void _arch_irq_enable(unsigned int irq)
{
unsigned int key = irq_lock();
int key = irq_lock();
z_arc_v2_irq_unit_int_enable(irq);
_arc_v2_irq_unit_int_enable(irq);
irq_unlock(key);
}
@@ -110,25 +65,14 @@ void arch_irq_enable(unsigned int irq)
* @return N/A
*/
void arch_irq_disable(unsigned int irq)
void _arch_irq_disable(unsigned int irq)
{
unsigned int key = irq_lock();
int key = irq_lock();
z_arc_v2_irq_unit_int_disable(irq);
_arc_v2_irq_unit_int_disable(irq);
irq_unlock(key);
}
/**
* @brief Return IRQ enable state
*
* @param irq IRQ line
* @return interrupt enable state, true or false
*/
int arch_irq_is_enabled(unsigned int irq)
{
return z_arc_v2_irq_unit_int_enabled(irq);
}
/*
* @internal
*
@@ -143,25 +87,15 @@ int arch_irq_is_enabled(unsigned int irq)
* @return N/A
*/
void z_irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags)
void _irq_priority_set(unsigned int irq, unsigned int prio, uint32_t flags)
{
ARG_UNUSED(flags);
unsigned int key = irq_lock();
int key = irq_lock();
__ASSERT(prio < CONFIG_NUM_IRQ_PRIO_LEVELS,
"invalid priority %d for irq %d", prio, irq);
/* 0 -> CONFIG_NUM_IRQ_PRIO_LEVELS allocated to secure world,
 * remaining prio levels allocated to normal world
*/
#if defined(CONFIG_ARC_SECURE_FIRMWARE)
prio = prio < ARC_N_IRQ_START_LEVEL ?
prio : (ARC_N_IRQ_START_LEVEL - 1);
#elif defined(CONFIG_ARC_NORMAL_FIRMWARE)
prio = prio < ARC_N_IRQ_START_LEVEL ?
ARC_N_IRQ_START_LEVEL : prio;
#endif
z_arc_v2_irq_unit_prio_set(irq, prio);
_arc_v2_irq_unit_prio_set(irq, prio);
irq_unlock(key);
}
@@ -174,19 +108,84 @@ void z_irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags)
* @return N/A
*/
void z_irq_spurious(void *unused)
#include <misc/printk.h>
void _irq_spurious(void *unused)
{
ARG_UNUSED(unused);
z_fatal_error(K_ERR_SPURIOUS_IRQ, NULL);
printk("_irq_spurious(). Spinning...\n");
for (;;)
;
}
#ifdef CONFIG_DYNAMIC_INTERRUPTS
int arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
void (*routine)(void *parameter), void *parameter,
u32_t flags)
#if CONFIG_SW_ISR_TABLE_DYNAMIC
/*
* @internal
*
* @brief Replace an interrupt handler by another
*
* An interrupt's ISR can be replaced at runtime.
*
* @return N/A
*/
void _irq_handler_set(
unsigned int irq,
void (*new)(void *arg),
void *arg
)
{
z_isr_install(irq, routine, parameter);
z_irq_priority_set(irq, priority, flags);
int key = irq_lock();
int index = irq - 16;
__ASSERT(irq < CONFIG_NUM_IRQS, "IRQ number too high");
_sw_isr_table[index].isr = new;
_sw_isr_table[index].arg = arg;
irq_unlock(key);
}
/*
* @brief Connect an ISR to an interrupt line
*
 * @a isr is connected to interrupt line @a irq, a number greater than or equal to
* 16. No prior ISR can have been connected on @a irq interrupt line since the
* system booted.
*
* This routine will hang if another ISR was connected for interrupt line @a irq
* and ASSERT_ON is enabled; if ASSERT_ON is disabled, it will fail silently.
*
* @return the interrupt line number
*/
int _arch_irq_connect_dynamic(
unsigned int irq,
unsigned int prio,
void (*isr)(void *arg),
void *arg,
uint32_t flags
)
{
ARG_UNUSED(flags);
_irq_handler_set(irq, isr, arg);
_irq_priority_set(irq, prio);
return irq;
}
#endif /* CONFIG_DYNAMIC_INTERRUPTS */
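A hypothetical call site for the dynamic-connect path above; the IRQ line number, priority and handler are made up for illustration:
static void example_timer_isr(void *arg)
{
	ARG_UNUSED(arg);
	/* acknowledge the device, keep the handler short */
}

static void example_connect_at_runtime(void)
{
	/* line 42, priority 1, no flags -- arbitrary values */
	arch_irq_connect_dynamic(42, 1, example_timer_isr, NULL, 0);
	arch_irq_enable(42);
}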
/*
* @internal
*
* @brief Disconnect an ISR from an interrupt line
*
* Interrupt line @a irq is disconnected from its ISR and the latter is
* replaced by _irq_spurious(). irq_disable() should have been called before
* invoking this routine.
*
* @return N/A
*/
void _irq_disconnect(unsigned int irq)
{
_irq_handler_set(irq, _irq_spurious, NULL);
}
#endif /* CONFIG_SW_ISR_TABLE_DYNAMIC */


@@ -1,36 +1,44 @@
/*
* Copyright (c) 2015 Intel corporation
*
* SPDX-License-Identifier: Apache-2.0
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file Software interrupts utility code - ARC implementation
*/
#include <kernel.h>
#include <nanokernel.h>
#include <irq_offload.h>
static irq_offload_routine_t offload_routine;
static void *offload_param;
/* Called by trap_s exception handler */
void z_irq_do_offload(void)
void _irq_do_offload(void)
{
offload_routine(offload_param);
}
void arch_irq_offload(irq_offload_routine_t routine, void *parameter)
void irq_offload(irq_offload_routine_t routine, void *parameter)
{
unsigned int key;
int key;
key = irq_lock();
offload_routine = routine;
offload_param = parameter;
__asm__ volatile ("trap_s %[id]"
:
: [id] "i"(_TRAP_S_SCALL_IRQ_OFFLOAD) : );
__asm__ volatile ("trap_s 0");
irq_unlock(key);
}
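Typical (hypothetical) use of the offload hook above, e.g. from a test that needs a routine to run in interrupt context:
static void example_offloaded_work(void *param)
{
	int *hits = param;

	(*hits)++;	/* executes inside the trap_s handler */
}

static void example_run_in_irq_context(void)
{
	static int hits;

	irq_offload(example_offloaded_work, &hits);
}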


@@ -0,0 +1,53 @@
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @brief IRQ part of vector table for Quark SE Sensor Subsystem
*
* This file contains the IRQ part of the vector table. It is meant to be used
* for one of two cases:
*
* a) When software-managed ISRs (SW_ISR_TABLE) is enabled, and in that case it
* binds _IsrWrapper() to all the IRQ entries in the vector table.
*
* b) When the BSP is written so that device ISRs are installed directly in the
* vector table, they are enumerated here.
*
*/
#include <toolchain.h>
#include <sections.h>
extern void _isr_enter(void);
typedef void (*vth)(void); /* Vector Table Handler */
#if defined(CONFIG_SW_ISR_TABLE)
vth __irq_vector_table _irq_vector_table[CONFIG_NUM_IRQS - 16] = {
[0 ...(CONFIG_NUM_IRQS - 17)] = _isr_enter
};
#elif !defined(CONFIG_IRQ_VECTOR_TABLE_CUSTOM)
extern void _SpuriousIRQ(void);
/* placeholders: fill with real ISRs */
vth __irq_vector_table _irq_vector_table[CONFIG_NUM_IRQS - 16] = {
[0 ...(CONFIG_NUM_IRQS - 17)] = _SpuriousIRQ
};
#endif /* CONFIG_SW_ISR_TABLE */


@@ -1,7 +1,17 @@
/*
* Copyright (c) 2014-2015 Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
@@ -13,19 +23,20 @@
* a parameter.
*/
#include <offsets_short.h>
#include <toolchain.h>
#include <linker/sections.h>
#include <sw_isr_table.h>
#include <kernel_structs.h>
#include <arch/cpu.h>
#include <swap_macros.h>
#define _ASMLANGUAGE
GTEXT(_isr_wrapper)
#include <offsets.h>
#include <toolchain.h>
#include <sections.h>
#include <sw_isr_table.h>
#include <nano_private.h>
#include <arch/cpu.h>
GTEXT(_isr_enter)
GTEXT(_isr_demux)
#if defined(CONFIG_SYS_POWER_MANAGEMENT)
GTEXT(z_sys_power_save_idle_exit)
#if defined(CONFIG_NANOKERNEL) && defined(CONFIG_TICKLESS_IDLE)
GTEXT(_power_save_idle_exit)
#endif
/*
@@ -34,15 +45,15 @@ _rirq_enter/_firq_enter: they are jump points.
The flow is the following:
ISR -> _isr_wrapper -- + -> _rirq_enter -> _isr_demux -> ISR -> _rirq_exit
|
+ -> _firq_enter -> _isr_demux -> ISR -> _firq_exit
ISR -> _isr_enter -- + -> _rirq_enter -> _isr_demux -> ISR -> _rirq_exit
|
+ -> _firq_enter -> _isr_demux -> ISR -> _firq_exit
Context switch explanation:
The context switch code is spread in these files:
isr_wrapper.s, switch.s, swap_macros.s, fast_irq.s, regular_irq.s
isr_wrapper.s, swap.s, swap_macros.s, fast_irq.s, regular_irq.s
IRQ stack frame layout:
@@ -50,6 +61,9 @@ IRQ stack frame layout:
status32
pc
[jli_base]
[ldi_base]
[ei_base]
lp_count
lp_start
lp_end
@@ -60,15 +74,17 @@ IRQ stack frame layout:
low address
[registers not taken into account in the current implementation]
The context switch code adopts this standard so that it is easier to follow:
- r1 contains _kernel ASAP and is not overwritten over the lifespan of
- r1 contains _nanokernel ASAP and is not overwritten over the lifespan of
the functions.
- r2 contains _kernel.current ASAP, and the incoming thread when we
- r2 contains _nanokernel.current ASAP, and the incoming thread when we
transition from outgoing thread to incoming thread
Not loading _kernel into r0 allows loading _kernel without stomping on
the parameter in r0 in arch_switch().
Not loading _nanokernel into r0 allows loading _nanokernel without stomping on
the parameter in r0 in _Swap().
ARCv2 processors have two kinds of interrupts: fast (FIRQ) and regular. The
@@ -76,34 +92,27 @@ official documentation calls the regular interrupts 'IRQs', but the internals
of the kernel call them 'RIRQs' to differentiate from the 'irq' subsystem,
which is the interrupt API/layer of abstraction.
For FIRQ, there are two cases, depending upon the value of
CONFIG_RGF_NUM_BANKS.
FIRQs can be used to allow ISRs to run without having to save any context,
since they work with a second register bank. However, they can be somewhat more
limited than RIRQs since the register bank does not copy every possible
register that is needed to implement all available instructions: an example is
that the 'loop' registers (lp_count, lp_end, lp_start) are not present in the
second bank. The kernel thus takes upon itself to save these extra registers,
if the FIRQ is made known to the kernel. It is possible for a FIRQ to operate
outside of the kernel, but care must be taken to only use instructions that
only use the banked registers. RIRQs must always use the kernel's interrupt
entry and exit mechanisms.
CONFIG_RGF_NUM_BANKS==1 case:
Scratch registers are pushed onto the current stack just as they are with
RIRQ. See the above frame layout. Unlike RIRQ, the status32_p0 and ilink
registers are where status32 and the program counter are located, so these
need to be pushed.
CONFIG_RGF_NUM_BANKS!=1 case:
The FIRQ handler has its own register bank for general purpose registers,
and thus it doesn't have to save them on a stack. The 'loop' registers
(lp_count, lp_end, lp_start), however, are not present in the
second bank. The handler saves these special registers in unused callee saved
registers (to avoid stack accesses). It is possible to register a FIRQ
handler that operates outside of the kernel, but care must be taken to only
use instructions that only use the banked registers.
The kernel is able to handle transitions to and from FIRQ, RIRQ and threads.
The contexts are saved 'lazily': the minimum amount of work is
The kernel is able to handle transitions to and from FIRQ, RIRQ and threads
(fibers/task). The contexts are saved 'lazily': the minimum amount of work is
done upfront, and the rest is done when needed:
o RIRQ
All needed registers to run C code in the ISR are saved automatically
All needed registers to run C code in the ISR are saved automatically
on the outgoing thread's stack: loop, status32, pc, and the caller-
saved GPRs. That stack frame layout is pre-determined. If returning
to a thread, the stack is popped and no registers have to be saved by
to a fiber, the stack is popped and no registers have to be saved by
the kernel. If a context switch is required, the callee-saved GPRs
are then saved in the thread control structure (TCS).
@@ -113,24 +122,14 @@ o FIRQ
the FIRQ does not take a scheduling decision and leaves it the RIRQ to
handle. This limits the amount of code that has to run at interrupt-level.
CONFIG_RGF_NUM_BANKS==1 case:
Registers are saved on the stack frame just as they are for RIRQ.
Context switch can happen just as it does in the RIRQ case, however,
if the FIRQ interrupted a RIRQ, the FIRQ will return from interrupt and
let the RIRQ do the context switch. At entry, one register is needed in order
to have code to save other registers. r0 is saved first in a global called
saved_r0.
CONFIG_RGF_NUM_BANKS!=1 case:
During early initialization, the sp in the 2nd register bank is made to
refer to _firq_stack. This allows for the FIRQ handler to use its own stack.
GPRs are banked, loop registers are saved in unused callee saved regs upon
interrupt entry. If returning to a thread, loop registers are restored and the
GPRs are banked, loop registers are saved in a global structure upon
interrupt entry. If returning to a fiber, loop registers are popped and the
CPU switches back to bank 0 for the GPRs. If a context switch is
needed, at this point only are all the registers saved. First, a
stack frame with the same layout as the automatic RIRQ one is created
and then the callee-saved GPRs are saved in the TCS. status32_p0 and
ilink are saved in this case, not status32 and pc.
To create the stack frame, the FIRQ handling code must first go back to using
bank0 of registers, since that is where the registers containing the exiting
thread are saved. Care must be taken not to touch any register before saving
@@ -150,7 +149,7 @@ From coop:
o to coop
Do a normal function call return.
Restore interrupt lock level and do a normal function call return.
o to any irq
@@ -160,21 +159,20 @@ From coop:
From FIRQ:
When CONFIG_RGF_NUM_BANKS==1, context switch is done as it is for RIRQ.
When CONFIG_RGF_NUM_BANKS!=1, the processor is put back to using bank0,
not bank1 anymore, because it had to save the outgoing context from bank0,
and now has to load the incoming one
The processor is back to using bank0, not bank1 anymore, because it had to
save the outgoing context from bank0, and now has to load the incoming one
into bank0.
o to coop
The address of the returning instruction from arch_switch() is loaded
in ilink and the saved status32 in status32_p0.
The address of the returning instruction from _Swap() is loaded in ilink and
the saved status32 in status32_p0, taking care to adjust the interrupt lock
state desired in status32_p0. The return value is put in r0.
o to any irq
The IRQ has saved the caller-saved registers in a stack frame, which must be
popped, and status32 and pc loaded in status32_p0 and ilink.
popped, and status32 and pc loaded in status32_p0 and ilink.
From RIRQ:
@@ -182,7 +180,7 @@ From RIRQ:
The interrupt return mechanism in the processor expects a stack frame, but
the outgoing context did not create one. A fake one is created here, with
only the relevant values filled in: pc, status32.
only the relevant values filled in: pc, status32 and the return value in r0.
There is a discrepancy between the ABI from the ARCv2 docs, including the
way the processor pushes GPRs in pairs in the IRQ stack frame, and the ABI
@@ -201,57 +199,41 @@ From RIRQ:
interrupt.
*/
SECTION_FUNC(TEXT, _isr_wrapper)
#if defined(CONFIG_ARC_FIRQ)
#if CONFIG_RGF_NUM_BANKS == 1
/* free r0 here, use r0 to check whether irq is firq.
* for rirq, as sp will not change and r0 already saved, this action
* in fact is an action like nop.
* for firq, r0 will be restored later
*/
st_s r0, [sp]
#endif
SECTION_FUNC(TEXT, _isr_enter)
lr r0, [_ARC_V2_AUX_IRQ_ACT]
ffs r0, r0
cmp r0, 0
#if CONFIG_RGF_NUM_BANKS == 1
bnz rirq_path
ld_s r0, [sp]
/* 1-register bank FIRQ handling must save registers on stack */
_create_irq_stack_frame
lr r0, [_ARC_V2_STATUS32_P0]
st_s r0, [sp, ___isf_t_status32_OFFSET]
lr r0, [_ARC_V2_ERET]
st_s r0, [sp, ___isf_t_pc_OFFSET]
mov_s r3, _firq_exit
mov_s r2, _firq_enter
j_s [r2]
rirq_path:
mov_s r3, _rirq_exit
mov_s r2, _rirq_enter
j_s [r2]
#else
mov.z r3, _firq_exit
mov.z r2, _firq_enter
mov.nz r3, _rirq_exit
mov.nz r2, _rirq_enter
j_s [r2]
#endif
#else
mov_s r3, _rirq_exit
mov_s r2, _rirq_enter
j_s [r2]
#endif
#if defined(CONFIG_TRACING)
GTEXT(sys_trace_isr_enter)
j_s [r2]
#ifdef CONFIG_KERNEL_EVENT_LOGGER_SLEEP
GTEXT(_sys_k_event_logger_exit_sleep)
.macro log_sleep_k_event
clri r0 /* do not interrupt event logger operations */
push_s r0
push_s blink
jl _sys_k_event_logger_exit_sleep
pop_s blink
pop_s r0
seti r0
.endm
#else
#define log_sleep_k_event
#endif
#if defined(CONFIG_KERNEL_EVENT_LOGGER_INTERRUPT)
GTEXT(_sys_k_event_logger_interrupt)
.macro log_interrupt_k_event
clri r0 /* do not interrupt event logger operations */
push_s r0
push_s blink
jl sys_trace_isr_enter
jl _sys_k_event_logger_interrupt
pop_s blink
pop_s r0
seti r0
@@ -260,23 +242,14 @@ GTEXT(sys_trace_isr_enter)
#define log_interrupt_k_event
#endif
#if defined(CONFIG_SYS_POWER_MANAGEMENT)
#if defined(CONFIG_NANOKERNEL) && defined(CONFIG_TICKLESS_IDLE)
.macro exit_tickless_idle
clri r0 /* do not interrupt exiting tickless idle operations */
push_s r1
push_s r0
mov_s r1, _kernel
ld_s r0, [r1, _kernel_offset_to_idle] /* requested idle duration */
breq r0, 0, _skip_sys_power_save_idle_exit
st 0, [r1, _kernel_offset_to_idle] /* zero idle duration */
push_s blink
jl z_sys_power_save_idle_exit
jl _power_save_idle_exit
pop_s blink
_skip_sys_power_save_idle_exit:
pop_s r0
pop_s r1
seti r0
.endm
#else
@@ -287,56 +260,23 @@ _skip_sys_power_save_idle_exit:
SECTION_FUNC(TEXT, _isr_demux)
push_s r3
/* according to ARCv2 ISA, r25, r30, r58, r59 are caller-saved
* scratch registers, possibly used by interrupt handlers
*/
push r25
push r30
#ifdef CONFIG_ARC_HAS_ACCL_REGS
push r58
push r59
#endif
#ifdef CONFIG_EXECUTION_BENCHMARKING
bl read_timer_start_of_isr
#endif
/* cannot be done before this point because we must be able to run C */
/* r0 is available to be stomped here, and exit_tickless_idle uses it */
exit_tickless_idle
log_interrupt_k_event
log_sleep_k_event
lr r0, [_ARC_V2_ICAUSE]
/* handle software triggered interrupt */
lr r3, [_ARC_V2_AUX_IRQ_HINT]
brne r3, r0, irq_hint_handled
sr 0, [_ARC_V2_AUX_IRQ_HINT]
irq_hint_handled:
sub r0, r0, 16
mov_s r1, _sw_isr_table
mov r1, _sw_isr_table
add3 r0, r1, r0 /* table entries are 8-bytes wide */
ld_s r1, [r0, 4] /* ISR into r1 */
#ifdef CONFIG_EXECUTION_BENCHMARKING
push_s r0
push_s r1
bl read_timer_end_of_isr
pop_s r1
pop_s r0
#endif
jl_s.d [r1]
ld_s r0, [r0] /* delay slot: ISR parameter into r0 */
#ifdef CONFIG_ARC_HAS_ACCL_REGS
pop r59
pop r58
#endif
pop r30
pop r25
/* back from ISR, jump to exit stub */
pop_s r3
j_s [r3]
nop_s
nop
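As a C-level sketch of what the lookup above indexes (the generated _sw_isr_table is the authoritative definition): each entry is 8 bytes and IRQ line N maps to entry N - 16.
struct example_isr_table_entry {
	void *arg;              /* offset 0: loaded into r0 in the delay slot */
	void (*isr)(void *arg); /* offset 4: fetched into r1 and jumped to */
};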


@@ -1,6 +0,0 @@
# SPDX-License-Identifier: Apache-2.0
zephyr_library()
zephyr_library_sources_if_kconfig(arc_core_mpu.c)
zephyr_library_sources_if_kconfig(arc_mpu.c)


@@ -1,33 +0,0 @@
# Memory Protection Unit (MPU) configuration options
# Copyright (c) 2017 Synopsys
# SPDX-License-Identifier: Apache-2.0
config ARC_MPU_VER
int "ARC MPU version"
range 2 4
default 2
help
ARC MPU has several versions. For MPU v2, the minimum region is 2048 bytes;
for MPU v3, the minimum region is 32 bytes.
config ARC_CORE_MPU
bool "ARC Core MPU functionalities"
help
ARC core MPU functionalities
config MPU_STACK_GUARD
bool "Thread Stack Guards"
depends on ARC_CORE_MPU
help
Enable thread stack guards via MPU. ARC supports built-in stack protection.
If your core supports that, it is preferred over MPU stack guard
config ARC_MPU
bool "ARC MPU Support"
select ARC_CORE_MPU
select THREAD_STACK_INFO
select MEMORY_PROTECTION
select MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT if ARC_MPU_VER = 2
help
Target has ARC MPU (currently only works for EMSK 2.2/2.3 ARCEM7D)


@@ -1,101 +0,0 @@
/*
* Copyright (c) 2017 Synopsys.
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <device.h>
#include <init.h>
#include <kernel.h>
#include <soc.h>
#include <arch/arc/v2/mpu/arc_core_mpu.h>
#include <kernel_structs.h>
/*
* @brief Configure MPU for the thread
*
* This function configures per thread memory map reprogramming the MPU.
*
* @param thread thread info data structure.
*/
void configure_mpu_thread(struct k_thread *thread)
{
arc_core_mpu_disable();
arc_core_mpu_configure_thread(thread);
arc_core_mpu_enable();
}
#if defined(CONFIG_USERSPACE)
int arch_mem_domain_max_partitions_get(void)
{
return arc_core_mpu_get_max_domain_partition_regions();
}
/*
* Reset MPU region for a single memory partition
*/
void arch_mem_domain_partition_remove(struct k_mem_domain *domain,
u32_t partition_id)
{
if (_current->mem_domain_info.mem_domain != domain) {
return;
}
arc_core_mpu_disable();
arc_core_mpu_remove_mem_partition(domain, partition_id);
arc_core_mpu_enable();
}
/*
* Configure MPU memory domain
*/
void arch_mem_domain_thread_add(struct k_thread *thread)
{
if (_current != thread) {
return;
}
arc_core_mpu_disable();
arc_core_mpu_configure_mem_domain(thread);
arc_core_mpu_enable();
}
/*
* Destroy MPU regions for the mem domain
*/
void arch_mem_domain_destroy(struct k_mem_domain *domain)
{
if (_current->mem_domain_info.mem_domain != domain) {
return;
}
arc_core_mpu_disable();
arc_core_mpu_remove_mem_domain(domain);
arc_core_mpu_enable();
}
void arch_mem_domain_partition_add(struct k_mem_domain *domain,
u32_t partition_id)
{
/* No-op on this architecture */
}
void arch_mem_domain_thread_remove(struct k_thread *thread)
{
if (_current != thread) {
return;
}
arch_mem_domain_destroy(thread->mem_domain_info.mem_domain);
}
/*
* Validate the given buffer is user accessible or not
*/
int arch_buffer_validate(void *addr, size_t size, int write)
{
return arc_core_mpu_buffer_validate(addr, size, write);
}
#endif
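The hooks above are driven by the kernel's memory-domain API; a hedged usage sketch follows (buffer, partition and domain names are invented, and MPU v2 additionally requires power-of-two size and alignment for the partition):
static u8_t __aligned(2048) example_buf[2048];

K_MEM_PARTITION_DEFINE(example_part, example_buf, sizeof(example_buf),
		       K_MEM_PARTITION_P_RW_U_RW);

static struct k_mem_partition *example_parts[] = { &example_part };
static struct k_mem_domain example_domain;

static void example_domain_setup(struct k_thread *thread)
{
	k_mem_domain_init(&example_domain, ARRAY_SIZE(example_parts),
			  example_parts);
	k_mem_domain_add_thread(&example_domain, thread);
}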


@@ -1,59 +0,0 @@
/*
* Copyright (c) 2019 Synopsys.
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <device.h>
#include <init.h>
#include <kernel.h>
#include <soc.h>
#include <arch/arc/v2/aux_regs.h>
#include <arch/arc/v2/mpu/arc_mpu.h>
#include <arch/arc/v2/mpu/arc_core_mpu.h>
#include <linker/linker-defs.h>
#define LOG_LEVEL CONFIG_MPU_LOG_LEVEL
#include <logging/log.h>
LOG_MODULE_REGISTER(mpu);
/**
* @brief Get the number of supported MPU regions
*
*/
static inline u8_t get_num_regions(void)
{
u32_t num = z_arc_v2_aux_reg_read(_ARC_V2_MPU_BUILD);
num = (num & 0xFF00U) >> 8U;
return (u8_t)num;
}
/**
* This internal function is utilized by the MPU driver to parse the intent
* type (i.e. THREAD_STACK_REGION) and return the correct parameter set.
*/
static inline u32_t get_region_attr_by_type(u32_t type)
{
switch (type) {
case THREAD_STACK_USER_REGION:
return REGION_RAM_ATTR;
case THREAD_STACK_REGION:
return AUX_MPU_ATTR_KW | AUX_MPU_ATTR_KR;
case THREAD_APP_DATA_REGION:
return REGION_RAM_ATTR;
case THREAD_STACK_GUARD_REGION:
/* no Write and Execute to guard region */
return AUX_MPU_ATTR_UR | AUX_MPU_ATTR_KR;
default:
/* unknown type */
return 0;
}
}
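/* Hedged reading of the attribute names used above (inferred from naming;
 * the ARC aux-register documentation is authoritative): KR/KW are kernel
 * read/write permissions and UR/UW the user-mode equivalents, so the stack
 * guard region ends up readable from both modes but writable from neither.
 */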
#if CONFIG_ARC_MPU_VER == 2
#include "arc_mpu_v2_internal.h"
#elif CONFIG_ARC_MPU_VER == 3
#include "arc_mpu_v3_internal.h"
#endif


@@ -1,479 +0,0 @@
/*
* Copyright (c) 2019 Synopsys.
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef ZEPHYR_ARCH_ARC_CORE_MPU_ARC_MPU_V2_INTERNAL_H_
#define ZEPHYR_ARCH_ARC_CORE_MPU_ARC_MPU_V2_INTERNAL_H_
#define AUX_MPU_RDB_VALID_MASK (0x1)
#define AUX_MPU_EN_ENABLE (0x40000000)
#define AUX_MPU_EN_DISABLE (0xBFFFFFFF)
#define AUX_MPU_RDP_REGION_SIZE(bits) \
(((bits - 1) & 0x3) | (((bits - 1) & 0x1C) << 7))
#define AUX_MPU_RDP_ATTR_MASK (0x1FC)
#define AUX_MPU_RDP_SIZE_MASK (0xE03)
#define _ARC_V2_MPU_EN (0x409)
#define _ARC_V2_MPU_RDB0 (0x422)
#define _ARC_V2_MPU_RDP0 (0x423)
/* For MPU version 2, the minimum protection region size is 2048 bytes */
#define ARC_FEATURE_MPU_ALIGNMENT_BITS 11
/**
* This internal function initializes a MPU region
*/
static inline void _region_init(u32_t index, u32_t region_addr, u32_t size,
u32_t region_attr)
{
u8_t bits = find_msb_set(size) - 1;
index = index * 2U;
if (bits < ARC_FEATURE_MPU_ALIGNMENT_BITS) {
bits = ARC_FEATURE_MPU_ALIGNMENT_BITS;
}
if ((1 << bits) < size) {
bits++;
}
if (size > 0) {
region_attr &= ~(AUX_MPU_RDP_SIZE_MASK);
region_attr |= AUX_MPU_RDP_REGION_SIZE(bits);
region_addr |= AUX_MPU_RDB_VALID_MASK;
} else {
region_addr = 0U;
}
z_arc_v2_aux_reg_write(_ARC_V2_MPU_RDP0 + index, region_attr);
z_arc_v2_aux_reg_write(_ARC_V2_MPU_RDB0 + index, region_addr);
}
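The size handling above rounds every request up to a power of two no smaller than the v2 minimum; the same logic pulled out as a standalone sketch:
/* e.g. a 3000-byte request: find_msb_set(3000) - 1 = 11, and since
 * (1 << 11) = 2048 < 3000 the result becomes 12, i.e. a 4 KiB region.
 */
static inline u8_t example_region_size_bits(u32_t size)
{
	u8_t bits = find_msb_set(size) - 1;

	if (bits < ARC_FEATURE_MPU_ALIGNMENT_BITS) {
		bits = ARC_FEATURE_MPU_ALIGNMENT_BITS;
	}
	if ((1U << bits) < size) {
		bits++;
	}
	return bits;
}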
/**
* This internal function is utilized by the MPU driver to parse the intent
* type (i.e. THREAD_STACK_REGION) and return the correct region index.
*/
static inline int get_region_index_by_type(u32_t type)
{
/*
* The new MPU regions are allocated per type after the statically
* configured regions. The type is one-indexed rather than
* zero-indexed.
*
* For ARC MPU v2, the smaller index has higher priority, so the
* index is allocated in reverse order. Static regions start from
* the biggest index, then thread related regions.
*
*/
switch (type) {
case THREAD_STACK_USER_REGION:
return get_num_regions() - mpu_config.num_regions
- THREAD_STACK_REGION;
case THREAD_STACK_REGION:
case THREAD_APP_DATA_REGION:
case THREAD_STACK_GUARD_REGION:
return get_num_regions() - mpu_config.num_regions - type;
case THREAD_DOMAIN_PARTITION_REGION:
#if defined(CONFIG_MPU_STACK_GUARD)
return get_num_regions() - mpu_config.num_regions - type;
#else
/*
* Start domain partition region from stack guard region
* since stack guard is not enabled.
*/
return get_num_regions() - mpu_config.num_regions - type + 1;
#endif
default:
__ASSERT(0, "Unsupported type");
return -EINVAL;
}
}
/**
* This internal function checks if region is enabled or not
*/
static inline bool _is_enabled_region(u32_t r_index)
{
return ((z_arc_v2_aux_reg_read(_ARC_V2_MPU_RDB0 + r_index * 2U)
& AUX_MPU_RDB_VALID_MASK) == AUX_MPU_RDB_VALID_MASK);
}
/**
 * This internal function checks if the given buffer is in the region
*/
static inline bool _is_in_region(u32_t r_index, u32_t start, u32_t size)
{
u32_t r_addr_start;
u32_t r_addr_end;
u32_t r_size_lshift;
r_addr_start = z_arc_v2_aux_reg_read(_ARC_V2_MPU_RDB0 + r_index * 2U)
& (~AUX_MPU_RDB_VALID_MASK);
r_size_lshift = z_arc_v2_aux_reg_read(_ARC_V2_MPU_RDP0 + r_index * 2U)
& AUX_MPU_RDP_SIZE_MASK;
r_size_lshift = (r_size_lshift & 0x3) | ((r_size_lshift >> 7) & 0x1C);
r_addr_end = r_addr_start + (1 << (r_size_lshift + 1));
if (start >= r_addr_start && (start + size) <= r_addr_end) {
return 1;
}
return 0;
}
/**
* This internal function check if the region is user accessible or not
*/
static inline bool _is_user_accessible_region(u32_t r_index, int write)
{
u32_t r_ap;
r_ap = z_arc_v2_aux_reg_read(_ARC_V2_MPU_RDP0 + r_index * 2U);
r_ap &= AUX_MPU_RDP_ATTR_MASK;
if (write) {
return ((r_ap & (AUX_MPU_ATTR_UW | AUX_MPU_ATTR_KW)) ==
(AUX_MPU_ATTR_UW | AUX_MPU_ATTR_KW));
}
return ((r_ap & (AUX_MPU_ATTR_UR | AUX_MPU_ATTR_KR)) ==
(AUX_MPU_ATTR_UR | AUX_MPU_ATTR_KR));
}
/**
* @brief configure the base address and size for an MPU region
*
* @param type MPU region type
* @param base base address in RAM
* @param size size of the region
*/
static inline int _mpu_configure(u8_t type, u32_t base, u32_t size)
{
s32_t region_index = get_region_index_by_type(type);
u32_t region_attr = get_region_attr_by_type(type);
LOG_DBG("Region info: 0x%x 0x%x", base, size);
if (region_attr == 0U || region_index < 0) {
return -EINVAL;
}
/*
* For ARC MPU v2, MPU regions can be overlapped, smaller
* region index has higher priority.
*/
_region_init(region_index, base, size, region_attr);
return 0;
}
/* ARC Core MPU Driver API Implementation for ARC MPUv2 */
/**
* @brief enable the MPU
*/
void arc_core_mpu_enable(void)
{
/* Enable MPU */
z_arc_v2_aux_reg_write(_ARC_V2_MPU_EN,
z_arc_v2_aux_reg_read(_ARC_V2_MPU_EN) | AUX_MPU_EN_ENABLE);
}
/**
* @brief disable the MPU
*/
void arc_core_mpu_disable(void)
{
/* Disable MPU */
z_arc_v2_aux_reg_write(_ARC_V2_MPU_EN,
z_arc_v2_aux_reg_read(_ARC_V2_MPU_EN) & AUX_MPU_EN_DISABLE);
}
/**
* @brief configure the thread's MPU regions
*
* @param thread the target thread
*/
void arc_core_mpu_configure_thread(struct k_thread *thread)
{
#if defined(CONFIG_MPU_STACK_GUARD)
#if defined(CONFIG_USERSPACE)
if ((thread->base.user_options & K_USER) != 0) {
/* the areas before and after the user stack of thread is
* kernel only. These area can be used as stack guard.
* -----------------------
* | kernel only area |
* |---------------------|
* | user stack |
* |---------------------|
* |privilege stack guard|
* |---------------------|
* | privilege stack |
* -----------------------
*/
if (_mpu_configure(THREAD_STACK_GUARD_REGION,
thread->arch.priv_stack_start - STACK_GUARD_SIZE,
STACK_GUARD_SIZE) < 0) {
LOG_ERR("thread %p's stack guard failed", thread);
return;
}
} else {
if (_mpu_configure(THREAD_STACK_GUARD_REGION,
thread->stack_info.start - STACK_GUARD_SIZE,
STACK_GUARD_SIZE) < 0) {
LOG_ERR("thread %p's stack guard failed", thread);
return;
}
}
#else
if (_mpu_configure(THREAD_STACK_GUARD_REGION,
thread->stack_info.start - STACK_GUARD_SIZE,
STACK_GUARD_SIZE) < 0) {
LOG_ERR("thread %p's stack guard failed", thread);
return;
}
#endif
#endif
#if defined(CONFIG_USERSPACE)
/* configure stack region of user thread */
if (thread->base.user_options & K_USER) {
LOG_DBG("configure user thread %p's stack", thread);
if (_mpu_configure(THREAD_STACK_USER_REGION,
(u32_t)thread->stack_obj, thread->stack_info.size) < 0) {
LOG_ERR("user thread %p's stack failed", thread);
return;
}
}
LOG_DBG("configure thread %p's domain", thread);
arc_core_mpu_configure_mem_domain(thread);
#endif
}
/**
* @brief configure the default region
*
* @param region_attr region attribute of default region
*/
void arc_core_mpu_default(u32_t region_attr)
{
u32_t val = z_arc_v2_aux_reg_read(_ARC_V2_MPU_EN) &
(~AUX_MPU_RDP_ATTR_MASK);
region_attr &= AUX_MPU_RDP_ATTR_MASK;
z_arc_v2_aux_reg_write(_ARC_V2_MPU_EN, region_attr | val);
}
/**
* @brief configure the MPU region
*
* @param index MPU region index
* @param base base address
* @param region_attr region attribute
*/
int arc_core_mpu_region(u32_t index, u32_t base, u32_t size,
u32_t region_attr)
{
if (index >= get_num_regions()) {
return -EINVAL;
}
region_attr &= AUX_MPU_RDP_ATTR_MASK;
_region_init(index, base, size, region_attr);
return 0;
}
#if defined(CONFIG_USERSPACE)
/**
* @brief configure MPU regions for the memory partitions of the memory domain
*
* @param thread the thread which has memory domain
*/
void arc_core_mpu_configure_mem_domain(struct k_thread *thread)
{
int region_index =
get_region_index_by_type(THREAD_DOMAIN_PARTITION_REGION);
u32_t num_partitions;
struct k_mem_partition *pparts;
struct k_mem_domain *mem_domain = NULL;
if (thread) {
mem_domain = thread->mem_domain_info.mem_domain;
}
if (mem_domain) {
LOG_DBG("configure domain: %p", mem_domain);
num_partitions = mem_domain->num_partitions;
pparts = mem_domain->partitions;
} else {
LOG_DBG("disable domain partition regions");
num_partitions = 0U;
pparts = NULL;
}
for (; region_index >= 0; region_index--) {
if (num_partitions) {
LOG_DBG("set region 0x%x 0x%lx 0x%x",
region_index, pparts->start, pparts->size);
_region_init(region_index, pparts->start,
pparts->size, pparts->attr);
num_partitions--;
} else {
/* clear the left mpu entries */
_region_init(region_index, 0, 0, 0);
}
pparts++;
}
}
/**
* @brief remove MPU regions for the memory partitions of the memory domain
*
* @param mem_domain the target memory domain
*/
void arc_core_mpu_remove_mem_domain(struct k_mem_domain *mem_domain)
{
ARG_UNUSED(mem_domain);
int region_index =
get_region_index_by_type(THREAD_DOMAIN_PARTITION_REGION);
for (; region_index >= 0; region_index--) {
_region_init(region_index, 0, 0, 0);
}
}
/**
* @brief reset MPU region for a single memory partition
*
* @param domain the target memory domain
* @param partition_id memory partition id
*/
void arc_core_mpu_remove_mem_partition(struct k_mem_domain *domain,
u32_t part_id)
{
ARG_UNUSED(domain);
int region_index =
get_region_index_by_type(THREAD_DOMAIN_PARTITION_REGION);
LOG_DBG("disable region 0x%x", region_index + part_id);
/* Disable region */
_region_init(region_index + part_id, 0, 0, 0);
}
/**
* @brief get the maximum number of free regions for memory domain partitions
*/
int arc_core_mpu_get_max_domain_partition_regions(void)
{
return get_region_index_by_type(THREAD_DOMAIN_PARTITION_REGION) + 1;
}
/**
* @brief validate the given buffer is user accessible or not
*/
int arc_core_mpu_buffer_validate(void *addr, size_t size, int write)
{
int r_index;
/*
* For ARC MPU v2, smaller region number takes priority.
* we can stop the iteration immediately once we find the
* matched region that grants permission or denies access.
*
*/
for (r_index = 0; r_index < get_num_regions(); r_index++) {
if (!_is_enabled_region(r_index) ||
!_is_in_region(r_index, (u32_t)addr, size)) {
continue;
}
if (_is_user_accessible_region(r_index, write)) {
return 0;
} else {
return -EPERM;
}
}
return -EPERM;
}
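A hypothetical caller of the validation routine above, rejecting a user-supplied buffer that lacks write permission before touching it:
static int example_copy_to_user(void *user_buf, size_t len)
{
	if (arc_core_mpu_buffer_validate(user_buf, len, 1) != 0) {
		return -EPERM;	/* no user-writable region covers the buffer */
	}
	/* ... safe to fill the buffer on the user thread's behalf ... */
	return 0;
}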
#endif /* CONFIG_USERSPACE */
/* ARC MPU Driver Initial Setup */
/*
* @brief MPU default initialization and configuration
*
* This function provides the default configuration mechanism for the Memory
* Protection Unit (MPU).
*/
static int arc_mpu_init(struct device *arg)
{
ARG_UNUSED(arg);
u32_t num_regions;
u32_t i;
num_regions = get_num_regions();
/* ARC MPU supports up to 16 Regions */
if (mpu_config.num_regions > num_regions) {
__ASSERT(0,
"Request to configure: %u regions (supported: %u)\n",
mpu_config.num_regions, num_regions);
return -EINVAL;
}
/* Disable MPU */
arc_core_mpu_disable();
int r_index;
/*
* the MPU regions are filled in the reverse order.
* According to ARCv2 ISA, the MPU region with smaller
* index has higher priority. The static background MPU
* regions in mpu_config will be in the bottom. Then
* the special type regions will be above.
*
*/
r_index = num_regions - mpu_config.num_regions;
/* clear all the regions first */
for (i = 0U; i < r_index; i++) {
_region_init(i, 0, 0, 0);
}
/* configure the static regions */
for (i = 0U; i < mpu_config.num_regions; i++) {
_region_init(r_index,
mpu_config.mpu_regions[i].base,
mpu_config.mpu_regions[i].size,
mpu_config.mpu_regions[i].attr);
r_index++;
}
/* default region: no read, write and execute */
arc_core_mpu_default(0);
/* Enable MPU */
arc_core_mpu_enable();
return 0;
}
SYS_INIT(arc_mpu_init, PRE_KERNEL_1,
CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
#endif /* ZEPHYR_ARCH_ARC_CORE_MPU_ARC_MPU_V2_INTERNAL_H_ */


@@ -1,748 +0,0 @@
/*
* Copyright (c) 2019 Synopsys.
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef ZEPHYR_ARCH_ARC_CORE_MPU_ARC_MPU_V3_INTERNAL_H_
#define ZEPHYR_ARCH_ARC_CORE_MPU_ARC_MPU_V3_INTERNAL_H_
#define AUX_MPU_RPER_SID1 0x10000
/* valid mask: SID1+secure+valid */
#define AUX_MPU_RPER_VALID_MASK ((0x1) | AUX_MPU_RPER_SID1 | AUX_MPU_ATTR_S)
#define AUX_MPU_RPER_ATTR_MASK (0x1FF)
#define _ARC_V2_MPU_EN (0x409)
/* aux regs added in MPU version 3 */
#define _ARC_V2_MPU_INDEX (0x448) /* MPU index */
#define _ARC_V2_MPU_RSTART (0x449) /* MPU region start address */
#define _ARC_V2_MPU_REND (0x44A) /* MPU region end address */
#define _ARC_V2_MPU_RPER (0x44B) /* MPU region permission register */
#define _ARC_V2_MPU_PROBE (0x44C) /* MPU probe register */
/* For MPU version 3, the minimum protection region size is 32 bytes */
#define ARC_FEATURE_MPU_ALIGNMENT_BITS 5
#define CALC_REGION_END_ADDR(start, size) \
(start + size - (1 << ARC_FEATURE_MPU_ALIGNMENT_BITS))
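A worked example of the end-address macro above (an illustration, not taken from the source):
/* A 1 KiB region based at 0x80001000 is programmed with
 * REND = CALC_REGION_END_ADDR(0x80001000, 0x400)
 *      = 0x80001000 + 0x400 - 0x20 = 0x800013e0,
 * i.e. the start address of the last 32-byte granule inside the region.
 */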
#if defined(CONFIG_USERSPACE) && defined(CONFIG_MPU_STACK_GUARD)
/* 1 for stack guard , 1 for user thread, 1 for split */
#define MPU_REGION_NUM_FOR_THREAD 3
#elif defined(CONFIG_USERSPACE) || defined(CONFIG_MPU_STACK_GUARD)
/* 1 for stack guard or user thread stack , 1 for split */
#define MPU_REGION_NUM_FOR_THREAD 2
#else
#define MPU_REGION_NUM_FOR_THREAD 0
#endif
/**
* @brief internal structure holding information of
* memory areas where dynamic MPU programming is allowed.
*/
struct dynamic_region_info {
u8_t index;
u32_t base;
u32_t size;
u32_t attr;
};
#define MPU_DYNAMIC_REGION_AREAS_NUM 2
static u8_t static_regions_num;
static u8_t dynamic_regions_num;
static u8_t dynamic_region_index;
/**
* Global array holding information about the memory areas
* (index, base, size and attributes) inside which dynamic
* MPU regions may be configured.
*/
static struct dynamic_region_info dyn_reg_info[MPU_DYNAMIC_REGION_AREAS_NUM];
#ifdef CONFIG_ARC_NORMAL_FIRMWARE
/* \todo access the MPU through a secure service */
static inline void _region_init(u32_t index, u32_t region_addr, u32_t size,
u32_t region_attr)
{
}
static inline void _region_set_attr(u32_t index, u32_t attr)
{
}
static inline u32_t _region_get_attr(u32_t index)
{
return 0;
}
static inline u32_t _region_get_start(u32_t index)
{
return 0;
}
static inline void _region_set_start(u32_t index, u32_t start)
{
}
static inline u32_t _region_get_end(u32_t index)
{
return 0;
}
static inline void _region_set_end(u32_t index, u32_t end)
{
}
/**
* This internal function probes the MPU index for the given address;
* if the address is not covered by any MPU region, it returns an error.
*/
static inline int _mpu_probe(u32_t addr)
{
return -EINVAL;
}
/**
* This internal function checks whether the MPU region is enabled
*/
static inline bool _is_enabled_region(u32_t r_index)
{
return false;
}
/**
* This internal function checks whether the region is user-accessible
*/
static inline bool _is_user_accessible_region(u32_t r_index, int write)
{
return false;
}
#else /* CONFIG_ARC_NORMAL_FIRMWARE */
/* the following functions are prepared for SECURE_FIRMWARE */
static inline void _region_init(u32_t index, u32_t region_addr, u32_t size,
u32_t region_attr)
{
if (size < (1 << ARC_FEATURE_MPU_ALIGNMENT_BITS)) {
size = (1 << ARC_FEATURE_MPU_ALIGNMENT_BITS);
}
if (region_attr) {
region_attr &= AUX_MPU_RPER_ATTR_MASK;
region_attr |= AUX_MPU_RPER_VALID_MASK;
}
z_arc_v2_aux_reg_write(_ARC_V2_MPU_INDEX, index);
z_arc_v2_aux_reg_write(_ARC_V2_MPU_RSTART, region_addr);
z_arc_v2_aux_reg_write(_ARC_V2_MPU_REND,
CALC_REGION_END_ADDR(region_addr, size));
z_arc_v2_aux_reg_write(_ARC_V2_MPU_RPER, region_attr);
}
static inline void _region_set_attr(u32_t index, u32_t attr)
{
z_arc_v2_aux_reg_write(_ARC_V2_MPU_INDEX, index);
z_arc_v2_aux_reg_write(_ARC_V2_MPU_RPER, attr |
AUX_MPU_RPER_VALID_MASK);
}
static inline u32_t _region_get_attr(u32_t index)
{
z_arc_v2_aux_reg_write(_ARC_V2_MPU_INDEX, index);
return z_arc_v2_aux_reg_read(_ARC_V2_MPU_RPER);
}
static inline u32_t _region_get_start(u32_t index)
{
z_arc_v2_aux_reg_write(_ARC_V2_MPU_INDEX, index);
return z_arc_v2_aux_reg_read(_ARC_V2_MPU_RSTART);
}
static inline void _region_set_start(u32_t index, u32_t start)
{
z_arc_v2_aux_reg_write(_ARC_V2_MPU_INDEX, index);
z_arc_v2_aux_reg_write(_ARC_V2_MPU_RSTART, start);
}
static inline u32_t _region_get_end(u32_t index)
{
z_arc_v2_aux_reg_write(_ARC_V2_MPU_INDEX, index);
return z_arc_v2_aux_reg_read(_ARC_V2_MPU_REND) +
(1 << ARC_FEATURE_MPU_ALIGNMENT_BITS);
}
static inline void _region_set_end(u32_t index, u32_t end)
{
z_arc_v2_aux_reg_write(_ARC_V2_MPU_INDEX, index);
z_arc_v2_aux_reg_write(_ARC_V2_MPU_REND, end -
(1 << ARC_FEATURE_MPU_ALIGNMENT_BITS));
}
/**
* This internal function probes the MPU index for the given address;
* if the address is not covered by any MPU region, it returns an error.
*/
static inline int _mpu_probe(u32_t addr)
{
u32_t val;
z_arc_v2_aux_reg_write(_ARC_V2_MPU_PROBE, addr);
val = z_arc_v2_aux_reg_read(_ARC_V2_MPU_INDEX);
/* if no match or multiple regions match, return error */
if (val & 0xC0000000) {
return -EINVAL;
} else {
return val;
}
}
/**
* This internal function checks whether the MPU region is enabled
*/
static inline bool _is_enabled_region(u32_t r_index)
{
z_arc_v2_aux_reg_write(_ARC_V2_MPU_INDEX, r_index);
return ((z_arc_v2_aux_reg_read(_ARC_V2_MPU_RPER) &
AUX_MPU_RPER_VALID_MASK) == AUX_MPU_RPER_VALID_MASK);
}
/**
* This internal function checks whether the region is user-accessible
*/
static inline bool _is_user_accessible_region(u32_t r_index, int write)
{
u32_t r_ap;
z_arc_v2_aux_reg_write(_ARC_V2_MPU_INDEX, r_index);
r_ap = z_arc_v2_aux_reg_read(_ARC_V2_MPU_RPER);
r_ap &= AUX_MPU_RPER_ATTR_MASK;
if (write) {
return ((r_ap & (AUX_MPU_ATTR_UW | AUX_MPU_ATTR_KW)) ==
(AUX_MPU_ATTR_UW | AUX_MPU_ATTR_KW));
}
return ((r_ap & (AUX_MPU_ATTR_UR | AUX_MPU_ATTR_KR)) ==
(AUX_MPU_ATTR_UR | AUX_MPU_ATTR_KR));
}
#endif /* CONFIG_ARC_NORMAL_FIRMWARE */
/**
* This internal function allocates a dynamic MPU region and returns
* its index, or an error code on failure
*/
static inline int _dynamic_region_allocate_index(void)
{
if (dynamic_region_index >= get_num_regions()) {
LOG_ERR("no enough mpu entries %d", dynamic_region_index);
return -EINVAL;
}
return dynamic_region_index++;
}
/**
* This internal function checks the area given by (start, size)
* and returns the index if the area matches exactly one MPU entry
*/
static inline int _get_region_index(u32_t start, u32_t size)
{
int index = _mpu_probe(start);
if (index > 0 && index == _mpu_probe(start + size - 1)) {
return index;
}
return -EINVAL;
}
/* @brief allocate and init a dynamic MPU region
*
* This internal function performs the allocation and initialization of
* a dynamic MPU region
*
* @param base region base
* @param size region size
* @param attr region attribute
* @return <0 failure, >0 allocated dynamic region index
*/
static int _dynamic_region_allocate_and_init(u32_t base, u32_t size,
u32_t attr)
{
int u_region_index = _get_region_index(base, size);
int region_index;
LOG_DBG("Region info: base 0x%x size 0x%x attr 0x%x", base, size, attr);
if (u_region_index == -EINVAL) {
/* no underlying region */
region_index = _dynamic_region_allocate_index();
if (region_index > 0) {
/* a new region */
_region_init(region_index, base, size, attr);
}
return region_index;
}
/*
* The new memory region is to be placed inside the underlying
* region, possibly splitting the underlying region into two.
*/
u32_t u_region_start = _region_get_start(u_region_index);
u32_t u_region_end = _region_get_end(u_region_index);
u32_t u_region_attr = _region_get_attr(u_region_index);
u32_t end = base + size;
if ((base == u_region_start) && (end == u_region_end)) {
/* The new region overlaps entirely with the
* underlying region. In this case we simply
* update the partition attributes of the
* underlying region with those of the new
* region.
*/
_region_init(u_region_index, base, size, attr);
region_index = u_region_index;
} else if (base == u_region_start) {
/* The new region starts exactly at the start of the
* underlying region; the start of the underlying
* region needs to be set to the end of the new region.
*/
_region_set_start(u_region_index, base + size);
_region_set_attr(u_region_index, u_region_attr);
region_index = _dynamic_region_allocate_index();
if (region_index > 0) {
_region_init(region_index, base, size, attr);
}
} else if (end == u_region_end) {
/* The new region ends exactly at the end of the
* underlying region; the end of the underlying
* region needs to be set to the start of the
* new region.
*/
_region_set_end(u_region_index, base);
_region_set_attr(u_region_index, u_region_attr);
region_index = _dynamic_region_allocate_index();
if (region_index > 0) {
_region_init(region_index, base, size, attr);
}
} else {
/* The new region lies strictly inside the
* underlying region, which needs to split
* into two regions.
*/
_region_set_end(u_region_index, base);
_region_set_attr(u_region_index, u_region_attr);
region_index = _dynamic_region_allocate_index();
if (region_index > 0) {
_region_init(region_index, base, size, attr);
region_index = _dynamic_region_allocate_index();
if (region_index > 0) {
_region_init(region_index, base + size,
u_region_end - end, u_region_attr);
}
}
}
return region_index;
}
/* @brief reset the dynamic MPU regions
*
* This internal function performs the reset of dynamic MPU regions
*/
static void _mpu_reset_dynamic_regions(void)
{
u32_t i;
u32_t num_regions = get_num_regions();
for (i = static_regions_num; i < num_regions; i++) {
_region_init(i, 0, 0, 0);
}
for (i = 0U; i < dynamic_regions_num; i++) {
_region_init(
dyn_reg_info[i].index,
dyn_reg_info[i].base,
dyn_reg_info[i].size,
dyn_reg_info[i].attr);
}
/* dynamic regions are after static regions */
dynamic_region_index = static_regions_num;
}
/**
* @brief configure the base address and size for an MPU region
*
* @param type MPU region type
* @param base base address in RAM
* @param size size of the region
*/
static inline int _mpu_configure(u8_t type, u32_t base, u32_t size)
{
u32_t region_attr = get_region_attr_by_type(type);
return _dynamic_region_allocate_and_init(base, size, region_attr);
}
/* ARC Core MPU Driver API Implementation for ARC MPUv3 */
/**
* @brief enable the MPU
*/
void arc_core_mpu_enable(void)
{
#ifdef CONFIG_ARC_SECURE_FIRMWARE
/* the default region:
* normal: 0x000, SID: 0x10000, KW: 0x100, KR: 0x80, KE: 0x40
*/
#define MPU_ENABLE_ATTR 0x101c0
#else
#define MPU_ENABLE_ATTR 0
#endif
arc_core_mpu_default(MPU_ENABLE_ATTR);
}
/**
* @brief disable the MPU
*/
void arc_core_mpu_disable(void)
{
/* MPU is always enabled, use default region to
* simulate MPU disable
*/
arc_core_mpu_default(REGION_ALL_ATTR | AUX_MPU_ATTR_S |
AUX_MPU_RPER_SID1);
}
/**
* @brief configure the thread's mpu regions
*
* @param thread the target thread
*/
void arc_core_mpu_configure_thread(struct k_thread *thread)
{
/* The MPU entries of ARC MPUv3 are divided into 2 parts:
* static entries: global MPU entries, not changed on context switch
* dynamic entries: MPU entries changed on context switch and by
* memory domain configuration, including:
* MPU entries for the user thread stack
* MPU entries for the stack guard
* MPU entries for the memory domain
* MPU entries for other thread-specific regions
* Before configuring the thread-specific MPU entries, the dynamic
* entries need to be reset.
*/
_mpu_reset_dynamic_regions();
#if defined(CONFIG_MPU_STACK_GUARD)
#if defined(CONFIG_USERSPACE)
if ((thread->base.user_options & K_USER) != 0U) {
/* The areas before and after the thread's user stack are
* kernel-only. These areas can be used as the stack guard.
* -----------------------
* | kernel only area |
* |---------------------|
* | user stack |
* |---------------------|
* |privilege stack guard|
* |---------------------|
* | privilege stack |
* -----------------------
*/
if (_mpu_configure(THREAD_STACK_GUARD_REGION,
thread->arch.priv_stack_start - STACK_GUARD_SIZE,
STACK_GUARD_SIZE) < 0) {
LOG_ERR("thread %p's stack guard failed", thread);
return;
}
} else {
if (_mpu_configure(THREAD_STACK_GUARD_REGION,
thread->stack_info.start - STACK_GUARD_SIZE,
STACK_GUARD_SIZE) < 0) {
LOG_ERR("thread %p's stack guard failed", thread);
return;
}
}
#else
if (_mpu_configure(THREAD_STACK_GUARD_REGION,
thread->stack_info.start - STACK_GUARD_SIZE,
STACK_GUARD_SIZE) < 0) {
LOG_ERR("thread %p's stack guard failed", thread);
return;
}
#endif
#endif
#if defined(CONFIG_USERSPACE)
u32_t num_partitions;
struct k_mem_partition *pparts;
struct k_mem_domain *mem_domain = thread->mem_domain_info.mem_domain;
/* configure stack region of user thread */
if (thread->base.user_options & K_USER) {
LOG_DBG("configure user thread %p's stack", thread);
if (_mpu_configure(THREAD_STACK_USER_REGION,
(u32_t)thread->stack_obj, thread->stack_info.size) < 0) {
LOG_ERR("thread %p's stack failed", thread);
return;
}
}
/* configure thread's memory domain */
if (mem_domain) {
LOG_DBG("configure thread %p's domain: %p",
thread, mem_domain);
num_partitions = mem_domain->num_partitions;
pparts = mem_domain->partitions;
} else {
num_partitions = 0U;
pparts = NULL;
}
for (u32_t i = 0; i < num_partitions; i++) {
if (pparts->size) {
if (_dynamic_region_allocate_and_init(pparts->start,
pparts->size, pparts->attr) < 0) {
LOG_ERR(
"thread %p's mem region: %p failed",
thread, pparts);
return;
}
}
pparts++;
}
#endif
}
/**
* @brief configure the default region
*
* @param region_attr region attribute of default region
*/
void arc_core_mpu_default(u32_t region_attr)
{
#ifdef CONFIG_ARC_NORMAL_FIRMWARE
/* \todo access the MPU through a secure service */
#else
z_arc_v2_aux_reg_write(_ARC_V2_MPU_EN, region_attr);
#endif
}
/**
* @brief configure the MPU region
*
* @param index MPU region index
* @param base base address
* @param size region size
* @param region_attr region attribute
*/
int arc_core_mpu_region(u32_t index, u32_t base, u32_t size,
u32_t region_attr)
{
if (index >= get_num_regions()) {
return -EINVAL;
}
region_attr &= AUX_MPU_RPER_ATTR_MASK;
_region_init(index, base, size, region_attr);
return 0;
}
#if defined(CONFIG_USERSPACE)
/**
* @brief configure MPU regions for the memory partitions of the memory domain
*
* @param thread the thread which has memory domain
*/
void arc_core_mpu_configure_mem_domain(struct k_thread *thread)
{
arc_core_mpu_configure_thread(thread);
}
/**
* @brief remove MPU regions for the memory partitions of the memory domain
*
* @param mem_domain the target memory domain
*/
void arc_core_mpu_remove_mem_domain(struct k_mem_domain *mem_domain)
{
u32_t num_partitions;
struct k_mem_partition *pparts;
int index;
if (mem_domain) {
LOG_DBG("configure domain: %p", mem_domain);
num_partitions = mem_domain->num_partitions;
pparts = mem_domain->partitions;
} else {
LOG_DBG("disable domain partition regions");
num_partitions = 0U;
pparts = NULL;
}
for (u32_t i = 0; i < num_partitions; i++) {
if (pparts->size) {
index = _get_region_index(pparts->start,
pparts->size);
if (index > 0) {
_region_set_attr(index,
REGION_KERNEL_RAM_ATTR);
}
}
pparts++;
}
}
/**
* @brief reset MPU region for a single memory partition
*
* @param partition_id memory partition id
*/
void arc_core_mpu_remove_mem_partition(struct k_mem_domain *domain,
u32_t partition_id)
{
struct k_mem_partition *partition = &domain->partitions[partition_id];
int region_index = _get_region_index(partition->start,
partition->size);
if (region_index < 0) {
return;
}
LOG_DBG("remove region 0x%x", region_index);
_region_set_attr(region_index, REGION_KERNEL_RAM_ATTR);
}
/**
* @brief get the maximum number of free regions for memory domain partitions
*/
int arc_core_mpu_get_max_domain_partition_regions(void)
{
/* consider the worst case: each partition requires a split */
return (get_num_regions() - MPU_REGION_NUM_FOR_THREAD) / 2;
}
/**
* @brief validate whether the given buffer is user-accessible
*/
int arc_core_mpu_buffer_validate(void *addr, size_t size, int write)
{
int r_index;
/*
* For ARC MPU v3, region overlap is not supported, so a single probe
* tells us which region (if any) covers the buffer, and that region
* alone grants permission or denies access.
*/
r_index = _mpu_probe((u32_t)addr);
/* match and the area is in one region */
if (r_index >= 0 && r_index == _mpu_probe((u32_t)addr + (size - 1))) {
if (_is_user_accessible_region(r_index, write)) {
return 0;
} else {
return -EPERM;
}
}
return -EPERM;
}
#endif /* CONFIG_USERSPACE */
/* ARC MPU Driver Initial Setup */
/*
* @brief MPU default initialization and configuration
*
* This function provides the default configuration mechanism for the Memory
* Protection Unit (MPU).
*/
static int arc_mpu_init(struct device *arg)
{
ARG_UNUSED(arg);
u32_t num_regions;
u32_t i;
num_regions = get_num_regions();
/* ARC MPU supports up to 16 Regions */
if (mpu_config.num_regions > num_regions) {
__ASSERT(0,
"Request to configure: %u regions (supported: %u)\n",
mpu_config.num_regions, num_regions);
return -EINVAL;
}
/* Disable MPU */
arc_core_mpu_disable();
for (i = 0U; i < mpu_config.num_regions; i++) {
_region_init(i,
mpu_config.mpu_regions[i].base,
mpu_config.mpu_regions[i].size,
mpu_config.mpu_regions[i].attr);
/* record the static region which can be split */
if (mpu_config.mpu_regions[i].attr & REGION_DYNAMIC) {
if (dynamic_regions_num >=
MPU_DYNAMIC_REGION_AREAS_NUM) {
LOG_ERR("not enough dynamic regions %d",
dynamic_regions_num);
return -EINVAL;
}
dyn_reg_info[dynamic_regions_num].index = i;
dyn_reg_info[dynamic_regions_num].base =
mpu_config.mpu_regions[i].base;
dyn_reg_info[dynamic_regions_num].size =
mpu_config.mpu_regions[i].size;
dyn_reg_info[dynamic_regions_num].attr =
mpu_config.mpu_regions[i].attr;
dynamic_regions_num++;
}
}
static_regions_num = mpu_config.num_regions;
for (; i < num_regions; i++) {
_region_init(i, 0, 0, 0);
}
/* Enable MPU */
arc_core_mpu_enable();
return 0;
}
SYS_INIT(arc_mpu_init, PRE_KERNEL_1,
CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
#endif /* ZEPHYR_ARCH_ARC_CORE_MPU_ARC_MPU_V3_INTERNAL_H_ */
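The splitting logic in _dynamic_region_allocate_and_init() above can be restated as a tiny standalone C sketch. It only counts how many leftover pieces of the underlying region survive around a new window; the function name and the half-open interval treatment are illustrative, not part of the driver.

#include <stdint.h>
#include <stdio.h>

/* How a new window [base, base+size) carves an underlying region
 * [u_start, u_end): with no leftover piece the new window simply reuses
 * the underlying MPU entry; with one leftover piece that piece keeps the
 * underlying entry and the window needs a fresh dynamic entry; with two
 * leftover pieces the right-hand piece needs one more fresh entry.
 */
static int leftover_pieces(uint32_t base, uint32_t size,
			   uint32_t u_start, uint32_t u_end)
{
	uint32_t end = base + size;
	int pieces = 0;

	if (base > u_start) {
		pieces++;	/* [u_start, base) keeps the old attributes */
	}
	if (end < u_end) {
		pieces++;	/* [end, u_end) keeps the old attributes */
	}
	return pieces;
}

int main(void)
{
	printf("%d\n", leftover_pieces(0x1000, 0x100, 0x1000, 0x2000)); /* 1 */
	printf("%d\n", leftover_pieces(0x1400, 0x100, 0x1000, 0x2000)); /* 2 */
	return 0;
}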


@@ -1,19 +1,30 @@
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
* @brief ARCv2 kernel structure member offset definition file
* @brief ARCv2 nano kernel structure member offset definition file
*
* This module is responsible for the generation of the absolute symbols whose
* value represents the member offsets for various ARCv2 kernel structures.
* value represents the member offsets for various ARCv2 nanokernel
* structures.
*
* All of the absolute symbols defined by this module will be present in the
* final kernel ELF image (due to the linker's reference to the _OffsetAbsSyms
* symbol).
* final microkernel or nanokernel ELF image (due to the linker's reference to
* the _OffsetAbsSyms symbol).
*
* INTERNAL
* It is NOT necessary to define the offset for every member of a structure.
@@ -22,96 +33,75 @@
* completeness.
*/
#include <kernel.h>
#include <kernel_arch_data.h>
#include <gen_offset.h>
#include <kernel_offsets.h>
#include <nano_private.h>
#include <nano_offsets.h>
GEN_OFFSET_SYM(_thread_arch_t, relinquish_cause);
/* ARCv2-specific tNANO structure member offsets */
GEN_OFFSET_SYM(tNANO, rirq_sp);
#ifdef CONFIG_SYS_POWER_MANAGEMENT
GEN_OFFSET_SYM(tNANO, idle);
#endif
/* ARCv2-specific struct tcs structure member offsets */
GEN_OFFSET_SYM(tTCS, intlock_key);
GEN_OFFSET_SYM(tTCS, relinquish_cause);
GEN_OFFSET_SYM(tTCS, return_value);
#ifdef CONFIG_ARC_STACK_CHECKING
GEN_OFFSET_SYM(_thread_arch_t, k_stack_base);
GEN_OFFSET_SYM(_thread_arch_t, k_stack_top);
#ifdef CONFIG_USERSPACE
GEN_OFFSET_SYM(_thread_arch_t, u_stack_base);
GEN_OFFSET_SYM(_thread_arch_t, u_stack_top);
GEN_OFFSET_SYM(tTCS, stack_top);
#endif
#ifdef CONFIG_THREAD_CUSTOM_DATA
GEN_OFFSET_SYM(tTCS, custom_data);
#endif
/* ARCv2-specific IRQ stack frame structure member offsets */
GEN_OFFSET_SYM(_isf_t, r0);
GEN_OFFSET_SYM(_isf_t, r1);
GEN_OFFSET_SYM(_isf_t, r2);
GEN_OFFSET_SYM(_isf_t, r3);
GEN_OFFSET_SYM(_isf_t, r4);
GEN_OFFSET_SYM(_isf_t, r5);
GEN_OFFSET_SYM(_isf_t, r6);
GEN_OFFSET_SYM(_isf_t, r7);
GEN_OFFSET_SYM(_isf_t, r8);
GEN_OFFSET_SYM(_isf_t, r9);
GEN_OFFSET_SYM(_isf_t, r10);
GEN_OFFSET_SYM(_isf_t, r11);
GEN_OFFSET_SYM(_isf_t, r12);
GEN_OFFSET_SYM(_isf_t, r13);
GEN_OFFSET_SYM(_isf_t, blink);
GEN_OFFSET_SYM(_isf_t, lp_end);
GEN_OFFSET_SYM(_isf_t, lp_start);
GEN_OFFSET_SYM(_isf_t, lp_count);
#ifdef CONFIG_CODE_DENSITY
GEN_OFFSET_SYM(_isf_t, ei_base);
GEN_OFFSET_SYM(_isf_t, ldi_base);
GEN_OFFSET_SYM(_isf_t, jli_base);
#endif
GEN_OFFSET_SYM(_isf_t, pc);
#ifdef CONFIG_ARC_HAS_SECURE
GEN_OFFSET_SYM(_isf_t, sec_stat);
#endif
GEN_OFFSET_SYM(_isf_t, status32);
GEN_ABSOLUTE_SYM(___isf_t_SIZEOF, sizeof(_isf_t));
GEN_OFFSET_SYM(tISF, r0);
GEN_OFFSET_SYM(tISF, r1);
GEN_OFFSET_SYM(tISF, r2);
GEN_OFFSET_SYM(tISF, r3);
GEN_OFFSET_SYM(tISF, r4);
GEN_OFFSET_SYM(tISF, r5);
GEN_OFFSET_SYM(tISF, r6);
GEN_OFFSET_SYM(tISF, r7);
GEN_OFFSET_SYM(tISF, r8);
GEN_OFFSET_SYM(tISF, r9);
GEN_OFFSET_SYM(tISF, r10);
GEN_OFFSET_SYM(tISF, r11);
GEN_OFFSET_SYM(tISF, r12);
GEN_OFFSET_SYM(tISF, r13);
GEN_OFFSET_SYM(tISF, blink);
GEN_OFFSET_SYM(tISF, lp_end);
GEN_OFFSET_SYM(tISF, lp_start);
GEN_OFFSET_SYM(tISF, lp_count);
GEN_OFFSET_SYM(tISF, pc);
GEN_OFFSET_SYM(tISF, status32);
GEN_ABSOLUTE_SYM(__tISF_SIZEOF, sizeof(tISF));
GEN_OFFSET_SYM(_callee_saved_t, sp);
GEN_ABSOLUTE_SYM(___callee_saved_t_SIZEOF, sizeof(_callee_saved_t));
/* ARCv2-specific preempt registers structure member offsets */
GEN_OFFSET_SYM(tPreempt, sp);
GEN_ABSOLUTE_SYM(__tPreempt_SIZEOF, sizeof(tPreempt));
GEN_OFFSET_SYM(_callee_saved_stack_t, r13);
GEN_OFFSET_SYM(_callee_saved_stack_t, r14);
GEN_OFFSET_SYM(_callee_saved_stack_t, r15);
GEN_OFFSET_SYM(_callee_saved_stack_t, r16);
GEN_OFFSET_SYM(_callee_saved_stack_t, r17);
GEN_OFFSET_SYM(_callee_saved_stack_t, r18);
GEN_OFFSET_SYM(_callee_saved_stack_t, r19);
GEN_OFFSET_SYM(_callee_saved_stack_t, r20);
GEN_OFFSET_SYM(_callee_saved_stack_t, r21);
GEN_OFFSET_SYM(_callee_saved_stack_t, r22);
GEN_OFFSET_SYM(_callee_saved_stack_t, r23);
GEN_OFFSET_SYM(_callee_saved_stack_t, r24);
GEN_OFFSET_SYM(_callee_saved_stack_t, r25);
GEN_OFFSET_SYM(_callee_saved_stack_t, r26);
GEN_OFFSET_SYM(_callee_saved_stack_t, fp);
#ifdef CONFIG_USERSPACE
#ifdef CONFIG_ARC_HAS_SECURE
GEN_OFFSET_SYM(_callee_saved_stack_t, kernel_sp);
GEN_OFFSET_SYM(_callee_saved_stack_t, user_sp);
#else
GEN_OFFSET_SYM(_callee_saved_stack_t, user_sp);
#endif
#endif
GEN_OFFSET_SYM(_callee_saved_stack_t, r30);
#ifdef CONFIG_ARC_HAS_ACCL_REGS
GEN_OFFSET_SYM(_callee_saved_stack_t, r58);
GEN_OFFSET_SYM(_callee_saved_stack_t, r59);
#endif
#ifdef CONFIG_FP_SHARING
GEN_OFFSET_SYM(_callee_saved_stack_t, fpu_status);
GEN_OFFSET_SYM(_callee_saved_stack_t, fpu_ctrl);
#ifdef CONFIG_FP_FPU_DA
GEN_OFFSET_SYM(_callee_saved_stack_t, dpfp2h);
GEN_OFFSET_SYM(_callee_saved_stack_t, dpfp2l);
GEN_OFFSET_SYM(_callee_saved_stack_t, dpfp1h);
GEN_OFFSET_SYM(_callee_saved_stack_t, dpfp1l);
#endif
/* ARCv2-specific callee-saved stack */
GEN_OFFSET_SYM(tCalleeSaved, r13);
GEN_OFFSET_SYM(tCalleeSaved, r14);
GEN_OFFSET_SYM(tCalleeSaved, r15);
GEN_OFFSET_SYM(tCalleeSaved, r16);
GEN_OFFSET_SYM(tCalleeSaved, r17);
GEN_OFFSET_SYM(tCalleeSaved, r18);
GEN_OFFSET_SYM(tCalleeSaved, r19);
GEN_OFFSET_SYM(tCalleeSaved, r20);
GEN_OFFSET_SYM(tCalleeSaved, r21);
GEN_OFFSET_SYM(tCalleeSaved, r22);
GEN_OFFSET_SYM(tCalleeSaved, r23);
GEN_OFFSET_SYM(tCalleeSaved, r24);
GEN_OFFSET_SYM(tCalleeSaved, r25);
GEN_OFFSET_SYM(tCalleeSaved, r26);
GEN_OFFSET_SYM(tCalleeSaved, fp);
GEN_OFFSET_SYM(tCalleeSaved, r30);
GEN_ABSOLUTE_SYM(__tCalleeSaved_SIZEOF, sizeof(tCalleeSaved));
#endif
GEN_ABSOLUTE_SYM(___callee_saved_stack_t_SIZEOF, sizeof(_callee_saved_stack_t));
GEN_ABSOLUTE_SYM(_K_THREAD_NO_FLOAT_SIZEOF, sizeof(struct k_thread));
/* size of the struct tcs structure sans save area for floating point regs */
GEN_ABSOLUTE_SYM(__tTCS_NOFLOAT_SIZEOF, sizeof(tTCS));
GEN_ABS_SYM_END
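The generated symbols above are nothing more exotic than structure member offsets exported by name so that hand-written assembly (e.g. swap_macros.h) can address C structure fields. A plain-C illustration of what such an offset represents, using a made-up structure:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct isf_like {		/* hypothetical stand-in for the real ISF */
	uint32_t r0;
	uint32_t r1;
	uint32_t blink;
};

int main(void)
{
	/* A GEN_OFFSET_SYM-style macro ultimately emits an absolute
	 * assembler symbol carrying exactly this value.
	 */
	printf("__isf_like_blink_OFFSET = %zu\n",
	       offsetof(struct isf_like, blink));
	return 0;
}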


@@ -1,7 +1,17 @@
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
@@ -10,22 +20,19 @@
*
*
* Initialization of full C support: zero the .bss, copy the .data if XIP,
* call z_cstart().
* call _Cstart().
*
* Stack is available in this module, but not the global data/bss until their
* initialization is performed.
*/
#include <zephyr/types.h>
#include <stdint.h>
#include <toolchain.h>
#include <linker/linker-defs.h>
#include <linker-defs.h>
#include <arch/arc/v2/aux_regs.h>
#include <kernel_structs.h>
#include <kernel_internal.h>
#include <nano_internal.h>
/* XXX - keep for future use in full-featured cache APIs */
#if 0
/**
*
* @brief Disable the i-cache if present
@@ -40,14 +47,14 @@ static void disable_icache(void)
{
unsigned int val;
val = z_arc_v2_aux_reg_read(_ARC_V2_I_CACHE_BUILD);
val = _arc_v2_aux_reg_read(_ARC_V2_I_CACHE_BUILD);
val &= 0xff; /* version field */
if (val == 0) {
return; /* skip if i-cache is not present */
}
z_arc_v2_aux_reg_write(_ARC_V2_IC_IVIC, 0);
_arc_v2_aux_reg_write(_ARC_V2_IC_IVIC, 0);
__asm__ __volatile__ ("nop");
z_arc_v2_aux_reg_write(_ARC_V2_IC_CTRL, 1);
_arc_v2_aux_reg_write(_ARC_V2_IC_CTRL, 1);
}
/**
@@ -64,16 +71,44 @@ static void invalidate_dcache(void)
{
unsigned int val;
val = z_arc_v2_aux_reg_read(_ARC_V2_D_CACHE_BUILD);
val = _arc_v2_aux_reg_read(_ARC_V2_D_CACHE_BUILD);
val &= 0xff; /* version field */
if (val == 0) {
return; /* skip if d-cache is not present */
}
z_arc_v2_aux_reg_write(_ARC_V2_DC_IVDC, 1);
_arc_v2_aux_reg_write(_ARC_V2_DC_IVDC, 1);
}
#endif
extern FUNC_NORETURN void z_cstart(void);
/**
*
* @brief Adjust the vector table base
*
* Set the vector table base if the value found in the
* _ARC_V2_IRQ_VECT_BASE auxiliary register is different from the
* _VectorTable known by software. It is important to do this very early
* so that exception vectors can be handled.
*
* @return N/A
*/
static void adjust_vector_table_base(void)
{
extern struct vector_table _VectorTable;
unsigned int vbr;
/* if the compiled-in vector table is different
* from the base address known by the ARC CPU,
* set the vector base to the compiled-in address.
*/
vbr = _arc_v2_aux_reg_read(_ARC_V2_IRQ_VECT_BASE);
vbr &= 0xfffffc00;
if (vbr != (unsigned int)&_VectorTable) {
_arc_v2_aux_reg_write(_ARC_V2_IRQ_VECT_BASE,
(unsigned int)&_VectorTable);
}
}
extern FUNC_NORETURN void _Cstart(void);
/**
*
* @brief Prepare to and run C code
@@ -85,9 +120,11 @@ extern FUNC_NORETURN void z_cstart(void);
void _PrepC(void)
{
z_icache_setup();
z_bss_zero();
z_data_copy();
z_cstart();
disable_icache();
invalidate_dcache();
adjust_vector_table_base();
_bss_zero();
_data_copy();
_Cstart();
CODE_UNREACHABLE;
}
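As a rough sketch of what the bss-zero and data-copy steps called from _PrepC() boil down to (the linker symbol names here are illustrative; the real ones come from the linker-defs header):

#include <string.h>

extern char __bss_start[], __bss_end[];
extern char __data_rom_start[], __data_ram_start[], __data_ram_end[];

static void bss_zero(void)
{
	memset(__bss_start, 0, __bss_end - __bss_start);
}

static void data_copy(void)	/* only needed when executing in place (XIP) */
{
	memcpy(__data_ram_start, __data_rom_start,
	       __data_ram_end - __data_ram_start);
}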


@@ -1,7 +1,17 @@
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
@@ -14,190 +24,26 @@
* See isr_wrapper.S for details.
*/
#include <kernel_structs.h>
#include <offsets_short.h>
#define _ASMLANGUAGE
#include <nano_private.h>
#include <offsets.h>
#include <toolchain.h>
#include <linker/sections.h>
#include <arch/cpu.h>
#include <swap_macros.h>
#include "swap_macros.h"
GTEXT(_rirq_enter)
GTEXT(_rirq_exit)
GTEXT(_rirq_common_interrupt_swap)
#if 0 /* TODO: when FIRQ is not present, all would be regular */
#define NUM_REGULAR_IRQ_PRIO_LEVELS CONFIG_NUM_IRQ_PRIO_LEVELS
#else
#define NUM_REGULAR_IRQ_PRIO_LEVELS (CONFIG_NUM_IRQ_PRIO_LEVELS-1)
#endif
/* note: the above define assumes that prio 0 IRQ is for FIRQ, and
* that all others are regular interrupts.
* TODO: Revisit this if FIRQ becomes configurable.
*/
#if CONFIG_NUM_IRQ_PRIO_LEVELS > 2
#error "NUM_IRQ_PRIO_LEVELS>2 is not supported."
/*
* Nesting of regular interrupts is not yet supported.
* If your SoC supports more than 2 priority levels, set this value to 2.
*/
#endif
===========================================================
RETURN FROM INTERRUPT TO COOPERATIVE THREAD
===========================================================
That's a special case because:
1. We return from the IRQ handler to a cooperative thread
2. A context switch did happen during IRQ handling
3. We are returning to a thread which previously gave control
to another thread because of:
- Calling k_sleep()
- Explicitly yielding
- Bumping into a locked sync primitive, etc.
What (3) means is that, before passing control to another thread, our thread
in question:
a. Stashed all precious caller-saved registers on its stack
b. Pushed the return address to the top of the stack as well
This is what the thread's stack looks like right before jumping to another thread:
----------------------------->8---------------------------------
PRE-CONTEXT-SWITCH STACK
lower_addr, let's say: 0x1000
--------------------------------------
SP -> | Return address; PC (Program Counter), in fact value taken from
| BLINK register in arch_switch()
--------------------------------------
| STATUS32 value, we explicitly save it here for later usage, read-on
--------------------------------------
| Caller-saved registers: some of R0-R12
--------------------------------------
|...
|...
higher_addr, let's say: 0x2000
----------------------------->8---------------------------------
When the context gets switched, the kernel saves the callee-saved registers on
the thread's stack, right on top of the pre-switch contents, so this is what we have:
----------------------------->8---------------------------------
POST-CONTEXT-SWITCH STACK
lower_addr, let's say: 0x1000
--------------------------------------
SP -> | Callee-saved registers: see struct _callee_saved_stack{}
| |- R13
| |- R14
| | ...
| \- FP
| ...
--------------------------------------
| Return address; PC (Program Counter)
--------------------------------------
| STATUS32 value
--------------------------------------
| Caller-saved registers: some of R0-R12
--------------------------------------
|...
|...
higher_addr, let's say: 0x2000
----------------------------->8---------------------------------
So how do we return in such a complex scenario?
First we restore the callee-saved regs with the help of _load_callee_saved_regs().
Now we're back to the PRE-CONTEXT-SWITCH STACK (see above).
Logically our next step is to load the return address from the top of the stack
and jump to that address to continue execution of the desired thread, but
we're still in interrupt handling mode and the only way to return to normal
execution mode is to execute the "rtie" instruction. And here we need to deal
with the peculiarities of returning from an IRQ on ARCv2 cores.
Instead of simply jumping to a return address stored at the tip of the thread's
stack (with a subsequent interrupt enable), the ARCv2 core additionally and
automatically restores some registers from the stack. The most important ones
are PC ("Program Counter"), which holds the address of the next instruction to
execute, and STATUS32, which holds important flags including global interrupt
enable, zero, carry, etc.
To make things worse, depending on the ARC core configuration and the run-time
setup of certain features, a different set of registers will be restored.
Typically those same registers are automatically saved on the stack on entry
to an interrupt, but remember that we're returning to a thread which was not
interrupted by an interrupt, so there are no automatically saved registers on
its stack; still, register restoration will inevitably happen when RTIE
executes. So if we do nothing special we'll end up with this:
----------------------------->8---------------------------------
lower_addr, let's say: 0x1000
--------------------------------------
# | Return address; PC (Program Counter)
| --------------------------------------
| | STATUS32 value
| --------------------------------------
|
sizeof(_irq_stack_frame)
|
| | Caller-saved registers: R0-R12
V --------------------------------------
|...
SP -> | < Some data on thread's stack>
|...
higher_addr, let's say: 0x2000
----------------------------->8---------------------------------
I.e. we'd walk much deeper down the stack, past the needed return address, read
some value from an unexpected location on the stack and try to jump there.
Nobody knows where we'd end up then.
To work around that problem we need to mimic the existence of an IRQ stack
frame, of which we really only need the return address, so that we return where
we need to. For that we just shift SP so that it points sizeof(_irq_stack_frame)
above, like this:
----------------------------->8---------------------------------
lower_addr, let's say: 0x1000
SP -> |
A | < Some unrelated data >
| |
|
sizeof(_irq_stack_frame)
|
| --------------------------------------
| | Return address; PC (Program Counter)
| --------------------------------------
# | STATUS32 value
--------------------------------------
| Caller-saved registers: R0-R12
--------------------------------------
|...
| < Some data on thread's stack>
|...
higher_addr, let's say: 0x2000
----------------------------->8---------------------------------
Indeed R0-R13 "restored" from IRQ stack frame will contain garbage but
it makes no difference because we're returning to execution of code as if
we're returning from yet another function call and so we will restore
all needed registers from the stack.
One other important remark here is R13.
CPU hardware automatically save/restore registers in pairs and since we
wanted to save/restore R12 in IRQ stack frame as a caller-saved register we
just happen to do that for R13 as well. But given compiler treats it as
a callee-saved register we save/restore it separately in _callee_saved_stack
structure. And when we restore callee-saved registers from stack we among
other registers recover R13. But later on return from IRQ with RTIE
instruction, R13 will be "restored" again from fake IRQ stack frame and
if we don't copy correct R13 value to fake IRQ stack frame R13 value
will be corrupted.
*/
/**
*
@@ -214,33 +60,20 @@ will be corrupted.
SECTION_FUNC(TEXT, _rirq_enter)
mov r1, _nanokernel
#ifdef CONFIG_ARC_STACK_CHECKING
#ifdef CONFIG_ARC_SECURE_FIRMWARE
lr r2, [_ARC_V2_SEC_STAT]
bclr r2, r2, _ARC_V2_SEC_STAT_SSC_BIT
sflag r2
#else
/* disable stack checking */
lr r2, [_ARC_V2_STATUS32]
bclr r2, r2, _ARC_V2_STATUS32_SC_BIT
kflag r2
#endif
ld_s r2, [r1, __tNANO_current_OFFSET]
#if CONFIG_NUM_REGULAR_IRQ_PRIO_LEVELS == 1
st sp, [r2, __tTCS_preempReg_OFFSET + __tPreempt_sp_OFFSET]
ld sp, [r1, __tNANO_rirq_sp_OFFSET]
#else
#error regular irq nesting is not implemented
#endif
clri
/* check whether irq stack is used */
_check_and_inc_int_nest_counter r0, r1
bne.d rirq_nest
mov_s r0, sp
_get_curr_cpu_irq_stack sp
rirq_nest:
push_s r0
seti
j _isr_demux
@@ -252,76 +85,72 @@ rirq_nest:
*/
SECTION_FUNC(TEXT, _rirq_exit)
clri
pop sp
mov r1, _nanokernel
ld_s r2, [r1, __tNANO_current_OFFSET]
_dec_int_nest_counter r0, r1
#if CONFIG_NUM_REGULAR_IRQ_PRIO_LEVELS > 1
/* check if we're a nested interrupt: if so, let the interrupted interrupt
* handle the reschedule */
_check_nest_int_by_irq_act r0, r1
lr r3, [_ARC_V2_AUX_IRQ_ACT]
ffs r0, r3
jne _rirq_no_reschedule
asl r0, 1, r0
#ifdef CONFIG_STACK_SENTINEL
bl z_check_stack_sentinel
/* the OS on ARCv2 always runs in kernel mode, so assume bit31 [U] in
* AUX_IRQ_ACT is always 0: if the contents of AUX_IRQ_ACT is greater
* than FFS(AUX_IRQ_ACT), it means that another bit is set so an
* interrupt was interrupted.
*/
cmp r0, r3
brgt _rirq_return_from_rirq
ld sp, [r2, __tTCS_preempReg_OFFSET + __tPreempt_sp_OFFSET]
#endif
#ifdef CONFIG_PREEMPT_ENABLED
#ifdef CONFIG_SMP
bl z_arc_smp_switch_in_isr
/* r0 points to new thread, r1 points to old thread */
cmp_s r0, 0
beq _rirq_no_reschedule
mov_s r2, r1
#else
mov_s r1, _kernel
ld_s r2, [r1, _kernel_offset_to_current]
/*
* Both (a)reschedule and (b)non-reschedule cases need to load the
* current thread's stack, but don't have to use it until the decision
* is taken: load the delay slots with the 'load stack pointer'
* instruction.
* Both (a)reschedule and (b)non-reschedule cases need to load the current
* thread's stack, but don't have to use it until the decision is taken:
* load the delay slots with the 'load stack pointer' instruction.
*
* a) needs to load it to save outgoing context.
* b) needs to load it to restore the interrupted context.
*/
/* check if the current thread needs to be rescheduled */
ld_s r0, [r1, _kernel_offset_to_ready_q_cache]
cmp_s r0, r2
beq _rirq_no_reschedule
ld_s r0, [r2, __tTCS_flags_OFFSET]
and.f r0, r0, PREEMPTIBLE
bz.d _rirq_no_reschedule
ld sp, [r2, __tTCS_preempReg_OFFSET + __tPreempt_sp_OFFSET]
ld_s r0, [r1, __tNANO_fiber_OFFSET] /* incoming fiber in r0 */
cmp r0, 0
bz.d _rirq_no_reschedule
ld sp, [r2, __tTCS_preempReg_OFFSET + __tPreempt_sp_OFFSET]
/* cached thread to run is in r0, fall through */
#endif
.balign 4
_rirq_reschedule:
#ifdef CONFIG_ARC_SECURE_FIRMWARE
/* here need to remember SEC_STAT.IRM bit */
lr r3, [_ARC_V2_SEC_STAT]
push_s r3
#endif
/* _save_callee_saved_regs expects outgoing thread in r2 */
_save_callee_saved_regs
st _CAUSE_RIRQ, [r2, _thread_offset_to_relinquish_cause]
st _CAUSE_RIRQ, [r2, __tTCS_relinquish_cause_OFFSET]
#ifdef CONFIG_SMP
mov_s r2, r0
#else
/* incoming thread is in r0: it becomes the new 'current' */
mov_s r2, r0
st_s r2, [r1, _kernel_offset_to_current]
#endif
/* incoming fiber is in r0: it becomes the new 'current' */
mov r2, r0
st_s r2, [r1, __tNANO_current_OFFSET]
ld_s r3, [r2, __tTCS_link_OFFSET]
st_s r3, [r1, __tNANO_fiber_OFFSET]
.balign 4
_rirq_common_interrupt_swap:
/* r2 contains pointer to new thread */
#ifdef CONFIG_ARC_STACK_CHECKING
_load_stack_check_regs
/* Use stack top and down registers from restored context */
add r3, r2, __tTCS_NOFLOAT_SIZEOF
sr r3, [_ARC_V2_KSTACK_TOP]
ld_s r3, [r2, __tTCS_stack_top_OFFSET]
sr r3, [_ARC_V2_KSTACK_BASE]
#endif
/*
* _load_callee_saved_regs expects incoming thread in r2.
@@ -329,59 +158,36 @@ _rirq_common_interrupt_swap:
*/
_load_callee_saved_regs
#if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)
push_s r2
mov_s r0, r2
bl configure_mpu_thread
pop_s r2
#endif
#if defined(CONFIG_USERSPACE)
/*
* When USERSPACE is enabled, according to the ARCv2 ISA, the SP will be
* switched if the interrupt comes in while in user mode, and this is
* recorded in bit 31 (the U bit) of IRQ_ACT. When the interrupt exits,
* the SP is switched back according to the U bit.
*
* If the context switches inside the interrupt, the target SP must be the
* thread's kernel stack and no hardware SP switch is needed, so the U bit
* should be cleared.
*/
lr r0, [_ARC_V2_AUX_IRQ_ACT]
bclr r0, r0, 31
sr r0, [_ARC_V2_AUX_IRQ_ACT]
#endif
ld r3, [r2, _thread_offset_to_relinquish_cause]
ld_s r3, [r2, __tTCS_relinquish_cause_OFFSET]
breq r3, _CAUSE_RIRQ, _rirq_return_from_rirq
nop_s
nop
breq r3, _CAUSE_FIRQ, _rirq_return_from_firq
nop_s
nop
/* fall through */
.balign 4
_rirq_return_from_coop:
#ifdef CONFIG_ARC_SECURE_FIRMWARE
/* must return to secure mode, so set IRM bit to 1 */
lr r0, [_ARC_V2_SEC_STAT]
bset r0, r0, _ARC_V2_SEC_STAT_IRM_BIT
sflag r0
#endif
/* status32 and pc (blink) are already on the stack in the right order */
/*
* See verbose explanation of
* RETURN FROM INTERRUPT TO COOPERATIVE THREAD above
*/
/* update status32.ie (explanation in firq_exit:_firq_return_from_coop) */
ld_s r0, [sp, 4]
ld_s r3, [r2, __tTCS_intlock_key_OFFSET]
st 0, [r2, __tTCS_intlock_key_OFFSET]
cmp r3, 0
or.ne r0, r0, _ARC_V2_STATUS32_IE
st_s r0, [sp, 4]
/* carve fake stack */
sub sp, sp, ___isf_t_pc_OFFSET
sub sp, sp, (__tISF_SIZEOF - 12) /* a) status32/pc are already on the stack
* b) a real value will be pushed in r0 */
/* reset zero-overhead loops */
st 0, [sp, ___isf_t_lp_end_OFFSET]
/* push return value on stack */
ld_s r0, [r2, __tTCS_return_value_OFFSET]
push_s r0
/*
* r13 is part of both the callee and caller-saved register sets because
@@ -389,22 +195,21 @@ _rirq_return_from_coop:
* IRQ prologue. r13 thus has to be set to its correct value in the IRQ
* stack frame.
*/
st_s r13, [sp, ___isf_t_r13_OFFSET]
st_s r13, [sp, __tISF_r13_OFFSET]
/* stack now has the IRQ stack frame layout, pointing to sp */
/* rtie will pop the rest from the stack */
rtie
/* stack now has the IRQ stack frame layout, pointing to r0 */
#endif /* CONFIG_PREEMPT_ENABLED */
/* fall through to rtie instruction */
.balign 4
_rirq_return_from_firq:
_rirq_return_from_rirq:
#ifdef CONFIG_ARC_SECURE_FIRMWARE
/* here need to recover SEC_STAT.IRM bit */
pop_s r3
sflag r3
#endif
/* rtie will pop the rest from the stack */
/* fall through to rtie instruction */
.balign 4
_rirq_no_reschedule:
rtie


@@ -1,7 +1,17 @@
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
@@ -11,20 +21,19 @@
* Reset handler that prepares the system for running C code.
*/
#define _ASMLANGUAGE
// #include <board.h>
#include <toolchain.h>
#include <linker/sections.h>
#include <sections.h>
#include <arch/cpu.h>
#include <swap_macros.h>
GDATA(_interrupt_stack)
GDATA(z_main_stack)
GDATA(_VectorTable)
/* use one of the available interrupt stacks during init */
#define INIT_STACK _interrupt_stack
#define INIT_STACK_SIZE CONFIG_ISR_STACK_SIZE
#ifdef CONFIG_HARVARD
#define _TOP_OF_MEMORY (CONFIG_DCCM_BASE_ADDRESS + CONFIG_DCCM_SIZE * 1024)
/* harvard places the initial stack in the dccm memory */
#else
#define _TOP_OF_MEMORY (CONFIG_SRAM_BASE_ADDRESS + CONFIG_SRAM_SIZE * 1024)
#endif
GTEXT(__reset)
GTEXT(__start)
@@ -45,131 +54,11 @@ GTEXT(__start)
SECTION_FUNC(TEXT,__reset)
SECTION_FUNC(TEXT,__start)
/* lock interrupts: will get unlocked when we switch to the main task;
* also make sure the processor is in the correct state
*/
mov_s r0, 0
kflag r0
#ifdef CONFIG_ARC_SECURE_FIRMWARE
sflag r0
#endif
/* lock interrupts: will get unlocked when we switch to the main task */
clri
#if defined(CONFIG_BOOT_TIME_MEASUREMENT) && defined(CONFIG_ARCV2_TIMER)
/*
* The ARCv2 timer (timer0) is a free-running timer; let it start
* counting here.
*/
mov_s r0, 0xffffffff
sr r0, [_ARC_V2_TMR0_LIMIT]
mov_s r0, 0
sr r0, [_ARC_V2_TMR0_COUNT]
#endif
/* interrupt related init */
#ifndef CONFIG_ARC_NORMAL_FIRMWARE
/* IRQ_ACT and IRQ_CTRL should be initialized and set in secure mode */
sr r0, [_ARC_V2_AUX_IRQ_ACT]
sr r0, [_ARC_V2_AUX_IRQ_CTRL]
#endif
sr r0, [_ARC_V2_AUX_IRQ_HINT]
/* set the vector table base early,
* so that exception vectors can be handled.
*/
mov_s r0, _VectorTable
#ifdef CONFIG_ARC_SECURE_FIRMWARE
sr r0, [_ARC_V2_IRQ_VECT_BASE_S]
#else
sr r0, [_ARC_V2_IRQ_VECT_BASE]
#endif
#if defined(CONFIG_USERSPACE)
lr r0, [_ARC_V2_STATUS32]
bset r0, r0, _ARC_V2_STATUS32_US_BIT
kflag r0
#endif
#ifdef CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS
lr r0, [_ARC_V2_STATUS32]
bset r0, r0, _ARC_V2_STATUS32_AD_BIT
kflag r0
#endif
mov_s r1, 1
invalidate_and_disable_icache:
lr r0, [_ARC_V2_I_CACHE_BUILD]
and.f r0, r0, 0xff
bz.nd invalidate_dcache
mov_s r2, 0
sr r2, [_ARC_V2_IC_IVIC]
/* writing to IC_IVIC needs 3 NOPs */
nop_s
nop_s
nop_s
sr r1, [_ARC_V2_IC_CTRL]
invalidate_dcache:
lr r3, [_ARC_V2_D_CACHE_BUILD]
and.f r3, r3, 0xff
bz.nd done_cache_invalidate
sr r1, [_ARC_V2_DC_IVDC]
done_cache_invalidate:
#if defined(CONFIG_SYS_POWER_DEEP_SLEEP_STATES) && \
!defined(CONFIG_BOOTLOADER_CONTEXT_RESTORE)
jl @_sys_resume_from_deep_sleep
#endif
#if CONFIG_MP_NUM_CPUS > 1
_get_cpu_id r0
breq r0, 0, _master_core_startup
/*
* Non-master cores wait until the master core (core 0) has booted far enough
*/
_slave_core_wait:
ld r1, [arc_cpu_wake_flag]
brne r0, r1, _slave_core_wait
ld sp, [arc_cpu_sp]
/* signal master core that slave core runs */
st 0, [arc_cpu_wake_flag]
#if defined(CONFIG_ARC_FIRQ_STACK)
jl z_arc_firq_stack_set
#endif
j z_arc_slave_start
_master_core_startup:
#endif
#ifdef CONFIG_INIT_STACKS
/*
* use the main stack to call memset on the interrupt stack and the
* FIRQ stack when CONFIG_INIT_STACKS is enabled before switching to
* one of them for the rest of the early boot
*/
mov_s sp, z_main_stack
add sp, sp, CONFIG_MAIN_STACK_SIZE
mov_s r0, _interrupt_stack
mov_s r1, 0xaa
mov_s r2, CONFIG_ISR_STACK_SIZE
jl memset
#endif /* CONFIG_INIT_STACKS */
mov_s sp, INIT_STACK
add sp, sp, INIT_STACK_SIZE
#if defined(CONFIG_ARC_FIRQ_STACK)
jl z_arc_firq_stack_set
#endif
/* setup a stack at the end of MEMORY */
mov sp, _TOP_OF_MEMORY
j @_PrepC


@@ -1,12 +0,0 @@
#
# Copyright (c) 2018 Synopsys, Inc. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
zephyr_library()
zephyr_library_sources(
arc_sjli.c
arc_secure.S
secure_sys_services.c
)


@@ -1,122 +0,0 @@
/*
* Copyright (c) 2018 Synopsys.
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <toolchain.h>
#include <linker/sections.h>
#include <arch/cpu.h>
.macro clear_scratch_regs
mov r1, 0
mov r2, 0
mov r3, 0
mov r4, 0
mov r5, 0
mov r6, 0
mov r7, 0
mov r8, 0
mov r9, 0
mov r10, 0
mov r11, 0
mov r12, 0
.endm
.macro clear_callee_regs
mov r25, 0
mov r24, 0
mov r23, 0
mov r22, 0
mov r21, 0
mov r20, 0
mov r19, 0
mov r18, 0
mov r17, 0
mov r16, 0
mov r15, 0
mov r14, 0
mov r13, 0
.endm
GTEXT(arc_go_to_normal)
GTEXT(_arc_do_secure_call)
GDATA(arc_s_call_table)
SECTION_FUNC(TEXT, _arc_do_secure_call)
/* r0-r5: arg1-arg6, r6 is call id */
/* the call id should be checked */
/* should normal interrupts that arrive while the processor is in secure mode be disabled? */
/* seti (0x30 | (ARC_N_IRQ_START_LEVEL-1)) */
breq r6, ARC_S_CALL_CLRI, _s_clri
breq r6, ARC_S_CALL_SETI, _s_seti
push_s blink
mov blink, arc_s_call_table
ld.as r6, [blink, r6]
jl [r6]
/*
* no need to clear callee regs, as they will be saved and restored
* automatically
*/
clear_scratch_regs
mov r29, 0
mov r30, 0
_arc_do_secure_call_exit:
pop_s blink
j [blink]
/* enable normal interrupt */
/*
* j.d [blink]
* seti (0x30 | (CONFIG_NUM_IRQ_PRIO_LEVELS - 1))
*/
_s_clri:
lr r0, [_ARC_V2_STATUS32]
and r0, r0, 0x1e
asr r0, r0
or r0, r0, 0x30
mov r6, (0x30 | (ARC_N_IRQ_START_LEVEL-1))
j.d [blink]
seti r6
_s_seti:
btst r0, 4
jnz __seti_0
mov r0, (CONFIG_NUM_IRQ_PRIO_LEVELS - 1)
lr r6, [_ARC_V2_STATUS32]
and r6, r6, 0x1e
asr r6, r6
cmp r0, r6
mov.hs r0, r6
__seti_0:
and r0, r0, 0xf
brhs r0, ARC_N_IRQ_START_LEVEL, __seti_1
mov r0, ARC_N_IRQ_START_LEVEL
__seti_1:
or r0, r0, 0x30
j.d [blink]
seti r0
SECTION_FUNC(TEXT, arc_go_to_normal)
clear_callee_regs
clear_scratch_regs
mov fp, 0
mov r29, 0
mov r30, 0
mov blink, 0
jl [r0]
/* should not come here */
kflag 1
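Leaving aside the special-cased seti/clri ids, the table dispatch performed by _arc_do_secure_call above is conceptually the following C (the handler signature and the bounds check are assumptions; the comment in the code notes that checking the call id is still open):

#include <stdint.h>

typedef int32_t (*s_call_handler_t)(uint32_t, uint32_t, uint32_t,
				    uint32_t, uint32_t, uint32_t);

static int32_t aux_read_stub(uint32_t a0, uint32_t a1, uint32_t a2,
			     uint32_t a3, uint32_t a4, uint32_t a5)
{
	(void)a0; (void)a1; (void)a2; (void)a3; (void)a4; (void)a5;
	return -1;	/* real handlers live in secure_sys_services.c */
}

static const s_call_handler_t call_table[] = { aux_read_stub };

/* r0-r5 carry the arguments, r6 carries the call id */
static int32_t do_secure_call(uint32_t id, uint32_t a0, uint32_t a1,
			      uint32_t a2, uint32_t a3, uint32_t a4,
			      uint32_t a5)
{
	if (id >= sizeof(call_table) / sizeof(call_table[0])) {
		return -1;	/* the id check the code comment asks for */
	}
	return call_table[id](a0, a1, a2, a3, a4, a5);
}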


@@ -1,68 +0,0 @@
/*
* Copyright (c) 2018 Synopsys, Inc. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <device.h>
#include <kernel.h>
#include <errno.h>
#include <zephyr/types.h>
#include <init.h>
#include <toolchain.h>
#include <arch/arc/v2/secureshield/arc_secure.h>
static void _default_sjli_entry(void);
/*
* sjli vector table must be in instruction space
* \todo: how to let the user install a customized sjli entry easily, e.g.
* through macros or with the help of the compiler?
*/
const static u32_t _sjli_vector_table[CONFIG_SJLI_TABLE_SIZE] = {
[0] = (u32_t)_arc_do_secure_call,
[1 ... (CONFIG_SJLI_TABLE_SIZE - 1)] = (u32_t)_default_sjli_entry,
};
/*
* @brief default entry for sjli calls
*
*/
static void _default_sjli_entry(void)
{
printk("default sjli entry\n");
}
/*
* @brief initialization of sjli-related functions
*
*/
static void sjli_table_init(void)
{
/* install SJLI table */
z_arc_v2_aux_reg_write(_ARC_V2_NSC_TABLE_BASE, _sjli_vector_table);
z_arc_v2_aux_reg_write(_ARC_V2_NSC_TABLE_TOP,
(_sjli_vector_table + CONFIG_SJLI_TABLE_SIZE));
}
/*
* @brief initialization of secureshield-related functions.
*/
static int arc_secureshield_init(struct device *arg)
{
sjli_table_init();
/* set the NIC bit to enable seti/clri and
* sleep/wevt in normal mode.
* If not set, a direct call of seti/clri etc. will raise an exception;
* those seti/clri instructions would then have to be replaced with
* secure services (sjli calls).
*
*/
__asm__ volatile("sflag 0x20");
return 0;
}
SYS_INIT(arc_secureshield_init, PRE_KERNEL_1,
CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
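One possible shape for the registration TODO above, purely as a hypothetical sketch (none of these names are an existing Zephyr API): with a table laid out like _sjli_vector_table, a user entry is just another designated initializer, and a macro could hide the cast and slot bookkeeping.

#include <stdint.h>

#define SJLI_TABLE_SIZE 8	/* stand-in for CONFIG_SJLI_TABLE_SIZE */

static void secure_call_gate(void) { }	/* stand-in for _arc_do_secure_call */
static void default_entry(void) { }	/* stand-in for _default_sjli_entry */
static void my_sjli_service(void) { }	/* user-provided secure service */

/* uintptr_t is used here for portability; the real table stores u32_t
 * function addresses on the 32-bit ARC target.
 */
static const uintptr_t my_sjli_table[SJLI_TABLE_SIZE] = {
	[0] = (uintptr_t)secure_call_gate,
	[1] = (uintptr_t)my_sjli_service,
	[2 ... (SJLI_TABLE_SIZE - 1)] = (uintptr_t)default_entry,
};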


@@ -1,89 +0,0 @@
/*
* Copyright (c) 2018 Synopsys, Inc. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <errno.h>
#include <kernel.h>
#include <arch/cpu.h>
#include <zephyr/types.h>
#include <soc.h>
#include <toolchain.h>
#include <arch/arc/v2/secureshield/arc_secure.h>
#define IRQ_PRIO_MASK (0xffff << ARC_N_IRQ_START_LEVEL)
/*
* @brief read secure auxiliary regs on behalf of normal mode
*
* @param aux_reg address of aux reg
*
* Some aux regs require secure privilege; this function implements
* a secure service to access secure aux regs. A check should be done
* to decide whether the access is valid.
*/
static s32_t arc_s_aux_read(u32_t aux_reg)
{
return -1;
}
/*
* @brief write secure auxiliary regs on behalf of normal mode
*
* @param aux_reg address of aux reg
* @param val the value to write
*
* Some aux regs require secure privilege; this function implements
* a secure service to access secure aux regs. A check should be done
* to decide whether the access is valid.
*/
static s32_t arc_s_aux_write(u32_t aux_reg, u32_t val)
{
if (aux_reg == _ARC_V2_AUX_IRQ_ACT) {
/* 0 -> CONFIG_NUM_IRQ_PRIO_LEVELS allocated to secure world
* left prio levels allocated to normal world
*/
val &= IRQ_PRIO_MASK;
z_arc_v2_aux_reg_write(_ARC_V2_AUX_IRQ_ACT, val |
(z_arc_v2_aux_reg_read(_ARC_V2_AUX_IRQ_ACT) &
(~IRQ_PRIO_MASK)));
return 0;
}
return -1;
}
/*
* @brief allocate an interrupt for the normal world
*
* @param intno the interrupt to be allocated to the normal world
*
* By default, most interrupts are configured as secure during initialization.
* If the normal world wants to use an interrupt, it requests one through this
* secure service. The necessary checks should be done to decide whether the
* request is valid.
*/
static s32_t arc_s_irq_alloc(u32_t intno)
{
z_arc_v2_irq_uinit_secure_set(intno, 0);
return 0;
}
/*
* \todo: to access the MPU from normal mode, a secure MPU service should be
* created. In that service, the parameters should be checked to make sure
* the operations are valid (e.g., that they do not overwrite the MPU regions
* of the secure world).
*/
/*
* \todo: how to add a secure service easily
*/
const _arc_s_call_handler_t arc_s_call_table[ARC_S_CALL_LIMIT] = {
[ARC_S_CALL_AUX_READ] = (_arc_s_call_handler_t)arc_s_aux_read,
[ARC_S_CALL_AUX_WRITE] = (_arc_s_call_handler_t)arc_s_aux_write,
[ARC_S_CALL_IRQ_ALLOC] = (_arc_s_call_handler_t)arc_s_irq_alloc,
};
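A worked example of the IRQ_PRIO_MASK filtering done in arc_s_aux_write() above, assuming ARC_N_IRQ_START_LEVEL is 2 (an example value): bits for priority levels below the start level are stripped from whatever the normal world requests.

#include <assert.h>
#include <stdint.h>

#define N_IRQ_START_LEVEL 2	/* example value of ARC_N_IRQ_START_LEVEL */
#define PRIO_MASK (0xffff << N_IRQ_START_LEVEL)

int main(void)
{
	uint32_t requested = 0x0007;		/* touches levels 0, 1 and 2 */
	uint32_t accepted  = requested & PRIO_MASK;

	assert(accepted == 0x0004);		/* only level 2 gets through */
	return 0;
}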


@@ -0,0 +1,64 @@
/* sw_isr_table.S - ISR table for static ISR declarations for ARC */
/*
* Copyright (c) 2015 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#define _ASMLANGUAGE
#include <toolchain.h>
#include <sections.h>
#include <arch/cpu.h>
/*
* enable preprocessor features, such
* as %expr - evaluate the expression and use it as a string
*/
.altmacro
/*
* Define an ISR table entry
* Define the symbol as weak and give the section the .gnu.linkonce
* prefix. This allows the linker to override the symbol and the
* whole section with the ones defined by a device driver.
*/
.macro _isr_table_entry_declare index
WDATA(_isr_irq\index)
.section .gnu.linkonce.isr_irq\index
_isr_irq\index: .word 0xABAD1DEA, _irq_spurious
.endm
/*
* Declare the ISR table
*/
.macro _isr_table_declare from, to
counter = \from
.rept (\to - \from)
_isr_table_entry_declare %counter
counter = counter + 1
.endr
.endm
GTEXT(_irq_spurious)
GDATA(_sw_isr_table)
.section .isr_irq16
.align
_sw_isr_table:
/* In the ARC architecture, IRQs 0-15 are reserved for the system and are not
assignable by the user; for that reason the ISR table linker section
starts at IRQ 16 */
_isr_table_declare 16 CONFIG_NUM_IRQS
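Expressed as a C object purely for illustration (the names are stand-ins), each generated .gnu.linkonce.isr_irqN entry amounts to a two-word record: a 0xABAD1DEA placeholder word and the handler, which stays _irq_spurious until a device driver supplies its own entry and the linker picks that one instead.

#include <stdint.h>

struct isr_table_entry {
	uint32_t marker;	/* the 0xABAD1DEA placeholder word */
	void (*isr)(void);	/* handler; spurious by default */
};

static void irq_spurious_stub(void) { }	/* stand-in for _irq_spurious */

static const struct isr_table_entry isr_irq16_equivalent = {
	0xABAD1DEA, irq_spurious_stub,
};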

197
arch/arc/core/swap.S Normal file

@@ -0,0 +1,197 @@
/*
* Copyright (c) 2014-2015 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
* @brief Thread context switching
*
* This module implements the routines necessary for thread context switching
* on ARCv2 CPUs.
*
* See isr_wrapper.S for details.
*/
#define _ASMLANGUAGE
#include <nano_private.h>
#include <offsets.h>
#include <toolchain.h>
#include <arch/cpu.h>
#include <v2/irq.h>
#include "swap_macros.h"
GTEXT(_Swap)
GDATA(_nanokernel)
/**
*
* @brief Initiate a cooperative context switch
*
* The _Swap() routine is invoked by various nanokernel services to effect
* a cooperative context switch. Prior to invoking _Swap(), the caller
* disables interrupts via nanoCpuIntLock() and the return 'key' is passed as a
* parameter to _Swap(). The key is in fact the value stored in the register
* operand of a CLRI instruction.
*
* It stores the intlock key parameter into current->intlock_key.
* Given that _Swap() is called to effect a cooperative context switch,
* the caller-saved integer registers are saved on the stack by the function
* call preamble to _Swap(). This creates a custom stack frame that will be
* popped when returning from _Swap(), but is not suitable for handling a return
* from an exception. Thus, the fact that the thread is pending because of a
* cooperative call to _Swap() has to be recorded via the _CAUSE_COOP code in
* the relinquish_cause of the thread's tTCS. The _IrqExit()/_FirqExit() code
* will take care of doing the right thing to restore the thread status.
*
* When _Swap() is invoked, we know the decision to perform a context switch or
* not has already been taken and a context switch must happen.
*
* @return may contain a return value set up by a call to fiberRtnValueSet()
*
* C function prototype:
*
* unsigned int _Swap (unsigned int key);
*
*/
SECTION_FUNC(TEXT, _Swap)
/* interrupts are locked, interrupt key is in r0 */
mov r1, _nanokernel
ld_s r2, [r1, __tNANO_current_OFFSET]
/* save intlock key */
st_s r0, [r2, __tTCS_intlock_key_OFFSET]
st _CAUSE_COOP, [r2, __tTCS_relinquish_cause_OFFSET]
/*
* Save status32 and blink on the stack before the callee-saved registers.
* This is the same layout as the start of an IRQ stack frame.
*/
lr r3, [_ARC_V2_STATUS32]
push_s r3
#ifdef CONFIG_ARC_STACK_CHECKING
/* disable stack checking during swap */
bclr r3, r3, _ARC_V2_STATUS32_SC_BIT
kflag r3
#endif
push_s blink
_save_callee_saved_regs
ld_s r2, [r1, __tNANO_fiber_OFFSET]
breq r2, 0, _swap_to_the_task
.balign 4
_swap_to_a_fiber:
ld_s r3, [r2, __tTCS_link_OFFSET]
b.d _finish_swapping_to_thread /* always execute delay slot */
st_s r3, [r1, __tNANO_fiber_OFFSET] /* delay slot */
.balign 4
_swap_to_the_task:
ld_s r2, [r1, __tNANO_task_OFFSET]
/* fall through */
.balign 4
_finish_swapping_to_thread:
/* entering here, r2 contains the new current thread */
#if 0
/* don't save flags in tNANO: slower, error-prone, and might not even give
* a speed boost where it's supposed to */
ld_s r3, [r2, __tTCS_flags_OFFSET]
st_s r3, [r1, __tNANO_flags_OFFSET]
#endif
#ifdef CONFIG_ARC_STACK_CHECKING
/* Use stack top and down registers from restored context */
add r3, r2, __tTCS_NOFLOAT_SIZEOF
sr r3, [_ARC_V2_KSTACK_TOP]
ld_s r3, [r2, __tTCS_stack_top_OFFSET]
sr r3, [_ARC_V2_KSTACK_BASE]
#endif
/* XXX - can be moved to delay slot of _CAUSE_RIRQ ? */
st_s r2, [r1, __tNANO_current_OFFSET]
_load_callee_saved_regs
ld_s r3, [r2, __tTCS_relinquish_cause_OFFSET]
breq r3, _CAUSE_RIRQ, _swap_return_from_rirq
nop
breq r3, _CAUSE_FIRQ, _swap_return_from_firq
nop
/* fall through to _swap_return_from_coop */
.balign 4
_swap_return_from_coop:
ld_s r1, [r2, __tTCS_intlock_key_OFFSET]
st 0, [r2, __tTCS_intlock_key_OFFSET]
ld_s r0, [r2, __tTCS_return_value_OFFSET]
lr ilink, [_ARC_V2_STATUS32]
bbit1 ilink, _ARC_V2_STATUS32_AE_BIT, _return_from_exc
pop_s blink /* pc into blink */
pop_s r3 /* status32 into r3 */
kflag r3 /* write status32 */
j_s.d [blink] /* always execute delay slot */
seti r1 /* delay slot */
.balign 4
_swap_return_from_rirq:
_swap_return_from_firq:
lr r3, [_ARC_V2_STATUS32]
bbit1 r3, _ARC_V2_STATUS32_AE_BIT, _return_from_exc_irq
/* pretend interrupt happened to use rtie instruction */
lr r3, [_ARC_V2_AUX_IRQ_ACT]
brne r3,0,_swap_already_in_irq
or r3,r3,(1<<(CONFIG_NUM_IRQ_PRIO_LEVELS-1)) /* use lowest */
sr r3, [_ARC_V2_AUX_IRQ_ACT]
_swap_already_in_irq:
rtie
.balign 4
_return_from_exc_irq:
_pop_irq_stack_frame
sub_s sp, sp, 8
_return_from_exc:
/* put the return address to eret */
ld ilink, [sp] /* pc into ilink */
sr ilink, [_ARC_V2_ERET]
/* put status32 into estatus */
ld ilink, [sp, 4] /* status32 into ilink */
sr ilink, [_ARC_V2_ERSTATUS]
add_s sp, sp, 8
rtie

171
arch/arc/core/swap_macros.h Normal file
View File

@@ -0,0 +1,171 @@
/* swap_macros.h - helper macros for context switch */
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef _SWAP_MACROS__H_
#define _SWAP_MACROS__H_
#include <nano_private.h>
#include <offsets.h>
#include <toolchain.h>
#include <arch/cpu.h>
#ifdef __cplusplus
extern "C" {
#endif
#ifdef _ASMLANGUAGE
/* entering this macro, current is in r2 */
.macro _save_callee_saved_regs
sub_s sp, sp, __tCalleeSaved_SIZEOF
/* save regs on stack */
st_s r13, [sp, __tCalleeSaved_r13_OFFSET]
st_s r14, [sp, __tCalleeSaved_r14_OFFSET]
st_s r15, [sp, __tCalleeSaved_r15_OFFSET]
st r16, [sp, __tCalleeSaved_r16_OFFSET]
st r17, [sp, __tCalleeSaved_r17_OFFSET]
st r18, [sp, __tCalleeSaved_r18_OFFSET]
st r19, [sp, __tCalleeSaved_r19_OFFSET]
st r20, [sp, __tCalleeSaved_r20_OFFSET]
st r21, [sp, __tCalleeSaved_r21_OFFSET]
st r22, [sp, __tCalleeSaved_r22_OFFSET]
st r23, [sp, __tCalleeSaved_r23_OFFSET]
st r24, [sp, __tCalleeSaved_r24_OFFSET]
st r25, [sp, __tCalleeSaved_r25_OFFSET]
st r26, [sp, __tCalleeSaved_r26_OFFSET]
st fp, [sp, __tCalleeSaved_fp_OFFSET]
st r30, [sp, __tCalleeSaved_r30_OFFSET]
/* save stack pointer in struct tcs */
st sp, [r2, __tTCS_preempReg_OFFSET + __tPreempt_sp_OFFSET]
.endm
/* entering this macro, current is in r2 */
.macro _load_callee_saved_regs
/* restore stack pointer from struct tcs */
ld sp, [r2, __tTCS_preempReg_OFFSET + __tPreempt_sp_OFFSET]
ld_s r13, [sp, __tCalleeSaved_r13_OFFSET]
ld_s r14, [sp, __tCalleeSaved_r14_OFFSET]
ld_s r15, [sp, __tCalleeSaved_r15_OFFSET]
ld r16, [sp, __tCalleeSaved_r16_OFFSET]
ld r17, [sp, __tCalleeSaved_r17_OFFSET]
ld r18, [sp, __tCalleeSaved_r18_OFFSET]
ld r19, [sp, __tCalleeSaved_r19_OFFSET]
ld r20, [sp, __tCalleeSaved_r20_OFFSET]
ld r21, [sp, __tCalleeSaved_r21_OFFSET]
ld r22, [sp, __tCalleeSaved_r22_OFFSET]
ld r23, [sp, __tCalleeSaved_r23_OFFSET]
ld r24, [sp, __tCalleeSaved_r24_OFFSET]
ld r25, [sp, __tCalleeSaved_r25_OFFSET]
ld r26, [sp, __tCalleeSaved_r26_OFFSET]
ld fp, [sp, __tCalleeSaved_fp_OFFSET]
ld r30, [sp, __tCalleeSaved_r30_OFFSET]
add_s sp, sp, __tCalleeSaved_SIZEOF
.endm
/*
* Must be called with interrupts locked or in P0.
* Upon exit, sp will be pointing to the stack frame.
*/
.macro _create_irq_stack_frame
sub_s sp, sp, __tISF_SIZEOF
st blink, [sp, __tISF_blink_OFFSET]
/* store these right away so we can use them if needed */
st_s r13, [sp, __tISF_r13_OFFSET]
st_s r12, [sp, __tISF_r12_OFFSET]
st r11, [sp, __tISF_r11_OFFSET]
st r10, [sp, __tISF_r10_OFFSET]
st r9, [sp, __tISF_r9_OFFSET]
st r8, [sp, __tISF_r8_OFFSET]
st r7, [sp, __tISF_r7_OFFSET]
st r6, [sp, __tISF_r6_OFFSET]
st r5, [sp, __tISF_r5_OFFSET]
st r4, [sp, __tISF_r4_OFFSET]
st_s r3, [sp, __tISF_r3_OFFSET]
st_s r2, [sp, __tISF_r2_OFFSET]
st_s r1, [sp, __tISF_r1_OFFSET]
st_s r0, [sp, __tISF_r0_OFFSET]
mov r0, lp_count
st_s r0, [sp, __tISF_lp_count_OFFSET]
lr r1, [_ARC_V2_LP_START]
lr r0, [_ARC_V2_LP_END]
st_s r1, [sp, __tISF_lp_start_OFFSET]
st_s r0, [sp, __tISF_lp_end_OFFSET]
.endm
/*
* Must be called with interrupts locked or in P0.
* sp must be pointing to the stack frame.
*/
.macro _pop_irq_stack_frame
ld blink, [sp, __tISF_blink_OFFSET]
ld_s r0, [sp, __tISF_lp_count_OFFSET]
mov lp_count, r0
ld_s r1, [sp, __tISF_lp_start_OFFSET]
ld_s r0, [sp, __tISF_lp_end_OFFSET]
sr r1, [_ARC_V2_LP_START]
sr r0, [_ARC_V2_LP_END]
ld_s r13, [sp, __tISF_r13_OFFSET]
ld_s r12, [sp, __tISF_r12_OFFSET]
ld r11, [sp, __tISF_r11_OFFSET]
ld r10, [sp, __tISF_r10_OFFSET]
ld r9, [sp, __tISF_r9_OFFSET]
ld r8, [sp, __tISF_r8_OFFSET]
ld r7, [sp, __tISF_r7_OFFSET]
ld r6, [sp, __tISF_r6_OFFSET]
ld r5, [sp, __tISF_r5_OFFSET]
ld r4, [sp, __tISF_r4_OFFSET]
ld_s r3, [sp, __tISF_r3_OFFSET]
ld_s r2, [sp, __tISF_r2_OFFSET]
ld_s r1, [sp, __tISF_r1_OFFSET]
ld_s r0, [sp, __tISF_r0_OFFSET]
/*
* All gprs have been reloaded, the only one that is still usable is
* ilink.
*
* The pc and status32 values will still be on the stack. We cannot
* pop them yet because the callers of _pop_irq_stack_frame must reload
* status32 differently depending on the execution context they are running
* in (_Swap(), firq or exception).
*/
add_s sp, sp, __tISF_SIZEOF
.endm
#endif /* _ASMLANGUAGE */
#ifdef __cplusplus
}
#endif
#endif /* _SWAP_MACROS__H_ */

View File

@@ -1,198 +0,0 @@
/*
* Copyright (c) 2019 Synopsys.
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief Thread context switching
*
* This module implements the routines necessary for thread context switching
* on ARCv2 CPUs.
*
* See isr_wrapper.S for details.
*/
#include <kernel_structs.h>
#include <offsets_short.h>
#include <toolchain.h>
#include <linker/sections.h>
#include <arch/cpu.h>
#include <v2/irq.h>
#include <swap_macros.h>
GTEXT(arch_switch)
/**
*
* @brief Initiate a cooperative context switch
*
* The arch_switch routine is invoked by various kernel services to effect
* a cooperative context switch. Prior to invoking arch_switch, the caller
* disables interrupts via irq_lock()
* Given that arch_switch() is called to effect a cooperative context switch,
* the caller-saved integer registers are saved on the stack by the function
* call preamble to arch_switch. This creates a custom stack frame that will
* be popped when returning from arch_switch, but is not suitable for handling
* a return from an exception. Thus, the fact that the thread is pending because
* of a cooperative call to arch_switch() has to be recorded via the
* _CAUSE_COOP code in the relinquish_cause of the thread's k_thread structure.
* The _rirq_exit()/_firq_exit() code will take care of doing the right thing
* to restore the thread status.
*
* When arch_switch() is invoked, we know the decision to perform a context
* switch or not has already been taken and a context switch must happen.
*
*
* C function prototype:
*
* void arch_switch(void *switch_to, void **switched_from);
*
*/
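A hedged sketch of the scheduler side of the call described above (not taken from this patch): arch_switch() is handed the incoming thread's switch_handle and the address of the outgoing thread's switch_handle, which arch_new_thread() below initializes. The do_coop_switch() wrapper name is an assumption for illustration.

#include <kernel.h>

extern void arch_switch(void *switch_to, void **switched_from);

static void do_coop_switch(struct k_thread *old_thread,
			   struct k_thread *new_thread)
{
	/* the caller is assumed to have locked interrupts already */
	arch_switch(new_thread->switch_handle, &old_thread->switch_handle);
}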
SECTION_FUNC(TEXT, arch_switch)
#ifdef CONFIG_EXECUTION_BENCHMARKING
push_s r0
push_s r1
push_s blink
bl read_timer_start_of_swap
pop_s blink
pop_s r1
pop_s r0
#endif
/*
* r0 = new_thread->switch_handle = switch_to thread,
* r1 = &old_thread->switch_handle = &switch_from thread
*/
ld_s r2, [r1]
/*
* r2 may be dummy_thread in z_cstart, dummy_thread->switch_handle
* must be 0
*/
breq r2, 0, _switch_to_target_thread
st _CAUSE_COOP, [r2, _thread_offset_to_relinquish_cause]
/*
* Save status32 and blink on the stack before the callee-saved registers.
* This is the same layout as the start of an IRQ stack frame.
*/
lr r3, [_ARC_V2_STATUS32]
push_s r3
#ifdef CONFIG_ARC_HAS_SECURE
#ifdef CONFIG_ARC_SECURE_FIRMWARE
lr r3, [_ARC_V2_SEC_STAT]
#else
mov_s r3, 0
#endif
push_s r3
#endif
push_s blink
_save_callee_saved_regs
#ifdef CONFIG_ARC_STACK_CHECKING
/* disable stack checking here, as sp will be changed to target
* thread's sp
*/
#if defined(CONFIG_ARC_HAS_SECURE) && defined(CONFIG_ARC_SECURE_FIRMWARE)
bclr r3, r3, _ARC_V2_SEC_STAT_SSC_BIT
sflag r3
#else
bclr r3, r3, _ARC_V2_STATUS32_SC_BIT
kflag r3
#endif
#endif
_switch_to_target_thread:
mov_s r2, r0
/* entering here, r2 contains the new current thread */
#ifdef CONFIG_ARC_STACK_CHECKING
_load_stack_check_regs
#endif
_load_callee_saved_regs
#if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)
push_s r2
bl configure_mpu_thread
pop_s r2
#endif
ld r3, [r2, _thread_offset_to_relinquish_cause]
breq r3, _CAUSE_RIRQ, _switch_return_from_rirq
nop_s
breq r3, _CAUSE_FIRQ, _switch_return_from_firq
nop_s
/* fall through to _switch_return_from_coop */
.balign 4
_switch_return_from_coop:
pop_s blink /* pc into blink */
#ifdef CONFIG_ARC_HAS_SECURE
pop_s r3 /* pop SEC_STAT */
#ifdef CONFIG_ARC_SECURE_FIRMWARE
sflag r3
#endif
#endif
pop_s r3 /* status32 into r3 */
kflag r3 /* write status32 */
#ifdef CONFIG_EXECUTION_BENCHMARKING
b _capture_value_for_benchmarking
#endif
return_loc:
j_s [blink]
.balign 4
_switch_return_from_rirq:
_switch_return_from_firq:
#ifdef CONFIG_ARC_SECURE_FIRMWARE
/* here need to recover SEC_STAT.IRM bit */
pop_s r3
sflag r3
#endif
lr r3, [_ARC_V2_AUX_IRQ_ACT]
/* use lowest interrupt priority */
#ifdef CONFIG_ARC_SECURE_FIRMWARE
or r3, r3, (1 << (ARC_N_IRQ_START_LEVEL - 1))
#else
or r3, r3, (1 << (CONFIG_NUM_IRQ_PRIO_LEVELS - 1))
#endif
#ifdef CONFIG_ARC_NORMAL_FIRMWARE
mov_s r0, _ARC_V2_AUX_IRQ_ACT
mov_s r1, r3
mov_s r6, ARC_S_CALL_AUX_WRITE
sjli SJLI_CALL_ARC_SECURE
#else
sr r3, [_ARC_V2_AUX_IRQ_ACT]
#endif
rtie
#ifdef CONFIG_EXECUTION_BENCHMARKING
.balign 4
_capture_value_for_benchmarking:
push_s blink
bl read_timer_end_of_swap
pop_s blink
b return_loc
#endif /* CONFIG_EXECUTION_BENCHMARKING */

View File

@@ -0,0 +1,93 @@
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
* @brief ARCv2 system fatal error handler
*
* This module provides the _SysFatalErrorHandler() routine for ARCv2 BSPs.
*/
#include <nanokernel.h>
#include <toolchain.h>
#include <sections.h>
#ifdef CONFIG_PRINTK
#include <misc/printk.h>
#define PRINTK(...) printk(__VA_ARGS__)
#else
#define PRINTK(...)
#endif
#ifdef CONFIG_MICROKERNEL
extern void _TaskAbort(void);
static inline void nonEssentialTaskAbort(void)
{
PRINTK("Fatal fault in task ! Aborting task.\n");
_TaskAbort();
}
#define NON_ESSENTIAL_TASK_ABORT() nonEssentialTaskAbort()
#else
#define NON_ESSENTIAL_TASK_ABORT() \
do {/* nothing */ \
} while ((0))
#endif
/**
*
* @brief Fatal error handler
*
* This routine implements the corrective action to be taken when the system
* detects a fatal error.
*
* This sample implementation attempts to abort the current thread and allow
* the system to continue executing, which may permit the system to continue
* functioning with degraded capabilities.
*
* System designers may wish to enhance or substitute this sample
* implementation to take other actions, such as logging error (or debug)
* information to a persistent repository and/or rebooting the system.
*
* @param reason the fatal error reason
* @param pEsf pointer to exception stack frame
*
* @return N/A
*/
void _SysFatalErrorHandler(unsigned int reason, const NANO_ESF * pEsf)
{
nano_context_type_t curCtx = sys_execution_context_type_get();
ARG_UNUSED(reason);
ARG_UNUSED(pEsf);
if ((curCtx == NANO_CTX_ISR) || _is_thread_essential()) {
PRINTK("Fatal fault in %s ! Spinning...\n",
NANO_CTX_ISR == curCtx
? "ISR"
: NANO_CTX_FIBER == curCtx ? "essential fiber"
: "essential task");
for (;;)
; /* spin forever */
}
if (NANO_CTX_FIBER == curCtx) {
PRINTK("Fatal fault in fiber ! Aborting fiber.\n");
fiber_abort();
return;
}
NON_ESSENTIAL_TASK_ABORT();
}

View File

@@ -1,48 +1,91 @@
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
* @brief New thread creation for ARCv2
*
* Core thread related primitives for the ARCv2 processor architecture.
* Core nanokernel fiber related primitives for the ARCv2 processor
* architecture.
*/
#include <kernel.h>
#include <ksched.h>
#include <offsets_short.h>
#include <nanokernel.h>
#include <toolchain.h>
#include <nano_private.h>
#include <offsets.h>
#include <wait_q.h>
#ifdef CONFIG_USERSPACE
#include <arch/arc/v2/mpu/arc_core_mpu.h>
#endif
#ifdef CONFIG_INIT_STACKS
#include <string.h>
#endif /* CONFIG_INIT_STACKS */
/* initial stack frame */
struct init_stack_frame {
u32_t pc;
#ifdef CONFIG_ARC_HAS_SECURE
u32_t sec_stat;
#endif
u32_t status32;
u32_t r3;
u32_t r2;
u32_t r1;
u32_t r0;
uint32_t pc;
uint32_t status32;
uint32_t r3;
uint32_t r2;
uint32_t r1;
uint32_t r0;
};
tNANO _nanokernel = {0};
#if defined(CONFIG_THREAD_MONITOR)
#define THREAD_MONITOR_INIT(tcs) thread_monitor_init(tcs)
#else
#define THREAD_MONITOR_INIT(tcs) \
do {/* do nothing */ \
} while ((0))
#endif
#if defined(CONFIG_THREAD_MONITOR)
/*
* @brief Initialize thread monitoring support
*
* Currently only inserts the new thread in the list of active threads.
*
* @return N/A
*/
static ALWAYS_INLINE void thread_monitor_init(struct tcs *tcs)
{
unsigned int key;
/*
* Add the newly initialized thread to head of the list of threads. This
* singly linked list of threads maintains ALL the threads in the system:
* both tasks and fibers regardless of whether they are runnable.
*/
key = irq_lock();
tcs->next_thread = _nanokernel.threads;
_nanokernel.threads = tcs;
irq_unlock(key);
}
#endif /* CONFIG_THREAD_MONITOR */
/*
* @brief Initialize a new thread from its stack space
*
* The thread control structure is put at the lower address of the stack. An
* The control structure (TCS) is put at the lower address of the stack. An
* initial context, to be "restored" by __return_from_coop(), is put at
* the other end of the stack, and thus reusable by the stack when not
* needed anymore.
*
* The initial context is a basic stack frame that contains arguments for
* z_thread_entry() return address, that points at z_thread_entry()
* _thread_entry() return address, that points at _thread_entry()
* and status register.
*
* <options> is currently unused.
@@ -53,253 +96,85 @@ struct init_stack_frame {
* @param parameter1 first param to entry point
* @param parameter2 second param to entry point
* @param parameter3 third param to entry point
* @param priority thread priority
* @param options thread options: K_ESSENTIAL
* @param priority fiber priority, -1 for task
* @param options is unused (saved for future expansion)
*
* @return N/A
*/
void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
size_t stackSize, k_thread_entry_t pEntry,
void *parameter1, void *parameter2, void *parameter3,
int priority, unsigned int options)
void _new_thread(char *pStackMem, unsigned stackSize,
void *uk_task_ptr, _thread_entry_t pEntry,
void *parameter1, void *parameter2, void *parameter3,
int priority, unsigned options)
{
char *pStackMem = Z_THREAD_STACK_BUFFER(stack);
Z_ASSERT_VALID_PRIO(priority, pEntry);
char *stackEnd;
char *stackAdjEnd;
char *stackEnd = pStackMem + stackSize;
struct init_stack_frame *pInitCtx;
#ifdef CONFIG_USERSPACE
struct tcs *tcs = (struct tcs *) pStackMem;
size_t stackAdjSize;
size_t offset = 0;
/* adjust stack and stack size */
#if CONFIG_ARC_MPU_VER == 2
stackAdjSize = Z_ARC_MPUV2_SIZE_ALIGN(stackSize);
#elif CONFIG_ARC_MPU_VER == 3
stackAdjSize = STACK_SIZE_ALIGN(stackSize);
#endif
stackEnd = pStackMem + stackAdjSize;
#ifdef CONFIG_STACK_POINTER_RANDOM
offset = stackAdjSize - stackSize;
#ifdef CONFIG_INIT_STACKS
memset(pStackMem, 0xaa, stackSize);
#endif
if (options & K_USER) {
thread->arch.priv_stack_start =
(u32_t)(stackEnd + STACK_GUARD_SIZE);
/* carve the thread entry struct from the "base" of the stack */
stackAdjEnd = (char *)STACK_ROUND_DOWN(stackEnd +
ARCH_THREAD_STACK_RESERVED);
pInitCtx = (struct init_stack_frame *)(STACK_ROUND_DOWN(stackEnd) -
sizeof(struct init_stack_frame));
/* reserve 4 bytes for the start of user sp */
stackAdjEnd -= 4;
(*(u32_t *)stackAdjEnd) = STACK_ROUND_DOWN(
(u32_t)stackEnd - offset);
#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
/* reserve stack space for the userspace local data struct */
thread->userspace_local_data =
(struct _thread_userspace_local_data *)
STACK_ROUND_DOWN(stackEnd -
sizeof(*thread->userspace_local_data) - offset);
/* update the start of user sp */
(*(u32_t *)stackAdjEnd) = (u32_t) thread->userspace_local_data;
#endif
} else {
/* for kernel thread, the privilege stack is merged into thread stack */
/* if MPU_STACK_GUARD is enabled, reserve the stack area
* |---------------------| |----------------|
* | user stack | | stack guard |
* |---------------------| to |----------------|
* | stack guard | | kernel thread |
* |---------------------| | stack |
* | privilege stack | | |
* ---------------------------------------------
pInitCtx->pc = ((uint32_t)_thread_entry_wrapper);
pInitCtx->r0 = (uint32_t)pEntry;
pInitCtx->r1 = (uint32_t)parameter1;
pInitCtx->r2 = (uint32_t)parameter2;
pInitCtx->r3 = (uint32_t)parameter3;
/*
* For now, set the interrupt priority to 15.
* We can leave the interrupt enable flag set to 0, as the
* seti instruction at the end of _Swap() will
* enable interrupts based on the intlock_key
* value.
*/
pStackMem += STACK_GUARD_SIZE;
stackAdjSize = stackAdjSize + CONFIG_PRIVILEGED_STACK_SIZE;
stackEnd += ARCH_THREAD_STACK_RESERVED;
thread->arch.priv_stack_start = 0;
#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
/* reserve stack space for the userspace local data struct */
stackAdjEnd = (char *)STACK_ROUND_DOWN(stackEnd
- sizeof(*thread->userspace_local_data) - offset);
thread->userspace_local_data =
(struct _thread_userspace_local_data *)stackAdjEnd;
#ifdef CONFIG_ARC_STACK_CHECKING
pInitCtx->status32 = _ARC_V2_STATUS32_SC | _ARC_V2_STATUS32_E(_ARC_V2_DEF_IRQ_LEVEL);
tcs->stack_top = (uint32_t) stackEnd;
#else
stackAdjEnd = (char *)STACK_ROUND_DOWN(stackEnd - offset);
pInitCtx->status32 = _ARC_V2_STATUS32_E(_ARC_V2_DEF_IRQ_LEVEL);
#endif
}
tcs->link = NULL;
tcs->flags = priority == -1 ? TASK | PREEMPTIBLE : FIBER;
tcs->prio = priority;
z_new_thread_init(thread, pStackMem, stackAdjSize, priority, options);
#ifdef CONFIG_THREAD_CUSTOM_DATA
/* Initialize custom data field (value is opaque to kernel) */
/* carve the thread entry struct from the "base" of
the privileged stack */
pInitCtx = (struct init_stack_frame *)(
stackAdjEnd - sizeof(struct init_stack_frame));
tcs->custom_data = NULL;
#endif
/* fill init context */
pInitCtx->status32 = 0U;
if (options & K_USER) {
pInitCtx->pc = ((u32_t)z_user_thread_entry_wrapper);
} else {
pInitCtx->pc = ((u32_t)z_thread_entry_wrapper);
}
#ifdef CONFIG_THREAD_MONITOR
/*
* In debug mode tcs->entry give direct access to the thread entry
* and the corresponding parameters.
*/
tcs->entry = (struct __thread_entry *)(pInitCtx);
#endif
#ifdef CONFIG_MICROKERNEL
tcs->uk_task_ptr = uk_task_ptr;
#else
ARG_UNUSED(uk_task_ptr);
#endif
/*
* enable US bit, US is read as zero in user mode. This will allow user
* mode sleep instructions, and it enables a form of denial-of-service
* attack by putting the processor in sleep mode, but since interrupt
* level/mask can't be set from user space that's not worse than
* executing a loop without yielding.
* intlock_key is constructed based on ARCv2 ISA Programmer's
* Reference Manual CLRI instruction description:
* dst[31:6] dst[5] dst[4] dst[3:0]
* 26'd0 1 STATUS32.IE STATUS32.E[3:0]
*/
pInitCtx->status32 |= _ARC_V2_STATUS32_US;
#else /* For no USERSPACE feature */
pStackMem += ARCH_THREAD_STACK_RESERVED;
stackEnd = pStackMem + stackSize;
tcs->intlock_key = 0x3F;
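A worked decode of this initial key (editorial note, following the CLRI destination layout quoted in the comment above): 0x3F is 0b111111, so dst[3:0] = 0xF (STATUS32.E, all interrupt priority levels enabled), dst[4] = 1 (STATUS32.IE set) and dst[5] = 1 (the fixed marker bit); the seti at the end of _Swap() therefore fully re-enables interrupts for a newly created thread.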
tcs->relinquish_cause = _CAUSE_COOP;
tcs->preempReg.sp = (uint32_t)pInitCtx - __tCalleeSaved_SIZEOF;
z_new_thread_init(thread, pStackMem, stackSize, priority, options);
_nano_timeout_tcs_init(tcs);
stackAdjEnd = stackEnd;
/* initial values in all other registers/TCS entries are irrelevant */
pInitCtx = (struct init_stack_frame *)(
STACK_ROUND_DOWN(stackAdjEnd) -
sizeof(struct init_stack_frame));
pInitCtx->status32 = 0U;
pInitCtx->pc = ((u32_t)z_thread_entry_wrapper);
#endif
#ifdef CONFIG_ARC_SECURE_FIRMWARE
pInitCtx->sec_stat = z_arc_v2_aux_reg_read(_ARC_V2_SEC_STAT);
#endif
pInitCtx->r0 = (u32_t)pEntry;
pInitCtx->r1 = (u32_t)parameter1;
pInitCtx->r2 = (u32_t)parameter2;
pInitCtx->r3 = (u32_t)parameter3;
/* stack check configuration */
#ifdef CONFIG_ARC_STACK_CHECKING
#ifdef CONFIG_ARC_SECURE_FIRMWARE
pInitCtx->sec_stat |= _ARC_V2_SEC_STAT_SSC;
#else
pInitCtx->status32 |= _ARC_V2_STATUS32_SC;
#endif
#ifdef CONFIG_USERSPACE
if (options & K_USER) {
thread->arch.u_stack_top = (u32_t)pStackMem;
thread->arch.u_stack_base = (u32_t)stackEnd;
thread->arch.k_stack_top =
(u32_t)(stackEnd + STACK_GUARD_SIZE);
thread->arch.k_stack_base = (u32_t)
(stackEnd + ARCH_THREAD_STACK_RESERVED);
} else {
thread->arch.k_stack_top = (u32_t)pStackMem;
thread->arch.k_stack_base = (u32_t)stackEnd;
thread->arch.u_stack_top = 0;
thread->arch.u_stack_base = 0;
}
#else
thread->arch.k_stack_top = (u32_t) pStackMem;
thread->arch.k_stack_base = (u32_t) stackEnd;
#endif
#endif
#ifdef CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS
pInitCtx->status32 |= _ARC_V2_STATUS32_AD;
#endif
thread->switch_handle = thread;
thread->arch.relinquish_cause = _CAUSE_COOP;
thread->callee_saved.sp =
(u32_t)pInitCtx - ___callee_saved_stack_t_SIZEOF;
/* initial values in all other regs/k_thread entries are irrelevant */
THREAD_MONITOR_INIT(tcs);
}
#ifdef CONFIG_USERSPACE
FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
void *p1, void *p2, void *p3)
{
/*
* adjust the thread stack layout
* |----------------| |---------------------|
* | stack guard | | user stack |
* |----------------| to |---------------------|
* | kernel thread | | stack guard |
* | stack | |---------------------|
* | | | privilege stack |
* ---------------------------------------------
*/
_current->stack_info.start = (u32_t)_current->stack_obj;
_current->stack_info.size -= CONFIG_PRIVILEGED_STACK_SIZE;
_current->arch.priv_stack_start =
(u32_t)(_current->stack_info.start +
_current->stack_info.size + STACK_GUARD_SIZE);
#ifdef CONFIG_ARC_STACK_CHECKING
_current->arch.k_stack_top = _current->arch.priv_stack_start;
_current->arch.k_stack_base = _current->arch.priv_stack_start +
CONFIG_PRIVILEGED_STACK_SIZE;
_current->arch.u_stack_top = _current->stack_info.start;
_current->arch.u_stack_base = _current->stack_info.start +
_current->stack_info.size;
#endif
/* possible optimization: no need to load mem domain anymore */
/* need to lock cpu here ? */
configure_mpu_thread(_current);
z_arc_userspace_enter(user_entry, p1, p2, p3,
(u32_t)_current->stack_obj,
_current->stack_info.size);
CODE_UNREACHABLE;
}
#endif
#if defined(CONFIG_FLOAT) && defined(CONFIG_FP_SHARING)
int arch_float_disable(struct k_thread *thread)
{
unsigned int key;
/* Ensure a preemptive context switch does not occur */
key = irq_lock();
/* Disable all floating point capabilities for the thread */
thread->base.user_options &= ~K_FP_REGS;
irq_unlock(key);
return 0;
}
int arch_float_enable(struct k_thread *thread)
{
unsigned int key;
/* Ensure a preemptive context switch does not occur */
key = irq_lock();
/* Enable all floating point capabilities for the thread */
thread->base.user_options |= K_FP_REGS;
irq_unlock(key);
return 0;
}
#endif /* CONFIG_FLOAT && CONFIG_FP_SHARING */

View File

@@ -1,38 +1,48 @@
/*
* Copyright (c) 2014-2015 Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
* @brief Wrapper for z_thread_entry
* @brief Wrapper for _thread_entry
*
* Wrapper for z_thread_entry routine when called from the initial context.
* Wrapper for _thread_entry routine when called from the initial context.
*/
#include <toolchain.h>
#include <linker/sections.h>
#include <v2/irq.h>
#define _ASMLANGUAGE
GTEXT(z_thread_entry_wrapper)
GTEXT(z_thread_entry_wrapper1)
#include <toolchain.h>
#include <sections.h>
GTEXT(_thread_entry_wrapper)
GTEXT(_thread_entry)
/*
* @brief Wrapper for z_thread_entry
* @brief Wrapper for _thread_entry
*
* The routine pops parameters for the z_thread_entry from stack frame, prepared
* by the arch_new_thread() routine.
* The routine pops parameters for the _thread_entry from stack frame, prepared
* by the _new_thread() routine.
*
* @return N/A
*/
SECTION_FUNC(TEXT, z_thread_entry_wrapper)
seti _ARC_V2_INIT_IRQ_LOCK_KEY
z_thread_entry_wrapper1:
SECTION_FUNC(TEXT, _thread_entry_wrapper)
pop_s r3
pop_s r2
pop_s r1
pop_s r0
j z_thread_entry
j _thread_entry
nop

View File

@@ -1,39 +0,0 @@
/*
* Copyright (c) 2017 Synopsys, Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief Time Stamp API for ARCv2
*
* Provide 64-bit time stamp API
*/
#include <kernel.h>
#include <toolchain.h>
#include <kernel_structs.h>
/*
* @brief Read 64-bit timestamp value
*
* This function returns a 64-bit time stamp value that is clocked
* at the same frequency as the CPU.
*
* @return 64-bit time stamp value
*/
u64_t z_tsc_read(void)
{
unsigned int key;
u64_t t;
u32_t count;
key = irq_lock();
t = (u64_t)z_tick_get();
count = z_arc_v2_aux_reg_read(_ARC_V2_TMR0_COUNT);
irq_unlock(key);
t *= k_ticks_to_cyc_floor64(1);
t += (u64_t)count;
return t;
}
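A worked numeric example (editorial; the 100 ticks-per-second rate is an assumption, while the 32 MHz clock matches the ARC defconfig later in this change): with CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC = 32000000 and 100 ticks per second, k_ticks_to_cyc_floor64(1) yields 320000 cycles per tick, so if z_tick_get() returns 5 and TMR0_COUNT reads 1234, z_tsc_read() returns 5 * 320000 + 1234 = 1601234 cycles.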

View File

@@ -1,317 +0,0 @@
/*
* Copyright (c) 2018 Synopsys.
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <offsets_short.h>
#include <toolchain.h>
#include <linker/sections.h>
#include <kernel_structs.h>
#include <arch/cpu.h>
#include <syscall.h>
#include <swap_macros.h>
#include <v2/irq.h>
.macro clear_scratch_regs
mov_s r1, 0
mov_s r2, 0
mov_s r3, 0
mov_s r4, 0
mov_s r5, 0
mov_s r6, 0
mov_s r7, 0
mov_s r8, 0
mov_s r9, 0
mov_s r10, 0
mov_s r11, 0
mov_s r12, 0
.endm
.macro clear_callee_regs
mov_s r25, 0
mov_s r24, 0
mov_s r23, 0
mov_s r22, 0
mov_s r21, 0
mov_s r20, 0
mov_s r19, 0
mov_s r18, 0
mov_s r17, 0
mov_s r16, 0
mov_s r15, 0
mov_s r14, 0
mov_s r13, 0
.endm
GTEXT(z_arc_userspace_enter)
GTEXT(_arc_do_syscall)
GTEXT(z_user_thread_entry_wrapper)
GTEXT(arch_user_string_nlen)
GTEXT(z_arc_user_string_nlen_fault_start)
GTEXT(z_arc_user_string_nlen_fault_end)
GTEXT(z_arc_user_string_nlen_fixup)
/*
* @brief Wrapper for z_thread_entry in the case of a user thread
* The init parameters are on the privileged stack
*
* @return N/A
*/
SECTION_FUNC(TEXT, z_user_thread_entry_wrapper)
seti _ARC_V2_INIT_IRQ_LOCK_KEY
pop_s r3
pop_s r2
pop_s r1
pop_s r0
/* the start of user sp is in r5 */
pop r5
/* start of privilege stack in blink */
mov_s blink, sp
st.aw r0, [r5, -4]
st.aw r1, [r5, -4]
st.aw r2, [r5, -4]
st.aw r3, [r5, -4]
/*
* when CONFIG_INIT_STACKS is enabled, the stack will be initialized
* in z_new_thread_init.
*/
j _arc_go_to_user_space
/**
*
* User space entry function
*
* This function is the entry point to user mode from privileged execution.
* The conversion is one way, and threads which transition to user mode do
* not transition back later, unless they are doing system calls.
*
*/
SECTION_FUNC(TEXT, z_arc_userspace_enter)
/*
* In ARCv2, the U bit can only be set through exception return
*/
#ifdef CONFIG_ARC_STACK_CHECKING
/* disable stack checking as the stack should be initialized */
#ifdef CONFIG_ARC_SECURE_FIRMWARE
lr blink, [_ARC_V2_SEC_STAT]
bclr blink, blink, _ARC_V2_SEC_STAT_SSC_BIT
sflag blink
#else
lr blink, [_ARC_V2_STATUS32]
bclr blink, blink, _ARC_V2_STATUS32_SC_BIT
kflag blink
#endif
#endif
/* the end of user stack in r5 */
add r5, r4, r5
/* start of privilege stack */
add blink, r5, CONFIG_PRIVILEGED_STACK_SIZE+STACK_GUARD_SIZE
mov_s sp, r5
push_s r0
push_s r1
push_s r2
push_s r3
mov r5, sp /* skip r0, r1, r2, r3 */
#ifdef CONFIG_INIT_STACKS
mov_s r0, 0xaaaaaaaa
#else
mov_s r0, 0x0
#endif
_clear_user_stack:
st.ab r0, [r4, 4]
cmp r4, r5
jlt _clear_user_stack
#ifdef CONFIG_ARC_STACK_CHECKING
mov_s r1, _kernel
ld_s r2, [r1, _kernel_offset_to_current]
_load_stack_check_regs
#ifdef CONFIG_ARC_SECURE_FIRMWARE
lr r0, [_ARC_V2_SEC_STAT]
bset r0, r0, _ARC_V2_SEC_STAT_SSC_BIT
sflag r0
#else
lr r0, [_ARC_V2_STATUS32]
bset r0, r0, _ARC_V2_STATUS32_SC_BIT
kflag r0
#endif
#endif
_arc_go_to_user_space:
lr r0, [_ARC_V2_STATUS32]
bset r0, r0, _ARC_V2_STATUS32_U_BIT
mov_s r1, z_thread_entry_wrapper1
sr r0, [_ARC_V2_ERSTATUS]
sr r1, [_ARC_V2_ERET]
/* fake exception return */
lr r0, [_ARC_V2_STATUS32]
bclr r0, r0, _ARC_V2_STATUS32_AE_BIT
kflag r0
/* when exception returns from kernel to user, sp and _ARC_V2_USER_SP
* /_ARC_V2_SEC_U_SP will be switched
*/
#if defined(CONFIG_ARC_HAS_SECURE) && defined(CONFIG_ARC_SECURE_FIRMWARE)
lr r0, [_ARC_V2_SEC_STAT]
/* the mode returned to by the exception return is secure mode */
bset r0, r0, 31
sr r0, [_ARC_V2_ERSEC_STAT]
sr r5, [_ARC_V2_SEC_U_SP]
#else
sr r5, [_ARC_V2_USER_SP]
#endif
mov_s sp, blink
mov_s r0, 0
clear_callee_regs
clear_scratch_regs
mov_s fp, 0
mov_s r29, 0
mov_s r30, 0
mov_s blink, 0
#ifdef CONFIG_EXECUTION_BENCHMARKING
b _capture_value_for_benchmarking_userspace
return_loc_userspace_enter:
#endif /* CONFIG_EXECUTION_BENCHMARKING */
rtie
/**
*
* Userspace system call function
*
* This function is used to do system calls from unprivileged code. This
* function is responsible for the following:
* 1) Dispatching the system call
* 2) Restoring stack and calling back to the caller of the system call
*
*/
SECTION_FUNC(TEXT, _arc_do_syscall)
/*
* r0-r5: arg1-arg6; r6 is the call id, which was already checked in the
* trap_s handler; r7 is the system call stack frame pointer.
* We need to recover r0, r1, r2 because they will be modified in
* _create_irq_stack_frame. If a specific syscall frame (different
* from the irq stack frame) were defined, the recovery of r0, r1, r2
* could be optimized away.
*/
ld_s r0, [sp, ___isf_t_r0_OFFSET]
ld_s r1, [sp, ___isf_t_r1_OFFSET]
ld_s r2, [sp, ___isf_t_r2_OFFSET]
mov r7, sp
mov_s blink, _k_syscall_table
ld.as r6, [blink, r6]
jl [r6]
/* save return value */
st_s r0, [sp, ___isf_t_r0_OFFSET]
mov_s r29, 0
mov_s r30, 0
/* through fake exception return, go back to the caller */
lr r0, [_ARC_V2_STATUS32]
bset r0, r0, _ARC_V2_STATUS32_AE_BIT
kflag r0
#ifdef CONFIG_ARC_SECURE_FIRMWARE
ld_s r0, [sp, ___isf_t_sec_stat_OFFSET]
sr r0,[_ARC_V2_ERSEC_STAT]
#endif
ld_s r0, [sp, ___isf_t_status32_OFFSET]
sr r0,[_ARC_V2_ERSTATUS]
ld_s r0, [sp, ___isf_t_pc_OFFSET] /* eret into pc */
sr r0,[_ARC_V2_ERET]
_pop_irq_stack_frame
rtie
/*
* size_t arch_user_string_nlen(const char *s, size_t maxsize, int *err_arg)
*/
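For reference, a rough C-level model of what the assembly routine below implements (an editorial sketch; the real code must stay in assembly so the z_arc_user_string_nlen_fault_* labels can be matched by the fault handler, and the faulting case, which leaves -1 in *err_arg, cannot be expressed in plain C):

#include <stddef.h>

static size_t user_string_nlen_model(const char *s, size_t maxsize, int *err_arg)
{
	size_t len = 0;

	/* each byte access below may fault on a bad user pointer; the assembly
	 * version recovers through z_arc_user_string_nlen_fixup with err still -1
	 */
	while (len < maxsize && s[len] != '\0') {
		len++;
	}

	*err_arg = 0;	/* success: clear the error and return the length */
	return len;
}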
SECTION_FUNC(TEXT, arch_user_string_nlen)
/* int err; */
sub_s sp,sp,0x4
/* Initial error value (-1 failure), store at [sp,0] */
mov_s r3, -1
st_s r3, [sp, 0]
/* Loop setup.
* r12 (position locator) = s - 1
* r0 (length counter return value) = 0
* lp_count = maxsize + 1
*/
sub r12, r0, 0x1
mov_s r0, 0
add_s r1, r1, 1
mov lp_count, r1
strlen_loop:
z_arc_user_string_nlen_fault_start:
/* is the byte at ++r12 a NULL? if so, we're done. Might fault! */
ldb.aw r1, [r12, 1]
z_arc_user_string_nlen_fault_end:
brne_s r1, 0, not_null
strlen_done:
/* Success, set err to 0 */
mov_s r1, 0
st_s r1, [sp, 0]
z_arc_user_string_nlen_fixup:
/* *err_arg = err; Pop stack and return */
ld_s r1, [sp, 0]
add_s sp, sp, 4
j_s.d [blink]
st_s r1, [r2, 0]
not_null:
/* check if we've hit the maximum, if so we're done. */
brne.d.nt lp_count, 0x1, inc_len
sub lp_count, lp_count, 0x1
b_s strlen_done
inc_len:
/* increment length measurement, loop again */
add_s r0, r0, 1
b_s strlen_loop
#ifdef CONFIG_EXECUTION_BENCHMARKING
.balign 4
_capture_value_for_benchmarking_userspace:
mov r1, _kernel
ld_s r2, [r1, _kernel_offset_to_current]
_save_callee_saved_regs
push_s blink
bl read_timer_end_of_userspace_enter
pop_s blink
mov r1, _kernel
ld_s r2, [r1, _kernel_offset_to_current]
_load_callee_saved_regs
b return_loc_userspace_enter
#endif

View File

@@ -1,7 +1,17 @@
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
@@ -23,44 +33,44 @@
* swapped.
*/
#include <zephyr/types.h>
#include <stdint.h>
#include <toolchain.h>
#include "vector_table.h"
struct vector_table {
u32_t reset;
u32_t memory_error;
u32_t instruction_error;
u32_t ev_machine_check;
u32_t ev_tlb_miss_i;
u32_t ev_tlb_miss_d;
u32_t ev_prot_v;
u32_t ev_privilege_v;
u32_t ev_swi;
u32_t ev_trap;
u32_t ev_extension;
u32_t ev_div_zero;
u32_t ev_dc_error;
u32_t ev_maligned;
u32_t unused_1;
u32_t unused_2;
uint32_t reset;
uint32_t memory_error;
uint32_t instruction_error;
uint32_t ev_machine_check;
uint32_t ev_tlb_miss_i;
uint32_t ev_tlb_miss_d;
uint32_t ev_prot_v;
uint32_t ev_privilege_v;
uint32_t ev_swi;
uint32_t ev_trap;
uint32_t ev_extension;
uint32_t ev_div_zero;
uint32_t ev_dc_error;
uint32_t ev_maligned;
uint32_t unused_1;
uint32_t unused_2;
};
struct vector_table _VectorTable Z_GENERIC_SECTION(.exc_vector_table) = {
(u32_t)__reset,
(u32_t)__memory_error,
(u32_t)__instruction_error,
(u32_t)__ev_machine_check,
(u32_t)__ev_tlb_miss_i,
(u32_t)__ev_tlb_miss_d,
(u32_t)__ev_prot_v,
(u32_t)__ev_privilege_v,
(u32_t)__ev_swi,
(u32_t)__ev_trap,
(u32_t)__ev_extension,
(u32_t)__ev_div_zero,
(u32_t)__ev_dc_error,
(u32_t)__ev_maligned,
struct vector_table _VectorTable _GENERIC_SECTION(.exc_vector_table) = {
(uint32_t)__reset,
(uint32_t)__memory_error,
(uint32_t)__instruction_error,
(uint32_t)__ev_machine_check,
(uint32_t)__ev_tlb_miss_i,
(uint32_t)__ev_tlb_miss_d,
(uint32_t)__ev_prot_v,
(uint32_t)__ev_privilege_v,
(uint32_t)__ev_swi,
(uint32_t)__ev_trap,
(uint32_t)__ev_extension,
(uint32_t)__ev_div_zero,
(uint32_t)__ev_dc_error,
(uint32_t)__ev_maligned,
0,
0
};

6
arch/arc/defconfig Normal file
View File

@@ -0,0 +1,6 @@
CONFIG_ARC=y
CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC=32000000
CONFIG_CPU_ARCEM4=y
CONFIG_CPU_ARCV2=y
CONFIG_ARCV2_INTERRUPT_UNIT=y
CONFIG_ARCV2_TIMER=y

View File

@@ -1,179 +0,0 @@
/*
* Copyright (c) 2014-2016 Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief Private kernel definitions
*
* This file contains private kernel structures definitions and various
* other definitions for the ARCv2 processor architecture.
*
* This file is also included by assembly language files which must #define
* _ASMLANGUAGE before including this header file. Note that kernel
* assembly source files obtain structure offset values via "absolute
* symbols" in the offsets.o module.
*/
#ifndef ZEPHYR_ARCH_ARC_INCLUDE_KERNEL_ARCH_DATA_H_
#define ZEPHYR_ARCH_ARC_INCLUDE_KERNEL_ARCH_DATA_H_
#include <toolchain.h>
#include <linker/sections.h>
#include <arch/cpu.h>
#include <vector_table.h>
#ifndef _ASMLANGUAGE
#include <kernel.h>
#include <zephyr/types.h>
#include <sys/util.h>
#include <sys/dlist.h>
#ifdef __cplusplus
extern "C" {
#endif
#ifdef CONFIG_ARC_HAS_SECURE
struct _irq_stack_frame {
u32_t lp_end;
u32_t lp_start;
u32_t lp_count;
#ifdef CONFIG_CODE_DENSITY
/*
* Currently unsupported. This is where those registers are
* automatically pushed on the stack by the CPU when taking a regular
* IRQ.
*/
u32_t ei_base;
u32_t ldi_base;
u32_t jli_base;
#endif
u32_t r0;
u32_t r1;
u32_t r2;
u32_t r3;
u32_t r4;
u32_t r5;
u32_t r6;
u32_t r7;
u32_t r8;
u32_t r9;
u32_t r10;
u32_t r11;
u32_t r12;
u32_t r13;
u32_t blink;
u32_t pc;
u32_t sec_stat;
u32_t status32;
};
#else
struct _irq_stack_frame {
u32_t r0;
u32_t r1;
u32_t r2;
u32_t r3;
u32_t r4;
u32_t r5;
u32_t r6;
u32_t r7;
u32_t r8;
u32_t r9;
u32_t r10;
u32_t r11;
u32_t r12;
u32_t r13;
u32_t blink;
u32_t lp_end;
u32_t lp_start;
u32_t lp_count;
#ifdef CONFIG_CODE_DENSITY
/*
* Currently unsupported. This is where those registers are
* automatically pushed on the stack by the CPU when taking a regular
* IRQ.
*/
u32_t ei_base;
u32_t ldi_base;
u32_t jli_base;
#endif
u32_t pc;
u32_t status32;
};
#endif
typedef struct _irq_stack_frame _isf_t;
/* callee-saved registers pushed on the stack, not in k_thread */
struct _callee_saved_stack {
u32_t r13;
u32_t r14;
u32_t r15;
u32_t r16;
u32_t r17;
u32_t r18;
u32_t r19;
u32_t r20;
u32_t r21;
u32_t r22;
u32_t r23;
u32_t r24;
u32_t r25;
u32_t r26;
u32_t fp; /* r27 */
#ifdef CONFIG_USERSPACE
#ifdef CONFIG_ARC_HAS_SECURE
u32_t user_sp;
u32_t kernel_sp;
#else
u32_t user_sp;
#endif
#endif
/* r28 is the stack pointer and saved separately */
/* r29 is ILINK and does not need to be saved */
u32_t r30;
#ifdef CONFIG_ARC_HAS_ACCL_REGS
u32_t r58;
u32_t r59;
#endif
#ifdef CONFIG_FP_SHARING
u32_t fpu_status;
u32_t fpu_ctrl;
#ifdef CONFIG_FP_FPU_DA
u32_t dpfp2h;
u32_t dpfp2l;
u32_t dpfp1h;
u32_t dpfp1l;
#endif
#endif
/*
* No need to save r31 (blink), it's either already pushed as the pc or
* blink on an irq stack frame.
*/
};
typedef struct _callee_saved_stack _callee_saved_stack_t;
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
/* stacks */
#define STACK_ALIGN_SIZE 4
#define STACK_ROUND_UP(x) ROUND_UP(x, STACK_ALIGN_SIZE)
#define STACK_ROUND_DOWN(x) ROUND_DOWN(x, STACK_ALIGN_SIZE)
#endif /* ZEPHYR_ARCH_ARC_INCLUDE_KERNEL_ARCH_DATA_H_ */

View File

@@ -1,81 +0,0 @@
/*
* Copyright (c) 2014-2016 Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief Private kernel definitions
*
* This file contains private kernel structures definitions and various
* other definitions for the ARCv2 processor architecture.
*
* This file is also included by assembly language files which must #define
* _ASMLANGUAGE before including this header file. Note that kernel
* assembly source files obtain structure offset values via "absolute
* symbols" in the offsets.o module.
*/
#ifndef ZEPHYR_ARCH_ARC_INCLUDE_KERNEL_ARCH_FUNC_H_
#define ZEPHYR_ARCH_ARC_INCLUDE_KERNEL_ARCH_FUNC_H_
#if !defined(_ASMLANGUAGE)
#include <kernel_arch_data.h>
#ifdef CONFIG_CPU_ARCV2
#include <v2/cache.h>
#include <v2/irq.h>
#endif
#ifdef __cplusplus
extern "C" {
#endif
static ALWAYS_INLINE void arch_kernel_init(void)
{
z_irq_setup();
_current_cpu->irq_stack =
Z_THREAD_STACK_BUFFER(_interrupt_stack) + CONFIG_ISR_STACK_SIZE;
}
/**
*
* @brief Indicates the interrupt number of the highest priority
* active interrupt
*
* @return IRQ number
*/
static ALWAYS_INLINE int Z_INTERRUPT_CAUSE(void)
{
u32_t irq_num = z_arc_v2_aux_reg_read(_ARC_V2_ICAUSE);
return irq_num;
}
static inline bool arch_is_in_isr(void)
{
return z_arc_v2_irq_unit_is_in_isr();
}
extern void z_thread_entry_wrapper(void);
extern void z_user_thread_entry_wrapper(void);
extern void z_arc_userspace_enter(k_thread_entry_t user_entry, void *p1,
void *p2, void *p3, u32_t stack, u32_t size);
extern void arch_switch(void *switch_to, void **switched_from);
extern void z_arc_fatal_error(unsigned int reason, const z_arch_esf_t *esf);
extern void arch_sched_ipi(void);
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_ARCH_ARC_INCLUDE_KERNEL_ARCH_FUNC_H_ */

View File

@@ -0,0 +1,46 @@
/*
* Copyright (c) 2016 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
* @brief Kernel event logger support for ARC
*/
#ifndef __KERNEL_EVENT_LOGGER_ARCH_H__
#define __KERNEL_EVENT_LOGGER_ARCH_H__
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Get the identification of the current interrupt.
*
* This routine obtains the key of the interrupt that is currently being
* processed, if it is called from an ISR context.
*
* @return The key of the interrupt that is currently being processed.
*/
int _sys_current_irq_key_get(void)
{
return _INTERRUPT_CAUSE();
}
#ifdef __cplusplus
}
#endif
#endif /* __KERNEL_EVENT_LOGGER_ARCH_H__ */

View File

@@ -0,0 +1,304 @@
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
* @brief Private nanokernel definitions
*
* This file contains private nanokernel structures definitions and various
* other definitions for the ARCv2 processor architecture.
*
* This file is also included by assembly language files which must #define
* _ASMLANGUAGE before including this header file. Note that nanokernel
* assembly source files obtain structure offset values via "absolute
* symbols" in the offsets.o module.
*/
#ifndef _NANO_PRIVATE_H
#define _NANO_PRIVATE_H
#ifdef __cplusplus
extern "C" {
#endif
#include <toolchain.h>
#include <sections.h>
#include <arch/cpu.h>
#include <vector_table.h>
#ifndef _ASMLANGUAGE
#include <nanokernel.h> /* public nanokernel API */
#include <../../../kernel/nanokernel/include/nano_internal.h>
#include <stdint.h>
#include <misc/util.h>
#include <misc/dlist.h>
#endif
#ifndef _ASMLANGUAGE
#ifdef CONFIG_THREAD_MONITOR
struct __thread_entry {
_thread_entry_t pEntry;
void *parameter1;
void *parameter2;
void *parameter3;
};
#endif /*CONFIG_THREAD_MONITOR*/
struct coop {
/*
* Saved on the stack as part of handling a regular IRQ or by the kernel
* when calling the FIRQ return code.
*/
};
struct irq_stack_frame {
uint32_t r0;
uint32_t r1;
uint32_t r2;
uint32_t r3;
uint32_t r4;
uint32_t r5;
uint32_t r6;
uint32_t r7;
uint32_t r8;
uint32_t r9;
uint32_t r10;
uint32_t r11;
uint32_t r12;
uint32_t r13;
uint32_t blink;
uint32_t lp_end;
uint32_t lp_start;
uint32_t lp_count;
#ifdef CONFIG_CODE_DENSITY
/*
* Currently unsupported. This is where those registers are automatically
* pushed on the stack by the CPU when taking a regular IRQ.
*/
uint32_t ei_base;
uint32_t ldi_base;
uint32_t jli_base;
#endif
uint32_t pc;
uint32_t status32;
};
typedef struct irq_stack_frame tISF;
struct preempt {
uint32_t sp; /* r28 */
};
typedef struct preempt tPreempt;
struct callee_saved {
uint32_t r13;
uint32_t r14;
uint32_t r15;
uint32_t r16;
uint32_t r17;
uint32_t r18;
uint32_t r19;
uint32_t r20;
uint32_t r21;
uint32_t r22;
uint32_t r23;
uint32_t r24;
uint32_t r25;
uint32_t r26;
uint32_t fp; /* r27 */
/* r28 is the stack pointer and saved separately */
/* r29 is ILINK and does not need to be saved */
uint32_t r30;
/*
* No need to save r31 (blink), it's either already pushed as the pc or
* blink on an irq stack frame.
*/
};
typedef struct callee_saved tCalleeSaved;
#endif /* _ASMLANGUAGE */
/* Bitmask definitions for the struct tcs->flags bit field */
#define FIBER 0x000
#define TASK 0x001 /* 1 = task, 0 = fiber */
#define INT_ACTIVE 0x002 /* 1 = executing context is interrupt handler */
#define EXC_ACTIVE 0x004 /* 1 = executing context is exception handler */
#define USE_FP 0x010 /* 1 = thread uses floating point unit */
#define PREEMPTIBLE 0x020 /* 1 = preemptible thread */
#define ESSENTIAL 0x200 /* 1 = system thread that must not abort */
#define NO_METRICS 0x400 /* 1 = _Swap() not to update task metrics */
/* stacks */
#define STACK_ALIGN_SIZE 4
#define STACK_ROUND_UP(x) ROUND_UP(x, STACK_ALIGN_SIZE)
#define STACK_ROUND_DOWN(x) ROUND_DOWN(x, STACK_ALIGN_SIZE)
/*
* Reason a thread has relinquished control: fibers can only be in the NONE
* or COOP state, tasks can be in any one of the four.
*/
#define _CAUSE_NONE 0
#define _CAUSE_COOP 1
#define _CAUSE_RIRQ 2
#define _CAUSE_FIRQ 3
#ifndef _ASMLANGUAGE
struct tcs {
struct tcs *link; /* node in singly-linked list
* _nanokernel.fibers
*/
uint32_t flags; /* bitmask of flags above */
uint32_t intlock_key; /* interrupt key when relinquishing control */
int relinquish_cause; /* one of the _CAUSE_xxxx definitions above */
unsigned int return_value;/* return value from _Swap */
int prio; /* fiber priority, -1 for a task */
#ifdef CONFIG_THREAD_CUSTOM_DATA
void *custom_data; /* available for custom use */
#endif
struct coop coopReg;
struct preempt preempReg;
#ifdef CONFIG_THREAD_MONITOR
struct __thread_entry *entry; /* thread entry and parameters description */
struct tcs *next_thread; /* next item in list of ALL fiber+tasks */
#endif
#ifdef CONFIG_NANO_TIMEOUTS
struct _nano_timeout nano_timeout;
#endif
#ifdef CONFIG_ERRNO
int errno_var;
#endif
#ifdef CONFIG_ARC_STACK_CHECKING
uint32_t stack_top;
#endif
#ifdef CONFIG_MICROKERNEL
void *uk_task_ptr;
#endif
};
struct s_NANO {
struct tcs *fiber; /* singly linked list of runnable fibers */
struct tcs *task; /* current task the nanokernel knows about */
struct tcs *current; /* currently scheduled thread (fiber or task) */
#ifdef CONFIG_THREAD_MONITOR
struct tcs *threads; /* singly linked list of ALL fiber+tasks */
#endif
#ifdef CONFIG_FP_SHARING
struct tcs *current_fp; /* thread (fiber or task) that owns the FP regs */
#endif
#ifdef CONFIG_SYS_POWER_MANAGEMENT
int32_t idle; /* Number of ticks for kernel idling */
#endif
char *rirq_sp; /* regular IRQ stack pointer base */
/*
* FIRQ stack pointer is installed once in the second bank's SP, so
* there is no need to track it in _nanokernel.
*/
#if defined(CONFIG_NANO_TIMEOUTS) || defined(CONFIG_NANO_TIMERS)
sys_dlist_t timeout_q;
int32_t task_timeout;
#endif
};
typedef struct s_NANO tNANO;
extern tNANO _nanokernel;
#ifdef CONFIG_CPU_ARCV2
#include <v2/cache.h>
#include <v2/irq.h>
#endif
static ALWAYS_INLINE void nanoArchInit(void)
{
_icache_setup();
_irq_setup();
}
/**
*
* @brief Set the return value for the specified fiber (inline)
*
* The register used to store the return value from a function call invocation
* is set to <value>. It is assumed that the specified <fiber> is pending, and thus
* the fiber's thread is stored in its struct tcs structure.
*
* @return N/A
*/
static ALWAYS_INLINE void fiberRtnValueSet(struct tcs *fiber, unsigned int value)
{
fiber->return_value = value;
}
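A hypothetical usage sketch (not part of this header): a kernel object's "give" path would typically store the value for a pending fiber with fiberRtnValueSet() before making it runnable again, so that the fiber sees the value as the return of its earlier _Swap() call. The wake_pending_fiber() name and the elided ready-list handling are assumptions.

static inline void wake_pending_fiber(struct tcs *fiber, void *data)
{
	fiberRtnValueSet(fiber, (unsigned int)data);
	/* ... then insert the fiber back into the _nanokernel.fiber runnable list ... */
}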
/**
*
* @brief Indicates whether the kernel is handling an interrupt
*
* @return 1 if an interrupt handler is executing, 0 otherwise
*/
static ALWAYS_INLINE int _IS_IN_ISR(void)
{
uint32_t act = _arc_v2_aux_reg_read(_ARC_V2_AUX_IRQ_ACT);
#if CONFIG_IRQ_OFFLOAD
/* Check if we're in a TRAP_S exception as well */
if (_arc_v2_aux_reg_read(_ARC_V2_STATUS32) & _ARC_V2_STATUS32_AE &&
_ARC_V2_ECR_VECTOR(_arc_v2_aux_reg_read(_ARC_V2_ECR)) == EXC_EV_TRAP) {
return 1;
}
#endif
return ((act & 0xffff) != 0);
}
/**
*
* @brief Indicates the interrupt number of the highest priority
* active interrupt
*
* @return IRQ number
*/
static ALWAYS_INLINE int _INTERRUPT_CAUSE(void)
{
uint32_t irq_num = _arc_v2_aux_reg_read(_ARC_V2_ICAUSE);
return irq_num;
}
extern void nanoCpuAtomicIdle(unsigned int);
extern void _thread_entry_wrapper(void);
static inline void _IntLibInit(void)
{
/* nothing needed, here because the kernel requires it */
}
#endif /* _ASMLANGUAGE */
#ifdef __cplusplus
}
#endif
#endif /* _NANO_PRIVATE_H */

View File

@@ -1,40 +0,0 @@
/*
* Copyright (c) 2016 Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef ZEPHYR_ARCH_ARC_INCLUDE_OFFSETS_SHORT_ARCH_H_
#define ZEPHYR_ARCH_ARC_INCLUDE_OFFSETS_SHORT_ARCH_H_
#include <offsets.h>
/* kernel */
/* nothing for now */
/* end - kernel */
/* threads */
#define _thread_offset_to_relinquish_cause \
(___thread_t_arch_OFFSET + ___thread_arch_t_relinquish_cause_OFFSET)
#define _thread_offset_to_k_stack_base \
(___thread_t_arch_OFFSET + ___thread_arch_t_k_stack_base_OFFSET)
#define _thread_offset_to_k_stack_top \
(___thread_t_arch_OFFSET + ___thread_arch_t_k_stack_top_OFFSET)
#define _thread_offset_to_u_stack_base \
(___thread_t_arch_OFFSET + ___thread_arch_t_u_stack_base_OFFSET)
#define _thread_offset_to_u_stack_top \
(___thread_t_arch_OFFSET + ___thread_arch_t_u_stack_top_OFFSET)
#define _thread_offset_to_sp \
(___thread_t_callee_saved_OFFSET + ___callee_saved_t_sp_OFFSET)
/* end - threads */
#endif /* ZEPHYR_ARCH_ARC_INCLUDE_OFFSETS_SHORT_ARCH_H_ */

View File

@@ -0,0 +1,48 @@
/*
* Copyright (c) 2016 Synopsys, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
* @brief ARC nanokernel declarations to start a task
*
* ARC-specific parts of start_task().
*
* Currently empty, only here for abstraction.
*/
#ifndef _START_TASK_ARCH__H_
#define _START_TASK_ARCH__H_
#include <toolchain.h>
#include <sections.h>
#include <micro_private.h>
#include <nano_private.h>
#include <microkernel/task.h>
#ifdef __cplusplus
extern "C" {
#endif
#define _START_TASK_ARCH(task, opt_ptr) \
do {/* nothing */ \
} while ((0))
#ifdef __cplusplus
}
#endif
#endif /* _START_TASK_ARCH__H_ */

View File

@@ -1,382 +0,0 @@
/* swap_macros.h - helper macros for context switch */
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef ZEPHYR_ARCH_ARC_INCLUDE_SWAP_MACROS_H_
#define ZEPHYR_ARCH_ARC_INCLUDE_SWAP_MACROS_H_
#include <kernel_structs.h>
#include <offsets_short.h>
#include <toolchain.h>
#include <arch/cpu.h>
#ifdef _ASMLANGUAGE
/* entering this macro, current is in r2 */
.macro _save_callee_saved_regs
sub_s sp, sp, ___callee_saved_stack_t_SIZEOF
/* save regs on stack */
st_s r13, [sp, ___callee_saved_stack_t_r13_OFFSET]
st_s r14, [sp, ___callee_saved_stack_t_r14_OFFSET]
st_s r15, [sp, ___callee_saved_stack_t_r15_OFFSET]
st r16, [sp, ___callee_saved_stack_t_r16_OFFSET]
st r17, [sp, ___callee_saved_stack_t_r17_OFFSET]
st r18, [sp, ___callee_saved_stack_t_r18_OFFSET]
st r19, [sp, ___callee_saved_stack_t_r19_OFFSET]
st r20, [sp, ___callee_saved_stack_t_r20_OFFSET]
st r21, [sp, ___callee_saved_stack_t_r21_OFFSET]
st r22, [sp, ___callee_saved_stack_t_r22_OFFSET]
st r23, [sp, ___callee_saved_stack_t_r23_OFFSET]
st r24, [sp, ___callee_saved_stack_t_r24_OFFSET]
st r25, [sp, ___callee_saved_stack_t_r25_OFFSET]
st r26, [sp, ___callee_saved_stack_t_r26_OFFSET]
st fp, [sp, ___callee_saved_stack_t_fp_OFFSET]
#ifdef CONFIG_USERSPACE
#ifdef CONFIG_ARC_HAS_SECURE
#ifdef CONFIG_ARC_SECURE_FIRMWARE
lr r13, [_ARC_V2_SEC_U_SP]
st_s r13, [sp, ___callee_saved_stack_t_user_sp_OFFSET]
lr r13, [_ARC_V2_SEC_K_SP]
st_s r13, [sp, ___callee_saved_stack_t_kernel_sp_OFFSET]
#else
lr r13, [_ARC_V2_USER_SP]
st_s r13, [sp, ___callee_saved_stack_t_user_sp_OFFSET]
lr r13, [_ARC_V2_KERNEL_SP]
st_s r13, [sp, ___callee_saved_stack_t_kernel_sp_OFFSET]
#endif /* CONFIG_ARC_SECURE_FIRMWARE */
#else
lr r13, [_ARC_V2_USER_SP]
st_s r13, [sp, ___callee_saved_stack_t_user_sp_OFFSET]
#endif
#endif
st r30, [sp, ___callee_saved_stack_t_r30_OFFSET]
#ifdef CONFIG_ARC_HAS_ACCL_REGS
st r58, [sp, ___callee_saved_stack_t_r58_OFFSET]
st r59, [sp, ___callee_saved_stack_t_r59_OFFSET]
#endif
#ifdef CONFIG_FP_SHARING
ld_s r13, [r2, ___thread_base_t_user_options_OFFSET]
/* K_FP_REGS is bit 1 */
bbit0 r13, 1, 1f
lr r13, [_ARC_V2_FPU_STATUS]
st_s r13, [sp, ___callee_saved_stack_t_fpu_status_OFFSET]
lr r13, [_ARC_V2_FPU_CTRL]
st_s r13, [sp, ___callee_saved_stack_t_fpu_ctrl_OFFSET]
#ifdef CONFIG_FP_FPU_DA
lr r13, [_ARC_V2_FPU_DPFP1L]
st_s r13, [sp, ___callee_saved_stack_t_dpfp1l_OFFSET]
lr r13, [_ARC_V2_FPU_DPFP1H]
st_s r13, [sp, ___callee_saved_stack_t_dpfp1h_OFFSET]
lr r13, [_ARC_V2_FPU_DPFP2L]
st_s r13, [sp, ___callee_saved_stack_t_dpfp2l_OFFSET]
lr r13, [_ARC_V2_FPU_DPFP2H]
st_s r13, [sp, ___callee_saved_stack_t_dpfp2h_OFFSET]
#endif
1 :
#endif
/* save stack pointer in struct k_thread */
st sp, [r2, _thread_offset_to_sp]
.endm
/* entering this macro, current is in r2 */
.macro _load_callee_saved_regs
/* restore stack pointer from struct k_thread */
ld sp, [r2, _thread_offset_to_sp]
#ifdef CONFIG_ARC_HAS_ACCL_REGS
ld r58, [sp, ___callee_saved_stack_t_r58_OFFSET]
ld r59, [sp, ___callee_saved_stack_t_r59_OFFSET]
#endif
#ifdef CONFIG_FP_SHARING
ld_s r13, [r2, ___thread_base_t_user_options_OFFSET]
/* K_FP_REGS is bit 1 */
bbit0 r13, 1, 2f
ld_s r13, [sp, ___callee_saved_stack_t_fpu_status_OFFSET]
sr r13, [_ARC_V2_FPU_STATUS]
ld_s r13, [sp, ___callee_saved_stack_t_fpu_ctrl_OFFSET]
sr r13, [_ARC_V2_FPU_CTRL]
#ifdef CONFIG_FP_FPU_DA
ld_s r13, [sp, ___callee_saved_stack_t_dpfp1l_OFFSET]
sr r13, [_ARC_V2_FPU_DPFP1L]
ld_s r13, [sp, ___callee_saved_stack_t_dpfp1h_OFFSET]
sr r13, [_ARC_V2_FPU_DPFP1H]
ld_s r13, [sp, ___callee_saved_stack_t_dpfp2l_OFFSET]
sr r13, [_ARC_V2_FPU_DPFP2L]
ld_s r13, [sp, ___callee_saved_stack_t_dpfp2h_OFFSET]
sr r13, [_ARC_V2_FPU_DPFP2H]
#endif
2 :
#endif
#ifdef CONFIG_USERSPACE
#ifdef CONFIG_ARC_HAS_SECURE
#ifdef CONFIG_ARC_SECURE_FIRMWARE
ld_s r13, [sp, ___callee_saved_stack_t_user_sp_OFFSET]
sr r13, [_ARC_V2_SEC_U_SP]
ld_s r13, [sp, ___callee_saved_stack_t_kernel_sp_OFFSET]
sr r13, [_ARC_V2_SEC_K_SP]
#else
ld_s r13, [sp, ___callee_saved_stack_t_user_sp_OFFSET]
sr r13, [_ARC_V2_USER_SP]
ld_s r13, [sp, ___callee_saved_stack_t_kernel_sp_OFFSET]
sr r13, [_ARC_V2_KERNEL_SP]
#endif /* CONFIG_ARC_SECURE_FIRMWARE */
#else
ld_s r13, [sp, ___callee_saved_stack_t_user_sp_OFFSET]
sr r13, [_ARC_V2_USER_SP]
#endif
#endif
ld_s r13, [sp, ___callee_saved_stack_t_r13_OFFSET]
ld_s r14, [sp, ___callee_saved_stack_t_r14_OFFSET]
ld_s r15, [sp, ___callee_saved_stack_t_r15_OFFSET]
ld r16, [sp, ___callee_saved_stack_t_r16_OFFSET]
ld r17, [sp, ___callee_saved_stack_t_r17_OFFSET]
ld r18, [sp, ___callee_saved_stack_t_r18_OFFSET]
ld r19, [sp, ___callee_saved_stack_t_r19_OFFSET]
ld r20, [sp, ___callee_saved_stack_t_r20_OFFSET]
ld r21, [sp, ___callee_saved_stack_t_r21_OFFSET]
ld r22, [sp, ___callee_saved_stack_t_r22_OFFSET]
ld r23, [sp, ___callee_saved_stack_t_r23_OFFSET]
ld r24, [sp, ___callee_saved_stack_t_r24_OFFSET]
ld r25, [sp, ___callee_saved_stack_t_r25_OFFSET]
ld r26, [sp, ___callee_saved_stack_t_r26_OFFSET]
ld fp, [sp, ___callee_saved_stack_t_fp_OFFSET]
ld r30, [sp, ___callee_saved_stack_t_r30_OFFSET]
add_s sp, sp, ___callee_saved_stack_t_SIZEOF
.endm
.macro _discard_callee_saved_regs
add_s sp, sp, ___callee_saved_stack_t_SIZEOF
.endm
/*
* Must be called with interrupts locked or in P0.
* Upon exit, sp will be pointing to the stack frame.
*/
.macro _create_irq_stack_frame
sub_s sp, sp, ___isf_t_SIZEOF
st blink, [sp, ___isf_t_blink_OFFSET]
/* store these right away so we can use them if needed */
st_s r13, [sp, ___isf_t_r13_OFFSET]
st_s r12, [sp, ___isf_t_r12_OFFSET]
st r11, [sp, ___isf_t_r11_OFFSET]
st r10, [sp, ___isf_t_r10_OFFSET]
st r9, [sp, ___isf_t_r9_OFFSET]
st r8, [sp, ___isf_t_r8_OFFSET]
st r7, [sp, ___isf_t_r7_OFFSET]
st r6, [sp, ___isf_t_r6_OFFSET]
st r5, [sp, ___isf_t_r5_OFFSET]
st r4, [sp, ___isf_t_r4_OFFSET]
st_s r3, [sp, ___isf_t_r3_OFFSET]
st_s r2, [sp, ___isf_t_r2_OFFSET]
st_s r1, [sp, ___isf_t_r1_OFFSET]
st_s r0, [sp, ___isf_t_r0_OFFSET]
mov r0, lp_count
st_s r0, [sp, ___isf_t_lp_count_OFFSET]
lr r1, [_ARC_V2_LP_START]
lr r0, [_ARC_V2_LP_END]
st_s r1, [sp, ___isf_t_lp_start_OFFSET]
st_s r0, [sp, ___isf_t_lp_end_OFFSET]
#ifdef CONFIG_CODE_DENSITY
lr r1, [_ARC_V2_JLI_BASE]
lr r0, [_ARC_V2_LDI_BASE]
lr r2, [_ARC_V2_EI_BASE]
st_s r1, [sp, ___isf_t_jli_base_OFFSET]
st_s r0, [sp, ___isf_t_ldi_base_OFFSET]
st_s r2, [sp, ___isf_t_ei_base_OFFSET]
#endif
.endm
/*
* Must be called with interrupts locked or in P0.
* sp must be pointing to the stack frame.
*/
.macro _pop_irq_stack_frame
ld blink, [sp, ___isf_t_blink_OFFSET]
#ifdef CONFIG_CODE_DENSITY
ld_s r1, [sp, ___isf_t_jli_base_OFFSET]
ld_s r0, [sp, ___isf_t_ldi_base_OFFSET]
ld_s r2, [sp, ___isf_t_ei_base_OFFSET]
sr r1, [_ARC_V2_JLI_BASE]
sr r0, [_ARC_V2_LDI_BASE]
sr r2, [_ARC_V2_EI_BASE]
#endif
ld_s r0, [sp, ___isf_t_lp_count_OFFSET]
mov lp_count, r0
ld_s r1, [sp, ___isf_t_lp_start_OFFSET]
ld_s r0, [sp, ___isf_t_lp_end_OFFSET]
sr r1, [_ARC_V2_LP_START]
sr r0, [_ARC_V2_LP_END]
ld_s r13, [sp, ___isf_t_r13_OFFSET]
ld_s r12, [sp, ___isf_t_r12_OFFSET]
ld r11, [sp, ___isf_t_r11_OFFSET]
ld r10, [sp, ___isf_t_r10_OFFSET]
ld r9, [sp, ___isf_t_r9_OFFSET]
ld r8, [sp, ___isf_t_r8_OFFSET]
ld r7, [sp, ___isf_t_r7_OFFSET]
ld r6, [sp, ___isf_t_r6_OFFSET]
ld r5, [sp, ___isf_t_r5_OFFSET]
ld r4, [sp, ___isf_t_r4_OFFSET]
ld_s r3, [sp, ___isf_t_r3_OFFSET]
ld_s r2, [sp, ___isf_t_r2_OFFSET]
ld_s r1, [sp, ___isf_t_r1_OFFSET]
ld_s r0, [sp, ___isf_t_r0_OFFSET]
/*
* All GPRs have been reloaded; the only register that is still usable is
* ilink.
*
* The pc and status32 values will still be on the stack. We cannot
* pop them yet because the callers of _pop_irq_stack_frame must reload
* status32 differently depending on the execution context they are
* running in (arch_switch(), firq or exception).
*/
add_s sp, sp, ___isf_t_SIZEOF
.endm
/*
* To use this macro, r2 must hold the thread struct pointer of
* _kernel.current. r3 is used as a scratch register.
*/
.macro _load_stack_check_regs
#if defined(CONFIG_ARC_SECURE_FIRMWARE)
ld r3, [r2, _thread_offset_to_k_stack_base]
sr r3, [_ARC_V2_S_KSTACK_BASE]
ld r3, [r2, _thread_offset_to_k_stack_top]
sr r3, [_ARC_V2_S_KSTACK_TOP]
#ifdef CONFIG_USERSPACE
ld r3, [r2, _thread_offset_to_u_stack_base]
sr r3, [_ARC_V2_S_USTACK_BASE]
ld r3, [r2, _thread_offset_to_u_stack_top]
sr r3, [_ARC_V2_S_USTACK_TOP]
#endif
#else /* CONFIG_ARC_HAS_SECURE */
ld r3, [r2, _thread_offset_to_k_stack_base]
sr r3, [_ARC_V2_KSTACK_BASE]
ld r3, [r2, _thread_offset_to_k_stack_top]
sr r3, [_ARC_V2_KSTACK_TOP]
#ifdef CONFIG_USERSPACE
ld r3, [r2, _thread_offset_to_u_stack_base]
sr r3, [_ARC_V2_USTACK_BASE]
ld r3, [r2, _thread_offset_to_u_stack_top]
sr r3, [_ARC_V2_USTACK_TOP]
#endif
#endif /* CONFIG_ARC_SECURE_FIRMWARE */
.endm
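/*
 * Illustrative usage sketch (not part of the original file): on a
 * uniprocessor build the current thread pointer can be loaded into r2
 * before invoking the macro; r3 is clobbered as scratch. The
 * _kernel_offset_to_current offset name is assumed here.
 *
 *     mov r2, _kernel
 *     ld r2, [r2, _kernel_offset_to_current]
 *     _load_stack_check_regs
 */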
/* Check and increment the interrupt nest counter.
* After the increment, check whether the nest counter == 1;
* the result is reflected in the EQ bit of status32.
*/
.macro _check_and_inc_int_nest_counter reg1 reg2
#ifdef CONFIG_SMP
_get_cpu_id \reg1
ld.as \reg1, [@_curr_cpu, \reg1]
ld \reg2, [\reg1, ___cpu_t_nested_OFFSET]
#else
mov \reg1, _kernel
ld \reg2, [\reg1, ___kernel_t_nested_OFFSET]
#endif
add \reg2, \reg2, 1
#ifdef CONFIG_SMP
st \reg2, [\reg1, ___cpu_t_nested_OFFSET]
#else
st \reg2, [\reg1, ___kernel_t_nested_OFFSET]
#endif
cmp \reg2, 1
.endm
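/*
 * Illustrative usage sketch (not part of the original file); r0 and r1
 * are assumed to be free scratch registers at this point:
 *
 *     _check_and_inc_int_nest_counter r0, r1
 *     bne nested_interrupt
 *
 * The branch is taken when the nest counter is greater than 1 (already
 * nested); falling through means this is the outermost interrupt
 * (counter == 1), e.g. the point where code would switch to the
 * interrupt stack.
 */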
/* decrement the interrupt nest counter */
.macro _dec_int_nest_counter reg1 reg2
#ifdef CONFIG_SMP
_get_cpu_id \reg1
ld.as \reg1, [@_curr_cpu, \reg1]
ld \reg2, [\reg1, ___cpu_t_nested_OFFSET]
#else
mov \reg1, _kernel
ld \reg2, [\reg1, ___kernel_t_nested_OFFSET]
#endif
sub \reg2, \reg2, 1
#ifdef CONFIG_SMP
st \reg2, [\reg1, ___cpu_t_nested_OFFSET]
#else
st \reg2, [\reg1, ___kernel_t_nested_OFFSET]
#endif
.endm
/* If multiple bits in IRQ_ACT are set, i.e. the last set bit != the first
* set bit, we are in a nested interrupt. The result is reflected in the
* EQ bit of status32.
*/
.macro _check_nest_int_by_irq_act reg1, reg2
lr \reg1, [_ARC_V2_AUX_IRQ_ACT]
#ifdef CONFIG_ARC_SECURE_FIRMWARE
and \reg1, \reg1, ((1 << ARC_N_IRQ_START_LEVEL) - 1)
#else
and \reg1, \reg1, 0xffff
#endif
ffs \reg2, \reg1
fls \reg1, \reg1
cmp \reg1, \reg2
.endm
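/*
 * Illustrative usage sketch (not part of the original file); r0 and r1
 * are assumed to be free scratch registers:
 *
 *     _check_nest_int_by_irq_act r0, r1
 *     bne nested_interrupt
 *
 * The branch is taken when more than one bit is set in IRQ_ACT (nested
 * interrupt); falling through means only a single interrupt level is
 * active.
 */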
.macro _get_cpu_id reg
lr \reg, [_ARC_V2_IDENTITY]
xbfu \reg, \reg, 0xe8
.endm
.macro _get_curr_cpu_irq_stack irq_sp
#ifdef CONFIG_SMP
_get_cpu_id \irq_sp
ld.as \irq_sp, [@_curr_cpu, \irq_sp]
ld \irq_sp, [\irq_sp, ___cpu_t_irq_stack_OFFSET]
#else
mov \irq_sp, _kernel
ld \irq_sp, [\irq_sp, _kernel_offset_to_irq_stack]
#endif
.endm
/* macro to push an aux register onto the stack via a scratch core register */
.macro PUSHAX reg aux
lr \reg, [\aux]
st.a \reg, [sp, -4]
.endm
/* macro to pop an aux register from the stack via a scratch core register */
.macro POPAX reg aux
ld.ab \reg, [sp, 4]
sr \reg, [\aux]
.endm
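/*
 * Illustrative pairing of PUSHAX/POPAX (not part of the original file);
 * r13 is assumed to be a free scratch register, and aux registers must
 * be popped in the reverse order of the pushes:
 *
 *     PUSHAX r13, _ARC_V2_LP_START
 *     PUSHAX r13, _ARC_V2_LP_END
 *     ...
 *     POPAX r13, _ARC_V2_LP_END
 *     POPAX r13, _ARC_V2_LP_START
 */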
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_ARCH_ARC_INCLUDE_SWAP_MACROS_H_ */


@@ -1,59 +1,68 @@
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
* @brief Cache helper functions and defines (ARC)
* @brief Cache helper functions (ARC)
*
* This file contains cache related functions and definitions for the
* ARCv2 processor architecture.
* This file contains private nanokernel structures definitions and various
* other definitions for the ARCv2 processor architecture.
*/
#ifndef ZEPHYR_ARCH_ARC_INCLUDE_V2_CACHE_H_
#define ZEPHYR_ARCH_ARC_INCLUDE_V2_CACHE_H_
#ifndef _ARCV2_CACHE__H_
#define _ARCV2_CACHE__H_
#include <arch/cpu.h>
#ifndef _ASMLANGUAGE
#ifdef __cplusplus
extern "C" {
#endif
/* i-cache defines for IC_CTRL register */
#define IC_CACHE_ENABLE 0x00
#define IC_CACHE_DISABLE 0x01
#define IC_CACHE_DIRECT 0x00
#define IC_CACHE_INDIRECT 0x20
#ifndef _ASMLANGUAGE
#define CACHE_ENABLE 0x00
#define CACHE_DISABLE 0x01
#define CACHE_DIRECT 0x00
#define CACHE_CACHE_CONTROLLED 0x20
/*
* @brief Initialize the I-cache
* @brief Sets the I-cache
*
* Enables the i-cache and sets it to direct access mode.
* Enables cache and sets the direct access.
*/
static ALWAYS_INLINE void z_icache_setup(void)
static ALWAYS_INLINE void _icache_setup(void)
{
u32_t icache_config = (
IC_CACHE_DIRECT | /* direct mapping (one-way assoc.) */
IC_CACHE_ENABLE /* i-cache enabled */
uint32_t icache_config = (
CACHE_DIRECT | /* direct mapping (one-way assoc.) */
CACHE_ENABLE /* i-cache enabled */
);
u32_t val;
uint32_t val;
val = z_arc_v2_aux_reg_read(_ARC_V2_I_CACHE_BUILD);
val = _arc_v2_aux_reg_read(_ARC_V2_I_CACHE_BUILD);
val &= 0xff;
if (val != 0U) { /* is i-cache present? */
/* configure i-cache */
z_arc_v2_aux_reg_write(_ARC_V2_IC_CTRL, icache_config);
if (val != 0) {
/* configure i-cache if present */
_arc_v2_aux_reg_write(_ARC_V2_IC_CTRL, icache_config);
}
}
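/*
 * Illustrative call site (not part of this diff): typically invoked once
 * from early architecture initialization code, before the i-cache is
 * relied upon:
 *
 *     z_icache_setup();
 */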
#endif /* _ASMLANGUAGE */
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_ARCH_ARC_INCLUDE_V2_CACHE_H_ */
#endif /* _ARCV2_CACHE__H_ */


@@ -1,21 +1,29 @@
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
* @brief Interrupt helper functions (ARC)
*
* This file contains private kernel structures definitions and various
* This file contains private nanokernel structures definitions and various
* other definitions for the ARCv2 processor architecture.
*/
#ifndef ZEPHYR_ARCH_ARC_INCLUDE_V2_IRQ_H_
#define ZEPHYR_ARCH_ARC_INCLUDE_V2_IRQ_H_
#include <arch/cpu.h>
#ifndef _ARCV2_IRQ__H_
#define _ARCV2_IRQ__H_
#ifdef __cplusplus
extern "C" {
@@ -23,56 +31,36 @@ extern "C" {
#define _ARC_V2_AUX_IRQ_CTRL_BLINK (1 << 9)
#define _ARC_V2_AUX_IRQ_CTRL_LOOP_REGS (1 << 10)
#define _ARC_V2_AUX_IRQ_CTRL_U (1 << 11)
#define _ARC_V2_AUX_IRQ_CTRL_LP (1 << 13)
#define _ARC_V2_AUX_IRQ_CTRL_14_REGS 7
#define _ARC_V2_AUX_IRQ_CTRL_16_REGS 8
#define _ARC_V2_AUX_IRQ_CTRL_32_REGS 16
#ifdef CONFIG_ARC_SECURE_FIRMWARE
#define _ARC_V2_DEF_IRQ_LEVEL (ARC_N_IRQ_START_LEVEL - 1)
#else
#define _ARC_V2_DEF_IRQ_LEVEL (CONFIG_NUM_IRQ_PRIO_LEVELS - 1)
#endif
#define _ARC_V2_WAKE_IRQ_LEVEL _ARC_V2_DEF_IRQ_LEVEL
/*
* INIT_IRQ_LOCK_KEY is the initial interrupt level setting of a thread.
* It is configured by the seti instruction when a thread starts to run,
* i.e., in z_thread_entry_wrapper and z_user_thread_entry_wrapper.
*/
#define _ARC_V2_INIT_IRQ_LOCK_KEY (0x10 | _ARC_V2_DEF_IRQ_LEVEL)
#define _ARC_V2_DEF_IRQ_LEVEL 15
#define _ARC_V2_WAKE_IRQ_LEVEL 15
#ifndef _ASMLANGUAGE
extern K_THREAD_STACK_DEFINE(_interrupt_stack, CONFIG_ISR_STACK_SIZE);
extern void _firq_stack_setup(void);
extern char _interrupt_stack[];
/*
* z_irq_setup
* _irq_setup
*
* Configures interrupt handling parameters
*/
static ALWAYS_INLINE void z_irq_setup(void)
static ALWAYS_INLINE void _irq_setup(void)
{
u32_t aux_irq_ctrl_value = (
uint32_t aux_irq_ctrl_value = (
_ARC_V2_AUX_IRQ_CTRL_LOOP_REGS | /* save lp_xxx registers */
#ifdef CONFIG_CODE_DENSITY
_ARC_V2_AUX_IRQ_CTRL_LP | /* save code density registers */
#endif
_ARC_V2_AUX_IRQ_CTRL_BLINK | /* save blink */
_ARC_V2_AUX_IRQ_CTRL_14_REGS /* save r0 -> r13 (caller-saved) */
);
z_arc_cpu_sleep_mode = _ARC_V2_WAKE_IRQ_LEVEL;
nano_cpu_sleep_mode = _ARC_V2_WAKE_IRQ_LEVEL;
_arc_v2_aux_reg_write(_ARC_V2_AUX_IRQ_CTRL, aux_irq_ctrl_value);
#ifdef CONFIG_ARC_NORMAL_FIRMWARE
/* normal mode cannot write irq_ctrl, ignore it */
aux_irq_ctrl_value = aux_irq_ctrl_value;
#else
z_arc_v2_aux_reg_write(_ARC_V2_AUX_IRQ_CTRL, aux_irq_ctrl_value);
#endif
_nanokernel.rirq_sp = _interrupt_stack + CONFIG_ISR_STACK_SIZE;
_firq_stack_setup();
}
#endif /* _ASMLANGUAGE */
@@ -81,4 +69,4 @@ static ALWAYS_INLINE void z_irq_setup(void)
}
#endif
#endif /* ZEPHYR_ARCH_ARC_INCLUDE_V2_IRQ_H_ */
#endif /* _ARCV2_IRQ__H_ */


@@ -1,7 +1,17 @@
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
@@ -18,15 +28,20 @@
* Refer to the ARCv2 manual for an explanation of the exceptions.
*/
#ifndef ZEPHYR_ARCH_ARC_INCLUDE_VECTOR_TABLE_H_
#define ZEPHYR_ARCH_ARC_INCLUDE_VECTOR_TABLE_H_
#ifndef _VECTOR_TABLE__H_
#define _VECTOR_TABLE__H_
#ifdef __cplusplus
extern "C" {
#endif
#define EXC_EV_TRAP 0x9
#ifdef _ASMLANGUAGE
#include <board.h>
#include <toolchain.h>
#include <linker/sections.h>
#include <sections.h>
GTEXT(__start)
GTEXT(_VectorTable)
@@ -51,10 +66,6 @@ GTEXT(_isr_wrapper)
#else
#ifdef __cplusplus
extern "C" {
#endif
extern void __reset(void);
extern void __memory_error(void);
extern void __instruction_error(void);
@@ -70,10 +81,10 @@ extern void __ev_div_zero(void);
extern void __ev_dc_error(void);
extern void __ev_maligned(void);
#endif /* _ASMLANGUAGE */
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_ARCH_ARC_INCLUDE_VECTOR_TABLE_H_ */
#endif /* _VECTOR_TABLE__H_ */


@@ -0,0 +1,8 @@
ccflags-y +=-I$(srctree)/arch/arc/soc/
ccflags-y +=-I$(srctree)/include
ccflags-y +=-I$(srctree)/include/drivers
ccflags-y +=-I$(srctree)/drivers
asflags-y := ${ccflags-y}
obj-y = soc.o soc_config.o


@@ -0,0 +1,260 @@
#
# Copyright (c) 2014 Wind River Systems, Inc.
# Copyright (c) 2016 Synopsys, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
if SOC_EM11D
config SOC
default em11d
config NUM_IRQ_PRIO_LEVELS
# This processor supports 4 priority levels:
# 0 for Fast Interrupts (FIRQs) and 1-3 for Regular Interrupts (IRQs).
# TODO: But regular irq nesting is not implemented --
# so this must be 2 for now.
default 2
config NUM_REGULAR_IRQ_PRIO_LEVELS
# This processor supports 3 Regular Interrupt priority levels (1-3).
# TODO: But regular irq nesting is not implemented -- so use 1.
default 1
config NUM_IRQS
# must be > the highest interrupt number used
default 36
config SYS_CLOCK_HW_CYCLES_PER_SEC
default 20000000
config HARVARD
def_bool n
config FLASH_BASE_ADDRESS
default 0x00000000
config FLASH_SIZE
default 0
# em11d has no FLASH so size is 0.
config SRAM_BASE_ADDRESS
default 0x10000000 if NSIM
default 0x10000000
config SRAM_SIZE
default 131072 if NSIM
default 131072
config ICCM_BASE_ADDRESS
default 0x00000000
config ICCM_SIZE
default 64
config DCCM_BASE_ADDRESS
default 0x80000000
config DCCM_SIZE
default 64
if GPIO
config GPIO_DW
def_bool y
if GPIO_DW
config GPIO_DW_0
def_bool y
if GPIO_DW_0
config GPIO_DW_0_NAME
default "GPIO_PORTA"
config GPIO_DW_0_IRQ_PRI
default 1
endif # GPIO_DW_0
config GPIO_DW_1
def_bool y
if GPIO_DW_1
config GPIO_DW_1_NAME
default "GPIO_PORTB"
config GPIO_DW_1_IRQ_PRI
default 1
endif # GPIO_DW_1
config GPIO_DW_2
def_bool y
if GPIO_DW_2
config GPIO_DW_2_IRQ_PRI
default 1
config GPIO_DW_2_NAME
default "GPIO_PORTC"
endif # GPIO_DW_2
config GPIO_DW_3
def_bool y
if GPIO_DW_3
config GPIO_DW_3_IRQ_PRI
default 1
config GPIO_DW_3_NAME
default "GPIO_PORTD"
endif # GPIO_DW_3
endif # GPIO_DW
endif # GPIO
if I2C
config I2C_CLOCK_SPEED
default 100
config I2C_DW
def_bool y
if I2C_DW
config I2C_0
def_bool y
if I2C_0
config I2C_0_NAME
default "I2C_0"
config I2C_0_DEFAULT_CFG
default 0x3
config I2C_0_IRQ_PRI
default 1
endif # I2C_0
config I2C_1
def_bool y
if I2C_1
config I2C_1_NAME
default "I2C_1"
config I2C_1_DEFAULT_CFG
default 0x3
config I2C_1_IRQ_PRI
default 1
endif # I2C_1
endif # I2C_DW
endif # I2C
if UART_NS16550
config UART_NS16550_PORT_0
def_bool n
if UART_NS16550_PORT_0
config UART_NS16550_PORT_0_NAME
default "UART_0"
config UART_NS16550_PORT_0_IRQ_PRI
default 1
config UART_NS16550_PORT_0_BAUD_RATE
default 115200
config UART_NS16550_PORT_0_OPTIONS
default 0
endif # UART_NS16550_PORT_0
config UART_NS16550_PORT_1
def_bool y
if UART_NS16550_PORT_1
config UART_NS16550_PORT_1_NAME
default "UART_1"
config UART_NS16550_PORT_1_IRQ_PRI
default 1
config UART_NS16550_PORT_1_BAUD_RATE
default 115200
config UART_NS16550_PORT_1_OPTIONS
default 0
endif # UART_NS16550_PORT_1
endif # UART_NS16550
if UART_CONSOLE
config UART_CONSOLE_ON_DEV_NAME
default "UART_1"
endif
if SPI
config SPI_DW
def_bool y
if SPI_DW
config SPI_DW_CLOCK_GATE
def_bool n
config SPI_DW_FIFO_DEPTH
default 32
config SPI_DW_ARC_AUX_REGS
def_bool n
config SPI_0
def_bool y
if SPI_0
config SPI_0_IRQ_PRI
default 0
endif # SPI_0
config SPI_1
def_bool y
if SPI_1
config SPI_1_IRQ_PRI
default 0
endif # SPI_1
endif # SPI_DW
endif # SPI
endif #SOC_EM11D


@@ -0,0 +1,3 @@
config SOC_EM11D
bool "Synopsys ARC EM11D"


@@ -0,0 +1,2 @@
soc-cflags = $(call cc-option,-mARCv2EM) \
$(call cc-option,-mav2em,) $(call cc-option,-mno-sdata)


@@ -0,0 +1,42 @@
/*
* Copyright (c) 2016 Synopsys, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @brief Linker script for the Synopsys EM Starterkit v2.2 EM11D platform.
*/
/*
* DRAM base address and size
*
* DRAM includes the exception vector table at reset, which is at
* the beginning of the region.
*/
#define SRAM_START CONFIG_SRAM_BASE_ADDRESS
#define SRAM_SIZE CONFIG_SRAM_SIZE
/* TODO: Using SRAM config for now, even though this is really DRAM. */
/* Instruction Closely Coupled Memory (ICCM) base address and size */
#define ICCM_START CONFIG_ICCM_BASE_ADDRESS
#define ICCM_SIZE CONFIG_ICCM_SIZE
/*
* DCCM base address and size. DCCM is the data memory.
*/
/* Data Closely Coupled Memory (DCCM) base address and size */
#define DCCM_START CONFIG_DCCM_BASE_ADDRESS
#define DCCM_SIZE CONFIG_DCCM_SIZE
#include <arch/arc/v2/linker.ld>

Some files were not shown because too many files have changed in this diff.