scripts: ci: test_plan: fix ruff reports

Fix problems reported by `ruff` and remove exclusions from the config
file.

Signed-off-by: Jordan Yates <jordan@embeint.com>
This commit is contained in:
Jordan Yates
2025-11-19 11:51:15 +10:00
committed by Johan Hedberg
parent 2186558f37
commit 3f77560b92
2 changed files with 199 additions and 122 deletions

View File

@@ -273,17 +273,6 @@
"UP031", # https://docs.astral.sh/ruff/rules/printf-string-formatting
"UP032", # https://docs.astral.sh/ruff/rules/f-string
]
"./scripts/ci/test_plan.py" = [
"B006", # https://docs.astral.sh/ruff/rules/mutable-argument-default
"E401", # https://docs.astral.sh/ruff/rules/multiple-imports-on-one-line
"E402", # https://docs.astral.sh/ruff/rules/module-import-not-at-top-of-file
"E501", # https://docs.astral.sh/ruff/rules/line-too-long
"F541", # https://docs.astral.sh/ruff/rules/f-string-missing-placeholders
"I001", # https://docs.astral.sh/ruff/rules/unsorted-imports
"SIM102", # https://docs.astral.sh/ruff/rules/collapsible-if
"UP015", # https://docs.astral.sh/ruff/rules/redundant-open-modes
"UP032", # https://docs.astral.sh/ruff/rules/f-string
]
"./scripts/ci/upload_test_results_es.py" = [
"E501", # https://docs.astral.sh/ruff/rules/line-too-long
"E713", # https://docs.astral.sh/ruff/rules/not-in-test
@@ -1152,7 +1141,6 @@ exclude = [
"./scripts/build/uf2conv.py",
"./scripts/check_maintainers.py",
"./scripts/ci/set_assignees.py",
"./scripts/ci/test_plan.py",
"./scripts/ci/twister_report_analyzer.py",
"./scripts/ci/upload_test_results_es.py",
"./scripts/coredump/coredump_gdbserver.py",

View File

@@ -4,16 +4,18 @@
# A script to generate twister options based on modified files.
import re, os
import argparse
import yaml
import fnmatch
import subprocess
import glob
import json
import logging
import os
import re
import subprocess
import sys
import glob
from pathlib import Path
import yaml
from git import Repo
from west.manifest import Manifest
@@ -35,9 +37,8 @@ logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO)
logging.getLogger("pykwalify.core").setLevel(50)
sys.path.append(os.path.join(zephyr_base, 'scripts'))
import list_boards
from pylib.twister.twisterlib.statuses import TwisterStatus
import list_boards # noqa: E402
from pylib.twister.twisterlib.statuses import TwisterStatus # noqa: E402
def _get_match_fn(globs, regexes):
@@ -57,8 +58,7 @@ def _get_match_fn(globs, regexes):
glob_regexes = []
for glob in globs:
# Construct a regex equivalent to the glob
glob_regex = glob.replace(".", "\\.").replace("*", "[^/]*") \
.replace("?", "[^/]")
glob_regex = glob.replace(".", "\\.").replace("*", "[^/]*").replace("?", "[^/]")
if not glob.endswith("/"):
# Require a full match for globs that don't end in /
@@ -77,6 +77,7 @@ def _get_match_fn(globs, regexes):
return re.compile(regex).search
class Tag:
"""
Represents an entry for a tag in tags.yaml.
@@ -91,18 +92,33 @@ class Tag:
Text from 'description' key, or None if the area has no 'description'
key
"""
def _contains(self, path):
# Returns True if the area contains 'path', and False otherwise
return self._match_fn and self._match_fn(path) and not \
(self._exclude_match_fn and self._exclude_match_fn(path))
return (
self._match_fn
and self._match_fn(path)
and not (self._exclude_match_fn and self._exclude_match_fn(path))
)
def __repr__(self):
return "<Tag {}>".format(self.name)
return f"<Tag {self.name}>"
class Filters:
def __init__(self, modified_files, ignore_path, alt_tags, testsuite_root,
pull_request=False, platforms=[], detailed_test_id=False, quarantine_list=None, tc_roots_th=20):
def __init__(
self,
modified_files,
ignore_path,
alt_tags,
testsuite_root,
pull_request=False,
platforms=None,
detailed_test_id=False,
quarantine_list=None,
tc_roots_th=20,
):
self.modified_files = modified_files
self.testsuite_root = testsuite_root
self.resolved_files = []
@@ -111,7 +127,7 @@ class Filters:
self.all_tests = []
self.tag_options = []
self.pull_request = pull_request
self.platforms = platforms
self.platforms = platforms if platforms else []
self.detailed_test_id = detailed_test_id
self.ignore_path = ignore_path
self.tag_cfg_file = alt_tags
@@ -125,7 +141,7 @@ class Filters:
if not self.platforms:
# disable for now, this is generating lots of churn when changing
# architectures that is otherwise covered elsewhere.
#self.find_archs()
# self.find_archs()
self.find_boards()
else:
for file in self.modified_files:
@@ -136,12 +152,12 @@ class Filters:
def get_plan(self, options, integration=False, use_testsuite_root=True):
fname = "_test_plan_partial.json"
cmd = [f"{zephyr_base}/scripts/twister", "-c"] + options + ["--save-tests", fname ]
cmd = [f"{zephyr_base}/scripts/twister", "-c"] + options + ["--save-tests", fname]
if self.detailed_test_id:
cmd += ["--detailed-test-id"]
if self.testsuite_root and use_testsuite_root:
for root in self.testsuite_root:
cmd+=["-T", root]
cmd += ["-T", root]
if integration:
cmd.append("--integration")
if self.quarantine_list:
@@ -159,7 +175,7 @@ class Filters:
def find_modules(self):
if 'west.yml' in self.modified_files and args.commits is not None:
print(f"Manifest file 'west.yml' changed")
print("Manifest file 'west.yml' changed")
print("=========")
old_manifest_content = repo_to_scan.git.show(f"{args.commits[:-2]}:west.yml")
with open("west_old.yml", "w") as manifest:
@@ -171,11 +187,13 @@ class Filters:
logging.debug(f'old_projs: {old_projs}')
logging.debug(f'new_projs: {new_projs}')
# Removed projects
rprojs = set(filter(lambda p: p[0] not in list(p[0] for p in new_projs),
old_projs - new_projs))
rprojs = set(
filter(lambda p: p[0] not in list(p[0] for p in new_projs), old_projs - new_projs)
)
# Updated projects
uprojs = set(filter(lambda p: p[0] in list(p[0] for p in old_projs),
new_projs - old_projs))
uprojs = set(
filter(lambda p: p[0] in list(p[0] for p in old_projs), new_projs - old_projs)
)
# Added projects
aprojs = new_projs - old_projs - uprojs
@@ -192,7 +210,7 @@ class Filters:
return
_options = []
for p in projs_names:
_options.extend(["-t", p ])
_options.extend(["-t", p])
if self.platforms:
for platform in self.platforms:
@@ -200,7 +218,6 @@ class Filters:
self.get_plan(_options, True)
def find_archs(self):
# we match both arch/<arch>/* and include/zephyr/arch/<arch> and skip common.
archs = set()
@@ -209,18 +226,17 @@ class Filters:
p = re.match(r"^arch\/([^/]+)\/", f)
if not p:
p = re.match(r"^include\/zephyr\/arch\/([^/]+)\/", f)
if p:
if p.group(1) != 'common':
archs.add(p.group(1))
# Modified file is treated as resolved, since a matching scope was found
self.resolved_files.append(f)
if p and p.group(1) != 'common':
archs.add(p.group(1))
# Modified file is treated as resolved, since a matching scope was found
self.resolved_files.append(f)
_options = []
for arch in archs:
_options.extend(["-a", arch ])
_options.extend(["-a", arch])
if _options:
logging.info(f'Potential architecture filters...')
logging.info('Potential architecture filters...')
if self.platforms:
for platform in self.platforms:
_options.extend(["-p", platform])
@@ -246,8 +262,15 @@ class Filters:
roots.append(repository_path)
# Look for boards in monitored repositories
lb_args = argparse.Namespace(**{'arch_roots': roots, 'board_roots': roots, 'board': None, 'soc_roots':roots,
'board_dir': None})
lb_args = argparse.Namespace(
**{
'arch_roots': roots,
'board_roots': roots,
'board': None,
'soc_roots': roots,
'board_dir': None,
}
)
known_boards = list_boards.find_v2_boards(lb_args).values()
for changed in changed_boards:
@@ -255,29 +278,32 @@ class Filters:
c = (zephyr_base / changed).resolve()
if c.is_relative_to(board.dir.resolve()):
for file in glob.glob(os.path.join(board.dir, f"{board.name}*.yaml")):
with open(file, 'r', encoding='utf-8') as f:
with open(file, encoding='utf-8') as f:
b = yaml.load(f.read(), Loader=SafeLoader)
matched_boards[b['identifier']] = board
logging.info(f"found boards: {','.join(matched_boards.keys())}")
# If modified file is caught by "find_boards" workflow (change in "boards" dir AND board recognized)
# it means a proper testing scope for this file was found and this file can be removed
# from further consideration
# If modified file is caught by "find_boards" workflow (change in "boards" dir AND board
# recognized) it means a proper testing scope for this file was found and this file can
# be removed from further consideration
for _, board in matched_boards.items():
self.resolved_files.extend(list(filter(lambda f: str(board.dir.relative_to(zephyr_base)) in f, resolved_files)))
relative_board_dir = str(board.dir.relative_to(zephyr_base))
resolved_filter = lambda f, rel_dir=relative_board_dir: rel_dir in f  # noqa: E731
self.resolved_files.extend(list(filter(resolved_filter, resolved_files)))
_options = []
if len(matched_boards) > 20:
logging.warning(f"{len(matched_boards)} boards changed, this looks like a global change, skipping test handling, revert to default.")
msg = f"{len(matched_boards)} boards changed, this looks like a global change, "
msg += "skipping test handling, revert to default."
logging.warning(msg)
self.full_twister = True
return
for board in matched_boards:
_options.extend(["-p", board ])
_options.extend(["-p", board])
if _options:
logging.info(f'Potential board filters...')
logging.info('Potential board filters...')
self.get_plan(_options)
def find_tests(self):
@@ -289,17 +315,19 @@ class Filters:
scope_found = False
while not scope_found and d:
head, tail = os.path.split(d)
if os.path.exists(os.path.join(d, "testcase.yaml")) or \
os.path.exists(os.path.join(d, "sample.yaml")):
if os.path.exists(os.path.join(d, "testcase.yaml")) or os.path.exists(
os.path.join(d, "sample.yaml")
):
tests.add(d)
# Modified file is treated as resolved, since a matching scope was found
self.resolved_files.append(f)
scope_found = True
elif tail == "common":
# Look for yamls in directories collocated with common
testcase_yamls = glob.iglob(head + '/**/testcase.yaml', recursive=True)
sample_yamls = glob.iglob(head + '/**/sample.yaml', recursive=True)
yamls_found = [yaml for yaml in glob.iglob(head + '/**/testcase.yaml', recursive=True)]
yamls_found.extend([yaml for yaml in glob.iglob(head + '/**/sample.yaml', recursive=True)])
yamls_found = [*testcase_yamls, *sample_yamls]
if yamls_found:
for yaml in yamls_found:
tests.add(os.path.dirname(yaml))
@@ -312,10 +340,12 @@ class Filters:
_options = []
for t in tests:
_options.extend(["-T", t ])
_options.extend(["-T", t])
if len(tests) > self.tc_roots_th:
logging.warning(f"{len(tests)} tests changed, this looks like a global change, skipping test handling, revert to default")
msg = f"{len(tests)} tests changed, this looks like a global change, "
msg += "skipping test handling, revert to default"
logging.warning(msg)
self.full_twister = True
return
@@ -327,12 +357,11 @@ class Filters:
self.get_plan(_options, use_testsuite_root=False)
def find_tags(self):
with open(self.tag_cfg_file, 'r') as ymlfile:
with open(self.tag_cfg_file) as ymlfile:
tags_config = yaml.safe_load(ymlfile)
tags = {}
for t,x in tags_config.items():
for t, x in tags_config.items():
tag = Tag()
tag.exclude = True
tag.name = t
@@ -343,8 +372,9 @@ class Filters:
# Like tag._match_fn(path), but for files-exclude and
# files-regex-exclude
tag._exclude_match_fn = \
_get_match_fn(x.get("files-exclude"), x.get("files-regex-exclude"))
tag._exclude_match_fn = _get_match_fn(
x.get("files-exclude"), x.get("files-regex-exclude")
)
tags[tag.name] = tag
@@ -359,18 +389,19 @@ class Filters:
exclude_tags.add(t.name)
for tag in exclude_tags:
self.tag_options.extend(["-e", tag ])
self.tag_options.extend(["-e", tag])
if exclude_tags:
logging.info(f'Potential tag based filters: {exclude_tags}')
def find_excludes(self, skip=[]):
with open(self.ignore_path, "r") as twister_ignore:
def find_excludes(self):
with open(self.ignore_path) as twister_ignore:
ignores = twister_ignore.read().splitlines()
ignores = filter(lambda x: not x.startswith("#"), ignores)
found = set()
files_not_resolved = list(filter(lambda x: x not in self.resolved_files, self.modified_files))
not_resolved = lambda x: x not in self.resolved_files # noqa: E731
files_not_resolved = list(filter(not_resolved, self.modified_files))
for pattern in ignores:
if pattern:
@@ -385,7 +416,7 @@ class Filters:
if self.full_twister:
_options = []
logging.info(f'Need to run full or partial twister...')
logging.info('Need to run full or partial twister...')
if self.platforms:
for platform in self.platforms:
_options.extend(["-p", platform])
@@ -396,53 +427,88 @@ class Filters:
_options.extend(self.tag_options)
self.get_plan(_options, True)
else:
logging.info(f'No twister needed or partial twister run only...')
logging.info('No twister needed or partial twister run only...')
def parse_args():
parser = argparse.ArgumentParser(
description="Generate twister argument files based on modified file",
allow_abbrev=False)
parser.add_argument('-c', '--commits', default=None,
help="Commit range in the form: a..b")
parser.add_argument('-m', '--modified-files', default=None,
help="File with information about changed/deleted/added files.")
parser.add_argument('-o', '--output-file', default="testplan.json",
help="JSON file with the test plan to be passed to twister")
parser.add_argument('-P', '--pull-request', action="store_true",
help="This is a pull request")
parser.add_argument('-p', '--platform', action="append",
help="Limit this for a platform or a list of platforms.")
parser.add_argument('-t', '--tests_per_builder', default=700, type=int,
help="Number of tests per builder")
parser.add_argument('-n', '--default-matrix', default=10, type=int,
help="Number of tests per builder")
parser.add_argument('--testcase-roots-threshold', default=20, type=int,
help="Threshold value for number of modified testcase roots, up to which an optimized scope is still applied."
"When exceeded, full scope will be triggered")
parser.add_argument('--detailed-test-id', action='store_true',
help="Include paths to tests' locations in tests' names.")
parser.add_argument("--no-detailed-test-id", dest='detailed_test_id', action="store_false",
help="Don't put paths into tests' names.")
parser.add_argument('-r', '--repo-to-scan', default=None,
help="Repo to scan")
parser.add_argument('--ignore-path',
default=os.path.join(zephyr_base, 'scripts', 'ci', 'twister_ignore.txt'),
help="Path to a text file with patterns of files to be matched against changed files")
parser.add_argument('--alt-tags',
default=os.path.join(zephyr_base, 'scripts', 'ci', 'tags.yaml'),
help="Path to a file describing relations between directories and tags")
description="Generate twister argument files based on modified file", allow_abbrev=False
)
parser.add_argument('-c', '--commits', default=None, help="Commit range in the form: a..b")
parser.add_argument(
"-T", "--testsuite-root", action="append", default=[],
help="Base directory to recursively search for test cases. All "
"testcase.yaml files under here will be processed. May be "
"called multiple times. Defaults to the 'samples/' and "
"'tests/' directories at the base of the Zephyr tree.")
'-m',
'--modified-files',
default=None,
help="File with information about changed/deleted/added files.",
)
parser.add_argument(
"--quarantine-list", action="append", metavar="FILENAME",
help="Load list of test scenarios under quarantine. The entries in "
"the file need to correspond to the test scenarios names as in "
"corresponding tests .yaml files. These scenarios "
"will be skipped with quarantine as the reason.")
'-o',
'--output-file',
default="testplan.json",
help="JSON file with the test plan to be passed to twister",
)
parser.add_argument('-P', '--pull-request', action="store_true", help="This is a pull request")
parser.add_argument(
'-p',
'--platform',
action="append",
help="Limit this for a platform or a list of platforms.",
)
parser.add_argument(
'-t', '--tests_per_builder', default=700, type=int, help="Number of tests per builder"
)
parser.add_argument(
'-n', '--default-matrix', default=10, type=int, help="Number of tests per builder"
)
parser.add_argument(
'--testcase-roots-threshold',
default=20,
type=int,
help="Threshold value for number of modified testcase roots, "
"up to which an optimized scope is still applied. "
"When exceeded, full scope will be triggered",
)
parser.add_argument(
'--detailed-test-id',
action='store_true',
help="Include paths to tests' locations in tests' names.",
)
parser.add_argument(
"--no-detailed-test-id",
dest='detailed_test_id',
action="store_false",
help="Don't put paths into tests' names.",
)
parser.add_argument('-r', '--repo-to-scan', default=None, help="Repo to scan")
parser.add_argument(
'--ignore-path',
default=os.path.join(zephyr_base, 'scripts', 'ci', 'twister_ignore.txt'),
help="Path to a text file with patterns of files to be matched against changed files",
)
parser.add_argument(
'--alt-tags',
default=os.path.join(zephyr_base, 'scripts', 'ci', 'tags.yaml'),
help="Path to a file describing relations between directories and tags",
)
parser.add_argument(
"-T",
"--testsuite-root",
action="append",
default=[],
help="Base directory to recursively search for test cases. All "
"testcase.yaml files under here will be processed. May be "
"called multiple times. Defaults to the 'samples/' and "
"'tests/' directories at the base of the Zephyr tree.",
)
parser.add_argument(
"--quarantine-list",
action="append",
metavar="FILENAME",
help="Load list of test scenarios under quarantine. The entries in "
"the file need to correspond to the test scenarios names as in "
"corresponding tests .yaml files. These scenarios "
"will be skipped with quarantine as the reason.",
)
# Do not include paths in names by default.
parser.set_defaults(detailed_test_id=False)
@@ -451,7 +517,6 @@ def parse_args():
if __name__ == "__main__":
args = parse_args()
files = []
errors = 0
@@ -462,7 +527,7 @@ if __name__ == "__main__":
commit = repo_to_scan.git.diff("--name-only", args.commits)
files = commit.split("\n")
elif args.modified_files:
with open(args.modified_files, "r") as fp:
with open(args.modified_files) as fp:
files = json.load(fp)
if files:
@@ -470,9 +535,17 @@ if __name__ == "__main__":
print("\n".join(files))
print("=========")
f = Filters(files, args.ignore_path, args.alt_tags, args.testsuite_root,
args.pull_request, args.platform, args.detailed_test_id, args.quarantine_list,
args.testcase_roots_threshold)
f = Filters(
files,
args.ignore_path,
args.alt_tags,
args.testsuite_root,
args.pull_request,
args.platform,
args.detailed_test_id,
args.quarantine_list,
args.testcase_roots_threshold,
)
f.process()
# remove dupes and filtered cases
@@ -491,7 +564,14 @@ if __name__ == "__main__":
errors += 1
if (n, a, p, t) not in dup_free_set:
dup_free.append(ts)
dup_free_set.add((n, a, p, t,))
dup_free_set.add(
(
n,
a,
p,
t,
)
)
logging.info(f'Total tests to be run: {len(dup_free)}')
with open(".testplan", "w") as tp:
@@ -506,14 +586,23 @@ if __name__ == "__main__":
tp.write(f"TWISTER_FULL={f.full_twister}\n")
logging.info(f'Total nodes to launch: {nodes}')
header = ['test', 'arch', 'platform', 'status', 'extra_args', 'handler',
'handler_time', 'used_ram', 'used_rom']
header = [
'test',
'arch',
'platform',
'status',
'extra_args',
'handler',
'handler_time',
'used_ram',
'used_rom',
]
# write plan
if dup_free:
data = {}
data['testsuites'] = dup_free
with open(args.output_file, 'w', newline='') as json_file:
json.dump(data, json_file, indent=4, separators=(',',':'))
json.dump(data, json_file, indent=4, separators=(',', ':'))
sys.exit(errors)