Diffstat (limited to 'scripts')
-rw-r--r--  scripts/Makefile                             |   1
-rw-r--r--  scripts/Makefile.kasan                       |  45
-rw-r--r--  scripts/Makefile.lib                         |  13
-rwxr-xr-x  scripts/checktransupdate.py                  | 214
-rw-r--r--  scripts/coccinelle/api/string_choices.cocci  |  46
-rwxr-xr-x  scripts/decode_stacktrace.sh                 |  51
-rw-r--r--  scripts/dtc/checks.c                         |  16
-rwxr-xr-x  scripts/dtc/dt-extract-compatibles           |  13
-rw-r--r--  scripts/dtc/fdtoverlay.c                     |   2
-rw-r--r--  scripts/dtc/version_gen.h                    |   2
-rw-r--r--  scripts/gdb/linux/kasan.py                   |  44
-rw-r--r--  scripts/gdb/linux/proc.py                    |   4
-rw-r--r--  scripts/gdb/linux/rbtree.py                  |  12
-rw-r--r--  scripts/gdb/linux/stackdepot.py              |  27
-rw-r--r--  scripts/gdb/linux/timerlist.py               |  31
-rw-r--r--  scripts/gdb/vmlinux-gdb.py                   |   1
-rwxr-xr-x  scripts/get_maintainer.pl                    |  17
-rwxr-xr-x  scripts/gfp-translate                        |  66
-rw-r--r--  scripts/ipe/Makefile                         |   2
-rw-r--r--  scripts/ipe/polgen/.gitignore                |   2
-rw-r--r--  scripts/ipe/polgen/Makefile                  |   5
-rw-r--r--  scripts/ipe/polgen/polgen.c                  | 145
-rwxr-xr-x  scripts/link-vmlinux.sh                      |  14
-rwxr-xr-x  scripts/macro_checker.py                     | 131
-rwxr-xr-x  scripts/remove-stale-files                   |   2
-rwxr-xr-x  scripts/sphinx-pre-install                   |   2
-rw-r--r--  scripts/test_fortify.sh                      |  66
-rwxr-xr-x  scripts/xz_wrap.sh                           | 158
28 files changed, 877 insertions(+), 255 deletions(-)
diff --git a/scripts/Makefile b/scripts/Makefile
index dccef663ca82..6bcda4b9d054 100644
--- a/scripts/Makefile
+++ b/scripts/Makefile
@@ -55,6 +55,7 @@ targets += module.lds
subdir-$(CONFIG_GCC_PLUGINS) += gcc-plugins
subdir-$(CONFIG_MODVERSIONS) += genksyms
subdir-$(CONFIG_SECURITY_SELINUX) += selinux
+subdir-$(CONFIG_SECURITY_IPE) += ipe
# Let clean descend into subdirs
subdir- += basic dtc gdb kconfig mod
diff --git a/scripts/Makefile.kasan b/scripts/Makefile.kasan
index 390658a2d5b7..aab4154af00a 100644
--- a/scripts/Makefile.kasan
+++ b/scripts/Makefile.kasan
@@ -22,30 +22,31 @@ endif
ifdef CONFIG_KASAN_GENERIC
ifdef CONFIG_KASAN_INLINE
+ # When the number of memory accesses in a function is less than this
+ # call threshold number, the compiler will use inline instrumentation.
+ # 10000 is chosen offhand as a number large enough that all
+ # kernel functions end up being instrumented inline.
call_threshold := 10000
else
call_threshold := 0
endif
-CFLAGS_KASAN_MINIMAL := -fsanitize=kernel-address
-
-# -fasan-shadow-offset fails without -fsanitize
-CFLAGS_KASAN_SHADOW := $(call cc-option, -fsanitize=kernel-address \
- -fasan-shadow-offset=$(KASAN_SHADOW_OFFSET), \
- $(call cc-option, -fsanitize=kernel-address \
- -mllvm -asan-mapping-offset=$(KASAN_SHADOW_OFFSET)))
-
-ifeq ($(strip $(CFLAGS_KASAN_SHADOW)),)
- CFLAGS_KASAN := $(CFLAGS_KASAN_MINIMAL)
-else
- # Now add all the compiler specific options that are valid standalone
- CFLAGS_KASAN := $(CFLAGS_KASAN_SHADOW) \
- $(call cc-param,asan-globals=1) \
- $(call cc-param,asan-instrumentation-with-call-threshold=$(call_threshold)) \
- $(call cc-param,asan-instrument-allocas=1)
-endif
-
-CFLAGS_KASAN += $(call cc-param,asan-stack=$(stack_enable))
+# First, enable -fsanitize=kernel-address together with the shadow mapping
+# offset. For GCC, -fasan-shadow-offset fails without -fsanitize (GCC accepts
+# the shadow mapping offset via -fasan-shadow-offset instead of a --param
+# like the other KASAN parameters).
+# Instead of ifdef-checking the compiler, rely on cc-option.
+CFLAGS_KASAN := $(call cc-option, -fsanitize=kernel-address \
+ -fasan-shadow-offset=$(KASAN_SHADOW_OFFSET), \
+ $(call cc-option, -fsanitize=kernel-address \
+ -mllvm -asan-mapping-offset=$(KASAN_SHADOW_OFFSET)))
+
+# Now, add other parameters enabled similarly in both GCC and Clang.
+# As some of them are not supported by older compilers, use cc-param.
+CFLAGS_KASAN += $(call cc-param,asan-instrumentation-with-call-threshold=$(call_threshold)) \
+ $(call cc-param,asan-stack=$(stack_enable)) \
+ $(call cc-param,asan-instrument-allocas=1) \
+ $(call cc-param,asan-globals=1)
# Instrument memcpy/memset/memmove calls by using instrumented __asan_mem*()
# instead. With compilers that don't support this option, compiler-inserted
@@ -57,9 +58,9 @@ endif # CONFIG_KASAN_GENERIC
ifdef CONFIG_KASAN_SW_TAGS
ifdef CONFIG_KASAN_INLINE
- instrumentation_flags := $(call cc-param,hwasan-mapping-offset=$(KASAN_SHADOW_OFFSET))
+ instrumentation_flags := $(call cc-param,hwasan-mapping-offset=$(KASAN_SHADOW_OFFSET))
else
- instrumentation_flags := $(call cc-param,hwasan-instrument-with-calls=1)
+ instrumentation_flags := $(call cc-param,hwasan-instrument-with-calls=1)
endif
CFLAGS_KASAN := -fsanitize=kernel-hwaddress \
@@ -70,7 +71,7 @@ CFLAGS_KASAN := -fsanitize=kernel-hwaddress \
# Instrument memcpy/memset/memmove calls by using instrumented __hwasan_mem*().
ifeq ($(call clang-min-version, 150000)$(call gcc-min-version, 130000),y)
-CFLAGS_KASAN += $(call cc-param,hwasan-kernel-mem-intrinsic-prefix=1)
+ CFLAGS_KASAN += $(call cc-param,hwasan-kernel-mem-intrinsic-prefix=1)
endif
endif # CONFIG_KASAN_SW_TAGS
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 207325eaf1d1..dae2089e7bc6 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -530,14 +530,17 @@ quiet_cmd_fit = FIT $@
# XZ
# ---------------------------------------------------------------------------
-# Use xzkern to compress the kernel image and xzmisc to compress other things.
+# Use xzkern or xzkern_with_size to compress the kernel image and xzmisc to
+# compress other things.
#
# xzkern uses a big LZMA2 dictionary since it doesn't increase memory usage
# of the kernel decompressor. A BCJ filter is used if it is available for
-# the target architecture. xzkern also appends uncompressed size of the data
-# using size_append. The .xz format has the size information available at
-# the end of the file too, but it's in more complex format and it's good to
-# avoid changing the part of the boot code that reads the uncompressed size.
+# the target architecture.
+#
+# xzkern_with_size also appends uncompressed size of the data using
+# size_append. The .xz format has the size information available at the end
+# of the file too, but it's in a more complex format and it's good to avoid
+# changing the part of the boot code that reads the uncompressed size.
# Note that the bytes added by size_append will make the xz tool think that
# the file is corrupt. This is expected.
#
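For reference, size_append effectively appends the uncompressed length as a
32-bit little-endian integer at the end of the compressed stream, which is
also why the resulting file looks corrupt to the xz tool. A minimal Python
sketch of the idea (illustrative only; the kernel implements this with a
printf/sed pipeline in Makefile.lib):

    import struct

    def append_size(compressed: bytes, uncompressed_len: int) -> bytes:
        # Boot code reads these 4 little-endian bytes from the end of
        # the image to size the decompression buffer.
        return compressed + struct.pack("<I", uncompressed_len)
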
diff --git a/scripts/checktransupdate.py b/scripts/checktransupdate.py
index 5a0fc99e3f93..578c3fecfdfd 100755
--- a/scripts/checktransupdate.py
+++ b/scripts/checktransupdate.py
@@ -10,31 +10,28 @@ differences occur, report the file and commits that need to be updated.
The usage is as follows:
- ./scripts/checktransupdate.py -l zh_CN
-This will print all the files that need to be updated in the zh_CN locale.
+This will print all the files that need to be updated or translated in the zh_CN locale.
- ./scripts/checktransupdate.py Documentation/translations/zh_CN/dev-tools/testing-overview.rst
This will only print the status of the specified file.
The output is something like:
-Documentation/translations/zh_CN/dev-tools/testing-overview.rst (1 commits)
+Documentation/dev-tools/kfence.rst
+No translation in the locale of zh_CN
+
+Documentation/translations/zh_CN/dev-tools/testing-overview.rst
commit 42fb9cfd5b18 ("Documentation: dev-tools: Add link to RV docs")
+1 commit(s) need resolving in total
"""
import os
-from argparse import ArgumentParser, BooleanOptionalAction
+import time
+import logging
+from argparse import ArgumentParser, ArgumentTypeError, BooleanOptionalAction
from datetime import datetime
-flag_p_c = False
-flag_p_uf = False
-flag_debug = False
-
-
-def dprint(*args, **kwargs):
- if flag_debug:
- print("[DEBUG] ", end="")
- print(*args, **kwargs)
-
def get_origin_path(file_path):
+ """Get the origin path from the translation path"""
paths = file_path.split("/")
tidx = paths.index("translations")
opaths = paths[:tidx]
@@ -43,17 +40,16 @@ def get_origin_path(file_path):
def get_latest_commit_from(file_path, commit):
- command = "git log --pretty=format:%H%n%aD%n%cD%n%n%B {} -1 -- {}".format(
- commit, file_path
- )
- dprint(command)
+ """Get the latest commit from the specified commit for the specified file"""
+ command = f"git log --pretty=format:%H%n%aD%n%cD%n%n%B {commit} -1 -- {file_path}"
+ logging.debug(command)
pipe = os.popen(command)
result = pipe.read()
result = result.split("\n")
if len(result) <= 1:
return None
- dprint("Result: {}".format(result[0]))
+ logging.debug("Result: %s", result[0])
return {
"hash": result[0],
@@ -64,17 +60,19 @@ def get_latest_commit_from(file_path, commit):
def get_origin_from_trans(origin_path, t_from_head):
+ """Get the latest origin commit from the translation commit"""
o_from_t = get_latest_commit_from(origin_path, t_from_head["hash"])
while o_from_t is not None and o_from_t["author_date"] > t_from_head["author_date"]:
o_from_t = get_latest_commit_from(origin_path, o_from_t["hash"] + "^")
if o_from_t is not None:
- dprint("tracked origin commit id: {}".format(o_from_t["hash"]))
+ logging.debug("tracked origin commit id: %s", o_from_t["hash"])
return o_from_t
def get_commits_count_between(opath, commit1, commit2):
- command = "git log --pretty=format:%H {}...{} -- {}".format(commit1, commit2, opath)
- dprint(command)
+ """Get the commits count between two commits for the specified file"""
+ command = f"git log --pretty=format:%H {commit1}...{commit2} -- {opath}"
+ logging.debug(command)
pipe = os.popen(command)
result = pipe.read().split("\n")
# filter out empty lines
@@ -83,50 +81,120 @@ def get_commits_count_between(opath, commit1, commit2):
def pretty_output(commit):
- command = "git log --pretty='format:%h (\"%s\")' -1 {}".format(commit)
- dprint(command)
+ """Pretty print the commit message"""
+ command = f"git log --pretty='format:%h (\"%s\")' -1 {commit}"
+ logging.debug(command)
pipe = os.popen(command)
return pipe.read()
+def valid_commit(commit):
+ """Check if the commit is valid or not"""
+ msg = pretty_output(commit)
+ return "Merge tag" not in msg
+
def check_per_file(file_path):
+ """Check the translation status for the specified file"""
opath = get_origin_path(file_path)
if not os.path.isfile(opath):
- dprint("Error: Cannot find the origin path for {}".format(file_path))
+ logging.error("Cannot find the origin path for {file_path}")
return
o_from_head = get_latest_commit_from(opath, "HEAD")
t_from_head = get_latest_commit_from(file_path, "HEAD")
if o_from_head is None or t_from_head is None:
- print("Error: Cannot find the latest commit for {}".format(file_path))
+ logging.error("Cannot find the latest commit for %s", file_path)
return
o_from_t = get_origin_from_trans(opath, t_from_head)
if o_from_t is None:
- print("Error: Cannot find the latest origin commit for {}".format(file_path))
+ logging.error("Error: Cannot find the latest origin commit for %s", file_path)
return
if o_from_head["hash"] == o_from_t["hash"]:
- if flag_p_uf:
- print("No update needed for {}".format(file_path))
- return
+ logging.debug("No update needed for %s", file_path)
else:
- print("{}".format(file_path), end="\t")
+ logging.info(file_path)
commits = get_commits_count_between(
opath, o_from_t["hash"], o_from_head["hash"]
)
- print("({} commits)".format(len(commits)))
- if flag_p_c:
- for commit in commits:
- msg = pretty_output(commit)
- if "Merge tag" not in msg:
- print("commit", msg)
+ count = 0
+ for commit in commits:
+ if valid_commit(commit):
+ logging.info("commit %s", pretty_output(commit))
+ count += 1
+ logging.info("%d commits needs resolving in total\n", count)
+
+
+def valid_locales(locale):
+ """Check if the locale is valid or not"""
+ script_path = os.path.dirname(os.path.abspath(__file__))
+ linux_path = os.path.join(script_path, "..")
+ if not os.path.isdir(f"{linux_path}/Documentation/translations/{locale}"):
+ raise ArgumentTypeError(f"Invalid locale: {locale}")
+ return locale
+
+
+def list_files_with_excluding_folders(folder, exclude_folders, include_suffix):
+ """List all files with the specified suffix in the folder and its subfolders"""
+ files = []
+ stack = [folder]
+
+ while stack:
+ pwd = stack.pop()
+ # filter out the exclude folders
+ if os.path.basename(pwd) in exclude_folders:
+ continue
+ # list all files and folders
+ for item in os.listdir(pwd):
+ ab_item = os.path.join(pwd, item)
+ if os.path.isdir(ab_item):
+ stack.append(ab_item)
+ else:
+ if ab_item.endswith(include_suffix):
+ files.append(ab_item)
+
+ return files
+
+
+class DmesgFormatter(logging.Formatter):
+ """Custom dmesg logging formatter"""
+ def format(self, record):
+ timestamp = time.time()
+ formatted_time = f"[{timestamp:>10.6f}]"
+ log_message = f"{formatted_time} {record.getMessage()}"
+ return log_message
+
+
+def config_logging(log_level, log_file="checktransupdate.log"):
+ """configure logging based on the log level"""
+ # set up the root logger
+ logger = logging.getLogger()
+ logger.setLevel(log_level)
+
+ # Create console handler
+ console_handler = logging.StreamHandler()
+ console_handler.setLevel(log_level)
+
+ # Create file handler
+ file_handler = logging.FileHandler(log_file)
+ file_handler.setLevel(log_level)
+
+ # Create formatter and add it to the handlers
+ formatter = DmesgFormatter()
+ console_handler.setFormatter(formatter)
+ file_handler.setFormatter(formatter)
+
+ # Add the handler to the logger
+ logger.addHandler(console_handler)
+ logger.addHandler(file_handler)
def main():
+ """Main function of the script"""
script_path = os.path.dirname(os.path.abspath(__file__))
linux_path = os.path.join(script_path, "..")
@@ -134,62 +202,62 @@ def main():
parser.add_argument(
"-l",
"--locale",
+ default="zh_CN",
+ type=valid_locales,
help="Locale to check when files are not specified",
)
+
parser.add_argument(
- "--print-commits",
+ "--print-missing-translations",
action=BooleanOptionalAction,
default=True,
- help="Print commits between the origin and the translation",
+ help="Print files that do not have translations",
)
parser.add_argument(
- "--print-updated-files",
- action=BooleanOptionalAction,
- default=False,
- help="Print files that do no need to be updated",
- )
+ '--log',
+ default='INFO',
+ choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
+ help='Set the logging level')
parser.add_argument(
- "--debug",
- action=BooleanOptionalAction,
- help="Print debug information",
- default=False,
- )
+ '--logfile',
+ default='checktransupdate.log',
+ help='Set the logging file (default: checktransupdate.log)')
parser.add_argument(
"files", nargs="*", help="Files to check, if not specified, check all files"
)
args = parser.parse_args()
- global flag_p_c, flag_p_uf, flag_debug
- flag_p_c = args.print_commits
- flag_p_uf = args.print_updated_files
- flag_debug = args.debug
+ # Configure logging based on the --log argument
+ log_level = getattr(logging, args.log.upper(), logging.INFO)
+ config_logging(log_level)
- # get files related to linux path
+ # Get files related to linux path
files = args.files
if len(files) == 0:
- if args.locale is not None:
- files = (
- os.popen(
- "find {}/Documentation/translations/{} -type f".format(
- linux_path, args.locale
- )
- )
- .read()
- .split("\n")
- )
- else:
- files = (
- os.popen(
- "find {}/Documentation/translations -type f".format(linux_path)
- )
- .read()
- .split("\n")
- )
-
- files = list(filter(lambda x: x != "", files))
+ official_files = list_files_with_excluding_folders(
+ os.path.join(linux_path, "Documentation"), ["translations", "output"], "rst"
+ )
+
+ for file in official_files:
+ # split the path into parts
+ path_parts = file.split(os.sep)
+ # find the index of the "Documentation" directory
+ kindex = path_parts.index("Documentation")
+ # insert the translations and locale after the Documentation directory
+ new_path_parts = path_parts[:kindex + 1] + ["translations", args.locale] \
+ + path_parts[kindex + 1 :]
+ # join the path parts back together
+ new_file = os.sep.join(new_path_parts)
+ if os.path.isfile(new_file):
+ files.append(new_file)
+ else:
+ if args.print_missing_translations:
+ logging.info(os.path.relpath(os.path.abspath(file), linux_path))
+ logging.info("No translation in the locale of %s\n", args.locale)
+
files = list(map(lambda x: os.path.relpath(os.path.abspath(x), linux_path), files))
# cd to linux root directory
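The path juggling in main() above splices "translations/<locale>" into an
origin document path right after the "Documentation" component. A standalone
sketch of that mapping (hypothetical helper name):

    import os

    def translation_path(origin, locale="zh_CN"):
        # Documentation/dev-tools/kfence.rst ->
        # Documentation/translations/zh_CN/dev-tools/kfence.rst
        parts = origin.split(os.sep)
        k = parts.index("Documentation")
        return os.sep.join(parts[:k + 1] + ["translations", locale] + parts[k + 1:])
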
diff --git a/scripts/coccinelle/api/string_choices.cocci b/scripts/coccinelle/api/string_choices.cocci
index a71966c0494e..5e729f187f22 100644
--- a/scripts/coccinelle/api/string_choices.cocci
+++ b/scripts/coccinelle/api/string_choices.cocci
@@ -39,3 +39,49 @@ e << str_plural_r.E;
@@
coccilib.report.print_report(p[0], "opportunity for str_plural(%s)" % e)
+
+@str_up_down depends on patch@
+expression E;
+@@
+(
+- ((E) ? "up" : "down")
++ str_up_down(E)
+)
+
+@str_up_down_r depends on !patch exists@
+expression E;
+position P;
+@@
+(
+* ((E@P) ? "up" : "down")
+)
+
+@script:python depends on report@
+p << str_up_down_r.P;
+e << str_up_down_r.E;
+@@
+
+coccilib.report.print_report(p[0], "opportunity for str_up_down(%s)" % e)
+
+@str_down_up depends on patch@
+expression E;
+@@
+(
+- ((E) ? "down" : "up")
++ str_down_up(E)
+)
+
+@str_down_up_r depends on !patch exists@
+expression E;
+position P;
+@@
+(
+* ((E@P) ? "down" : "up")
+)
+
+@script:python depends on report@
+p << str_down_up_r.P;
+e << str_down_up_r.E;
+@@
+
+coccilib.report.print_report(p[0], "opportunity for str_down_up(%s)" % e)
diff --git a/scripts/decode_stacktrace.sh b/scripts/decode_stacktrace.sh
index a0f50a5b4f7c..826836d264c6 100755
--- a/scripts/decode_stacktrace.sh
+++ b/scripts/decode_stacktrace.sh
@@ -1,11 +1,13 @@
-#!/bin/bash
+#!/usr/bin/env bash
# SPDX-License-Identifier: GPL-2.0
# (c) 2014, Sasha Levin <sasha.levin@oracle.com>
#set -x
usage() {
echo "Usage:"
- echo " $0 -r <release> | <vmlinux> [<base path>|auto] [<modules path>]"
+ echo " $0 -r <release>"
+ echo " $0 [<vmlinux> [<base_path>|auto [<modules_path>]]]"
+ echo " $0 -h"
}
# Try to find a Rust demangler
@@ -32,7 +34,10 @@ READELF=${UTIL_PREFIX}readelf${UTIL_SUFFIX}
ADDR2LINE=${UTIL_PREFIX}addr2line${UTIL_SUFFIX}
NM=${UTIL_PREFIX}nm${UTIL_SUFFIX}
-if [[ $1 == "-r" ]] ; then
+if [[ $1 == "-h" ]] ; then
+ usage
+ exit 0
+elif [[ $1 == "-r" ]] ; then
vmlinux=""
basepath="auto"
modpath=""
@@ -89,31 +94,32 @@ find_module() {
fi
fi
- if [[ "$modpath" != "" ]] ; then
- for fn in $(find "$modpath" -name "${module//_/[-_]}.ko*") ; do
- if ${READELF} -WS "$fn" | grep -qwF .debug_line ; then
- echo $fn
- return
- fi
- done
- return 1
- fi
-
- modpath=$(dirname "$vmlinux")
- find_module && return
-
- if [[ $release == "" ]] ; then
+ if [ -z "$release" ] ; then
release=$(gdb -ex 'print init_uts_ns.name.release' -ex 'quit' -quiet -batch "$vmlinux" 2>/dev/null | sed -n 's/\$1 = "\(.*\)".*/\1/p')
fi
+ if [ -n "${release}" ] ; then
+ release_dirs="/usr/lib/debug/lib/modules/$release /lib/modules/$release"
+ fi
- for dn in {/usr/lib/debug,}/lib/modules/$release ; do
- if [ -e "$dn" ] ; then
- modpath="$dn"
- find_module && return
+ found_without_debug_info=false
+ for dir in "$modpath" "$(dirname "$vmlinux")" ${release_dirs}; do
+ if [ -n "${dir}" ] && [ -e "${dir}" ]; then
+ for fn in $(find "$dir" -name "${module//_/[-_]}.ko*") ; do
+ if ${READELF} -WS "$fn" | grep -qwF .debug_line ; then
+ echo $fn
+ return
+ fi
+ found_without_debug_info=true
+ done
fi
done
- modpath=""
+ if [[ ${found_without_debug_info} == true ]]; then
+ echo "WARNING! No debugging info in module ${module}, rebuild with DEBUG_KERNEL and DEBUG_INFO" >&2
+ else
+ echo "WARNING! Cannot find .ko for module ${module}, please pass a valid module path" >&2
+ fi
+
return 1
}
@@ -131,7 +137,6 @@ parse_symbol() {
else
local objfile=$(find_module)
if [[ $objfile == "" ]] ; then
- echo "WARNING! Modules path isn't set, but is needed to parse this symbol" >&2
return
fi
if [[ $aarray_support == true ]]; then
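find_module() now probes one ordered list of candidate directories: the
user-supplied module path, the directory containing vmlinux, then the debug
and standard module trees for the detected release. A Python sketch of that
ordering (helper name hypothetical; directory names as in the script):

    import os

    def candidate_dirs(modpath, vmlinux, release):
        dirs = [modpath, os.path.dirname(vmlinux)]
        if release:
            dirs += ["/usr/lib/debug/lib/modules/" + release,
                     "/lib/modules/" + release]
        # Keep only directories that actually exist, in search order.
        return [d for d in dirs if d and os.path.isdir(d)]
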
diff --git a/scripts/dtc/checks.c b/scripts/dtc/checks.c
index 10fb63894369..6e06aeab5503 100644
--- a/scripts/dtc/checks.c
+++ b/scripts/dtc/checks.c
@@ -1826,10 +1826,14 @@ static void check_graph_port(struct check *c, struct dt_info *dti,
if (node->bus != &graph_port_bus)
return;
+ check_graph_reg(c, dti, node);
+
+ /* skip checks below for overlays */
+ if (dti->dtsflags & DTSF_PLUGIN)
+ return;
+
if (!strprefixeq(node->name, node->basenamelen, "port"))
FAIL(c, dti, node, "graph port node name should be 'port'");
-
- check_graph_reg(c, dti, node);
}
WARNING(graph_port, check_graph_port, NULL, &graph_nodes);
@@ -1864,11 +1868,15 @@ static void check_graph_endpoint(struct check *c, struct dt_info *dti,
if (!node->parent || node->parent->bus != &graph_port_bus)
return;
+ check_graph_reg(c, dti, node);
+
+ /* skip checks below for overlays */
+ if (dti->dtsflags & DTSF_PLUGIN)
+ return;
+
if (!strprefixeq(node->name, node->basenamelen, "endpoint"))
FAIL(c, dti, node, "graph endpoint node name should be 'endpoint'");
- check_graph_reg(c, dti, node);
-
remote_node = get_remote_endpoint(c, dti, node);
if (!remote_node)
return;
diff --git a/scripts/dtc/dt-extract-compatibles b/scripts/dtc/dt-extract-compatibles
index 5ffb2364409b..6570efabaa64 100755
--- a/scripts/dtc/dt-extract-compatibles
+++ b/scripts/dtc/dt-extract-compatibles
@@ -46,6 +46,15 @@ def parse_of_match_table(data):
return match_table_list
+def parse_of_functions(data, func_name):
+ """ Find all compatibles in the last argument of a given function """
+ compat_list = []
+ for m in re.finditer(rf'{func_name}\(([a-zA-Z0-9_>\(\)"\-]+,\s)*"([a-zA-Z0-9_,-]+)"\)', data):
+ compat_list.append(m[2])
+
+ return compat_list
+
+
def parse_compatibles(file, compat_ignore_list):
with open(file, 'r', encoding='utf-8') as f:
data = f.read().replace('\n', '')
@@ -60,6 +69,10 @@ def parse_compatibles(file, compat_ignore_list):
else:
compat_list = parse_of_declare_macros(data)
compat_list += parse_of_device_id(data)
+ compat_list += parse_of_functions(data, "_is_compatible")
+ compat_list += parse_of_functions(data, "of_find_compatible_node")
+ compat_list += parse_of_functions(data, "for_each_compatible_node")
+ compat_list += parse_of_functions(data, "of_get_compatible_child")
return compat_list
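The regular expression in parse_of_functions() pulls the quoted compatible
string out of a call's last argument. A quick illustration against a made-up
C fragment:

    import re

    data = 'np = of_find_compatible_node(NULL, NULL, "vendor,some-device");'
    pattern = r'of_find_compatible_node\(([a-zA-Z0-9_>\(\)"\-]+,\s)*"([a-zA-Z0-9_,-]+)"\)'
    m = re.search(pattern, data)
    print(m[2])  # vendor,some-device
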
diff --git a/scripts/dtc/fdtoverlay.c b/scripts/dtc/fdtoverlay.c
index 4eba0460f240..699b4f616502 100644
--- a/scripts/dtc/fdtoverlay.c
+++ b/scripts/dtc/fdtoverlay.c
@@ -48,7 +48,7 @@ static void *apply_one(char *base, const char *overlay, size_t *buf_len,
int ret;
/*
- * We take a copies first, because a failed apply can trash
+ * We take copies first, because a failed apply can trash
* both the base blob and the overlay
*/
tmpo = xmalloc(fdt_totalsize(overlay));
diff --git a/scripts/dtc/version_gen.h b/scripts/dtc/version_gen.h
index 4c5e17639d2b..bf81ce593685 100644
--- a/scripts/dtc/version_gen.h
+++ b/scripts/dtc/version_gen.h
@@ -1 +1 @@
-#define DTC_VERSION "DTC 1.7.0-g1df7b047"
+#define DTC_VERSION "DTC 1.7.0-gbcd02b52"
diff --git a/scripts/gdb/linux/kasan.py b/scripts/gdb/linux/kasan.py
new file mode 100644
index 000000000000..56730b3fde0b
--- /dev/null
+++ b/scripts/gdb/linux/kasan.py
@@ -0,0 +1,44 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright 2024 Canonical Ltd.
+#
+# Authors:
+# Kuan-Ying Lee <kuan-ying.lee@canonical.com>
+#
+
+import gdb
+from linux import constants, mm
+
+def help():
+ t = """Usage: lx-kasan_mem_to_shadow [Hex memory addr]
+ Example:
+ lx-kasan_mem_to_shadow 0xffff000008eca008\n"""
+ gdb.write("Unrecognized command\n")
+ raise gdb.GdbError(t)
+
+class KasanMemToShadow(gdb.Command):
+ """Translate memory address to kasan shadow address"""
+
+ p_ops = None
+
+ def __init__(self):
+ if constants.LX_CONFIG_KASAN_GENERIC or constants.LX_CONFIG_KASAN_SW_TAGS:
+ super(KasanMemToShadow, self).__init__("lx-kasan_mem_to_shadow", gdb.COMMAND_SUPPORT)
+
+ def invoke(self, args, from_tty):
+ if not (constants.LX_CONFIG_KASAN_GENERIC or constants.LX_CONFIG_KASAN_SW_TAGS):
+ raise gdb.GdbError('CONFIG_KASAN_GENERIC or CONFIG_KASAN_SW_TAGS is not set')
+
+ argv = gdb.string_to_argv(args)
+ if len(argv) == 1:
+ if self.p_ops is None:
+ self.p_ops = mm.page_ops().ops
+ addr = int(argv[0], 16)
+ shadow_addr = self.kasan_mem_to_shadow(addr)
+ gdb.write('shadow addr: 0x%x\n' % shadow_addr)
+ else:
+ help()
+ def kasan_mem_to_shadow(self, addr):
+ return (addr >> self.p_ops.KASAN_SHADOW_SCALE_SHIFT) + self.p_ops.KASAN_SHADOW_OFFSET
+
+KasanMemToShadow()
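kasan_mem_to_shadow() mirrors the kernel's mapping: shadow =
(addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET. A standalone sketch
with assumed example values (generic KASAN typically shifts by 3, one shadow
byte per 8 bytes of memory; the offset is architecture- and config-specific):

    # Assumed example values, not taken from any particular kernel build.
    KASAN_SHADOW_SCALE_SHIFT = 3
    KASAN_SHADOW_OFFSET = 0xdffffc0000000000

    def kasan_mem_to_shadow(addr):
        return (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET

    print(hex(kasan_mem_to_shadow(0xffff000008eca008)))
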
diff --git a/scripts/gdb/linux/proc.py b/scripts/gdb/linux/proc.py
index 43c687e7a69d..65dd1bd12964 100644
--- a/scripts/gdb/linux/proc.py
+++ b/scripts/gdb/linux/proc.py
@@ -18,6 +18,7 @@ from linux import utils
from linux import tasks
from linux import lists
from linux import vfs
+from linux import rbtree
from struct import *
@@ -172,8 +173,7 @@ values of that process namespace"""
gdb.write("{:^18} {:^15} {:>9} {} {} options\n".format(
"mount", "super_block", "devname", "pathname", "fstype"))
- for mnt in lists.list_for_each_entry(namespace['list'],
- mount_ptr_type, "mnt_list"):
+ for mnt in rbtree.rb_inorder_for_each_entry(namespace['mounts'], mount_ptr_type, "mnt_node"):
devname = mnt['mnt_devname'].string()
devname = devname if devname else "none"
diff --git a/scripts/gdb/linux/rbtree.py b/scripts/gdb/linux/rbtree.py
index fe462855eefd..fcbcc5f4153c 100644
--- a/scripts/gdb/linux/rbtree.py
+++ b/scripts/gdb/linux/rbtree.py
@@ -9,6 +9,18 @@ from linux import utils
rb_root_type = utils.CachedType("struct rb_root")
rb_node_type = utils.CachedType("struct rb_node")
+def rb_inorder_for_each(root):
+ def inorder(node):
+ if node:
+ yield from inorder(node['rb_left'])
+ yield node
+ yield from inorder(node['rb_right'])
+
+ yield from inorder(root['rb_node'])
+
+def rb_inorder_for_each_entry(root, gdbtype, member):
+ for node in rb_inorder_for_each(root):
+ yield utils.container_of(node, gdbtype, member)
def rb_first(root):
if root.type == rb_root_type.get_type():
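rb_inorder_for_each() recurses left, yields the node, then recurses right, so
entries come out in key order. The same traversal works on plain dicts, since
gdb.Value supports the same ['rb_left']/['rb_right'] indexing; a
self-contained demo:

    def rb_inorder_for_each(root):
        def inorder(node):
            if node:
                yield from inorder(node['rb_left'])
                yield node
                yield from inorder(node['rb_right'])
        yield from inorder(root['rb_node'])

    leaf1 = {'rb_left': None, 'rb_right': None, 'key': 1}
    leaf3 = {'rb_left': None, 'rb_right': None, 'key': 3}
    root = {'rb_node': {'rb_left': leaf1, 'rb_right': leaf3, 'key': 2}}
    print([n['key'] for n in rb_inorder_for_each(root)])  # [1, 2, 3]
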
diff --git a/scripts/gdb/linux/stackdepot.py b/scripts/gdb/linux/stackdepot.py
index bb3a0f843931..37313a5a51a0 100644
--- a/scripts/gdb/linux/stackdepot.py
+++ b/scripts/gdb/linux/stackdepot.py
@@ -13,6 +13,13 @@ if constants.LX_CONFIG_STACKDEPOT:
stack_record_type = utils.CachedType('struct stack_record')
DEPOT_STACK_ALIGN = 4
+def help():
+ t = """Usage: lx-stack_depot_lookup [Hex handle value]
+ Example:
+ lx-stack_depot_lookup 0x00c80300\n"""
+ gdb.write("Unrecognized command\n")
+ raise gdb.GdbError(t)
+
def stack_depot_fetch(handle):
global DEPOT_STACK_ALIGN
global stack_record_type
@@ -57,3 +64,23 @@ def stack_depot_print(handle):
gdb.execute("x /i 0x%x" % (int(entries[i])))
except Exception as e:
gdb.write("%s\n" % e)
+
+class StackDepotLookup(gdb.Command):
+ """Search backtrace by handle"""
+
+ def __init__(self):
+ if constants.LX_CONFIG_STACKDEPOT:
+ super(StackDepotLookup, self).__init__("lx-stack_depot_lookup", gdb.COMMAND_SUPPORT)
+
+ def invoke(self, args, from_tty):
+ if not constants.LX_CONFIG_STACKDEPOT:
+ raise gdb.GdbError('CONFIG_STACKDEPOT is not set')
+
+ argv = gdb.string_to_argv(args)
+ if len(argv) == 1:
+ handle = int(argv[0], 16)
+ stack_depot_print(gdb.Value(handle).cast(utils.get_uint_type()))
+ else:
+ help()
+
+StackDepotLookup()
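StackDepotLookup follows the usual pattern for adding a gdb command from
Python: subclass gdb.Command, register the command name in __init__, and
parse arguments in invoke(). A minimal standalone sketch (hypothetical
command name, runnable inside a gdb session):

    import gdb

    class HelloCommand(gdb.Command):
        """Print a greeting (demo command)."""

        def __init__(self):
            super().__init__("lx-hello", gdb.COMMAND_SUPPORT)

        def invoke(self, args, from_tty):
            gdb.write("hello %s\n" % (args or "world"))

    HelloCommand()
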
diff --git a/scripts/gdb/linux/timerlist.py b/scripts/gdb/linux/timerlist.py
index 64bc87191003..98445671fe83 100644
--- a/scripts/gdb/linux/timerlist.py
+++ b/scripts/gdb/linux/timerlist.py
@@ -87,21 +87,22 @@ def print_cpu(hrtimer_bases, cpu, max_clock_bases):
text += "\n"
if constants.LX_CONFIG_TICK_ONESHOT:
- fmts = [(" .{} : {}", 'nohz_mode'),
- (" .{} : {} nsecs", 'last_tick'),
- (" .{} : {}", 'tick_stopped'),
- (" .{} : {}", 'idle_jiffies'),
- (" .{} : {}", 'idle_calls'),
- (" .{} : {}", 'idle_sleeps'),
- (" .{} : {} nsecs", 'idle_entrytime'),
- (" .{} : {} nsecs", 'idle_waketime'),
- (" .{} : {} nsecs", 'idle_exittime'),
- (" .{} : {} nsecs", 'idle_sleeptime'),
- (" .{}: {} nsecs", 'iowait_sleeptime'),
- (" .{} : {}", 'last_jiffies'),
- (" .{} : {}", 'next_timer'),
- (" .{} : {} nsecs", 'idle_expires')]
- text += "\n".join([s.format(f, ts[f]) for s, f in fmts])
+ TS_FLAG_STOPPED = 1 << 1
+ TS_FLAG_NOHZ = 1 << 4
+ text += f" .{'nohz':15s}: {int(bool(ts['flags'] & TS_FLAG_NOHZ))}\n"
+ text += f" .{'last_tick':15s}: {ts['last_tick']}\n"
+ text += f" .{'tick_stopped':15s}: {int(bool(ts['flags'] & TS_FLAG_STOPPED))}\n"
+ text += f" .{'idle_jiffies':15s}: {ts['idle_jiffies']}\n"
+ text += f" .{'idle_calls':15s}: {ts['idle_calls']}\n"
+ text += f" .{'idle_sleeps':15s}: {ts['idle_sleeps']}\n"
+ text += f" .{'idle_entrytime':15s}: {ts['idle_entrytime']} nsecs\n"
+ text += f" .{'idle_waketime':15s}: {ts['idle_waketime']} nsecs\n"
+ text += f" .{'idle_exittime':15s}: {ts['idle_exittime']} nsecs\n"
+ text += f" .{'idle_sleeptime':15s}: {ts['idle_sleeptime']} nsecs\n"
+ text += f" .{'iowait_sleeptime':15s}: {ts['iowait_sleeptime']} nsecs\n"
+ text += f" .{'last_jiffies':15s}: {ts['last_jiffies']}\n"
+ text += f" .{'next_timer':15s}: {ts['next_timer']}\n"
+ text += f" .{'idle_expires':15s}: {ts['idle_expires']} nsecs\n"
text += "\njiffies: {}\n".format(jiffies)
text += "\n"
diff --git a/scripts/gdb/vmlinux-gdb.py b/scripts/gdb/vmlinux-gdb.py
index fc53cdf286f1..d4eeed4506fd 100644
--- a/scripts/gdb/vmlinux-gdb.py
+++ b/scripts/gdb/vmlinux-gdb.py
@@ -49,3 +49,4 @@ else:
import linux.page_owner
import linux.slab
import linux.vmalloc
+ import linux.kasan
diff --git a/scripts/get_maintainer.pl b/scripts/get_maintainer.pl
index ee1aed7e090c..5ac02e198737 100755
--- a/scripts/get_maintainer.pl
+++ b/scripts/get_maintainer.pl
@@ -54,6 +54,7 @@ my $output_section_maxlen = 50;
my $scm = 0;
my $tree = 1;
my $web = 0;
+my $bug = 0;
my $subsystem = 0;
my $status = 0;
my $letters = "";
@@ -271,6 +272,7 @@ if (!GetOptions(
'scm!' => \$scm,
'tree!' => \$tree,
'web!' => \$web,
+ 'bug!' => \$bug,
'letters=s' => \$letters,
'pattern-depth=i' => \$pattern_depth,
'k|keywords!' => \$keywords,
@@ -320,13 +322,14 @@ if ($sections || $letters ne "") {
$status = 0;
$subsystem = 0;
$web = 0;
+ $bug = 0;
$keywords = 0;
$keywords_in_file = 0;
$interactive = 0;
} else {
- my $selections = $email + $scm + $status + $subsystem + $web;
+ my $selections = $email + $scm + $status + $subsystem + $web + $bug;
if ($selections == 0) {
- die "$P: Missing required option: email, scm, status, subsystem or web\n";
+ die "$P: Missing required option: email, scm, status, subsystem, web or bug\n";
}
}
@@ -631,6 +634,7 @@ my %hash_list_to;
my @list_to = ();
my @scm = ();
my @web = ();
+my @bug = ();
my @subsystem = ();
my @status = ();
my %deduplicate_name_hash = ();
@@ -662,6 +666,11 @@ if ($web) {
output(@web);
}
+if ($bug) {
+ @bug = uniq(@bug);
+ output(@bug);
+}
+
exit($exit);
sub self_test {
@@ -847,6 +856,7 @@ sub get_maintainers {
@list_to = ();
@scm = ();
@web = ();
+ @bug = ();
@subsystem = ();
@status = ();
%deduplicate_name_hash = ();
@@ -1069,6 +1079,7 @@ MAINTAINER field selection options:
--status => print status if any
--subsystem => print subsystem name if any
--web => print website(s) if any
+ --bug => print bug reporting info if any
Output type options:
--separator [, ] => separator for multiple entries on 1 line
@@ -1382,6 +1393,8 @@ sub add_categories {
push(@scm, $pvalue . $suffix);
} elsif ($ptype eq "W") {
push(@web, $pvalue . $suffix);
+ } elsif ($ptype eq "B") {
+ push(@bug, $pvalue . $suffix);
} elsif ($ptype eq "S") {
push(@status, $pvalue . $suffix);
}
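The new --bug option surfaces the B: entries from MAINTAINERS, which carry
bug-reporting URIs. An illustrative (made-up) section showing where such an
entry lives:

    THE EXAMPLE SUBSYSTEM
    M:	Some Maintainer <someone@example.org>
    B:	https://bugzilla.kernel.org
    S:	Maintained
    F:	drivers/example/
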
diff --git a/scripts/gfp-translate b/scripts/gfp-translate
index 6c9aed17cf56..8385ae0d5af9 100755
--- a/scripts/gfp-translate
+++ b/scripts/gfp-translate
@@ -62,25 +62,57 @@ if [ "$GFPMASK" = "none" ]; then
fi
# Extract GFP flags from the kernel source
-TMPFILE=`mktemp -t gfptranslate-XXXXXX` || exit 1
-grep -q ___GFP $SOURCE/include/linux/gfp_types.h
-if [ $? -eq 0 ]; then
- grep "^#define ___GFP" $SOURCE/include/linux/gfp_types.h | sed -e 's/u$//' | grep -v GFP_BITS > $TMPFILE
-else
- grep "^#define __GFP" $SOURCE/include/linux/gfp_types.h | sed -e 's/(__force gfp_t)//' | sed -e 's/u)/)/' | grep -v GFP_BITS | sed -e 's/)\//) \//' > $TMPFILE
-fi
+TMPFILE=`mktemp -t gfptranslate-XXXXXX.c` || exit 1
-# Parse the flags
-IFS="
-"
echo Source: $SOURCE
echo Parsing: $GFPMASK
-for LINE in `cat $TMPFILE`; do
- MASK=`echo $LINE | awk '{print $3}'`
- if [ $(($GFPMASK&$MASK)) -ne 0 ]; then
- echo $LINE
- fi
-done
-rm -f $TMPFILE
+(
+ cat <<EOF
+#include <stdint.h>
+#include <stdio.h>
+
+// Try to fool compiler.h into not including extra stuff
+#define __ASSEMBLY__ 1
+
+#include <generated/autoconf.h>
+#include <linux/gfp_types.h>
+
+static const char *masks[] = {
+EOF
+
+ sed -nEe 's/^[[:space:]]+(___GFP_.*)_BIT,.*$/\1/p' $SOURCE/include/linux/gfp_types.h |
+ while read b; do
+ cat <<EOF
+#if defined($b) && ($b > 0)
+ [${b}_BIT] = "$b",
+#endif
+EOF
+ done
+
+ cat <<EOF
+};
+
+int main(int argc, char *argv[])
+{
+ unsigned long long mask = $GFPMASK;
+
+ for (int i = 0; i < sizeof(mask) * 8; i++) {
+ unsigned long long bit = 1ULL << i;
+ if (mask & bit)
+ printf("\t%-25s0x%llx\n",
+ (i < ___GFP_LAST_BIT && masks[i]) ?
+ masks[i] : "*** INVALID ***",
+ bit);
+ }
+
+ return 0;
+}
+EOF
+) > $TMPFILE
+
+${CC:-gcc} -Wall -o ${TMPFILE}.bin -I $SOURCE/include $TMPFILE && ${TMPFILE}.bin
+
+rm -f $TMPFILE ${TMPFILE}.bin
+
exit 0
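The generated C program walks every bit of the mask and prints the matching
___GFP_* name. The same decoding logic as a Python sketch, with a two-entry
table standing in for the real one extracted from gfp_types.h:

    # Hypothetical subset of the bit table; the real script builds the
    # full table from include/linux/gfp_types.h at run time.
    masks = {0: "___GFP_DMA", 1: "___GFP_HIGHMEM"}

    def translate(gfpmask):
        for i in range(64):
            if gfpmask & (1 << i):
                print("\t%-25s0x%x" % (masks.get(i, "*** INVALID ***"), 1 << i))

    translate(0x3)
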
diff --git a/scripts/ipe/Makefile b/scripts/ipe/Makefile
new file mode 100644
index 000000000000..e87553fbb8d6
--- /dev/null
+++ b/scripts/ipe/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+subdir-y := polgen
diff --git a/scripts/ipe/polgen/.gitignore b/scripts/ipe/polgen/.gitignore
new file mode 100644
index 000000000000..b6f05cf3dc0e
--- /dev/null
+++ b/scripts/ipe/polgen/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+polgen
diff --git a/scripts/ipe/polgen/Makefile b/scripts/ipe/polgen/Makefile
new file mode 100644
index 000000000000..c20456a2f2e9
--- /dev/null
+++ b/scripts/ipe/polgen/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+hostprogs-always-y := polgen
+HOST_EXTRACFLAGS += \
+ -I$(srctree)/include \
+ -I$(srctree)/include/uapi \
diff --git a/scripts/ipe/polgen/polgen.c b/scripts/ipe/polgen/polgen.c
new file mode 100644
index 000000000000..c6283b3ff006
--- /dev/null
+++ b/scripts/ipe/polgen/polgen.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2024 Microsoft Corporation. All rights reserved.
+ */
+
+#include <stdlib.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <errno.h>
+
+static void usage(const char *const name)
+{
+ printf("Usage: %s OutputFile (PolicyFile)\n", name);
+ exit(EINVAL);
+}
+
+static int policy_to_buffer(const char *pathname, char **buffer, size_t *size)
+{
+ size_t fsize;
+ size_t read;
+ char *lbuf;
+ int rc = 0;
+ FILE *fd;
+
+ fd = fopen(pathname, "r");
+ if (!fd) {
+ rc = errno;
+ goto out;
+ }
+
+ fseek(fd, 0, SEEK_END);
+ fsize = ftell(fd);
+ rewind(fd);
+
+ lbuf = malloc(fsize);
+ if (!lbuf) {
+ rc = ENOMEM;
+ goto out_close;
+ }
+
+ read = fread((void *)lbuf, sizeof(*lbuf), fsize, fd);
+ if (read != fsize) {
+ rc = -1;
+ goto out_free;
+ }
+
+ *buffer = lbuf;
+ *size = fsize;
+ fclose(fd);
+
+ return rc;
+
+out_free:
+ free(lbuf);
+out_close:
+ fclose(fd);
+out:
+ return rc;
+}
+
+static int write_boot_policy(const char *pathname, const char *buf, size_t size)
+{
+ int rc = 0;
+ FILE *fd;
+ size_t i;
+
+ fd = fopen(pathname, "w");
+ if (!fd) {
+ rc = errno;
+ goto err;
+ }
+
+ fprintf(fd, "/* This file is automatically generated.");
+ fprintf(fd, " Do not edit. */\n");
+ fprintf(fd, "#include <linux/stddef.h>\n");
+ fprintf(fd, "\nextern const char *const ipe_boot_policy;\n\n");
+ fprintf(fd, "const char *const ipe_boot_policy =\n");
+
+ if (!buf || size == 0) {
+ fprintf(fd, "\tNULL;\n");
+ fclose(fd);
+ return 0;
+ }
+
+ fprintf(fd, "\t\"");
+
+ for (i = 0; i < size; ++i) {
+ switch (buf[i]) {
+ case '"':
+ fprintf(fd, "\\\"");
+ break;
+ case '\'':
+ fprintf(fd, "'");
+ break;
+ case '\n':
+ fprintf(fd, "\\n\"\n\t\"");
+ break;
+ case '\\':
+ fprintf(fd, "\\\\");
+ break;
+ case '\t':
+ fprintf(fd, "\\t");
+ break;
+ case '\?':
+ fprintf(fd, "\\?");
+ break;
+ default:
+ fprintf(fd, "%c", buf[i]);
+ }
+ }
+ fprintf(fd, "\";\n");
+ fclose(fd);
+
+ return 0;
+
+err:
+ if (fd)
+ fclose(fd);
+ return rc;
+}
+
+int main(int argc, const char *const argv[])
+{
+ char *policy = NULL;
+ size_t len = 0;
+ int rc = 0;
+
+ if (argc < 2)
+ usage(argv[0]);
+
+ if (argc > 2) {
+ rc = policy_to_buffer(argv[2], &policy, &len);
+ if (rc != 0)
+ goto cleanup;
+ }
+
+ rc = write_boot_policy(argv[1], policy, len);
+cleanup:
+ if (policy)
+ free(policy);
+ if (rc != 0)
+ perror("An error occurred during policy conversion: ");
+ return rc;
+}
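write_boot_policy() turns the policy text into a C string literal, escaping
quotes, backslashes, tabs, and question marks, and breaking the literal at
newlines. The same escaping as a Python sketch (the sample policy line is
illustrative):

    escapes = {'"': '\\"', '\\': '\\\\', '\t': '\\t', '?': '\\?',
               '\n': '\\n"\n\t"'}

    def to_c_literal(policy):
        body = ''.join(escapes.get(ch, ch) for ch in policy)
        return '\t"' + body + '";'

    print(to_c_literal('op=EXECUTE action=ALLOW\n'))
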
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
index 070a319140e8..8445b1390126 100755
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -107,20 +107,8 @@ vmlinux_link()
# ${1} - vmlinux image
gen_btf()
{
- local pahole_ver
local btf_data=${1}.btf.o
- if ! [ -x "$(command -v ${PAHOLE})" ]; then
- echo >&2 "BTF: ${1}: pahole (${PAHOLE}) is not available"
- return 1
- fi
-
- pahole_ver=$(${PAHOLE} --version | sed -E 's/v([0-9]+)\.([0-9]+)/\1\2/')
- if [ "${pahole_ver}" -lt "116" ]; then
- echo >&2 "BTF: ${1}: pahole version $(${PAHOLE} --version) is too old, need at least v1.16"
- return 1
- fi
-
info BTF "${btf_data}"
LLVM_OBJCOPY="${OBJCOPY}" ${PAHOLE} -J ${PAHOLE_FLAGS} ${1}
@@ -284,7 +272,7 @@ strip_debug=
vmlinux_link vmlinux
# fill in BTF IDs
-if is_enabled CONFIG_DEBUG_INFO_BTF && is_enabled CONFIG_BPF; then
+if is_enabled CONFIG_DEBUG_INFO_BTF; then
info BTFIDS vmlinux
${RESOLVE_BTFIDS} vmlinux
fi
diff --git a/scripts/macro_checker.py b/scripts/macro_checker.py
new file mode 100755
index 000000000000..ba550982e98f
--- /dev/null
+++ b/scripts/macro_checker.py
@@ -0,0 +1,131 @@
+#!/usr/bin/python3
+# SPDX-License-Identifier: GPL-2.0
+# Author: Julian Sun <sunjunchao2870@gmail.com>
+
+""" Find macro definitions with unused parameters. """
+
+import argparse
+import os
+import re
+
+parser = argparse.ArgumentParser()
+
+parser.add_argument("path", type=str, help="The file or dir path that needs check")
+parser.add_argument("-v", "--verbose", action="store_true",
+ help="Check conditional macros, but may lead to more false positives")
+args = parser.parse_args()
+
+macro_pattern = r"#define\s+(\w+)\(([^)]*)\)"
+# the variables below are used to reduce false positives
+fp_patterns = [r"\s*do\s*\{\s*\}\s*while\s*\(\s*0\s*\)",
+ r"\(?0\)?", r"\(?1\)?"]
+correct_macros = []
+cond_compile_mark = "#if"
+cond_compile_end = "#endif"
+
+def check_macro(macro_line, report):
+ match = re.match(macro_pattern, macro_line)
+ if match:
+ macro_def = re.sub(macro_pattern, '', macro_line)
+ identifier = match.group(1)
+ content = match.group(2)
+ arguments = [item.strip() for item in content.split(',') if item.strip()]
+
+ macro_def = macro_def.strip()
+ if not macro_def:
+ return
+ # used to reduce false positives, like #define endfor_nexthops(rt) }
+ if len(macro_def) == 1:
+ return
+
+ for fp_pattern in fp_patterns:
+ if (re.match(fp_pattern, macro_def)):
+ return
+
+ for arg in arguments:
+ # used to reduce false positives
+ if "..." in arg:
+ return
+ for arg in arguments:
+ if not arg in macro_def and report == False:
+ return
+ # if there is a correct macro with the same name, do not report it.
+ if not arg in macro_def and identifier not in correct_macros:
+ print(f"Argument {arg} is not used in function-line macro {identifier}")
+ return
+
+ correct_macros.append(identifier)
+
+
+# remove comment and whitespace
+def macro_strip(macro):
+ comment_pattern1 = r"\/\/*"
+ comment_pattern2 = r"\/\**\*\/"
+
+ macro = macro.strip()
+ macro = re.sub(comment_pattern1, '', macro)
+ macro = re.sub(comment_pattern2, '', macro)
+
+ return macro
+
+def file_check_macro(file_path, report):
+ # number of conditional compiling
+ cond_compile = 0
+ # only check .c and .h file
+ if not file_path.endswith(".c") and not file_path.endswith(".h"):
+ return
+
+ with open(file_path, "r") as f:
+ while True:
+ line = f.readline()
+ if not line:
+ break
+ line = line.strip()
+ if line.startswith(cond_compile_mark):
+ cond_compile += 1
+ continue
+ if line.startswith(cond_compile_end):
+ cond_compile -= 1
+ continue
+
+ macro = re.match(macro_pattern, line)
+ if macro:
+ macro = macro_strip(macro.string)
+ while macro[-1] == '\\':
+ macro = macro[0:-1]
+ macro = macro.strip()
+ macro += f.readline()
+ macro = macro_strip(macro)
+ if not args.verbose:
+ if file_path.endswith(".c") and cond_compile != 0:
+ continue
+ # 1 is for #ifdef xxx at the beginning of the header file
+ if file_path.endswith(".h") and cond_compile != 1:
+ continue
+ check_macro(macro, report)
+
+def get_correct_macros(path):
+ file_check_macro(path, False)
+
+def dir_check_macro(dir_path):
+
+ for dentry in os.listdir(dir_path):
+ path = os.path.join(dir_path, dentry)
+ if os.path.isdir(path):
+ dir_check_macro(path)
+ elif os.path.isfile(path):
+ get_correct_macros(path)
+ file_check_macro(path, True)
+
+
+def main():
+ if os.path.isfile(args.path):
+ get_correct_macros(args.path)
+ file_check_macro(args.path, True)
+ elif os.path.isdir(args.path):
+ dir_check_macro(args.path)
+ else:
+ print(f"{args.path} doesn't exit or is neither a file nor a dir")
+
+if __name__ == "__main__":
+ main()
\ No newline at end of file
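macro_pattern captures the macro name and its parameter list; everything
after the closing parenthesis is the body that is searched for each
parameter. A quick demonstration:

    import re

    macro_pattern = r"#define\s+(\w+)\(([^)]*)\)"
    line = "#define pr_args(fmt, x) fmt"
    m = re.match(macro_pattern, line)
    body = re.sub(macro_pattern, '', line).strip()
    # 'fmt' appears in the body, 'x' does not, so 'x' would be reported.
    print([a.strip() in body for a in m.group(2).split(',')])  # [True, False]
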
diff --git a/scripts/remove-stale-files b/scripts/remove-stale-files
index f38d26b78c2a..8fc55a749ccc 100755
--- a/scripts/remove-stale-files
+++ b/scripts/remove-stale-files
@@ -21,3 +21,5 @@ set -e
# then will be really dead and removed from the code base entirely.
rm -f *.spec
+
+rm -f lib/test_fortify.log
diff --git a/scripts/sphinx-pre-install b/scripts/sphinx-pre-install
index c1121f098542..ad9945ccb0cf 100755
--- a/scripts/sphinx-pre-install
+++ b/scripts/sphinx-pre-install
@@ -300,8 +300,6 @@ sub check_sphinx()
}
$cur_version = get_sphinx_version($sphinx);
- die ("$sphinx returned an error") if (!$cur_version);
-
die "$sphinx didn't return its version" if (!$cur_version);
if ($cur_version lt $min_version) {
diff --git a/scripts/test_fortify.sh b/scripts/test_fortify.sh
deleted file mode 100644
index c2688ab8281d..000000000000
--- a/scripts/test_fortify.sh
+++ /dev/null
@@ -1,66 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0-only
-set -e
-
-# Argument 1: Source file to build.
-IN="$1"
-shift
-# Extract just the filename for error messages below.
-FILE="${IN##*/}"
-# Extract the function name for error messages below.
-FUNC="${FILE#*-}"
-FUNC="${FUNC%%-*}"
-FUNC="${FUNC%%.*}"
-# Extract the symbol to test for in build/symbol test below.
-WANT="__${FILE%%-*}"
-
-# Argument 2: Where to write the build log.
-OUT="$1"
-shift
-TMP="${OUT}.tmp"
-
-# Argument 3: Path to "nm" tool.
-NM="$1"
-shift
-
-# Remaining arguments are: $(CC) $(c_flags)
-
-# Clean up temporary file at exit.
-__cleanup() {
- rm -f "$TMP"
-}
-trap __cleanup EXIT
-
-# Function names in warnings are wrapped in backticks under UTF-8 locales.
-# Run the commands with LANG=C so that grep output will not change.
-export LANG=C
-
-status=
-# Attempt to build a source that is expected to fail with a specific warning.
-if "$@" -Werror -c "$IN" -o "$OUT".o 2> "$TMP" ; then
- # If the build succeeds, either the test has failed or the
- # warning may only happen at link time (Clang). In that case,
- # make sure the expected symbol is unresolved in the symbol list.
- # If so, FORTIFY is working for this case.
- if ! $NM -A "$OUT".o | grep -m1 "\bU ${WANT}$" >>"$TMP" ; then
- status="warning: unsafe ${FUNC}() usage lacked '$WANT' symbol in $IN"
- fi
-else
- # If the build failed, check for the warning in the stderr.
- # GCC:
- # ./include/linux/fortify-string.h:316:25: error: call to '__write_overflow_field' declared with attribute warning: detected write beyond size of field (1st parameter); maybe use struct_group()? [-Werror=attribute-warning]
- # Clang 14:
- # ./include/linux/fortify-string.h:316:4: error: call to __write_overflow_field declared with 'warning' attribute: detected write beyond size of field (1st parameter); maybe use struct_group()? [-Werror,-Wattribute-warning]
- if ! grep -Eq -m1 "error: call to .?\b${WANT}\b.?" "$TMP" ; then
- status="warning: unsafe ${FUNC}() usage lacked '$WANT' warning in $IN"
- fi
-fi
-
-if [ -n "$status" ]; then
- # Report on failure results, including compilation warnings.
- echo "$status" | tee "$OUT" >&2
-else
- # Report on good results, and save any compilation output to log.
- echo "ok: unsafe ${FUNC}() usage correctly detected with '$WANT' in $IN" >"$OUT"
-fi
-cat "$TMP" >>"$OUT"
diff --git a/scripts/xz_wrap.sh b/scripts/xz_wrap.sh
index d06baf626abe..f19369687030 100755
--- a/scripts/xz_wrap.sh
+++ b/scripts/xz_wrap.sh
@@ -1,22 +1,162 @@
#!/bin/sh
+# SPDX-License-Identifier: 0BSD
#
# This is a wrapper for xz to compress the kernel image using appropriate
# compression options depending on the architecture.
#
# Author: Lasse Collin <lasse.collin@tukaani.org>
+
+# This has specialized settings for the following archs. However, an
+# XZ-compressed kernel isn't currently supported on every listed arch.
#
-# This file has been put into the public domain.
-# You can do whatever you want with this file.
-#
+# Arch Align Notes
+# arm 2/4 ARM and ARM-Thumb2
+# arm64 4
+# csky 2
+# loongarch 4
+# mips 2/4 MicroMIPS is 2-byte aligned
+# parisc 4
+# powerpc 4 Uses its own wrapper for compressors instead of this.
+# riscv 2/4
+# s390 2
+# sh 2
+# sparc 4
+# x86 1
+
+# A few archs use 2-byte or 4-byte aligned instructions depending on
+# the kernel config. This function is used to check if the relevant
+# config option is set to "y".
+is_enabled()
+{
+ grep -q "^$1=y$" include/config/auto.conf
+}
+
+# XZ_VERSION is needed to disable features that aren't available in
+# old XZ Utils versions.
+XZ_VERSION=$($XZ --robot --version) || exit
+XZ_VERSION=$(printf '%s\n' "$XZ_VERSION" | sed -n 's/^XZ_VERSION=//p')
+# Assume that no BCJ filter is available.
BCJ=
-LZMA2OPTS=
+# Set the instruction alignment to 1, 2, or 4 bytes.
+#
+# Set the BCJ filter if one is available.
+# It must match the #ifdef usage in lib/decompress_unxz.c.
case $SRCARCH in
- x86) BCJ=--x86 ;;
- powerpc) BCJ=--powerpc ;;
- arm) BCJ=--arm ;;
- sparc) BCJ=--sparc ;;
+ arm)
+ if is_enabled CONFIG_THUMB2_KERNEL; then
+ ALIGN=2
+ BCJ=--armthumb
+ else
+ ALIGN=4
+ BCJ=--arm
+ fi
+ ;;
+
+ arm64)
+ ALIGN=4
+
+ # ARM64 filter was added in XZ Utils 5.4.0.
+ if [ "$XZ_VERSION" -ge 50040002 ]; then
+ BCJ=--arm64
+ else
+ echo "$0: Upgrading to xz >= 5.4.0" \
+ "would enable the ARM64 filter" \
+ "for better compression" >&2
+ fi
+ ;;
+
+ csky)
+ ALIGN=2
+ ;;
+
+ loongarch)
+ ALIGN=4
+ ;;
+
+ mips)
+ if is_enabled CONFIG_CPU_MICROMIPS; then
+ ALIGN=2
+ else
+ ALIGN=4
+ fi
+ ;;
+
+ parisc)
+ ALIGN=4
+ ;;
+
+ powerpc)
+ ALIGN=4
+
+ # The filter is only for big endian instruction encoding.
+ if is_enabled CONFIG_CPU_BIG_ENDIAN; then
+ BCJ=--powerpc
+ fi
+ ;;
+
+ riscv)
+ if is_enabled CONFIG_RISCV_ISA_C; then
+ ALIGN=2
+ else
+ ALIGN=4
+ fi
+
+ # RISC-V filter was added in XZ Utils 5.6.0.
+ if [ "$XZ_VERSION" -ge 50060002 ]; then
+ BCJ=--riscv
+ else
+ echo "$0: Upgrading to xz >= 5.6.0" \
+ "would enable the RISC-V filter" \
+ "for better compression" >&2
+ fi
+ ;;
+
+ s390)
+ ALIGN=2
+ ;;
+
+ sh)
+ ALIGN=2
+ ;;
+
+ sparc)
+ ALIGN=4
+ BCJ=--sparc
+ ;;
+
+ x86)
+ ALIGN=1
+ BCJ=--x86
+ ;;
+
+ *)
+ echo "$0: Arch-specific tuning is missing for '$SRCARCH'" >&2
+
+ # Guess 2-byte-aligned instructions. Guessing too low
+ # should hurt less than guessing too high.
+ ALIGN=2
+ ;;
+esac
+
+# Select the LZMA2 options matching the instruction alignment.
+case $ALIGN in
+ 1) LZMA2OPTS= ;;
+ 2) LZMA2OPTS=lp=1 ;;
+ 4) LZMA2OPTS=lp=2,lc=2 ;;
+ *) echo "$0: ALIGN wrong or missing" >&2; exit 1 ;;
esac
-exec $XZ --check=crc32 $BCJ --lzma2=$LZMA2OPTS,dict=32MiB
+# Use single-threaded mode because it compresses a little better
+# (and uses less RAM) than multithreaded mode.
+#
+# For the best compression, the dictionary size shouldn't be
+# smaller than the uncompressed kernel. 128 MiB dictionary
+# needs less than 1400 MiB of RAM in single-threaded mode.
+#
+# On the archs that use this script to compress the kernel,
+# decompression in the preboot code is done in single-call mode.
+# Thus the dictionary size doesn't affect the memory requirements
+# of the preboot decompressor at all.
+exec $XZ --check=crc32 --threads=1 $BCJ --lzma2=$LZMA2OPTS,dict=128MiB
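
The numeric thresholds like 50040002 use the encoding of xz --robot --version:
major*10000000 + minor*10000 + patch*10 + stability, where stability 2 means
a stable release. A small sketch:

    def xz_robot_version(major, minor, patch, stability=2):
        # stability: 0 = alpha, 1 = beta, 2 = stable
        return major * 10000000 + minor * 10000 + patch * 10 + stability

    print(xz_robot_version(5, 4, 0))  # 50040002, ARM64 filter threshold
    print(xz_robot_version(5, 6, 0))  # 50060002, RISC-V filter threshold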