Diffstat (limited to 'scripts')
-rw-r--r--  scripts/Kconfig.include  |  3
-rw-r--r--  scripts/Makefile.modinst  |  2
-rw-r--r--  scripts/Makefile.package  |  20
-rw-r--r--  scripts/Makefile.warn (renamed from scripts/Makefile.extrawarn)  |  0
-rwxr-xr-x  scripts/cc-can-link.sh  |  2
-rwxr-xr-x  scripts/check-variable-fonts.sh  |  115
-rwxr-xr-x  scripts/checktransupdate.py  |  307
-rwxr-xr-x  scripts/clang-tools/gen_compile_commands.py  |  135
-rwxr-xr-x  scripts/crypto/gen-fips-testvecs.py  |  36
-rwxr-xr-x  scripts/crypto/gen-hash-testvecs.py  |  101
-rwxr-xr-x  scripts/documentation-file-ref-check  |  245
-rwxr-xr-x  scripts/find-unused-docs.sh  |  62
-rwxr-xr-x  scripts/generate_rust_analyzer.py  |  25
-rwxr-xr-x  scripts/get_abi.py  |  214
-rwxr-xr-x  scripts/get_feat.pl  |  641
-rwxr-xr-x  scripts/headers_install.sh  |  2
-rwxr-xr-x  scripts/jobserver-exec  |  88
-rwxr-xr-x  scripts/kernel-doc.pl  |  2439
-rwxr-xr-x  scripts/kernel-doc.py  |  6
-rw-r--r--  scripts/lib/abi/abi_parser.py  |  628
-rw-r--r--  scripts/lib/abi/abi_regex.py  |  234
-rw-r--r--  scripts/lib/abi/helpers.py  |  38
-rw-r--r--  scripts/lib/abi/system_symbols.py  |  378
-rw-r--r--  scripts/lib/kdoc/kdoc_files.py  |  291
-rw-r--r--  scripts/lib/kdoc/kdoc_item.py  |  42
-rw-r--r--  scripts/lib/kdoc/kdoc_output.py  |  749
-rw-r--r--  scripts/lib/kdoc/kdoc_parser.py  |  1649
-rw-r--r--  scripts/lib/kdoc/kdoc_re.py  |  270
-rw-r--r--  scripts/rustdoc_test_gen.rs  |  1
-rwxr-xr-x  scripts/sphinx-build-wrapper  |  719
-rwxr-xr-x  scripts/sphinx-pre-install  |  1621
-rwxr-xr-x  scripts/split-man.pl  |  28
-rwxr-xr-x  scripts/test_doc_build.py  |  513
33 files changed, 323 insertions(+), 11281 deletions(-)
diff --git a/scripts/Kconfig.include b/scripts/Kconfig.include
index 33193ca6e803..d42042b6c9e2 100644
--- a/scripts/Kconfig.include
+++ b/scripts/Kconfig.include
@@ -65,6 +65,9 @@ cc-option-bit = $(if-success,$(CC) -Werror $(1) -E -x c /dev/null -o /dev/null,$
m32-flag := $(cc-option-bit,-m32)
m64-flag := $(cc-option-bit,-m64)
+# Test whether the compiler can link userspace applications
+cc_can_link_user = $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(CLANG_FLAGS) $(USERCFLAGS) $(USERLDFLAGS) $(1))
+
rustc-version := $(shell,$(srctree)/scripts/rustc-version.sh $(RUSTC))
rustc-llvm-version := $(shell,$(srctree)/scripts/rustc-llvm-version.sh $(RUSTC))
diff --git a/scripts/Makefile.modinst b/scripts/Makefile.modinst
index 1628198f3e83..9ba45e5b32b1 100644
--- a/scripts/Makefile.modinst
+++ b/scripts/Makefile.modinst
@@ -100,7 +100,7 @@ endif
# Don't stop modules_install even if we can't sign external modules.
#
ifeq ($(filter pkcs11:%, $(CONFIG_MODULE_SIG_KEY)),)
-sig-key := $(if $(wildcard $(CONFIG_MODULE_SIG_KEY)),,$(srctree)/)$(CONFIG_MODULE_SIG_KEY)
+sig-key := $(if $(wildcard $(CONFIG_MODULE_SIG_KEY)),,$(objtree)/)$(CONFIG_MODULE_SIG_KEY)
else
sig-key := $(CONFIG_MODULE_SIG_KEY)
endif
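
The change makes the default signing key resolve relative to $(objtree) instead of $(srctree): when CONFIG_MODULE_SIG_KEY names a relative path that the $(wildcard ...) test cannot find, the key is now looked up in the build tree, where certs/signing_key.pem is generated during out-of-tree builds. PKCS#11 URIs still pass through untouched. A minimal Python sketch of the fallback logic; the paths are placeholders, not part of the patch:

    import glob
    import os

    def resolve_sig_key(config_key, objtree):
        """Mirror the Makefile logic: use the configured path as-is if it
        matches something on disk, otherwise resolve it against objtree."""
        if config_key.startswith("pkcs11:"):
            return config_key                  # hardware token, not a path
        if glob.glob(config_key):              # $(wildcard ...) succeeded
            return config_key
        return os.path.join(objtree, config_key)

    # resolve_sig_key("certs/signing_key.pem", "/build/linux")
    #   -> "/build/linux/certs/signing_key.pem" in an out-of-tree build
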
diff --git a/scripts/Makefile.package b/scripts/Makefile.package
index 74bcb9e7f7a4..83bfcf7cb09f 100644
--- a/scripts/Makefile.package
+++ b/scripts/Makefile.package
@@ -189,6 +189,25 @@ tar-pkg: linux-$(KERNELRELEASE)-$(ARCH).tar
tar%-pkg: linux-$(KERNELRELEASE)-$(ARCH).tar.% FORCE
@:
+# modules-cpio-pkg - generate an initramfs with the modules
+# ---------------------------------------------------------------------------
+
+.tmp_modules_cpio: FORCE
+ $(Q)$(MAKE) -f $(srctree)/Makefile
+ $(Q)rm -rf $@
+ $(Q)$(MAKE) -f $(srctree)/Makefile INSTALL_MOD_PATH=$@ modules_install
+
+quiet_cmd_cpio = CPIO $@
+ cmd_cpio = $(CONFIG_SHELL) $(srctree)/usr/gen_initramfs.sh -o $@ $<
+
+modules-$(KERNELRELEASE)-$(ARCH).cpio: .tmp_modules_cpio
+ $(Q)$(MAKE) $(build)=usr usr/gen_init_cpio
+ $(call cmd,cpio)
+
+PHONY += modules-cpio-pkg
+modules-cpio-pkg: modules-$(KERNELRELEASE)-$(ARCH).cpio
+ @:
+
# perf-tar*-src-pkg - generate a source tarball with perf source
# ---------------------------------------------------------------------------
@@ -245,6 +264,7 @@ help:
@echo ' tarbz2-pkg - Build the kernel as a bzip2 compressed tarball'
@echo ' tarxz-pkg - Build the kernel as a xz compressed tarball'
@echo ' tarzst-pkg - Build the kernel as a zstd compressed tarball'
+ @echo ' modules-cpio-pkg - Build the kernel modules as a cpio archive'
@echo ' perf-tar-src-pkg - Build the perf source tarball with no compression'
@echo ' perf-targz-src-pkg - Build the perf source tarball with gzip compression'
@echo ' perf-tarbz2-src-pkg - Build the perf source tarball with bz2 compression'
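
The new target chains three steps: build the kernel, stage the modules into a throwaway .tmp_modules_cpio directory via modules_install, then pack that directory with usr/gen_initramfs.sh (which drives the freshly built usr/gen_init_cpio helper). It is invoked from the top of the tree as "make modules-cpio-pkg". A rough Python sketch of the same pipeline; the kernel tree location is an assumption, not part of the patch:

    import shutil
    import subprocess

    def build_modules_cpio(srctree, release, arch):
        """Sketch of modules-cpio-pkg: modules_install into a staging
        directory, then pack the result into a cpio archive."""
        staging = ".tmp_modules_cpio"
        out = f"modules-{release}-{arch}.cpio"
        shutil.rmtree(staging, ignore_errors=True)
        subprocess.run(["make", "-C", srctree], check=True)
        subprocess.run(["make", "-C", srctree,
                        f"INSTALL_MOD_PATH={staging}", "modules_install"],
                       check=True)
        subprocess.run(["sh", f"{srctree}/usr/gen_initramfs.sh",
                        "-o", out, staging], check=True)
        return out
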
diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.warn
index 68e6fafcb80c..68e6fafcb80c 100644
--- a/scripts/Makefile.extrawarn
+++ b/scripts/Makefile.warn
diff --git a/scripts/cc-can-link.sh b/scripts/cc-can-link.sh
index 6efcead31989..e67fd8d7b684 100755
--- a/scripts/cc-can-link.sh
+++ b/scripts/cc-can-link.sh
@@ -1,7 +1,7 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
-cat << "END" | $@ -x c - -o /dev/null >/dev/null 2>&1
+cat << "END" | $@ -Werror -Wl,--fatal-warnings -x c - -o /dev/null >/dev/null 2>&1
#include <stdio.h>
int main(void)
{
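
The probe feeds a trivial program to the compiler on stdin and succeeds only if compiling and linking both work; with -Werror and -Wl,--fatal-warnings added, a toolchain that links only with warnings now counts as unable to link. A Python equivalent of the check, with the probe body as a stand-in for the script's heredoc:

    import subprocess

    PROBE = '#include <stdio.h>\nint main(void)\n{\n\tprintf("");\n\treturn 0;\n}\n'

    def cc_can_link(cc_cmd):
        """Return True if cc_cmd compiles and links a minimal userspace
        program with all compiler and linker warnings treated as errors."""
        res = subprocess.run(
            cc_cmd + ["-Werror", "-Wl,--fatal-warnings",
                      "-x", "c", "-", "-o", "/dev/null"],
            input=PROBE, text=True,
            stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
        return res.returncode == 0

    # cc_can_link(["gcc"]) -> True on a working native toolchain
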
diff --git a/scripts/check-variable-fonts.sh b/scripts/check-variable-fonts.sh
deleted file mode 100755
index ce63f0acea5f..000000000000
--- a/scripts/check-variable-fonts.sh
+++ /dev/null
@@ -1,115 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0-only
-# Copyright (C) Akira Yokosawa, 2024
-#
-# For "make pdfdocs", reports of build errors of translations.pdf started
-# arriving early 2024 [1, 2]. It turned out that Fedora and openSUSE
-# tumbleweed have started deploying variable-font [3] format of "Noto CJK"
-# fonts [4, 5]. For PDF, a LaTeX package named xeCJK is used for CJK
-# (Chinese, Japanese, Korean) pages. xeCJK requires XeLaTeX/XeTeX, which
-# does not (and likely never will) understand variable fonts for historical
-# reasons.
-#
-# The build error happens even when both of variable- and non-variable-format
-# fonts are found on the build system. To make matters worse, Fedora enlists
-# variable "Noto CJK" fonts in the requirements of langpacks-ja, -ko, -zh_CN,
-# -zh_TW, etc. Hence developers who have interest in CJK pages are more
-# likely to encounter the build errors.
-#
-# This script is invoked from the error path of "make pdfdocs" and emits
-# suggestions if variable-font files of "Noto CJK" fonts are in the list of
-# fonts accessible from XeTeX.
-#
-# References:
-# [1]: https://lore.kernel.org/r/8734tqsrt7.fsf@meer.lwn.net/
-# [2]: https://lore.kernel.org/r/1708585803.600323099@f111.i.mail.ru/
-# [3]: https://en.wikipedia.org/wiki/Variable_font
-# [4]: https://fedoraproject.org/wiki/Changes/Noto_CJK_Variable_Fonts
-# [5]: https://build.opensuse.org/request/show/1157217
-#
-#===========================================================================
-# Workarounds for building translations.pdf
-#===========================================================================
-#
-# * Denylist "variable font" Noto CJK fonts.
-# - Create $HOME/deny-vf/fontconfig/fonts.conf from template below, with
-# tweaks if necessary. Remove leading "# ".
-# - Path of fontconfig/fonts.conf can be overridden by setting an env
-# variable FONTS_CONF_DENY_VF.
-#
-# * Template:
-# -----------------------------------------------------------------
-# <?xml version="1.0"?>
-# <!DOCTYPE fontconfig SYSTEM "urn:fontconfig:fonts.dtd">
-# <fontconfig>
-# <!--
-# Ignore variable-font glob (not to break xetex)
-# -->
-# <selectfont>
-# <rejectfont>
-# <!--
-# for Fedora
-# -->
-# <glob>/usr/share/fonts/google-noto-*-cjk-vf-fonts</glob>
-# <!--
-# for openSUSE tumbleweed
-# -->
-# <glob>/usr/share/fonts/truetype/Noto*CJK*-VF.otf</glob>
-# </rejectfont>
-# </selectfont>
-# </fontconfig>
-# -----------------------------------------------------------------
-#
-# The denylisting is activated for "make pdfdocs".
-#
-# * For skipping CJK pages in PDF
-# - Uninstall texlive-xecjk.
-# Denylisting is not needed in this case.
-#
-# * For printing CJK pages in PDF
-# - Need non-variable "Noto CJK" fonts.
-# * Fedora
-# - google-noto-sans-cjk-fonts
-# - google-noto-serif-cjk-fonts
-# * openSUSE tumbleweed
-# - Non-variable "Noto CJK" fonts are not available as distro packages
-# as of April, 2024. Fetch a set of font files from upstream Noto
-# CJK Font released at:
-# https://github.com/notofonts/noto-cjk/tree/main/Sans#super-otc
-# and at:
-# https://github.com/notofonts/noto-cjk/tree/main/Serif#super-otc
-# , then uncompress and deploy them.
-# - Remember to update fontconfig cache by running fc-cache.
-#
-# !!! Caution !!!
-# Uninstalling "variable font" packages can be dangerous.
-# They might be depended upon by other packages important for your work.
-# Denylisting should be less invasive, as it is effective only while
-# XeLaTeX runs in "make pdfdocs".
-
-# Default per-user fontconfig path (overridden by env variable)
-: ${FONTS_CONF_DENY_VF:=$HOME/deny-vf}
-
-export XDG_CONFIG_HOME=${FONTS_CONF_DENY_VF}
-
-notocjkvffonts=`fc-list : file family variable | \
- grep 'variable=True' | \
- grep -E -e 'Noto (Sans|Sans Mono|Serif) CJK' | \
- sed -e 's/^/ /' -e 's/: Noto S.*$//' | sort | uniq`
-
-if [ "x$notocjkvffonts" != "x" ] ; then
- echo '============================================================================='
- echo 'XeTeX is confused by "variable font" files listed below:'
- echo "$notocjkvffonts"
- echo
- echo 'For CJK pages in PDF, they need to be hidden from XeTeX by denylisting.'
- echo 'Or, CJK pages can be skipped by uninstalling texlive-xecjk.'
- echo
- echo 'For more info on denylisting, other options, and variable font, see header'
- echo 'comments of scripts/check-variable-fonts.sh.'
- echo '============================================================================='
-fi
-
-# As this script is invoked from Makefile's error path, always error exit
-# regardless of whether any variable font is discovered or not.
-exit 1
diff --git a/scripts/checktransupdate.py b/scripts/checktransupdate.py
deleted file mode 100755
index e39529e46c3d..000000000000
--- a/scripts/checktransupdate.py
+++ /dev/null
@@ -1,307 +0,0 @@
-#!/usr/bin/env python3
-# SPDX-License-Identifier: GPL-2.0
-
-"""
-This script helps track the translation status of the documentation
-in different locales, e.g., zh_CN. More specially, it uses `git log`
-commit to find the latest english commit from the translation commit
-(order by author date) and the latest english commits from HEAD. If
-differences occur, report the file and commits that need to be updated.
-
-The usage is as follows:
-- ./scripts/checktransupdate.py -l zh_CN
-This will print all the files that need to be updated or translated in the zh_CN locale.
-- ./scripts/checktransupdate.py Documentation/translations/zh_CN/dev-tools/testing-overview.rst
-This will only print the status of the specified file.
-
-The output is something like:
-Documentation/dev-tools/kfence.rst
-No translation in the locale of zh_CN
-
-Documentation/translations/zh_CN/dev-tools/testing-overview.rst
-commit 42fb9cfd5b18 ("Documentation: dev-tools: Add link to RV docs")
-1 commits needs resolving in total
-"""
-
-import os
-import re
-import time
-import logging
-from argparse import ArgumentParser, ArgumentTypeError, BooleanOptionalAction
-from datetime import datetime
-
-
-def get_origin_path(file_path):
- """Get the origin path from the translation path"""
- paths = file_path.split("/")
- tidx = paths.index("translations")
- opaths = paths[:tidx]
- opaths += paths[tidx + 2 :]
- return "/".join(opaths)
-
-
-def get_latest_commit_from(file_path, commit):
- """Get the latest commit from the specified commit for the specified file"""
- command = f"git log --pretty=format:%H%n%aD%n%cD%n%n%B {commit} -1 -- {file_path}"
- logging.debug(command)
- pipe = os.popen(command)
- result = pipe.read()
- result = result.split("\n")
- if len(result) <= 1:
- return None
-
- logging.debug("Result: %s", result[0])
-
- return {
- "hash": result[0],
- "author_date": datetime.strptime(result[1], "%a, %d %b %Y %H:%M:%S %z"),
- "commit_date": datetime.strptime(result[2], "%a, %d %b %Y %H:%M:%S %z"),
- "message": result[4:],
- }
-
-
-def get_origin_from_trans(origin_path, t_from_head):
- """Get the latest origin commit from the translation commit"""
- o_from_t = get_latest_commit_from(origin_path, t_from_head["hash"])
- while o_from_t is not None and o_from_t["author_date"] > t_from_head["author_date"]:
- o_from_t = get_latest_commit_from(origin_path, o_from_t["hash"] + "^")
- if o_from_t is not None:
- logging.debug("tracked origin commit id: %s", o_from_t["hash"])
- return o_from_t
-
-
-def get_origin_from_trans_smartly(origin_path, t_from_head):
- """Get the latest origin commit from the formatted translation commit:
- (1) update to commit HASH (TITLE)
- (2) Update the translation through commit HASH (TITLE)
- """
- # catch flag for 12-bit commit hash
- HASH = r'([0-9a-f]{12})'
- # pattern 1: contains "update to commit HASH"
- pat_update_to = re.compile(rf'update to commit {HASH}')
- # pattern 2: contains "Update the translation through commit HASH"
- pat_update_translation = re.compile(rf'Update the translation through commit {HASH}')
-
- origin_commit_hash = None
- for line in t_from_head["message"]:
- # check if the line matches the first pattern
- match = pat_update_to.search(line)
- if match:
- origin_commit_hash = match.group(1)
- break
- # check if the line matches the second pattern
- match = pat_update_translation.search(line)
- if match:
- origin_commit_hash = match.group(1)
- break
- if origin_commit_hash is None:
- return None
- o_from_t = get_latest_commit_from(origin_path, origin_commit_hash)
- if o_from_t is not None:
- logging.debug("tracked origin commit id: %s", o_from_t["hash"])
- return o_from_t
-
-
-def get_commits_count_between(opath, commit1, commit2):
- """Get the commits count between two commits for the specified file"""
- command = f"git log --pretty=format:%H {commit1}...{commit2} -- {opath}"
- logging.debug(command)
- pipe = os.popen(command)
- result = pipe.read().split("\n")
- # filter out empty lines
- result = list(filter(lambda x: x != "", result))
- return result
-
-
-def pretty_output(commit):
- """Pretty print the commit message"""
- command = f"git log --pretty='format:%h (\"%s\")' -1 {commit}"
- logging.debug(command)
- pipe = os.popen(command)
- return pipe.read()
-
-
-def valid_commit(commit):
- """Check if the commit is valid or not"""
- msg = pretty_output(commit)
- return "Merge tag" not in msg
-
-def check_per_file(file_path):
- """Check the translation status for the specified file"""
- opath = get_origin_path(file_path)
-
- if not os.path.isfile(opath):
- logging.error("Cannot find the origin path for {file_path}")
- return
-
- o_from_head = get_latest_commit_from(opath, "HEAD")
- t_from_head = get_latest_commit_from(file_path, "HEAD")
-
- if o_from_head is None or t_from_head is None:
- logging.error("Cannot find the latest commit for %s", file_path)
- return
-
- o_from_t = get_origin_from_trans_smartly(opath, t_from_head)
- # notice, o_from_t from get_*_smartly() is always more accurate than from get_*()
- if o_from_t is None:
- o_from_t = get_origin_from_trans(opath, t_from_head)
-
- if o_from_t is None:
- logging.error("Error: Cannot find the latest origin commit for %s", file_path)
- return
-
- if o_from_head["hash"] == o_from_t["hash"]:
- logging.debug("No update needed for %s", file_path)
- else:
- logging.info(file_path)
- commits = get_commits_count_between(
- opath, o_from_t["hash"], o_from_head["hash"]
- )
- count = 0
- for commit in commits:
- if valid_commit(commit):
- logging.info("commit %s", pretty_output(commit))
- count += 1
- logging.info("%d commits needs resolving in total\n", count)
-
-
-def valid_locales(locale):
- """Check if the locale is valid or not"""
- script_path = os.path.dirname(os.path.abspath(__file__))
- linux_path = os.path.join(script_path, "..")
- if not os.path.isdir(f"{linux_path}/Documentation/translations/{locale}"):
- raise ArgumentTypeError("Invalid locale: {locale}")
- return locale
-
-
-def list_files_with_excluding_folders(folder, exclude_folders, include_suffix):
- """List all files with the specified suffix in the folder and its subfolders"""
- files = []
- stack = [folder]
-
- while stack:
- pwd = stack.pop()
- # filter out the exclude folders
- if os.path.basename(pwd) in exclude_folders:
- continue
- # list all files and folders
- for item in os.listdir(pwd):
- ab_item = os.path.join(pwd, item)
- if os.path.isdir(ab_item):
- stack.append(ab_item)
- else:
- if ab_item.endswith(include_suffix):
- files.append(ab_item)
-
- return files
-
-
-class DmesgFormatter(logging.Formatter):
- """Custom dmesg logging formatter"""
- def format(self, record):
- timestamp = time.time()
- formatted_time = f"[{timestamp:>10.6f}]"
- log_message = f"{formatted_time} {record.getMessage()}"
- return log_message
-
-
-def config_logging(log_level, log_file="checktransupdate.log"):
- """configure logging based on the log level"""
- # set up the root logger
- logger = logging.getLogger()
- logger.setLevel(log_level)
-
- # Create console handler
- console_handler = logging.StreamHandler()
- console_handler.setLevel(log_level)
-
- # Create file handler
- file_handler = logging.FileHandler(log_file)
- file_handler.setLevel(log_level)
-
- # Create formatter and add it to the handlers
- formatter = DmesgFormatter()
- console_handler.setFormatter(formatter)
- file_handler.setFormatter(formatter)
-
- # Add the handler to the logger
- logger.addHandler(console_handler)
- logger.addHandler(file_handler)
-
-
-def main():
- """Main function of the script"""
- script_path = os.path.dirname(os.path.abspath(__file__))
- linux_path = os.path.join(script_path, "..")
-
- parser = ArgumentParser(description="Check the translation update")
- parser.add_argument(
- "-l",
- "--locale",
- default="zh_CN",
- type=valid_locales,
- help="Locale to check when files are not specified",
- )
-
- parser.add_argument(
- "--print-missing-translations",
- action=BooleanOptionalAction,
- default=True,
- help="Print files that do not have translations",
- )
-
- parser.add_argument(
- '--log',
- default='INFO',
- choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
- help='Set the logging level')
-
- parser.add_argument(
- '--logfile',
- default='checktransupdate.log',
- help='Set the logging file (default: checktransupdate.log)')
-
- parser.add_argument(
- "files", nargs="*", help="Files to check, if not specified, check all files"
- )
- args = parser.parse_args()
-
- # Configure logging based on the --log argument
- log_level = getattr(logging, args.log.upper(), logging.INFO)
- config_logging(log_level)
-
- # Get files related to linux path
- files = args.files
- if len(files) == 0:
- offical_files = list_files_with_excluding_folders(
- os.path.join(linux_path, "Documentation"), ["translations", "output"], "rst"
- )
-
- for file in offical_files:
- # split the path into parts
- path_parts = file.split(os.sep)
- # find the index of the "Documentation" directory
- kindex = path_parts.index("Documentation")
- # insert the translations and locale after the Documentation directory
- new_path_parts = path_parts[:kindex + 1] + ["translations", args.locale] \
- + path_parts[kindex + 1 :]
- # join the path parts back together
- new_file = os.sep.join(new_path_parts)
- if os.path.isfile(new_file):
- files.append(new_file)
- else:
- if args.print_missing_translations:
- logging.info(os.path.relpath(os.path.abspath(file), linux_path))
- logging.info("No translation in the locale of %s\n", args.locale)
-
- files = list(map(lambda x: os.path.relpath(os.path.abspath(x), linux_path), files))
-
- # cd to linux root directory
- os.chdir(linux_path)
-
- for file in files:
- check_per_file(file)
-
-
-if __name__ == "__main__":
- main()
diff --git a/scripts/clang-tools/gen_compile_commands.py b/scripts/clang-tools/gen_compile_commands.py
index 96e6e46ad1a7..6f4afa92a466 100755
--- a/scripts/clang-tools/gen_compile_commands.py
+++ b/scripts/clang-tools/gen_compile_commands.py
@@ -21,6 +21,12 @@ _DEFAULT_LOG_LEVEL = 'WARNING'
_FILENAME_PATTERN = r'^\..*\.cmd$'
_LINE_PATTERN = r'^(saved)?cmd_[^ ]*\.o := (?P<command_prefix>.* )(?P<file_path>[^ ]*\.[cS]) *(;|$)'
_VALID_LOG_LEVELS = ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']
+
+# Pre-compiled regexes for better performance
+_INCLUDE_PATTERN = re.compile(r'^\s*#\s*include\s*[<"]([^>"]*)[>"]')
+_C_INCLUDE_PATTERN = re.compile(r'^\s*#\s*include\s*"([^"]*\.c)"\s*$')
+_FILENAME_MATCHER = re.compile(_FILENAME_PATTERN)
+
# The tools/ directory adopts a different build system, and produces .cmd
# files in a different format. Do not support it.
_EXCLUDE_DIRS = ['.git', 'Documentation', 'include', 'tools']
@@ -82,7 +88,6 @@ def cmdfiles_in_dir(directory):
The path to a .cmd file.
"""
- filename_matcher = re.compile(_FILENAME_PATTERN)
exclude_dirs = [ os.path.join(directory, d) for d in _EXCLUDE_DIRS ]
for dirpath, dirnames, filenames in os.walk(directory, topdown=True):
@@ -92,7 +97,7 @@ def cmdfiles_in_dir(directory):
continue
for filename in filenames:
- if filename_matcher.match(filename):
+ if _FILENAME_MATCHER.match(filename):
yield os.path.join(dirpath, filename)
@@ -149,8 +154,87 @@ def cmdfiles_for_modorder(modorder):
yield to_cmdfile(mod_line.rstrip())
+def extract_includes_from_file(source_file, root_directory):
+ """Extract #include statements from a C file.
+
+ Args:
+ source_file: Path to the source .c file to analyze
+ root_directory: Root directory for resolving relative paths
+
+ Returns:
+ List of header files that should be included (without quotes/brackets)
+ """
+ includes = []
+ if not os.path.exists(source_file):
+ return includes
+
+ try:
+ with open(source_file, 'r') as f:
+ for line in f:
+ line = line.strip()
+ # Look for #include statements.
+ # Match both #include "header.h" and #include <header.h>.
+ match = _INCLUDE_PATTERN.match(line)
+ if match:
+ header = match.group(1)
+ # Skip including other .c files to avoid circular includes.
+ if not header.endswith('.c'):
+ # For relative includes (quoted), resolve path relative to source file.
+ if '"' in line:
+ src_dir = os.path.dirname(source_file)
+ header_path = os.path.join(src_dir, header)
+ if os.path.exists(header_path):
+ rel_header = os.path.relpath(header_path, root_directory)
+ includes.append(rel_header)
+ else:
+ includes.append(header)
+ else:
+ # System include like <linux/sched.h>.
+ includes.append(header)
+ except IOError:
+ pass
+
+ return includes
+
+
+def find_included_c_files(source_file, root_directory):
+ """Find .c files that are included by the given source file.
+
+ Args:
+ source_file: Path to the source .c file
+ root_directory: Root directory for resolving relative paths
+
+ Yields:
+ Full paths to included .c files
+ """
+ if not os.path.exists(source_file):
+ return
+
+ try:
+ with open(source_file, 'r') as f:
+ for line in f:
+ line = line.strip()
+ # Look for #include "*.c" patterns.
+ match = _C_INCLUDE_PATTERN.match(line)
+ if match:
+ included_file = match.group(1)
+ # Handle relative paths.
+ if not os.path.isabs(included_file):
+ src_dir = os.path.dirname(source_file)
+ included_file = os.path.join(src_dir, included_file)
+
+ # Normalize the path.
+ included_file = os.path.normpath(included_file)
+
+ # Check if the file exists.
+ if os.path.exists(included_file):
+ yield included_file
+ except IOError:
+ pass
+
+
def process_line(root_directory, command_prefix, file_path):
- """Extracts information from a .cmd line and creates an entry from it.
+ """Extracts information from a .cmd line and creates entries from it.
Args:
root_directory: The directory that was searched for .cmd files. Usually
@@ -160,7 +244,8 @@ def process_line(root_directory, command_prefix, file_path):
Usually relative to root_directory, but sometimes absolute.
Returns:
- An entry to append to compile_commands.
+ A list of entries to append to compile_commands (may include multiple
+ entries if the source file includes other .c files).
Raises:
ValueError: Could not find the extracted file based on file_path and
@@ -176,11 +261,47 @@ def process_line(root_directory, command_prefix, file_path):
abs_path = os.path.realpath(os.path.join(root_directory, file_path))
if not os.path.exists(abs_path):
raise ValueError('File %s not found' % abs_path)
- return {
+
+ entries = []
+
+ # Create entry for the main source file.
+ main_entry = {
'directory': root_directory,
'file': abs_path,
'command': prefix + file_path,
}
+ entries.append(main_entry)
+
+ # Find and create entries for included .c files.
+ for included_c_file in find_included_c_files(abs_path, root_directory):
+ # For included .c files, create a compilation command that:
+ # 1. Uses the same compilation flags as the parent file
+ # 2. But compiles the included file directly (not the parent)
+ # 3. Includes necessary headers from the parent file for proper macro resolution
+
+ # Convert absolute path to relative for the command.
+ rel_path = os.path.relpath(included_c_file, root_directory)
+
+ # Extract includes from the parent file to provide proper compilation context.
+ extra_includes = ''
+ try:
+ parent_includes = extract_includes_from_file(abs_path, root_directory)
+ if parent_includes:
+ extra_includes = ' ' + ' '.join('-include ' + inc for inc in parent_includes)
+ except IOError:
+ pass
+
+ included_entry = {
+ 'directory': root_directory,
+ 'file': included_c_file,
+ # Use the same compilation prefix but target the included file directly.
+ # Add extra headers for proper macro resolution.
+ 'command': prefix + extra_includes + ' ' + rel_path,
+ }
+ entries.append(included_entry)
+ logging.debug('Added entry for included file: %s', included_c_file)
+
+ return entries
def main():
@@ -213,9 +334,9 @@ def main():
result = line_matcher.match(f.readline())
if result:
try:
- entry = process_line(directory, result.group('command_prefix'),
+ entries = process_line(directory, result.group('command_prefix'),
result.group('file_path'))
- compile_commands.append(entry)
+ compile_commands.extend(entries)
except ValueError as err:
logging.info('Could not add line from %s: %s',
cmdfile, err)
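
With this change, a source file that textually #includes another .c file (a pattern used by some kernel drivers) produces two compile_commands.json entries: the usual one for the parent, plus one that reuses the parent's flags and force-includes the parent's headers so macros resolve when tools index the included file. A sketch of the resulting entries; the directory, file names, and flags are illustrative:

    entries = [
        {   # the parent file, exactly as the .cmd file recorded it
            'directory': '/build/linux',
            'file': '/build/linux/drivers/foo/foo_core.c',
            'command': 'gcc -Wall -O2 ... drivers/foo/foo_core.c',
        },
        {   # the included .c file: same flags, parent headers force-included
            'directory': '/build/linux',
            'file': '/build/linux/drivers/foo/foo_hw.c',
            'command': 'gcc -Wall -O2 ... -include drivers/foo/foo.h '
                       'drivers/foo/foo_hw.c',
        },
    ]
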
diff --git a/scripts/crypto/gen-fips-testvecs.py b/scripts/crypto/gen-fips-testvecs.py
new file mode 100755
index 000000000000..db873f88619a
--- /dev/null
+++ b/scripts/crypto/gen-fips-testvecs.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Script that generates lib/crypto/fips.h
+#
+# Copyright 2025 Google LLC
+
+import hashlib
+import hmac
+
+fips_test_data = b"fips test data\0\0"
+fips_test_key = b"fips test key\0\0\0"
+
+def print_static_u8_array_definition(name, value):
+ print('')
+ print(f'static const u8 {name}[] __initconst __maybe_unused = {{')
+ for i in range(0, len(value), 8):
+ line = '\t' + ''.join(f'0x{b:02x}, ' for b in value[i:i+8])
+ print(f'{line.rstrip()}')
+ print('};')
+
+print('/* SPDX-License-Identifier: GPL-2.0-or-later */')
+print('/* This file was generated by: gen-fips-testvecs.py */')
+print()
+print('#include <linux/fips.h>')
+
+print_static_u8_array_definition("fips_test_data", fips_test_data)
+print_static_u8_array_definition("fips_test_key", fips_test_key)
+
+for alg in 'sha1', 'sha256', 'sha512':
+ ctx = hmac.new(fips_test_key, digestmod=alg)
+ ctx.update(fips_test_data)
+ print_static_u8_array_definition(f'fips_test_hmac_{alg}_value', ctx.digest())
+
+print_static_u8_array_definition('fips_test_sha3_256_value',
+ hashlib.sha3_256(fips_test_data).digest())
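
The generated header carries known-answer vectors for the FIPS self-tests: fixed 16-byte data and key buffers, HMAC digests over them for sha1/sha256/sha512, and a SHA3-256 digest. Each emitted constant can be reproduced independently, for example:

    import hashlib
    import hmac

    data = b"fips test data\0\0"
    key = b"fips test key\0\0\0"

    # Matches the bytes emitted as fips_test_hmac_sha256_value
    print(hmac.new(key, data, digestmod="sha256").hexdigest())
    # Matches the bytes emitted as fips_test_sha3_256_value
    print(hashlib.sha3_256(data).hexdigest())
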
diff --git a/scripts/crypto/gen-hash-testvecs.py b/scripts/crypto/gen-hash-testvecs.py
index fc063f2ee95f..c1d0517140bd 100755
--- a/scripts/crypto/gen-hash-testvecs.py
+++ b/scripts/crypto/gen-hash-testvecs.py
@@ -1,7 +1,7 @@
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0-or-later
#
-# Script that generates test vectors for the given cryptographic hash function.
+# Script that generates test vectors for the given hash function.
#
# Copyright 2025 Google LLC
@@ -50,11 +50,42 @@ class Poly1305:
m = (self.h + self.s) % 2**128
return m.to_bytes(16, byteorder='little')
+POLYVAL_POLY = sum((1 << i) for i in [128, 127, 126, 121, 0])
+POLYVAL_BLOCK_SIZE = 16
+
+# A straightforward, unoptimized implementation of POLYVAL.
+# Reference: https://datatracker.ietf.org/doc/html/rfc8452
+class Polyval:
+ def __init__(self, key):
+ assert len(key) == 16
+ self.h = int.from_bytes(key, byteorder='little')
+ self.acc = 0
+
+ # Note: this supports partial blocks only at the end.
+ def update(self, data):
+ for i in range(0, len(data), 16):
+ # acc += block
+ self.acc ^= int.from_bytes(data[i:i+16], byteorder='little')
+ # acc = (acc * h * x^-128) mod POLYVAL_POLY
+ product = 0
+ for j in range(128):
+ if (self.h & (1 << j)) != 0:
+ product ^= self.acc << j
+ if (product & (1 << j)) != 0:
+ product ^= POLYVAL_POLY << j
+ self.acc = product >> 128
+ return self
+
+ def digest(self):
+ return self.acc.to_bytes(16, byteorder='little')
+
def hash_init(alg):
if alg == 'poly1305':
# Use a fixed random key here, to present Poly1305 as an unkeyed hash.
# This allows all the test cases for unkeyed hashes to work on Poly1305.
return Poly1305(rand_bytes(POLY1305_KEY_SIZE))
+ if alg == 'polyval':
+ return Polyval(rand_bytes(POLYVAL_BLOCK_SIZE))
return hashlib.new(alg)
def hash_update(ctx, data):
@@ -85,9 +116,9 @@ def print_c_struct_u8_array_field(name, value):
print('\t\t},')
def alg_digest_size_const(alg):
- if alg == 'blake2s':
- return 'BLAKE2S_HASH_SIZE'
- return f'{alg.upper()}_DIGEST_SIZE'
+ if alg.startswith('blake2'):
+ return f'{alg.upper()}_HASH_SIZE'
+ return f'{alg.upper().replace("-", "_")}_DIGEST_SIZE'
def gen_unkeyed_testvecs(alg):
print('')
@@ -111,6 +142,18 @@ def gen_unkeyed_testvecs(alg):
f'hash_testvec_consolidated[{alg_digest_size_const(alg)}]',
hash_final(ctx))
+def gen_additional_sha3_testvecs():
+ max_len = 4096
+ in_data = rand_bytes(max_len)
+ for alg in ['shake128', 'shake256']:
+ ctx = hashlib.new('sha3-256')
+ for in_len in range(max_len + 1):
+ out_len = (in_len * 293) % (max_len + 1)
+ out = hashlib.new(alg, data=in_data[:in_len]).digest(out_len)
+ ctx.update(out)
+ print_static_u8_array_definition(f'{alg}_testvec_consolidated[SHA3_256_DIGEST_SIZE]',
+ ctx.digest())
+
def gen_hmac_testvecs(alg):
ctx = hmac.new(rand_bytes(32), digestmod=alg)
data = rand_bytes(4096)
@@ -124,19 +167,22 @@ def gen_hmac_testvecs(alg):
f'hmac_testvec_consolidated[{alg.upper()}_DIGEST_SIZE]',
ctx.digest())
-BLAKE2S_KEY_SIZE = 32
-BLAKE2S_HASH_SIZE = 32
-
-def gen_additional_blake2s_testvecs():
+def gen_additional_blake2_testvecs(alg):
+ if alg == 'blake2s':
+ (max_key_size, max_hash_size) = (32, 32)
+ elif alg == 'blake2b':
+ (max_key_size, max_hash_size) = (64, 64)
+ else:
+ raise ValueError(f'Unsupported alg: {alg}')
hashes = b''
- for key_len in range(BLAKE2S_KEY_SIZE + 1):
- for out_len in range(1, BLAKE2S_HASH_SIZE + 1):
- h = hashlib.blake2s(digest_size=out_len, key=rand_bytes(key_len))
+ for key_len in range(max_key_size + 1):
+ for out_len in range(1, max_hash_size + 1):
+ h = hashlib.new(alg, digest_size=out_len, key=rand_bytes(key_len))
h.update(rand_bytes(100))
hashes += h.digest()
print_static_u8_array_definition(
- 'blake2s_keyed_testvec_consolidated[BLAKE2S_HASH_SIZE]',
- compute_hash('blake2s', hashes))
+ f'{alg}_keyed_testvec_consolidated[{alg_digest_size_const(alg)}]',
+ compute_hash(alg, hashes))
def gen_additional_poly1305_testvecs():
key = b'\xff' * POLY1305_KEY_SIZE
@@ -150,19 +196,40 @@ def gen_additional_poly1305_testvecs():
'poly1305_allones_macofmacs[POLY1305_DIGEST_SIZE]',
Poly1305(key).update(data).digest())
+def gen_additional_polyval_testvecs():
+ key = b'\xff' * POLYVAL_BLOCK_SIZE
+ hashes = b''
+ for data_len in range(0, 4097, 16):
+ hashes += Polyval(key).update(b'\xff' * data_len).digest()
+ print_static_u8_array_definition(
+ 'polyval_allones_hashofhashes[POLYVAL_DIGEST_SIZE]',
+ Polyval(key).update(hashes).digest())
+
if len(sys.argv) != 2:
sys.stderr.write('Usage: gen-hash-testvecs.py ALGORITHM\n')
- sys.stderr.write('ALGORITHM may be any supported by Python hashlib, or poly1305.\n')
+ sys.stderr.write('ALGORITHM may be any supported by Python hashlib; or poly1305, polyval, or sha3.\n')
sys.stderr.write('Example: gen-hash-testvecs.py sha512\n')
sys.exit(1)
alg = sys.argv[1]
print('/* SPDX-License-Identifier: GPL-2.0-or-later */')
print(f'/* This file was generated by: {sys.argv[0]} {" ".join(sys.argv[1:])} */')
-gen_unkeyed_testvecs(alg)
-if alg == 'blake2s':
- gen_additional_blake2s_testvecs()
+if alg.startswith('blake2'):
+ gen_unkeyed_testvecs(alg)
+ gen_additional_blake2_testvecs(alg)
elif alg == 'poly1305':
+ gen_unkeyed_testvecs(alg)
gen_additional_poly1305_testvecs()
+elif alg == 'polyval':
+ gen_unkeyed_testvecs(alg)
+ gen_additional_polyval_testvecs()
+elif alg == 'sha3':
+ print()
+ print('/* SHA3-256 test vectors */')
+ gen_unkeyed_testvecs('sha3-256')
+ print()
+ print('/* SHAKE test vectors */')
+ gen_additional_sha3_testvecs()
else:
+ gen_unkeyed_testvecs(alg)
gen_hmac_testvecs(alg)
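
The script keeps its "consolidated" test-vector scheme: instead of storing a digest per input length, every per-length digest is folded into one running hash, so a single constant validates the whole sweep. The new SHAKE generator additionally varies the output length as (in_len * 293) % (max_len + 1). A self-contained miniature of the idea, with a much smaller sweep than the script's 4096 bytes:

    import hashlib

    max_len = 64                      # the real script uses 4096
    in_data = bytes(range(max_len))   # stand-in for the script's random data

    ctx = hashlib.sha3_256()          # consolidating hash
    for in_len in range(max_len + 1):
        out_len = (in_len * 293) % (max_len + 1)
        ctx.update(hashlib.shake_128(in_data[:in_len]).digest(out_len))

    # One constant now covers every (input length, output length) pair.
    print(ctx.hexdigest())
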
diff --git a/scripts/documentation-file-ref-check b/scripts/documentation-file-ref-check
deleted file mode 100755
index 408b1dbe7884..000000000000
--- a/scripts/documentation-file-ref-check
+++ /dev/null
@@ -1,245 +0,0 @@
-#!/usr/bin/env perl
-# SPDX-License-Identifier: GPL-2.0
-#
-# Treewide grep for references to files under Documentation, and report
-# non-existing files in stderr.
-
-use warnings;
-use strict;
-use Getopt::Long qw(:config no_auto_abbrev);
-
-# NOTE: only add things here when the file was gone, but the text wants
-# to mention a past documentation file, for example, to give credits for
-# the original work.
-my %false_positives = (
- "Documentation/scsi/scsi_mid_low_api.rst" => "Documentation/Configure.help",
- "drivers/vhost/vhost.c" => "Documentation/virtual/lguest/lguest.c",
-);
-
-my $scriptname = $0;
-$scriptname =~ s,.*/([^/]+/),$1,;
-
-# Parse arguments
-my $help = 0;
-my $fix = 0;
-my $warn = 0;
-
-if (! -e ".git") {
- printf "Warning: can't check if file exists, as this is not a git tree\n";
- exit 0;
-}
-
-GetOptions(
- 'fix' => \$fix,
- 'warn' => \$warn,
- 'h|help|usage' => \$help,
-);
-
-if ($help != 0) {
- print "$scriptname [--help] [--fix]\n";
- exit -1;
-}
-
-# Step 1: find broken references
-print "Finding broken references. This may take a while... " if ($fix);
-
-my %broken_ref;
-
-my $doc_fix = 0;
-
-open IN, "git grep ':doc:\`' Documentation/|"
- or die "Failed to run git grep";
-while (<IN>) {
- next if (!m,^([^:]+):.*\:doc\:\`([^\`]+)\`,);
- next if (m,sphinx/,);
-
- my $file = $1;
- my $d = $1;
- my $doc_ref = $2;
-
- my $f = $doc_ref;
-
- $d =~ s,(.*/).*,$1,;
- $f =~ s,.*\<([^\>]+)\>,$1,;
-
- if ($f =~ m,^/,) {
- $f = "$f.rst";
- $f =~ s,^/,Documentation/,;
- } else {
- $f = "$d$f.rst";
- }
-
- next if (grep -e, glob("$f"));
-
- if ($fix && !$doc_fix) {
- print STDERR "\nWARNING: Currently, can't fix broken :doc:`` fields\n";
- }
- $doc_fix++;
-
- print STDERR "$file: :doc:`$doc_ref`\n";
-}
-close IN;
-
-open IN, "git grep 'Documentation/'|"
- or die "Failed to run git grep";
-while (<IN>) {
- next if (!m/^([^:]+):(.*)/);
-
- my $f = $1;
- my $ln = $2;
-
- # On linux-next, discard the Next/ directory
- next if ($f =~ m,^Next/,);
-
- # Makefiles and scripts contain nasty expressions to parse docs
- next if ($f =~ m/Makefile/ || $f =~ m/\.(sh|py|pl|~|rej|org|orig)$/);
-
- # It doesn't make sense to parse hidden files
- next if ($f =~ m#/\.#);
-
- # Skip this script
- next if ($f eq $scriptname);
-
- # Ignore the dir where documentation will be built
- next if ($ln =~ m,\b(\S*)Documentation/output,);
-
- if ($ln =~ m,\b(\S*)(Documentation/[A-Za-z0-9\_\.\,\~/\*\[\]\?+-]*)(.*),) {
- my $prefix = $1;
- my $ref = $2;
- my $base = $2;
- my $extra = $3;
-
- # some file references are like:
- # /usr/src/linux/Documentation/DMA-{API,mapping}.txt
- # For now, ignore them
- next if ($extra =~ m/^{/);
-
- # Remove footnotes at the end like:
- # Documentation/devicetree/dt-object-internal.txt[1]
- $ref =~ s/(txt|rst)\[\d+]$/$1/;
-
- # Remove ending ']' without any '['
- $ref =~ s/\].*// if (!($ref =~ m/\[/));
-
- # Remove puntuation marks at the end
- $ref =~ s/[\,\.]+$//;
-
- my $fulref = "$prefix$ref";
-
- $fulref =~ s/^(\<file|ref)://;
- $fulref =~ s/^[\'\`]+//;
- $fulref =~ s,^\$\(.*\)/,,;
- $base =~ s,.*/,,;
-
- # Remove URL false-positives
- next if ($fulref =~ m/^http/);
-
- # Remove sched-pelt false-positive
- next if ($fulref =~ m,^Documentation/scheduler/sched-pelt$,);
-
- # Discard some build examples from Documentation/target/tcm_mod_builder.rst
- next if ($fulref =~ m,mnt/sdb/lio-core-2.6.git/Documentation/target,);
-
- # Check if exists, evaluating wildcards
- next if (grep -e, glob("$ref $fulref"));
-
- # Accept relative Documentation patches for tools/
- if ($f =~ m/tools/) {
- my $path = $f;
- $path =~ s,(.*)/.*,$1,;
- $path =~ s,testing/selftests/bpf,bpf/bpftool,;
- next if (grep -e, glob("$path/$ref $path/../$ref $path/$fulref"));
- }
-
- # Discard known false-positives
- if (defined($false_positives{$f})) {
- next if ($false_positives{$f} eq $fulref);
- }
-
- if ($fix) {
- if (!($ref =~ m/(scripts|Kconfig|Kbuild)/)) {
- $broken_ref{$ref}++;
- }
- } elsif ($warn) {
- print STDERR "Warning: $f references a file that doesn't exist: $fulref\n";
- } else {
- print STDERR "$f: $fulref\n";
- }
- }
-}
-close IN;
-
-exit 0 if (!$fix);
-
-# Step 2: Seek for file name alternatives
-print "Auto-fixing broken references. Please double-check the results\n";
-
-foreach my $ref (keys %broken_ref) {
- my $new =$ref;
-
- my $basedir = ".";
- # On translations, only seek inside the translations directory
- $basedir = $1 if ($ref =~ m,(Documentation/translations/[^/]+),);
-
- # get just the basename
- $new =~ s,.*/,,;
-
- my $f="";
-
- # usual reason for breakage: DT file moved around
- if ($ref =~ /devicetree/) {
- # usual reason for breakage: DT file renamed to .yaml
- if (!$f) {
- my $new_ref = $ref;
- $new_ref =~ s/\.txt$/.yaml/;
- $f=$new_ref if (-f $new_ref);
- }
-
- if (!$f) {
- my $search = $new;
- $search =~ s,^.*/,,;
- $f = qx(find Documentation/devicetree/ -iname "*$search*") if ($search);
- if (!$f) {
- # Manufacturer name may have changed
- $search =~ s/^.*,//;
- $f = qx(find Documentation/devicetree/ -iname "*$search*") if ($search);
- }
- }
- }
-
- # usual reason for breakage: file renamed to .rst
- if (!$f) {
- $new =~ s/\.txt$/.rst/;
- $f=qx(find $basedir -iname $new) if ($new);
- }
-
- # usual reason for breakage: use dash or underline
- if (!$f) {
- $new =~ s/[-_]/[-_]/g;
- $f=qx(find $basedir -iname $new) if ($new);
- }
-
- # Wild guess: seek for the same name on another place
- if (!$f) {
- $f = qx(find $basedir -iname $new) if ($new);
- }
-
- my @find = split /\s+/, $f;
-
- if (!$f) {
- print STDERR "ERROR: Didn't find a replacement for $ref\n";
- } elsif (scalar(@find) > 1) {
- print STDERR "WARNING: Won't auto-replace, as found multiple files close to $ref:\n";
- foreach my $j (@find) {
- $j =~ s,^./,,;
- print STDERR " $j\n";
- }
- } else {
- $f = $find[0];
- $f =~ s,^./,,;
- print "INFO: Replacing $ref to $f\n";
- foreach my $j (qx(git grep -l $ref)) {
- qx(sed "s\@$ref\@$f\@g" -i $j);
- }
- }
-}
diff --git a/scripts/find-unused-docs.sh b/scripts/find-unused-docs.sh
deleted file mode 100755
index d6d397fbf917..000000000000
--- a/scripts/find-unused-docs.sh
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/bin/bash
-# (c) 2017, Jonathan Corbet <corbet@lwn.net>
-# sayli karnik <karniksayli1995@gmail.com>
-#
-# This script detects files with kernel-doc comments for exported functions
-# that are not included in documentation.
-#
-# usage: Run 'scripts/find-unused-docs.sh directory' from top level of kernel
-# tree.
-#
-# example: $scripts/find-unused-docs.sh drivers/scsi
-#
-# Licensed under the terms of the GNU GPL License
-
-if ! [ -d "Documentation" ]; then
- echo "Run from top level of kernel tree"
- exit 1
-fi
-
-if [ "$#" -ne 1 ]; then
- echo "Usage: scripts/find-unused-docs.sh directory"
- exit 1
-fi
-
-if ! [ -d "$1" ]; then
- echo "Directory $1 doesn't exist"
- exit 1
-fi
-
-cd "$( dirname "${BASH_SOURCE[0]}" )"
-cd ..
-
-cd Documentation/
-
-echo "The following files contain kerneldoc comments for exported functions \
-that are not used in the formatted documentation"
-
-# FILES INCLUDED
-
-files_included=($(grep -rHR ".. kernel-doc" --include \*.rst | cut -d " " -f 3))
-
-declare -A FILES_INCLUDED
-
-for each in "${files_included[@]}"; do
- FILES_INCLUDED[$each]="$each"
- done
-
-cd ..
-
-# FILES NOT INCLUDED
-
-for file in `find $1 -name '*.c'`; do
-
- if [[ ${FILES_INCLUDED[$file]+_} ]]; then
- continue;
- fi
- str=$(PYTHONDONTWRITEBYTECODE=1 scripts/kernel-doc -export "$file" 2>/dev/null)
- if [[ -n "$str" ]]; then
- echo "$file"
- fi
- done
-
diff --git a/scripts/generate_rust_analyzer.py b/scripts/generate_rust_analyzer.py
index fc27f0cca752..147d0cc94068 100755
--- a/scripts/generate_rust_analyzer.py
+++ b/scripts/generate_rust_analyzer.py
@@ -15,7 +15,7 @@ def args_crates_cfgs(cfgs):
crates_cfgs = {}
for cfg in cfgs:
crate, vals = cfg.split("=", 1)
- crates_cfgs[crate] = vals.replace("--cfg", "").split()
+ crates_cfgs[crate] = vals.split()
return crates_cfgs
@@ -87,9 +87,30 @@ def generate_crates(srctree, objtree, sysroot_src, external_src, cfgs, core_edit
)
append_crate(
+ "proc_macro2",
+ srctree / "rust" / "proc-macro2" / "lib.rs",
+ ["core", "alloc", "std", "proc_macro"],
+ cfg=crates_cfgs["proc_macro2"],
+ )
+
+ append_crate(
+ "quote",
+ srctree / "rust" / "quote" / "lib.rs",
+ ["alloc", "proc_macro", "proc_macro2"],
+ cfg=crates_cfgs["quote"],
+ )
+
+ append_crate(
+ "syn",
+ srctree / "rust" / "syn" / "lib.rs",
+ ["proc_macro", "proc_macro2", "quote"],
+ cfg=crates_cfgs["syn"],
+ )
+
+ append_crate(
"macros",
srctree / "rust" / "macros" / "lib.rs",
- ["std", "proc_macro"],
+ ["std", "proc_macro", "proc_macro2", "quote", "syn"],
is_proc_macro=True,
)
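
The script now registers the vendored proc-macro2, quote, and syn crates so rust-analyzer can resolve the kernel's "macros" proc-macro crate against them. Each append_crate call becomes one crate entry in the rust-project.json that rust-analyzer loads; a sketch of the shape such an entry takes, where the dep indices, edition, and cfg value are made-up placeholders:

    crate_entry = {
        "display_name": "quote",
        "root_module": "rust/quote/lib.rs",
        "edition": "2021",
        "deps": [
            {"crate": 3, "name": "alloc"},       # indices into the crate list
            {"crate": 7, "name": "proc_macro"},
            {"crate": 8, "name": "proc_macro2"},
        ],
        "cfg": ['feature="proc-macro"'],
    }
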
diff --git a/scripts/get_abi.py b/scripts/get_abi.py
deleted file mode 100755
index 7ce4748a46d2..000000000000
--- a/scripts/get_abi.py
+++ /dev/null
@@ -1,214 +0,0 @@
-#!/usr/bin/env python3
-# pylint: disable=R0903
-# Copyright(c) 2025: Mauro Carvalho Chehab <mchehab@kernel.org>.
-# SPDX-License-Identifier: GPL-2.0
-
-"""
-Parse ABI documentation and produce results from it.
-"""
-
-import argparse
-import logging
-import os
-import sys
-
-# Import Python modules
-
-LIB_DIR = "lib/abi"
-SRC_DIR = os.path.dirname(os.path.realpath(__file__))
-
-sys.path.insert(0, os.path.join(SRC_DIR, LIB_DIR))
-
-from abi_parser import AbiParser # pylint: disable=C0413
-from abi_regex import AbiRegex # pylint: disable=C0413
-from helpers import ABI_DIR, DEBUG_HELP # pylint: disable=C0413
-from system_symbols import SystemSymbols # pylint: disable=C0413
-
-# Command line classes
-
-
-REST_DESC = """
-Produce output in ReST format.
-
-The output is done on two sections:
-
-- Symbols: show all parsed symbols in alphabetic order;
-- Files: cross reference the content of each file with the symbols on it.
-"""
-
-class AbiRest:
- """Initialize an argparse subparser for rest output"""
-
- def __init__(self, subparsers):
- """Initialize argparse subparsers"""
-
- parser = subparsers.add_parser("rest",
- formatter_class=argparse.RawTextHelpFormatter,
- description=REST_DESC)
-
- parser.add_argument("--enable-lineno", action="store_true",
- help="enable lineno")
- parser.add_argument("--raw", action="store_true",
- help="output text as contained in the ABI files. "
- "It not used, output will contain dynamically"
- " generated cross references when possible.")
- parser.add_argument("--no-file", action="store_true",
- help="Don't the files section")
- parser.add_argument("--show-hints", help="Show-hints")
-
- parser.set_defaults(func=self.run)
-
- def run(self, args):
- """Run subparser"""
-
- parser = AbiParser(args.dir, debug=args.debug)
- parser.parse_abi()
- parser.check_issues()
-
- for t in parser.doc(args.raw, not args.no_file):
- if args.enable_lineno:
- print (f".. LINENO {t[1]}#{t[2]}\n\n")
-
- print(t[0])
-
-class AbiValidate:
- """Initialize an argparse subparser for ABI validation"""
-
- def __init__(self, subparsers):
- """Initialize argparse subparsers"""
-
- parser = subparsers.add_parser("validate",
- formatter_class=argparse.ArgumentDefaultsHelpFormatter,
- description="list events")
-
- parser.set_defaults(func=self.run)
-
- def run(self, args):
- """Run subparser"""
-
- parser = AbiParser(args.dir, debug=args.debug)
- parser.parse_abi()
- parser.check_issues()
-
-
-class AbiSearch:
- """Initialize an argparse subparser for ABI search"""
-
- def __init__(self, subparsers):
- """Initialize argparse subparsers"""
-
- parser = subparsers.add_parser("search",
- formatter_class=argparse.ArgumentDefaultsHelpFormatter,
- description="Search ABI using a regular expression")
-
- parser.add_argument("expression",
- help="Case-insensitive search pattern for the ABI symbol")
-
- parser.set_defaults(func=self.run)
-
- def run(self, args):
- """Run subparser"""
-
- parser = AbiParser(args.dir, debug=args.debug)
- parser.parse_abi()
- parser.search_symbols(args.expression)
-
-UNDEFINED_DESC="""
-Check undefined ABIs on local machine.
-
-Read sysfs devnodes and check if the devnodes there are defined inside
-ABI documentation.
-
-The search logic tries to minimize the number of regular expressions to
-search per each symbol.
-
-By default, it runs on a single CPU, as Python support for CPU threads
-is still experimental, and multi-process runs on Python is very slow.
-
-On experimental tests, if the number of ABI symbols to search per devnode
-is contained on a limit of ~150 regular expressions, using a single CPU
-is a lot faster than using multiple processes. However, if the number of
-regular expressions to check is at the order of ~30000, using multiple
-CPUs speeds up the check.
-"""
-
-class AbiUndefined:
- """
- Initialize an argparse subparser for logic to check undefined ABI at
- the current machine's sysfs
- """
-
- def __init__(self, subparsers):
- """Initialize argparse subparsers"""
-
- parser = subparsers.add_parser("undefined",
- formatter_class=argparse.RawTextHelpFormatter,
- description=UNDEFINED_DESC)
-
- parser.add_argument("-S", "--sysfs-dir", default="/sys",
- help="directory where sysfs is mounted")
- parser.add_argument("-s", "--search-string",
- help="search string regular expression to limit symbol search")
- parser.add_argument("-H", "--show-hints", action="store_true",
- help="Hints about definitions for missing ABI symbols.")
- parser.add_argument("-j", "--jobs", "--max-workers", type=int, default=1,
- help="If bigger than one, enables multiprocessing.")
- parser.add_argument("-c", "--max-chunk-size", type=int, default=50,
- help="Maximum number of chunk size")
- parser.add_argument("-f", "--found", action="store_true",
- help="Also show found items. "
- "Helpful to debug the parser."),
- parser.add_argument("-d", "--dry-run", action="store_true",
- help="Don't actually search for undefined. "
- "Helpful to debug the parser."),
-
- parser.set_defaults(func=self.run)
-
- def run(self, args):
- """Run subparser"""
-
- abi = AbiRegex(args.dir, debug=args.debug,
- search_string=args.search_string)
-
- abi_symbols = SystemSymbols(abi=abi, hints=args.show_hints,
- sysfs=args.sysfs_dir)
-
- abi_symbols.check_undefined_symbols(dry_run=args.dry_run,
- found=args.found,
- max_workers=args.jobs,
- chunk_size=args.max_chunk_size)
-
-
-def main():
- """Main program"""
-
- parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
-
- parser.add_argument("-d", "--debug", type=int, default=0, help="debug level")
- parser.add_argument("-D", "--dir", default=ABI_DIR, help=DEBUG_HELP)
-
- subparsers = parser.add_subparsers()
-
- AbiRest(subparsers)
- AbiValidate(subparsers)
- AbiSearch(subparsers)
- AbiUndefined(subparsers)
-
- args = parser.parse_args()
-
- if args.debug:
- level = logging.DEBUG
- else:
- level = logging.INFO
-
- logging.basicConfig(level=level, format="[%(levelname)s] %(message)s")
-
- if "func" in args:
- args.func(args)
- else:
- sys.exit(f"Please specify a valid command for {sys.argv[0]}")
-
-
-# Call main method
-if __name__ == "__main__":
- main()
diff --git a/scripts/get_feat.pl b/scripts/get_feat.pl
deleted file mode 100755
index 40fb28c8424e..000000000000
--- a/scripts/get_feat.pl
+++ /dev/null
@@ -1,641 +0,0 @@
-#!/usr/bin/env perl
-# SPDX-License-Identifier: GPL-2.0
-
-use strict;
-use Pod::Usage;
-use Getopt::Long;
-use File::Find;
-use Fcntl ':mode';
-use Cwd 'abs_path';
-
-my $help;
-my $man;
-my $debug;
-my $arch;
-my $feat;
-my $enable_fname;
-
-my $basename = abs_path($0);
-$basename =~ s,/[^/]+$,/,;
-
-my $prefix=$basename . "../Documentation/features";
-
-# Used only at for full features output. The script will auto-adjust
-# such values for the minimal possible values
-my $status_size = 1;
-my $description_size = 1;
-
-GetOptions(
- "debug|d+" => \$debug,
- "dir=s" => \$prefix,
- 'help|?' => \$help,
- 'arch=s' => \$arch,
- 'feat=s' => \$feat,
- 'feature=s' => \$feat,
- "enable-fname" => \$enable_fname,
- man => \$man
-) or pod2usage(2);
-
-pod2usage(1) if $help;
-pod2usage(-exitstatus => 0, -verbose => 2) if $man;
-
-pod2usage(1) if (scalar @ARGV < 1 || @ARGV > 2);
-
-my ($cmd, $arg) = @ARGV;
-
-pod2usage(2) if ($cmd ne "current" && $cmd ne "rest" && $cmd ne "validate"
- && $cmd ne "ls" && $cmd ne "list");
-
-require Data::Dumper if ($debug);
-
-my %data;
-my %archs;
-
-#
-# Displays an error message, printing file name and line
-#
-sub parse_error($$$$) {
- my ($file, $ln, $msg, $data) = @_;
-
- $data =~ s/\s+$/\n/;
-
- print STDERR "Warning: file $file#$ln:\n\t$msg";
-
- if ($data ne "") {
- print STDERR ". Line\n\t\t$data";
- } else {
- print STDERR "\n";
- }
-}
-
-#
-# Parse a features file, storing its contents at %data
-#
-
-my $h_name = "Feature";
-my $h_kconfig = "Kconfig";
-my $h_description = "Description";
-my $h_subsys = "Subsystem";
-my $h_status = "Status";
-my $h_arch = "Architecture";
-
-my $max_size_name = length($h_name);
-my $max_size_kconfig = length($h_kconfig);
-my $max_size_description = length($h_description);
-my $max_size_subsys = length($h_subsys);
-my $max_size_status = length($h_status);
-
-my $max_size_arch = 0;
-my $max_size_arch_with_header;
-my $max_description_word = 0;
-
-sub parse_feat {
- my $file = $File::Find::name;
-
- my $mode = (stat($file))[2];
- return if ($mode & S_IFDIR);
- return if ($file =~ m,($prefix)/arch-support.txt,);
- return if (!($file =~ m,arch-support.txt$,));
-
- if ($enable_fname) {
- printf ".. FILE %s\n", abs_path($file);
- }
-
- my $subsys = "";
- $subsys = $2 if ( m,.*($prefix)/([^/]+).*,);
-
- if (length($subsys) > $max_size_subsys) {
- $max_size_subsys = length($subsys);
- }
-
- my $name;
- my $kconfig;
- my $description;
- my $comments = "";
- my $last_status;
- my $ln;
- my %arch_table;
-
- print STDERR "Opening $file\n" if ($debug > 1);
- open IN, $file;
-
- while(<IN>) {
- $ln++;
-
- if (m/^\#\s+Feature\s+name:\s*(.*\S)/) {
- $name = $1;
- if (length($name) > $max_size_name) {
- $max_size_name = length($name);
- }
- next;
- }
- if (m/^\#\s+Kconfig:\s*(.*\S)/) {
- $kconfig = $1;
- if (length($kconfig) > $max_size_kconfig) {
- $max_size_kconfig = length($kconfig);
- }
- next;
- }
- if (m/^\#\s+description:\s*(.*\S)/) {
- $description = $1;
- if (length($description) > $max_size_description) {
- $max_size_description = length($description);
- }
-
- foreach my $word (split /\s+/, $description) {
- if (length($word) > $max_description_word) {
- $max_description_word = length($word);
- }
- }
-
- next;
- }
- next if (m/^\\s*$/);
- next if (m/^\s*\-+\s*$/);
- next if (m/^\s*\|\s*arch\s*\|\s*status\s*\|\s*$/);
-
- if (m/^\#\s*(.*)/) {
- $comments .= "$1\n";
- next;
- }
- if (m/^\s*\|\s*(\S+):\s*\|\s*(\S+)\s*\|\s*$/) {
- my $a = $1;
- my $status = $2;
-
- if (length($status) > $max_size_status) {
- $max_size_status = length($status);
- }
- if (length($a) > $max_size_arch) {
- $max_size_arch = length($a);
- }
-
- $status = "---" if ($status =~ m/^\.\.$/);
-
- $archs{$a} = 1;
- $arch_table{$a} = $status;
- next;
- }
-
- #Everything else is an error
- parse_error($file, $ln, "line is invalid", $_);
- }
- close IN;
-
- if (!$name) {
- parse_error($file, $ln, "Feature name not found", "");
- return;
- }
-
- parse_error($file, $ln, "Subsystem not found", "") if (!$subsys);
- parse_error($file, $ln, "Kconfig not found", "") if (!$kconfig);
- parse_error($file, $ln, "Description not found", "") if (!$description);
-
- if (!%arch_table) {
- parse_error($file, $ln, "Architecture table not found", "");
- return;
- }
-
- $data{$name}->{where} = $file;
- $data{$name}->{subsys} = $subsys;
- $data{$name}->{kconfig} = $kconfig;
- $data{$name}->{description} = $description;
- $data{$name}->{comments} = $comments;
- $data{$name}->{table} = \%arch_table;
-
- $max_size_arch_with_header = $max_size_arch + length($h_arch);
-}
-
-#
-# Output feature(s) for a given architecture
-#
-sub output_arch_table {
- my $title = "Feature status on $arch architecture";
-
- print "=" x length($title) . "\n";
- print "$title\n";
- print "=" x length($title) . "\n\n";
-
- print "=" x $max_size_subsys;
- print " ";
- print "=" x $max_size_name;
- print " ";
- print "=" x $max_size_kconfig;
- print " ";
- print "=" x $max_size_status;
- print " ";
- print "=" x $max_size_description;
- print "\n";
- printf "%-${max_size_subsys}s ", $h_subsys;
- printf "%-${max_size_name}s ", $h_name;
- printf "%-${max_size_kconfig}s ", $h_kconfig;
- printf "%-${max_size_status}s ", $h_status;
- printf "%-${max_size_description}s\n", $h_description;
- print "=" x $max_size_subsys;
- print " ";
- print "=" x $max_size_name;
- print " ";
- print "=" x $max_size_kconfig;
- print " ";
- print "=" x $max_size_status;
- print " ";
- print "=" x $max_size_description;
- print "\n";
-
- foreach my $name (sort {
- ($data{$a}->{subsys} cmp $data{$b}->{subsys}) ||
- ("\L$a" cmp "\L$b")
- } keys %data) {
- next if ($feat && $name ne $feat);
-
- my %arch_table = %{$data{$name}->{table}};
- printf "%-${max_size_subsys}s ", $data{$name}->{subsys};
- printf "%-${max_size_name}s ", $name;
- printf "%-${max_size_kconfig}s ", $data{$name}->{kconfig};
- printf "%-${max_size_status}s ", $arch_table{$arch};
- printf "%-s\n", $data{$name}->{description};
- }
-
- print "=" x $max_size_subsys;
- print " ";
- print "=" x $max_size_name;
- print " ";
- print "=" x $max_size_kconfig;
- print " ";
- print "=" x $max_size_status;
- print " ";
- print "=" x $max_size_description;
- print "\n";
-}
-
-#
-# list feature(s) for a given architecture
-#
-sub list_arch_features {
- print "#\n# Kernel feature support matrix of the '$arch' architecture:\n#\n";
-
- foreach my $name (sort {
- ($data{$a}->{subsys} cmp $data{$b}->{subsys}) ||
- ("\L$a" cmp "\L$b")
- } keys %data) {
- next if ($feat && $name ne $feat);
-
- my %arch_table = %{$data{$name}->{table}};
-
- my $status = $arch_table{$arch};
- $status = " " x ((4 - length($status)) / 2) . $status;
-
- printf " %${max_size_subsys}s/ ", $data{$name}->{subsys};
- printf "%-${max_size_name}s: ", $name;
- printf "%-5s| ", $status;
- printf "%${max_size_kconfig}s # ", $data{$name}->{kconfig};
- printf " %s\n", $data{$name}->{description};
- }
-}
-
-#
-# Output a feature on all architectures
-#
-sub output_feature {
- my $title = "Feature $feat";
-
- print "=" x length($title) . "\n";
- print "$title\n";
- print "=" x length($title) . "\n\n";
-
- print ":Subsystem: $data{$feat}->{subsys} \n" if ($data{$feat}->{subsys});
- print ":Kconfig: $data{$feat}->{kconfig} \n" if ($data{$feat}->{kconfig});
-
- my $desc = $data{$feat}->{description};
- $desc =~ s/^([a-z])/\U$1/;
- $desc =~ s/\.?\s*//;
- print "\n$desc.\n\n";
-
- my $com = $data{$feat}->{comments};
- $com =~ s/^\s+//;
- $com =~ s/\s+$//;
- if ($com) {
- print "Comments\n";
- print "--------\n\n";
- print "$com\n\n";
- }
-
- print "=" x $max_size_arch_with_header;
- print " ";
- print "=" x $max_size_status;
- print "\n";
-
- printf "%-${max_size_arch}s ", $h_arch;
- printf "%-${max_size_status}s", $h_status . "\n";
-
- print "=" x $max_size_arch_with_header;
- print " ";
- print "=" x $max_size_status;
- print "\n";
-
- my %arch_table = %{$data{$feat}->{table}};
- foreach my $arch (sort keys %arch_table) {
- printf "%-${max_size_arch}s ", $arch;
- printf "%-${max_size_status}s\n", $arch_table{$arch};
- }
-
- print "=" x $max_size_arch_with_header;
- print " ";
- print "=" x $max_size_status;
- print "\n";
-}
-
-#
-# Output all features for all architectures
-#
-
-sub matrix_lines($$$) {
- my $desc_size = shift;
- my $status_size = shift;
- my $header = shift;
- my $fill;
- my $ln_marker;
-
- if ($header) {
- $ln_marker = "=";
- } else {
- $ln_marker = "-";
- }
-
- $fill = $ln_marker;
-
- print "+";
- print $fill x $max_size_name;
- print "+";
- print $fill x $desc_size;
- print "+";
- print $ln_marker x $status_size;
- print "+\n";
-}
-
-sub output_matrix {
- my $title = "Feature status on all architectures";
- my $notcompat = "Not compatible";
-
- print "=" x length($title) . "\n";
- print "$title\n";
- print "=" x length($title) . "\n\n";
-
- my $desc_title = "$h_kconfig / $h_description";
-
- my $desc_size = $max_size_kconfig + 4;
- if (!$description_size) {
- $desc_size = $max_size_description if ($max_size_description > $desc_size);
- } else {
- $desc_size = $description_size if ($description_size > $desc_size);
- }
- $desc_size = $max_description_word if ($max_description_word > $desc_size);
-
- $desc_size = length($desc_title) if (length($desc_title) > $desc_size);
-
- $max_size_status = length($notcompat) if (length($notcompat) > $max_size_status);
-
- # Ensure that the status will fit
- my $min_status_size = $max_size_status + $max_size_arch + 6;
- $status_size = $min_status_size if ($status_size < $min_status_size);
-
-
- my $cur_subsys = "";
- foreach my $name (sort {
- ($data{$a}->{subsys} cmp $data{$b}->{subsys}) or
- ("\L$a" cmp "\L$b")
- } keys %data) {
-
- if ($cur_subsys ne $data{$name}->{subsys}) {
- if ($cur_subsys ne "") {
- printf "\n";
- }
-
- $cur_subsys = $data{$name}->{subsys};
-
- my $title = "Subsystem: $cur_subsys";
- print "$title\n";
- print "=" x length($title) . "\n\n";
-
-
- matrix_lines($desc_size, $status_size, 0);
-
- printf "|%-${max_size_name}s", $h_name;
- printf "|%-${desc_size}s", $desc_title;
-
- printf "|%-${status_size}s|\n", "Status per architecture";
- matrix_lines($desc_size, $status_size, 1);
- }
-
- my %arch_table = %{$data{$name}->{table}};
- my $cur_status = "";
-
- my (@lines, @descs);
- my $line = "";
- foreach my $arch (sort {
- ($arch_table{$b} cmp $arch_table{$a}) or
- ("\L$a" cmp "\L$b")
- } keys %arch_table) {
-
- my $status = $arch_table{$arch};
-
- if ($status eq "---") {
- $status = $notcompat;
- }
-
- if ($status ne $cur_status) {
- if ($line ne "") {
- push @lines, $line;
- $line = "";
- }
- $line = "- **" . $status . "**: " . $arch;
- } elsif (length($line) + length ($arch) + 2 < $status_size) {
- $line .= ", " . $arch;
- } else {
- push @lines, $line;
- $line = " " . $arch;
- }
- $cur_status = $status;
- }
- push @lines, $line if ($line ne "");
-
- my $description = $data{$name}->{description};
- while (length($description) > $desc_size) {
- my $d = substr $description, 0, $desc_size;
-
- # Ensure that the chunk ends on a space;
- # if it can't, the column size is too small.
- # Instead of aborting, print what we have.
- if (!($d =~ s/^(.*)\s+.*/$1/)) {
- $d = substr $d, 0, -1;
- push @descs, "$d\\";
- $description =~ s/^\Q$d\E//;
- } else {
- push @descs, $d;
- $description =~ s/^\Q$d\E\s+//;
- }
- }
- push @descs, $description;
-
- # Ensure that the full description will be printed
- push @lines, "" while (scalar(@lines) < 2 + scalar(@descs));
-
- my $ln = 0;
- for my $line(@lines) {
- if (!$ln) {
- printf "|%-${max_size_name}s", $name;
- printf "|%-${desc_size}s", "``" . $data{$name}->{kconfig} . "``";
- } elsif ($ln >= 2 && scalar(@descs)) {
- printf "|%-${max_size_name}s", "";
- printf "|%-${desc_size}s", shift @descs;
- } else {
- printf "|%-${max_size_name}s", "";
- printf "|%-${desc_size}s", "";
- }
-
- printf "|%-${status_size}s|\n", $line;
-
- $ln++;
- }
- matrix_lines($desc_size, $status_size, 0);
- }
-}
-
-
-#
-# Parses all feature files located at $prefix dir
-#
-find({wanted =>\&parse_feat, no_chdir => 1}, $prefix);
-
-print STDERR Data::Dumper->Dump([\%data], [qw(*data)]) if ($debug);
-
-#
-# Handles the command
-#
-if ($cmd eq "current") {
- $arch = qx(uname -m | sed 's/x86_64/x86/' | sed 's/i386/x86/' | sed 's/s390x/s390/');
- $arch =~s/\s+$//;
-}
-
-if ($cmd eq "ls" or $cmd eq "list") {
- if (!$arch) {
- $arch = qx(uname -m | sed 's/x86_64/x86/' | sed 's/i386/x86/' | sed 's/s390x/s390/');
- $arch =~s/\s+$//;
- }
-
- list_arch_features;
-
- exit;
-}
-
-if ($cmd ne "validate") {
- if ($arch) {
- output_arch_table;
- } elsif ($feat) {
- output_feature;
- } else {
- output_matrix;
- }
-}
-
-__END__
-
-=head1 NAME
-
-get_feat.pl - parse the Linux Feature files and produce a ReST book.
-
-=head1 SYNOPSIS
-
-B<get_feat.pl> [--debug] [--man] [--help] [--dir=<dir>] [--arch=<arch>]
- [--feature=<feature>|--feat=<feature>] <COMMAND> [<ARGUMENT>]
-
-Where <COMMAND> can be:
-
-=over 8
-
-B<current> - output a table in ReST-compatible ASCII format
- with features for this machine's architecture
-
-B<rest> - output table(s) in ReST-compatible ASCII format
- with features in ReST markup language. The output
- is affected by the --arch or --feat/--feature flags.
-
-B<validate> - validate the contents of the files under
- Documentation/features.
-
-B<ls> or B<list> - list features for this machine's architecture,
- using an easier-to-parse format.
- The output is affected by the --arch flag.
-
-=back
-
-=head1 OPTIONS
-
-=over 8
-
-=item B<--arch>
-
-Output features for a specific architecture, optionally filtering for
-a single specific feature.
-
-=item B<--feat> or B<--feature>
-
-Output features for a single specific feature.
-
-=item B<--dir>
-
-Changes the location of the Feature files. By default, it uses
-the Documentation/features directory.
-
-=item B<--enable-fname>
-
-Prints the file names of the feature files. This can be used to track
-dependencies during the documentation build.
-
-=item B<--debug>
-
-Puts the script in verbose mode, which is useful for debugging. Can be
-given multiple times to increase verbosity.
-
-=item B<--help>
-
-Prints a brief help message and exits.
-
-=item B<--man>
-
-Prints the manual page and exits.
-
-=back
-
-=head1 DESCRIPTION
-
-Parse the Linux feature files from Documentation/features (by default),
-optionally producing results in ReST format.
-
-It supports outputting data per architecture, per feature, or as a
-feature x architecture matrix.
-
-When used with the B<rest> command, it will use one of three formats:
-
-If neither the B<--arch> nor the B<--feature> argument is used, it will
-output a matrix of features per architecture.
-
-If the B<--arch> argument is used, it will output the feature availability
-for a given architecture.
-
-If the B<--feat> argument is used, it will output the content of the
-feature file using reStructuredText markup.
-
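-For example (invocations are illustrative):
-
-    ./scripts/get_feat.pl rest                  # feature x architecture matrix
-    ./scripts/get_feat.pl --arch=x86 rest       # features of one architecture
-    ./scripts/get_feat.pl --feat=debug-vm-pgtable rest   # a single feature
-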
-=head1 BUGS
-
-Report bugs to Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
-
-=head1 COPYRIGHT
-
-Copyright (c) 2019 by Mauro Carvalho Chehab <mchehab+samsung@kernel.org>.
-
-License GPLv2: GNU GPL version 2 <http://gnu.org/licenses/gpl.html>.
-
-This is free software: you are free to change and redistribute it.
-There is NO WARRANTY, to the extent permitted by law.
-
-=cut
diff --git a/scripts/headers_install.sh b/scripts/headers_install.sh
index 4c20c62c4faf..0e4e939efc94 100755
--- a/scripts/headers_install.sh
+++ b/scripts/headers_install.sh
@@ -70,8 +70,6 @@ configs=$(sed -e '
#
# The format is <file-name>:<CONFIG-option> in each line.
config_leak_ignores="
-arch/arc/include/uapi/asm/page.h:CONFIG_ARC_PAGE_SIZE_16K
-arch/arc/include/uapi/asm/page.h:CONFIG_ARC_PAGE_SIZE_4K
arch/arc/include/uapi/asm/swab.h:CONFIG_ARC_HAS_SWAPE
arch/arm/include/uapi/asm/ptrace.h:CONFIG_CPU_ENDIAN_BE8
arch/nios2/include/uapi/asm/swab.h:CONFIG_NIOS2_CI_SWAB_NO
diff --git a/scripts/jobserver-exec b/scripts/jobserver-exec
index 7eca035472d3..758e947a6fb9 100755
--- a/scripts/jobserver-exec
+++ b/scripts/jobserver-exec
@@ -1,77 +1,35 @@
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0+
-#
-# This determines how many parallel tasks "make" is expecting, as it is
-# not exposed via any special variables, reserves them all, runs a subprocess
-# with PARALLELISM environment variable set, and releases the jobs back again.
-#
-# https://www.gnu.org/software/make/manual/html_node/POSIX-Jobserver.html#POSIX-Jobserver
-from __future__ import print_function
-import os, sys, errno
-import subprocess
-# Extract and prepare jobserver file descriptors from environment.
-claim = 0
-jobs = b""
-try:
- # Fetch the make environment options.
- flags = os.environ['MAKEFLAGS']
+"""
+Determines how many parallel tasks "make" is expecting, as it is
+not exposed via any special variables, reserves them all, runs a subprocess
+with PARALLELISM environment variable set, and releases the jobs back again.
- # Look for "--jobserver=R,W"
- # Note that GNU Make has used --jobserver-fds and --jobserver-auth
- # so this handles all of them.
- opts = [x for x in flags.split(" ") if x.startswith("--jobserver")]
+See:
+ https://www.gnu.org/software/make/manual/html_node/POSIX-Jobserver.html#POSIX-Jobserver
+"""
- # Parse out R,W file descriptor numbers and set them nonblocking.
- # If the MAKEFLAGS variable contains multiple instances of the
- # --jobserver-auth= option, the last one is relevant.
- fds = opts[-1].split("=", 1)[1]
+import os
+import sys
- # Starting with GNU Make 4.4, named pipes are used for reader and writer.
- # Example argument: --jobserver-auth=fifo:/tmp/GMfifo8134
- _, _, path = fds.partition('fifo:')
+LIB_DIR = "../tools/lib/python"
+SRC_DIR = os.path.dirname(os.path.realpath(__file__))
- if path:
- reader = os.open(path, os.O_RDONLY | os.O_NONBLOCK)
- writer = os.open(path, os.O_WRONLY)
- else:
- reader, writer = [int(x) for x in fds.split(",", 1)]
- # Open a private copy of reader to avoid setting nonblocking
- # on an unexpecting process with the same reader fd.
- reader = os.open("/proc/self/fd/%d" % (reader),
- os.O_RDONLY | os.O_NONBLOCK)
+sys.path.insert(0, os.path.join(SRC_DIR, LIB_DIR))
- # Read out as many jobserver slots as possible.
- while True:
- try:
- slot = os.read(reader, 8)
- jobs += slot
- except (OSError, IOError) as e:
- if e.errno == errno.EWOULDBLOCK:
- # Stop at the end of the jobserver queue.
- break
- # If something went wrong, give back the jobs.
- if len(jobs):
- os.write(writer, jobs)
- raise e
- # Add a bump for our caller's reservation, since we're just going
- # to sit here blocked on our child.
- claim = len(jobs) + 1
-except (KeyError, IndexError, ValueError, OSError, IOError) as e:
- # Any missing environment strings or bad fds should result in just
- # not being parallel.
- pass
+from jobserver import JobserverExec # pylint: disable=C0415
-# We can only claim parallelism if there was a jobserver (i.e. a top-level
-# "-jN" argument) and there were no other failures. Otherwise leave out the
-# environment variable and let the child figure out what is best.
-if claim > 0:
- os.environ['PARALLELISM'] = '%d' % (claim)
-rc = subprocess.call(sys.argv[1:])
+def main():
+ """Main program"""
+ if len(sys.argv) < 2:
+ name = os.path.basename(__file__)
+ sys.exit("usage: " + name +" command [args ...]\n" + __doc__)
-# Return all the reserved slots.
-if len(jobs):
- os.write(writer, jobs)
+ with JobserverExec() as jobserver:
+ jobserver.run(sys.argv[1:])
-sys.exit(rc)
+
+if __name__ == "__main__":
+ main()
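+
+# Typical use (illustrative) is to wrap a parallel-capable child from a
+# Makefile rule, e.g.:
+#   $(srctree)/scripts/jobserver-exec $(PYTHON3) some-build-tool
+# so that the child can honor the PARALLELISM environment variable
+# instead of guessing how many jobs it may spawn.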
diff --git a/scripts/kernel-doc.pl b/scripts/kernel-doc.pl
deleted file mode 100755
index 5db23cbf4eb2..000000000000
--- a/scripts/kernel-doc.pl
+++ /dev/null
@@ -1,2439 +0,0 @@
-#!/usr/bin/env perl
-# SPDX-License-Identifier: GPL-2.0
-# vim: softtabstop=4
-
-use warnings;
-use strict;
-
-## Copyright (c) 1998 Michael Zucchi, All Rights Reserved ##
-## Copyright (C) 2000, 1 Tim Waugh <twaugh@redhat.com> ##
-## Copyright (C) 2001 Simon Huggins ##
-## Copyright (C) 2005-2012 Randy Dunlap ##
-## Copyright (C) 2012 Dan Luedtke ##
-## ##
-## #define enhancements by Armin Kuster <akuster@mvista.com> ##
-## Copyright (c) 2000 MontaVista Software, Inc. ##
-#
-# Copyright (C) 2022 Tomasz Warniełło (POD)
-
-use Pod::Usage qw/pod2usage/;
-
-=head1 NAME
-
-kernel-doc - Print formatted kernel documentation to stdout
-
-=head1 SYNOPSIS
-
- kernel-doc [-h] [-v] [-Werror] [-Wall] [-Wreturn] [-Wshort-desc[ription]] [-Wcontents-before-sections]
- [ -man |
- -rst [-enable-lineno] |
- -none
- ]
- [
- -export |
- -internal |
- [-function NAME] ... |
- [-nosymbol NAME] ...
- ]
- [-no-doc-sections]
- [-export-file FILE] ...
- FILE ...
-
-Run `kernel-doc -h` for details.
-
-=head1 DESCRIPTION
-
-Read C language source or header FILEs, extract embedded documentation comments,
-and print formatted documentation to standard output.
-
-The documentation comments are identified by the "/**" opening comment mark.
-
-See Documentation/doc-guide/kernel-doc.rst for the documentation comment syntax.
-
-=cut
-
-# more perldoc at the end of the file
-
-## init lots of data
-
-my $errors = 0;
-my $warnings = 0;
-my $anon_struct_union = 0;
-
-# match expressions used to find embedded type information
-my $type_constant = '\b``([^\`]+)``\b';
-my $type_constant2 = '\%([-_*\w]+)';
-my $type_func = '(\w+)\(\)';
-my $type_param = '\@(\w*((\.\w+)|(->\w+))*(\.\.\.)?)';
-my $type_param_ref = '([\!~\*]?)\@(\w*((\.\w+)|(->\w+))*(\.\.\.)?)';
-my $type_fp_param = '\@(\w+)\(\)'; # Special RST handling for func ptr params
-my $type_fp_param2 = '\@(\w+->\S+)\(\)'; # Special RST handling for structs with func ptr params
-my $type_env = '(\$\w+)';
-my $type_enum = '\&(enum\s*([_\w]+))';
-my $type_struct = '\&(struct\s*([_\w]+))';
-my $type_typedef = '\&(typedef\s*([_\w]+))';
-my $type_union = '\&(union\s*([_\w]+))';
-my $type_member = '\&([_\w]+)(\.|->)([_\w]+)';
-my $type_fallback = '\&([_\w]+)';
-my $type_member_func = $type_member . '\(\)';
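-
-# For example, in a kernel-doc comment body (snippets are illustrative):
-# "%GFP_KERNEL" is matched by $type_constant2, "@dev" by $type_param,
-# "&struct device" by $type_struct, and "probe()" by $type_func.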
-
-# Output conversion substitutions.
-# One for each output format
-
-# these are pretty rough
-my @highlights_man = (
- [$type_constant, "\$1"],
- [$type_constant2, "\$1"],
- [$type_func, "\\\\fB\$1\\\\fP"],
- [$type_enum, "\\\\fI\$1\\\\fP"],
- [$type_struct, "\\\\fI\$1\\\\fP"],
- [$type_typedef, "\\\\fI\$1\\\\fP"],
- [$type_union, "\\\\fI\$1\\\\fP"],
- [$type_param, "\\\\fI\$1\\\\fP"],
- [$type_param_ref, "\\\\fI\$1\$2\\\\fP"],
- [$type_member, "\\\\fI\$1\$2\$3\\\\fP"],
- [$type_fallback, "\\\\fI\$1\\\\fP"]
- );
-my $blankline_man = "";
-
-# rst-mode
-my @highlights_rst = (
- [$type_constant, "``\$1``"],
- [$type_constant2, "``\$1``"],
-
- # Note: need to escape () to avoid func matching later
- [$type_member_func, "\\:c\\:type\\:`\$1\$2\$3\\\\(\\\\) <\$1>`"],
- [$type_member, "\\:c\\:type\\:`\$1\$2\$3 <\$1>`"],
- [$type_fp_param, "**\$1\\\\(\\\\)**"],
- [$type_fp_param2, "**\$1\\\\(\\\\)**"],
- [$type_func, "\$1()"],
- [$type_enum, "\\:c\\:type\\:`\$1 <\$2>`"],
- [$type_struct, "\\:c\\:type\\:`\$1 <\$2>`"],
- [$type_typedef, "\\:c\\:type\\:`\$1 <\$2>`"],
- [$type_union, "\\:c\\:type\\:`\$1 <\$2>`"],
-
- # in rst this can refer to any type
- [$type_fallback, "\\:c\\:type\\:`\$1`"],
- [$type_param_ref, "**\$1\$2**"]
- );
-my $blankline_rst = "\n";
-
-# read arguments
-if ($#ARGV == -1) {
- pod2usage(
- -message => "No arguments!\n",
- -exitval => 1,
- -verbose => 99,
- -sections => 'SYNOPSIS',
- -output => \*STDERR,
- );
-}
-
-my $kernelversion;
-
-my $dohighlight = "";
-
-my $verbose = 0;
-my $Werror = 0;
-my $Wreturn = 0;
-my $Wshort_desc = 0;
-my $output_mode = "rst";
-my $output_preformatted = 0;
-my $no_doc_sections = 0;
-my $enable_lineno = 0;
-my @highlights = @highlights_rst;
-my $blankline = $blankline_rst;
-my $modulename = "Kernel API";
-
-use constant {
- OUTPUT_ALL => 0, # output all symbols and doc sections
- OUTPUT_INCLUDE => 1, # output only specified symbols
- OUTPUT_EXPORTED => 2, # output exported symbols
- OUTPUT_INTERNAL => 3, # output non-exported symbols
-};
-my $output_selection = OUTPUT_ALL;
-my $show_not_found = 0; # No longer used
-
-my @export_file_list;
-
-my @build_time;
-if (defined($ENV{'KBUILD_BUILD_TIMESTAMP'}) &&
- (my $seconds = `date -d "${ENV{'KBUILD_BUILD_TIMESTAMP'}}" +%s`) ne '') {
- @build_time = gmtime($seconds);
-} else {
- @build_time = localtime;
-}
-
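-# e.g. KBUILD_BUILD_TIMESTAMP="Sat Jan 1 00:00:00 UTC 2022" pins $man_date
-# below, giving reproducible man-page headers (the value is illustrative).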
-my $man_date = ('January', 'February', 'March', 'April', 'May', 'June',
- 'July', 'August', 'September', 'October',
- 'November', 'December')[$build_time[4]] .
- " " . ($build_time[5]+1900);
-
-# Essentially these are globals.
-# They probably want to be tidied up, made more localised or something.
-# CAVEAT EMPTOR! Some of the others I localised may not want to be, which
-# could cause "use of undefined value" or other bugs.
-my ($function, %function_table, %parametertypes, $declaration_purpose);
-my %nosymbol_table = ();
-my $declaration_start_line;
-my ($type, $declaration_name, $return_type);
-my ($newsection, $newcontents, $prototype, $brcount);
-
-if (defined($ENV{'KBUILD_VERBOSE'}) && $ENV{'KBUILD_VERBOSE'} =~ '1') {
- $verbose = 1;
-}
-
-if (defined($ENV{'KCFLAGS'})) {
- my $kcflags = "$ENV{'KCFLAGS'}";
-
- if ($kcflags =~ /(\s|^)-Werror(\s|$)/) {
- $Werror = 1;
- }
-}
-
-# reading this variable is for backwards compat just in case
-# someone was calling it with the variable from outside the
-# kernel's build system
-if (defined($ENV{'KDOC_WERROR'})) {
- $Werror = "$ENV{'KDOC_WERROR'}";
-}
-# other environment variables are converted to command-line
-# arguments in cmd_checkdoc in the build system
-
-# Generated docbook code is inserted in a template at a point where
-# docbook v3.1 requires a non-zero sequence of RefEntry's; see:
-# https://www.oasis-open.org/docbook/documentation/reference/html/refentry.html
-# We keep track of number of generated entries and generate a dummy
-# if needs be to ensure the expanded template can be postprocessed
-# into html.
-my $section_counter = 0;
-
-my $lineprefix="";
-
-# Parser states
-use constant {
- STATE_NORMAL => 0, # normal code
- STATE_NAME => 1, # looking for function name
- STATE_BODY_MAYBE => 2, # body - or maybe more description
- STATE_BODY => 3, # the body of the comment
- STATE_BODY_WITH_BLANK_LINE => 4, # the body, which has a blank line
- STATE_PROTO => 5, # scanning prototype
- STATE_DOCBLOCK => 6, # documentation block
- STATE_INLINE => 7, # gathering doc outside main block
-};
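-
-# A typical kernel-doc comment takes the parser through STATE_NORMAL ->
-# STATE_NAME -> STATE_BODY -> STATE_PROTO and back; STATE_DOCBLOCK and
-# STATE_INLINE handle DOC: blocks and inline member comments.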
-my $state;
-my $leading_space;
-
-# Inline documentation state
-use constant {
- STATE_INLINE_NA => 0, # not applicable ($state != STATE_INLINE)
- STATE_INLINE_NAME => 1, # looking for member name (@foo:)
- STATE_INLINE_TEXT => 2, # looking for member documentation
- STATE_INLINE_END => 3, # done
- STATE_INLINE_ERROR => 4, # error - Comment without header was found.
- # Spit a warning as it's not
- # proper kernel-doc and ignore the rest.
-};
-my $inline_doc_state;
-
-#declaration types: can be
-# 'function', 'struct', 'union', 'enum', 'typedef'
-my $decl_type;
-
-# Name of the kernel-doc identifier for non-DOC markups
-my $identifier;
-
-my $doc_start = '^/\*\*\s*$'; # Allow whitespace at end of comment start.
-my $doc_end = '\*/';
-my $doc_com = '\s*\*\s*';
-my $doc_com_body = '\s*\* ?';
-my $doc_decl = $doc_com . '(\w+)';
-# @params and a strictly limited set of supported section names
-# Specifically:
-# Match @word:
-# @...:
-# @{section-name}:
-# while trying to not match literal block starts like "example::"
-#
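-# For example, " * @len: buffer length" and " * context: may sleep" start
-# sections, while " * example::" (a ReST literal block) must not match.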
-my $doc_sect = $doc_com .
- '\s*(\@[.\w]+|\@\.\.\.|description|context|returns?|notes?|examples?)\s*:([^:].*)?$';
-my $doc_content = $doc_com_body . '(.*)';
-my $doc_block = $doc_com . 'DOC:\s*(.*)?';
-my $doc_inline_start = '^\s*/\*\*\s*$';
-my $doc_inline_sect = '\s*\*\s*(@\s*[\w][\w\.]*\s*):(.*)';
-my $doc_inline_end = '^\s*\*/\s*$';
-my $doc_inline_oneline = '^\s*/\*\*\s*(@[\w\s]+):\s*(.*)\s*\*/\s*$';
-my $export_symbol = '^\s*EXPORT_SYMBOL(_GPL)?\s*\(\s*(\w+)\s*\)\s*;';
-my $export_symbol_ns = '^\s*EXPORT_SYMBOL_NS(_GPL)?\s*\(\s*(\w+)\s*,\s*"\S+"\)\s*;';
-my $function_pointer = qr{([^\(]*\(\*)\s*\)\s*\(([^\)]*)\)};
-my $attribute = qr{__attribute__\s*\(\([a-z0-9,_\*\s\(\)]*\)\)}i;
-
-my %parameterdescs;
-my %parameterdesc_start_lines;
-my @parameterlist;
-my %sections;
-my @sectionlist;
-my %section_start_lines;
-my $sectcheck;
-my $struct_actual;
-
-my $contents = "";
-my $new_start_line = 0;
-
-# the canonical section names. see also $doc_sect above.
-my $section_default = "Description"; # default section
-my $section_intro = "Introduction";
-my $section = $section_default;
-my $section_context = "Context";
-my $section_return = "Return";
-
-my $undescribed = "-- undescribed --";
-
-reset_state();
-
-while ($ARGV[0] =~ m/^--?(.*)/) {
- my $cmd = $1;
- shift @ARGV;
- if ($cmd eq "man") {
- $output_mode = "man";
- @highlights = @highlights_man;
- $blankline = $blankline_man;
- } elsif ($cmd eq "rst") {
- $output_mode = "rst";
- @highlights = @highlights_rst;
- $blankline = $blankline_rst;
- } elsif ($cmd eq "none") {
- $output_mode = "none";
- } elsif ($cmd eq "module") { # not needed for XML, inherits from calling document
- $modulename = shift @ARGV;
- } elsif ($cmd eq "function") { # to only output specific functions
- $output_selection = OUTPUT_INCLUDE;
- $function = shift @ARGV;
- $function_table{$function} = 1;
- } elsif ($cmd eq "nosymbol") { # Exclude specific symbols
- my $symbol = shift @ARGV;
- $nosymbol_table{$symbol} = 1;
- } elsif ($cmd eq "export") { # only exported symbols
- $output_selection = OUTPUT_EXPORTED;
- %function_table = ();
- } elsif ($cmd eq "internal") { # only non-exported symbols
- $output_selection = OUTPUT_INTERNAL;
- %function_table = ();
- } elsif ($cmd eq "export-file") {
- my $file = shift @ARGV;
- push(@export_file_list, $file);
- } elsif ($cmd eq "v") {
- $verbose = 1;
- } elsif ($cmd eq "Werror") {
- $Werror = 1;
- } elsif ($cmd eq "Wreturn") {
- $Wreturn = 1;
- } elsif ($cmd eq "Wshort-desc" or $cmd eq "Wshort-description") {
- $Wshort_desc = 1;
- } elsif ($cmd eq "Wall") {
- $Wreturn = 1;
- $Wshort_desc = 1;
- } elsif (($cmd eq "h") || ($cmd eq "help")) {
- pod2usage(-exitval => 0, -verbose => 2);
- } elsif ($cmd eq 'no-doc-sections') {
- $no_doc_sections = 1;
- } elsif ($cmd eq 'enable-lineno') {
- $enable_lineno = 1;
- } elsif ($cmd eq 'show-not-found') {
- $show_not_found = 1; # A no-op but don't fail
- } else {
- # Unknown argument
- pod2usage(
- -message => "Argument unknown!\n",
- -exitval => 1,
- -verbose => 99,
- -sections => 'SYNOPSIS',
- -output => \*STDERR,
- );
- }
- if ($#ARGV < 0){
- pod2usage(
- -message => "FILE argument missing\n",
- -exitval => 1,
- -verbose => 99,
- -sections => 'SYNOPSIS',
- -output => \*STDERR,
- );
- }
-}
-
-# continue execution near EOF;
-
-sub findprog($)
-{
- foreach(split(/:/, $ENV{PATH})) {
- return "$_/$_[0]" if(-x "$_/$_[0]");
- }
-}
-
-# get kernel version from env
-sub get_kernel_version() {
- my $version = 'unknown kernel version';
-
- if (defined($ENV{'KERNELVERSION'})) {
- $version = $ENV{'KERNELVERSION'};
- }
- return $version;
-}
-
-#
-sub print_lineno {
- my $lineno = shift;
- if ($enable_lineno && defined($lineno)) {
- print ".. LINENO " . $lineno . "\n";
- }
-}
-
-sub emit_warning {
- my $location = shift;
- my $msg = shift;
- print STDERR "$location: warning: $msg";
- ++$warnings;
-}
-##
-# dumps section contents to arrays/hashes intended for that purpose.
-#
-sub dump_section {
- my $file = shift;
- my $name = shift;
- my $contents = join "\n", @_;
-
- if ($name =~ m/$type_param/) {
- $name = $1;
- $parameterdescs{$name} = $contents;
- $sectcheck = $sectcheck . $name . " ";
- $parameterdesc_start_lines{$name} = $new_start_line;
- $new_start_line = 0;
- } elsif ($name eq "@\.\.\.") {
- $name = "...";
- $parameterdescs{$name} = $contents;
- $sectcheck = $sectcheck . $name . " ";
- $parameterdesc_start_lines{$name} = $new_start_line;
- $new_start_line = 0;
- } else {
- if (defined($sections{$name}) && ($sections{$name} ne "")) {
- # Only warn on user specified duplicate section names.
- if ($name ne $section_default) {
- emit_warning("${file}:$.", "duplicate section name '$name'\n");
- }
- $sections{$name} .= $contents;
- } else {
- $sections{$name} = $contents;
- push @sectionlist, $name;
- $section_start_lines{$name} = $new_start_line;
- $new_start_line = 0;
- }
- }
-}
-
-##
-# dump DOC: section after checking that it should go out
-#
-sub dump_doc_section {
- my $file = shift;
- my $name = shift;
- my $contents = join "\n", @_;
-
- if ($no_doc_sections) {
- return;
- }
-
- return if (defined($nosymbol_table{$name}));
-
- if (($output_selection == OUTPUT_ALL) ||
- (($output_selection == OUTPUT_INCLUDE) &&
- defined($function_table{$name})))
- {
- dump_section($file, $name, $contents);
- output_blockhead({'sectionlist' => \@sectionlist,
- 'sections' => \%sections,
- 'module' => $modulename,
- 'content-only' => ($output_selection != OUTPUT_ALL), });
- }
-}
-
-##
-# output function
-#
-# parameterdescs, a hash.
-# function => "function name"
-# parameterlist => @list of parameters
-# parameterdescs => %parameter descriptions
-# sectionlist => @list of sections
-# sections => %section descriptions
-#
-
-sub output_highlight {
- my $contents = join "\n",@_;
- my $line;
-
-# DEBUG
-# if (!defined $contents) {
-# use Carp;
-# confess "output_highlight got called with no args?\n";
-# }
-
-# print STDERR "contents b4:$contents\n";
- eval $dohighlight;
- die $@ if $@;
-# print STDERR "contents af:$contents\n";
-
- foreach $line (split "\n", $contents) {
- if (! $output_preformatted) {
- $line =~ s/^\s*//;
- }
- if ($line eq ""){
- if (! $output_preformatted) {
- print $lineprefix, $blankline;
- }
- } else {
- if ($output_mode eq "man" && substr($line, 0, 1) eq ".") {
- print "\\&$line";
- } else {
- print $lineprefix, $line;
- }
- }
- print "\n";
- }
-}
-
-##
-# output function in man
-sub output_function_man(%) {
- my %args = %{$_[0]};
- my ($parameter, $section);
- my $count;
- my $func_macro = $args{'func_macro'};
- my $paramcount = $#{$args{'parameterlist'}}; # -1 is empty
-
- print ".TH \"$args{'function'}\" 9 \"$args{'function'}\" \"$man_date\" \"Kernel Hacker's Manual\" LINUX\n";
-
- print ".SH NAME\n";
- print $args{'function'} . " \\- " . $args{'purpose'} . "\n";
-
- print ".SH SYNOPSIS\n";
- if ($args{'functiontype'} ne "") {
- print ".B \"" . $args{'functiontype'} . "\" " . $args{'function'} . "\n";
- } else {
- print ".B \"" . $args{'function'} . "\n";
- }
- $count = 0;
- my $parenth = "(";
- my $post = ",";
- foreach my $parameter (@{$args{'parameterlist'}}) {
- if ($count == $#{$args{'parameterlist'}}) {
- $post = ");";
- }
- $type = $args{'parametertypes'}{$parameter};
- if ($type =~ m/$function_pointer/) {
- # pointer-to-function
- print ".BI \"" . $parenth . $1 . "\" " . " \") (" . $2 . ")" . $post . "\"\n";
- } else {
- $type =~ s/([^\*])$/$1 /;
- print ".BI \"" . $parenth . $type . "\" " . " \"" . $post . "\"\n";
- }
- $count++;
- $parenth = "";
- }
-
- $paramcount = $#{$args{'parameterlist'}}; # -1 is empty
- if ($paramcount >= 0) {
- print ".SH ARGUMENTS\n";
- }
- foreach $parameter (@{$args{'parameterlist'}}) {
- my $parameter_name = $parameter;
- $parameter_name =~ s/\[.*//;
-
- print ".IP \"" . $parameter . "\" 12\n";
- output_highlight($args{'parameterdescs'}{$parameter_name});
- }
- foreach $section (@{$args{'sectionlist'}}) {
- print ".SH \"", uc $section, "\"\n";
- output_highlight($args{'sections'}{$section});
- }
-}
-
-##
-# output enum in man
-sub output_enum_man(%) {
- my %args = %{$_[0]};
- my ($parameter, $section);
- my $count;
-
- print ".TH \"$args{'module'}\" 9 \"enum $args{'enum'}\" \"$man_date\" \"API Manual\" LINUX\n";
-
- print ".SH NAME\n";
- print "enum " . $args{'enum'} . " \\- " . $args{'purpose'} . "\n";
-
- print ".SH SYNOPSIS\n";
- print "enum " . $args{'enum'} . " {\n";
- $count = 0;
- foreach my $parameter (@{$args{'parameterlist'}}) {
- print ".br\n.BI \" $parameter\"\n";
- if ($count == $#{$args{'parameterlist'}}) {
- print "\n};\n";
- last;
- } else {
- print ", \n.br\n";
- }
- $count++;
- }
-
- print ".SH Constants\n";
- foreach $parameter (@{$args{'parameterlist'}}) {
- my $parameter_name = $parameter;
- $parameter_name =~ s/\[.*//;
-
- print ".IP \"" . $parameter . "\" 12\n";
- output_highlight($args{'parameterdescs'}{$parameter_name});
- }
- foreach $section (@{$args{'sectionlist'}}) {
- print ".SH \"$section\"\n";
- output_highlight($args{'sections'}{$section});
- }
-}
-
-##
-# output struct in man
-sub output_struct_man(%) {
- my %args = %{$_[0]};
- my ($parameter, $section);
-
- print ".TH \"$args{'module'}\" 9 \"" . $args{'type'} . " " . $args{'struct'} . "\" \"$man_date\" \"API Manual\" LINUX\n";
-
- print ".SH NAME\n";
- print $args{'type'} . " " . $args{'struct'} . " \\- " . $args{'purpose'} . "\n";
-
- my $declaration = $args{'definition'};
- $declaration =~ s/\t/ /g;
- $declaration =~ s/\n/"\n.br\n.BI \"/g;
- print ".SH SYNOPSIS\n";
- print $args{'type'} . " " . $args{'struct'} . " {\n.br\n";
- print ".BI \"$declaration\n};\n.br\n\n";
-
- print ".SH Members\n";
- foreach $parameter (@{$args{'parameterlist'}}) {
- ($parameter =~ /^#/) && next;
-
- my $parameter_name = $parameter;
- $parameter_name =~ s/\[.*//;
-
- ($args{'parameterdescs'}{$parameter_name} ne $undescribed) || next;
- print ".IP \"" . $parameter . "\" 12\n";
- output_highlight($args{'parameterdescs'}{$parameter_name});
- }
- foreach $section (@{$args{'sectionlist'}}) {
- print ".SH \"$section\"\n";
- output_highlight($args{'sections'}{$section});
- }
-}
-
-##
-# output typedef in man
-sub output_typedef_man(%) {
- my %args = %{$_[0]};
- my ($parameter, $section);
-
- print ".TH \"$args{'module'}\" 9 \"$args{'typedef'}\" \"$man_date\" \"API Manual\" LINUX\n";
-
- print ".SH NAME\n";
- print "typedef " . $args{'typedef'} . " \\- " . $args{'purpose'} . "\n";
-
- foreach $section (@{$args{'sectionlist'}}) {
- print ".SH \"$section\"\n";
- output_highlight($args{'sections'}{$section});
- }
-}
-
-sub output_blockhead_man(%) {
- my %args = %{$_[0]};
- my ($parameter, $section);
- my $count;
-
- print ".TH \"$args{'module'}\" 9 \"$args{'module'}\" \"$man_date\" \"API Manual\" LINUX\n";
-
- foreach $section (@{$args{'sectionlist'}}) {
- print ".SH \"$section\"\n";
- output_highlight($args{'sections'}{$section});
- }
-}
-
-##
-# output in restructured text
-#
-
-#
-# This could use some work; it's used to output the DOC: sections, and
-# starts by putting out the name of the doc section itself, but that tends
-# to duplicate a header already in the template file.
-#
-sub output_blockhead_rst(%) {
- my %args = %{$_[0]};
- my ($parameter, $section);
-
- foreach $section (@{$args{'sectionlist'}}) {
- next if (defined($nosymbol_table{$section}));
-
- if ($output_selection != OUTPUT_INCLUDE) {
- print ".. _$section:\n\n";
- print "**$section**\n\n";
- }
- print_lineno($section_start_lines{$section});
- output_highlight_rst($args{'sections'}{$section});
- print "\n";
- }
-}
-
-#
-# Apply the RST highlights to a sub-block of text.
-#
-sub highlight_block($) {
- # The dohighlight kludge requires the text be called $contents
- my $contents = shift;
- eval $dohighlight;
- die $@ if $@;
- return $contents;
-}
-
-#
-# Regexes used only here.
-#
-my $sphinx_literal = '^[^.].*::$';
-my $sphinx_cblock = '^\.\.\ +code-block::';
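-
-# e.g. a line such as "Example usage::" opens a literal block; the
-# more-indented lines that follow are passed through unhighlighted until
-# the indentation drops back (see the $in_literal handling in
-# output_highlight_rst below).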
-
-sub output_highlight_rst {
- my $input = join "\n",@_;
- my $output = "";
- my $line;
- my $in_literal = 0;
- my $litprefix;
- my $block = "";
-
- foreach $line (split "\n",$input) {
- #
- # If we're in a literal block, see if we should drop out
- # of it. Otherwise pass the line straight through unmunged.
- #
- if ($in_literal) {
- if (! ($line =~ /^\s*$/)) {
- #
- # If this is the first non-blank line in a literal
- # block we need to figure out what the proper indent is.
- #
- if ($litprefix eq "") {
- $line =~ /^(\s*)/;
- $litprefix = '^' . $1;
- $output .= $line . "\n";
- } elsif (! ($line =~ /$litprefix/)) {
- $in_literal = 0;
- } else {
- $output .= $line . "\n";
- }
- } else {
- $output .= $line . "\n";
- }
- }
- #
- # Not in a literal block (or just dropped out)
- #
- if (! $in_literal) {
- $block .= $line . "\n";
- if (($line =~ /$sphinx_literal/) || ($line =~ /$sphinx_cblock/)) {
- $in_literal = 1;
- $litprefix = "";
- $output .= highlight_block($block);
- $block = ""
- }
- }
- }
-
- if ($block) {
- $output .= highlight_block($block);
- }
-
- $output =~ s/^\n+//g;
- $output =~ s/\n+$//g;
-
- foreach $line (split "\n", $output) {
- print $lineprefix . $line . "\n";
- }
-}
-
-sub output_function_rst(%) {
- my %args = %{$_[0]};
- my ($parameter, $section);
- my $oldprefix = $lineprefix;
-
- my $signature = "";
- my $func_macro = $args{'func_macro'};
- my $paramcount = $#{$args{'parameterlist'}}; # -1 is empty
-
- if ($func_macro) {
- $signature = $args{'function'};
- } else {
- if ($args{'functiontype'}) {
- $signature = $args{'functiontype'} . " ";
- }
- $signature .= $args{'function'} . " (";
- }
-
- my $count = 0;
- foreach my $parameter (@{$args{'parameterlist'}}) {
- if ($count ne 0) {
- $signature .= ", ";
- }
- $count++;
- $type = $args{'parametertypes'}{$parameter};
-
- if ($type =~ m/$function_pointer/) {
- # pointer-to-function
- $signature .= $1 . $parameter . ") (" . $2 . ")";
- } else {
- $signature .= $type;
- }
- }
-
- if (!$func_macro) {
- $signature .= ")";
- }
-
- if ($args{'typedef'} || $args{'functiontype'} eq "") {
- print ".. c:macro:: ". $args{'function'} . "\n\n";
-
- if ($args{'typedef'}) {
- print_lineno($declaration_start_line);
- print " **Typedef**: ";
- $lineprefix = "";
- output_highlight_rst($args{'purpose'});
- print "\n\n**Syntax**\n\n";
- print " ``$signature``\n\n";
- } else {
- print "``$signature``\n\n";
- }
- } else {
- print ".. c:function:: $signature\n\n";
- }
-
- if (!$args{'typedef'}) {
- print_lineno($declaration_start_line);
- $lineprefix = " ";
- output_highlight_rst($args{'purpose'});
- print "\n";
- }
-
- #
- # Put our descriptive text into a container (thus an HTML <div>) to help
- # set the function prototypes apart.
- #
- $lineprefix = " ";
- if ($paramcount >= 0) {
- print ".. container:: kernelindent\n\n";
- print $lineprefix . "**Parameters**\n\n";
- }
- foreach $parameter (@{$args{'parameterlist'}}) {
- my $parameter_name = $parameter;
- $parameter_name =~ s/\[.*//;
- $type = $args{'parametertypes'}{$parameter};
-
- if ($type ne "") {
- print $lineprefix . "``$type``\n";
- } else {
- print $lineprefix . "``$parameter``\n";
- }
-
- print_lineno($parameterdesc_start_lines{$parameter_name});
-
- $lineprefix = " ";
- if (defined($args{'parameterdescs'}{$parameter_name}) &&
- $args{'parameterdescs'}{$parameter_name} ne $undescribed) {
- output_highlight_rst($args{'parameterdescs'}{$parameter_name});
- } else {
- print $lineprefix . "*undescribed*\n";
- }
- $lineprefix = " ";
- print "\n";
- }
-
- output_section_rst(@_);
- $lineprefix = $oldprefix;
-}
-
-sub output_section_rst(%) {
- my %args = %{$_[0]};
- my $section;
- my $oldprefix = $lineprefix;
-
- foreach $section (@{$args{'sectionlist'}}) {
- print $lineprefix . "**$section**\n\n";
- print_lineno($section_start_lines{$section});
- output_highlight_rst($args{'sections'}{$section});
- print "\n";
- }
- print "\n";
-}
-
-sub output_enum_rst(%) {
- my %args = %{$_[0]};
- my ($parameter);
- my $oldprefix = $lineprefix;
- my $count;
- my $outer;
-
- my $name = $args{'enum'};
- print "\n\n.. c:enum:: " . $name . "\n\n";
-
- print_lineno($declaration_start_line);
- $lineprefix = " ";
- output_highlight_rst($args{'purpose'});
- print "\n";
-
- print ".. container:: kernelindent\n\n";
- $outer = $lineprefix . " ";
- $lineprefix = $outer . " ";
- print $outer . "**Constants**\n\n";
- foreach $parameter (@{$args{'parameterlist'}}) {
- print $outer . "``$parameter``\n";
-
- if ($args{'parameterdescs'}{$parameter} ne $undescribed) {
- output_highlight_rst($args{'parameterdescs'}{$parameter});
- } else {
- print $lineprefix . "*undescribed*\n";
- }
- print "\n";
- }
- print "\n";
- $lineprefix = $oldprefix;
- output_section_rst(@_);
-}
-
-sub output_typedef_rst(%) {
- my %args = %{$_[0]};
- my ($parameter);
- my $oldprefix = $lineprefix;
- my $name;
-
- $name = $args{'typedef'};
-
- print "\n\n.. c:type:: " . $name . "\n\n";
- print_lineno($declaration_start_line);
- $lineprefix = " ";
- output_highlight_rst($args{'purpose'});
- print "\n";
-
- $lineprefix = $oldprefix;
- output_section_rst(@_);
-}
-
-sub output_struct_rst(%) {
- my %args = %{$_[0]};
- my ($parameter);
- my $oldprefix = $lineprefix;
-
- my $name = $args{'struct'};
- if ($args{'type'} eq 'union') {
- print "\n\n.. c:union:: " . $name . "\n\n";
- } else {
- print "\n\n.. c:struct:: " . $name . "\n\n";
- }
-
- print_lineno($declaration_start_line);
- $lineprefix = " ";
- output_highlight_rst($args{'purpose'});
- print "\n";
-
- print ".. container:: kernelindent\n\n";
- print $lineprefix . "**Definition**::\n\n";
- my $declaration = $args{'definition'};
- $lineprefix = $lineprefix . " ";
- $declaration =~ s/\t/$lineprefix/g;
- print $lineprefix . $args{'type'} . " " . $args{'struct'} . " {\n$declaration" . $lineprefix . "};\n\n";
-
- $lineprefix = " ";
- print $lineprefix . "**Members**\n\n";
- foreach $parameter (@{$args{'parameterlist'}}) {
- ($parameter =~ /^#/) && next;
-
- my $parameter_name = $parameter;
- $parameter_name =~ s/\[.*//;
-
- ($args{'parameterdescs'}{$parameter_name} ne $undescribed) || next;
- $type = $args{'parametertypes'}{$parameter};
- print_lineno($parameterdesc_start_lines{$parameter_name});
- print $lineprefix . "``" . $parameter . "``\n";
- $lineprefix = " ";
- output_highlight_rst($args{'parameterdescs'}{$parameter_name});
- $lineprefix = " ";
- print "\n";
- }
- print "\n";
-
- $lineprefix = $oldprefix;
- output_section_rst(@_);
-}
-
-## none mode output functions
-
-sub output_function_none(%) {
-}
-
-sub output_enum_none(%) {
-}
-
-sub output_typedef_none(%) {
-}
-
-sub output_struct_none(%) {
-}
-
-sub output_blockhead_none(%) {
-}
-
-##
-# generic output function for all types (function, struct/union, typedef, enum);
-# calls the generated, variable output_ function name based on
-# functype and output_mode
-sub output_declaration {
- no strict 'refs';
- my $name = shift;
- my $functype = shift;
- my $func = "output_${functype}_$output_mode";
-
- return if (defined($nosymbol_table{$name}));
-
- if (($output_selection == OUTPUT_ALL) ||
- (($output_selection == OUTPUT_INCLUDE ||
- $output_selection == OUTPUT_EXPORTED) &&
- defined($function_table{$name})) ||
- ($output_selection == OUTPUT_INTERNAL &&
- !($functype eq "function" && defined($function_table{$name}))))
- {
- &$func(@_);
- $section_counter++;
- }
-}
-
-##
-# generic output function - calls the right one based on current output mode.
-sub output_blockhead {
- no strict 'refs';
- my $func = "output_blockhead_" . $output_mode;
- &$func(@_);
- $section_counter++;
-}
-
-##
-# takes a declaration (struct, union, enum, typedef) and
-# invokes the right handler. NOT called for functions.
-sub dump_declaration($$) {
- no strict 'refs';
- my ($prototype, $file) = @_;
- my $func = "dump_" . $decl_type;
- &$func(@_);
-}
-
-sub dump_union($$) {
- dump_struct(@_);
-}
-
-sub dump_struct($$) {
- my $x = shift;
- my $file = shift;
- my $decl_type;
- my $members;
- my $type = qr{struct|union};
- # For capturing struct/union definition body, i.e. "{members*}qualifiers*"
- my $qualifiers = qr{$attribute|__packed|__aligned|____cacheline_aligned_in_smp|____cacheline_aligned};
- my $definition_body = qr{\{(.*)\}\s*$qualifiers*};
- my $struct_members = qr{($type)([^\{\};]+)\{([^\{\}]*)\}([^\{\}\;]*)\;};
-
- if ($x =~ /($type)\s+(\w+)\s*$definition_body/) {
- $decl_type = $1;
- $declaration_name = $2;
- $members = $3;
- } elsif ($x =~ /typedef\s+($type)\s*$definition_body\s*(\w+)\s*;/) {
- $decl_type = $1;
- $declaration_name = $3;
- $members = $2;
- }
-
- if ($members) {
- if ($identifier ne $declaration_name) {
- emit_warning("${file}:$.", "expecting prototype for $decl_type $identifier. Prototype was for $decl_type $declaration_name instead\n");
- return;
- }
-
- # ignore members marked private:
- $members =~ s/\/\*\s*private:.*?\/\*\s*public:.*?\*\///gosi;
- $members =~ s/\/\*\s*private:.*//gosi;
- # strip comments:
- $members =~ s/\/\*.*?\*\///gos;
- # strip attributes
- $members =~ s/\s*$attribute/ /gi;
- $members =~ s/\s*__aligned\s*\([^;]*\)/ /gos;
- $members =~ s/\s*__counted_by\s*\([^;]*\)/ /gos;
- $members =~ s/\s*__counted_by_(le|be)\s*\([^;]*\)/ /gos;
- $members =~ s/\s*__packed\s*/ /gos;
- $members =~ s/\s*CRYPTO_MINALIGN_ATTR/ /gos;
- $members =~ s/\s*____cacheline_aligned_in_smp/ /gos;
- $members =~ s/\s*____cacheline_aligned/ /gos;
- # unwrap struct_group():
- # - first eat non-declaration parameters and rewrite for final match
- # - then remove macro, outer parens, and trailing semicolon
- $members =~ s/\bstruct_group\s*\(([^,]*,)/STRUCT_GROUP(/gos;
- $members =~ s/\bstruct_group_attr\s*\(([^,]*,){2}/STRUCT_GROUP(/gos;
- $members =~ s/\bstruct_group_tagged\s*\(([^,]*),([^,]*),/struct $1 $2; STRUCT_GROUP(/gos;
- $members =~ s/\b__struct_group\s*\(([^,]*,){3}/STRUCT_GROUP(/gos;
- $members =~ s/\bSTRUCT_GROUP(\(((?:(?>[^)(]+)|(?1))*)\))[^;]*;/$2/gos;
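- # e.g. "struct_group(stats, u64 rx; u64 tx;);" is reduced by the
- # two rewrites above to plain "u64 rx; u64 tx;" (names illustrative)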
-
- my $args = qr{([^,)]+)};
- # replace DECLARE_BITMAP
- $members =~ s/__ETHTOOL_DECLARE_LINK_MODE_MASK\s*\(([^\)]+)\)/DECLARE_BITMAP($1, __ETHTOOL_LINK_MODE_MASK_NBITS)/gos;
- $members =~ s/DECLARE_PHY_INTERFACE_MASK\s*\(([^\)]+)\)/DECLARE_BITMAP($1, PHY_INTERFACE_MODE_MAX)/gos;
- $members =~ s/DECLARE_BITMAP\s*\($args,\s*$args\)/unsigned long $1\[BITS_TO_LONGS($2)\]/gos;
- # replace DECLARE_HASHTABLE
- $members =~ s/DECLARE_HASHTABLE\s*\($args,\s*$args\)/unsigned long $1\[1 << (($2) - 1)\]/gos;
- # replace DECLARE_KFIFO
- $members =~ s/DECLARE_KFIFO\s*\($args,\s*$args,\s*$args\)/$2 \*$1/gos;
- # replace DECLARE_KFIFO_PTR
- $members =~ s/DECLARE_KFIFO_PTR\s*\($args,\s*$args\)/$2 \*$1/gos;
- # replace DECLARE_FLEX_ARRAY
- $members =~ s/(?:__)?DECLARE_FLEX_ARRAY\s*\($args,\s*$args\)/$1 $2\[\]/gos;
- #replace DEFINE_DMA_UNMAP_ADDR
- $members =~ s/DEFINE_DMA_UNMAP_ADDR\s*\($args\)/dma_addr_t $1/gos;
- #replace DEFINE_DMA_UNMAP_LEN
- $members =~ s/DEFINE_DMA_UNMAP_LEN\s*\($args\)/__u32 $1/gos;
- my $declaration = $members;
-
- # Split nested struct/union elements as newer ones
- while ($members =~ m/$struct_members/) {
- my $newmember;
- my $maintype = $1;
- my $ids = $4;
- my $content = $3;
- foreach my $id(split /,/, $ids) {
- $newmember .= "$maintype $id; ";
-
- $id =~ s/[:\[].*//;
- $id =~ s/^\s*\**(\S+)\s*/$1/;
- foreach my $arg (split /;/, $content) {
- next if ($arg =~ m/^\s*$/);
- if ($arg =~ m/^([^\(]+\(\*?\s*)([\w\.]*)(\s*\).*)/) {
- # pointer-to-function
- my $type = $1;
- my $name = $2;
- my $extra = $3;
- next if (!$name);
- if ($id =~ m/^\s*$/) {
- # anonymous struct/union
- $newmember .= "$type$name$extra; ";
- } else {
- $newmember .= "$type$id.$name$extra; ";
- }
- } else {
- my $type;
- my $names;
- $arg =~ s/^\s+//;
- $arg =~ s/\s+$//;
- # Handle bitmaps
- $arg =~ s/:\s*\d+\s*//g;
- # Handle arrays
- $arg =~ s/\[.*\]//g;
- # The type may have multiple words, and
- # multiple IDs can be defined, as in:
- # const struct foo, *bar, foobar
- # So, remove spaces when parsing the
- # names, in order to match just the
- # names and the commas between them.
- $arg =~ s/\s*,\s*/,/g;
- if ($arg =~ m/(.*)\s+([\S+,]+)/) {
- $type = $1;
- $names = $2;
- } else {
- $newmember .= "$arg; ";
- next;
- }
- foreach my $name (split /,/, $names) {
- $name =~ s/^\s*\**(\S+)\s*/$1/;
- next if (($name =~ m/^\s*$/));
- if ($id =~ m/^\s*$/) {
- # anonymous struct/union
- $newmember .= "$type $name; ";
- } else {
- $newmember .= "$type $id.$name; ";
- }
- }
- }
- }
- }
- $members =~ s/$struct_members/$newmember/;
- }
-
- # Ignore other nested elements, like enums
- $members =~ s/(\{[^\{\}]*\})//g;
-
- create_parameterlist($members, ';', $file, $declaration_name);
- check_sections($file, $declaration_name, $decl_type, $sectcheck, $struct_actual);
-
- # Adjust declaration for better display
- $declaration =~ s/([\{;])/$1\n/g;
- $declaration =~ s/\}\s+;/};/g;
- # Better handle inlined enums
- do {} while ($declaration =~ s/(enum\s+\{[^\}]+),([^\n])/$1,\n$2/);
-
- my @def_args = split /\n/, $declaration;
- my $level = 1;
- $declaration = "";
- foreach my $clause (@def_args) {
- $clause =~ s/^\s+//;
- $clause =~ s/\s+$//;
- $clause =~ s/\s+/ /;
- next if (!$clause);
- $level-- if ($clause =~ m/(\})/ && $level > 1);
- if (!($clause =~ m/^\s*#/)) {
- $declaration .= "\t" x $level;
- }
- $declaration .= "\t" . $clause . "\n";
- $level++ if ($clause =~ m/(\{)/ && !($clause =~m/\}/));
- }
- output_declaration($declaration_name,
- 'struct',
- {'struct' => $declaration_name,
- 'module' => $modulename,
- 'definition' => $declaration,
- 'parameterlist' => \@parameterlist,
- 'parameterdescs' => \%parameterdescs,
- 'parametertypes' => \%parametertypes,
- 'sectionlist' => \@sectionlist,
- 'sections' => \%sections,
- 'purpose' => $declaration_purpose,
- 'type' => $decl_type
- });
- } else {
- print STDERR "${file}:$.: error: Cannot parse struct or union!\n";
- ++$errors;
- }
-}
-
-
-sub show_warnings($$) {
- my $functype = shift;
- my $name = shift;
-
- return 0 if (defined($nosymbol_table{$name}));
-
- return 1 if ($output_selection == OUTPUT_ALL);
-
- if ($output_selection == OUTPUT_EXPORTED) {
- if (defined($function_table{$name})) {
- return 1;
- } else {
- return 0;
- }
- }
- if ($output_selection == OUTPUT_INTERNAL) {
- if (!($functype eq "function" && defined($function_table{$name}))) {
- return 1;
- } else {
- return 0;
- }
- }
- if ($output_selection == OUTPUT_INCLUDE) {
- if (defined($function_table{$name})) {
- return 1;
- } else {
- return 0;
- }
- }
- die("Please add the new output type at show_warnings()");
-}
-
-sub dump_enum($$) {
- my $x = shift;
- my $file = shift;
- my $members;
-
- # ignore members marked private:
- $x =~ s/\/\*\s*private:.*?\/\*\s*public:.*?\*\///gosi;
- $x =~ s/\/\*\s*private:.*}/}/gosi;
-
- $x =~ s@/\*.*?\*/@@gos; # strip comments.
- # strip #define macros inside enums
- $x =~ s@#\s*((define|ifdef|if)\s+|endif)[^;]*;@@gos;
-
- if ($x =~ /typedef\s+enum\s*\{(.*)\}\s*(\w*)\s*;/) {
- $declaration_name = $2;
- $members = $1;
- } elsif ($x =~ /enum\s+(\w*)\s*\{(.*)\}/) {
- $declaration_name = $1;
- $members = $2;
- }
-
- if ($members) {
- if ($identifier ne $declaration_name) {
- if ($identifier eq "") {
- emit_warning("${file}:$.", "wrong kernel-doc identifier on line:\n");
- } else {
- emit_warning("${file}:$.", "expecting prototype for enum $identifier. Prototype was for enum $declaration_name instead\n");
- }
- return;
- }
- $declaration_name = "(anonymous)" if ($declaration_name eq "");
-
- my %_members;
-
- $members =~ s/\s+$//;
- $members =~ s/\([^;]*?[\)]//g;
-
- foreach my $arg (split ',', $members) {
- $arg =~ s/^\s*(\w+).*/$1/;
- push @parameterlist, $arg;
- if (!$parameterdescs{$arg}) {
- $parameterdescs{$arg} = $undescribed;
- if (show_warnings("enum", $declaration_name)) {
- emit_warning("${file}:$.", "Enum value '$arg' not described in enum '$declaration_name'\n");
- }
- }
- $_members{$arg} = 1;
- }
-
- while (my ($k, $v) = each %parameterdescs) {
- if (!exists($_members{$k})) {
- if (show_warnings("enum", $declaration_name)) {
- emit_warning("${file}:$.", "Excess enum value '$k' description in '$declaration_name'\n");
- }
- }
- }
-
- output_declaration($declaration_name,
- 'enum',
- {'enum' => $declaration_name,
- 'module' => $modulename,
- 'parameterlist' => \@parameterlist,
- 'parameterdescs' => \%parameterdescs,
- 'sectionlist' => \@sectionlist,
- 'sections' => \%sections,
- 'purpose' => $declaration_purpose
- });
- } else {
- print STDERR "${file}:$.: error: Cannot parse enum!\n";
- ++$errors;
- }
-}
-
-my $typedef_type = qr { ((?:\s+[\w\*]+\b){0,7}\s+(?:\w+\b|\*+))\s* }x;
-my $typedef_ident = qr { \*?\s*(\w\S+)\s* }x;
-my $typedef_args = qr { \s*\((.*)\); }x;
-
-my $typedef1 = qr { typedef$typedef_type\($typedef_ident\)$typedef_args }x;
-my $typedef2 = qr { typedef$typedef_type$typedef_ident$typedef_args }x;
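-
-# e.g. "typedef int (*fn_t)(void *data);" matches $typedef1, while
-# "typedef int fn_t(void *data);" matches $typedef2 (names illustrative).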
-
-sub dump_typedef($$) {
- my $x = shift;
- my $file = shift;
-
- $x =~ s@/\*.*?\*/@@gos; # strip comments.
-
- # Parse function typedef prototypes
- if ($x =~ $typedef1 || $x =~ $typedef2) {
- $return_type = $1;
- $declaration_name = $2;
- my $args = $3;
- $return_type =~ s/^\s+//;
-
- if ($identifier ne $declaration_name) {
- emit_warning("${file}:$.", "expecting prototype for typedef $identifier. Prototype was for typedef $declaration_name instead\n");
- return;
- }
-
- create_parameterlist($args, ',', $file, $declaration_name);
-
- output_declaration($declaration_name,
- 'function',
- {'function' => $declaration_name,
- 'typedef' => 1,
- 'module' => $modulename,
- 'functiontype' => $return_type,
- 'parameterlist' => \@parameterlist,
- 'parameterdescs' => \%parameterdescs,
- 'parametertypes' => \%parametertypes,
- 'sectionlist' => \@sectionlist,
- 'sections' => \%sections,
- 'purpose' => $declaration_purpose
- });
- return;
- }
-
- while (($x =~ /\(*.\)\s*;$/) || ($x =~ /\[*.\]\s*;$/)) {
- $x =~ s/\(*.\)\s*;$/;/;
- $x =~ s/\[*.\]\s*;$/;/;
- }
-
- if ($x =~ /typedef.*\s+(\w+)\s*;/) {
- $declaration_name = $1;
-
- if ($identifier ne $declaration_name) {
- emit_warning("${file}:$.", "expecting prototype for typedef $identifier. Prototype was for typedef $declaration_name instead\n");
- return;
- }
-
- output_declaration($declaration_name,
- 'typedef',
- {'typedef' => $declaration_name,
- 'module' => $modulename,
- 'sectionlist' => \@sectionlist,
- 'sections' => \%sections,
- 'purpose' => $declaration_purpose
- });
- } else {
- print STDERR "${file}:$.: error: Cannot parse typedef!\n";
- ++$errors;
- }
-}
-
-sub save_struct_actual($) {
- my $actual = shift;
-
- # strip all spaces from the actual param so that it looks like one string item
- $actual =~ s/\s*//g;
- $struct_actual = $struct_actual . $actual . " ";
-}
-
-sub create_parameterlist($$$$) {
- my $args = shift;
- my $splitter = shift;
- my $file = shift;
- my $declaration_name = shift;
- my $type;
- my $param;
-
- # temporarily replace commas inside function pointer definition
- my $arg_expr = qr{\([^\),]+};
- while ($args =~ /$arg_expr,/) {
- $args =~ s/($arg_expr),/$1#/g;
- }
-
- foreach my $arg (split($splitter, $args)) {
- # strip comments
- $arg =~ s/\/\*.*\*\///;
- # ignore argument attributes
- $arg =~ s/\sPOS0?\s/ /;
- # strip leading/trailing spaces
- $arg =~ s/^\s*//;
- $arg =~ s/\s*$//;
- $arg =~ s/\s+/ /;
-
- if ($arg =~ /^#/) {
- # Treat preprocessor directive as a typeless variable just to fill
- # corresponding data structures "correctly". Catch it later in
- # output_* subs.
- push_parameter($arg, "", "", $file);
- } elsif ($arg =~ m/\(.+\)\s*\(/) {
- # pointer-to-function
- $arg =~ tr/#/,/;
- $arg =~ m/[^\(]+\(\*?\s*([\w\[\]\.]*)\s*\)/;
- $param = $1;
- $type = $arg;
- $type =~ s/([^\(]+\(\*?)\s*$param/$1/;
- save_struct_actual($param);
- push_parameter($param, $type, $arg, $file, $declaration_name);
- } elsif ($arg =~ m/\(.+\)\s*\[/) {
- # array-of-pointers
- $arg =~ tr/#/,/;
- $arg =~ m/[^\(]+\(\s*\*\s*([\w\[\]\.]*?)\s*(\s*\[\s*[\w]+\s*\]\s*)*\)/;
- $param = $1;
- $type = $arg;
- $type =~ s/([^\(]+\(\*?)\s*$param/$1/;
- save_struct_actual($param);
- push_parameter($param, $type, $arg, $file, $declaration_name);
- } elsif ($arg) {
- $arg =~ s/\s*:\s*/:/g;
- $arg =~ s/\s*\[/\[/g;
-
- my @args = split('\s*,\s*', $arg);
- if ($args[0] =~ m/\*/) {
- $args[0] =~ s/(\*+)\s*/ $1/;
- }
-
- my @first_arg;
- if ($args[0] =~ /^(.*\s+)(.*?\[.*\].*)$/) {
- shift @args;
- push(@first_arg, split('\s+', $1));
- push(@first_arg, $2);
- } else {
- @first_arg = split('\s+', shift @args);
- }
-
- unshift(@args, pop @first_arg);
- $type = join " ", @first_arg;
-
- foreach $param (@args) {
- if ($param =~ m/^(\*+)\s*(.*)/) {
- save_struct_actual($2);
-
- push_parameter($2, "$type $1", $arg, $file, $declaration_name);
- } elsif ($param =~ m/(.*?):(\w+)/) {
- if ($type ne "") { # skip unnamed bit-fields
- save_struct_actual($1);
- push_parameter($1, "$type:$2", $arg, $file, $declaration_name)
- }
- } else {
- save_struct_actual($param);
- push_parameter($param, $type, $arg, $file, $declaration_name);
- }
- }
- }
- }
-}
-
-sub push_parameter($$$$$) {
- my $param = shift;
- my $type = shift;
- my $org_arg = shift;
- my $file = shift;
- my $declaration_name = shift;
-
- if (($anon_struct_union == 1) && ($type eq "") &&
- ($param eq "}")) {
- return; # ignore the ending }; from anon. struct/union
- }
-
- $anon_struct_union = 0;
- $param =~ s/[\[\)].*//;
-
- if ($type eq "" && $param =~ /\.\.\.$/)
- {
- if ($param !~ /\w\.\.\.$/) {
- # handles unnamed variable parameters
- $param = "...";
- } elsif ($param =~ /\w\.\.\.$/) {
- # for named variable parameters of the form `x...`, remove the dots
- $param =~ s/\.\.\.$//;
- }
- if (!defined $parameterdescs{$param} || $parameterdescs{$param} eq "") {
- $parameterdescs{$param} = "variable arguments";
- }
- }
- elsif ($type eq "" && ($param eq "" or $param eq "void"))
- {
- $param="void";
- $parameterdescs{void} = "no arguments";
- }
- elsif ($type eq "" && ($param eq "struct" or $param eq "union"))
- # handle unnamed (anonymous) union or struct:
- {
- $type = $param;
- $param = "{unnamed_" . $param . "}";
- $parameterdescs{$param} = "anonymous\n";
- $anon_struct_union = 1;
- }
- elsif ($param =~ "__cacheline_group" )
- # handle cache group enforcing variables: they do not need be described in header files
- {
- return; # ignore __cacheline_group_begin and __cacheline_group_end
- }
-
- # warn if parameter has no description
- # (but ignore ones starting with # as these are not parameters
- # but inline preprocessor statements);
- # Note: It will also ignore void params and unnamed structs/unions
- if (!defined $parameterdescs{$param} && $param !~ /^#/) {
- $parameterdescs{$param} = $undescribed;
-
- if (show_warnings($type, $declaration_name) && $param !~ /\./) {
- emit_warning("${file}:$.", "Function parameter or struct member '$param' not described in '$declaration_name'\n");
- }
- }
-
- # strip spaces from $param so that it is one continuous string
- # on @parameterlist;
- # this fixes a problem where check_sections() cannot find
- # a parameter like "addr[6 + 2]" because it actually appears
- # as "addr[6", "+", "2]" on the parameter list;
- # but it's better to maintain the param string unchanged for output,
- # so just weaken the string compare in check_sections() to ignore
- # "[blah" in a parameter string;
- ###$param =~ s/\s*//g;
- push @parameterlist, $param;
- $org_arg =~ s/\s\s+/ /g;
- $parametertypes{$param} = $org_arg;
-}
-
-sub check_sections($$$$$) {
- my ($file, $decl_name, $decl_type, $sectcheck, $prmscheck) = @_;
- my @sects = split ' ', $sectcheck;
- my @prms = split ' ', $prmscheck;
- my $err;
- my ($px, $sx);
- my $prm_clean; # strip trailing "[array size]" and/or beginning "*"
-
- foreach $sx (0 .. $#sects) {
- $err = 1;
- foreach $px (0 .. $#prms) {
- $prm_clean = $prms[$px];
- $prm_clean =~ s/\[.*\]//;
- $prm_clean =~ s/$attribute//i;
- # ignore array size in a parameter string;
- # however, the original param string may contain
- # spaces, e.g.: addr[6 + 2]
- # and this appears in @prms as "addr[6" since the
- # parameter list is split at spaces;
- # hence just ignore "[..." for the sections check;
- $prm_clean =~ s/\[.*//;
-
- ##$prm_clean =~ s/^\**//;
- if ($prm_clean eq $sects[$sx]) {
- $err = 0;
- last;
- }
- }
- if ($err) {
- if ($decl_type eq "function") {
- emit_warning("${file}:$.",
- "Excess function parameter " .
- "'$sects[$sx]' " .
- "description in '$decl_name'\n");
- } elsif (($decl_type eq "struct") or
- ($decl_type eq "union")) {
- emit_warning("${file}:$.",
- "Excess $decl_type member " .
- "'$sects[$sx]' " .
- "description in '$decl_name'\n");
- }
- }
- }
-}
-
-##
-# Checks the section describing the return value of a function.
-sub check_return_section {
- my $file = shift;
- my $declaration_name = shift;
- my $return_type = shift;
-
- # Ignore an empty return type (It's a macro)
- # Ignore functions with a "void" return type. (But don't ignore "void *")
- if (($return_type eq "") || ($return_type =~ /void\s*\w*\s*$/)) {
- return;
- }
-
- if (!defined($sections{$section_return}) ||
- $sections{$section_return} eq "")
- {
- emit_warning("${file}:$.",
- "No description found for return value of " .
- "'$declaration_name'\n");
- }
-}
-
-##
-# takes a function prototype and the name of the current file being
-# processed and spits out all the details stored in the global
-# arrays/hashes.
-sub dump_function($$) {
- my $prototype = shift;
- my $file = shift;
- my $func_macro = 0;
-
- print_lineno($new_start_line);
-
- $prototype =~ s/^static +//;
- $prototype =~ s/^extern +//;
- $prototype =~ s/^asmlinkage +//;
- $prototype =~ s/^inline +//;
- $prototype =~ s/^__inline__ +//;
- $prototype =~ s/^__inline +//;
- $prototype =~ s/^__always_inline +//;
- $prototype =~ s/^noinline +//;
- $prototype =~ s/^__FORTIFY_INLINE +//;
- $prototype =~ s/__init +//;
- $prototype =~ s/__init_or_module +//;
- $prototype =~ s/__deprecated +//;
- $prototype =~ s/__flatten +//;
- $prototype =~ s/__meminit +//;
- $prototype =~ s/__must_check +//;
- $prototype =~ s/__weak +//;
- $prototype =~ s/__sched +//;
- $prototype =~ s/_noprof//;
- $prototype =~ s/__printf\s*\(\s*\d*\s*,\s*\d*\s*\) +//;
- $prototype =~ s/__(?:re)?alloc_size\s*\(\s*\d+\s*(?:,\s*\d+\s*)?\) +//;
- $prototype =~ s/__diagnose_as\s*\(\s*\S+\s*(?:,\s*\d+\s*)*\) +//;
- $prototype =~ s/DECL_BUCKET_PARAMS\s*\(\s*(\S+)\s*,\s*(\S+)\s*\)/$1, $2/;
- my $define = $prototype =~ s/^#\s*define\s+//; #ak added
- $prototype =~ s/__attribute_const__ +//;
- $prototype =~ s/__attribute__\s*\(\(
- (?:
- [\w\s]++ # attribute name
- (?:\([^)]*+\))? # attribute arguments
- \s*+,? # optional comma at the end
- )+
- \)\)\s+//x;
-
- # Yes, this truly is vile. We are looking for:
- # 1. Return type (may be nothing if we're looking at a macro)
- # 2. Function name
- # 3. Function parameters.
- #
- # All the while we have to watch out for function pointer parameters
- # (which IIRC is what the two sections are for), C types (these
- # regexps don't even start to express all the possibilities), and
- # so on.
- #
- # If you mess with these regexps, it's a good idea to check that
- # the following functions' documentation still comes out right:
- # - parport_register_device (function pointer parameters)
- # - atomic_set (macro)
- # - pci_match_device, __copy_to_user (long return type)
- my $name = qr{[a-zA-Z0-9_~:]+};
- my $prototype_end1 = qr{[^\(]*};
- my $prototype_end2 = qr{[^\{]*};
- my $prototype_end = qr{\(($prototype_end1|$prototype_end2)\)};
- my $type1 = qr{[\w\s]+};
- my $type2 = qr{$type1\*+};
-
- if ($define && $prototype =~ m/^()($name)\s+/) {
- # This is an object-like macro, it has no return type and no parameter
- # list.
- # Function-like macros are not allowed to have spaces between
- # declaration_name and opening parenthesis (notice the \s+).
- $return_type = $1;
- $declaration_name = $2;
- $func_macro = 1;
- } elsif ($prototype =~ m/^()($name)\s*$prototype_end/ ||
- $prototype =~ m/^($type1)\s+($name)\s*$prototype_end/ ||
- $prototype =~ m/^($type2+)\s*($name)\s*$prototype_end/) {
- $return_type = $1;
- $declaration_name = $2;
- my $args = $3;
-
- create_parameterlist($args, ',', $file, $declaration_name);
- } else {
- emit_warning("${file}:$.", "cannot understand function prototype: '$prototype'\n");
- return;
- }
-
- if ($identifier ne $declaration_name) {
- emit_warning("${file}:$.", "expecting prototype for $identifier(). Prototype was for $declaration_name() instead\n");
- return;
- }
-
- my $prms = join " ", @parameterlist;
- check_sections($file, $declaration_name, "function", $sectcheck, $prms);
-
- # This check emits a lot of warnings at the moment, because many
- # functions don't have a 'Return' doc section. So until the number
- # of warnings goes sufficiently down, the check is only performed in
- # -Wreturn mode.
- # TODO: always perform the check.
- if ($Wreturn && !$func_macro) {
- check_return_section($file, $declaration_name, $return_type);
- }
-
- # The function parser can be called with a typedef parameter.
- # Handle it.
- if ($return_type =~ /typedef/) {
- output_declaration($declaration_name,
- 'function',
- {'function' => $declaration_name,
- 'typedef' => 1,
- 'module' => $modulename,
- 'functiontype' => $return_type,
- 'parameterlist' => \@parameterlist,
- 'parameterdescs' => \%parameterdescs,
- 'parametertypes' => \%parametertypes,
- 'sectionlist' => \@sectionlist,
- 'sections' => \%sections,
- 'purpose' => $declaration_purpose,
- 'func_macro' => $func_macro
- });
- } else {
- output_declaration($declaration_name,
- 'function',
- {'function' => $declaration_name,
- 'module' => $modulename,
- 'functiontype' => $return_type,
- 'parameterlist' => \@parameterlist,
- 'parameterdescs' => \%parameterdescs,
- 'parametertypes' => \%parametertypes,
- 'sectionlist' => \@sectionlist,
- 'sections' => \%sections,
- 'purpose' => $declaration_purpose,
- 'func_macro' => $func_macro
- });
- }
-}
-
-sub reset_state {
- $function = "";
- %parameterdescs = ();
- %parametertypes = ();
- @parameterlist = ();
- %sections = ();
- @sectionlist = ();
- $sectcheck = "";
- $struct_actual = "";
- $prototype = "";
-
- $state = STATE_NORMAL;
- $inline_doc_state = STATE_INLINE_NA;
-}
-
-sub tracepoint_munge($) {
- my $file = shift;
- my $tracepointname = 0;
- my $tracepointargs = 0;
-
- if ($prototype =~ m/TRACE_EVENT\((.*?),/) {
- $tracepointname = $1;
- }
- if ($prototype =~ m/DEFINE_SINGLE_EVENT\((.*?),/) {
- $tracepointname = $1;
- }
- if ($prototype =~ m/DEFINE_EVENT\((.*?),(.*?),/) {
- $tracepointname = $2;
- }
- $tracepointname =~ s/^\s+//; #strip leading whitespace
- if ($prototype =~ m/TP_PROTO\((.*?)\)/) {
- $tracepointargs = $1;
- }
- if (($tracepointname eq 0) || ($tracepointargs eq 0)) {
- emit_warning("${file}:$.", "Unrecognized tracepoint format: \n".
- "$prototype\n");
- } else {
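-        # e.g. TRACE_EVENT(kfree, TP_PROTO(unsigned long call_site,
-        # const void *ptr), ...) is munged so that dump_function() sees
-        # something like:
-        #   static inline void trace_kfree(unsigned long call_site, const void *ptr)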
- $prototype = "static inline void trace_$tracepointname($tracepointargs)";
- $identifier = "trace_$identifier";
- }
-}
-
-sub syscall_munge() {
- my $void = 0;
-
- $prototype =~ s@[\r\n]+@ @gos; # strip newlines/CR's
-## if ($prototype =~ m/SYSCALL_DEFINE0\s*\(\s*(a-zA-Z0-9_)*\s*\)/) {
- if ($prototype =~ m/SYSCALL_DEFINE0/) {
- $void = 1;
-## $prototype = "long sys_$1(void)";
- }
-
- $prototype =~ s/SYSCALL_DEFINE.*\(/long sys_/; # fix return type & func name
- if ($prototype =~ m/long (sys_.*?),/) {
- $prototype =~ s/,/\(/;
- } elsif ($void) {
- $prototype =~ s/\)/\(void\)/;
- }
-
-    # now delete all of the odd-numbered commas in $prototype
-    # so that arg types & arg names don't have a comma between them
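-    # e.g. "long sys_read(unsigned int, fd, char __user *, buf, size_t, count)"
-    # becomes "long sys_read(unsigned int fd, char __user * buf, size_t count)"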
- my $count = 0;
- my $len = length($prototype);
- if ($void) {
- $len = 0; # skip the for-loop
- }
- for (my $ix = 0; $ix < $len; $ix++) {
- if (substr($prototype, $ix, 1) eq ',') {
- $count++;
- if ($count % 2 == 1) {
- substr($prototype, $ix, 1) = ' ';
- }
- }
- }
-}
-
-sub process_proto_function($$) {
- my $x = shift;
- my $file = shift;
-
- $x =~ s@\/\/.*$@@gos; # strip C99-style comments to end of line
-
- if ($x =~ /^#/ && $x !~ /^#\s*define/) {
- # do nothing
- } elsif ($x =~ /([^\{]*)/) {
- $prototype .= $1;
- }
-
- if (($x =~ /\{/) || ($x =~ /\#\s*define/) || ($x =~ /;/)) {
- $prototype =~ s@/\*.*?\*/@@gos; # strip comments.
- $prototype =~ s@[\r\n]+@ @gos; # strip newlines/cr's.
- $prototype =~ s@^\s+@@gos; # strip leading spaces
-
- # Handle prototypes for function pointers like:
- # int (*pcs_config)(struct foo)
- $prototype =~ s@^(\S+\s+)\(\s*\*(\S+)\)@$1$2@gos;
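-        # e.g. the line above turns "int (*pcs_config)(struct foo)"
-        # into "int pcs_config(struct foo)" before further parsing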
-
- if ($prototype =~ /SYSCALL_DEFINE/) {
- syscall_munge();
- }
- if ($prototype =~ /TRACE_EVENT/ || $prototype =~ /DEFINE_EVENT/ ||
- $prototype =~ /DEFINE_SINGLE_EVENT/)
- {
- tracepoint_munge($file);
- }
- dump_function($prototype, $file);
- reset_state();
- }
-}
-
-sub process_proto_type($$) {
- my $x = shift;
- my $file = shift;
-
- $x =~ s@[\r\n]+@ @gos; # strip newlines/cr's.
- $x =~ s@^\s+@@gos; # strip leading spaces
- $x =~ s@\s+$@@gos; # strip trailing spaces
- $x =~ s@\/\/.*$@@gos; # strip C99-style comments to end of line
-
- if ($x =~ /^#/) {
- # To distinguish preprocessor directive from regular declaration later.
- $x .= ";";
- }
-
- while (1) {
- if ( $x =~ /([^\{\};]*)([\{\};])(.*)/ ) {
- if( length $prototype ) {
- $prototype .= " "
- }
- $prototype .= $1 . $2;
- ($2 eq '{') && $brcount++;
- ($2 eq '}') && $brcount--;
- if (($2 eq ';') && ($brcount == 0)) {
- dump_declaration($prototype, $file);
- reset_state();
- last;
- }
- $x = $3;
- } else {
- $prototype .= $x;
- last;
- }
- }
-}
-
-
-sub map_filename($) {
- my $file;
- my ($orig_file) = @_;
-
- if (defined($ENV{'SRCTREE'})) {
- $file = "$ENV{'SRCTREE'}" . "/" . $orig_file;
- } else {
- $file = $orig_file;
- }
-
- return $file;
-}
-
-sub process_export_file($) {
- my ($orig_file) = @_;
- my $file = map_filename($orig_file);
-
- if (!open(IN,"<$file")) {
- print STDERR "Error: Cannot open file $file\n";
- ++$errors;
- return;
- }
-
- while (<IN>) {
- if (/$export_symbol/) {
- next if (defined($nosymbol_table{$2}));
- $function_table{$2} = 1;
- }
- if (/$export_symbol_ns/) {
- next if (defined($nosymbol_table{$2}));
- $function_table{$2} = 1;
- }
- }
-
- close(IN);
-}
-
-#
-# Parsers for the various processing states.
-#
-# STATE_NORMAL: looking for the /** to begin everything.
-#
-sub process_normal() {
- if (/$doc_start/o) {
- $state = STATE_NAME; # next line is always the function name
- $declaration_start_line = $. + 1;
- }
-}
-
-#
-# STATE_NAME: Looking for the "name - description" line
-#
-sub process_name($$) {
- my $file = shift;
- my $descr;
-
- if (/$doc_block/o) {
- $state = STATE_DOCBLOCK;
- $contents = "";
- $new_start_line = $.;
-
- if ( $1 eq "" ) {
- $section = $section_intro;
- } else {
- $section = $1;
- }
- } elsif (/$doc_decl/o) {
- $identifier = $1;
- my $is_kernel_comment = 0;
- my $decl_start = qr{$doc_com};
- # test for pointer declaration type, foo * bar() - desc
- my $fn_type = qr{\w+\s*\*\s*};
- my $parenthesis = qr{\(\w*\)};
- my $decl_end = qr{[-:].*};
- if (/^$decl_start([\w\s]+?)$parenthesis?\s*$decl_end?$/) {
- $identifier = $1;
- }
- if ($identifier =~ m/^(struct|union|enum|typedef)\b\s*(\S*)/) {
- $decl_type = $1;
- $identifier = $2;
- $is_kernel_comment = 1;
- }
- # Look for foo() or static void foo() - description; or misspelt
- # identifier
- elsif (/^$decl_start$fn_type?(\w+)\s*$parenthesis?\s*$decl_end?$/ ||
- /^$decl_start$fn_type?(\w+[^-:]*)$parenthesis?\s*$decl_end$/) {
- $identifier = $1;
- $decl_type = 'function';
- $identifier =~ s/^define\s+//;
- $is_kernel_comment = 1;
- }
- $identifier =~ s/\s+$//;
-
- $state = STATE_BODY;
-        # If there are no @param blocks, we need to set up the default
-        # section here
- $contents = "";
- $section = $section_default;
- $new_start_line = $. + 1;
- if (/[-:](.*)/) {
- # strip leading/trailing/multiple spaces
- $descr= $1;
- $descr =~ s/^\s*//;
- $descr =~ s/\s*$//;
- $descr =~ s/\s+/ /g;
- $declaration_purpose = $descr;
- $state = STATE_BODY_MAYBE;
- } else {
- $declaration_purpose = "";
- }
-
- if (!$is_kernel_comment) {
- emit_warning("${file}:$.", "This comment starts with '/**', but isn't a kernel-doc comment. Refer Documentation/doc-guide/kernel-doc.rst\n$_");
- $state = STATE_NORMAL;
- }
-
- if (($declaration_purpose eq "") && $Wshort_desc) {
- emit_warning("${file}:$.", "missing initial short description on line:\n$_");
- }
-
- if ($identifier eq "" && $decl_type ne "enum") {
- emit_warning("${file}:$.", "wrong kernel-doc identifier on line:\n$_");
- $state = STATE_NORMAL;
- }
-
- if ($verbose) {
- print STDERR "${file}:$.: info: Scanning doc for $decl_type $identifier\n";
- }
- } else {
- emit_warning("${file}:$.", "Cannot understand $_ on line $. - I thought it was a doc line\n");
- $state = STATE_NORMAL;
- }
-}
-
-
-#
-# STATE_BODY and STATE_BODY_MAYBE: the bulk of a kerneldoc comment.
-#
-sub process_body($$) {
- my $file = shift;
-
- if ($state == STATE_BODY_WITH_BLANK_LINE && /^\s*\*\s?\S/) {
- dump_section($file, $section, $contents);
- $section = $section_default;
- $new_start_line = $.;
- $contents = "";
- }
-
- if (/$doc_sect/i) { # case insensitive for supported section names
- $newsection = $1;
- $newcontents = $2;
-
- # map the supported section names to the canonical names
- if ($newsection =~ m/^description$/i) {
- $newsection = $section_default;
- } elsif ($newsection =~ m/^context$/i) {
- $newsection = $section_context;
- } elsif ($newsection =~ m/^returns?$/i) {
- $newsection = $section_return;
- } elsif ($newsection =~ m/^\@return$/) {
- # special: @return is a section, not a param description
- $newsection = $section_return;
- }
-
- if (($contents ne "") && ($contents ne "\n")) {
- dump_section($file, $section, $contents);
- $section = $section_default;
- }
-
- $state = STATE_BODY;
- $contents = $newcontents;
- $new_start_line = $.;
- while (substr($contents, 0, 1) eq " ") {
- $contents = substr($contents, 1);
- }
- if ($contents ne "") {
- $contents .= "\n";
- }
- $section = $newsection;
- $leading_space = undef;
- } elsif (/$doc_end/) {
- if (($contents ne "") && ($contents ne "\n")) {
- dump_section($file, $section, $contents);
- $section = $section_default;
- $contents = "";
- }
- # look for doc_com + <text> + doc_end:
- if ($_ =~ m'\s*\*\s*[a-zA-Z_0-9:\.]+\*/') {
- emit_warning("${file}:$.", "suspicious ending line: $_");
- }
-
- $prototype = "";
- $state = STATE_PROTO;
- $brcount = 0;
- $new_start_line = $. + 1;
- } elsif (/$doc_content/) {
- if ($1 eq "") {
- if ($section eq $section_context) {
- dump_section($file, $section, $contents);
- $section = $section_default;
- $contents = "";
- $new_start_line = $.;
- $state = STATE_BODY;
- } else {
- if ($section ne $section_default) {
- $state = STATE_BODY_WITH_BLANK_LINE;
- } else {
- $state = STATE_BODY;
- }
- $contents .= "\n";
- }
- } elsif ($state == STATE_BODY_MAYBE) {
- # Continued declaration purpose
- chomp($declaration_purpose);
- $declaration_purpose .= " " . $1;
- $declaration_purpose =~ s/\s+/ /g;
- } else {
- my $cont = $1;
- if ($section =~ m/^@/ || $section eq $section_context) {
- if (!defined $leading_space) {
- if ($cont =~ m/^(\s+)/) {
- $leading_space = $1;
- } else {
- $leading_space = "";
- }
- }
- $cont =~ s/^$leading_space//;
- }
- $contents .= $cont . "\n";
- }
- } else {
-        # I don't know - bad line? Ignore it.
- emit_warning("${file}:$.", "bad line: $_");
- }
-}
-
-
-#
-# STATE_PROTO: reading a function/whatever prototype.
-#
-sub process_proto($$) {
- my $file = shift;
-
- if (/$doc_inline_oneline/) {
- $section = $1;
- $contents = $2;
- if ($contents ne "") {
- $contents .= "\n";
- dump_section($file, $section, $contents);
- $section = $section_default;
- $contents = "";
- }
- } elsif (/$doc_inline_start/) {
- $state = STATE_INLINE;
- $inline_doc_state = STATE_INLINE_NAME;
- } elsif ($decl_type eq 'function') {
- process_proto_function($_, $file);
- } else {
- process_proto_type($_, $file);
- }
-}
-
-#
-# STATE_DOCBLOCK: within a DOC: block.
-#
-sub process_docblock($$) {
- my $file = shift;
-
- if (/$doc_end/) {
- dump_doc_section($file, $section, $contents);
- $section = $section_default;
- $contents = "";
- $function = "";
- %parameterdescs = ();
- %parametertypes = ();
- @parameterlist = ();
- %sections = ();
- @sectionlist = ();
- $prototype = "";
- $state = STATE_NORMAL;
- } elsif (/$doc_content/) {
- if ( $1 eq "" ) {
- $contents .= $blankline;
- } else {
- $contents .= $1 . "\n";
- }
- }
-}
-
-#
-# STATE_INLINE: docbook comments within a prototype.
-#
-sub process_inline($$) {
- my $file = shift;
-
- # First line (state 1) needs to be a @parameter
- if ($inline_doc_state == STATE_INLINE_NAME && /$doc_inline_sect/o) {
- $section = $1;
- $contents = $2;
- $new_start_line = $.;
- if ($contents ne "") {
- while (substr($contents, 0, 1) eq " ") {
- $contents = substr($contents, 1);
- }
- $contents .= "\n";
- }
- $inline_doc_state = STATE_INLINE_TEXT;
- # Documentation block end */
- } elsif (/$doc_inline_end/) {
- if (($contents ne "") && ($contents ne "\n")) {
- dump_section($file, $section, $contents);
- $section = $section_default;
- $contents = "";
- }
- $state = STATE_PROTO;
- $inline_doc_state = STATE_INLINE_NA;
- # Regular text
- } elsif (/$doc_content/) {
- if ($inline_doc_state == STATE_INLINE_TEXT) {
- $contents .= $1 . "\n";
- # nuke leading blank lines
- if ($contents =~ /^\s*$/) {
- $contents = "";
- }
- } elsif ($inline_doc_state == STATE_INLINE_NAME) {
- $inline_doc_state = STATE_INLINE_ERROR;
- emit_warning("${file}:$.", "Incorrect use of kernel-doc format: $_");
- }
- }
-}
-
-
-sub process_file($) {
- my $file;
- my ($orig_file) = @_;
-
- $file = map_filename($orig_file);
-
- if (!open(IN_FILE,"<$file")) {
- print STDERR "Error: Cannot open file $file\n";
- ++$errors;
- return;
- }
-
- $. = 1;
-
- $section_counter = 0;
- while (<IN_FILE>) {
- while (!/^ \*/ && s/\\\s*$//) {
- $_ .= <IN_FILE>;
- }
- # Replace tabs by spaces
- while ($_ =~ s/\t+/' ' x (length($&) * 8 - length($`) % 8)/e) {};
- # Hand this line to the appropriate state handler
- if ($state == STATE_NORMAL) {
- process_normal();
- } elsif ($state == STATE_NAME) {
- process_name($file, $_);
- } elsif ($state == STATE_BODY || $state == STATE_BODY_MAYBE ||
- $state == STATE_BODY_WITH_BLANK_LINE) {
- process_body($file, $_);
- } elsif ($state == STATE_INLINE) { # scanning for inline parameters
- process_inline($file, $_);
- } elsif ($state == STATE_PROTO) {
- process_proto($file, $_);
- } elsif ($state == STATE_DOCBLOCK) {
- process_docblock($file, $_);
- }
- }
-
- # Make sure we got something interesting.
- if (!$section_counter && $output_mode ne "none") {
- if ($output_selection == OUTPUT_INCLUDE) {
- emit_warning("${file}:1", "'$_' not found\n")
- for keys %function_table;
- } else {
- emit_warning("${file}:1", "no structured comments found\n");
- }
- }
- close IN_FILE;
-}
-
-$kernelversion = get_kernel_version();
-
-# generate a sequence of code that will splice in highlighting information
-# using the s// operator.
-for (my $k = 0; $k < @highlights; $k++) {
- my $pattern = $highlights[$k][0];
- my $result = $highlights[$k][1];
-# print STDERR "scanning pattern:$pattern, highlight:($result)\n";
- $dohighlight .= "\$contents =~ s:$pattern:$result:gs;\n";
-}
-
-if ($output_selection == OUTPUT_EXPORTED ||
- $output_selection == OUTPUT_INTERNAL) {
-
- push(@export_file_list, @ARGV);
-
- foreach (@export_file_list) {
- chomp;
- process_export_file($_);
- }
-}
-
-foreach (@ARGV) {
- chomp;
- process_file($_);
-}
-if ($verbose && $errors) {
- print STDERR "$errors errors\n";
-}
-if ($verbose && $warnings) {
- print STDERR "$warnings warnings\n";
-}
-
-if ($Werror && $warnings) {
- print STDERR "$warnings warnings as Errors\n";
- exit($warnings);
-} else {
- exit($output_mode eq "none" ? 0 : $errors)
-}
-
-__END__
-
-=head1 OPTIONS
-
-=head2 Output format selection (mutually exclusive):
-
-=over 8
-
-=item -man
-
-Output troff manual page format.
-
-=item -rst
-
-Output reStructuredText format. This is the default.
-
-=item -none
-
-Do not output documentation, only warnings.
-
-=back
-
-=head2 Output selection (mutually exclusive):
-
-=over 8
-
-=item -export
-
-Only output documentation for the symbols that have been exported using
-EXPORT_SYMBOL() and related macros in any input FILE or -export-file FILE.
-
-=item -internal
-
-Only output documentation for the symbols that have NOT been exported using
-EXPORT_SYMBOL() and related macros in any input FILE or -export-file FILE.
-
-=item -function NAME
-
-Only output documentation for the given function or DOC: section title.
-All other functions and DOC: sections are ignored.
-
-May be specified multiple times.
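-
-For example, C<kernel-doc.pl -rst -function vmalloc mm/vmalloc.c> (a
-hypothetical invocation) would emit only the documentation for vmalloc().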
-
-=item -nosymbol NAME
-
-Exclude the specified symbol from the output documentation.
-
-May be specified multiple times.
-
-=back
-
-=head2 Output selection modifiers:
-
-=over 8
-
-=item -no-doc-sections
-
-Do not output DOC: sections.
-
-=item -export-file FILE
-
-Specify an additional FILE in which to look for EXPORT_SYMBOL information.
-
-To be used with -export or -internal.
-
-May be specified multiple times.
-
-=back
-
-=head3 reStructuredText only
-
-=over 8
-
-=item -enable-lineno
-
-Enable output of .. LINENO lines.
-
-=back
-
-=head2 Other parameters:
-
-=over 8
-
-=item -h, -help
-
-Print this help.
-
-=item -v
-
-Verbose output, more warnings and other information.
-
-=item -Werror
-
-Treat warnings as errors.
-
-=back
-
-=cut
diff --git a/scripts/kernel-doc.py b/scripts/kernel-doc.py
index d9fe2bcbd39c..7a1eaf986bcd 100755
--- a/scripts/kernel-doc.py
+++ b/scripts/kernel-doc.py
@@ -111,7 +111,7 @@ import sys
# Import Python modules
-LIB_DIR = "lib/kdoc"
+LIB_DIR = "../tools/lib/python"
SRC_DIR = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.join(SRC_DIR, LIB_DIR))
@@ -292,8 +292,8 @@ def main():
logger.warning("Python 3.7 or later is required for correct results")
# Import kernel-doc libraries only after checking Python version
- from kdoc_files import KernelFiles # pylint: disable=C0415
- from kdoc_output import RestFormat, ManFormat # pylint: disable=C0415
+ from kdoc.kdoc_files import KernelFiles # pylint: disable=C0415
+ from kdoc.kdoc_output import RestFormat, ManFormat # pylint: disable=C0415
if args.man:
out_style = ManFormat(modulename=args.modulename)
diff --git a/scripts/lib/abi/abi_parser.py b/scripts/lib/abi/abi_parser.py
deleted file mode 100644
index 66a738013ce1..000000000000
--- a/scripts/lib/abi/abi_parser.py
+++ /dev/null
@@ -1,628 +0,0 @@
-#!/usr/bin/env python3
-# pylint: disable=R0902,R0903,R0911,R0912,R0913,R0914,R0915,R0917,C0302
-# Copyright(c) 2025: Mauro Carvalho Chehab <mchehab@kernel.org>.
-# SPDX-License-Identifier: GPL-2.0
-
-"""
-Parse ABI documentation and produce results from it.
-"""
-
-from argparse import Namespace
-import logging
-import os
-import re
-
-from pprint import pformat
-from random import randrange, seed
-
-# Import Python modules
-
-from helpers import AbiDebug, ABI_DIR
-
-
-class AbiParser:
- """Main class to parse ABI files"""
-
- TAGS = r"(what|where|date|kernelversion|contact|description|users)"
- XREF = r"(?:^|\s|\()(\/(?:sys|config|proc|dev|kvd)\/[^,.:;\)\s]+)(?:[,.:;\)\s]|\Z)"
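-
-    # ABI files are sequences of tagged blocks; an illustrative
-    # (made-up) entry looks like:
-    #
-    #   What:         /sys/some/node
-    #   Date:         January 2025
-    #   Contact:      maintainer@example.com
-    #   Description:  Free-form text describing the node.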
-
- def __init__(self, directory, logger=None,
- enable_lineno=False, show_warnings=True, debug=0):
- """Stores arguments for the class and initialize class vars"""
-
- self.directory = directory
- self.enable_lineno = enable_lineno
- self.show_warnings = show_warnings
- self.debug = debug
-
- if not logger:
- self.log = logging.getLogger("get_abi")
- else:
- self.log = logger
-
- self.data = {}
- self.what_symbols = {}
- self.file_refs = {}
- self.what_refs = {}
-
-        # Ignore files that end with these suffixes
- self.ignore_suffixes = (".rej", ".org", ".orig", ".bak", "~")
-
-        # Regular expressions used by the parser
- self.re_abi_dir = re.compile(r"(.*)" + ABI_DIR)
- self.re_tag = re.compile(r"(\S+)(:\s*)(.*)", re.I)
- self.re_valid = re.compile(self.TAGS)
- self.re_start_spc = re.compile(r"(\s*)(\S.*)")
- self.re_whitespace = re.compile(r"^\s+")
-
-        # Regular expressions used when printing
- self.re_what = re.compile(r"(\/?(?:[\w\-]+\/?){1,2})")
- self.re_escape = re.compile(r"([\.\x01-\x08\x0e-\x1f\x21-\x2f\x3a-\x40\x7b-\xff])")
- self.re_unprintable = re.compile(r"([\x00-\x2f\x3a-\x40\x5b-\x60\x7b-\xff]+)")
- self.re_title_mark = re.compile(r"\n[\-\*\=\^\~]+\n")
- self.re_doc = re.compile(r"Documentation/(?!devicetree)(\S+)\.rst")
- self.re_abi = re.compile(r"(Documentation/ABI/)([\w\/\-]+)")
- self.re_xref_node = re.compile(self.XREF)
-
- def warn(self, fdata, msg, extra=None):
- """Displays a parse error if warning is enabled"""
-
- if not self.show_warnings:
- return
-
- msg = f"{fdata.fname}:{fdata.ln}: {msg}"
- if extra:
- msg += "\n\t\t" + extra
-
- self.log.warning(msg)
-
- def add_symbol(self, what, fname, ln=None, xref=None):
- """Create a reference table describing where each 'what' is located"""
-
- if what not in self.what_symbols:
- self.what_symbols[what] = {"file": {}}
-
- if fname not in self.what_symbols[what]["file"]:
- self.what_symbols[what]["file"][fname] = []
-
- if ln and ln not in self.what_symbols[what]["file"][fname]:
- self.what_symbols[what]["file"][fname].append(ln)
-
- if xref:
- self.what_symbols[what]["xref"] = xref
-
- def _parse_line(self, fdata, line):
- """Parse a single line of an ABI file"""
-
- new_what = False
- new_tag = False
- content = None
-
- match = self.re_tag.match(line)
- if match:
- new = match.group(1).lower()
- sep = match.group(2)
- content = match.group(3)
-
- match = self.re_valid.search(new)
- if match:
- new_tag = match.group(1)
- else:
- if fdata.tag == "description":
- # New "tag" is actually part of description.
- # Don't consider it a tag
- new_tag = False
- elif fdata.tag != "":
- self.warn(fdata, f"tag '{fdata.tag}' is invalid", line)
-
- if new_tag:
- # "where" is Invalid, but was a common mistake. Warn if found
- if new_tag == "where":
- self.warn(fdata, "tag 'Where' is invalid. Should be 'What:' instead")
- new_tag = "what"
-
- if new_tag == "what":
- fdata.space = None
-
- if content not in self.what_symbols:
- self.add_symbol(what=content, fname=fdata.fname, ln=fdata.ln)
-
- if fdata.tag == "what":
- fdata.what.append(content.strip("\n"))
- else:
- if fdata.key:
- if "description" not in self.data.get(fdata.key, {}):
- self.warn(fdata, f"{fdata.key} doesn't have a description")
-
- for w in fdata.what:
- self.add_symbol(what=w, fname=fdata.fname,
- ln=fdata.what_ln, xref=fdata.key)
-
- fdata.label = content
- new_what = True
-
- key = "abi_" + content.lower()
- fdata.key = self.re_unprintable.sub("_", key).strip("_")
-
-                    # Avoid duplicated keys by using a fixed seed, so that
-                    # the generated namespace stays identical as long as
-                    # the ABI symbols don't change
- seed(42)
-
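-                    # randrange(0, 51) below spans "A"-"Z" plus "a"-"y"
-                    # once remapped, so colliding keys get a stable
-                    # pseudo-random alphabetic suffix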
- while fdata.key in self.data:
- char = randrange(0, 51) + ord("A")
- if char > ord("Z"):
- char += ord("a") - ord("Z") - 1
-
- fdata.key += chr(char)
-
- if fdata.key and fdata.key not in self.data:
- self.data[fdata.key] = {
- "what": [content],
- "file": [fdata.file_ref],
- "path": fdata.ftype,
- "line_no": fdata.ln,
- }
-
- fdata.what = self.data[fdata.key]["what"]
-
- self.what_refs[content] = fdata.key
- fdata.tag = new_tag
- fdata.what_ln = fdata.ln
-
- if fdata.nametag["what"]:
- t = (content, fdata.key)
- if t not in fdata.nametag["symbols"]:
- fdata.nametag["symbols"].append(t)
-
- return
-
- if fdata.tag and new_tag:
- fdata.tag = new_tag
-
- if new_what:
- fdata.label = ""
-
- if "description" in self.data[fdata.key]:
- self.data[fdata.key]["description"] += "\n\n"
-
- if fdata.file_ref not in self.data[fdata.key]["file"]:
- self.data[fdata.key]["file"].append(fdata.file_ref)
-
- if self.debug == AbiDebug.WHAT_PARSING:
- self.log.debug("what: %s", fdata.what)
-
- if not fdata.what:
- self.warn(fdata, "'What:' should come first:", line)
- return
-
- if new_tag == "description":
- fdata.space = None
-
- if content:
- sep = sep.replace(":", " ")
-
- c = " " * len(new_tag) + sep + content
- c = c.expandtabs()
-
- match = self.re_start_spc.match(c)
- if match:
- # Preserve initial spaces for the first line
- fdata.space = match.group(1)
- content = match.group(2) + "\n"
-
- self.data[fdata.key][fdata.tag] = content
-
- return
-
-        # Store any content that appears before tags into the database
- if not fdata.tag and "what" in fdata.nametag:
- fdata.nametag["description"] += line
- return
-
- if fdata.tag == "description":
- content = line.expandtabs()
-
- if self.re_whitespace.sub("", content) == "":
- self.data[fdata.key][fdata.tag] += "\n"
- return
-
- if fdata.space is None:
- match = self.re_start_spc.match(content)
- if match:
- # Preserve initial spaces for the first line
- fdata.space = match.group(1)
-
- content = match.group(2) + "\n"
- else:
- if content.startswith(fdata.space):
- content = content[len(fdata.space):]
-
- else:
- fdata.space = ""
-
- if fdata.tag == "what":
- w = content.strip("\n")
- if w:
- self.data[fdata.key][fdata.tag].append(w)
- else:
- self.data[fdata.key][fdata.tag] += content
- return
-
- content = line.strip()
- if fdata.tag:
- if fdata.tag == "what":
- w = content.strip("\n")
- if w:
- self.data[fdata.key][fdata.tag].append(w)
- else:
- self.data[fdata.key][fdata.tag] += "\n" + content.rstrip("\n")
- return
-
- # Everything else is error
- if content:
- self.warn(fdata, "Unexpected content", line)
-
- def parse_readme(self, nametag, fname):
- """Parse ABI README file"""
-
- nametag["what"] = ["Introduction"]
- nametag["path"] = "README"
- with open(fname, "r", encoding="utf8", errors="backslashreplace") as fp:
- for line in fp:
- match = self.re_tag.match(line)
- if match:
- new = match.group(1).lower()
-
- match = self.re_valid.search(new)
- if match:
- nametag["description"] += "\n:" + line
- continue
-
- nametag["description"] += line
-
- def parse_file(self, fname, path, basename):
- """Parse a single file"""
-
- ref = f"abi_file_{path}_{basename}"
- ref = self.re_unprintable.sub("_", ref).strip("_")
-
-        # Store per-file state into a namespace instance. This will be used
-        # by the per-line parser state machine and by the warning function.
-        fdata = Namespace()
-
- fdata.fname = fname
- fdata.name = basename
-
- pos = fname.find(ABI_DIR)
- if pos > 0:
- f = fname[pos:]
- else:
- f = fname
-
- fdata.file_ref = (f, ref)
- self.file_refs[f] = ref
-
- fdata.ln = 0
- fdata.what_ln = 0
- fdata.tag = ""
- fdata.label = ""
- fdata.what = []
- fdata.key = None
- fdata.xrefs = None
- fdata.space = None
- fdata.ftype = path.split("/")[0]
-
- fdata.nametag = {}
- fdata.nametag["what"] = [f"ABI file {path}/{basename}"]
- fdata.nametag["type"] = "File"
- fdata.nametag["path"] = fdata.ftype
- fdata.nametag["file"] = [fdata.file_ref]
- fdata.nametag["line_no"] = 1
- fdata.nametag["description"] = ""
- fdata.nametag["symbols"] = []
-
- self.data[ref] = fdata.nametag
-
- if self.debug & AbiDebug.WHAT_OPEN:
- self.log.debug("Opening file %s", fname)
-
- if basename == "README":
- self.parse_readme(fdata.nametag, fname)
- return
-
- with open(fname, "r", encoding="utf8", errors="backslashreplace") as fp:
- for line in fp:
- fdata.ln += 1
-
- self._parse_line(fdata, line)
-
- if "description" in fdata.nametag:
- fdata.nametag["description"] = fdata.nametag["description"].lstrip("\n")
-
- if fdata.key:
- if "description" not in self.data.get(fdata.key, {}):
- self.warn(fdata, f"{fdata.key} doesn't have a description")
-
- for w in fdata.what:
- self.add_symbol(what=w, fname=fname, xref=fdata.key)
-
- def _parse_abi(self, root=None):
- """Internal function to parse documentation ABI recursively"""
-
- if not root:
- root = self.directory
-
- with os.scandir(root) as obj:
- for entry in obj:
- name = os.path.join(root, entry.name)
-
- if entry.is_dir():
- self._parse_abi(name)
- continue
-
- if not entry.is_file():
- continue
-
- basename = os.path.basename(name)
-
- if basename.startswith("."):
- continue
-
- if basename.endswith(self.ignore_suffixes):
- continue
-
- path = self.re_abi_dir.sub("", os.path.dirname(name))
-
- self.parse_file(name, path, basename)
-
- def parse_abi(self, root=None):
- """Parse documentation ABI"""
-
- self._parse_abi(root)
-
- if self.debug & AbiDebug.DUMP_ABI_STRUCTS:
- self.log.debug(pformat(self.data))
-
- def desc_txt(self, desc):
- """Print description as found inside ABI files"""
-
- desc = desc.strip(" \t\n")
-
- return desc + "\n\n"
-
- def xref(self, fname):
- """
- Converts a Documentation/ABI + basename into a ReST cross-reference
- """
-
-        return self.file_refs.get(fname)
-
- def desc_rst(self, desc):
- """Enrich ReST output by creating cross-references"""
-
- # Remove title markups from the description
-        # Titles inside ABI files would only work if extra care
-        # were taken to strictly follow the same heading level
-        # order for each markup.
- desc = self.re_title_mark.sub("\n\n", "\n" + desc)
- desc = desc.rstrip(" \t\n").lstrip("\n")
-
-        # Python's regex performance for non-compiled expressions is a lot
-        # worse than Perl's, as Perl automatically caches them at their
-        # first usage. Here, we'll need to do the same, as otherwise the
-        # performance penalty would be high
-
- new_desc = ""
- for d in desc.split("\n"):
- if d == "":
- new_desc += "\n"
- continue
-
- # Use cross-references for doc files where needed
- d = self.re_doc.sub(r":doc:`/\1`", d)
-
- # Use cross-references for ABI generated docs where needed
- matches = self.re_abi.findall(d)
- for m in matches:
- abi = m[0] + m[1]
-
- xref = self.file_refs.get(abi)
- if not xref:
-                    # This may happen if the ABI is in a separate directory,
-                    # e.g. when parsing ABI/testing while the symbol lives
-                    # under ABI/stable. The proper solution is to move this
-                    # part of the code into sphinx/kernel_abi.py
- self.log.info("Didn't find ABI reference for '%s'", abi)
- else:
- new = self.re_escape.sub(r"\\\1", m[1])
- d = re.sub(fr"\b{abi}\b", f":ref:`{new} <{xref}>`", d)
-
- # Seek for cross reference symbols like /sys/...
- # Need to be careful to avoid doing it on a code block
- if d[0] not in [" ", "\t"]:
- matches = self.re_xref_node.findall(d)
- for m in matches:
- # Finding ABI here is more complex due to wildcards
- xref = self.what_refs.get(m)
- if xref:
- new = self.re_escape.sub(r"\\\1", m)
- d = re.sub(fr"\b{m}\b", f":ref:`{new} <{xref}>`", d)
-
- new_desc += d + "\n"
-
- return new_desc + "\n\n"
-
- def doc(self, output_in_txt=False, show_symbols=True, show_file=True,
- filter_path=None):
- """Print ABI at stdout"""
-
- part = None
- for key, v in sorted(self.data.items(),
- key=lambda x: (x[1].get("type", ""),
- x[1].get("what"))):
-
- wtype = v.get("type", "Symbol")
- file_ref = v.get("file")
- names = v.get("what", [""])
-
- if wtype == "File":
- if not show_file:
- continue
- else:
- if not show_symbols:
- continue
-
- if filter_path:
- if v.get("path") != filter_path:
- continue
-
- msg = ""
-
- if wtype != "File":
- cur_part = names[0]
- if cur_part.find("/") >= 0:
- match = self.re_what.match(cur_part)
- if match:
- symbol = match.group(1).rstrip("/")
- cur_part = "Symbols under " + symbol
-
- if cur_part and cur_part != part:
- part = cur_part
- msg += part + "\n"+ "-" * len(part) +"\n\n"
-
- msg += f".. _{key}:\n\n"
-
- max_len = 0
- for i in range(0, len(names)): # pylint: disable=C0200
- names[i] = "**" + self.re_escape.sub(r"\\\1", names[i]) + "**"
-
- max_len = max(max_len, len(names[i]))
-
- msg += "+-" + "-" * max_len + "-+\n"
- for name in names:
- msg += f"| {name}" + " " * (max_len - len(name)) + " |\n"
- msg += "+-" + "-" * max_len + "-+\n"
- msg += "\n"
-
- for ref in file_ref:
- if wtype == "File":
- msg += f".. _{ref[1]}:\n\n"
- else:
- base = os.path.basename(ref[0])
- msg += f"Defined on file :ref:`{base} <{ref[1]}>`\n\n"
-
- if wtype == "File":
- msg += names[0] +"\n" + "-" * len(names[0]) +"\n\n"
-
- desc = v.get("description")
- if not desc and wtype != "File":
- msg += f"DESCRIPTION MISSING for {names[0]}\n\n"
-
- if desc:
- if output_in_txt:
- msg += self.desc_txt(desc)
- else:
- msg += self.desc_rst(desc)
-
- symbols = v.get("symbols")
- if symbols:
- msg += "Has the following ABI:\n\n"
-
- for w, label in symbols:
- # Escape special chars from content
- content = self.re_escape.sub(r"\\\1", w)
-
- msg += f"- :ref:`{content} <{label}>`\n\n"
-
- users = v.get("users")
- if users and users.strip(" \t\n"):
- users = users.strip("\n").replace('\n', '\n\t')
- msg += f"Users:\n\t{users}\n\n"
-
- ln = v.get("line_no", 1)
-
- yield (msg, file_ref[0][0], ln)
-
- def check_issues(self):
- """Warn about duplicated ABI entries"""
-
- for what, v in self.what_symbols.items():
- files = v.get("file")
- if not files:
- # Should never happen if the parser works properly
- self.log.warning("%s doesn't have a file associated", what)
- continue
-
- if len(files) == 1:
- continue
-
- f = []
- for fname, lines in sorted(files.items()):
- if not lines:
- f.append(f"{fname}")
- elif len(lines) == 1:
- f.append(f"{fname}:{lines[0]}")
- else:
- m = fname + "lines "
- m += ", ".join(str(x) for x in lines)
- f.append(m)
-
- self.log.warning("%s is defined %d times: %s", what, len(f), "; ".join(f))
-
- def search_symbols(self, expr):
- """ Searches for ABI symbols """
-
- regex = re.compile(expr, re.I)
-
- found_keys = 0
-        for t in sorted(self.data.items(), key=lambda x: x[0]):
- v = t[1]
-
- wtype = v.get("type", "")
- if wtype == "File":
- continue
-
- for what in v.get("what", [""]):
- if regex.search(what):
- found_keys += 1
-
- kernelversion = v.get("kernelversion", "").strip(" \t\n")
- date = v.get("date", "").strip(" \t\n")
- contact = v.get("contact", "").strip(" \t\n")
- users = v.get("users", "").strip(" \t\n")
- desc = v.get("description", "").strip(" \t\n")
-
- files = []
- for f in v.get("file", ()):
- files.append(f[0])
-
- what = str(found_keys) + ". " + what
- title_tag = "-" * len(what)
-
- print(f"\n{what}\n{title_tag}\n")
-
- if kernelversion:
- print(f"Kernel version:\t\t{kernelversion}")
-
- if date:
- print(f"Date:\t\t\t{date}")
-
- if contact:
- print(f"Contact:\t\t{contact}")
-
- if users:
- print(f"Users:\t\t\t{users}")
-
- print("Defined on file(s):\t" + ", ".join(files))
-
- if desc:
- desc = desc.strip("\n")
- print(f"\n{desc}\n")
-
- if not found_keys:
- print(f"Regular expression /{expr}/ not found.")
diff --git a/scripts/lib/abi/abi_regex.py b/scripts/lib/abi/abi_regex.py
deleted file mode 100644
index 8a57846cbc69..000000000000
--- a/scripts/lib/abi/abi_regex.py
+++ /dev/null
@@ -1,234 +0,0 @@
-#!/usr/bin/env python3
-# xxpylint: disable=R0903
-# Copyright(c) 2025: Mauro Carvalho Chehab <mchehab@kernel.org>.
-# SPDX-License-Identifier: GPL-2.0
-
-"""
-Convert ABI what into regular expressions
-"""
-
-import re
-import sys
-
-from pprint import pformat
-
-from abi_parser import AbiParser
-from helpers import AbiDebug
-
-class AbiRegex(AbiParser):
- """Extends AbiParser to search ABI nodes with regular expressions"""
-
- # Escape only ASCII visible characters
- escape_symbols = r"([\x21-\x29\x2b-\x2d\x3a-\x40\x5c\x60\x7b-\x7e])"
- leave_others = "others"
-
- # Tuples with regular expressions to be compiled and replacement data
- re_whats = [
- # Drop escape characters that might exist
- (re.compile("\\\\"), ""),
-
- # Temporarily escape dot characters
- (re.compile(r"\."), "\xf6"),
-
- # Temporarily change [0-9]+ type of patterns
- (re.compile(r"\[0\-9\]\+"), "\xff"),
-
- # Temporarily change [\d+-\d+] type of patterns
- (re.compile(r"\[0\-\d+\]"), "\xff"),
- (re.compile(r"\[0:\d+\]"), "\xff"),
- (re.compile(r"\[(\d+)\]"), "\xf4\\\\d+\xf5"),
-
- # Temporarily change [0-9] type of patterns
- (re.compile(r"\[(\d)\-(\d)\]"), "\xf4\1-\2\xf5"),
-
- # Handle multiple option patterns
- (re.compile(r"[\{\<\[]([\w_]+)(?:[,|]+([\w_]+)){1,}[\}\>\]]"), r"(\1|\2)"),
-
- # Handle wildcards
- (re.compile(r"([^\/])\*"), "\\1\\\\w\xf7"),
- (re.compile(r"/\*/"), "/.*/"),
- (re.compile(r"/\xf6\xf6\xf6"), "/.*"),
- (re.compile(r"\<[^\>]+\>"), "\\\\w\xf7"),
- (re.compile(r"\{[^\}]+\}"), "\\\\w\xf7"),
- (re.compile(r"\[[^\]]+\]"), "\\\\w\xf7"),
-
- (re.compile(r"XX+"), "\\\\w\xf7"),
- (re.compile(r"([^A-Z])[XYZ]([^A-Z])"), "\\1\\\\w\xf7\\2"),
- (re.compile(r"([^A-Z])[XYZ]$"), "\\1\\\\w\xf7"),
- (re.compile(r"_[AB]_"), "_\\\\w\xf7_"),
-
- # Recover [0-9] type of patterns
- (re.compile(r"\xf4"), "["),
- (re.compile(r"\xf5"), "]"),
-
- # Remove duplicated spaces
- (re.compile(r"\s+"), r" "),
-
- # Special case: drop comparison as in:
- # What: foo = <something>
- # (this happens on a few IIO definitions)
- (re.compile(r"\s*\=.*$"), ""),
-
- # Escape all other symbols
- (re.compile(escape_symbols), r"\\\1"),
- (re.compile(r"\\\\"), r"\\"),
- (re.compile(r"\\([\[\]\(\)\|])"), r"\1"),
- (re.compile(r"(\d+)\\(-\d+)"), r"\1\2"),
-
- (re.compile(r"\xff"), r"\\d+"),
-
-        # Special case: IIO ABI entries that contain a parenthesis.
- (re.compile(r"sqrt(.*)"), r"sqrt(.*)"),
-
- # Simplify regexes with multiple .*
- (re.compile(r"(?:\.\*){2,}"), ""),
-
- # Recover dot characters
- (re.compile(r"\xf6"), "\\."),
- # Recover plus characters
- (re.compile(r"\xf7"), "+"),
- ]
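-
-    # With the substitutions above, a What: such as
-    # "/sys/block/<disk>/stat" ends up as the regex "/sys/block/\w+/stat"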
- re_has_num = re.compile(r"\\d")
-
- # Symbol name after escape_chars that are considered a devnode basename
- re_symbol_name = re.compile(r"(\w|\\[\.\-\:])+$")
-
- # List of popular group names to be skipped to minimize regex group size
- # Use AbiDebug.SUBGROUP_SIZE to detect those
- skip_names = set(["devices", "hwmon"])
-
- def regex_append(self, what, new):
- """
- Get a search group for a subset of regular expressions.
-
-        As the ABI may have thousands of symbols, using a for loop to try
-        all regular expressions is at least O(n^2). When there are
-        wildcards, the complexity increases substantially, eventually
-        becoming exponential.
-
-        To avoid spending too much time on them, split the expressions into
-        groups. The smaller the group, the better, as searches will then be
-        confined to a small number of regular expressions.
-
- The conversion to a regex subset is tricky, as we need something
- that can be easily obtained from the sysfs symbol and from the
- regular expression. So, we need to discard nodes that have
- wildcards.
-
- If it can't obtain a subgroup, place the regular expression inside
- a special group (self.leave_others).
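-
-        For example, "/sys/bus/iio/devices/iio:deviceX/sampling_frequency"
-        would be grouped under "sampling_frequency"; an expression whose
-        components are all wildcards ends up in self.leave_others.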
- """
-
- search_group = None
-
- for search_group in reversed(new.split("/")):
- if not search_group or search_group in self.skip_names:
- continue
- if self.re_symbol_name.match(search_group):
- break
-
- if not search_group:
- search_group = self.leave_others
-
- if self.debug & AbiDebug.SUBGROUP_MAP:
- self.log.debug("%s: mapped as %s", what, search_group)
-
- try:
- if search_group not in self.regex_group:
- self.regex_group[search_group] = []
-
- self.regex_group[search_group].append(re.compile(new))
- if self.search_string:
- if what.find(self.search_string) >= 0:
- print(f"What: {what}")
- except re.PatternError:
- self.log.warning("Ignoring '%s' as it produced an invalid regex:\n"
- " '%s'", what, new)
-
- def get_regexes(self, what):
- """
- Given an ABI devnode, return a list of all regular expressions that
- may match it, based on the sub-groups created by regex_append()
- """
-
- re_list = []
-
- patches = what.split("/")
- patches.reverse()
- patches.append(self.leave_others)
-
- for search_group in patches:
- if search_group in self.regex_group:
- re_list += self.regex_group[search_group]
-
- return re_list
-
- def __init__(self, *args, **kwargs):
- """
-        Override the init method to pick up the search_string argument
- """
-
- self.regex_group = None
- self.search_string = None
- self.re_string = None
-
- if "search_string" in kwargs:
- self.search_string = kwargs.get("search_string")
- del kwargs["search_string"]
-
- if self.search_string:
-
- try:
- self.re_string = re.compile(self.search_string)
- except re.PatternError as e:
- msg = f"{self.search_string} is not a valid regular expression"
- raise ValueError(msg) from e
-
- super().__init__(*args, **kwargs)
-
- def parse_abi(self, *args, **kwargs):
-
- super().parse_abi(*args, **kwargs)
-
- self.regex_group = {}
-
- print("Converting ABI What fields into regexes...", file=sys.stderr)
-
- for t in sorted(self.data.items(), key=lambda x: x[0]):
- v = t[1]
- if v.get("type") == "File":
- continue
-
- v["regex"] = []
-
- for what in v.get("what", []):
- if not what.startswith("/sys"):
- continue
-
- new = what
- for r, s in self.re_whats:
- try:
- new = r.sub(s, new)
- except re.PatternError as e:
- # Help debugging troubles with new regexes
- raise re.PatternError(f"{e}\nwhile re.sub('{r.pattern}', {s}, str)") from e
-
- v["regex"].append(new)
-
- if self.debug & AbiDebug.REGEX:
- self.log.debug("%-90s <== %s", new, what)
-
- # Store regex into a subgroup to speedup searches
- self.regex_append(what, new)
-
- if self.debug & AbiDebug.SUBGROUP_DICT:
- self.log.debug("%s", pformat(self.regex_group))
-
- if self.debug & AbiDebug.SUBGROUP_SIZE:
- biggestd_keys = sorted(self.regex_group.keys(),
- key= lambda k: len(self.regex_group[k]),
- reverse=True)
-
- print("Top regex subgroups:", file=sys.stderr)
- for k in biggestd_keys[:10]:
- print(f"{k} has {len(self.regex_group[k])} elements", file=sys.stderr)
diff --git a/scripts/lib/abi/helpers.py b/scripts/lib/abi/helpers.py
deleted file mode 100644
index 639b23e4ca33..000000000000
--- a/scripts/lib/abi/helpers.py
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/usr/bin/env python3
-# Copyright(c) 2025: Mauro Carvalho Chehab <mchehab@kernel.org>.
-# pylint: disable=R0903
-# SPDX-License-Identifier: GPL-2.0
-
-"""
-Helper classes for ABI parser
-"""
-
-ABI_DIR = "Documentation/ABI/"
-
-
-class AbiDebug:
- """Debug levels"""
-
- WHAT_PARSING = 1
- WHAT_OPEN = 2
- DUMP_ABI_STRUCTS = 4
- UNDEFINED = 8
- REGEX = 16
- SUBGROUP_MAP = 32
- SUBGROUP_DICT = 64
- SUBGROUP_SIZE = 128
- GRAPH = 256
-
-
-DEBUG_HELP = """
-1 - enable debug parsing logic
-2 - enable debug messages on file open
-4 - enable debug for ABI parse data
-8 - enable extra debug information to identify troubles
- with ABI symbols found at the local machine that
- weren't found on ABI documentation (used only for
- undefined subcommand)
-16 - enable debug for what to regex conversion
-32 - enable debug for symbol regex subgroups
-64 - enable debug for sysfs graph tree variable
-"""
diff --git a/scripts/lib/abi/system_symbols.py b/scripts/lib/abi/system_symbols.py
deleted file mode 100644
index f15c94a6e33c..000000000000
--- a/scripts/lib/abi/system_symbols.py
+++ /dev/null
@@ -1,378 +0,0 @@
-#!/usr/bin/env python3
-# pylint: disable=R0902,R0912,R0914,R0915,R1702
-# Copyright(c) 2025: Mauro Carvalho Chehab <mchehab@kernel.org>.
-# SPDX-License-Identifier: GPL-2.0
-
-"""
-Parse ABI documentation and produce results from it.
-"""
-
-import os
-import re
-import sys
-
-from concurrent import futures
-from datetime import datetime
-from random import shuffle
-
-from helpers import AbiDebug
-
-class SystemSymbols:
- """Stores arguments for the class and initialize class vars"""
-
- def graph_add_file(self, path, link=None):
- """
- add a file path to the sysfs graph stored at self.root
- """
-
- if path in self.files:
- return
-
- name = ""
- ref = self.root
- for edge in path.split("/"):
- name += edge + "/"
- if edge not in ref:
- ref[edge] = {"__name": [name.rstrip("/")]}
-
- ref = ref[edge]
-
- if link and link not in ref["__name"]:
- ref["__name"].append(link.rstrip("/"))
-
- self.files.add(path)
-
- def print_graph(self, root_prefix="", root=None, level=0):
- """Prints a reference tree graph using UTF-8 characters"""
-
- if not root:
- root = self.root
- level = 0
-
- # Prevent endless traverse
- if level > 5:
- return
-
- if level > 0:
- prefix = "├──"
- last_prefix = "└──"
- else:
- prefix = ""
- last_prefix = ""
-
- items = list(root.items())
-
- names = root.get("__name", [])
- for k, edge in items:
- if k == "__name":
- continue
-
- if not k:
- k = "/"
-
- if len(names) > 1:
- k += " links: " + ",".join(names[1:])
-
- if edge == items[-1][1]:
- print(root_prefix + last_prefix + k)
- p = root_prefix
- if level > 0:
- p += " "
- self.print_graph(p, edge, level + 1)
- else:
- print(root_prefix + prefix + k)
- p = root_prefix + "│ "
- self.print_graph(p, edge, level + 1)
-
- def _walk(self, root):
- """
- Walk through sysfs to get all devnodes that aren't ignored.
-
-        By default, uses /sys as the sysfs mounting point. If another
-        directory is used, that prefix is replaced by /sys in the paths.
- """
-
- with os.scandir(root) as obj:
- for entry in obj:
- path = os.path.join(root, entry.name)
- if self.sysfs:
- p = path.replace(self.sysfs, "/sys", count=1)
- else:
- p = path
-
- if self.re_ignore.search(p):
- return
-
- # Handle link first to avoid directory recursion
- if entry.is_symlink():
- real = os.path.realpath(path)
- if not self.sysfs:
- self.aliases[path] = real
- else:
-                        real = real.replace(self.sysfs, "/sys", 1)
-
- # Add absfile location to graph if it doesn't exist
- if not self.re_ignore.search(real):
- # Add link to the graph
- self.graph_add_file(real, p)
-
- elif entry.is_file():
- self.graph_add_file(p)
-
- elif entry.is_dir():
- self._walk(path)
-
- def __init__(self, abi, sysfs="/sys", hints=False):
- """
- Initialize internal variables and get a list of all files inside
- sysfs that can currently be parsed.
-
-        Please note that there are several entries on sysfs that aren't
-        documented as ABI. Ignore those.
-
-        The real paths will be stored under self.files. Aliases will be
-        stored separately, in self.aliases.
- """
-
- self.abi = abi
- self.log = abi.log
-
- if sysfs != "/sys":
- self.sysfs = sysfs.rstrip("/")
- else:
- self.sysfs = None
-
- self.hints = hints
-
- self.root = {}
- self.aliases = {}
- self.files = set()
-
- dont_walk = [
- # Those require root access and aren't documented at ABI
- f"^{sysfs}/kernel/debug",
- f"^{sysfs}/kernel/tracing",
- f"^{sysfs}/fs/pstore",
- f"^{sysfs}/fs/bpf",
- f"^{sysfs}/fs/fuse",
-
- # This is not documented at ABI
- f"^{sysfs}/module",
-
- f"^{sysfs}/fs/cgroup", # this is big and has zero docs under ABI
- f"^{sysfs}/firmware", # documented elsewhere: ACPI, DT bindings
- "sections|notes", # aren't actually part of ABI
-
- # kernel-parameters.txt - not easy to parse
- "parameters",
- ]
-
- self.re_ignore = re.compile("|".join(dont_walk))
-
- print(f"Reading {sysfs} directory contents...", file=sys.stderr)
- self._walk(sysfs)
-
- def check_file(self, refs, found):
- """Check missing ABI symbols for a given sysfs file"""
-
- res_list = []
-
- try:
- for names in refs:
- fname = names[0]
-
- res = {
- "found": False,
- "fname": fname,
- "msg": "",
- }
- res_list.append(res)
-
- re_what = self.abi.get_regexes(fname)
- if not re_what:
- self.abi.log.warning(f"missing rules for {fname}")
- continue
-
- for name in names:
- for r in re_what:
- if self.abi.debug & AbiDebug.UNDEFINED:
- self.log.debug("check if %s matches '%s'", name, r.pattern)
- if r.match(name):
- res["found"] = True
- if found:
- res["msg"] += f" {fname}: regex:\n\t"
- continue
-
- if self.hints and not res["found"]:
- res["msg"] += f" {fname} not found. Tested regexes:\n"
- for r in re_what:
- res["msg"] += " " + r.pattern + "\n"
-
- except KeyboardInterrupt:
- pass
-
- return res_list
-
- def _ref_interactor(self, root):
- """Recursive function to interact over the sysfs tree"""
-
- for k, v in root.items():
- if isinstance(v, dict):
- yield from self._ref_interactor(v)
-
- if root == self.root or k == "__name":
- continue
-
- if self.abi.re_string:
- fname = v["__name"][0]
- if self.abi.re_string.search(fname):
- yield v
- else:
- yield v
-
-
- def get_fileref(self, all_refs, chunk_size):
- """Interactor to group refs into chunks"""
-
- n = 0
- refs = []
-
- for ref in all_refs:
- refs.append(ref)
-
- n += 1
- if n >= chunk_size:
- yield refs
- n = 0
- refs = []
-
- yield refs
-
- def check_undefined_symbols(self, max_workers=None, chunk_size=50,
- found=None, dry_run=None):
- """Seach ABI for sysfs symbols missing documentation"""
-
- self.abi.parse_abi()
-
- if self.abi.debug & AbiDebug.GRAPH:
- self.print_graph()
-
- all_refs = []
- for ref in self._ref_interactor(self.root):
- all_refs.append(ref["__name"])
-
- if dry_run:
- print("Would check", file=sys.stderr)
- for ref in all_refs:
- print(", ".join(ref))
-
- return
-
- print("Starting to search symbols (it may take several minutes):",
- file=sys.stderr)
- start = datetime.now()
- old_elapsed = None
-
-        # Python doesn't support CPU-bound multithreading, due to the
-        # limitations of its global lock (GIL). While Python 3.13 finally
-        # made the GIL optional, there are still issues related to it.
-        # Also, we want to keep backward compatibility with older versions
-        # of Python.
-        #
-        # So, use multiprocessing instead. However, Python is very slow at
-        # passing data to/from multiple processes. It may also consume lots
-        # of memory if the data to be shared is not small. So, we need to
-        # group the workload into chunks that are big enough to generate
-        # performance gains while not so big that they would cause
-        # out-of-memory.
-
- num_refs = len(all_refs)
- print(f"Number of references to parse: {num_refs}", file=sys.stderr)
-
- if not max_workers:
- max_workers = os.cpu_count()
- elif max_workers > os.cpu_count():
- max_workers = os.cpu_count()
-
- max_workers = max(max_workers, 1)
-
- max_chunk_size = int((num_refs + max_workers - 1) / max_workers)
- chunk_size = min(chunk_size, max_chunk_size)
- chunk_size = max(1, chunk_size)
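-        # e.g. with 10000 references and 8 workers, max_chunk_size is
-        # 1250, so the default chunk_size of 50 is kept; with only 200
-        # references, chunk_size would shrink to 25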
-
- if max_workers > 1:
- executor = futures.ProcessPoolExecutor
-
-            # Place references in a random order. This may help improve
-            # performance by mixing complex and simple expressions when
-            # creating chunks
- shuffle(all_refs)
- else:
-            # Python has a high overhead with processes. When there's just
-            # one worker, it is faster not to create a new process.
-            # Yet, the user still deserves a progress print. So, use
-            # Python's threads, which actually run in a single process,
-            # with an internal scheduler switching between tasks. There are
-            # no performance gains for non-IO tasks, but the pool can still
-            # be interrupted from time to time to display progress.
- executor = futures.ThreadPoolExecutor
-
- not_found = []
- f_list = []
- with executor(max_workers=max_workers) as exe:
- for refs in self.get_fileref(all_refs, chunk_size):
- if refs:
- try:
- f_list.append(exe.submit(self.check_file, refs, found))
-
- except KeyboardInterrupt:
- return
-
- total = len(f_list)
-
- if not total:
- if self.abi.re_string:
- print(f"No ABI symbol matches {self.abi.search_string}")
- else:
- self.abi.log.warning("No ABI symbols found")
- return
-
- print(f"{len(f_list):6d} jobs queued on {max_workers} workers",
- file=sys.stderr)
-
- while f_list:
- try:
- t = futures.wait(f_list, timeout=1,
- return_when=futures.FIRST_COMPLETED)
-
- done = t[0]
-
- for fut in done:
- res_list = fut.result()
-
- for res in res_list:
- if not res["found"]:
- not_found.append(res["fname"])
- if res["msg"]:
- print(res["msg"])
-
- f_list.remove(fut)
- except KeyboardInterrupt:
- return
-
- except RuntimeError as e:
- self.abi.log.warning(f"Future: {e}")
- break
-
- if sys.stderr.isatty():
- elapsed = str(datetime.now() - start).split(".", maxsplit=1)[0]
- if len(f_list) < total:
- elapsed += f" ({total - len(f_list)}/{total} jobs completed). "
- if elapsed != old_elapsed:
- print(elapsed + "\r", end="", flush=True,
- file=sys.stderr)
- old_elapsed = elapsed
-
- elapsed = str(datetime.now() - start).split(".", maxsplit=1)[0]
- print(elapsed, file=sys.stderr)
-
- for f in sorted(not_found):
- print(f"{f} not found.")
diff --git a/scripts/lib/kdoc/kdoc_files.py b/scripts/lib/kdoc/kdoc_files.py
deleted file mode 100644
index 9e09b45b02fa..000000000000
--- a/scripts/lib/kdoc/kdoc_files.py
+++ /dev/null
@@ -1,291 +0,0 @@
-#!/usr/bin/env python3
-# SPDX-License-Identifier: GPL-2.0
-# Copyright(c) 2025: Mauro Carvalho Chehab <mchehab@kernel.org>.
-#
-# pylint: disable=R0903,R0913,R0914,R0917
-
-"""
-Parse kernel-doc tags on multiple kernel source files.
-"""
-
-import argparse
-import logging
-import os
-import re
-
-from kdoc_parser import KernelDoc
-from kdoc_output import OutputFormat
-
-
-class GlobSourceFiles:
- """
-    Parse C source code file names and directories via an iterator.
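-
-    A minimal, hypothetical usage sketch:
-
-        glob = GlobSourceFiles(srctree="/path/to/linux")
-        for fname in glob.parse_files(["drivers/base"], None):
-            print(fname)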
- """
-
- def __init__(self, srctree=None, valid_extensions=None):
- """
- Initialize valid extensions with a tuple.
-
- If not defined, assume default C extensions (.c and .h)
-
-        It would be possible to use Python's glob function, but it is
-        very slow and it is not lazy: it would read all directories
-        before actually doing anything.
-
-        So, let's use our own implementation.
- """
-
- if not valid_extensions:
- self.extensions = (".c", ".h")
- else:
- self.extensions = valid_extensions
-
- self.srctree = srctree
-
- def _parse_dir(self, dirname):
- """Internal function to parse files recursively"""
-
- with os.scandir(dirname) as obj:
- for entry in obj:
- name = os.path.join(dirname, entry.name)
-
- if entry.is_dir():
- yield from self._parse_dir(name)
-
- if not entry.is_file():
- continue
-
- basename = os.path.basename(name)
-
- if not basename.endswith(self.extensions):
- continue
-
- yield name
-
- def parse_files(self, file_list, file_not_found_cb):
- """
- Define an interator to parse all source files from file_list,
- handling directories if any
- """
-
- if not file_list:
- return
-
- for fname in file_list:
- if self.srctree:
- f = os.path.join(self.srctree, fname)
- else:
- f = fname
-
- if os.path.isdir(f):
- yield from self._parse_dir(f)
- elif os.path.isfile(f):
- yield f
- elif file_not_found_cb:
- file_not_found_cb(fname)
-
-
-class KernelFiles():
- """
- Parse kernel-doc tags on multiple kernel source files.
-
-    There are two types of parsers defined here:
- - self.parse_file(): parses both kernel-doc markups and
- EXPORT_SYMBOL* macros;
- - self.process_export_file(): parses only EXPORT_SYMBOL* macros.
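-
-    A minimal, hypothetical usage sketch (RestFormat comes from
-    kdoc_output):
-
-        kfiles = KernelFiles(out_style=RestFormat())
-        kfiles.parse(["kernel/sched/core.c"])
-        for fname, msg in kfiles.msg():
-            print(msg)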
- """
-
- def warning(self, msg):
- """Ancillary routine to output a warning and increment error count"""
-
- self.config.log.warning(msg)
- self.errors += 1
-
- def error(self, msg):
- """Ancillary routine to output an error and increment error count"""
-
- self.config.log.error(msg)
- self.errors += 1
-
- def parse_file(self, fname):
- """
- Parse a single Kernel source.
- """
-
- # Prevent parsing the same file twice if results are cached
- if fname in self.files:
- return
-
- doc = KernelDoc(self.config, fname)
- export_table, entries = doc.parse_kdoc()
-
- self.export_table[fname] = export_table
-
- self.files.add(fname)
-        self.export_files.add(fname)  # parse_kdoc() already checks exports
-
- self.results[fname] = entries
-
- def process_export_file(self, fname):
- """
- Parses EXPORT_SYMBOL* macros from a single Kernel source file.
- """
-
- # Prevent parsing the same file twice if results are cached
- if fname in self.export_files:
- return
-
- doc = KernelDoc(self.config, fname)
- export_table = doc.parse_export()
-
- if not export_table:
- self.error(f"Error: Cannot check EXPORT_SYMBOL* on {fname}")
- export_table = set()
-
- self.export_table[fname] = export_table
- self.export_files.add(fname)
-
- def file_not_found_cb(self, fname):
- """
- Callback to warn if a file was not found.
- """
-
- self.error(f"Cannot find file {fname}")
-
- def __init__(self, verbose=False, out_style=None,
- werror=False, wreturn=False, wshort_desc=False,
- wcontents_before_sections=False,
- logger=None):
- """
- Initialize startup variables and parse all files
- """
-
- if not verbose:
- verbose = bool(os.environ.get("KBUILD_VERBOSE", 0))
-
- if out_style is None:
- out_style = OutputFormat()
-
- if not werror:
- kcflags = os.environ.get("KCFLAGS", None)
- if kcflags:
- match = re.search(r"(\s|^)-Werror(\s|$)", kcflags)
- if match:
- werror = True
-
- # reading this variable is for backwards compat just in case
- # someone was calling it with the variable from outside the
- # kernel's build system
- kdoc_werror = os.environ.get("KDOC_WERROR", None)
- if kdoc_werror:
- werror = bool(kdoc_werror)
-
- # Some variables are global to the parser logic as a whole as they are
- # used to send control configuration to KernelDoc class. As such,
- # those variables are read-only inside the KernelDoc.
- self.config = argparse.Namespace()
-
- self.config.verbose = verbose
- self.config.werror = werror
- self.config.wreturn = wreturn
- self.config.wshort_desc = wshort_desc
- self.config.wcontents_before_sections = wcontents_before_sections
-
- if not logger:
- self.config.log = logging.getLogger("kernel-doc")
- else:
- self.config.log = logger
-
- self.config.warning = self.warning
-
- self.config.src_tree = os.environ.get("SRCTREE", None)
-
- # Initialize variables that are internal to KernelFiles
-
- self.out_style = out_style
-
- self.errors = 0
- self.results = {}
-
- self.files = set()
- self.export_files = set()
- self.export_table = {}
-
- def parse(self, file_list, export_file=None):
- """
- Parse all files
- """
-
- glob = GlobSourceFiles(srctree=self.config.src_tree)
-
- for fname in glob.parse_files(file_list, self.file_not_found_cb):
- self.parse_file(fname)
-
- for fname in glob.parse_files(export_file, self.file_not_found_cb):
- self.process_export_file(fname)
-
- def out_msg(self, fname, name, arg):
- """
- Return output messages from a file name using the output style
- filtering.
-
- If the output type was not handled by the output style, return None.
- """
-
- # NOTE: we can add rules here to filter out unwanted parts,
- # although OutputFormat.msg already does that.
-
- return self.out_style.msg(fname, name, arg)
-
- def msg(self, enable_lineno=False, export=False, internal=False,
- symbol=None, nosymbol=None, no_doc_sections=False,
- filenames=None, export_file=None):
- """
- Iterate over the kernel-doc results and output messages,
- yielding (fname, markup) pairs on each iteration.
- """
-
- self.out_style.set_config(self.config)
-
- if not filenames:
- filenames = sorted(self.results.keys())
-
- glob = GlobSourceFiles(srctree=self.config.src_tree)
-
- for fname in filenames:
- function_table = set()
-
- if internal or export:
- # Default to the file itself, without clobbering export_file
- # for the remaining files in the loop.
- files = export_file if export_file else [fname]
-
- for f in glob.parse_files(files, self.file_not_found_cb):
- function_table |= self.export_table[f]
-
- if symbol:
- for s in symbol:
- function_table.add(s)
-
- self.out_style.set_filter(export, internal, symbol, nosymbol,
- function_table, enable_lineno,
- no_doc_sections)
-
- msg = ""
- if fname not in self.results:
- self.config.log.warning("No kernel-doc for file %s", fname)
- continue
-
- for arg in self.results[fname]:
- m = self.out_msg(fname, arg.name, arg)
-
- if m is None:
- ln = arg.get("ln", 0)
- dtype = arg.get('type', "")
-
- self.config.log.warning("%s:%d Can't handle %s",
- fname, ln, dtype)
- else:
- msg += m
-
- if msg:
- yield fname, msg
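-
-# A minimal end-to-end sketch (editor's illustration; "lib/foo.c" is a
-# hypothetical file with kernel-doc comments): parse it, then emit the
-# documentation through the ReST formatter:
-#
-#     from kdoc_output import RestFormat
-#
-#     kfiles = KernelFiles(out_style=RestFormat())
-#     kfiles.parse(["lib/foo.c"])
-#     for fname, msg in kfiles.msg(enable_lineno=True):
-#         print(msg)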
diff --git a/scripts/lib/kdoc/kdoc_item.py b/scripts/lib/kdoc/kdoc_item.py
deleted file mode 100644
index b3b225764550..000000000000
--- a/scripts/lib/kdoc/kdoc_item.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# A class that will, eventually, encapsulate all of the parsed data that we
-# then pass into the output modules.
-#
-
-class KdocItem:
- def __init__(self, name, type, start_line, **other_stuff):
- self.name = name
- self.type = type
- self.declaration_start_line = start_line
- self.sections = {}
- self.section_start_lines = {}
- self.parameterlist = []
- self.parameterdesc_start_lines = []
- self.parameterdescs = {}
- self.parametertypes = {}
- #
- # Just save everything else into our own dict so that the output
- # side can grab it directly as before. As we move things into more
- # structured data, this will, hopefully, fade away.
- #
- self.other_stuff = other_stuff
-
- def get(self, key, default=None):
- return self.other_stuff.get(key, default)
-
- def __getitem__(self, key):
- return self.get(key)
-
- #
- # Tracking of section and parameter information.
- #
- def set_sections(self, sections, start_lines):
- self.sections = sections
- self.section_start_lines = start_lines
-
- def set_params(self, names, descs, types, starts):
- self.parameterlist = names
- self.parameterdescs = descs
- self.parametertypes = types
- self.parameterdesc_start_lines = starts
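-
-# Editor's sketch (hypothetical values) of the fallback lookup: anything
-# not captured by a dedicated attribute lands in other_stuff and is
-# reachable through get() or item[...]:
-#
-#     item = KdocItem("foo", "function", 10, purpose="Do foo")
-#     item.get("purpose")      # -> "Do foo"
-#     item["functiontype"]     # -> None (missing keys fall back to None)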
diff --git a/scripts/lib/kdoc/kdoc_output.py b/scripts/lib/kdoc/kdoc_output.py
deleted file mode 100644
index ea8914537ba0..000000000000
--- a/scripts/lib/kdoc/kdoc_output.py
+++ /dev/null
@@ -1,749 +0,0 @@
-#!/usr/bin/env python3
-# SPDX-License-Identifier: GPL-2.0
-# Copyright(c) 2025: Mauro Carvalho Chehab <mchehab@kernel.org>.
-#
-# pylint: disable=C0301,R0902,R0911,R0912,R0913,R0914,R0915,R0917
-
-"""
-Implement output filters to print kernel-doc documentation.
-
-The implementation uses a virtual base class (OutputFormat) which
-contains a dispatcher to virtual methods, plus some code to filter
-the output messages.
-
-The actual implementation is done on one separate class per each type
-of output. Currently, there are output classes for ReST and man/troff.
-"""
-
-import os
-import re
-from datetime import datetime
-
-from kdoc_parser import KernelDoc, type_param
-from kdoc_re import KernRe
-
-
-function_pointer = KernRe(r"([^\(]*\(\*)\s*\)\s*\(([^\)]*)\)", cache=False)
-
-# match expressions used to find embedded type information
-type_constant = KernRe(r"\b``([^\`]+)``\b", cache=False)
-type_constant2 = KernRe(r"\%([-_*\w]+)", cache=False)
-type_func = KernRe(r"(\w+)\(\)", cache=False)
-type_param_ref = KernRe(r"([\!~\*]?)\@(\w*((\.\w+)|(->\w+))*(\.\.\.)?)", cache=False)
-
-# Special RST handling for func ptr params
-type_fp_param = KernRe(r"\@(\w+)\(\)", cache=False)
-
-# Special RST handling for structs with func ptr params
-type_fp_param2 = KernRe(r"\@(\w+->\S+)\(\)", cache=False)
-
-type_env = KernRe(r"(\$\w+)", cache=False)
-type_enum = KernRe(r"\&(enum\s*([_\w]+))", cache=False)
-type_struct = KernRe(r"\&(struct\s*([_\w]+))", cache=False)
-type_typedef = KernRe(r"\&(typedef\s*([_\w]+))", cache=False)
-type_union = KernRe(r"\&(union\s*([_\w]+))", cache=False)
-type_member = KernRe(r"\&([_\w]+)(\.|->)([_\w]+)", cache=False)
-type_fallback = KernRe(r"\&([_\w]+)", cache=False)
-type_member_func = type_member + KernRe(r"\(\)", cache=False)
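-
-# Editor's illustration (hypothetical input) of the patterns above, as
-# wired up in RestFormat.highlights below: a line such as
-#
-#     &struct device uses @dev and %NULL
-#
-# is rewritten to roughly
-#
-#     :c:type:`struct device <device>` uses **dev** and ``NULL``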
-
-
-class OutputFormat:
- """
- Base class for OutputFormat. If used as-is, it means that only
- warnings will be displayed.
- """
-
- # output mode.
- OUTPUT_ALL = 0 # output all symbols and doc sections
- OUTPUT_INCLUDE = 1 # output only specified symbols
- OUTPUT_EXPORTED = 2 # output exported symbols
- OUTPUT_INTERNAL = 3 # output non-exported symbols
-
- # Virtual member to be overridden by the inherited classes
- highlights = []
-
- def __init__(self):
- """Declare internal vars and set mode to OUTPUT_ALL"""
-
- self.out_mode = self.OUTPUT_ALL
- self.enable_lineno = None
- self.nosymbol = set()
- self.symbol = None
- self.function_table = None
- self.config = None
- self.no_doc_sections = False
-
- self.data = ""
-
- def set_config(self, config):
- """
- Setup global config variables used by both parser and output.
- """
-
- self.config = config
-
- def set_filter(self, export, internal, symbol, nosymbol, function_table,
- enable_lineno, no_doc_sections):
- """
- Initialize filter variables according to the requested mode.
-
- Only one choice is valid between export, internal and symbol.
-
- The nosymbol filter can be used on all modes.
- """
-
- self.enable_lineno = enable_lineno
- self.no_doc_sections = no_doc_sections
- self.function_table = function_table
-
- if symbol:
- self.out_mode = self.OUTPUT_INCLUDE
- elif export:
- self.out_mode = self.OUTPUT_EXPORTED
- elif internal:
- self.out_mode = self.OUTPUT_INTERNAL
- else:
- self.out_mode = self.OUTPUT_ALL
-
- if nosymbol:
- self.nosymbol = set(nosymbol)
-
-
- def highlight_block(self, block):
- """
- Apply the RST highlights to a sub-block of text.
- """
-
- for r, sub in self.highlights:
- block = r.sub(sub, block)
-
- return block
-
- def out_warnings(self, args):
- """
- Output warnings for identifiers that will be displayed.
- """
-
- for log_msg in args.warnings:
- self.config.warning(log_msg)
-
- def check_doc(self, name, args):
- """Check if DOC should be output"""
-
- if self.no_doc_sections:
- return False
-
- if name in self.nosymbol:
- return False
-
- if self.out_mode == self.OUTPUT_ALL:
- self.out_warnings(args)
- return True
-
- if self.out_mode == self.OUTPUT_INCLUDE:
- if name in self.function_table:
- self.out_warnings(args)
- return True
-
- return False
-
- def check_declaration(self, dtype, name, args):
- """
- Checks if a declaration should be output or not based on the
- filtering criteria.
- """
-
- if name in self.nosymbol:
- return False
-
- if self.out_mode == self.OUTPUT_ALL:
- self.out_warnings(args)
- return True
-
- if self.out_mode in [self.OUTPUT_INCLUDE, self.OUTPUT_EXPORTED]:
- if name in self.function_table:
- return True
-
- if self.out_mode == self.OUTPUT_INTERNAL:
- if dtype != "function":
- self.out_warnings(args)
- return True
-
- if name not in self.function_table:
- self.out_warnings(args)
- return True
-
- return False
-
- def msg(self, fname, name, args):
- """
- Handles a single entry from kernel-doc parser
- """
-
- self.data = ""
-
- dtype = args.type
-
- if dtype == "doc":
- self.out_doc(fname, name, args)
- return self.data
-
- if not self.check_declaration(dtype, name, args):
- return self.data
-
- if dtype == "function":
- self.out_function(fname, name, args)
- return self.data
-
- if dtype == "enum":
- self.out_enum(fname, name, args)
- return self.data
-
- if dtype == "typedef":
- self.out_typedef(fname, name, args)
- return self.data
-
- if dtype in ["struct", "union"]:
- self.out_struct(fname, name, args)
- return self.data
-
- # Warn if some type requires an output logic
- self.config.log.warning("doesn't know how to output '%s' block",
- dtype)
-
- return None
-
- # Virtual methods to be overridden by inherited classes
- # At the base class, those do nothing.
- def out_doc(self, fname, name, args):
- """Outputs a DOC block"""
-
- def out_function(self, fname, name, args):
- """Outputs a function"""
-
- def out_enum(self, fname, name, args):
- """Outputs an enum"""
-
- def out_typedef(self, fname, name, args):
- """Outputs a typedef"""
-
- def out_struct(self, fname, name, args):
- """Outputs a struct"""
-
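-# Editor's sketch of the expected extension pattern (hypothetical
-# subclass): concrete formatters override the out_* virtual methods and
-# append to self.data, which msg() returns after filtering:
-#
-#     class NamesOnlyFormat(OutputFormat):
-#         def out_function(self, fname, name, args):
-#             self.data += f"{name}()\n"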
-
-class RestFormat(OutputFormat):
- """Consts and functions used by ReST output"""
-
- highlights = [
- (type_constant, r"``\1``"),
- (type_constant2, r"``\1``"),
-
- # Note: need to escape () to avoid func matching later
- (type_member_func, r":c:type:`\1\2\3\\(\\) <\1>`"),
- (type_member, r":c:type:`\1\2\3 <\1>`"),
- (type_fp_param, r"**\1\\(\\)**"),
- (type_fp_param2, r"**\1\\(\\)**"),
- (type_func, r"\1()"),
- (type_enum, r":c:type:`\1 <\2>`"),
- (type_struct, r":c:type:`\1 <\2>`"),
- (type_typedef, r":c:type:`\1 <\2>`"),
- (type_union, r":c:type:`\1 <\2>`"),
-
- # in rst this can refer to any type
- (type_fallback, r":c:type:`\1`"),
- (type_param_ref, r"**\1\2**")
- ]
- blankline = "\n"
-
- sphinx_literal = KernRe(r'^[^.].*::$', cache=False)
- sphinx_cblock = KernRe(r'^\.\.\ +code-block::', cache=False)
-
- def __init__(self):
- """
- Creates class variables.
-
- Not really mandatory, but it is a good coding style and makes
- pylint happy.
- """
-
- super().__init__()
- self.lineprefix = ""
-
- def print_lineno(self, ln):
- """Outputs a line number"""
-
- if self.enable_lineno and ln is not None:
- ln += 1
- self.data += f".. LINENO {ln}\n"
-
- def output_highlight(self, args):
- """
- Output a block of text, converting embedded C symbol references
- to ReST with the self.highlights patterns.
- """
-
- input_text = args
- output = ""
- in_literal = False
- litprefix = ""
- block = ""
-
- for line in input_text.strip("\n").split("\n"):
-
- # If we're in a literal block, see if we should drop out of it.
- # Otherwise, pass the line straight through unmunged.
- if in_literal:
- if line.strip(): # If the line is not blank
- # If this is the first non-blank line in a literal block,
- # figure out the proper indent.
- if not litprefix:
- r = KernRe(r'^(\s*)')
- if r.match(line):
- litprefix = '^' + r.group(1)
- else:
- litprefix = ""
-
- output += line + "\n"
- elif not KernRe(litprefix).match(line):
- in_literal = False
- else:
- output += line + "\n"
- else:
- output += line + "\n"
-
- # Not in a literal block (or just dropped out)
- if not in_literal:
- block += line + "\n"
- if self.sphinx_literal.match(line) or self.sphinx_cblock.match(line):
- in_literal = True
- litprefix = ""
- output += self.highlight_block(block)
- block = ""
-
- # Handle any remaining block
- if block:
- output += self.highlight_block(block)
-
- # Print the output with the line prefix
- for line in output.strip("\n").split("\n"):
- self.data += self.lineprefix + line + "\n"
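-
- # Editor's illustration (hypothetical input) of the literal-block
- # handling above: the "::" line switches literal mode on, so the
- # indented line below it passes through with no highlight
- # substitutions:
- #
- #     Example usage::
- #
- #         ret = foo(&bar);   # "&bar" is not rewritten here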
-
- def out_section(self, args, out_docblock=False):
- """
- Outputs a block section.
-
- This could use some work; it's used to output the DOC: sections, and
- starts by putting out the name of the doc section itself, but that
- tends to duplicate a header already in the template file.
- """
- for section, text in args.sections.items():
- # Skip sections that are in the nosymbol_table
- if section in self.nosymbol:
- continue
-
- if out_docblock:
- if not self.out_mode == self.OUTPUT_INCLUDE:
- self.data += f".. _{section}:\n\n"
- self.data += f'{self.lineprefix}**{section}**\n\n'
- else:
- self.data += f'{self.lineprefix}**{section}**\n\n'
-
- self.print_lineno(args.section_start_lines.get(section, 0))
- self.output_highlight(text)
- self.data += "\n"
- self.data += "\n"
-
- def out_doc(self, fname, name, args):
- if not self.check_doc(name, args):
- return
- self.out_section(args, out_docblock=True)
-
- def out_function(self, fname, name, args):
-
- oldprefix = self.lineprefix
- signature = ""
-
- func_macro = args.get('func_macro', False)
- if func_macro:
- signature = name
- else:
- if args.get('functiontype'):
- signature = args['functiontype'] + " "
- signature += name + " ("
-
- ln = args.declaration_start_line
- count = 0
- for parameter in args.parameterlist:
- if count != 0:
- signature += ", "
- count += 1
- dtype = args.parametertypes.get(parameter, "")
-
- if function_pointer.search(dtype):
- signature += function_pointer.group(1) + parameter + ") (" + function_pointer.group(2) + ")"
- else:
- signature += dtype
-
- if not func_macro:
- signature += ")"
-
- self.print_lineno(ln)
- if args.get('typedef') or not args.get('functiontype'):
- self.data += f".. c:macro:: {name}\n\n"
-
- if args.get('typedef'):
- self.data += " **Typedef**: "
- self.lineprefix = ""
- self.output_highlight(args.get('purpose', ""))
- self.data += "\n\n**Syntax**\n\n"
- self.data += f" ``{signature}``\n\n"
- else:
- self.data += f"``{signature}``\n\n"
- else:
- self.data += f".. c:function:: {signature}\n\n"
-
- if not args.get('typedef'):
- self.print_lineno(ln)
- self.lineprefix = " "
- self.output_highlight(args.get('purpose', ""))
- self.data += "\n"
-
- # Put descriptive text into a container (HTML <div>) to help set
- # function prototypes apart
- self.lineprefix = " "
-
- if args.parameterlist:
- self.data += ".. container:: kernelindent\n\n"
- self.data += f"{self.lineprefix}**Parameters**\n\n"
-
- for parameter in args.parameterlist:
- parameter_name = KernRe(r'\[.*').sub('', parameter)
- dtype = args.parametertypes.get(parameter, "")
-
- if dtype:
- self.data += f"{self.lineprefix}``{dtype}``\n"
- else:
- self.data += f"{self.lineprefix}``{parameter}``\n"
-
- self.print_lineno(args.parameterdesc_start_lines.get(parameter_name, 0))
-
- self.lineprefix = " "
- if parameter_name in args.parameterdescs and \
- args.parameterdescs[parameter_name] != KernelDoc.undescribed:
-
- self.output_highlight(args.parameterdescs[parameter_name])
- self.data += "\n"
- else:
- self.data += f"{self.lineprefix}*undescribed*\n\n"
- self.lineprefix = " "
-
- self.out_section(args)
- self.lineprefix = oldprefix
-
- def out_enum(self, fname, name, args):
-
- oldprefix = self.lineprefix
- ln = args.declaration_start_line
-
- self.data += f"\n\n.. c:enum:: {name}\n\n"
-
- self.print_lineno(ln)
- self.lineprefix = " "
- self.output_highlight(args.get('purpose', ''))
- self.data += "\n"
-
- self.data += ".. container:: kernelindent\n\n"
- outer = self.lineprefix + " "
- self.lineprefix = outer + " "
- self.data += f"{outer}**Constants**\n\n"
-
- for parameter in args.parameterlist:
- self.data += f"{outer}``{parameter}``\n"
-
- if args.parameterdescs.get(parameter, '') != KernelDoc.undescribed:
- self.output_highlight(args.parameterdescs[parameter])
- else:
- self.data += f"{self.lineprefix}*undescribed*\n\n"
- self.data += "\n"
-
- self.lineprefix = oldprefix
- self.out_section(args)
-
- def out_typedef(self, fname, name, args):
-
- oldprefix = self.lineprefix
- ln = args.declaration_start_line
-
- self.data += f"\n\n.. c:type:: {name}\n\n"
-
- self.print_lineno(ln)
- self.lineprefix = " "
-
- self.output_highlight(args.get('purpose', ''))
-
- self.data += "\n"
-
- self.lineprefix = oldprefix
- self.out_section(args)
-
- def out_struct(self, fname, name, args):
-
- purpose = args.get('purpose', "")
- declaration = args.get('definition', "")
- dtype = args.type
- ln = args.declaration_start_line
-
- self.data += f"\n\n.. c:{dtype}:: {name}\n\n"
-
- self.print_lineno(ln)
-
- oldprefix = self.lineprefix
- self.lineprefix += " "
-
- self.output_highlight(purpose)
- self.data += "\n"
-
- self.data += ".. container:: kernelindent\n\n"
- self.data += f"{self.lineprefix}**Definition**::\n\n"
-
- self.lineprefix = self.lineprefix + " "
-
- declaration = declaration.replace("\t", self.lineprefix)
-
- self.data += f"{self.lineprefix}{dtype} {name}" + ' {' + "\n"
- self.data += f"{declaration}{self.lineprefix}" + "};\n\n"
-
- self.lineprefix = " "
- self.data += f"{self.lineprefix}**Members**\n\n"
- for parameter in args.parameterlist:
- if not parameter or parameter.startswith("#"):
- continue
-
- parameter_name = parameter.split("[", maxsplit=1)[0]
-
- if args.parameterdescs.get(parameter_name) == KernelDoc.undescribed:
- continue
-
- self.print_lineno(args.parameterdesc_start_lines.get(parameter_name, 0))
-
- self.data += f"{self.lineprefix}``{parameter}``\n"
-
- self.lineprefix = " "
- self.output_highlight(args.parameterdescs[parameter_name])
- self.lineprefix = " "
-
- self.data += "\n"
-
- self.data += "\n"
-
- self.lineprefix = oldprefix
- self.out_section(args)
-
-
-class ManFormat(OutputFormat):
- """Consts and functions used by man pages output"""
-
- highlights = (
- (type_constant, r"\1"),
- (type_constant2, r"\1"),
- (type_func, r"\\fB\1\\fP"),
- (type_enum, r"\\fI\1\\fP"),
- (type_struct, r"\\fI\1\\fP"),
- (type_typedef, r"\\fI\1\\fP"),
- (type_union, r"\\fI\1\\fP"),
- (type_param, r"\\fI\1\\fP"),
- (type_param_ref, r"\\fI\1\2\\fP"),
- (type_member, r"\\fI\1\2\3\\fP"),
- (type_fallback, r"\\fI\1\\fP")
- )
- blankline = ""
-
- date_formats = [
- "%a %b %d %H:%M:%S %Z %Y",
- "%a %b %d %H:%M:%S %Y",
- "%Y-%m-%d",
- "%b %d %Y",
- "%B %d %Y",
- "%m %d %Y",
- ]
-
- def __init__(self, modulename):
- """
- Creates class variables.
-
- Not really mandatory, but it is a good coding style and makes
- pylint happy.
- """
-
- super().__init__()
- self.modulename = modulename
-
- dt = None
- tstamp = os.environ.get("KBUILD_BUILD_TIMESTAMP")
- if tstamp:
- for fmt in self.date_formats:
- try:
- dt = datetime.strptime(tstamp, fmt)
- break
- except ValueError:
- pass
-
- if not dt:
- dt = datetime.now()
-
- self.man_date = dt.strftime("%B %Y")
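-
- # Editor's note (hypothetical value): with
- # KBUILD_BUILD_TIMESTAMP="Sat Mar 1 10:00:00 UTC 2025", the first
- # format in date_formats matches and self.man_date becomes
- # "March 2025"; without the variable, the current date is used.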
-
- def output_highlight(self, block):
- """
- Output a block of text, highlighting embedded C symbol references
- with the self.highlights patterns, in troff syntax.
- """
-
- contents = self.highlight_block(block)
-
- if isinstance(contents, list):
- contents = "\n".join(contents)
-
- for line in contents.strip("\n").split("\n"):
- line = KernRe(r"^\s*").sub("", line)
- if not line:
- continue
-
- if line[0] == ".":
- self.data += "\\&" + line + "\n"
- else:
- self.data += line + "\n"
-
- def out_doc(self, fname, name, args):
- if not self.check_doc(name, args):
- return
-
- self.data += f'.TH "{self.modulename}" 9 "{self.modulename}" "{self.man_date}" "API Manual" LINUX' + "\n"
-
- for section, text in args.sections.items():
- self.data += f'.SH "{section}"' + "\n"
- self.output_highlight(text)
-
- def out_function(self, fname, name, args):
- """output function in man"""
-
- self.data += f'.TH "{name}" 9 "{name}" "{self.man_date}" "Kernel Hacker\'s Manual" LINUX' + "\n"
-
- self.data += ".SH NAME\n"
- self.data += f"{name} \\- {args['purpose']}\n"
-
- self.data += ".SH SYNOPSIS\n"
- if args.get('functiontype', ''):
- self.data += f'.B "{args["functiontype"]}" {name}' + "\n"
- else:
- self.data += f'.B "{name}' + "\n"
-
- count = 0
- parenth = "("
- post = ","
-
- for parameter in args.parameterlist:
- if count == len(args.parameterlist) - 1:
- post = ");"
-
- dtype = args.parametertypes.get(parameter, "")
- if function_pointer.match(dtype):
- # Pointer-to-function
- self.data += f'".BI "{parenth}{function_pointer.group(1)}" " ") ({function_pointer.group(2)}){post}"' + "\n"
- else:
- dtype = KernRe(r'([^\*])$').sub(r'\1 ', dtype)
-
- self.data += f'.BI "{parenth}{dtype}" "{post}"' + "\n"
- count += 1
- parenth = ""
-
- if args.parameterlist:
- self.data += ".SH ARGUMENTS\n"
-
- for parameter in args.parameterlist:
- parameter_name = re.sub(r'\[.*', '', parameter)
-
- self.data += f'.IP "{parameter}" 12' + "\n"
- self.output_highlight(args.parameterdescs.get(parameter_name, ""))
-
- for section, text in args.sections.items():
- self.data += f'.SH "{section.upper()}"' + "\n"
- self.output_highlight(text)
-
- def out_enum(self, fname, name, args):
- self.data += f'.TH "{self.modulename}" 9 "enum {name}" "{self.man_date}" "API Manual" LINUX' + "\n"
-
- self.data += ".SH NAME\n"
- self.data += f"enum {name} \\- {args['purpose']}\n"
-
- self.data += ".SH SYNOPSIS\n"
- self.data += f"enum {name}" + " {\n"
-
- count = 0
- for parameter in args.parameterlist:
- self.data += f'.br\n.BI " {parameter}"' + "\n"
- if count == len(args.parameterlist) - 1:
- self.data += "\n};\n"
- else:
- self.data += ", \n.br\n"
-
- count += 1
-
- self.data += ".SH Constants\n"
-
- for parameter in args.parameterlist:
- parameter_name = KernRe(r'\[.*').sub('', parameter)
- self.data += f'.IP "{parameter}" 12' + "\n"
- self.output_highlight(args.parameterdescs.get(parameter_name, ""))
-
- for section, text in args.sections.items():
- self.data += f'.SH "{section}"' + "\n"
- self.output_highlight(text)
-
- def out_typedef(self, fname, name, args):
- module = self.modulename
- purpose = args.get('purpose')
-
- self.data += f'.TH "{module}" 9 "{name}" "{self.man_date}" "API Manual" LINUX' + "\n"
-
- self.data += ".SH NAME\n"
- self.data += f"typedef {name} \\- {purpose}\n"
-
- for section, text in args.sections.items():
- self.data += f'.SH "{section}"' + "\n"
- self.output_highlight(text)
-
- def out_struct(self, fname, name, args):
- module = self.modulename
- purpose = args.get('purpose')
- definition = args.get('definition')
-
- self.data += f'.TH "{module}" 9 "{args.type} {name}" "{self.man_date}" "API Manual" LINUX' + "\n"
-
- self.data += ".SH NAME\n"
- self.data += f"{args.type} {name} \\- {purpose}\n"
-
- # Replace tabs with two spaces and handle newlines
- declaration = definition.replace("\t", " ")
- declaration = KernRe(r"\n").sub('"\n.br\n.BI "', declaration)
-
- self.data += ".SH SYNOPSIS\n"
- self.data += f"{args.type} {name} " + "{" + "\n.br\n"
- self.data += f'.BI "{declaration}\n' + "};\n.br\n\n"
-
- self.data += ".SH Members\n"
- for parameter in args.parameterlist:
- if parameter.startswith("#"):
- continue
-
- parameter_name = re.sub(r"\[.*", "", parameter)
-
- if args.parameterdescs.get(parameter_name) == KernelDoc.undescribed:
- continue
-
- self.data += f'.IP "{parameter}" 12' + "\n"
- self.output_highlight(args.parameterdescs.get(parameter_name))
-
- for section, text in args.sections.items():
- self.data += f'.SH "{section}"' + "\n"
- self.output_highlight(text)
diff --git a/scripts/lib/kdoc/kdoc_parser.py b/scripts/lib/kdoc/kdoc_parser.py
deleted file mode 100644
index 2376f180b1fa..000000000000
--- a/scripts/lib/kdoc/kdoc_parser.py
+++ /dev/null
@@ -1,1649 +0,0 @@
-#!/usr/bin/env python3
-# SPDX-License-Identifier: GPL-2.0
-# Copyright(c) 2025: Mauro Carvalho Chehab <mchehab@kernel.org>.
-#
-# pylint: disable=C0301,C0302,R0904,R0912,R0913,R0914,R0915,R0917,R1702
-
-"""
-kdoc_parser
-===========
-
-Read a C language source or header FILE and extract embedded
-documentation comments
-"""
-
-import sys
-import re
-from pprint import pformat
-
-from kdoc_re import NestedMatch, KernRe
-from kdoc_item import KdocItem
-
-#
-# Regular expressions used to parse kernel-doc markups in the KernelDoc class.
-#
-# Let's declare them in lowercase outside any class, to make it easier
-# to convert them from the original Perl script.
-#
-# As those are evaluated at the beginning, no need to cache them
-#
-
-# Allow whitespace at end of comment start.
-doc_start = KernRe(r'^/\*\*\s*$', cache=False)
-
-doc_end = KernRe(r'\*/', cache=False)
-doc_com = KernRe(r'\s*\*\s*', cache=False)
-doc_com_body = KernRe(r'\s*\* ?', cache=False)
-doc_decl = doc_com + KernRe(r'(\w+)', cache=False)
-
-# @params and a strictly limited set of supported section names
-# Specifically:
-# Match @word:
-# @...:
-# @{section-name}:
-# while trying to not match literal block starts like "example::"
-#
-known_section_names = 'description|context|returns?|notes?|examples?'
-known_sections = KernRe(known_section_names, flags = re.I)
-doc_sect = doc_com + \
- KernRe(r'\s*(@[.\w]+|@\.\.\.|' + known_section_names + r')\s*:([^:].*)?$',
- flags=re.I, cache=False)
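-
-# Editor's illustration (hypothetical comment lines): doc_sect matches
-# section starts such as
-#
-#      * @dev: the device to operate on
-#      * Return: 0 on success
-#
-# capturing the section name in group 1 and the rest of the line in
-# group 2, while a literal-block start like " * example::" is rejected
-# by the [^:] guard.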
-
-doc_content = doc_com_body + KernRe(r'(.*)', cache=False)
-doc_inline_start = KernRe(r'^\s*/\*\*\s*$', cache=False)
-doc_inline_sect = KernRe(r'\s*\*\s*(@\s*[\w][\w\.]*\s*):(.*)', cache=False)
-doc_inline_end = KernRe(r'^\s*\*/\s*$', cache=False)
-doc_inline_oneline = KernRe(r'^\s*/\*\*\s*(@[\w\s]+):\s*(.*)\s*\*/\s*$', cache=False)
-
-export_symbol = KernRe(r'^\s*EXPORT_SYMBOL(_GPL)?\s*\(\s*(\w+)\s*\)\s*', cache=False)
-export_symbol_ns = KernRe(r'^\s*EXPORT_SYMBOL_NS(_GPL)?\s*\(\s*(\w+)\s*,\s*"\S+"\)\s*', cache=False)
-
-type_param = KernRe(r"@(\w*((\.\w+)|(->\w+))*(\.\.\.)?)", cache=False)
-
-#
-# Tests for the beginning of a kerneldoc block in its various forms.
-#
-doc_block = doc_com + KernRe(r'DOC:\s*(.*)?', cache=False)
-doc_begin_data = KernRe(r"^\s*\*?\s*(struct|union|enum|typedef)\b\s*(\w*)", cache = False)
-doc_begin_func = KernRe(str(doc_com) + # initial " * "
- r"(?:\w+\s*\*\s*)?" + # type (not captured)
- r'(?:define\s+)?' + # possible "define" (not captured)
- r'(\w+)\s*(?:\(\w*\))?\s*' + # name and optional "(...)"
- r'(?:[-:].*)?$', # description (not captured)
- cache = False)
-
-#
-# Here begins a long set of transformations to turn structure member prefixes
-# and macro invocations into something we can parse and generate kdoc for.
-#
-struct_args_pattern = r'([^,)]+)'
-
-struct_xforms = [
- # Strip attributes
- (KernRe(r"__attribute__\s*\(\([a-z0-9,_\*\s\(\)]*\)\)", flags=re.I | re.S, cache=False), ' '),
- (KernRe(r'\s*__aligned\s*\([^;]*\)', re.S), ' '),
- (KernRe(r'\s*__counted_by\s*\([^;]*\)', re.S), ' '),
- (KernRe(r'\s*__counted_by_(le|be)\s*\([^;]*\)', re.S), ' '),
- (KernRe(r'\s*__packed\s*', re.S), ' '),
- (KernRe(r'\s*CRYPTO_MINALIGN_ATTR', re.S), ' '),
- (KernRe(r'\s*____cacheline_aligned_in_smp', re.S), ' '),
- (KernRe(r'\s*____cacheline_aligned', re.S), ' '),
- (KernRe(r'\s*__cacheline_group_(begin|end)\([^\)]+\);'), ''),
- #
- # Unwrap struct_group macros based on this definition:
- # __struct_group(TAG, NAME, ATTRS, MEMBERS...)
- # which has variants like: struct_group(NAME, MEMBERS...)
- # Only MEMBERS arguments require documentation.
- #
- # Parsing them happens on two steps:
- #
- # 1. drop struct group arguments that aren't at MEMBERS,
- # storing them as STRUCT_GROUP(MEMBERS)
- #
- # 2. remove STRUCT_GROUP() ancillary macro.
- #
- # The original logic used to remove STRUCT_GROUP() using an
- # advanced regex:
- #
- # \bSTRUCT_GROUP(\(((?:(?>[^)(]+)|(?1))*)\))[^;]*;
- #
- # with two patterns that are incompatible with
- # Python re module, as it has:
- #
- # - a recursive pattern: (?1)
- # - an atomic grouping: (?>...)
- #
-# A simpler version was also tried, but it didn't work either:
-# \bSTRUCT_GROUP\(([^\)]+)\)[^;]*;
-#
-# as it doesn't properly match the end parenthesis in some cases.
- #
- # So, a better solution was crafted: there's now a NestedMatch
- # class that ensures that delimiters after a search are properly
- # matched. So, the implementation to drop STRUCT_GROUP() will be
- # handled in separate.
- #
- (KernRe(r'\bstruct_group\s*\(([^,]*,)', re.S), r'STRUCT_GROUP('),
- (KernRe(r'\bstruct_group_attr\s*\(([^,]*,){2}', re.S), r'STRUCT_GROUP('),
- (KernRe(r'\bstruct_group_tagged\s*\(([^,]*),([^,]*),', re.S), r'struct \1 \2; STRUCT_GROUP('),
- (KernRe(r'\b__struct_group\s*\(([^,]*,){3}', re.S), r'STRUCT_GROUP('),
- #
- # Replace macros
- #
- # TODO: use NestedMatch for FOO($1, $2, ...) matches
- #
- # it is better to also move those to the NestedMatch logic,
- # to ensure that parenthesis will be properly matched.
- #
- (KernRe(r'__ETHTOOL_DECLARE_LINK_MODE_MASK\s*\(([^\)]+)\)', re.S),
- r'DECLARE_BITMAP(\1, __ETHTOOL_LINK_MODE_MASK_NBITS)'),
- (KernRe(r'DECLARE_PHY_INTERFACE_MASK\s*\(([^\)]+)\)', re.S),
- r'DECLARE_BITMAP(\1, PHY_INTERFACE_MODE_MAX)'),
- (KernRe(r'DECLARE_BITMAP\s*\(' + struct_args_pattern + r',\s*' + struct_args_pattern + r'\)',
- re.S), r'unsigned long \1[BITS_TO_LONGS(\2)]'),
- (KernRe(r'DECLARE_HASHTABLE\s*\(' + struct_args_pattern + r',\s*' + struct_args_pattern + r'\)',
- re.S), r'unsigned long \1[1 << ((\2) - 1)]'),
- (KernRe(r'DECLARE_KFIFO\s*\(' + struct_args_pattern + r',\s*' + struct_args_pattern +
- r',\s*' + struct_args_pattern + r'\)', re.S), r'\2 *\1'),
- (KernRe(r'DECLARE_KFIFO_PTR\s*\(' + struct_args_pattern + r',\s*' +
- struct_args_pattern + r'\)', re.S), r'\2 *\1'),
- (KernRe(r'(?:__)?DECLARE_FLEX_ARRAY\s*\(' + struct_args_pattern + r',\s*' +
- struct_args_pattern + r'\)', re.S), r'\1 \2[]'),
- (KernRe(r'DEFINE_DMA_UNMAP_ADDR\s*\(' + struct_args_pattern + r'\)', re.S), r'dma_addr_t \1'),
- (KernRe(r'DEFINE_DMA_UNMAP_LEN\s*\(' + struct_args_pattern + r'\)', re.S), r'__u32 \1'),
-]
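-
-# Editor's illustration (hypothetical member): apply_transforms(), defined
-# below, applies the struct_xforms table above and turns
-#
-#     DECLARE_BITMAP(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
-#
-# into the directly parseable form
-#
-#     unsigned long supported[BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS)];
-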
-#
-# Regexes here are guaranteed to have the end delimiter matching
-# the start delimiter. Yet, right now, only one replace group
-# is allowed.
-#
-struct_nested_prefixes = [
- (re.compile(r'\bSTRUCT_GROUP\('), r'\1'),
-]
-
-#
-# Transforms for function prototypes
-#
-function_xforms = [
- (KernRe(r"^static +"), ""),
- (KernRe(r"^extern +"), ""),
- (KernRe(r"^asmlinkage +"), ""),
- (KernRe(r"^inline +"), ""),
- (KernRe(r"^__inline__ +"), ""),
- (KernRe(r"^__inline +"), ""),
- (KernRe(r"^__always_inline +"), ""),
- (KernRe(r"^noinline +"), ""),
- (KernRe(r"^__FORTIFY_INLINE +"), ""),
- (KernRe(r"__init +"), ""),
- (KernRe(r"__init_or_module +"), ""),
- (KernRe(r"__deprecated +"), ""),
- (KernRe(r"__flatten +"), ""),
- (KernRe(r"__meminit +"), ""),
- (KernRe(r"__must_check +"), ""),
- (KernRe(r"__weak +"), ""),
- (KernRe(r"__sched +"), ""),
- (KernRe(r"_noprof"), ""),
- (KernRe(r"__printf\s*\(\s*\d*\s*,\s*\d*\s*\) +"), ""),
- (KernRe(r"__(?:re)?alloc_size\s*\(\s*\d+\s*(?:,\s*\d+\s*)?\) +"), ""),
- (KernRe(r"__diagnose_as\s*\(\s*\S+\s*(?:,\s*\d+\s*)*\) +"), ""),
- (KernRe(r"DECL_BUCKET_PARAMS\s*\(\s*(\S+)\s*,\s*(\S+)\s*\)"), r"\1, \2"),
- (KernRe(r"__attribute_const__ +"), ""),
- (KernRe(r"__attribute__\s*\(\((?:[\w\s]+(?:\([^)]*\))?\s*,?)+\)\)\s+"), ""),
-]
-
-#
-# Apply a set of transforms to a block of text.
-#
-def apply_transforms(xforms, text):
- for search, subst in xforms:
- text = search.sub(subst, text)
- return text
-
-#
-# A little helper to get rid of excess white space
-#
-multi_space = KernRe(r'\s\s+')
-def trim_whitespace(s):
- return multi_space.sub(' ', s.strip())
-
-#
-# Remove struct/enum members that have been marked "private".
-#
-def trim_private_members(text):
- #
- # First look for a "public:" block that ends a private region, then
- # handle the "private until the end" case.
- #
- text = KernRe(r'/\*\s*private:.*?/\*\s*public:.*?\*/', flags=re.S).sub('', text)
- text = KernRe(r'/\*\s*private:.*', flags=re.S).sub('', text)
- #
- # We needed the comments to do the above, but now we can take them out.
- #
- return KernRe(r'\s*/\*.*?\*/\s*', flags=re.S).sub('', text).strip()
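-
-# Editor's illustration (hypothetical struct body): given
-#
-#     int a; /* private: */ int b; /* public: */ int c;
-#
-# trim_private_members() drops the private region ("int b;") along with
-# the marker comments, leaving roughly "int a;  int c;".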
-
-class state:
- """
- State machine enums
- """
-
- # Parser states
- NORMAL = 0 # normal code
- NAME = 1 # looking for function name
- DECLARATION = 2 # We have seen a declaration which might not be done
- BODY = 3 # the body of the comment
- SPECIAL_SECTION = 4 # doc section ending with a blank line
- PROTO = 5 # scanning prototype
- DOCBLOCK = 6 # documentation block
- INLINE_NAME = 7 # gathering doc outside main block
- INLINE_TEXT = 8 # reading the body of inline docs
-
- name = [
- "NORMAL",
- "NAME",
- "DECLARATION",
- "BODY",
- "SPECIAL_SECTION",
- "PROTO",
- "DOCBLOCK",
- "INLINE_NAME",
- "INLINE_TEXT",
- ]
-
-
-SECTION_DEFAULT = "Description" # default section
-
-class KernelEntry:
-
- def __init__(self, config, ln):
- self.config = config
-
- self._contents = []
- self.prototype = ""
-
- self.warnings = []
-
- self.parameterlist = []
- self.parameterdescs = {}
- self.parametertypes = {}
- self.parameterdesc_start_lines = {}
-
- self.section_start_lines = {}
- self.sections = {}
-
- self.anon_struct_union = False
-
- self.leading_space = None
-
- # State flags
- self.brcount = 0
- self.declaration_start_line = ln + 1
-
- #
- # Management of section contents
- #
- def add_text(self, text):
- self._contents.append(text)
-
- def contents(self):
- return '\n'.join(self._contents) + '\n'
-
- # TODO: rename to emit_message after removal of kernel-doc.pl
- def emit_msg(self, log_msg, warning=True):
- """Emit a message"""
-
- if not warning:
- self.config.log.info(log_msg)
- return
-
- # Delegate warning output to output logic, as this way it
- # will report warnings/info only for symbols that are output
-
- self.warnings.append(log_msg)
- return
-
- #
- # Begin a new section.
- #
- def begin_section(self, line_no, title = SECTION_DEFAULT, dump = False):
- if dump:
- self.dump_section(start_new = True)
- self.section = title
- self.new_start_line = line_no
-
- def dump_section(self, start_new=True):
- """
- Dumps section contents to arrays/hashes intended for that purpose.
- """
- #
- # If we have accumulated no contents in the default ("description")
- # section, don't bother.
- #
- if self.section == SECTION_DEFAULT and not self._contents:
- return
- name = self.section
- contents = self.contents()
-
- if type_param.match(name):
- name = type_param.group(1)
-
- self.parameterdescs[name] = contents
- self.parameterdesc_start_lines[name] = self.new_start_line
-
- self.new_start_line = 0
-
- else:
- if name in self.sections and self.sections[name] != "":
- # Only warn on user-specified duplicate section names
- if name != SECTION_DEFAULT:
- self.emit_msg(f"duplicate section name '{name}'\n")
- # Treat as a new paragraph - add a blank line
- self.sections[name] += '\n' + contents
- else:
- self.sections[name] = contents
- self.section_start_lines[name] = self.new_start_line
- self.new_start_line = 0
-
-# self.config.log.debug("Section: %s : %s", name, pformat(vars(self)))
-
- if start_new:
- self.section = SECTION_DEFAULT
- self._contents = []
-
-
-class KernelDoc:
- """
- Read a C language source or header FILE and extract embedded
- documentation comments.
- """
-
- # Section names
-
- section_context = "Context"
- section_return = "Return"
-
- undescribed = "-- undescribed --"
-
- def __init__(self, config, fname):
- """Initialize internal variables"""
-
- self.fname = fname
- self.config = config
-
- # Initial state for the state machines
- self.state = state.NORMAL
-
- # Store entry currently being processed
- self.entry = None
-
- # Place all potential outputs into an array
- self.entries = []
-
- #
- # We need Python 3.7 for its "dicts remember the insertion
- # order" guarantee
- #
- if sys.version_info < (3, 7):
- self.emit_msg(0,
- 'Python 3.7 or later is required for correct results')
-
- def emit_msg(self, ln, msg, warning=True):
- """Emit a message"""
-
- log_msg = f"{self.fname}:{ln} {msg}"
-
- if self.entry:
- self.entry.emit_msg(log_msg, warning)
- return
-
- if warning:
- self.config.log.warning(log_msg)
- else:
- self.config.log.info(log_msg)
-
- def dump_section(self, start_new=True):
- """
- Dumps section contents to arrays/hashes intended for that purpose.
- """
-
- if self.entry:
- self.entry.dump_section(start_new)
-
- # TODO: rename it to store_declaration after removal of kernel-doc.pl
- def output_declaration(self, dtype, name, **args):
- """
- Stores the entry into an entry array.
-
- The actual output and output filters will be handled elsewhere
- """
-
- item = KdocItem(name, dtype, self.entry.declaration_start_line, **args)
- item.warnings = self.entry.warnings
-
- # Drop empty sections
- # TODO: improve empty sections logic to emit warnings
- sections = self.entry.sections
- for section in ["Description", "Return"]:
- if section in sections and not sections[section].rstrip():
- del sections[section]
- item.set_sections(sections, self.entry.section_start_lines)
- item.set_params(self.entry.parameterlist, self.entry.parameterdescs,
- self.entry.parametertypes,
- self.entry.parameterdesc_start_lines)
- self.entries.append(item)
-
- self.config.log.debug("Output: %s:%s = %s", dtype, name, pformat(args))
-
- def reset_state(self, ln):
- """
- Ancillary routine to create a new entry. It initializes all
- variables used by the state machine.
- """
-
- self.entry = KernelEntry(self.config, ln)
-
- # State flags
- self.state = state.NORMAL
-
- def push_parameter(self, ln, decl_type, param, dtype,
- org_arg, declaration_name):
- """
- Store parameters and their descriptions at self.entry.
- """
-
- if self.entry.anon_struct_union and dtype == "" and param == "}":
- return # Ignore the ending }; from anonymous struct/union
-
- self.entry.anon_struct_union = False
-
- param = KernRe(r'[\[\)].*').sub('', param, count=1)
-
- #
- # Look at various "anonymous type" cases.
- #
- if dtype == '':
- if param.endswith("..."):
- if len(param) > 3: # there is a name provided, use that
- param = param[:-3]
- if not self.entry.parameterdescs.get(param):
- self.entry.parameterdescs[param] = "variable arguments"
-
- elif (not param) or param == "void":
- param = "void"
- self.entry.parameterdescs[param] = "no arguments"
-
- elif param in ["struct", "union"]:
- # Handle unnamed (anonymous) union or struct
- dtype = param
- param = "{unnamed_" + param + "}"
- self.entry.parameterdescs[param] = "anonymous\n"
- self.entry.anon_struct_union = True
-
- # Warn if parameter has no description
- # (but ignore ones starting with # as these are not parameters
- # but inline preprocessor statements)
- if param not in self.entry.parameterdescs and not param.startswith("#"):
- self.entry.parameterdescs[param] = self.undescribed
-
- if "." not in param:
- if decl_type == 'function':
- dname = f"{decl_type} parameter"
- else:
- dname = f"{decl_type} member"
-
- self.emit_msg(ln,
- f"{dname} '{param}' not described in '{declaration_name}'")
-
- # Strip spaces from param so that it is one continuous string on
- # parameterlist. This fixes a problem where check_sections()
- # cannot find a parameter like "addr[6 + 2]" because it actually
- # appears as "addr[6", "+", "2]" on the parameter list.
- # However, it's better to maintain the param string unchanged for
- # output, so just weaken the string compare in check_sections()
- # to ignore "[blah" in a parameter string.
-
- self.entry.parameterlist.append(param)
- org_arg = KernRe(r'\s\s+').sub(' ', org_arg)
- self.entry.parametertypes[param] = org_arg
-
-
- def create_parameter_list(self, ln, decl_type, args,
- splitter, declaration_name):
- """
- Creates a list of parameters, storing them at self.entry.
- """
-
- # temporarily replace all commas inside function pointer definition
- arg_expr = KernRe(r'(\([^\),]+),')
- while arg_expr.search(args):
- args = arg_expr.sub(r"\1#", args)
-
- for arg in args.split(splitter):
- # Ignore argument attributes
- arg = KernRe(r'\sPOS0?\s').sub(' ', arg)
-
- # Strip leading/trailing spaces
- arg = arg.strip()
- arg = KernRe(r'\s+').sub(' ', arg, count=1)
-
- if arg.startswith('#'):
- # Treat preprocessor directive as a typeless variable just to fill
- # corresponding data structures "correctly". Catch it later in
- # output_* subs.
-
- # Treat preprocessor directive as a typeless variable
- self.push_parameter(ln, decl_type, arg, "",
- "", declaration_name)
- #
- # The pointer-to-function case.
- #
- elif KernRe(r'\(.+\)\s*\(').search(arg):
- arg = arg.replace('#', ',')
- r = KernRe(r'[^\(]+\(\*?\s*' # Everything up to "(*"
- r'([\w\[\].]*)' # Capture the name and possible [array]
- r'\s*\)') # Make sure the trailing ")" is there
- if r.match(arg):
- param = r.group(1)
- else:
- self.emit_msg(ln, f"Invalid param: {arg}")
- param = arg
- dtype = arg.replace(param, '')
- self.push_parameter(ln, decl_type, param, dtype, arg, declaration_name)
- #
- # The array-of-pointers case. Dig the parameter name out from the middle
- # of the declaration.
- #
- elif KernRe(r'\(.+\)\s*\[').search(arg):
- r = KernRe(r'[^\(]+\(\s*\*\s*' # Up to "(" and maybe "*"
- r'([\w.]*?)' # The actual pointer name
- r'\s*(\[\s*\w+\s*\]\s*)*\)') # The [array portion]
- if r.match(arg):
- param = r.group(1)
- else:
- self.emit_msg(ln, f"Invalid param: {arg}")
- param = arg
- dtype = arg.replace(param, '')
- self.push_parameter(ln, decl_type, param, dtype, arg, declaration_name)
- elif arg:
- #
- # Clean up extraneous spaces and split the string at commas; the first
- # element of the resulting list will also include the type information.
- #
- arg = KernRe(r'\s*:\s*').sub(":", arg)
- arg = KernRe(r'\s*\[').sub('[', arg)
- args = KernRe(r'\s*,\s*').split(arg)
- args[0] = re.sub(r'(\*+)\s*', r' \1', args[0])
- #
- # args[0] has a string of "type a". If "a" includes an [array]
- # declaration, we want to not be fooled by any white space inside
- # the brackets, so detect and handle that case specially.
- #
- r = KernRe(r'^([^[\]]*\s+)(.*)$')
- if r.match(args[0]):
- args[0] = r.group(2)
- dtype = r.group(1)
- else:
- # No space in args[0]; this seems wrong but preserves previous behavior
- dtype = ''
-
- bitfield_re = KernRe(r'(.*?):(\w+)')
- for param in args:
- #
- # For pointers, shift the star(s) from the variable name to the
- # type declaration.
- #
- r = KernRe(r'^(\*+)\s*(.*)')
- if r.match(param):
- self.push_parameter(ln, decl_type, r.group(2),
- f"{dtype} {r.group(1)}",
- arg, declaration_name)
- #
- # Perform a similar shift for bitfields.
- #
- elif bitfield_re.search(param):
- if dtype != "": # Skip unnamed bit-fields
- self.push_parameter(ln, decl_type, bitfield_re.group(1),
- f"{dtype}:{bitfield_re.group(2)}",
- arg, declaration_name)
- else:
- self.push_parameter(ln, decl_type, param, dtype,
- arg, declaration_name)
-
- def check_sections(self, ln, decl_name, decl_type):
- """
- Check for errors inside sections, emitting warnings for described
- parameters or members that are not present in the declaration.
- """
- for section in self.entry.sections:
- if section not in self.entry.parameterlist and \
- not known_sections.search(section):
- if decl_type == 'function':
- dname = f"{decl_type} parameter"
- else:
- dname = f"{decl_type} member"
- self.emit_msg(ln,
- f"Excess {dname} '{section}' description in '{decl_name}'")
-
- def check_return_section(self, ln, declaration_name, return_type):
- """
- If the function doesn't return void, warns about the lack of a
- return description.
- """
-
- if not self.config.wreturn:
- return
-
- # Ignore an empty return type (It's a macro)
- # Ignore functions with a "void" return type (but not "void *")
- if not return_type or KernRe(r'void\s*\w*\s*$').search(return_type):
- return
-
- if not self.entry.sections.get("Return", None):
- self.emit_msg(ln,
- f"No description found for return value of '{declaration_name}'")
-
- #
- # Split apart a structure prototype; returns (struct|union, name, members) or None
- #
- def split_struct_proto(self, proto):
- type_pattern = r'(struct|union)'
- qualifiers = [
- "__attribute__",
- "__packed",
- "__aligned",
- "____cacheline_aligned_in_smp",
- "____cacheline_aligned",
- ]
- definition_body = r'\{(.*)\}\s*' + "(?:" + '|'.join(qualifiers) + ")?"
-
- r = KernRe(type_pattern + r'\s+(\w+)\s*' + definition_body)
- if r.search(proto):
- return (r.group(1), r.group(2), r.group(3))
- else:
- r = KernRe(r'typedef\s+' + type_pattern + r'\s*' + definition_body + r'\s*(\w+)\s*;')
- if r.search(proto):
- return (r.group(1), r.group(3), r.group(2))
- return None
- #
- # Rewrite the members of a structure or union for easier formatting later on.
- # Among other things, this function will turn a member like:
- #
- # struct { inner_members; } foo;
- #
- # into:
- #
- # struct foo; inner_members;
- #
- def rewrite_struct_members(self, members):
- #
- # Process struct/union members from the most deeply nested outward. The
- # trick is in the ^{ below - it prevents a match of an outer struct/union
- # until the inner one has been munged (removing the "{" in the process).
- #
- struct_members = KernRe(r'(struct|union)' # 0: declaration type
- r'([^\{\};]+)' # 1: possible name
- r'(\{)'
- r'([^\{\}]*)' # 3: Contents of declaration
- r'(\})'
- r'([^\{\};]*)(;)') # 5: Remaining stuff after declaration
- tuples = struct_members.findall(members)
- while tuples:
- for t in tuples:
- newmember = ""
- oldmember = "".join(t) # Reconstruct the original formatting
- dtype, name, lbr, content, rbr, rest, semi = t
- #
- # Pass through each field name, normalizing the form and formatting.
- #
- for s_id in rest.split(','):
- s_id = s_id.strip()
- newmember += f"{dtype} {s_id}; "
- #
- # Remove bitfield/array/pointer info, getting the bare name.
- #
- s_id = KernRe(r'[:\[].*').sub('', s_id)
- s_id = KernRe(r'^\s*\**(\S+)\s*').sub(r'\1', s_id)
- #
- # Pass through the members of this inner structure/union.
- #
- for arg in content.split(';'):
- arg = arg.strip()
- #
- # Look for (type)(*name)(args) - pointer to function
- #
- r = KernRe(r'^([^\(]+\(\*?\s*)([\w.]*)(\s*\).*)')
- if r.match(arg):
- dtype, name, extra = r.group(1), r.group(2), r.group(3)
- # Pointer-to-function
- if not s_id:
- # Anonymous struct/union
- newmember += f"{dtype}{name}{extra}; "
- else:
- newmember += f"{dtype}{s_id}.{name}{extra}; "
- #
- # Otherwise a non-function member.
- #
- else:
- #
- # Remove bitmap and array portions and spaces around commas
- #
- arg = KernRe(r':\s*\d+\s*').sub('', arg)
- arg = KernRe(r'\[.*\]').sub('', arg)
- arg = KernRe(r'\s*,\s*').sub(',', arg)
- #
- # Look for a normal decl - "type name[,name...]"
- #
- r = KernRe(r'(.*)\s+([\S+,]+)')
- if r.search(arg):
- for name in r.group(2).split(','):
- name = KernRe(r'^\s*\**(\S+)\s*').sub(r'\1', name)
- if not s_id:
- # Anonymous struct/union
- newmember += f"{r.group(1)} {name}; "
- else:
- newmember += f"{r.group(1)} {s_id}.{name}; "
- else:
- newmember += f"{arg}; "
- #
- # At the end of the s_id loop, replace the original declaration with
- # the munged version.
- #
- members = members.replace(oldmember, newmember)
- #
- # End of the tuple loop - search again and see if there are outer members
- # that now turn up.
- #
- tuples = struct_members.findall(members)
- return members
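-
- # Editor's illustration (hypothetical input): a nested member such as
- #
- #     struct { int x; int y; } point;
- #
- # comes back from rewrite_struct_members() as roughly
- #
- #     struct point; int point.x; int point.y;
- #
- # so each inner field can be matched against @point.x style markups.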
-
- #
- # Format the struct declaration into a standard form for inclusion in the
- # resulting docs.
- #
- def format_struct_decl(self, declaration):
- #
- # Insert newlines, get rid of extra spaces.
- #
- declaration = KernRe(r'([\{;])').sub(r'\1\n', declaration)
- declaration = KernRe(r'\}\s+;').sub('};', declaration)
- #
- # Format inline enums with each member on its own line.
- #
- r = KernRe(r'(enum\s+\{[^\}]+),([^\n])')
- while r.search(declaration):
- declaration = r.sub(r'\1,\n\2', declaration)
- #
- # Now go through and supply the right number of tabs
- # for each line.
- #
- def_args = declaration.split('\n')
- level = 1
- declaration = ""
- for clause in def_args:
- clause = KernRe(r'\s+').sub(' ', clause.strip(), count=1)
- if clause:
- if '}' in clause and level > 1:
- level -= 1
- if not clause.startswith('#'):
- declaration += "\t" * level
- declaration += "\t" + clause + "\n"
- if "{" in clause and "}" not in clause:
- level += 1
- return declaration
-
-
- def dump_struct(self, ln, proto):
- """
- Store an entry for a struct or union
- """
- #
- # Do the basic parse to get the pieces of the declaration.
- #
- struct_parts = self.split_struct_proto(proto)
- if not struct_parts:
- self.emit_msg(ln, f"{proto} error: Cannot parse struct or union!")
- return
- decl_type, declaration_name, members = struct_parts
-
- if self.entry.identifier != declaration_name:
- self.emit_msg(ln, f"expecting prototype for {decl_type} {self.entry.identifier}. "
- f"Prototype was for {decl_type} {declaration_name} instead\n")
- return
- #
- # Go through the list of members applying all of our transformations.
- #
- members = trim_private_members(members)
- members = apply_transforms(struct_xforms, members)
-
- nested = NestedMatch()
- for search, sub in struct_nested_prefixes:
- members = nested.sub(search, sub, members)
- #
- # Deal with embedded struct and union members, and drop enums entirely.
- #
- declaration = members
- members = self.rewrite_struct_members(members)
- members = re.sub(r'(\{[^\{\}]*\})', '', members)
- #
- # Output the result and we are done.
- #
- self.create_parameter_list(ln, decl_type, members, ';',
- declaration_name)
- self.check_sections(ln, declaration_name, decl_type)
- self.output_declaration(decl_type, declaration_name,
- definition=self.format_struct_decl(declaration),
- purpose=self.entry.declaration_purpose)
-
- def dump_enum(self, ln, proto):
- """
- Stores an enum inside self.entries array.
- """
- #
- # Strip preprocessor directives. Note that this depends on the
- # trailing semicolon we added in process_proto_type().
- #
- proto = KernRe(r'#\s*((define|ifdef|if)\s+|endif)[^;]*;', flags=re.S).sub('', proto)
- #
- # Parse out the name and members of the enum. Typedef form first.
- #
- r = KernRe(r'typedef\s+enum\s*\{(.*)\}\s*(\w*)\s*;')
- if r.search(proto):
- declaration_name = r.group(2)
- members = trim_private_members(r.group(1))
- #
- # Failing that, look for a straight enum
- #
- else:
- r = KernRe(r'enum\s+(\w*)\s*\{(.*)\}')
- if r.match(proto):
- declaration_name = r.group(1)
- members = trim_private_members(r.group(2))
- #
- # OK, this isn't going to work.
- #
- else:
- self.emit_msg(ln, f"{proto}: error: Cannot parse enum!")
- return
- #
- # Make sure we found what we were expecting.
- #
- if self.entry.identifier != declaration_name:
- if self.entry.identifier == "":
- self.emit_msg(ln,
- f"{proto}: wrong kernel-doc identifier on prototype")
- else:
- self.emit_msg(ln,
- f"expecting prototype for enum {self.entry.identifier}. "
- f"Prototype was for enum {declaration_name} instead")
- return
-
- if not declaration_name:
- declaration_name = "(anonymous)"
- #
- # Parse out the name of each enum member, and verify that we
- # have a description for it.
- #
- member_set = set()
- members = KernRe(r'\([^;)]*\)').sub('', members)
- for arg in members.split(','):
- if not arg:
- continue
- arg = KernRe(r'^\s*(\w+).*').sub(r'\1', arg)
- self.entry.parameterlist.append(arg)
- if arg not in self.entry.parameterdescs:
- self.entry.parameterdescs[arg] = self.undescribed
- self.emit_msg(ln,
- f"Enum value '{arg}' not described in enum '{declaration_name}'")
- member_set.add(arg)
- #
- # Ensure that every described member actually exists in the enum.
- #
- for k in self.entry.parameterdescs:
- if k not in member_set:
- self.emit_msg(ln,
- f"Excess enum value '%{k}' description in '{declaration_name}'")
-
- self.output_declaration('enum', declaration_name,
- purpose=self.entry.declaration_purpose)
-
- def dump_declaration(self, ln, prototype):
- """
- Stores a data declaration inside self.entries array.
- """
-
- if self.entry.decl_type == "enum":
- self.dump_enum(ln, prototype)
- elif self.entry.decl_type == "typedef":
- self.dump_typedef(ln, prototype)
- elif self.entry.decl_type in ["union", "struct"]:
- self.dump_struct(ln, prototype)
- else:
- # This would be a bug
- self.emit_msg(ln, f'Unknown declaration type: {self.entry.decl_type}')
-
- def dump_function(self, ln, prototype):
- """
- Stores a function or function macro inside self.entries array.
- """
-
- found = func_macro = False
- return_type = ''
- decl_type = 'function'
- #
- # Apply the initial transformations.
- #
- prototype = apply_transforms(function_xforms, prototype)
- #
- # If we have a macro, remove the "#define" at the front.
- #
- new_proto = KernRe(r"^#\s*define\s+").sub("", prototype)
- if new_proto != prototype:
- prototype = new_proto
- #
- # Dispense with the simple "#define A B" case here; the key
- # is the space after the name of the symbol being defined.
- # NOTE that the seemingly misnamed "func_macro" indicates a
- # macro *without* arguments.
- #
- r = KernRe(r'^(\w+)\s+')
- if r.search(prototype):
- return_type = ''
- declaration_name = r.group(1)
- func_macro = True
- found = True
-
- # Yes, this truly is vile. We are looking for:
- # 1. Return type (may be nothing if we're looking at a macro)
- # 2. Function name
- # 3. Function parameters.
- #
- # All the while we have to watch out for function pointer parameters
- # (which IIRC is what the two sections are for), C types (these
- # regexps don't even start to express all the possibilities), and
- # so on.
- #
- # If you mess with these regexps, it's a good idea to check that
- # the following functions' documentation still comes out right:
- # - parport_register_device (function pointer parameters)
- # - atomic_set (macro)
- # - pci_match_device, __copy_to_user (long return type)
-
- name = r'\w+'
- type1 = r'(?:[\w\s]+)?'
- type2 = r'(?:[\w\s]+\*+)+'
- #
- # Attempt to match first on (args) with no internal parentheses; this
- # lets us easily filter out __acquires() and other post-args stuff. If
- # that fails, just grab the rest of the line to the last closing
- # parenthesis.
- #
- proto_args = r'\(([^\(]*|.*)\)'
- #
- # (Except for the simple macro case) attempt to split up the prototype
- # in the various ways we understand.
- #
- if not found:
- patterns = [
- rf'^()({name})\s*{proto_args}',
- rf'^({type1})\s+({name})\s*{proto_args}',
- rf'^({type2})\s*({name})\s*{proto_args}',
- ]
-
- for p in patterns:
- r = KernRe(p)
- if r.match(prototype):
- return_type = r.group(1)
- declaration_name = r.group(2)
- args = r.group(3)
- self.create_parameter_list(ln, decl_type, args, ',',
- declaration_name)
- found = True
- break
- #
- # Parsing done; make sure that things are as we expect.
- #
- if not found:
- self.emit_msg(ln,
- f"cannot understand function prototype: '{prototype}'")
- return
- if self.entry.identifier != declaration_name:
- self.emit_msg(ln, f"expecting prototype for {self.entry.identifier}(). "
- f"Prototype was for {declaration_name}() instead")
- return
- self.check_sections(ln, declaration_name, "function")
- self.check_return_section(ln, declaration_name, return_type)
- #
- # Store the result.
- #
- self.output_declaration(decl_type, declaration_name,
- typedef=('typedef' in return_type),
- functiontype=return_type,
- purpose=self.entry.declaration_purpose,
- func_macro=func_macro)
-
-
- def dump_typedef(self, ln, proto):
- """
- Stores a typedef inside self.entries array.
- """
- #
- # We start by looking for function typedefs.
- #
- typedef_type = r'typedef((?:\s+[\w*]+\b){0,7}\s+(?:\w+\b|\*+))\s*'
- typedef_ident = r'\*?\s*(\w\S+)\s*'
- typedef_args = r'\s*\((.*)\);'
-
- typedef1 = KernRe(typedef_type + r'\(' + typedef_ident + r'\)' + typedef_args)
- typedef2 = KernRe(typedef_type + typedef_ident + typedef_args)
-
- # Parse function typedef prototypes
- for r in [typedef1, typedef2]:
- if not r.match(proto):
- continue
-
- return_type = r.group(1).strip()
- declaration_name = r.group(2)
- args = r.group(3)
-
- if self.entry.identifier != declaration_name:
- self.emit_msg(ln,
- f"expecting prototype for typedef {self.entry.identifier}. Prototype was for typedef {declaration_name} instead\n")
- return
-
- self.create_parameter_list(ln, 'function', args, ',', declaration_name)
-
- self.output_declaration('function', declaration_name,
- typedef=True,
- functiontype=return_type,
- purpose=self.entry.declaration_purpose)
- return
- #
- # Not a function, try to parse a simple typedef.
- #
- r = KernRe(r'typedef.*\s+(\w+)\s*;')
- if r.match(proto):
- declaration_name = r.group(1)
-
- if self.entry.identifier != declaration_name:
- self.emit_msg(ln,
- f"expecting prototype for typedef {self.entry.identifier}. Prototype was for typedef {declaration_name} instead\n")
- return
-
- self.output_declaration('typedef', declaration_name,
- purpose=self.entry.declaration_purpose)
- return
-
- self.emit_msg(ln, "error: Cannot parse typedef!")
-
- @staticmethod
- def process_export(function_set, line):
- """
- process EXPORT_SYMBOL* tags
-
- This method doesn't use any variable from the class, so declare it
- with a staticmethod decorator.
- """
-
- # We support documenting some exported symbols with different
- # names. A horrible hack.
- suffixes = [ '_noprof' ]
-
- # Note: it accepts only one EXPORT_SYMBOL* per line, as having
- # multiple export lines would violate Kernel coding style.
-
- if export_symbol.search(line):
- symbol = export_symbol.group(2)
- elif export_symbol_ns.search(line):
- symbol = export_symbol_ns.group(2)
- else:
- return False
- #
- # Found an export, trim out any special suffixes
- #
- for suffix in suffixes:
- # str.removesuffix() would need Python >= 3.9, so trim by hand
- if symbol.endswith(suffix):
- symbol = symbol[:-len(suffix)]
- function_set.add(symbol)
- return True
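-
- # Illustration (hypothetical symbol): a line such as
- #
- # EXPORT_SYMBOL_GPL(foo_init_noprof);
- #
- # should add "foo_init" to function_set, with the special "_noprof"
- # suffix trimmed away.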
-
- def process_normal(self, ln, line):
- """
- STATE_NORMAL: looking for the /** to begin everything.
- """
-
- if not doc_start.match(line):
- return
-
- # start a new entry
- self.reset_state(ln)
-
- # next line is always the function name
- self.state = state.NAME
-
- def process_name(self, ln, line):
- """
- STATE_NAME: Looking for the "name - description" line
- """
- #
- # Check for a DOC: block and handle them specially.
- #
- if doc_block.search(line):
-
- if not doc_block.group(1):
- self.entry.begin_section(ln, "Introduction")
- else:
- self.entry.begin_section(ln, doc_block.group(1))
-
- self.entry.identifier = self.entry.section
- self.state = state.DOCBLOCK
- #
- # Otherwise we're looking for a normal kerneldoc declaration line.
- #
- elif doc_decl.search(line):
- self.entry.identifier = doc_decl.group(1)
-
- # Test for data declaration
- if doc_begin_data.search(line):
- self.entry.decl_type = doc_begin_data.group(1)
- self.entry.identifier = doc_begin_data.group(2)
- #
- # Look for a function description
- #
- elif doc_begin_func.search(line):
- self.entry.identifier = doc_begin_func.group(1)
- self.entry.decl_type = "function"
- #
- # We struck out.
- #
- else:
- self.emit_msg(ln,
- f"This comment starts with '/**', but isn't a kernel-doc comment. Refer Documentation/doc-guide/kernel-doc.rst\n{line}")
- self.state = state.NORMAL
- return
- #
- # OK, set up for a new kerneldoc entry.
- #
- self.state = state.BODY
- self.entry.identifier = self.entry.identifier.strip(" ")
- # If there are no @param blocks, we need to set up the default section here
- self.entry.begin_section(ln + 1)
- #
- # Find the description portion, which *should* be there but
- # isn't always.
- # (We should be able to capture this from the previous parsing - someday)
- #
- r = KernRe("[-:](.*)")
- if r.search(line):
- self.entry.declaration_purpose = trim_whitespace(r.group(1))
- self.state = state.DECLARATION
- else:
- self.entry.declaration_purpose = ""
-
- if not self.entry.declaration_purpose and self.config.wshort_desc:
- self.emit_msg(ln,
- f"missing initial short description on line:\n{line}")
-
- if not self.entry.identifier and self.entry.decl_type != "enum":
- self.emit_msg(ln,
- f"wrong kernel-doc identifier on line:\n{line}")
- self.state = state.NORMAL
-
- if self.config.verbose:
- self.emit_msg(ln,
- f"Scanning doc for {self.entry.decl_type} {self.entry.identifier}",
- warning=False)
- #
- # Failed to find an identifier. Emit a warning
- #
- else:
- self.emit_msg(ln, f"Cannot find identifier on line:\n{line}")
-
- #
- # Helper function to determine if a new section is being started.
- #
- def is_new_section(self, ln, line):
- if doc_sect.search(line):
- self.state = state.BODY
- #
- # Pick out the name of our new section, tweaking it if need be.
- #
- newsection = doc_sect.group(1)
- if newsection.lower() == 'description':
- newsection = 'Description'
- elif newsection.lower() == 'context':
- newsection = 'Context'
- self.state = state.SPECIAL_SECTION
- elif newsection.lower() in ["@return", "@returns",
- "return", "returns"]:
- newsection = "Return"
- self.state = state.SPECIAL_SECTION
- elif newsection[0] == '@':
- self.state = state.SPECIAL_SECTION
- #
- # Initialize the contents, and get the new section going.
- #
- newcontents = doc_sect.group(2)
- if not newcontents:
- newcontents = ""
- self.dump_section()
- self.entry.begin_section(ln, newsection)
- self.entry.leading_space = None
-
- self.entry.add_text(newcontents.lstrip())
- return True
- return False
-
- #
- # Helper function to detect (and effect) the end of a kerneldoc comment.
- #
- def is_comment_end(self, ln, line):
- if doc_end.search(line):
- self.dump_section()
-
- # Look for doc_com + <text> + doc_end:
- r = KernRe(r'\s*\*\s*[a-zA-Z_0-9:.]+\*/')
- if r.match(line):
- self.emit_msg(ln, f"suspicious ending line: {line}")
-
- self.entry.prototype = ""
- self.entry.new_start_line = ln + 1
-
- self.state = state.PROTO
- return True
- return False
-
-
- def process_decl(self, ln, line):
- """
- STATE_DECLARATION: We've seen the beginning of a declaration
- """
- if self.is_new_section(ln, line) or self.is_comment_end(ln, line):
- return
- #
- # Look for anything with the " * " line beginning.
- #
- if doc_content.search(line):
- cont = doc_content.group(1)
- #
- # A blank line means that we have moved out of the declaration
- # part of the comment (without any "special section" parameter
- # descriptions).
- #
- if cont == "":
- self.state = state.BODY
- #
- # Otherwise we have more of the declaration section to soak up.
- #
- else:
- self.entry.declaration_purpose = \
- trim_whitespace(self.entry.declaration_purpose + ' ' + cont)
- else:
- # Unknown line; warn about it
- self.emit_msg(ln, f"bad line: {line}")
-
-
- def process_special(self, ln, line):
- """
- STATE_SPECIAL_SECTION: a section ending with a blank line
- """
- #
- # If we have hit a blank line (only the " * " marker), then this
- # section is done.
- #
- if KernRe(r"\s*\*\s*$").match(line):
- self.entry.begin_section(ln, dump = True)
- self.state = state.BODY
- return
- #
- # Not a blank line, look for the other ways to end the section.
- #
- if self.is_new_section(ln, line) or self.is_comment_end(ln, line):
- return
- #
- # OK, we should have a continuation of the text for this section.
- #
- if doc_content.search(line):
- cont = doc_content.group(1)
- #
- # If the lines of text after the first in a special section have
- # leading white space, we need to trim it out or Sphinx will get
- # confused. For the second line (the None case), see what we
- # find there and remember it.
- #
- if self.entry.leading_space is None:
- r = KernRe(r'^(\s+)')
- if r.match(cont):
- self.entry.leading_space = len(r.group(1))
- else:
- self.entry.leading_space = 0
- #
- # Otherwise, before trimming any leading chars, be *sure*
- # that they are white space. We should maybe warn if this
- # isn't the case.
- #
- for i in range(0, self.entry.leading_space):
- if cont[i] != " ":
- self.entry.leading_space = i
- break
- #
- # Add the trimmed result to the section and we're done.
- #
- self.entry.add_text(cont[self.entry.leading_space:])
- else:
- # Unknown line; warn about it
- self.emit_msg(ln, f"bad line: {line}")
-
- def process_body(self, ln, line):
- """
- STATE_BODY: the bulk of a kerneldoc comment.
- """
- if self.is_new_section(ln, line) or self.is_comment_end(ln, line):
- return
-
- if doc_content.search(line):
- cont = doc_content.group(1)
- self.entry.add_text(cont)
- else:
- # Unknown line; warn about it
- self.emit_msg(ln, f"bad line: {line}")
-
- def process_inline_name(self, ln, line):
- """STATE_INLINE_NAME: beginning of docbook comments within a prototype."""
-
- if doc_inline_sect.search(line):
- self.entry.begin_section(ln, doc_inline_sect.group(1))
- self.entry.add_text(doc_inline_sect.group(2).lstrip())
- self.state = state.INLINE_TEXT
- elif doc_inline_end.search(line):
- self.dump_section()
- self.state = state.PROTO
- elif doc_content.search(line):
- self.emit_msg(ln, f"Incorrect use of kernel-doc format: {line}")
- self.state = state.PROTO
- # else ... ??
-
- def process_inline_text(self, ln, line):
- """STATE_INLINE_TEXT: docbook comments within a prototype."""
-
- if doc_inline_end.search(line):
- self.dump_section()
- self.state = state.PROTO
- elif doc_content.search(line):
- self.entry.add_text(doc_content.group(1))
- # else ... ??
-
- def syscall_munge(self, ln, proto): # pylint: disable=W0613
- """
- Handle syscall definitions
- """
-
- is_void = False
-
- # Strip newlines/CR's
- proto = re.sub(r'[\r\n]+', ' ', proto)
-
- # Check if it's a SYSCALL_DEFINE0
- if 'SYSCALL_DEFINE0' in proto:
- is_void = True
-
- # Replace SYSCALL_DEFINE with correct return type & function name
- proto = KernRe(r'SYSCALL_DEFINE.*\(').sub('long sys_', proto)
-
- r = KernRe(r'long\s+(sys_.*?),')
- if r.search(proto):
- proto = KernRe(',').sub('(', proto, count=1)
- elif is_void:
- proto = KernRe(r'\)').sub('(void)', proto, count=1)
-
- # Now delete all of the odd-numbered commas in the proto
- # so that argument types & names don't have a comma between them
- count = 0
- length = len(proto)
-
- if is_void:
- length = 0 # skip the loop if is_void
-
- for ix in range(length):
- if proto[ix] == ',':
- count += 1
- if count % 2 == 1:
- proto = proto[:ix] + ' ' + proto[ix + 1:]
-
- return proto
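-
- # As a rough illustration (dup2 is a real syscall, used here only as
- # an example; the resulting whitespace is approximate):
- #
- # SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd)
- #
- # is munged into:
- #
- # long sys_dup2(unsigned int oldfd, unsigned int newfd)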
-
- def tracepoint_munge(self, ln, proto):
- """
- Handle tracepoint definitions
- """
-
- tracepointname = None
- tracepointargs = None
-
- # Match tracepoint name based on different patterns
- r = KernRe(r'TRACE_EVENT\((.*?),')
- if r.search(proto):
- tracepointname = r.group(1)
-
- r = KernRe(r'DEFINE_SINGLE_EVENT\((.*?),')
- if r.search(proto):
- tracepointname = r.group(1)
-
- r = KernRe(r'DEFINE_EVENT\((.*?),(.*?),')
- if r.search(proto):
- tracepointname = r.group(2)
-
- if tracepointname:
- tracepointname = tracepointname.lstrip()
-
- r = KernRe(r'TP_PROTO\((.*?)\)')
- if r.search(proto):
- tracepointargs = r.group(1)
-
- if not tracepointname or not tracepointargs:
- self.emit_msg(ln,
- f"Unrecognized tracepoint format:\n{proto}\n")
- else:
- proto = f"static inline void trace_{tracepointname}({tracepointargs})"
- self.entry.identifier = f"trace_{self.entry.identifier}"
-
- return proto
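-
- # As a rough illustration (hypothetical event):
- #
- # TRACE_EVENT(foo_bar, TP_PROTO(int baz), ...)
- #
- # is munged into a prototype the function parser understands:
- #
- # static inline void trace_foo_bar(int baz)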
-
- def process_proto_function(self, ln, line):
- """Ancillary routine to process a function prototype"""
-
- # strip C99-style comments to end of line
- line = KernRe(r"//.*$", re.S).sub('', line)
- #
- # Soak up the line's worth of prototype text, stopping at { or ; if present.
- #
- if KernRe(r'\s*#\s*define').match(line):
- self.entry.prototype = line
- elif not line.startswith('#'): # skip other preprocessor stuff
- r = KernRe(r'([^\{]*)')
- if r.match(line):
- self.entry.prototype += r.group(1) + " "
- #
- # If we now have the whole prototype, clean it up and declare victory.
- #
- if '{' in line or ';' in line or KernRe(r'\s*#\s*define').match(line):
- # strip comments and surrounding spaces
- self.entry.prototype = KernRe(r'/\*.*\*/').sub('', self.entry.prototype).strip()
- #
- # Handle self.entry.prototypes for function pointers like:
- # int (*pcs_config)(struct foo)
- # by turning it into
- # int pcs_config(struct foo)
- #
- r = KernRe(r'^(\S+\s+)\(\s*\*(\S+)\)')
- self.entry.prototype = r.sub(r'\1\2', self.entry.prototype)
- #
- # Handle special declaration syntaxes
- #
- if 'SYSCALL_DEFINE' in self.entry.prototype:
- self.entry.prototype = self.syscall_munge(ln,
- self.entry.prototype)
- else:
- r = KernRe(r'TRACE_EVENT|DEFINE_EVENT|DEFINE_SINGLE_EVENT')
- if r.search(self.entry.prototype):
- self.entry.prototype = self.tracepoint_munge(ln,
- self.entry.prototype)
- #
- # ... and we're done
- #
- self.dump_function(ln, self.entry.prototype)
- self.reset_state(ln)
-
- def process_proto_type(self, ln, line):
- """Ancillary routine to process a type"""
-
- # Strip C99-style comments and surrounding whitespace
- line = KernRe(r"//.*$", re.S).sub('', line).strip()
- if not line:
- return # nothing to see here
-
- # To distinguish preprocessor directive from regular declaration later.
- if line.startswith('#'):
- line += ";"
- #
- # Split the declaration on any of { } or ;, and accumulate pieces
- # until we hit a semicolon while not inside {braces}
- #
- r = KernRe(r'(.*?)([{};])')
- for chunk in r.split(line):
- if chunk: # Ignore empty matches
- self.entry.prototype += chunk
- #
- # This cries out for a match statement ... someday after we can
- # drop Python 3.9 ...
- #
- if chunk == '{':
- self.entry.brcount += 1
- elif chunk == '}':
- self.entry.brcount -= 1
- elif chunk == ';' and self.entry.brcount <= 0:
- self.dump_declaration(ln, self.entry.prototype)
- self.reset_state(ln)
- return
- #
- # We hit the end of the line while still in the declaration; put
- # in a space to represent the newline.
- #
- self.entry.prototype += ' '
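-
- # As a rough illustration (hypothetical struct), feeding the lines:
- #
- # struct foo {
- # int a;
- # };
- #
- # accumulates "struct foo { int a; };" (with newlines turned into
- # spaces) and calls dump_declaration() once the final ';' is seen
- # with brcount back at zero.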
-
- def process_proto(self, ln, line):
- """STATE_PROTO: reading a function/whatever prototype."""
-
- if doc_inline_oneline.search(line):
- self.entry.begin_section(ln, doc_inline_oneline.group(1))
- self.entry.add_text(doc_inline_oneline.group(2))
- self.dump_section()
-
- elif doc_inline_start.search(line):
- self.state = state.INLINE_NAME
-
- elif self.entry.decl_type == 'function':
- self.process_proto_function(ln, line)
-
- else:
- self.process_proto_type(ln, line)
-
- def process_docblock(self, ln, line):
- """STATE_DOCBLOCK: within a DOC: block."""
-
- if doc_end.search(line):
- self.dump_section()
- self.output_declaration("doc", self.entry.identifier)
- self.reset_state(ln)
-
- elif doc_content.search(line):
- self.entry.add_text(doc_content.group(1))
-
- def parse_export(self):
- """
- Parses EXPORT_SYMBOL* macros from a single Kernel source file.
- """
-
- export_table = set()
-
- try:
- with open(self.fname, "r", encoding="utf8",
- errors="backslashreplace") as fp:
-
- for line in fp:
- self.process_export(export_table, line)
-
- except IOError:
- return None
-
- return export_table
-
- #
- # The state/action table telling us which function to invoke in
- # each state.
- #
- state_actions = {
- state.NORMAL: process_normal,
- state.NAME: process_name,
- state.BODY: process_body,
- state.DECLARATION: process_decl,
- state.SPECIAL_SECTION: process_special,
- state.INLINE_NAME: process_inline_name,
- state.INLINE_TEXT: process_inline_text,
- state.PROTO: process_proto,
- state.DOCBLOCK: process_docblock,
- }
-
- def parse_kdoc(self):
- """
- Open and process each line of a C source file.
- The parsing is controlled via a state machine, and the line is passed
- to a different process function depending on the state. The process
- function may update the state as needed.
-
- Besides parsing kernel-doc tags, it also parses export symbols.
- """
-
- prev = ""
- prev_ln = None
- export_table = set()
-
- try:
- with open(self.fname, "r", encoding="utf8",
- errors="backslashreplace") as fp:
- for ln, line in enumerate(fp):
-
- line = line.expandtabs().strip("\n")
-
- # Group continuation lines on prototypes
- if self.state == state.PROTO:
- if line.endswith("\\"):
- prev += line.rstrip("\\")
- if not prev_ln:
- prev_ln = ln
- continue
-
- if prev:
- ln = prev_ln
- line = prev + line
- prev = ""
- prev_ln = None
-
- self.config.log.debug("%d %s: %s",
- ln, state.name[self.state],
- line)
-
- # This is an optimization over the original script.
- # There, when export_file was used for the same file,
- # it was read twice. Here, we use the already-existing
- # loop to parse exported symbols as well.
- #
- if (self.state != state.NORMAL) or \
- not self.process_export(export_table, line):
- # Hand this line to the appropriate state handler
- self.state_actions[self.state](self, ln, line)
-
- except OSError:
- self.config.log.error(f"Error: Cannot open file {self.fname}")
-
- return export_table, self.entries
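-
-# A hedged usage sketch; the class name and constructor signature are
-# assumptions here, as they are defined earlier in this file:
-#
-# parser = KernelDoc(config, "drivers/foo/bar.c")
-# exports, entries = parser.parse_kdoc()
-#
-# "exports" is the set of EXPORT_SYMBOL*'ed names found; "entries"
-# holds the parsed kernel-doc declarations.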
diff --git a/scripts/lib/kdoc/kdoc_re.py b/scripts/lib/kdoc/kdoc_re.py
deleted file mode 100644
index 612223e1e723..000000000000
--- a/scripts/lib/kdoc/kdoc_re.py
+++ /dev/null
@@ -1,270 +0,0 @@
-#!/usr/bin/env python3
-# SPDX-License-Identifier: GPL-2.0
-# Copyright(c) 2025: Mauro Carvalho Chehab <mchehab@kernel.org>.
-
-"""
-Regular expression ancillary classes.
-
-Those help caching regular expressions and do matching for kernel-doc.
-"""
-
-import re
-
-# Local cache for regular expressions
-re_cache = {}
-
-
-class KernRe:
- """
- Helper class to simplify regex declaration and usage.
-
- It calls re.compile for a given pattern, and allows concatenating
- regular expressions via the "+" operator.
-
- Compiled regular expressions can be cached via an argument, helping
- to speed up searches.
- """
-
- def _add_regex(self, string, flags):
- """
- Adds a new regex or re-uses it from the cache.
- """
- self.regex = re_cache.get(string, None)
- if not self.regex:
- self.regex = re.compile(string, flags=flags)
- if self.cache:
- re_cache[string] = self.regex
-
- def __init__(self, string, cache=True, flags=0):
- """
- Compile a regular expression and initialize internal vars.
- """
-
- self.cache = cache
- self.last_match = None
-
- self._add_regex(string, flags)
-
- def __str__(self):
- """
- Return the regular expression pattern.
- """
- return self.regex.pattern
-
- def __add__(self, other):
- """
- Allows adding two regular expressions into one.
- """
-
- return KernRe(str(self) + str(other), cache=self.cache or other.cache,
- flags=self.regex.flags | other.regex.flags)
-
- def match(self, string):
- """
- Handles a re.match, storing its results
- """
-
- self.last_match = self.regex.match(string)
- return self.last_match
-
- def search(self, string):
- """
- Handles a re.search storing its results
- """
-
- self.last_match = self.regex.search(string)
- return self.last_match
-
- def findall(self, string):
- """
- Alias to re.findall
- """
-
- return self.regex.findall(string)
-
- def split(self, string):
- """
- Alias to re.split
- """
-
- return self.regex.split(string)
-
- def sub(self, sub, string, count=0):
- """
- Alias to re.sub
- """
-
- return self.regex.sub(sub, string, count=count)
-
- def group(self, num):
- """
- Returns the group results of the last match
- """
-
- return self.last_match.group(num)
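-
- # A small usage sketch (illustrative only):
- #
- # r = KernRe(r'(\w+)\s*\(')
- # if r.search("foo (bar)"):
- # name = r.group(1) # -> "foo"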
-
-
-class NestedMatch:
- """
- Finding nested delimiters is hard with regular expressions. It is
- even harder in Python with its standard re module, as several
- advanced regular expression features are missing.
-
- This is the case of this pattern:
-
- '\\bSTRUCT_GROUP(\\(((?:(?>[^)(]+)|(?1))*)\\))[^;]*;'
-
- which is used to properly match the open/close parentheses of the
- STRUCT_GROUP() string search.
-
- This class counts pairs of delimiters, using the count to match and
- replace nested expressions.
-
- The original approach was suggested by:
- https://stackoverflow.com/questions/5454322/python-how-to-match-nested-parentheses-with-regex
-
- It was re-implemented here, though, to be more generic and to match
- three types of delimiters. The logic checks whether delimiters are
- paired; if not, the search string is ignored.
- """
-
- # TODO: make NestedMatch handle multiple match groups
- #
- # Right now, regular expressions to match it are defined only up to
- # the start delimiter, e.g.:
- #
- # \bSTRUCT_GROUP\(
- #
- # is similar to: STRUCT_GROUP\((.*)\)
- # except that the content inside the match group is delimiter-aligned.
- #
- # The content inside the parentheses is converted into a single
- # replace group (e.g. r'\1').
- #
- # It would be nice to change such definition to support multiple
- # match groups, allowing a regex equivalent to:
- #
- # FOO\((.*), (.*), (.*)\)
- #
- # It is probably easier to define it not as a regular expression, but
- # with some lexical definition like:
- #
- # FOO(arg1, arg2, arg3)
-
- DELIMITER_PAIRS = {
- '{': '}',
- '(': ')',
- '[': ']',
- }
-
- RE_DELIM = re.compile(r'[\{\}\[\]\(\)]')
-
- def _search(self, regex, line):
- """
- Finds paired blocks for a regex that ends with a delimiter.
-
- The suggestion of using finditer to match pairs came from:
- https://stackoverflow.com/questions/5454322/python-how-to-match-nested-parentheses-with-regex
- but this ended up using a different implementation to align all three
- types of delimiters and to seek for an initial regular expression.
-
- The algorithm seeks open/close paired delimiters and places them
- on a stack, yielding the start/stop positions of each match when
- the stack is emptied.
-
- The algorithm should work fine for properly paired lines, but will
- silently ignore end delimiters that precede a start delimiter.
- This should be OK for the kernel-doc parser, as unpaired delimiters
- would cause compilation errors. So, we don't need to raise exceptions
- to cover such issues.
- """
-
- stack = []
-
- for match_re in regex.finditer(line):
- start = match_re.start()
- offset = match_re.end()
-
- d = line[offset - 1]
- if d not in self.DELIMITER_PAIRS:
- continue
-
- end = self.DELIMITER_PAIRS[d]
- stack.append(end)
-
- for match in self.RE_DELIM.finditer(line[offset:]):
- pos = match.start() + offset
-
- d = line[pos]
-
- if d in self.DELIMITER_PAIRS:
- end = self.DELIMITER_PAIRS[d]
-
- stack.append(end)
- continue
-
- # Does the end delimiter match what is expected?
- if stack and d == stack[-1]:
- stack.pop()
-
- if not stack:
- yield start, offset, pos + 1
- break
-
- def search(self, regex, line):
- """
- This is similar to re.search:
-
- It matches a regex that is followed by a delimiter,
- returning occurrences only if all delimiters are paired.
- """
-
- for t in self._search(regex, line):
-
- yield line[t[0]:t[2]]
-
- def sub(self, regex, sub, line, count=0):
- """
- This is similar to re.sub:
-
- It matches a regex that is followed by a delimiter,
- replacing occurrences only if all delimiters are paired.
-
- If r'\1' is used, it works just like re: the matched paired data,
- with the delimiters stripped, is placed there.
-
- If count is nonzero, it will replace at most count items.
- """
- out = ""
-
- cur_pos = 0
- n = 0
-
- for start, end, pos in self._search(regex, line):
- out += line[cur_pos:start]
-
- # Value, ignoring start/end delimiters
- value = line[end:pos - 1]
-
- # replaces \1 at the sub string, if \1 is used there
- new_sub = sub
- new_sub = new_sub.replace(r'\1', value)
-
- out += new_sub
-
- # Drop end ';' if any
- if line[pos] == ';':
- pos += 1
-
- cur_pos = pos
- n += 1
-
- if count and n >= count:
- break
-
- # Append the remaining string
- out += line[cur_pos:]
-
- return out
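-
-
-if __name__ == "__main__":
- # A hedged, self-contained demo of NestedMatch; the STRUCT_GROUP
- # line below is illustrative, not taken from real kernel source.
- nested = NestedMatch()
- regex = re.compile(r'\bSTRUCT_GROUP\(')
- line = "int a; STRUCT_GROUP(int b; int c;); int d;"
- # Replaces the paired block with its inner contents:
- print(nested.sub(regex, r'\1', line))
- # prints: int a; int b; int c; int d;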
diff --git a/scripts/rustdoc_test_gen.rs b/scripts/rustdoc_test_gen.rs
index c8f9dc2ab976..be0561049660 100644
--- a/scripts/rustdoc_test_gen.rs
+++ b/scripts/rustdoc_test_gen.rs
@@ -208,6 +208,7 @@ pub extern "C" fn {kunit_name}(__kunit_test: *mut ::kernel::bindings::kunit) {{
#[allow(unused)]
static __DOCTEST_ANCHOR: i32 = ::core::line!() as i32 + {body_offset} + 1;
{{
+ #![allow(unreachable_pub, clippy::disallowed_names)]
{body}
main();
}}
diff --git a/scripts/sphinx-build-wrapper b/scripts/sphinx-build-wrapper
deleted file mode 100755
index abe8c26ae137..000000000000
--- a/scripts/sphinx-build-wrapper
+++ /dev/null
@@ -1,719 +0,0 @@
-#!/usr/bin/env python3
-# SPDX-License-Identifier: GPL-2.0
-# Copyright (C) 2025 Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
-#
-# pylint: disable=R0902, R0912, R0913, R0914, R0915, R0917, C0103
-#
-# Converted from docs Makefile and parallel-wrapper.sh, both under
-# GPLv2, copyrighted since 2008 by the following authors:
-#
-# Akira Yokosawa <akiyks@gmail.com>
-# Arnd Bergmann <arnd@arndb.de>
-# Breno Leitao <leitao@debian.org>
-# Carlos Bilbao <carlos.bilbao@amd.com>
-# Dave Young <dyoung@redhat.com>
-# Donald Hunter <donald.hunter@gmail.com>
-# Geert Uytterhoeven <geert+renesas@glider.be>
-# Jani Nikula <jani.nikula@intel.com>
-# Jan Stancek <jstancek@redhat.com>
-# Jonathan Corbet <corbet@lwn.net>
-# Joshua Clayton <stillcompiling@gmail.com>
-# Kees Cook <keescook@chromium.org>
-# Linus Torvalds <torvalds@linux-foundation.org>
-# Magnus Damm <damm+renesas@opensource.se>
-# Masahiro Yamada <masahiroy@kernel.org>
-# Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
-# Maxim Cournoyer <maxim.cournoyer@gmail.com>
-# Peter Foley <pefoley2@pefoley.com>
-# Randy Dunlap <rdunlap@infradead.org>
-# Rob Herring <robh@kernel.org>
-# Shuah Khan <shuahkh@osg.samsung.com>
-# Thorsten Blum <thorsten.blum@toblux.com>
-# Tomas Winkler <tomas.winkler@intel.com>
-
-
-"""
-Sphinx build wrapper that handles Kernel-specific business rules:
-
-- it gets the Kernel build environment vars;
-- it determines what's the best parallelism;
-- it handles SPHINXDIRS.
-
-This tool ensures that MIN_PYTHON_VERSION is satisfied. If the current
-version is below that, it searches for a newer Python version. If one
-is found, it re-runs using it.
-"""
-
-import argparse
-import locale
-import os
-import re
-import shlex
-import shutil
-import subprocess
-import sys
-
-from concurrent import futures
-from glob import glob
-
-LIB_DIR = "lib"
-SRC_DIR = os.path.dirname(os.path.realpath(__file__))
-
-sys.path.insert(0, os.path.join(SRC_DIR, LIB_DIR))
-
-from jobserver import JobserverExec # pylint: disable=C0413
-
-
-def parse_version(version):
- """Convert a major.minor.patch version into a tuple"""
- return tuple(int(x) for x in version.split("."))
-
-def ver_str(version):
- """Returns a version tuple as major.minor.patch"""
-
- return ".".join([str(x) for x in version])
-
-# Minimal supported Python version needed by Sphinx and its extensions
-MIN_PYTHON_VERSION = parse_version("3.7")
-
-# Default value for --venv parameter
-VENV_DEFAULT = "sphinx_latest"
-
-# List of make targets and their corresponding builders and output directories
-TARGETS = {
- "cleandocs": {
- "builder": "clean",
- },
- "htmldocs": {
- "builder": "html",
- },
- "epubdocs": {
- "builder": "epub",
- "out_dir": "epub",
- },
- "texinfodocs": {
- "builder": "texinfo",
- "out_dir": "texinfo",
- },
- "infodocs": {
- "builder": "texinfo",
- "out_dir": "texinfo",
- },
- "latexdocs": {
- "builder": "latex",
- "out_dir": "latex",
- },
- "pdfdocs": {
- "builder": "latex",
- "out_dir": "latex",
- },
- "xmldocs": {
- "builder": "xml",
- "out_dir": "xml",
- },
- "linkcheckdocs": {
- "builder": "linkcheck"
- },
-}
-
-# Paper sizes. An empty value will pick the default
-PAPER = ["", "a4", "letter"]
-
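-# For instance, build() below resolves a make target through these
-# tables (illustrative lookup):
-#
-# builder = TARGETS["htmldocs"]["builder"] # -> "html"
-# out_dir = TARGETS["htmldocs"].get("out_dir", "") # -> ""
-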
-class SphinxBuilder:
- """
- Handles a sphinx-build target, adding the arguments needed to build
- the Kernel documentation.
- """
-
- def is_rust_enabled(self):
- """Check if rust is enabled at .config"""
- config_path = os.path.join(self.srctree, ".config")
- if os.path.isfile(config_path):
- with open(config_path, "r", encoding="utf-8") as f:
- return "CONFIG_RUST=y" in f.read()
- return False
-
- def get_path(self, path, abs_path=False):
- """
- Ancillary routine to handle paths the right way, as a shell does.
-
- It first expands "~" and "~user". Then, if the path is not absolute,
- it is joined with self.srctree. Finally, if requested, it is
- converted to an absolute path.
- """
-
- path = os.path.expanduser(path)
- if not path.startswith("/"):
- path = os.path.join(self.srctree, path)
-
- if abs_path:
- return os.path.abspath(path)
-
- return path
-
- def __init__(self, venv=None, verbose=False, n_jobs=None, interactive=None):
- """Initialize internal variables"""
- self.venv = venv
- self.verbose = None
-
- # Normal variables passed from Kernel's makefile
- self.kernelversion = os.environ.get("KERNELVERSION", "unknown")
- self.kernelrelease = os.environ.get("KERNELRELEASE", "unknown")
- self.pdflatex = os.environ.get("PDFLATEX", "xelatex")
-
- if not interactive:
- self.latexopts = os.environ.get("LATEXOPTS", "-interaction=batchmode -no-shell-escape")
- else:
- self.latexopts = os.environ.get("LATEXOPTS", "")
-
- if not verbose:
- verbose = bool(os.environ.get("KBUILD_VERBOSE", "") != "")
-
- # Handle the SPHINXOPTS environment variable
- sphinxopts = shlex.split(os.environ.get("SPHINXOPTS", ""))
-
- # As we handle the number of jobs and quiet separately, we need to
- # parse them the same way as sphinx-build would, so let's use argparse
- # to do the right argument expansion
- parser = argparse.ArgumentParser()
- parser.add_argument('-j', '--jobs', type=int)
- parser.add_argument('-q', '--quiet', type=int)
-
- # Other sphinx-build arguments go as-is, so place them
- # at self.sphinxopts
- sphinx_args, self.sphinxopts = parser.parse_known_args(sphinxopts)
- if sphinx_args.quiet == True:
- self.verbose = False
-
- # Command-line arguments override SPHINXOPTS
- if verbose is not None:
- self.verbose = verbose
-
- # Pick the -j value from the command line; fall back to SPHINXOPTS
- self.n_jobs = n_jobs
- if not self.n_jobs and sphinx_args.jobs:
- self.n_jobs = sphinx_args.jobs
-
- # Source tree directory. This needs to be at os.environ, as
- # Sphinx extensions and media uAPI makefile needs it
- self.srctree = os.environ.get("srctree")
- if not self.srctree:
- self.srctree = "."
- os.environ["srctree"] = self.srctree
-
- # Now that we can expand srctree, get other directories as well
- self.sphinxbuild = os.environ.get("SPHINXBUILD", "sphinx-build")
- self.kerneldoc = self.get_path(os.environ.get("KERNELDOC",
- "scripts/kernel-doc.py"))
- self.obj = os.environ.get("obj", "Documentation")
- self.builddir = self.get_path(os.path.join(self.obj, "output"),
- abs_path=True)
-
- # Media uAPI needs it
- os.environ["BUILDDIR"] = self.builddir
-
- # Detect if rust is enabled
- self.config_rust = self.is_rust_enabled()
-
- # Get directory locations for LaTeX build toolchain
- self.pdflatex_cmd = shutil.which(self.pdflatex)
- self.latexmk_cmd = shutil.which("latexmk")
-
- self.env = os.environ.copy()
-
- # If venv parameter is specified, run Sphinx from venv
- if venv:
- bin_dir = os.path.join(venv, "bin")
- if os.path.isfile(os.path.join(bin_dir, "activate")):
- # "activate" virtual env
- self.env["PATH"] = bin_dir + ":" + self.env["PATH"]
- self.env["VIRTUAL_ENV"] = venv
- if "PYTHONHOME" in self.env:
- del self.env["PYTHONHOME"]
- print(f"Setting venv to {venv}")
- else:
- sys.exit(f"Venv {venv} not found.")
-
- def run_sphinx(self, sphinx_build, build_args, *args, **kwargs):
- """
- Executes sphinx-build using the current python3 command, setting
- the -j parameter, if possible, to run the build in parallel.
- """
-
- with JobserverExec() as jobserver:
- if jobserver.claim:
- n_jobs = str(jobserver.claim)
- else:
- n_jobs = "auto" # Supported since Sphinx 1.7
-
- cmd = []
-
- if self.venv:
- cmd.append("python")
- else:
- cmd.append(sys.executable)
-
- cmd.append(sphinx_build)
-
- # if present, SPHINXOPTS or command line --jobs overrides default
- if self.n_jobs:
- n_jobs = str(self.n_jobs)
-
- if n_jobs:
- cmd += [f"-j{n_jobs}"]
-
- if not self.verbose:
- cmd.append("-q")
-
- cmd += self.sphinxopts
-
- cmd += build_args
-
- if self.verbose:
- print(" ".join(cmd))
-
- return subprocess.call(cmd, *args, **kwargs)
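-
- # The assembled command typically looks something like this
- # (illustrative; the exact flags depend on SPHINXOPTS, verbosity
- # and the jobserver):
- #
- # /usr/bin/python3 /usr/bin/sphinx-build -j8 -q -b html ...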
-
- def handle_html(self, css, output_dir):
- """
- Extra steps for HTML and epub output.
-
- For such targets, we need to ensure that CSS will be properly
- copied to the output _static directory
- """
-
- if not css:
- return
-
- css = os.path.expanduser(css)
- if not css.startswith("/"):
- css = os.path.join(self.srctree, css)
-
- static_dir = os.path.join(output_dir, "_static")
- os.makedirs(static_dir, exist_ok=True)
-
- try:
- shutil.copy2(css, static_dir)
- except (OSError, IOError) as e:
- print(f"Warning: Failed to copy CSS: {e}", file=sys.stderr)
-
- def build_pdf_file(self, latex_cmd, from_dir, path):
- """Builds a single pdf file using latex_cmd"""
- try:
- subprocess.run(latex_cmd + [path],
- cwd=from_dir, check=True)
-
- return True
- except subprocess.CalledProcessError:
- # The LaTeX PDF error code is almost useless: it returns
- # error codes even when the build succeeds but has warnings.
- # So, we'll ignore the result.
- return False
-
- def pdf_parallel_build(self, tex_suffix, latex_cmd, tex_files, n_jobs):
- """Build PDF files in parallel if possible"""
- builds = {}
- build_failed = False
- max_len = 0
- has_tex = False
-
- # Process files in parallel
- with futures.ThreadPoolExecutor(max_workers=n_jobs) as executor:
- jobs = {}
-
- for from_dir, pdf_dir, entry in tex_files:
- name = entry.name
-
- if not name.endswith(tex_suffix):
- continue
-
- name = name[:-len(tex_suffix)]
-
- max_len = max(max_len, len(name))
-
- has_tex = True
-
- future = executor.submit(self.build_pdf_file, latex_cmd,
- from_dir, entry.path)
- jobs[future] = (from_dir, name, entry.path)
-
- for future in futures.as_completed(jobs):
- from_dir, name, path = jobs[future]
-
- pdf_name = name + ".pdf"
- pdf_from = os.path.join(from_dir, pdf_name)
-
- try:
- success = future.result()
-
- if success and os.path.exists(pdf_from):
- pdf_to = os.path.join(pdf_dir, pdf_name)
-
- os.rename(pdf_from, pdf_to)
- builds[name] = os.path.relpath(pdf_to, self.builddir)
- else:
- builds[name] = "FAILED"
- build_failed = True
- except Exception as e:
- builds[name] = f"FAILED ({str(e)})"
- build_failed = True
-
- # Handle case where no .tex files were found
- if not has_tex:
- name = "Sphinx LaTeX builder"
- max_len = max(max_len, len(name))
- builds[name] = "FAILED (no .tex file was generated)"
- build_failed = True
-
- return builds, build_failed, max_len
-
- def handle_pdf(self, output_dirs):
- """
- Extra steps for PDF output.
-
- As PDF is handled via LaTeX output, after building the .tex file,
- a second build is needed to create the PDF output from the latex
- directory.
- """
- builds = {}
- max_len = 0
- tex_suffix = ".tex"
-
- # Get all tex files that will be used for PDF build
- tex_files = []
- for from_dir in output_dirs:
- pdf_dir = os.path.join(from_dir, "../pdf")
- os.makedirs(pdf_dir, exist_ok=True)
-
- if self.latexmk_cmd:
- latex_cmd = [self.latexmk_cmd, f"-{self.pdflatex}"]
- else:
- latex_cmd = [self.pdflatex]
-
- latex_cmd.extend(shlex.split(self.latexopts))
-
- # Get a list of tex files to process
- with os.scandir(from_dir) as it:
- for entry in it:
- if entry.name.endswith(tex_suffix):
- tex_files.append((from_dir, pdf_dir, entry))
-
- # When using make, the number of jobs comes from the POSIX jobserver.
- # Otherwise, the build was started from the command line; in that
- # case, serialize by default, unless the user explicitly set the
- # number of jobs.
- #
- # jobserver.claim is the number of jobs that were requested with
- # "-j" and that aren't used by other make targets.
- with JobserverExec() as jobserver:
- n_jobs = 1
-
- # A command-line parameter is the default if the jobserver doesn't
- # claim anything. self.n_jobs is either an integer or "auto"; only
- # use it here if it is a number.
- if self.n_jobs:
- try:
- n_jobs = int(self.n_jobs)
- except ValueError:
- pass
-
- if jobserver.claim:
- n_jobs = jobserver.claim
-
- # Build files in parallel
- builds, build_failed, max_len = self.pdf_parallel_build(tex_suffix,
- latex_cmd,
- tex_files,
- n_jobs)
-
- msg = "Summary"
- msg += "\n" + "=" * len(msg)
- print()
- print(msg)
-
- for pdf_name, pdf_file in builds.items():
- print(f"{pdf_name:<{max_len}}: {pdf_file}")
-
- print()
-
- # return an error if a PDF file is missing
-
- if build_failed:
- sys.exit(f"PDF build failed: not all PDF files were created.")
- else:
- print("All PDF files were built.")
-
- def handle_info(self, output_dirs):
- """
- Extra steps for Info output.
-
- For texinfo generation, an additional make is needed from the
- texinfo directory.
- """
-
- for output_dir in output_dirs:
- try:
- subprocess.run(["make", "info"], cwd=output_dir, check=True)
- except subprocess.CalledProcessError as e:
- sys.exit(f"Error generating info docs: {e}")
-
- def cleandocs(self, builder):
- """Remove the whole documentation output directory."""
- shutil.rmtree(self.builddir, ignore_errors=True)
-
- def build(self, target, sphinxdirs=None, conf="conf.py",
- theme=None, css=None, paper=None):
- """
- Build documentation using Sphinx. This is the core function of this
- module. It prepares all arguments required by sphinx-build.
- """
-
- builder = TARGETS[target]["builder"]
- out_dir = TARGETS[target].get("out_dir", "")
-
- # Cleandocs doesn't require sphinx-build
- if target == "cleandocs":
- self.cleandocs(builder)
- return
-
- # Other targets require sphinx-build
- sphinxbuild = shutil.which(self.sphinxbuild, path=self.env["PATH"])
- if not sphinxbuild:
- sys.exit(f"Error: {self.sphinxbuild} not found in PATH.\n")
-
- if builder == "latex":
- if not self.pdflatex_cmd and not self.latexmk_cmd:
- sys.exit("Error: pdflatex or latexmk required for PDF generation")
-
- docs_dir = os.path.abspath(os.path.join(self.srctree, "Documentation"))
-
- # Prepare base arguments for Sphinx build
- kerneldoc = self.kerneldoc
- if kerneldoc.startswith(self.srctree):
- kerneldoc = os.path.relpath(kerneldoc, self.srctree)
-
- # Prepare common Sphinx options
- args = [
- "-b", builder,
- "-c", docs_dir,
- ]
-
- if builder == "latex":
- if not paper:
- paper = PAPER[1]
-
- args.extend(["-D", f"latex_elements.papersize={paper}paper"])
-
- if self.config_rust:
- args.extend(["-t", "rustdoc"])
-
- if conf:
- self.env["SPHINX_CONF"] = self.get_path(conf, abs_path=True)
-
- if not sphinxdirs:
- sphinxdirs = os.environ.get("SPHINXDIRS", ".")
-
- # The sphinx-build tool has a bug: internally, it tries to set
- # locale with locale.setlocale(locale.LC_ALL, ''). This causes a
- # crash if language is not set. Detect and fix it.
- try:
- locale.setlocale(locale.LC_ALL, '')
- except Exception:
- self.env["LC_ALL"] = "C"
- self.env["LANG"] = "C"
-
- # sphinxdirs can be a list or a whitespace-separated string
- sphinxdirs_list = []
- for sphinxdir in sphinxdirs:
- if isinstance(sphinxdir, list):
- sphinxdirs_list += sphinxdir
- else:
- for name in sphinxdir.split(" "):
- sphinxdirs_list.append(name)
-
- # Build each directory
- output_dirs = []
- for sphinxdir in sphinxdirs_list:
- src_dir = os.path.join(docs_dir, sphinxdir)
- doctree_dir = os.path.join(self.builddir, ".doctrees")
- output_dir = os.path.join(self.builddir, sphinxdir, out_dir)
-
- # Make directory names canonical
- src_dir = os.path.normpath(src_dir)
- doctree_dir = os.path.normpath(doctree_dir)
- output_dir = os.path.normpath(output_dir)
-
- os.makedirs(doctree_dir, exist_ok=True)
- os.makedirs(output_dir, exist_ok=True)
-
- output_dirs.append(output_dir)
-
- build_args = args + [
- "-d", doctree_dir,
- "-D", f"kerneldoc_bin={kerneldoc}",
- "-D", f"version={self.kernelversion}",
- "-D", f"release={self.kernelrelease}",
- "-D", f"kerneldoc_srctree={self.srctree}",
- src_dir,
- output_dir,
- ]
-
- # Execute sphinx-build
- try:
- self.run_sphinx(sphinxbuild, build_args, env=self.env)
- except Exception as e:
- sys.exit(f"Build failed: {e}")
-
- # Ensure that html/epub will have needed static files
- if target in ["htmldocs", "epubdocs"]:
- self.handle_html(css, output_dir)
-
- # PDF and Info require a second build step
- if target == "pdfdocs":
- self.handle_pdf(output_dirs)
- elif target == "infodocs":
- self.handle_info(output_dirs)
-
- @staticmethod
- def get_python_version(cmd):
- """
- Get the python version from a Python binary. As we need to detect
- whether newer python binaries are out there, we can't rely on the
- version of the running interpreter here.
- """
-
- result = subprocess.run([cmd, "--version"], check=True,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE,
- universal_newlines=True)
- version = result.stdout.strip()
-
- match = re.search(r"(\d+\.\d+\.\d+)", version)
- if match:
- return parse_version(match.group(1))
-
- print(f"Can't parse version {version}")
- return (0, 0, 0)
-
- @staticmethod
- def find_python():
- """
- Detect whether any python 3.xy version newer than the current
- one is available.
-
- Note: this routine is limited to up to 2 digits for python3. We
- may need to update it one day, hopefully in the distant future.
- """
- patterns = [
- "python3.[0-9]",
- "python3.[0-9][0-9]",
- ]
-
- # Seek for a python binary newer than MIN_PYTHON_VERSION
- for path in os.getenv("PATH", "").split(":"):
- for pattern in patterns:
- for cmd in glob(os.path.join(path, pattern)):
- if os.path.isfile(cmd) and os.access(cmd, os.X_OK):
- version = SphinxBuilder.get_python_version(cmd)
- if version >= MIN_PYTHON_VERSION:
- return cmd
-
- return None
-
- @staticmethod
- def check_python():
- """
- Check if the current python binary satisfies our minimal requirement
- for Sphinx build. If not, re-run with a newer version if found.
- """
- cur_ver = sys.version_info[:3]
- if cur_ver >= MIN_PYTHON_VERSION:
- return
-
- python_ver = ver_str(cur_ver)
-
- new_python_cmd = SphinxBuilder.find_python()
- if not new_python_cmd:
- sys.exit(f"Python version {python_ver} is not supported anymore.")
-
- # Restart script using the newer version
- script_path = os.path.abspath(sys.argv[0])
- args = [new_python_cmd, script_path] + sys.argv[1:]
-
- print(f"Python {python_ver} not supported. Changing to {new_python_cmd}")
-
- try:
- os.execv(new_python_cmd, args)
- except OSError as e:
- sys.exit(f"Failed to restart with {new_python_cmd}: {e}")
-
-def jobs_type(value):
- """
- Handle valid values for -j. Accepts Sphinx "-jauto", plus a number
- equal or bigger than one.
- """
- if value is None:
- return None
-
- if value.lower() == 'auto':
- return value.lower()
-
- try:
- if int(value) >= 1:
- return value
-
- raise argparse.ArgumentTypeError(f"Minimum jobs is 1, got {value}")
- except ValueError:
- raise argparse.ArgumentTypeError(f"Must be 'auto' or positive integer, got {value}")
-
-def main():
- """
- Main function. The only mandatory argument is the target. The other
- arguments use default values, possibly taken from os.environ, when
- not specified.
- """
- parser = argparse.ArgumentParser(description="Kernel documentation builder")
-
- parser.add_argument("target", choices=list(TARGETS.keys()),
- help="Documentation target to build")
- parser.add_argument("--sphinxdirs", nargs="+",
- help="Specific directories to build")
- parser.add_argument("--conf", default="conf.py",
- help="Sphinx configuration file")
-
- parser.add_argument("--theme", help="Sphinx theme to use")
-
- parser.add_argument("--css", help="Custom CSS file for HTML/EPUB")
-
- parser.add_argument("--paper", choices=PAPER, default=PAPER[0],
- help="Paper size for LaTeX/PDF output")
-
- parser.add_argument("-v", "--verbose", action='store_true',
- help="place build in verbose mode")
-
- parser.add_argument('-j', '--jobs', type=jobs_type,
- help="Sets number of jobs to use with sphinx-build")
-
- parser.add_argument('-i', '--interactive', action='store_true',
- help="Change latex default to run in interactive mode")
-
- parser.add_argument("-V", "--venv", nargs='?', const=f'{VENV_DEFAULT}',
- default=None,
- help=f'If used, run Sphinx from a venv dir (default dir: {VENV_DEFAULT})')
-
- args = parser.parse_args()
-
- SphinxBuilder.check_python()
-
- builder = SphinxBuilder(venv=args.venv, verbose=args.verbose,
- n_jobs=args.jobs, interactive=args.interactive)
-
- builder.build(args.target, sphinxdirs=args.sphinxdirs, conf=args.conf,
- theme=args.theme, css=args.css, paper=args.paper)
-
-if __name__ == "__main__":
- main()
diff --git a/scripts/sphinx-pre-install b/scripts/sphinx-pre-install
deleted file mode 100755
index 954ed3dc0645..000000000000
--- a/scripts/sphinx-pre-install
+++ /dev/null
@@ -1,1621 +0,0 @@
-#!/usr/bin/env python3
-# SPDX-License-Identifier: GPL-2.0-or-later
-# Copyright (c) 2017-2025 Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
-#
-# pylint: disable=C0103,C0114,C0115,C0116,C0301,C0302
-# pylint: disable=R0902,R0904,R0911,R0912,R0914,R0915,R1705,R1710,E1121
-
-# Note: this script requires at least Python 3.6 to run.
-# Don't add changes that aren't compatible with it; it is meant to report
-# incompatible python versions.
-
-"""
-Dependency checker for the Kernel's Sphinx documentation build.
-
-This module provides tools to check for all required dependencies needed to
-build documentation using Sphinx, including system packages, Python modules
-and LaTeX packages for PDF generation.
-
-It detects packages for a subset of Linux distributions used by Kernel
-maintainers, showing hints and listing missing dependencies.
-
-The main class SphinxDependencyChecker handles the dependency checking logic
-and provides recommendations for installing missing packages. It supports both
-system package installations and Python virtual environments. By default,
-system package install is recommended.
-"""
-
-import argparse
-import os
-import re
-import subprocess
-import sys
-from glob import glob
-
-
-def parse_version(version):
- """Convert a major.minor.patch version into a tuple"""
- return tuple(int(x) for x in version.split("."))
-
-
-def ver_str(version):
- """Returns a version tuple as major.minor.patch"""
-
- return ".".join([str(x) for x in version])
-
-
-RECOMMENDED_VERSION = parse_version("3.4.3")
-MIN_PYTHON_VERSION = parse_version("3.7")
-
-
-class DepManager:
- """
- Manage package dependencies. There are three types of dependencies:
-
- - System: dependencies required for docs build;
- - Python: python dependencies for a native distro Sphinx install;
- - PDF: dependencies needed by PDF builds.
-
- Each dependency can be mandatory or optional. Not installing an optional
- dependency won't break the build, but will degrade the docs output.
- """
-
- # Internal types of dependencies. Don't use them outside DepManager class.
- _SYS_TYPE = 0
- _PHY_TYPE = 1
- _PDF_TYPE = 2
-
- # Dependencies visible outside the class.
- # The keys are tuples of: (type, is_mandatory flag).
- #
- # Currently we're not using all optional dep types. Yet, we'll keep all
- # possible combinations here. There aren't many, and keeping them all
- # makes things easier if they're needed later, and for the name()
- # method below.
-
- SYSTEM_MANDATORY = (_SYS_TYPE, True)
- PYTHON_MANDATORY = (_PHY_TYPE, True)
- PDF_MANDATORY = (_PDF_TYPE, True)
-
- SYSTEM_OPTIONAL = (_SYS_TYPE, False)
- PYTHON_OPTIONAL = (_PHY_TYPE, False)
- PDF_OPTIONAL = (_PDF_TYPE, False)
-
- def __init__(self, pdf):
- """
- Initialize internal vars:
-
- - missing: missing dependencies dict, mapping a distro-independent
- name for each missing dependency to its type.
- - missing_pkg: ancillary dict containing missing dependencies in
- distro namespace, organized by type.
- - need: total number of needed dependencies. Never cleaned.
- - optional: total number of optional dependencies. Never cleaned.
- - pdf: Is PDF support enabled?
- """
- self.missing = {}
- self.missing_pkg = {}
- self.need = 0
- self.optional = 0
- self.pdf = pdf
-
- @staticmethod
- def name(dtype):
- """
- Ancillary routine to output a warn/error message reporting
- missing dependencies.
- """
- if dtype[0] == DepManager._SYS_TYPE:
- msg = "build"
- elif dtype[0] == DepManager._PHY_TYPE:
- msg = "Python"
- else:
- msg = "PDF"
-
- if dtype[1]:
- return f"ERROR: {msg} mandatory deps missing"
- else:
- return f"Warning: {msg} optional deps missing"
-
- @staticmethod
- def is_optional(dtype):
- """Ancillary routine to report if a dependency is optional"""
- return not dtype[1]
-
- @staticmethod
- def is_pdf(dtype):
- """Ancillary routine to report if a dependency is for PDF generation"""
- return dtype[0] == DepManager._PDF_TYPE
-
- def add_package(self, package, dtype):
- """
- Add a package to the self.missing dictionary.
- Doesn't update missing_pkg.
- """
- is_optional = DepManager.is_optional(dtype)
- self.missing[package] = dtype
- if is_optional:
- self.optional += 1
- else:
- self.need += 1
-
- def del_package(self, package):
- """
- Remove a package from the self.missing dictionary.
- Doesn't update missing_pkg.
- """
- if package in self.missing:
- del self.missing[package]
-
- def clear_deps(self):
- """
- Clear dependencies without changing needed/optional.
-
- This is an awkward way to have a separate section to recommend
- a package after the main system dependencies.
-
- TODO: rework the logic to prevent needing it.
- """
-
- self.missing = {}
- self.missing_pkg = {}
-
- def check_missing(self, progs):
- """
- Update self.missing_pkg, using progs dict to convert from the
- agnostic package name to distro-specific one.
-
- Returns a string with the packages to be installed, sorted and
- with any duplicates removed.
- """
-
- self.missing_pkg = {}
-
- for prog, dtype in sorted(self.missing.items()):
- # At least on some LTS distros like CentOS 7, texlive doesn't
- # provide all packages we need. When such distros are
- # detected, we have to disable PDF output.
- #
- # So, we need to ignore the packages that distros would
- # need for LaTeX to work
- if DepManager.is_pdf(dtype) and not self.pdf:
- self.optional -= 1
- continue
-
- if dtype not in self.missing_pkg:
- self.missing_pkg[dtype] = []
-
- self.missing_pkg[dtype].append(progs.get(prog, prog))
-
- install = []
- for dtype, pkgs in self.missing_pkg.items():
- install += pkgs
-
- return " ".join(sorted(set(install)))
-
- def warn_install(self):
- """
- Emit warnings/errors related to missing packages.
- """
-
- output_msg = ""
-
- for dtype in sorted(self.missing_pkg.keys()):
- progs = " ".join(sorted(set(self.missing_pkg[dtype])))
-
- try:
- name = DepManager.name(dtype)
- output_msg += f'{name}:\t{progs}\n'
- except KeyError:
- raise KeyError(f"ERROR!!!: invalid dtype for {progs}: {dtype}")
-
- if output_msg:
- print(f"\n{output_msg}")
-
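-# A minimal usage sketch of DepManager (package names are illustrative):
-#
-# deps = DepManager(pdf=True)
-# deps.add_package("sphinx", DepManager.PYTHON_MANDATORY)
-# install = deps.check_missing({"sphinx": "python3-sphinx"})
-# deps.warn_install() # prints what's missing
-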
-class AncillaryMethods:
- """
- Ancillary methods that check for missing dependencies of different
- kinds, like binaries, python modules, rpm deps, etc.
- """
-
- @staticmethod
- def which(prog):
- """
- Our own implementation of which(). We could instead use
- shutil.which(), but this function is simple enough.
- It is probably faster to use this implementation than to import shutil.
- """
- for path in os.environ.get("PATH", "").split(":"):
- full_path = os.path.join(path, prog)
- if os.access(full_path, os.X_OK):
- return full_path
-
- return None
-
- @staticmethod
- def get_python_version(cmd):
- """
- Get the python version from a Python binary. As we need to detect
- whether newer python binaries are out there, we can't rely on the
- version of the running interpreter here.
- """
-
- result = SphinxDependencyChecker.run([cmd, "--version"],
- capture_output=True, text=True)
- version = result.stdout.strip()
-
- match = re.search(r"(\d+\.\d+\.\d+)", version)
- if match:
- return parse_version(match.group(1))
-
- print(f"Can't parse version {version}")
- return (0, 0, 0)
-
- @staticmethod
- def find_python():
- """
- Detect whether any python 3.xy version newer than the current
- one is available.
-
- Note: this routine is limited to up to 2 digits for python3. We
- may need to update it one day, hopefully in the distant future.
- """
- patterns = [
- "python3.[0-9]",
- "python3.[0-9][0-9]",
- ]
-
- # Seek for a python binary newer than MIN_PYTHON_VERSION
- for path in os.getenv("PATH", "").split(":"):
- for pattern in patterns:
- for cmd in glob(os.path.join(path, pattern)):
- if os.path.isfile(cmd) and os.access(cmd, os.X_OK):
- version = SphinxDependencyChecker.get_python_version(cmd)
- if version >= MIN_PYTHON_VERSION:
- return cmd
-
- @staticmethod
- def check_python():
- """
- Check if the current python binary satisfies our minimal requirement
- for Sphinx build. If not, re-run with a newer version if found.
- """
- cur_ver = sys.version_info[:3]
- if cur_ver >= MIN_PYTHON_VERSION:
- ver = ver_str(cur_ver)
- print(f"Python version: {ver}")
-
- # This could be useful for debugging purposes
- if SphinxDependencyChecker.which("docutils"):
- result = SphinxDependencyChecker.run(["docutils", "--version"],
- capture_output=True, text=True)
- ver = result.stdout.strip()
- match = re.search(r"(\d+\.\d+\.\d+)", ver)
- if match:
- ver = match.group(1)
-
- print(f"Docutils version: {ver}")
-
- return
-
- python_ver = ver_str(cur_ver)
-
- new_python_cmd = SphinxDependencyChecker.find_python()
- if not new_python_cmd:
- print(f"ERROR: Python version {python_ver} is not spported anymore\n")
- print(" Can't find a new version. This script may fail")
- return
-
- # Restart script using the newer version
- script_path = os.path.abspath(sys.argv[0])
- args = [new_python_cmd, script_path] + sys.argv[1:]
-
- print(f"Python {python_ver} not supported. Changing to {new_python_cmd}")
-
- try:
- os.execv(new_python_cmd, args)
- except OSError as e:
- sys.exit(f"Failed to restart with {new_python_cmd}: {e}")
-
- @staticmethod
- def run(*args, **kwargs):
- """
- Execute a command, hiding its output by default.
- Preserves compatibility with older Python versions.
- """
-
- capture_output = kwargs.pop('capture_output', False)
-
- if capture_output:
- if 'stdout' not in kwargs:
- kwargs['stdout'] = subprocess.PIPE
- if 'stderr' not in kwargs:
- kwargs['stderr'] = subprocess.PIPE
- else:
- if 'stdout' not in kwargs:
- kwargs['stdout'] = subprocess.DEVNULL
- if 'stderr' not in kwargs:
- kwargs['stderr'] = subprocess.DEVNULL
-
- # Don't break with older Python versions
- if 'text' in kwargs and sys.version_info < (3, 7):
- kwargs['universal_newlines'] = kwargs.pop('text')
-
- return subprocess.run(*args, **kwargs)
-
-class MissingCheckers(AncillaryMethods):
- """
- Contains some ancillary checkers for different types of binaries and
- package managers.
- """
-
- def __init__(self, args, texlive):
- """
- Initialize its internal variables
- """
- self.pdf = args.pdf
- self.virtualenv = args.virtualenv
- self.version_check = args.version_check
- self.texlive = texlive
-
- self.min_version = (0, 0, 0)
- self.cur_version = (0, 0, 0)
-
- self.deps = DepManager(self.pdf)
-
- self.need_symlink = 0
- self.need_sphinx = 0
-
- self.verbose_warn_install = 1
-
- self.virtenv_dir = ""
- self.install = ""
- self.python_cmd = ""
-
- self.virtenv_prefix = ["sphinx_", "Sphinx_" ]
-
- def check_missing_file(self, files, package, dtype):
- """
- Does at least one of the files exist? If not, add the package to
- the missing dependencies.
- """
- for f in files:
- if os.path.exists(f):
- return
- self.deps.add_package(package, dtype)
-
- def check_program(self, prog, dtype):
- """
- Does the program exist, and is it in the PATH?
- If not, add it to the missing dependencies.
- """
- found = self.which(prog)
- if found:
- return found
-
- self.deps.add_package(prog, dtype)
-
- return None
-
- def check_perl_module(self, prog, dtype):
- """
- Is a required Perl module available?
- If not, add it to the missing dependencies.
-
- Right now, we still need Perl for doc build, as it is required
- by some tools called at docs or kernel build time, like:
-
- scripts/documentation-file-ref-check
-
- Also, checkpatch is written in Perl.
- """
-
- # While testing with the lxc download template, one of the
- # distros (Oracle) didn't have perl - nor even an option to install
- # it before installing the oraclelinux-release-el9 package.
- #
- # Check for it before reporting an error. If perl is not there,
- # add it as a mandatory package, as some parts of the doc builder
- # need it.
- if not self.which("perl"):
- self.deps.add_package("perl", DepManager.SYSTEM_MANDATORY)
- self.deps.add_package(prog, dtype)
- return
-
- try:
- self.run(["perl", f"-M{prog}", "-e", "1"], check=True)
- except subprocess.CalledProcessError:
- self.deps.add_package(prog, dtype)
-
- def check_python_module(self, module, is_optional=False):
- """
- Does a python module exist outside a venv? If not, add it to the
- missing dependencies.
- """
- if is_optional:
- dtype = DepManager.PYTHON_OPTIONAL
- else:
- dtype = DepManager.PYTHON_MANDATORY
-
- try:
- self.run([self.python_cmd, "-c", f"import {module}"], check=True)
- except subprocess.CalledProcessError:
- self.deps.add_package(module, dtype)
-
- def check_rpm_missing(self, pkgs, dtype):
- """
- Does an rpm package exist? If not, add it to the missing dependencies.
- """
- for prog in pkgs:
- try:
- self.run(["rpm", "-q", prog], check=True)
- except subprocess.CalledProcessError:
- self.deps.add_package(prog, dtype)
-
- def check_pacman_missing(self, pkgs, dtype):
- """
- Does a pacman package exist? If not, add it to the missing dependencies.
- """
- for prog in pkgs:
- try:
- self.run(["pacman", "-Q", prog], check=True)
- except subprocess.CalledProcessError:
- self.deps.add_package(prog, dtype)
-
- def check_missing_tex(self, is_optional=False):
- """
- Does a LaTeX package exist? If not, add it to the missing dependencies.
- """
- if is_optional:
- dtype = DepManager.PDF_OPTIONAL
- else:
- dtype = DepManager.PDF_MANDATORY
-
- kpsewhich = self.which("kpsewhich")
- for prog, package in self.texlive.items():
-
- # If kpsewhich is not there, just add it to deps
- if not kpsewhich:
- self.deps.add_package(package, dtype)
- continue
-
- # Check if the package is needed
- try:
- result = self.run(
- [kpsewhich, prog], stdout=subprocess.PIPE, text=True, check=True
- )
-
- # Didn't find. Add it
- if not result.stdout.strip():
- self.deps.add_package(package, dtype)
-
- except subprocess.CalledProcessError:
- # kpsewhich returned an error. Add it, just in case
- self.deps.add_package(package, dtype)
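-
-        # For reference, kpsewhich prints the resolved path when the file
-        # is known, and nothing when it is missing, e.g.:
-        #   $ kpsewhich amsmath.sty
-        #   /usr/share/texlive/texmf-dist/tex/latex/amsmath/amsmath.sty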
-
- def get_sphinx_fname(self):
- """
- Gets the binary filename for sphinx-build.
- """
- if "SPHINXBUILD" in os.environ:
- return os.environ["SPHINXBUILD"]
-
- fname = "sphinx-build"
- if self.which(fname):
- return fname
-
- fname = "sphinx-build-3"
- if self.which(fname):
- self.need_symlink = 1
- return fname
-
- return ""
-
- def get_sphinx_version(self, cmd):
- """
- Gets sphinx-build version.
- """
- try:
- result = self.run([cmd, "--version"],
- stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT,
- text=True, check=True)
- except (subprocess.CalledProcessError, FileNotFoundError):
- return None
-
- for line in result.stdout.split("\n"):
- match = re.match(r"^sphinx-build\s+([\d\.]+)(?:\+(?:/[\da-f]+)|b\d+)?\s*$", line)
- if match:
- return parse_version(match.group(1))
-
- match = re.match(r"^Sphinx.*\s+([\d\.]+)\s*$", line)
- if match:
- return parse_version(match.group(1))
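-
-        # Examples of version lines matched by the regexes above:
-        #   sphinx-build 7.2.6
-        #   Sphinx (sphinx-build) 3.4.3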
-
- def check_sphinx(self, conf):
- """
- Checks Sphinx minimal requirements
- """
- try:
- with open(conf, "r", encoding="utf-8") as f:
- for line in f:
- match = re.match(r"^\s*needs_sphinx\s*=\s*[\'\"]([\d\.]+)[\'\"]", line)
- if match:
- self.min_version = parse_version(match.group(1))
- break
- except IOError:
- sys.exit(f"Can't open {conf}")
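-
-        # The line parsed above looks like this in Documentation/conf.py
-        # (version number given just as an example):
-        #   needs_sphinx = "3.4.3"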
-
-        if self.min_version == (0, 0, 0):
- sys.exit(f"Can't get needs_sphinx version from {conf}")
-
- self.virtenv_dir = self.virtenv_prefix[0] + "latest"
-
- sphinx = self.get_sphinx_fname()
- if not sphinx:
- self.need_sphinx = 1
- return
-
- self.cur_version = self.get_sphinx_version(sphinx)
- if not self.cur_version:
- sys.exit(f"{sphinx} didn't return its version")
-
- if self.cur_version < self.min_version:
- curver = ver_str(self.cur_version)
- minver = ver_str(self.min_version)
-
- print(f"ERROR: Sphinx version is {curver}. It should be >= {minver}")
- self.need_sphinx = 1
- return
-
-        # In version-check mode, just assume Sphinx has all mandatory deps
- if self.version_check and self.cur_version >= RECOMMENDED_VERSION:
- sys.exit(0)
-
- def catcheck(self, filename):
- """
- Reads a file if it exists, returning as string.
- If not found, returns an empty string.
- """
- if os.path.exists(filename):
- with open(filename, "r", encoding="utf-8") as f:
- return f.read().strip()
- return ""
-
- def get_system_release(self):
- """
-        Determine the system type. There's no single method that works
-        across all distros with a minimal package install, so several
-        methods are used here.
-
-        By default, it uses lsb_release. If that is not available, it falls
-        back to reading the known different places where the distro name
-        is stored.
-
-        Several modern distros now have /etc/os-release, which usually has
-        decent coverage.
- """
-
- system_release = ""
-
- if self.which("lsb_release"):
- result = self.run(["lsb_release", "-d"], capture_output=True, text=True)
- system_release = result.stdout.replace("Description:", "").strip()
-
- release_files = [
- "/etc/system-release",
- "/etc/redhat-release",
- "/etc/lsb-release",
- "/etc/gentoo-release",
- ]
-
- if not system_release:
- for f in release_files:
- system_release = self.catcheck(f)
- if system_release:
- break
-
- # This seems more common than LSB these days
- if not system_release:
- os_var = {}
- try:
- with open("/etc/os-release", "r", encoding="utf-8") as f:
- for line in f:
- match = re.match(r"^([\w\d\_]+)=\"?([^\"]*)\"?\n", line)
- if match:
- os_var[match.group(1)] = match.group(2)
-
- system_release = os_var.get("NAME", "")
- if "VERSION_ID" in os_var:
- system_release += " " + os_var["VERSION_ID"]
- elif "VERSION" in os_var:
- system_release += " " + os_var["VERSION"]
- except IOError:
- pass
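-
-        # A typical /etc/os-release contains, for instance:
-        #   NAME="Fedora Linux"
-        #   VERSION_ID=42
-        # which the logic above turns into "Fedora Linux 42".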
-
- if not system_release:
- system_release = self.catcheck("/etc/issue")
-
- system_release = system_release.strip()
-
- return system_release
-
-class SphinxDependencyChecker(MissingCheckers):
- """
- Main class for checking Sphinx documentation build dependencies.
-
- - Check for missing system packages;
- - Check for missing Python modules;
- - Check for missing LaTeX packages needed by PDF generation;
- - Propose Sphinx install via Python Virtual environment;
- - Propose Sphinx install via distro-specific package install.
- """
- def __init__(self, args):
- """Initialize checker variables"""
-
- # List of required texlive packages on Fedora and OpenSuse
- texlive = {
- "amsfonts.sty": "texlive-amsfonts",
- "amsmath.sty": "texlive-amsmath",
- "amssymb.sty": "texlive-amsfonts",
- "amsthm.sty": "texlive-amscls",
- "anyfontsize.sty": "texlive-anyfontsize",
- "atbegshi.sty": "texlive-oberdiek",
- "bm.sty": "texlive-tools",
- "capt-of.sty": "texlive-capt-of",
- "cmap.sty": "texlive-cmap",
- "ctexhook.sty": "texlive-ctex",
- "ecrm1000.tfm": "texlive-ec",
- "eqparbox.sty": "texlive-eqparbox",
- "eu1enc.def": "texlive-euenc",
- "fancybox.sty": "texlive-fancybox",
- "fancyvrb.sty": "texlive-fancyvrb",
- "float.sty": "texlive-float",
- "fncychap.sty": "texlive-fncychap",
- "footnote.sty": "texlive-mdwtools",
- "framed.sty": "texlive-framed",
- "luatex85.sty": "texlive-luatex85",
- "multirow.sty": "texlive-multirow",
- "needspace.sty": "texlive-needspace",
- "palatino.sty": "texlive-psnfss",
- "parskip.sty": "texlive-parskip",
- "polyglossia.sty": "texlive-polyglossia",
- "tabulary.sty": "texlive-tabulary",
- "threeparttable.sty": "texlive-threeparttable",
- "titlesec.sty": "texlive-titlesec",
- "ucs.sty": "texlive-ucs",
- "upquote.sty": "texlive-upquote",
- "wrapfig.sty": "texlive-wrapfig",
- }
-
- super().__init__(args, texlive)
-
- self.need_pip = False
- self.rec_sphinx_upgrade = 0
-
- self.system_release = self.get_system_release()
- self.activate_cmd = ""
-
- # Some distros may not have a Sphinx shipped package compatible with
- # our minimal requirements
- self.package_supported = True
-
- # Recommend a new python version
- self.recommend_python = None
-
- # Certain hints are meant to be shown only once
- self.distro_msg = None
-
- self.latest_avail_ver = (0, 0, 0)
- self.venv_ver = (0, 0, 0)
-
- prefix = os.environ.get("srctree", ".") + "/"
-
- self.conf = prefix + "Documentation/conf.py"
- self.requirement_file = prefix + "Documentation/sphinx/requirements.txt"
-
- def get_install_progs(self, progs, cmd, extra=None):
- """
- Check for missing dependencies using the provided program mapping.
-
-        The actual distro-specific programs are mapped via the progs argument.
- """
- install = self.deps.check_missing(progs)
-
- if self.verbose_warn_install:
- self.deps.warn_install()
-
- if not install:
- return
-
- if cmd:
- if self.verbose_warn_install:
- msg = "You should run:"
- else:
- msg = ""
-
- if extra:
- msg += "\n\t" + extra.replace("\n", "\n\t")
-
-            return msg + "\n\tsudo " + cmd + " " + install
-
- return None
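-
-    # A sketch of the message built above, e.g. for a Debian-based distro
-    # missing graphviz:
-    #   You should run:
-    #       sudo apt-get install graphviz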
-
- #
- # Distro-specific hints methods
- #
-
- def give_debian_hints(self):
- """
- Provide package installation hints for Debian-based distros.
- """
- progs = {
- "Pod::Usage": "perl-modules",
- "convert": "imagemagick",
- "dot": "graphviz",
- "ensurepip": "python3-venv",
- "python-sphinx": "python3-sphinx",
- "rsvg-convert": "librsvg2-bin",
- "virtualenv": "virtualenv",
- "xelatex": "texlive-xetex",
- "yaml": "python3-yaml",
- }
-
- if self.pdf:
- pdf_pkgs = {
- "fonts-dejavu": [
- "/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf",
- ],
- "fonts-noto-cjk": [
- "/usr/share/fonts/noto-cjk/NotoSansCJK-Regular.ttc",
- "/usr/share/fonts/opentype/noto/NotoSansCJK-Regular.ttc",
- "/usr/share/fonts/opentype/noto/NotoSerifCJK-Regular.ttc",
- ],
- "tex-gyre": [
- "/usr/share/texmf/tex/latex/tex-gyre/tgtermes.sty"
- ],
- "texlive-fonts-recommended": [
- "/usr/share/texlive/texmf-dist/fonts/tfm/adobe/zapfding/pzdr.tfm",
- ],
- "texlive-lang-chinese": [
- "/usr/share/texlive/texmf-dist/tex/latex/ctex/ctexhook.sty",
- ],
- }
-
- for package, files in pdf_pkgs.items():
- self.check_missing_file(files, package, DepManager.PDF_MANDATORY)
-
- self.check_program("dvipng", DepManager.PDF_MANDATORY)
-
- if not self.distro_msg:
- self.distro_msg = \
- "Note: ImageMagick is broken on some distros, affecting PDF output. For more details:\n" \
- "\thttps://askubuntu.com/questions/1158894/imagemagick-still-broken-using-with-usr-bin-convert"
-
- return self.get_install_progs(progs, "apt-get install")
-
- def give_redhat_hints(self):
- """
- Provide package installation hints for RedHat-based distros
- (Fedora, RHEL and RHEL-based variants).
- """
- progs = {
- "Pod::Usage": "perl-Pod-Usage",
- "convert": "ImageMagick",
- "dot": "graphviz",
- "python-sphinx": "python3-sphinx",
- "rsvg-convert": "librsvg2-tools",
- "virtualenv": "python3-virtualenv",
- "xelatex": "texlive-xetex-bin",
- "yaml": "python3-pyyaml",
- }
-
- fedora_tex_pkgs = [
- "dejavu-sans-fonts",
- "dejavu-sans-mono-fonts",
- "dejavu-serif-fonts",
- "texlive-collection-fontsrecommended",
- "texlive-collection-latex",
- "texlive-xecjk",
- ]
-
- fedora = False
- rel = None
-
- match = re.search(r"(release|Linux)\s+(\d+)", self.system_release)
- if match:
- rel = int(match.group(2))
-
- if not rel:
- print("Couldn't identify release number")
- noto_sans_redhat = None
- self.pdf = False
- elif re.search("Fedora", self.system_release):
-            # Fedora 38 and later use this CJK font
-
- noto_sans_redhat = "google-noto-sans-cjk-fonts"
- fedora = True
- else:
- # Almalinux, CentOS, RHEL, ...
-
- # at least up to version 9 (and Fedora < 38), that's the CJK font
- noto_sans_redhat = "google-noto-sans-cjk-ttc-fonts"
-
- progs["virtualenv"] = "python-virtualenv"
-
- if not rel or rel < 8:
- print("ERROR: Distro not supported. Too old?")
- return
-
-        # RHEL 8 uses Python 3.6, which is not compatible with
-        # the build system anymore. Suggest Python 3.9
- if rel == 8:
- self.check_program("python3.9", DepManager.SYSTEM_MANDATORY)
- progs["python3.9"] = "python39"
- progs["yaml"] = "python39-pyyaml"
-
- self.recommend_python = True
-
- # There's no python39-sphinx package. Only pip is supported
- self.package_supported = False
-
- if not self.distro_msg:
- self.distro_msg = \
- "Note: RHEL-based distros typically require extra repositories.\n" \
- "For most, enabling epel and crb are enough:\n" \
- "\tsudo dnf install -y epel-release\n" \
- "\tsudo dnf config-manager --set-enabled crb\n" \
- "Yet, some may have other required repositories. Those commands could be useful:\n" \
- "\tsudo dnf repolist all\n" \
- "\tsudo dnf repoquery --available --info <pkgs>\n" \
- "\tsudo dnf config-manager --set-enabled '*' # enable all - probably not what you want"
-
- if self.pdf:
- pdf_pkgs = [
- "/usr/share/fonts/google-noto-cjk/NotoSansCJK-Regular.ttc",
- "/usr/share/fonts/google-noto-sans-cjk-fonts/NotoSansCJK-Regular.ttc",
- ]
-
- self.check_missing_file(pdf_pkgs, noto_sans_redhat, DepManager.PDF_MANDATORY)
-
- self.check_rpm_missing(fedora_tex_pkgs, DepManager.PDF_MANDATORY)
-
-            self.check_missing_tex()
-
- # There's no texlive-ctex on RHEL 8 repositories. This will
- # likely affect CJK pdf build only.
- if not fedora and rel == 8:
- self.deps.del_package("texlive-ctex")
-
- return self.get_install_progs(progs, "dnf install")
-
- def give_opensuse_hints(self):
- """
- Provide package installation hints for openSUSE-based distros
- (Leap and Tumbleweed).
- """
- progs = {
- "Pod::Usage": "perl-Pod-Usage",
- "convert": "ImageMagick",
- "dot": "graphviz",
- "python-sphinx": "python3-sphinx",
- "virtualenv": "python3-virtualenv",
- "xelatex": "texlive-xetex-bin texlive-dejavu",
- "yaml": "python3-pyyaml",
- }
-
- suse_tex_pkgs = [
- "texlive-babel-english",
- "texlive-caption",
- "texlive-colortbl",
- "texlive-courier",
- "texlive-dvips",
- "texlive-helvetic",
- "texlive-makeindex",
- "texlive-metafont",
- "texlive-metapost",
- "texlive-palatino",
- "texlive-preview",
- "texlive-times",
- "texlive-zapfchan",
- "texlive-zapfding",
- ]
-
- progs["latexmk"] = "texlive-latexmk-bin"
-
-        match = re.search(r"(Leap)\s+(\d+)\.(\d)", self.system_release)
- if match:
- rel = int(match.group(2))
-
- # Leap 15.x uses Python 3.6, which is not compatible with
- # the build system anymore. Suggest Python 3.11
- if rel == 15:
- if not self.which(self.python_cmd):
- self.check_program("python3.11", DepManager.SYSTEM_MANDATORY)
- progs["python3.11"] = "python311"
- self.recommend_python = True
-
- progs.update({
- "python-sphinx": "python311-Sphinx python311-Sphinx-latex",
- "virtualenv": "python311-virtualenv",
- "yaml": "python311-PyYAML",
- })
- else:
- # Tumbleweed defaults to Python 3.11
-
- progs.update({
- "python-sphinx": "python313-Sphinx python313-Sphinx-latex",
- "virtualenv": "python313-virtualenv",
- "yaml": "python313-PyYAML",
- })
-
- # FIXME: add support for installing CJK fonts
- #
- # I tried hard, but was unable to find a way to install
- # "Noto Sans CJK SC" on openSUSE
-
-        if self.pdf:
-            self.check_rpm_missing(suse_tex_pkgs, DepManager.PDF_MANDATORY)
-            self.check_missing_tex()
-
- return self.get_install_progs(progs, "zypper install --no-recommends")
-
- def give_mageia_hints(self):
- """
- Provide package installation hints for Mageia and OpenMandriva.
- """
- progs = {
- "Pod::Usage": "perl-Pod-Usage",
- "convert": "ImageMagick",
- "dot": "graphviz",
- "python-sphinx": "python3-sphinx",
- "rsvg-convert": "librsvg2",
- "virtualenv": "python3-virtualenv",
- "xelatex": "texlive",
- "yaml": "python3-yaml",
- }
-
- tex_pkgs = [
- "texlive-fontsextra",
- "texlive-fonts-asian",
- "fonts-ttf-dejavu",
- ]
-
- if re.search(r"OpenMandriva", self.system_release):
- packager_cmd = "dnf install"
- noto_sans = "noto-sans-cjk-fonts"
- tex_pkgs = [
- "texlive-collection-basic",
- "texlive-collection-langcjk",
- "texlive-collection-fontsextra",
- "texlive-collection-fontsrecommended"
- ]
-
- # Tested on OpenMandriva Lx 4.3
- progs["convert"] = "imagemagick"
- progs["yaml"] = "python-pyyaml"
- progs["python-virtualenv"] = "python-virtualenv"
- progs["python-sphinx"] = "python-sphinx"
- progs["xelatex"] = "texlive"
-
- self.check_program("python-virtualenv", DepManager.PYTHON_MANDATORY)
-
-            # On my tests with the openMandriva LX 4.0 docker image, upgraded
-            # to 4.3, the python-virtualenv package is broken: it is missing
-            # ensurepip. Without it, the alternative would be to run:
-            # python3 -m venv --without-pip ~/sphinx_latest, but running
-            # pip there won't install sphinx into the venv.
- #
- # Add a note about that.
-
- if not self.distro_msg:
- self.distro_msg = \
- "Notes:\n"\
- "1. for venv, ensurepip could be broken, preventing its install method.\n" \
- "2. at least on OpenMandriva LX 4.3, texlive packages seem broken"
-
- else:
- packager_cmd = "urpmi"
- noto_sans = "google-noto-sans-cjk-ttc-fonts"
-
- progs["latexmk"] = "texlive-collection-basic"
-
- if self.pdf:
- pdf_pkgs = [
- "/usr/share/fonts/google-noto-cjk/NotoSansCJK-Regular.ttc",
- "/usr/share/fonts/TTF/NotoSans-Regular.ttf",
- ]
-
- self.check_missing_file(pdf_pkgs, noto_sans, DepManager.PDF_MANDATORY)
- self.check_rpm_missing(tex_pkgs, DepManager.PDF_MANDATORY)
-
- return self.get_install_progs(progs, packager_cmd)
-
- def give_arch_linux_hints(self):
- """
- Provide package installation hints for ArchLinux.
- """
- progs = {
- "convert": "imagemagick",
- "dot": "graphviz",
- "latexmk": "texlive-core",
- "rsvg-convert": "extra/librsvg",
- "virtualenv": "python-virtualenv",
- "xelatex": "texlive-xetex",
- "yaml": "python-yaml",
- }
-
- archlinux_tex_pkgs = [
- "texlive-basic",
- "texlive-binextra",
- "texlive-core",
- "texlive-fontsrecommended",
- "texlive-langchinese",
- "texlive-langcjk",
- "texlive-latexextra",
- "ttf-dejavu",
- ]
-
- if self.pdf:
- self.check_pacman_missing(archlinux_tex_pkgs,
- DepManager.PDF_MANDATORY)
-
- self.check_missing_file(["/usr/share/fonts/noto-cjk/NotoSansCJK-Regular.ttc"],
- "noto-fonts-cjk",
- DepManager.PDF_MANDATORY)
-
-
- return self.get_install_progs(progs, "pacman -S")
-
- def give_gentoo_hints(self):
- """
- Provide package installation hints for Gentoo.
- """
- texlive_deps = [
- "dev-texlive/texlive-fontsrecommended",
- "dev-texlive/texlive-latexextra",
- "dev-texlive/texlive-xetex",
- "media-fonts/dejavu",
- ]
-
- progs = {
- "convert": "media-gfx/imagemagick",
- "dot": "media-gfx/graphviz",
- "rsvg-convert": "gnome-base/librsvg",
- "virtualenv": "dev-python/virtualenv",
- "xelatex": " ".join(texlive_deps),
- "yaml": "dev-python/pyyaml",
- "python-sphinx": "dev-python/sphinx",
- }
-
- if self.pdf:
- pdf_pkgs = {
- "media-fonts/dejavu": [
- "/usr/share/fonts/dejavu/DejaVuSans.ttf",
- ],
- "media-fonts/noto-cjk": [
- "/usr/share/fonts/noto-cjk/NotoSansCJKsc-Regular.otf",
- "/usr/share/fonts/noto-cjk/NotoSerifCJK-Regular.ttc",
- ],
- }
- for package, files in pdf_pkgs.items():
- self.check_missing_file(files, package, DepManager.PDF_MANDATORY)
-
- # Handling dependencies is a nightmare, as Gentoo refuses to emerge
- # some packages if there's no package.use file describing them.
- # To make it worse, compilation flags shall also be present there
- # for some packages. If USE is not perfect, error/warning messages
- # like those are shown:
- #
- # !!! The following binary packages have been ignored due to non matching USE:
- #
- # =media-gfx/graphviz-12.2.1-r1 X pdf -python_single_target_python3_13 qt6 svg
- # =media-gfx/graphviz-12.2.1-r1 X pdf python_single_target_python3_12 -python_single_target_python3_13 qt6 svg
- # =media-gfx/graphviz-12.2.1-r1 X pdf qt6 svg
- # =media-gfx/graphviz-12.2.1-r1 X pdf -python_single_target_python3_10 qt6 svg
- # =media-gfx/graphviz-12.2.1-r1 X pdf -python_single_target_python3_10 python_single_target_python3_12 -python_single_target_python3_13 qt6 svg
- # =media-fonts/noto-cjk-20190416 X
- # =app-text/texlive-core-2024-r1 X cjk -xetex
- # =app-text/texlive-core-2024-r1 X -xetex
- # =app-text/texlive-core-2024-r1 -xetex
- # =dev-libs/zziplib-0.13.79-r1 sdl
- #
- # And will ignore such packages, installing the remaining ones. That
- # affects mostly the image extension and PDF generation.
-
- # Package dependencies and the minimal needed args:
- portages = {
- "graphviz": "media-gfx/graphviz",
- "imagemagick": "media-gfx/imagemagick",
- "media-libs": "media-libs/harfbuzz icu",
- "media-fonts": "media-fonts/noto-cjk",
- "texlive": "app-text/texlive-core xetex",
- "zziblib": "dev-libs/zziplib sdl",
- }
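-
-        # For example, the "texlive" entry above makes the loop below emit:
-        #   sudo su -c 'echo "app-text/texlive-core xetex" > /etc/portage/package.use/texlive'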
-
- extra_cmds = ""
- if not self.distro_msg:
- self.distro_msg = "Note: Gentoo requires package.use to be adjusted before emerging packages"
-
- use_base = "/etc/portage/package.use"
- files = glob(f"{use_base}/*")
-
- for fname, portage in portages.items():
- install = False
-
- while install is False:
- if not files:
-                    # No files under package.use. Install all
- install = True
- break
-
- args = portage.split(" ")
-
- name = args.pop(0)
-
- cmd = ["grep", "-l", "-E", rf"^{name}\b" ] + files
- result = self.run(cmd, stdout=subprocess.PIPE, text=True)
- if result.returncode or not result.stdout.strip():
- # File containing portage name not found
- install = True
- break
-
- # Ensure that needed USE flags are present
- if args:
- match_fname = result.stdout.strip()
- with open(match_fname, 'r', encoding='utf8',
- errors='backslashreplace') as fp:
- for line in fp:
- for arg in args:
- if arg.startswith("-"):
- continue
-
- if not re.search(rf"\s*{arg}\b", line):
-                                # Needed USE flag not found
- install = True
- break
-
- # Everything looks ok, don't install
- break
-
-            # Emit a command to set up the missing USE flags
- if install:
- extra_cmds += (f"sudo su -c 'echo \"{portage}\" > {use_base}/{fname}'\n")
-
- # Now, we can use emerge and let it respect USE
- return self.get_install_progs(progs,
- "emerge --ask --changed-use --binpkg-respect-use=y",
- extra_cmds)
-
- def get_install(self):
- """
-        OS-specific hints logic. Searches for a hinter; if found, uses it to
-        provide package-manager specific install commands.
-
-        Otherwise, outputs install instructions for the meta-packages.
-
-        Returns a string with the command to be executed to install the
-        needed packages if the distro was detected. Otherwise, returns just
-        a list of packages that require installation.
- """
- os_hints = {
- re.compile("Red Hat Enterprise Linux"): self.give_redhat_hints,
- re.compile("Fedora"): self.give_redhat_hints,
- re.compile("AlmaLinux"): self.give_redhat_hints,
- re.compile("Amazon Linux"): self.give_redhat_hints,
- re.compile("CentOS"): self.give_redhat_hints,
- re.compile("openEuler"): self.give_redhat_hints,
- re.compile("Oracle Linux Server"): self.give_redhat_hints,
- re.compile("Rocky Linux"): self.give_redhat_hints,
- re.compile("Springdale Open Enterprise"): self.give_redhat_hints,
-
- re.compile("Ubuntu"): self.give_debian_hints,
- re.compile("Debian"): self.give_debian_hints,
- re.compile("Devuan"): self.give_debian_hints,
- re.compile("Kali"): self.give_debian_hints,
- re.compile("Mint"): self.give_debian_hints,
-
- re.compile("openSUSE"): self.give_opensuse_hints,
-
- re.compile("Mageia"): self.give_mageia_hints,
- re.compile("OpenMandriva"): self.give_mageia_hints,
-
- re.compile("Arch Linux"): self.give_arch_linux_hints,
- re.compile("Gentoo"): self.give_gentoo_hints,
- }
-
- # If the OS is detected, use per-OS hint logic
- for regex, os_hint in os_hints.items():
- if regex.search(self.system_release):
- return os_hint()
-
- #
-        # Fall back to generic hint code for other distros.
-        # That's far from ideal, especially for LaTeX dependencies.
- #
- progs = {"sphinx-build": "sphinx"}
- if self.pdf:
- self.check_missing_tex()
-
- self.distro_msg = \
- f"I don't know distro {self.system_release}.\n" \
- "So, I can't provide you a hint with the install procedure.\n" \
- "There are likely missing dependencies."
-
- return self.get_install_progs(progs, None)
-
- #
- # Common dependencies
- #
- def deactivate_help(self):
- """
- Print a helper message to disable a virtual environment.
- """
-
- print("\n If you want to exit the virtualenv, you can use:")
- print("\tdeactivate")
-
- def get_virtenv(self):
- """
- Give a hint about how to activate an already-existing virtual
- environment containing sphinx-build.
-
-        Returns a tuple (activate_cmd_path, sphinx_version) for
-        the newest available virtual env.
- """
-
- cwd = os.getcwd()
-
- activates = []
-
- # Add all sphinx prefixes with possible version numbers
- for p in self.virtenv_prefix:
- activates += glob(f"{cwd}/{p}[0-9]*/bin/activate")
-
- activates.sort(reverse=True, key=str.lower)
-
- # Place sphinx_latest first, if it exists
- for p in self.virtenv_prefix:
- activates = glob(f"{cwd}/{p}*latest/bin/activate") + activates
-
- ver = (0, 0, 0)
- for f in activates:
-            # Discard Sphinx virtual environments that are too old
- match = re.search(r"(\d+)\.(\d+)\.(\d+)", f)
- if match:
- ver = (int(match.group(1)), int(match.group(2)), int(match.group(3)))
-
- if ver < self.min_version:
- continue
-
- sphinx_cmd = f.replace("activate", "sphinx-build")
- if not os.path.isfile(sphinx_cmd):
- continue
-
- ver = self.get_sphinx_version(sphinx_cmd)
-
- if not ver:
- venv_dir = f.replace("/bin/activate", "")
- print(f"Warning: virtual environment {venv_dir} is not working.\n" \
- "Python version upgrade? Remove it with:\n\n" \
- "\trm -rf {venv_dir}\n\n")
- else:
- if self.need_sphinx and ver >= self.min_version:
- return (f, ver)
-            elif ver > self.cur_version:
- return (f, ver)
-
- return ("", ver)
-
- def recommend_sphinx_upgrade(self):
- """
- Check if Sphinx needs to be upgraded.
-
-        Returns a tuple with the highest available Sphinx version if found.
-        Otherwise, returns None to indicate either that no upgrade is needed
-        or that no venv was found.
- """
-
- # Avoid running sphinx-builds from venv if cur_version is good
- if self.cur_version and self.cur_version >= RECOMMENDED_VERSION:
- self.latest_avail_ver = self.cur_version
- return None
-
- # Get the highest version from sphinx_*/bin/sphinx-build and the
- # corresponding command to activate the venv/virtenv
- self.activate_cmd, self.venv_ver = self.get_virtenv()
-
- # Store the highest version from Sphinx existing virtualenvs
- if self.activate_cmd and self.venv_ver > self.cur_version:
- self.latest_avail_ver = self.venv_ver
- else:
- if self.cur_version:
- self.latest_avail_ver = self.cur_version
- else:
- self.latest_avail_ver = (0, 0, 0)
-
-        # As we don't know the packaged Sphinx version, and there are no
-        # virtual environments, don't check if upgrades are needed
-        if not self.virtualenv:
-            if self.latest_avail_ver == (0, 0, 0):
- return None
-
- return self.latest_avail_ver
-
-        # Either there is already a virtual env, or a new one should be created
- self.need_pip = True
-
-        if self.latest_avail_ver == (0, 0, 0):
-            return None
-
-        # Flag whether the recommendation is an upgrade of an existing install
-        if self.latest_avail_ver < RECOMMENDED_VERSION:
-            self.rec_sphinx_upgrade = 1
-
-        return self.latest_avail_ver
-
- def recommend_package(self):
- """
- Recommend installing Sphinx as a distro-specific package.
- """
-
- print("\n2) As a package with:")
-
- old_need = self.deps.need
- old_optional = self.deps.optional
-
- self.pdf = False
- self.deps.optional = 0
- old_verbose = self.verbose_warn_install
- self.verbose_warn_install = 0
-
- self.deps.clear_deps()
-
- self.deps.add_package("python-sphinx", DepManager.PYTHON_MANDATORY)
-
- cmd = self.get_install()
- if cmd:
- print(cmd)
-
- self.deps.need = old_need
- self.deps.optional = old_optional
- self.verbose_warn_install = old_verbose
-
- def recommend_sphinx_version(self, virtualenv_cmd):
- """
- Provide recommendations for installing or upgrading Sphinx based
- on current version.
-
-        The logic here is complex, as it has to deal with different versions:
-
- - minimal supported version;
- - minimal PDF version;
- - recommended version.
-
- It also needs to work fine with both distro's package and
- venv/virtualenv
- """
-
- if self.recommend_python:
- cur_ver = sys.version_info[:3]
- if cur_ver < MIN_PYTHON_VERSION:
- print(f"\nPython version {cur_ver} is incompatible with doc build.\n" \
- "Please upgrade it and re-run.\n")
- return
-
- # Version is OK. Nothing to do.
- if self.cur_version != (0, 0, 0) and self.cur_version >= RECOMMENDED_VERSION:
- return
-
- if self.latest_avail_ver:
- latest_avail_ver = ver_str(self.latest_avail_ver)
-
- if not self.need_sphinx:
- # sphinx-build is present and its version is >= $min_version
-
-            # only recommend enabling a newer virtenv version if it makes sense.
- if self.latest_avail_ver and self.latest_avail_ver > self.cur_version:
- print(f"\nYou may also use the newer Sphinx version {latest_avail_ver} with:")
- if f"{self.virtenv_prefix}" in os.getcwd():
- print("\tdeactivate")
- print(f"\t. {self.activate_cmd}")
- self.deactivate_help()
- return
-
- if self.latest_avail_ver and self.latest_avail_ver >= RECOMMENDED_VERSION:
- return
-
- if not self.virtualenv:
-            # No sphinx either via package or via virtenv. As we can't
-            # compare the versions here, just return, recommending the
-            # user to install it from the distro package.
- if not self.latest_avail_ver or self.latest_avail_ver == (0, 0, 0):
- return
-
-            # The user doesn't want a virtenv recommendation, but a newer
-            # version is already installed on a virtenv.
-            # So, print commands to enable it
- if self.latest_avail_ver > self.cur_version:
- print(f"\nYou may also use the Sphinx virtualenv version {latest_avail_ver} with:")
- if f"{self.virtenv_prefix}" in os.getcwd():
- print("\tdeactivate")
- print(f"\t. {self.activate_cmd}")
- self.deactivate_help()
- return
- print("\n")
- else:
- if self.need_sphinx:
- self.deps.need += 1
-
- # Suggest newer versions if current ones are too old
- if self.latest_avail_ver and self.latest_avail_ver >= self.min_version:
- if self.latest_avail_ver >= RECOMMENDED_VERSION:
- print(f"\nNeed to activate Sphinx (version {latest_avail_ver}) on virtualenv with:")
- print(f"\t. {self.activate_cmd}")
- self.deactivate_help()
- return
-
- # Version is above the minimal required one, but may be
- # below the recommended one. So, print warnings/notes
- if self.latest_avail_ver < RECOMMENDED_VERSION:
- print(f"Warning: It is recommended at least Sphinx version {RECOMMENDED_VERSION}.")
-
- # At this point, either it needs Sphinx or upgrade is recommended,
- # both via pip
-
- if self.rec_sphinx_upgrade:
- if not self.virtualenv:
- print("Instead of install/upgrade Python Sphinx pkg, you could use pip/pypi with:\n\n")
- else:
- print("To upgrade Sphinx, use:\n\n")
- else:
- print("\nSphinx needs to be installed either:\n1) via pip/pypi with:\n")
-
- if not virtualenv_cmd:
- print(" Currently not possible.\n")
- print(" Please upgrade Python to a newer version and run this script again")
- else:
- print(f"\t{virtualenv_cmd} {self.virtenv_dir}")
- print(f"\t. {self.virtenv_dir}/bin/activate")
- print(f"\tpip install -r {self.requirement_file}")
- self.deactivate_help()
-
- if self.package_supported:
- self.recommend_package()
-
- print("\n" \
- " Please note that Sphinx currentlys produce false-positive\n" \
- " warnings when the same name is used for more than one type (functions,\n" \
- " structs, enums,...). This is known Sphinx bug. For more details, see:\n" \
- "\thttps://github.com/sphinx-doc/sphinx/pull/8313")
-
- def check_needs(self):
- """
- Main method that checks needed dependencies and provides
- recommendations.
- """
- self.python_cmd = sys.executable
-
- # Check if Sphinx is already accessible from current environment
- self.check_sphinx(self.conf)
-
- if self.system_release:
- print(f"Detected OS: {self.system_release}.")
- else:
- print("Unknown OS")
- if self.cur_version != (0, 0, 0):
- ver = ver_str(self.cur_version)
- print(f"Sphinx version: {ver}\n")
-
- # Check the type of virtual env, depending on Python version
- virtualenv_cmd = None
-
- if sys.version_info < MIN_PYTHON_VERSION:
- min_ver = ver_str(MIN_PYTHON_VERSION)
- print(f"ERROR: at least python {min_ver} is required to build the kernel docs")
- self.need_sphinx = 1
-
- self.venv_ver = self.recommend_sphinx_upgrade()
-
- if self.need_pip:
- if sys.version_info < MIN_PYTHON_VERSION:
- self.need_pip = False
- print("Warning: python version is not supported.")
- else:
- virtualenv_cmd = f"{self.python_cmd} -m venv"
- self.check_python_module("ensurepip")
-
- # Check for needed programs/tools
- self.check_perl_module("Pod::Usage", DepManager.SYSTEM_MANDATORY)
-
- self.check_program("make", DepManager.SYSTEM_MANDATORY)
- self.check_program("which", DepManager.SYSTEM_MANDATORY)
-
- self.check_program("dot", DepManager.SYSTEM_OPTIONAL)
- self.check_program("convert", DepManager.SYSTEM_OPTIONAL)
-
- self.check_python_module("yaml")
-
- if self.pdf:
- self.check_program("xelatex", DepManager.PDF_MANDATORY)
- self.check_program("rsvg-convert", DepManager.PDF_MANDATORY)
- self.check_program("latexmk", DepManager.PDF_MANDATORY)
-
- # Do distro-specific checks and output distro-install commands
- cmd = self.get_install()
- if cmd:
- print(cmd)
-
- # If distro requires some special instructions, print here.
- # Please notice that get_install() needs to be called first.
- if self.distro_msg:
- print("\n" + self.distro_msg)
-
- if not self.python_cmd:
-            if self.deps.need == 1:
-                sys.exit("Can't build as 1 mandatory dependency is missing")
-            elif self.deps.need:
-                sys.exit(f"Can't build as {self.deps.need} mandatory dependencies are missing")
-
- # Check if sphinx-build is called sphinx-build-3
- if self.need_symlink:
- sphinx_path = self.which("sphinx-build-3")
- if sphinx_path:
- print(f"\tsudo ln -sf {sphinx_path} /usr/bin/sphinx-build\n")
-
- self.recommend_sphinx_version(virtualenv_cmd)
- print("")
-
- if not self.deps.optional:
- print("All optional dependencies are met.")
-
- if self.deps.need == 1:
- sys.exit("Can't build as 1 mandatory dependency is missing")
- elif self.deps.need:
- sys.exit(f"Can't build as {self.deps.need} mandatory dependencies are missing")
-
- print("Needed package dependencies are met.")
-
-DESCRIPTION = """
-Process some flags related to Sphinx installation and documentation build.
-"""
-
-
-def main():
- """Main function"""
- parser = argparse.ArgumentParser(description=DESCRIPTION)
-
- parser.add_argument(
- "--no-virtualenv",
- action="store_false",
- dest="virtualenv",
- help="Recommend installing Sphinx instead of using a virtualenv",
- )
-
- parser.add_argument(
- "--no-pdf",
- action="store_false",
- dest="pdf",
- help="Don't check for dependencies required to build PDF docs",
- )
-
- parser.add_argument(
- "--version-check",
- action="store_true",
- dest="version_check",
- help="If version is compatible, don't check for missing dependencies",
- )
-
- args = parser.parse_args()
-
- checker = SphinxDependencyChecker(args)
-
- checker.check_python()
- checker.check_needs()
-
-# Call main if not used as module
-if __name__ == "__main__":
- main()
diff --git a/scripts/split-man.pl b/scripts/split-man.pl
deleted file mode 100755
index 96bd99dc977a..000000000000
--- a/scripts/split-man.pl
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/usr/bin/env perl
-# SPDX-License-Identifier: GPL-2.0
-#
-# Author: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
-#
-# Produce manpages from kernel-doc.
-# See Documentation/doc-guide/kernel-doc.rst for instructions
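-#
-# Typical usage (a sketch; see the document above for the exact workflow):
-#   scripts/kernel-doc -man <source files> | scripts/split-man.pl /tmp/man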
-
-if ($#ARGV < 0) {
- die "where do I put the results?\n";
-}
-
-mkdir $ARGV[0],0777;
-$state = 0;
-while (<STDIN>) {
- if (/^\.TH \"[^\"]*\" 9 \"([^\"]*)\"/) {
- if ($state == 1) { close OUT }
- $state = 1;
- $fn = "$ARGV[0]/$1.9";
- print STDERR "Creating $fn\n";
- open OUT, ">$fn" or die "can't open $fn: $!\n";
- print OUT $_;
- } elsif ($state != 0) {
- print OUT $_;
- }
-}
-
-close OUT if $state;
diff --git a/scripts/test_doc_build.py b/scripts/test_doc_build.py
deleted file mode 100755
index 47b4606569f9..000000000000
--- a/scripts/test_doc_build.py
+++ /dev/null
@@ -1,513 +0,0 @@
-#!/usr/bin/env python3
-# SPDX-License-Identifier: GPL-2.0
-# Copyright(c) 2025: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
-#
-# pylint: disable=R0903,R0912,R0913,R0914,R0917,C0301
-
-"""
-Install minimal supported requirements for different Sphinx versions
-and optionally test the build.
-"""
-
-import argparse
-import asyncio
-import os.path
-import shutil
-import sys
-import time
-import subprocess
-
-# Minimal python version supported by the building system: pick the oldest
-# available interpreter among the supported ones.
-
-PYTHON = os.path.basename(sys.executable)
-
-min_python_bin = None
-
-for i in range(9, 13):
- p = f"python3.{i}"
- if shutil.which(p):
- min_python_bin = p
- break
-
-if not min_python_bin:
- min_python_bin = PYTHON
-
-# Starting from 8.0, Python 3.9 is not supported anymore.
-PYTHON_VER_CHANGES = {(8, 0, 0): PYTHON}
-
-DEFAULT_VERSIONS_TO_TEST = [
- (3, 4, 3), # Minimal supported version
- (5, 3, 0), # CentOS Stream 9 / AlmaLinux 9
- (6, 1, 1), # Debian 12
- (7, 2, 1), # openSUSE Leap 15.6
- (7, 2, 6), # Ubuntu 24.04 LTS
- (7, 4, 7), # Ubuntu 24.10
- (7, 3, 0), # openSUSE Tumbleweed
- (8, 1, 3), # Fedora 42
- (8, 2, 3) # Latest version - covers rolling distros
-]
-
-# Sphinx versions to be installed and their incremental requirements
-SPHINX_REQUIREMENTS = {
- # Oldest versions we support for each package required by Sphinx 3.4.3
- (3, 4, 3): {
- "docutils": "0.16",
- "alabaster": "0.7.12",
- "babel": "2.8.0",
- "certifi": "2020.6.20",
- "docutils": "0.16",
- "idna": "2.10",
- "imagesize": "1.2.0",
- "Jinja2": "2.11.2",
- "MarkupSafe": "1.1.1",
- "packaging": "20.4",
- "Pygments": "2.6.1",
- "PyYAML": "5.1",
- "requests": "2.24.0",
- "snowballstemmer": "2.0.0",
- "sphinxcontrib-applehelp": "1.0.2",
- "sphinxcontrib-devhelp": "1.0.2",
- "sphinxcontrib-htmlhelp": "1.0.3",
- "sphinxcontrib-jsmath": "1.0.1",
- "sphinxcontrib-qthelp": "1.0.3",
- "sphinxcontrib-serializinghtml": "1.1.4",
- "urllib3": "1.25.9",
- },
-
-    # Update package dependencies to a more modern base. The goal here
-    # is to avoid too many incremental changes for the next entries
- (3, 5, 0): {
- "alabaster": "0.7.13",
- "babel": "2.17.0",
- "certifi": "2025.6.15",
- "idna": "3.10",
- "imagesize": "1.4.1",
- "packaging": "25.0",
- "Pygments": "2.8.1",
- "requests": "2.32.4",
- "snowballstemmer": "3.0.1",
- "sphinxcontrib-applehelp": "1.0.4",
- "sphinxcontrib-htmlhelp": "2.0.1",
- "sphinxcontrib-serializinghtml": "1.1.5",
- "urllib3": "2.0.0",
- },
-
- # Starting from here, ensure all docutils versions are covered with
- # supported Sphinx versions. Other packages are upgraded only when
- # required by pip
- (4, 0, 0): {
- "PyYAML": "5.1",
- },
- (4, 1, 0): {
- "docutils": "0.17",
- "Pygments": "2.19.1",
- "Jinja2": "3.0.3",
- "MarkupSafe": "2.0",
- },
- (4, 3, 0): {},
- (4, 4, 0): {},
- (4, 5, 0): {
- "docutils": "0.17.1",
- },
- (5, 0, 0): {},
- (5, 1, 0): {},
- (5, 2, 0): {
- "docutils": "0.18",
- "Jinja2": "3.1.2",
- "MarkupSafe": "2.0",
- "PyYAML": "5.3.1",
- },
- (5, 3, 0): {
- "docutils": "0.18.1",
- },
- (6, 0, 0): {},
- (6, 1, 0): {},
- (6, 2, 0): {
- "PyYAML": "5.4.1",
- },
- (7, 0, 0): {},
- (7, 1, 0): {},
- (7, 2, 0): {
- "docutils": "0.19",
- "PyYAML": "6.0.1",
- "sphinxcontrib-serializinghtml": "1.1.9",
- },
- (7, 2, 6): {
- "docutils": "0.20",
- },
- (7, 3, 0): {
- "alabaster": "0.7.14",
- "PyYAML": "6.0.1",
- "tomli": "2.0.1",
- },
- (7, 4, 0): {
- "docutils": "0.20.1",
- "PyYAML": "6.0.1",
- },
- (8, 0, 0): {
- "docutils": "0.21",
- },
- (8, 1, 0): {
- "docutils": "0.21.1",
- "PyYAML": "6.0.1",
- "sphinxcontrib-applehelp": "1.0.7",
- "sphinxcontrib-devhelp": "1.0.6",
- "sphinxcontrib-htmlhelp": "2.0.6",
- "sphinxcontrib-qthelp": "1.0.6",
- },
- (8, 2, 0): {
- "docutils": "0.21.2",
- "PyYAML": "6.0.1",
- "sphinxcontrib-serializinghtml": "1.1.9",
- },
-}
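-
-# The table above is incremental: SphinxVenv.run() applies the entries in
-# version order, each update()ing the previous set. For example, when testing
-# Sphinx 4.1.0, the effective pins are the (3, 4, 3) base plus the (3, 5, 0),
-# (4, 0, 0) and (4, 1, 0) updates, so docutils ends up at 0.17 and Jinja2
-# at 3.0.3.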
-
-
-class AsyncCommands:
- """Excecute command synchronously"""
-
- def __init__(self, fp=None):
-
- self.stdout = None
- self.stderr = None
- self.output = None
- self.fp = fp
-
- def log(self, out, verbose, is_info=True):
- out = out.removesuffix('\n')
-
- if verbose:
- if is_info:
- print(out)
- else:
- print(out, file=sys.stderr)
-
- if self.fp:
- self.fp.write(out + "\n")
-
- async def _read(self, stream, verbose, is_info):
- """Ancillary routine to capture while displaying"""
-
- while stream is not None:
- line = await stream.readline()
- if line:
- out = line.decode("utf-8", errors="backslashreplace")
- self.log(out, verbose, is_info)
- if is_info:
- self.stdout += out
- else:
- self.stderr += out
- else:
- break
-
- async def run(self, cmd, capture_output=False, check=False,
- env=None, verbose=True):
-
- """
- Execute an arbitrary command, handling errors.
-
- Please notice that this class is not thread safe
- """
-
- self.stdout = ""
- self.stderr = ""
-
- self.log("$ " + " ".join(cmd), verbose)
-
- proc = await asyncio.create_subprocess_exec(cmd[0],
- *cmd[1:],
- env=env,
- stdout=asyncio.subprocess.PIPE,
- stderr=asyncio.subprocess.PIPE)
-
- # Handle input and output in realtime
- await asyncio.gather(
- self._read(proc.stdout, verbose, True),
- self._read(proc.stderr, verbose, False),
- )
-
- await proc.wait()
-
- if check and proc.returncode > 0:
- raise subprocess.CalledProcessError(returncode=proc.returncode,
- cmd=" ".join(cmd),
- output=self.stdout,
- stderr=self.stderr)
-
- if capture_output:
- if proc.returncode > 0:
- self.log(f"Error {proc.returncode}", verbose=True, is_info=False)
- return ""
-
-            return self.stdout
-
- ret = subprocess.CompletedProcess(args=cmd,
- returncode=proc.returncode,
- stdout=self.stdout,
- stderr=self.stderr)
-
- return ret
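-
-    # Usage sketch (from within an async function):
-    #   cmd = AsyncCommands()
-    #   ret = await cmd.run(["make", "htmldocs"], check=True)
-    #   print(ret.returncode, len(ret.stdout))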
-
-
-class SphinxVenv:
- """
- Installs Sphinx on one virtual env per Sphinx version with a minimal
- set of dependencies, adjusting them to each specific version.
- """
-
- def __init__(self):
- """Initialize instance variables"""
-
- self.built_time = {}
- self.first_run = True
-
- async def _handle_version(self, args, fp,
- cur_ver, cur_requirements, python_bin):
- """Handle a single Sphinx version"""
-
- cmd = AsyncCommands(fp)
-
- ver = ".".join(map(str, cur_ver))
-
- if not self.first_run and args.wait_input and args.build:
- ret = input("Press Enter to continue or 'a' to abort: ").strip().lower()
- if ret == "a":
- print("Aborted.")
- sys.exit()
- else:
- self.first_run = False
-
- venv_dir = f"Sphinx_{ver}"
- req_file = f"requirements_{ver}.txt"
-
- cmd.log(f"\nSphinx {ver} with {python_bin}", verbose=True)
-
- # Create venv
- await cmd.run([python_bin, "-m", "venv", venv_dir],
- verbose=args.verbose, check=True)
- pip = os.path.join(venv_dir, "bin/pip")
-
- # Create install list
- reqs = []
- for pkg, verstr in cur_requirements.items():
- reqs.append(f"{pkg}=={verstr}")
-
- reqs.append(f"Sphinx=={ver}")
-
- await cmd.run([pip, "install"] + reqs, check=True, verbose=args.verbose)
-
- # Freeze environment
- result = await cmd.run([pip, "freeze"], verbose=False, check=True)
-
- # Pip install succeeded. Write requirements file
- if args.req_file:
-            with open(req_file, "w", encoding="utf-8") as req_fp:
-                req_fp.write(result.stdout)
-
- if args.build:
- start_time = time.time()
-
- # Prepare a venv environment
- env = os.environ.copy()
- bin_dir = os.path.join(venv_dir, "bin")
- env["PATH"] = bin_dir + ":" + env["PATH"]
- env["VIRTUAL_ENV"] = venv_dir
- if "PYTHONHOME" in env:
- del env["PYTHONHOME"]
-
- # Test doc build
- await cmd.run(["make", "cleandocs"], env=env, check=True)
- make = ["make"]
-
- if args.output:
- sphinx_build = os.path.realpath(f"{bin_dir}/sphinx-build")
- make += [f"O={args.output}", f"SPHINXBUILD={sphinx_build}"]
-
- if args.make_args:
- make += args.make_args
-
- make += args.targets
-
- if args.verbose:
- cmd.log(f". {bin_dir}/activate", verbose=True)
- await cmd.run(make, env=env, check=True, verbose=True)
- if args.verbose:
- cmd.log("deactivate", verbose=True)
-
- end_time = time.time()
- elapsed_time = end_time - start_time
-            hours, rem = divmod(elapsed_time, 3600)
-            minutes, seconds = divmod(rem, 60)
-
- hours = int(hours)
- minutes = int(minutes)
- seconds = int(seconds)
-
- self.built_time[ver] = f"{hours:02d}:{minutes:02d}:{seconds:02d}"
-
- cmd.log(f"Finished doc build for Sphinx {ver}. Elapsed time: {self.built_time[ver]}", verbose=True)
-
- async def run(self, args):
- """
-        Navigate through multiple Sphinx versions, handling each of them
-        in a loop.
- """
-
-        if args.log:
-            fp = open(args.log, "w", encoding="utf-8")
-        else:
-            # Without a log file, default to verbose output
-            fp = None
-            if not args.verbose:
-                args.verbose = True
-
- cur_requirements = {}
- python_bin = min_python_bin
-
- vers = set(SPHINX_REQUIREMENTS.keys()) | set(args.versions)
-
- for cur_ver in sorted(vers):
- if cur_ver in SPHINX_REQUIREMENTS:
- new_reqs = SPHINX_REQUIREMENTS[cur_ver]
- cur_requirements.update(new_reqs)
-
- if cur_ver in PYTHON_VER_CHANGES: # pylint: disable=R1715
- python_bin = PYTHON_VER_CHANGES[cur_ver]
-
- if cur_ver not in args.versions:
- continue
-
- if args.min_version:
- if cur_ver < args.min_version:
- continue
-
- if args.max_version:
- if cur_ver > args.max_version:
- break
-
- await self._handle_version(args, fp, cur_ver, cur_requirements,
- python_bin)
-
- if args.build:
- cmd = AsyncCommands(fp)
- cmd.log("\nSummary:", verbose=True)
- for ver, elapsed_time in sorted(self.built_time.items()):
- cmd.log(f"\tSphinx {ver} elapsed time: {elapsed_time}",
- verbose=True)
-
- if fp:
- fp.close()
-
-def parse_version(ver_str):
- """Convert a version string into a tuple."""
-
- return tuple(map(int, ver_str.split(".")))
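-
-# e.g. parse_version("8.2.3") -> (8, 2, 3)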
-
-
-DEFAULT_VERS = " - "
-DEFAULT_VERS += "\n - ".join(map(lambda v: f"{v[0]}.{v[1]}.{v[2]}",
- DEFAULT_VERSIONS_TO_TEST))
-
-SCRIPT = os.path.relpath(__file__)
-
-DESCRIPTION = f"""
-This tool allows creating Python virtual environments for different
-Sphinx versions that are supported by the Linux Kernel build system.
-
-Besides creating the virtual environment, it can also test building
-the documentation using "make htmldocs" (and/or other doc targets).
-
-If called without "--versions" argument, it covers the versions shipped
-on major distros, plus the lowest supported version:
-
-{DEFAULT_VERS}
-
-A typical usage is to run:
-
- {SCRIPT} -m -l sphinx_builds.log
-
-This will create one virtual env for the default version set and run
-"make htmldocs" for each version, creating a log file with the
-excecuted commands on it.
-
-NOTE: The build time can be very long, specially on old versions. Also, there
-is a known bug with Sphinx version 6.0.x: each subprocess uses a lot of
-memory. That, together with "-jauto" may cause OOM killer to cause
-failures at the doc generation. To minimize the risk, you may use the
-"-a" command line parameter to constrain the built directories and/or
-reduce the number of threads from "-jauto" to, for instance, "-j4":
-
- {SCRIPT} -m -V 6.0.1 -a "SPHINXDIRS=process" "SPHINXOPTS='-j4'"
-
-"""
-
-MAKE_TARGETS = [
- "htmldocs",
- "texinfodocs",
- "infodocs",
- "latexdocs",
- "pdfdocs",
- "epubdocs",
- "xmldocs",
-]
-
-async def main():
- """Main program"""
-
- parser = argparse.ArgumentParser(description=DESCRIPTION,
- formatter_class=argparse.RawDescriptionHelpFormatter)
-
- ver_group = parser.add_argument_group("Version range options")
-
- ver_group.add_argument('-V', '--versions', nargs="*",
-                           default=DEFAULT_VERSIONS_TO_TEST, type=parse_version,
- help='Sphinx versions to test')
- ver_group.add_argument('--min-version', "--min", type=parse_version,
- help='Sphinx minimal version')
- ver_group.add_argument('--max-version', "--max", type=parse_version,
- help='Sphinx maximum version')
- ver_group.add_argument('-f', '--full', action='store_true',
- help='Add all Sphinx (major,minor) supported versions to the version range')
-
- build_group = parser.add_argument_group("Build options")
-
- build_group.add_argument('-b', '--build', action='store_true',
- help='Build documentation')
- build_group.add_argument('-a', '--make-args', nargs="*",
- help='extra arguments for make, like SPHINXDIRS=netlink/specs',
- )
- build_group.add_argument('-t', '--targets', nargs="+", choices=MAKE_TARGETS,
- default=[MAKE_TARGETS[0]],
- help="make build targets. Default: htmldocs.")
- build_group.add_argument("-o", '--output',
- help="output directory for the make O=OUTPUT")
-
- other_group = parser.add_argument_group("Other options")
-
- other_group.add_argument('-r', '--req-file', action='store_true',
- help='write a requirements.txt file')
- other_group.add_argument('-l', '--log',
- help='Log command output on a file')
- other_group.add_argument('-v', '--verbose', action='store_true',
- help='Verbose all commands')
- other_group.add_argument('-i', '--wait-input', action='store_true',
- help='Wait for an enter before going to the next version')
-
- args = parser.parse_args()
-
- if not args.make_args:
- args.make_args = []
-
-
- if args.full:
- args.versions += list(SPHINX_REQUIREMENTS.keys())
-
- venv = SphinxVenv()
- await venv.run(args)
-
-
-# Call main method
-if __name__ == "__main__":
- asyncio.run(main())