author | Linus Torvalds <torvalds@linux-foundation.org> | 2025-03-24 18:42:27 -0700 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2025-03-24 18:42:27 -0700 |
commit | f81c2b81508c4f479f2cf1ac0dbb138927dc6188 (patch) | |
tree | 7e9adec195e720111148855fafba741b1ea04d24 /scripts | |
parent | 8541bc1a52e7e8facd67cef1e659f5714abc95ab (diff) | |
parent | 323cc36ef68bc2c8ca0bd5f528736432afc1a36a (diff) | |
Merge tag 'docs-6.15' of git://git.lwn.net/linux
Pull documentation updates from Jonathan Corbet:
"It has been a reasonably busy cycle for docs...
- Significant changes throughout the tree to bring Python code up to
  current standards and raise the minimum Python required to 3.9

  Much of this is preparatory to replacing the ancient Perl
  scripts/kernel-doc horror with a slightly less horrifying Python
  implementation, expected for 6.16

- Update the minimum Sphinx required to 3.4.3, allowing us to remove
  a bunch of older compatibility code

- Rework and improve the generation of the ABI documentation

  (All of the above done by Mauro)

- Lots of translation updates. Alex Shi and Yanteng Si are taking on
  responsibility for the Chinese translations going forward; that
  work will still get to you via docs-next

- Try to standardize the format for indicating a developer's
  affiliation in commit tags

- Clarify the TAB's role in CoC enforcement actions

- Try to spell out the rules for when a commit tag can name another
  developer without their explicit permission

Plus lots of other typo fixes and updates"
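The ABI documentation rework mentioned above drops the Perl scripts/get_abi.pl in favour of scripts/get_abi.py plus a small parser library under scripts/lib/abi/, both visible in the diff below. A minimal sketch of the new flow, based on the AbiRest handler in the added get_abi.py (driving the library directly like this is only an illustration; the supported entry point is `scripts/get_abi.py rest`):

```python
import os
import sys

# The parser library is added under scripts/lib/abi/ by this series;
# get_abi.py performs an equivalent sys.path insertion itself.
sys.path.insert(0, os.path.join("scripts", "lib", "abi"))

from abi_parser import AbiParser

parser = AbiParser("Documentation/ABI")  # directory holding the ABI files
parser.parse_abi()                       # parse What:/Description:/... entries
parser.check_issues()                    # warn about duplicated What: definitions

# doc() yields (ReST text, source file, line number) tuples; printing the
# text is essentially what "scripts/get_abi.py rest" does.
for text, fname, ln in parser.doc():
    print(text)
```

The search and validate sub-commands reuse the same AbiParser object; the undefined sub-command builds on it via AbiRegex, sketched after the diffstat below.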
* tag 'docs-6.15' of git://git.lwn.net/linux: (98 commits)
docs/zh_CN: fix spelling mistake
docs/Chinese: change the disclaimer words
docs/zh_CN: Add snp-tdx-threat-model index Chinese translation
docs: driver-api: firmware: clarify userspace requirements
docs: clarify rules wrt tagging other people
docs: Remove outdated highuid.rst documentation
Documentation: dma-buf: heaps: Add heap name definitions
docs/.../submit-checklist: Use Documentation/admin-guide/abi.rst for cross-ref of README
docs: Correct installation instruction
Documentation: kcsan: fix "Plain Accesses and Data Races" URL in kcsan.rst
Documentation/CoC: Spell out the TAB role in enforcement decisions
Documentation: ocxl.rst: Update consortium site
scripts: get_feat.pl: substitute s390x with s390
scripts/kernel-doc: drop dead code for Wcontents_before_sections
scripts/kernel-doc: don't add not needed new lines
docs: driver-api/infiniband.rst: fix Kerneldoc markup
drivers: firewire: firewire-cdev.h: fix identation on a kernel-doc markup
drivers: media: intel-ipu3.h: fix identation on a kernel-doc markup
include/asm-generic/io.h: fix kerneldoc markup
Docs/arch/arm64: Fix spelling in amu.rst
...
Diffstat (limited to 'scripts')
-rwxr-xr-x | scripts/documentation-file-ref-check | 2
-rwxr-xr-x | scripts/get_abi.pl | 1103
-rwxr-xr-x | scripts/get_abi.py | 214
-rwxr-xr-x | scripts/get_feat.pl | 4
-rwxr-xr-x | scripts/kernel-doc | 163
-rw-r--r-- | scripts/lib/abi/abi_parser.py | 628
-rw-r--r-- | scripts/lib/abi/abi_regex.py | 234
-rw-r--r-- | scripts/lib/abi/helpers.py | 38
-rw-r--r-- | scripts/lib/abi/system_symbols.py | 378
9 files changed, 1516 insertions, 1248 deletions
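The undefined sub-command added by scripts/get_abi.py (its argparse definition appears in the diff below) converts the documented What: entries into regular expressions and matches them against the nodes found under /sys. A minimal sketch mirroring the AbiUndefined handler, assuming the scripts/lib/abi modules are importable and keeping the script's own argparse defaults:

```python
import os
import sys

sys.path.insert(0, os.path.join("scripts", "lib", "abi"))

from abi_regex import AbiRegex            # turns What: entries into regexes
from system_symbols import SystemSymbols  # walks sysfs and matches the regexes

abi = AbiRegex("Documentation/ABI")
symbols = SystemSymbols(abi=abi, hints=False, sysfs="/sys")

# Report sysfs nodes that no documented ABI entry matches; this is the
# same call that "scripts/get_abi.py undefined" ends up making.
symbols.check_undefined_symbols(dry_run=False, found=False,
                                max_workers=1, chunk_size=50)
```

By default this runs on a single CPU; as the script's help text explains, multiple workers (-j/--jobs) only pay off when the number of regular expressions to check per devnode grows very large.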
diff --git a/scripts/documentation-file-ref-check b/scripts/documentation-file-ref-check index 68083f2f1122..408b1dbe7884 100755 --- a/scripts/documentation-file-ref-check +++ b/scripts/documentation-file-ref-check @@ -92,7 +92,7 @@ while (<IN>) { next if ($f =~ m,^Next/,); # Makefiles and scripts contain nasty expressions to parse docs - next if ($f =~ m/Makefile/ || $f =~ m/\.sh$/); + next if ($f =~ m/Makefile/ || $f =~ m/\.(sh|py|pl|~|rej|org|orig)$/); # It doesn't make sense to parse hidden files next if ($f =~ m#/\.#); diff --git a/scripts/get_abi.pl b/scripts/get_abi.pl deleted file mode 100755 index de1c0354b50c..000000000000 --- a/scripts/get_abi.pl +++ /dev/null @@ -1,1103 +0,0 @@ -#!/usr/bin/env perl -# SPDX-License-Identifier: GPL-2.0 - -BEGIN { $Pod::Usage::Formatter = 'Pod::Text::Termcap'; } - -use strict; -use warnings; -use utf8; -use Pod::Usage qw(pod2usage); -use Getopt::Long; -use File::Find; -use IO::Handle; -use Fcntl ':mode'; -use Cwd 'abs_path'; -use Data::Dumper; - -my $help = 0; -my $hint = 0; -my $man = 0; -my $debug = 0; -my $enable_lineno = 0; -my $show_warnings = 1; -my $prefix="Documentation/ABI"; -my $sysfs_prefix="/sys"; -my $search_string; - -# Debug options -my $dbg_what_parsing = 1; -my $dbg_what_open = 2; -my $dbg_dump_abi_structs = 4; -my $dbg_undefined = 8; - -$Data::Dumper::Indent = 1; -$Data::Dumper::Terse = 1; - -# -# If true, assumes that the description is formatted with ReST -# -my $description_is_rst = 1; - -GetOptions( - "debug=i" => \$debug, - "enable-lineno" => \$enable_lineno, - "rst-source!" => \$description_is_rst, - "dir=s" => \$prefix, - 'help|?' => \$help, - "show-hints" => \$hint, - "search-string=s" => \$search_string, - man => \$man -) or pod2usage(2); - -pod2usage(1) if $help; -pod2usage(-exitstatus => 0, -noperldoc, -verbose => 2) if $man; - -pod2usage(2) if (scalar @ARGV < 1 || @ARGV > 2); - -my ($cmd, $arg) = @ARGV; - -pod2usage(2) if ($cmd ne "search" && $cmd ne "rest" && $cmd ne "validate" && $cmd ne "undefined"); -pod2usage(2) if ($cmd eq "search" && !$arg); - -require Data::Dumper if ($debug & $dbg_dump_abi_structs); - -my %data; -my %symbols; - -# -# Displays an error message, printing file name and line -# -sub parse_error($$$$) { - my ($file, $ln, $msg, $data) = @_; - - return if (!$show_warnings); - - $data =~ s/\s+$/\n/; - - print STDERR "Warning: file $file#$ln:\n\t$msg"; - - if ($data ne "") { - print STDERR ". 
Line\n\t\t$data"; - } else { - print STDERR "\n"; - } -} - -# -# Parse an ABI file, storing its contents at %data -# -sub parse_abi { - my $file = $File::Find::name; - - my $mode = (stat($file))[2]; - return if ($mode & S_IFDIR); - return if ($file =~ m,/README,); - return if ($file =~ m,/\.,); - return if ($file =~ m,\.(rej|org|orig|bak)$,); - - my $name = $file; - $name =~ s,.*/,,; - - my $fn = $file; - $fn =~ s,.*Documentation/ABI/,,; - - my $nametag = "File $fn"; - $data{$nametag}->{what} = "File $name"; - $data{$nametag}->{type} = "File"; - $data{$nametag}->{file} = $name; - $data{$nametag}->{filepath} = $file; - $data{$nametag}->{is_file} = 1; - $data{$nametag}->{line_no} = 1; - - my $type = $file; - $type =~ s,.*/(.*)/.*,$1,; - - my $what; - my $new_what; - my $tag = ""; - my $ln; - my $xrefs; - my $space; - my @labels; - my $label = ""; - - print STDERR "Opening $file\n" if ($debug & $dbg_what_open); - open IN, $file; - while(<IN>) { - $ln++; - if (m/^(\S+)(:\s*)(.*)/i) { - my $new_tag = lc($1); - my $sep = $2; - my $content = $3; - - if (!($new_tag =~ m/(what|where|date|kernelversion|contact|description|users)/)) { - if ($tag eq "description") { - # New "tag" is actually part of - # description. Don't consider it a tag - $new_tag = ""; - } elsif ($tag ne "") { - parse_error($file, $ln, "tag '$tag' is invalid", $_); - } - } - - # Invalid, but it is a common mistake - if ($new_tag eq "where") { - parse_error($file, $ln, "tag 'Where' is invalid. Should be 'What:' instead", ""); - $new_tag = "what"; - } - - if ($new_tag =~ m/what/) { - $space = ""; - $content =~ s/[,.;]$//; - - push @{$symbols{$content}->{file}}, " $file:" . ($ln - 1); - - if ($tag =~ m/what/) { - $what .= "\xac" . $content; - } else { - if ($what) { - parse_error($file, $ln, "What '$what' doesn't have a description", "") if (!$data{$what}->{description}); - - foreach my $w(split /\xac/, $what) { - $symbols{$w}->{xref} = $what; - }; - } - - $what = $content; - $label = $content; - $new_what = 1; - } - push @labels, [($content, $label)]; - $tag = $new_tag; - - push @{$data{$nametag}->{symbols}}, $content if ($data{$nametag}->{what}); - next; - } - - if ($tag ne "" && $new_tag) { - $tag = $new_tag; - - if ($new_what) { - @{$data{$what}->{label_list}} = @labels if ($data{$nametag}->{what}); - @labels = (); - $label = ""; - $new_what = 0; - - $data{$what}->{type} = $type; - if (!defined($data{$what}->{file})) { - $data{$what}->{file} = $name; - $data{$what}->{filepath} = $file; - } else { - $data{$what}->{description} .= "\n\n" if (defined($data{$what}->{description})); - if ($name ne $data{$what}->{file}) { - $data{$what}->{file} .= " " . $name; - $data{$what}->{filepath} .= " " . $file; - } - } - print STDERR "\twhat: $what\n" if ($debug & $dbg_what_parsing); - $data{$what}->{line_no} = $ln; - } else { - $data{$what}->{line_no} = $ln if (!defined($data{$what}->{line_no})); - } - - if (!$what) { - parse_error($file, $ln, "'What:' should come first:", $_); - next; - } - if ($new_tag eq "description") { - $sep =~ s,:, ,; - $content = ' ' x length($new_tag) . $sep . 
$content; - while ($content =~ s/\t+/' ' x (length($&) * 8 - length($`) % 8)/e) {} - if ($content =~ m/^(\s*)(\S.*)$/) { - # Preserve initial spaces for the first line - $space = $1; - $content = "$2\n"; - $data{$what}->{$tag} .= $content; - } else { - undef($space); - } - - } else { - $data{$what}->{$tag} = $content; - } - next; - } - } - - # Store any contents before tags at the database - if (!$tag && $data{$nametag}->{what}) { - $data{$nametag}->{description} .= $_; - next; - } - - if ($tag eq "description") { - my $content = $_; - while ($content =~ s/\t+/' ' x (length($&) * 8 - length($`) % 8)/e) {} - if (m/^\s*\n/) { - $data{$what}->{$tag} .= "\n"; - next; - } - - if (!defined($space)) { - # Preserve initial spaces for the first line - if ($content =~ m/^(\s*)(\S.*)$/) { - $space = $1; - $content = "$2\n"; - } - } else { - $space = "" if (!($content =~ s/^($space)//)); - } - $data{$what}->{$tag} .= $content; - - next; - } - if (m/^\s*(.*)/) { - $data{$what}->{$tag} .= "\n$1"; - $data{$what}->{$tag} =~ s/\n+$//; - next; - } - - # Everything else is error - parse_error($file, $ln, "Unexpected content", $_); - } - $data{$nametag}->{description} =~ s/^\n+// if ($data{$nametag}->{description}); - if ($what) { - parse_error($file, $ln, "What '$what' doesn't have a description", "") if (!$data{$what}->{description}); - - foreach my $w(split /\xac/,$what) { - $symbols{$w}->{xref} = $what; - }; - } - close IN; -} - -sub create_labels { - my %labels; - - foreach my $what (keys %data) { - next if ($data{$what}->{file} eq "File"); - - foreach my $p (@{$data{$what}->{label_list}}) { - my ($content, $label) = @{$p}; - $label = "abi_" . $label . " "; - $label =~ tr/A-Z/a-z/; - - # Convert special chars to "_" - $label =~s/([\x00-\x2f\x3a-\x40\x5b-\x60\x7b-\xff])/_/g; - $label =~ s,_+,_,g; - $label =~ s,_$,,; - - # Avoid duplicated labels - while (defined($labels{$label})) { - my @chars = ("A".."Z", "a".."z"); - $label .= $chars[rand @chars]; - } - $labels{$label} = 1; - - $data{$what}->{label} = $label; - - # only one label is enough - last; - } - } -} - -# -# Outputs the book on ReST format -# - -# \b doesn't work well with paths. So, we need to define something else: -# Boundaries are punct characters, spaces and end-of-line -my $start = qr {(^|\s|\() }x; -my $bondary = qr { ([,.:;\)\s]|\z) }x; -my $xref_match = qr { $start(\/(sys|config|proc|dev|kvd)\/[^,.:;\)\s]+)$bondary }x; -my $symbols = qr { ([\x01-\x08\x0e-\x1f\x21-\x2f\x3a-\x40\x7b-\xff]) }x; - -sub output_rest { - create_labels(); - - my $part = ""; - - foreach my $what (sort { - ($data{$a}->{type} eq "File") cmp ($data{$b}->{type} eq "File") || - $a cmp $b - } keys %data) { - my $type = $data{$what}->{type}; - - my @file = split / /, $data{$what}->{file}; - my @filepath = split / /, $data{$what}->{filepath}; - - if ($enable_lineno) { - printf ".. LINENO %s%s#%s\n\n", - $prefix, $file[0], - $data{$what}->{line_no}; - } - - my $w = $what; - - if ($type ne "File") { - my $cur_part = $what; - if ($what =~ '/') { - if ($what =~ m#^(\/?(?:[\w\-]+\/?){1,2})#) { - $cur_part = "Symbols under $1"; - $cur_part =~ s,/$,,; - } - } - - if ($cur_part ne "" && $part ne $cur_part) { - $part = $cur_part; - my $bar = $part; - $bar =~ s/./-/g; - print "$part\n$bar\n\n"; - } - - printf ".. _%s:\n\n", $data{$what}->{label}; - - my @names = split /\xac/,$w; - my $len = 0; - - foreach my $name (@names) { - $name =~ s/$symbols/\\$1/g; - $name = "**$name**"; - $len = length($name) if (length($name) > $len); - } - - print "+-" . "-" x $len . 
"-+\n"; - foreach my $name (@names) { - printf "| %s", $name . " " x ($len - length($name)) . " |\n"; - print "+-" . "-" x $len . "-+\n"; - } - - print "\n"; - } - - for (my $i = 0; $i < scalar(@filepath); $i++) { - my $path = $filepath[$i]; - my $f = $file[$i]; - - $path =~ s,.*/(.*/.*),$1,;; - $path =~ s,[/\-],_,g;; - my $fileref = "abi_file_".$path; - - if ($type eq "File") { - print ".. _$fileref:\n\n"; - } else { - print "Defined on file :ref:`$f <$fileref>`\n\n"; - } - } - - if ($type eq "File") { - my $bar = $w; - $bar =~ s/./-/g; - print "$w\n$bar\n\n"; - } - - my $desc = ""; - $desc = $data{$what}->{description} if (defined($data{$what}->{description})); - $desc =~ s/\s+$/\n/; - - if (!($desc =~ /^\s*$/)) { - if ($description_is_rst) { - # Remove title markups from the description - # Having titles inside ABI files will only work if extra - # care would be taken in order to strictly follow the same - # level order for each markup. - $desc =~ s/\n[\-\*\=\^\~]+\n/\n\n/g; - - # Enrich text by creating cross-references - - my $new_desc = ""; - my $init_indent = -1; - my $literal_indent = -1; - - open(my $fh, "+<", \$desc); - while (my $d = <$fh>) { - my $indent = $d =~ m/^(\s+)/; - my $spaces = length($indent); - $init_indent = $indent if ($init_indent < 0); - if ($literal_indent >= 0) { - if ($spaces > $literal_indent) { - $new_desc .= $d; - next; - } else { - $literal_indent = -1; - } - } else { - if ($d =~ /()::$/ && !($d =~ /^\s*\.\./)) { - $literal_indent = $spaces; - } - } - - $d =~ s,Documentation/(?!devicetree)(\S+)\.rst,:doc:`/$1`,g; - - my @matches = $d =~ m,Documentation/ABI/([\w\/\-]+),g; - foreach my $f (@matches) { - my $xref = $f; - my $path = $f; - $path =~ s,.*/(.*/.*),$1,;; - $path =~ s,[/\-],_,g;; - $xref .= " <abi_file_" . $path . ">"; - $d =~ s,\bDocumentation/ABI/$f\b,:ref:`$xref`,g; - } - - # Seek for cross reference symbols like /sys/... - @matches = $d =~ m/$xref_match/g; - - foreach my $s (@matches) { - next if (!($s =~ m,/,)); - if (defined($data{$s}) && defined($data{$s}->{label})) { - my $xref = $s; - - $xref =~ s/$symbols/\\$1/g; - $xref = ":ref:`$xref <" . $data{$s}->{label} . 
">`"; - - $d =~ s,$start$s$bondary,$1$xref$2,g; - } - } - $new_desc .= $d; - } - close $fh; - - - print "$new_desc\n\n"; - } else { - $desc =~ s/^\s+//; - - # Remove title markups from the description, as they won't work - $desc =~ s/\n[\-\*\=\^\~]+\n/\n\n/g; - - if ($desc =~ m/\:\n/ || $desc =~ m/\n[\t ]+/ || $desc =~ m/[\x00-\x08\x0b-\x1f\x7b-\xff]/) { - # put everything inside a code block - $desc =~ s/\n/\n /g; - - print "::\n\n"; - print " $desc\n\n"; - } else { - # Escape any special chars from description - $desc =~s/([\x00-\x08\x0b-\x1f\x21-\x2a\x2d\x2f\x3c-\x40\x5c\x5e-\x60\x7b-\xff])/\\$1/g; - print "$desc\n\n"; - } - } - } else { - print "DESCRIPTION MISSING for $what\n\n" if (!$data{$what}->{is_file}); - } - - if ($data{$what}->{symbols}) { - printf "Has the following ABI:\n\n"; - - foreach my $content(@{$data{$what}->{symbols}}) { - my $label = $data{$symbols{$content}->{xref}}->{label}; - - # Escape special chars from content - $content =~s/([\x00-\x1f\x21-\x2f\x3a-\x40\x7b-\xff])/\\$1/g; - - print "- :ref:`$content <$label>`\n\n"; - } - } - - if (defined($data{$what}->{users})) { - my $users = $data{$what}->{users}; - - $users =~ s/\n/\n\t/g; - printf "Users:\n\t%s\n\n", $users if ($users ne ""); - } - - } -} - -# -# Searches for ABI symbols -# -sub search_symbols { - foreach my $what (sort keys %data) { - next if (!($what =~ m/($arg)/)); - - my $type = $data{$what}->{type}; - next if ($type eq "File"); - - my $file = $data{$what}->{filepath}; - - $what =~ s/\xac/, /g; - my $bar = $what; - $bar =~ s/./-/g; - - print "\n$what\n$bar\n\n"; - - my $kernelversion = $data{$what}->{kernelversion} if (defined($data{$what}->{kernelversion})); - my $contact = $data{$what}->{contact} if (defined($data{$what}->{contact})); - my $users = $data{$what}->{users} if (defined($data{$what}->{users})); - my $date = $data{$what}->{date} if (defined($data{$what}->{date})); - my $desc = $data{$what}->{description} if (defined($data{$what}->{description})); - - $kernelversion =~ s/^\s+// if ($kernelversion); - $contact =~ s/^\s+// if ($contact); - if ($users) { - $users =~ s/^\s+//; - $users =~ s/\n//g; - } - $date =~ s/^\s+// if ($date); - $desc =~ s/^\s+// if ($desc); - - printf "Kernel version:\t\t%s\n", $kernelversion if ($kernelversion); - printf "Date:\t\t\t%s\n", $date if ($date); - printf "Contact:\t\t%s\n", $contact if ($contact); - printf "Users:\t\t\t%s\n", $users if ($users); - print "Defined on file(s):\t$file\n\n"; - print "Description:\n\n$desc"; - } -} - -# Exclude /sys/kernel/debug and /sys/kernel/tracing from the search path -sub dont_parse_special_attributes { - if (($File::Find::dir =~ m,^/sys/kernel,)) { - return grep {!/(debug|tracing)/ } @_; - } - - if (($File::Find::dir =~ m,^/sys/fs,)) { - return grep {!/(pstore|bpf|fuse)/ } @_; - } - - return @_ -} - -my %leaf; -my %aliases; -my @files; -my %root; - -sub graph_add_file { - my $file = shift; - my $type = shift; - - my $dir = $file; - $dir =~ s,^(.*/).*,$1,; - $file =~ s,.*/,,; - - my $name; - my $file_ref = \%root; - foreach my $edge(split "/", $dir) { - $name .= "$edge/"; - if (!defined ${$file_ref}{$edge}) { - ${$file_ref}{$edge} = { }; - } - $file_ref = \%{$$file_ref{$edge}}; - ${$file_ref}{"__name"} = [ $name ]; - } - $name .= "$file"; - ${$file_ref}{$file} = { - "__name" => [ $name ] - }; - - return \%{$$file_ref{$file}}; -} - -sub graph_add_link { - my $file = shift; - my $link = shift; - - # Traverse graph to find the reference - my $file_ref = \%root; - foreach my $edge(split "/", $file) { - $file_ref = 
\%{$$file_ref{$edge}} || die "Missing node!"; - } - - # do a BFS - - my @queue; - my %seen; - my $st; - - push @queue, $file_ref; - $seen{$start}++; - - while (@queue) { - my $v = shift @queue; - my @child = keys(%{$v}); - - foreach my $c(@child) { - next if $seen{$$v{$c}}; - next if ($c eq "__name"); - - if (!defined($$v{$c}{"__name"})) { - printf STDERR "Error: Couldn't find a non-empty name on a children of $file/.*: "; - print STDERR Dumper(%{$v}); - exit; - } - - # Add new name - my $name = @{$$v{$c}{"__name"}}[0]; - if ($name =~ s#^$file/#$link/#) { - push @{$$v{$c}{"__name"}}, $name; - } - # Add child to the queue and mark as seen - push @queue, $$v{$c}; - $seen{$c}++; - } - } -} - -my $escape_symbols = qr { ([\x01-\x08\x0e-\x1f\x21-\x29\x2b-\x2d\x3a-\x40\x7b-\xfe]) }x; -sub parse_existing_sysfs { - my $file = $File::Find::name; - - my $mode = (lstat($file))[2]; - my $abs_file = abs_path($file); - - my @tmp; - push @tmp, $file; - push @tmp, $abs_file if ($abs_file ne $file); - - foreach my $f(@tmp) { - # Ignore cgroup, as this is big and has zero docs under ABI - return if ($f =~ m#^/sys/fs/cgroup/#); - - # Ignore firmware as it is documented elsewhere - # Either ACPI or under Documentation/devicetree/bindings/ - return if ($f =~ m#^/sys/firmware/#); - - # Ignore some sysfs nodes that aren't actually part of ABI - return if ($f =~ m#/sections|notes/#); - - # Would need to check at - # Documentation/admin-guide/kernel-parameters.txt, but this - # is not easily parseable. - return if ($f =~ m#/parameters/#); - } - - if (S_ISLNK($mode)) { - $aliases{$file} = $abs_file; - return; - } - - return if (S_ISDIR($mode)); - - # Trivial: file is defined exactly the same way at ABI What: - return if (defined($data{$file})); - return if (defined($data{$abs_file})); - - push @files, graph_add_file($abs_file, "file"); -} - -sub get_leave($) -{ - my $what = shift; - my $leave; - - my $l = $what; - my $stop = 1; - - $leave = $l; - $leave =~ s,/$,,; - $leave =~ s,.*/,,; - $leave =~ s/[\(\)]//g; - - # $leave is used to improve search performance at - # check_undefined_symbols, as the algorithm there can seek - # for a small number of "what". It also allows giving a - # hint about a leave with the same name somewhere else. - # However, there are a few occurences where the leave is - # either a wildcard or a number. Just group such cases - # altogether. 
- if ($leave =~ m/\.\*/ || $leave eq "" || $leave =~ /\\d/) { - $leave = "others"; - } - - return $leave; -} - -my @not_found; - -sub check_file($$) -{ - my $file_ref = shift; - my $names_ref = shift; - my @names = @{$names_ref}; - my $file = $names[0]; - - my $found_string; - - my $leave = get_leave($file); - if (!defined($leaf{$leave})) { - $leave = "others"; - } - my @expr = @{$leaf{$leave}->{expr}}; - die ("\rmissing rules for $leave") if (!defined($leaf{$leave})); - - my $path = $file; - $path =~ s,(.*/).*,$1,; - - if ($search_string) { - return if (!($file =~ m#$search_string#)); - $found_string = 1; - } - - for (my $i = 0; $i < @names; $i++) { - if ($found_string && $hint) { - if (!$i) { - print STDERR "--> $names[$i]\n"; - } else { - print STDERR " $names[$i]\n"; - } - } - foreach my $re (@expr) { - print STDERR "$names[$i] =~ /^$re\$/\n" if ($debug && $dbg_undefined); - if ($names[$i] =~ $re) { - return; - } - } - } - - if ($leave ne "others") { - my @expr = @{$leaf{"others"}->{expr}}; - for (my $i = 0; $i < @names; $i++) { - foreach my $re (@expr) { - print STDERR "$names[$i] =~ /^$re\$/\n" if ($debug && $dbg_undefined); - if ($names[$i] =~ $re) { - return; - } - } - } - } - - push @not_found, $file if (!$search_string || $found_string); - - if ($hint && (!$search_string || $found_string)) { - my $what = $leaf{$leave}->{what}; - $what =~ s/\xac/\n\t/g; - if ($leave ne "others") { - print STDERR "\r more likely regexes:\n\t$what\n"; - } else { - print STDERR "\r tested regexes:\n\t$what\n"; - } - } -} - -sub check_undefined_symbols { - my $num_files = scalar @files; - my $next_i = 0; - my $start_time = times; - - @files = sort @files; - - my $last_time = $start_time; - - # When either debug or hint is enabled, there's no sense showing - # progress, as the progress will be overriden. - if ($hint || ($debug && $dbg_undefined)) { - $next_i = $num_files; - } - - my $is_console; - $is_console = 1 if (-t STDERR); - - for (my $i = 0; $i < $num_files; $i++) { - my $file_ref = $files[$i]; - my @names = @{$$file_ref{"__name"}}; - - check_file($file_ref, \@names); - - my $cur_time = times; - - if ($i == $next_i || $cur_time > $last_time + 1) { - my $percent = $i * 100 / $num_files; - - my $tm = $cur_time - $start_time; - my $time = sprintf "%d:%02d", int($tm), 60 * ($tm - int($tm)); - - printf STDERR "\33[2K\r", if ($is_console); - printf STDERR "%s: processing sysfs files... %i%%: $names[0]", $time, $percent; - printf STDERR "\n", if (!$is_console); - STDERR->flush(); - - $next_i = int (($percent + 1) * $num_files / 100); - $last_time = $cur_time; - } - } - - my $cur_time = times; - my $tm = $cur_time - $start_time; - my $time = sprintf "%d:%02d", int($tm), 60 * ($tm - int($tm)); - - printf STDERR "\33[2K\r", if ($is_console); - printf STDERR "%s: processing sysfs files... 
done\n", $time; - - foreach my $file (@not_found) { - print "$file not found.\n"; - } -} - -sub undefined_symbols { - print STDERR "Reading $sysfs_prefix directory contents..."; - find({ - wanted =>\&parse_existing_sysfs, - preprocess =>\&dont_parse_special_attributes, - no_chdir => 1 - }, $sysfs_prefix); - print STDERR "done.\n"; - - $leaf{"others"}->{what} = ""; - - print STDERR "Converting ABI What fields into regexes..."; - foreach my $w (sort keys %data) { - foreach my $what (split /\xac/,$w) { - next if (!($what =~ m/^$sysfs_prefix/)); - - # Convert what into regular expressions - - # Escape dot characters - $what =~ s/\./\xf6/g; - - # Temporarily change [0-9]+ type of patterns - $what =~ s/\[0\-9\]\+/\xff/g; - - # Temporarily change [\d+-\d+] type of patterns - $what =~ s/\[0\-\d+\]/\xff/g; - $what =~ s/\[(\d+)\]/\xf4$1\xf5/g; - - # Temporarily change [0-9] type of patterns - $what =~ s/\[(\d)\-(\d)\]/\xf4$1-$2\xf5/g; - - # Handle multiple option patterns - $what =~ s/[\{\<\[]([\w_]+)(?:[,|]+([\w_]+)){1,}[\}\>\]]/($1|$2)/g; - - # Handle wildcards - $what =~ s,\*,.*,g; - $what =~ s,/\xf6..,/.*,g; - $what =~ s/\<[^\>]+\>/.*/g; - $what =~ s/\{[^\}]+\}/.*/g; - $what =~ s/\[[^\]]+\]/.*/g; - - $what =~ s/[XYZ]/.*/g; - - # Recover [0-9] type of patterns - $what =~ s/\xf4/[/g; - $what =~ s/\xf5/]/g; - - # Remove duplicated spaces - $what =~ s/\s+/ /g; - - # Special case: this ABI has a parenthesis on it - $what =~ s/sqrt\(x^2\+y^2\+z^2\)/sqrt\(x^2\+y^2\+z^2\)/; - - # Special case: drop comparition as in: - # What: foo = <something> - # (this happens on a few IIO definitions) - $what =~ s,\s*\=.*$,,; - - # Escape all other symbols - $what =~ s/$escape_symbols/\\$1/g; - $what =~ s/\\\\/\\/g; - $what =~ s/\\([\[\]\(\)\|])/$1/g; - $what =~ s/(\d+)\\(-\d+)/$1$2/g; - - $what =~ s/\xff/\\d+/g; - - # Special case: IIO ABI which a parenthesis. - $what =~ s/sqrt(.*)/sqrt\(.*\)/; - - # Simplify regexes with multiple .* - $what =~ s#(?:\.\*){2,}##g; -# $what =~ s#\.\*/\.\*#.*#g; - - # Recover dot characters - $what =~ s/\xf6/\./g; - - my $leave = get_leave($what); - - my $added = 0; - foreach my $l (split /\|/, $leave) { - if (defined($leaf{$l})) { - next if ($leaf{$l}->{what} =~ m/\b$what\b/); - $leaf{$l}->{what} .= "\xac" . 
$what; - $added = 1; - } else { - $leaf{$l}->{what} = $what; - $added = 1; - } - } - if ($search_string && $added) { - print STDERR "What: $what\n" if ($what =~ m#$search_string#); - } - - } - } - # Compile regexes - foreach my $l (sort keys %leaf) { - my @expr; - foreach my $w(sort split /\xac/, $leaf{$l}->{what}) { - push @expr, qr /^$w$/; - } - $leaf{$l}->{expr} = \@expr; - } - - # Take links into account - foreach my $link (sort keys %aliases) { - my $abs_file = $aliases{$link}; - graph_add_link($abs_file, $link); - } - print STDERR "done.\n"; - - check_undefined_symbols; -} - -# Ensure that the prefix will always end with a slash -# While this is not needed for find, it makes the patch nicer -# with --enable-lineno -$prefix =~ s,/?$,/,; - -if ($cmd eq "undefined" || $cmd eq "search") { - $show_warnings = 0; -} -# -# Parses all ABI files located at $prefix dir -# -find({wanted =>\&parse_abi, no_chdir => 1}, $prefix); - -print STDERR Data::Dumper->Dump([\%data], [qw(*data)]) if ($debug & $dbg_dump_abi_structs); - -# -# Handles the command -# -if ($cmd eq "undefined") { - undefined_symbols; -} elsif ($cmd eq "search") { - search_symbols; -} else { - if ($cmd eq "rest") { - output_rest; - } - - # Warn about duplicated ABI entries - foreach my $what(sort keys %symbols) { - my @files = @{$symbols{$what}->{file}}; - - next if (scalar(@files) == 1); - - printf STDERR "Warning: $what is defined %d times: @files\n", - scalar(@files); - } -} - -__END__ - -=head1 NAME - -get_abi.pl - parse the Linux ABI files and produce a ReST book. - -=head1 SYNOPSIS - -B<get_abi.pl> [--debug <level>] [--enable-lineno] [--man] [--help] - [--(no-)rst-source] [--dir=<dir>] [--show-hints] - [--search-string <regex>] - <COMMAND> [<ARGUMENT>] - -Where B<COMMAND> can be: - -=over 8 - -B<search> I<SEARCH_REGEX> - search for I<SEARCH_REGEX> inside ABI - -B<rest> - output the ABI in ReST markup language - -B<validate> - validate the ABI contents - -B<undefined> - existing symbols at the system that aren't - defined at Documentation/ABI - -=back - -=head1 OPTIONS - -=over 8 - -=item B<--dir> - -Changes the location of the ABI search. By default, it uses -the Documentation/ABI directory. - -=item B<--rst-source> and B<--no-rst-source> - -The input file may be using ReST syntax or not. Those two options allow -selecting between a rst-compliant source ABI (B<--rst-source>), or a -plain text that may be violating ReST spec, so it requres some escaping -logic (B<--no-rst-source>). - -=item B<--enable-lineno> - -Enable output of .. LINENO lines. - -=item B<--debug> I<debug level> - -Print debug information according with the level, which is given by the -following bitmask: - - - 1: Debug parsing What entries from ABI files; - - 2: Shows what files are opened from ABI files; - - 4: Dump the structs used to store the contents of the ABI files. - -=item B<--show-hints> - -Show hints about possible definitions for the missing ABI symbols. -Used only when B<undefined>. - -=item B<--search-string> I<regex string> - -Show only occurences that match a search string. -Used only when B<undefined>. - -=item B<--help> - -Prints a brief help message and exits. - -=item B<--man> - -Prints the manual page and exits. - -=back - -=head1 DESCRIPTION - -Parse the Linux ABI files from ABI DIR (usually located at Documentation/ABI), -allowing to search for ABI symbols or to produce a ReST book containing -the Linux ABI documentation. 
- -=head1 EXAMPLES - -Search for all stable symbols with the word "usb": - -=over 8 - -$ scripts/get_abi.pl search usb --dir Documentation/ABI/stable - -=back - -Search for all symbols that match the regex expression "usb.*cap": - -=over 8 - -$ scripts/get_abi.pl search usb.*cap - -=back - -Output all obsoleted symbols in ReST format - -=over 8 - -$ scripts/get_abi.pl rest --dir Documentation/ABI/obsolete - -=back - -=head1 BUGS - -Report bugs to Mauro Carvalho Chehab <mchehab+huawei@kernel.org> - -=head1 COPYRIGHT - -Copyright (c) 2016-2021 by Mauro Carvalho Chehab <mchehab+huawei@kernel.org>. - -License GPLv2: GNU GPL version 2 <http://gnu.org/licenses/gpl.html>. - -This is free software: you are free to change and redistribute it. -There is NO WARRANTY, to the extent permitted by law. - -=cut diff --git a/scripts/get_abi.py b/scripts/get_abi.py new file mode 100755 index 000000000000..7ce4748a46d2 --- /dev/null +++ b/scripts/get_abi.py @@ -0,0 +1,214 @@ +#!/usr/bin/env python3 +# pylint: disable=R0903 +# Copyright(c) 2025: Mauro Carvalho Chehab <mchehab@kernel.org>. +# SPDX-License-Identifier: GPL-2.0 + +""" +Parse ABI documentation and produce results from it. +""" + +import argparse +import logging +import os +import sys + +# Import Python modules + +LIB_DIR = "lib/abi" +SRC_DIR = os.path.dirname(os.path.realpath(__file__)) + +sys.path.insert(0, os.path.join(SRC_DIR, LIB_DIR)) + +from abi_parser import AbiParser # pylint: disable=C0413 +from abi_regex import AbiRegex # pylint: disable=C0413 +from helpers import ABI_DIR, DEBUG_HELP # pylint: disable=C0413 +from system_symbols import SystemSymbols # pylint: disable=C0413 + +# Command line classes + + +REST_DESC = """ +Produce output in ReST format. + +The output is done on two sections: + +- Symbols: show all parsed symbols in alphabetic order; +- Files: cross reference the content of each file with the symbols on it. +""" + +class AbiRest: + """Initialize an argparse subparser for rest output""" + + def __init__(self, subparsers): + """Initialize argparse subparsers""" + + parser = subparsers.add_parser("rest", + formatter_class=argparse.RawTextHelpFormatter, + description=REST_DESC) + + parser.add_argument("--enable-lineno", action="store_true", + help="enable lineno") + parser.add_argument("--raw", action="store_true", + help="output text as contained in the ABI files. " + "It not used, output will contain dynamically" + " generated cross references when possible.") + parser.add_argument("--no-file", action="store_true", + help="Don't the files section") + parser.add_argument("--show-hints", help="Show-hints") + + parser.set_defaults(func=self.run) + + def run(self, args): + """Run subparser""" + + parser = AbiParser(args.dir, debug=args.debug) + parser.parse_abi() + parser.check_issues() + + for t in parser.doc(args.raw, not args.no_file): + if args.enable_lineno: + print (f".. 
LINENO {t[1]}#{t[2]}\n\n") + + print(t[0]) + +class AbiValidate: + """Initialize an argparse subparser for ABI validation""" + + def __init__(self, subparsers): + """Initialize argparse subparsers""" + + parser = subparsers.add_parser("validate", + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + description="list events") + + parser.set_defaults(func=self.run) + + def run(self, args): + """Run subparser""" + + parser = AbiParser(args.dir, debug=args.debug) + parser.parse_abi() + parser.check_issues() + + +class AbiSearch: + """Initialize an argparse subparser for ABI search""" + + def __init__(self, subparsers): + """Initialize argparse subparsers""" + + parser = subparsers.add_parser("search", + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + description="Search ABI using a regular expression") + + parser.add_argument("expression", + help="Case-insensitive search pattern for the ABI symbol") + + parser.set_defaults(func=self.run) + + def run(self, args): + """Run subparser""" + + parser = AbiParser(args.dir, debug=args.debug) + parser.parse_abi() + parser.search_symbols(args.expression) + +UNDEFINED_DESC=""" +Check undefined ABIs on local machine. + +Read sysfs devnodes and check if the devnodes there are defined inside +ABI documentation. + +The search logic tries to minimize the number of regular expressions to +search per each symbol. + +By default, it runs on a single CPU, as Python support for CPU threads +is still experimental, and multi-process runs on Python is very slow. + +On experimental tests, if the number of ABI symbols to search per devnode +is contained on a limit of ~150 regular expressions, using a single CPU +is a lot faster than using multiple processes. However, if the number of +regular expressions to check is at the order of ~30000, using multiple +CPUs speeds up the check. +""" + +class AbiUndefined: + """ + Initialize an argparse subparser for logic to check undefined ABI at + the current machine's sysfs + """ + + def __init__(self, subparsers): + """Initialize argparse subparsers""" + + parser = subparsers.add_parser("undefined", + formatter_class=argparse.RawTextHelpFormatter, + description=UNDEFINED_DESC) + + parser.add_argument("-S", "--sysfs-dir", default="/sys", + help="directory where sysfs is mounted") + parser.add_argument("-s", "--search-string", + help="search string regular expression to limit symbol search") + parser.add_argument("-H", "--show-hints", action="store_true", + help="Hints about definitions for missing ABI symbols.") + parser.add_argument("-j", "--jobs", "--max-workers", type=int, default=1, + help="If bigger than one, enables multiprocessing.") + parser.add_argument("-c", "--max-chunk-size", type=int, default=50, + help="Maximum number of chunk size") + parser.add_argument("-f", "--found", action="store_true", + help="Also show found items. " + "Helpful to debug the parser."), + parser.add_argument("-d", "--dry-run", action="store_true", + help="Don't actually search for undefined. 
" + "Helpful to debug the parser."), + + parser.set_defaults(func=self.run) + + def run(self, args): + """Run subparser""" + + abi = AbiRegex(args.dir, debug=args.debug, + search_string=args.search_string) + + abi_symbols = SystemSymbols(abi=abi, hints=args.show_hints, + sysfs=args.sysfs_dir) + + abi_symbols.check_undefined_symbols(dry_run=args.dry_run, + found=args.found, + max_workers=args.jobs, + chunk_size=args.max_chunk_size) + + +def main(): + """Main program""" + + parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter) + + parser.add_argument("-d", "--debug", type=int, default=0, help="debug level") + parser.add_argument("-D", "--dir", default=ABI_DIR, help=DEBUG_HELP) + + subparsers = parser.add_subparsers() + + AbiRest(subparsers) + AbiValidate(subparsers) + AbiSearch(subparsers) + AbiUndefined(subparsers) + + args = parser.parse_args() + + if args.debug: + level = logging.DEBUG + else: + level = logging.INFO + + logging.basicConfig(level=level, format="[%(levelname)s] %(message)s") + + if "func" in args: + args.func(args) + else: + sys.exit(f"Please specify a valid command for {sys.argv[0]}") + + +# Call main method +if __name__ == "__main__": + main() diff --git a/scripts/get_feat.pl b/scripts/get_feat.pl index 5c5397eeb237..40fb28c8424e 100755 --- a/scripts/get_feat.pl +++ b/scripts/get_feat.pl @@ -512,13 +512,13 @@ print STDERR Data::Dumper->Dump([\%data], [qw(*data)]) if ($debug); # Handles the command # if ($cmd eq "current") { - $arch = qx(uname -m | sed 's/x86_64/x86/' | sed 's/i386/x86/'); + $arch = qx(uname -m | sed 's/x86_64/x86/' | sed 's/i386/x86/' | sed 's/s390x/s390/'); $arch =~s/\s+$//; } if ($cmd eq "ls" or $cmd eq "list") { if (!$arch) { - $arch = qx(uname -m | sed 's/x86_64/x86/' | sed 's/i386/x86/'); + $arch = qx(uname -m | sed 's/x86_64/x86/' | sed 's/i386/x86/' | sed 's/s390x/s390/'); $arch =~s/\s+$//; } diff --git a/scripts/kernel-doc b/scripts/kernel-doc index e57c5e989a0a..af6cf408b96d 100755 --- a/scripts/kernel-doc +++ b/scripts/kernel-doc @@ -26,7 +26,7 @@ kernel-doc - Print formatted kernel documentation to stdout kernel-doc [-h] [-v] [-Werror] [-Wall] [-Wreturn] [-Wshort-desc[ription]] [-Wcontents-before-sections] [ -man | - -rst [-sphinx-version VERSION] [-enable-lineno] | + -rst [-enable-lineno] | -none ] [ @@ -130,7 +130,6 @@ if ($#ARGV == -1) { } my $kernelversion; -my ($sphinx_major, $sphinx_minor, $sphinx_patch); my $dohighlight = ""; @@ -138,7 +137,6 @@ my $verbose = 0; my $Werror = 0; my $Wreturn = 0; my $Wshort_desc = 0; -my $Wcontents_before_sections = 0; my $output_mode = "rst"; my $output_preformatted = 0; my $no_doc_sections = 0; @@ -179,7 +177,7 @@ my ($function, %function_table, %parametertypes, $declaration_purpose); my %nosymbol_table = (); my $declaration_start_line; my ($type, $declaration_name, $return_type); -my ($newsection, $newcontents, $prototype, $brcount, %source_map); +my ($newsection, $newcontents, $prototype, $brcount); if (defined($ENV{'KBUILD_VERBOSE'}) && $ENV{'KBUILD_VERBOSE'} =~ '1') { $verbose = 1; @@ -224,7 +222,6 @@ use constant { STATE_INLINE => 7, # gathering doc outside main block }; my $state; -my $in_doc_sect; my $leading_space; # Inline documentation state @@ -333,12 +330,9 @@ while ($ARGV[0] =~ m/^--?(.*)/) { $Wreturn = 1; } elsif ($cmd eq "Wshort-desc" or $cmd eq "Wshort-description") { $Wshort_desc = 1; - } elsif ($cmd eq "Wcontents-before-sections") { - $Wcontents_before_sections = 1; } elsif ($cmd eq "Wall") { $Wreturn = 1; $Wshort_desc = 1; - $Wcontents_before_sections = 1; } 
elsif (($cmd eq "h") || ($cmd eq "help")) { pod2usage(-exitval => 0, -verbose => 2); } elsif ($cmd eq 'no-doc-sections') { @@ -347,23 +341,6 @@ while ($ARGV[0] =~ m/^--?(.*)/) { $enable_lineno = 1; } elsif ($cmd eq 'show-not-found') { $show_not_found = 1; # A no-op but don't fail - } elsif ($cmd eq "sphinx-version") { - my $ver_string = shift @ARGV; - if ($ver_string =~ m/^(\d+)(\.\d+)?(\.\d+)?/) { - $sphinx_major = $1; - if (defined($2)) { - $sphinx_minor = substr($2,1); - } else { - $sphinx_minor = 0; - } - if (defined($3)) { - $sphinx_patch = substr($3,1) - } else { - $sphinx_patch = 0; - } - } else { - die "Sphinx version should either major.minor or major.minor.patch format\n"; - } } else { # Unknown argument pod2usage( @@ -387,8 +364,6 @@ while ($ARGV[0] =~ m/^--?(.*)/) { # continue execution near EOF; -# The C domain dialect changed on Sphinx 3. So, we need to check the -# version in order to produce the right tags. sub findprog($) { foreach(split(/:/, $ENV{PATH})) { @@ -396,42 +371,6 @@ sub findprog($) } } -sub get_sphinx_version() -{ - my $ver; - - my $cmd = "sphinx-build"; - if (!findprog($cmd)) { - my $cmd = "sphinx-build3"; - if (!findprog($cmd)) { - $sphinx_major = 1; - $sphinx_minor = 2; - $sphinx_patch = 0; - printf STDERR "Warning: Sphinx version not found. Using default (Sphinx version %d.%d.%d)\n", - $sphinx_major, $sphinx_minor, $sphinx_patch; - return; - } - } - - open IN, "$cmd --version 2>&1 |"; - while (<IN>) { - if (m/^\s*sphinx-build\s+([\d]+)\.([\d\.]+)(\+\/[\da-f]+)?$/) { - $sphinx_major = $1; - $sphinx_minor = $2; - $sphinx_patch = $3; - last; - } - # Sphinx 1.2.x uses a different format - if (m/^\s*Sphinx.*\s+([\d]+)\.([\d\.]+)$/) { - $sphinx_major = $1; - $sphinx_minor = $2; - $sphinx_patch = $3; - last; - } - } - close IN; -} - # get kernel version from env sub get_kernel_version() { my $version = 'unknown kernel version'; @@ -816,6 +755,10 @@ sub output_highlight_rst { if ($block) { $output .= highlight_block($block); } + + $output =~ s/^\n+//g; + $output =~ s/\n+$//g; + foreach $line (split "\n", $output) { print $lineprefix . $line . "\n"; } @@ -859,9 +802,10 @@ sub output_function_rst(%) { $signature .= ")"; } - if ($sphinx_major < 3) { + if ($args{'typedef'} || $args{'functiontype'} eq "") { + print ".. c:macro:: ". $args{'function'} . "\n\n"; + if ($args{'typedef'}) { - print ".. c:type:: ". $args{'function'} . "\n\n"; print_lineno($declaration_start_line); print " **Typedef**: "; $lineprefix = ""; @@ -869,25 +813,10 @@ sub output_function_rst(%) { print "\n\n**Syntax**\n\n"; print " ``$signature``\n\n"; } else { - print ".. c:function:: $signature\n\n"; + print "``$signature``\n\n"; } } else { - if ($args{'typedef'} || $args{'functiontype'} eq "") { - print ".. c:macro:: ". $args{'function'} . "\n\n"; - - if ($args{'typedef'}) { - print_lineno($declaration_start_line); - print " **Typedef**: "; - $lineprefix = ""; - output_highlight_rst($args{'purpose'}); - print "\n\n**Syntax**\n\n"; - print " ``$signature``\n\n"; - } else { - print "``$signature``\n\n"; - } - } else { - print ".. c:function:: $signature\n\n"; - } + print ".. c:function:: $signature\n\n"; } if (!$args{'typedef'}) { @@ -955,13 +884,9 @@ sub output_enum_rst(%) { my $count; my $outer; - if ($sphinx_major < 3) { - my $name = "enum " . $args{'enum'}; - print "\n\n.. c:type:: " . $name . "\n\n"; - } else { - my $name = $args{'enum'}; - print "\n\n.. c:enum:: " . $name . "\n\n"; - } + my $name = $args{'enum'}; + print "\n\n.. c:enum:: " . $name . 
"\n\n"; + print_lineno($declaration_start_line); $lineprefix = " "; output_highlight_rst($args{'purpose'}); @@ -992,11 +917,8 @@ sub output_typedef_rst(%) { my $oldprefix = $lineprefix; my $name; - if ($sphinx_major < 3) { - $name = "typedef " . $args{'typedef'}; - } else { - $name = $args{'typedef'}; - } + $name = $args{'typedef'}; + print "\n\n.. c:type:: " . $name . "\n\n"; print_lineno($declaration_start_line); $lineprefix = " "; @@ -1012,17 +934,13 @@ sub output_struct_rst(%) { my ($parameter); my $oldprefix = $lineprefix; - if ($sphinx_major < 3) { - my $name = $args{'type'} . " " . $args{'struct'}; - print "\n\n.. c:type:: " . $name . "\n\n"; + my $name = $args{'struct'}; + if ($args{'type'} eq 'union') { + print "\n\n.. c:union:: " . $name . "\n\n"; } else { - my $name = $args{'struct'}; - if ($args{'type'} eq 'union') { - print "\n\n.. c:union:: " . $name . "\n\n"; - } else { - print "\n\n.. c:struct:: " . $name . "\n\n"; - } + print "\n\n.. c:struct:: " . $name . "\n\n"; } + print_lineno($declaration_start_line); $lineprefix = " "; output_highlight_rst($args{'purpose'}); @@ -2005,10 +1923,6 @@ sub map_filename($) { $file = $orig_file; } - if (defined($source_map{$file})) { - $file = $source_map{$file}; - } - return $file; } @@ -2044,7 +1958,6 @@ sub process_export_file($) { sub process_normal() { if (/$doc_start/o) { $state = STATE_NAME; # next line is always the function name - $in_doc_sect = 0; $declaration_start_line = $. + 1; } } @@ -2149,7 +2062,6 @@ sub process_body($$) { } if (/$doc_sect/i) { # case insensitive for supported section names - $in_doc_sect = 1; $newsection = $1; $newcontents = $2; @@ -2166,14 +2078,10 @@ sub process_body($$) { } if (($contents ne "") && ($contents ne "\n")) { - if (!$in_doc_sect && $Wcontents_before_sections) { - emit_warning("${file}:$.", "contents before sections\n"); - } dump_section($file, $section, $contents); $section = $section_default; } - $in_doc_sect = 1; $state = STATE_BODY; $contents = $newcontents; $new_start_line = $.; @@ -2387,11 +2295,6 @@ sub process_file($) { close IN_FILE; } - -if ($output_mode eq "rst") { - get_sphinx_version() if (!$sphinx_major); -} - $kernelversion = get_kernel_version(); # generate a sequence of code that will splice in highlighting information @@ -2403,19 +2306,6 @@ for (my $k = 0; $k < @highlights; $k++) { $dohighlight .= "\$contents =~ s:$pattern:$result:gs;\n"; } -# Read the file that maps relative names to absolute names for -# separate source and object directories and for shadow trees. -if (open(SOURCE_MAP, "<.tmp_filelist.txt")) { - my ($relname, $absname); - while(<SOURCE_MAP>) { - chop(); - ($relname, $absname) = (split())[0..1]; - $relname =~ s:^/+::; - $source_map{$relname} = $absname; - } - close(SOURCE_MAP); -} - if ($output_selection == OUTPUT_EXPORTED || $output_selection == OUTPUT_INTERNAL) { @@ -2471,17 +2361,6 @@ Do not output documentation, only warnings. =head3 reStructuredText only -=over 8 - -=item -sphinx-version VERSION - -Use the ReST C domain dialect compatible with a specific Sphinx Version. - -If not specified, kernel-doc will auto-detect using the sphinx-build version -found on PATH. 
- -=back - =head2 Output selection (mutually exclusive): =over 8 diff --git a/scripts/lib/abi/abi_parser.py b/scripts/lib/abi/abi_parser.py new file mode 100644 index 000000000000..66a738013ce1 --- /dev/null +++ b/scripts/lib/abi/abi_parser.py @@ -0,0 +1,628 @@ +#!/usr/bin/env python3 +# pylint: disable=R0902,R0903,R0911,R0912,R0913,R0914,R0915,R0917,C0302 +# Copyright(c) 2025: Mauro Carvalho Chehab <mchehab@kernel.org>. +# SPDX-License-Identifier: GPL-2.0 + +""" +Parse ABI documentation and produce results from it. +""" + +from argparse import Namespace +import logging +import os +import re + +from pprint import pformat +from random import randrange, seed + +# Import Python modules + +from helpers import AbiDebug, ABI_DIR + + +class AbiParser: + """Main class to parse ABI files""" + + TAGS = r"(what|where|date|kernelversion|contact|description|users)" + XREF = r"(?:^|\s|\()(\/(?:sys|config|proc|dev|kvd)\/[^,.:;\)\s]+)(?:[,.:;\)\s]|\Z)" + + def __init__(self, directory, logger=None, + enable_lineno=False, show_warnings=True, debug=0): + """Stores arguments for the class and initialize class vars""" + + self.directory = directory + self.enable_lineno = enable_lineno + self.show_warnings = show_warnings + self.debug = debug + + if not logger: + self.log = logging.getLogger("get_abi") + else: + self.log = logger + + self.data = {} + self.what_symbols = {} + self.file_refs = {} + self.what_refs = {} + + # Ignore files that contain such suffixes + self.ignore_suffixes = (".rej", ".org", ".orig", ".bak", "~") + + # Regular expressions used on parser + self.re_abi_dir = re.compile(r"(.*)" + ABI_DIR) + self.re_tag = re.compile(r"(\S+)(:\s*)(.*)", re.I) + self.re_valid = re.compile(self.TAGS) + self.re_start_spc = re.compile(r"(\s*)(\S.*)") + self.re_whitespace = re.compile(r"^\s+") + + # Regular used on print + self.re_what = re.compile(r"(\/?(?:[\w\-]+\/?){1,2})") + self.re_escape = re.compile(r"([\.\x01-\x08\x0e-\x1f\x21-\x2f\x3a-\x40\x7b-\xff])") + self.re_unprintable = re.compile(r"([\x00-\x2f\x3a-\x40\x5b-\x60\x7b-\xff]+)") + self.re_title_mark = re.compile(r"\n[\-\*\=\^\~]+\n") + self.re_doc = re.compile(r"Documentation/(?!devicetree)(\S+)\.rst") + self.re_abi = re.compile(r"(Documentation/ABI/)([\w\/\-]+)") + self.re_xref_node = re.compile(self.XREF) + + def warn(self, fdata, msg, extra=None): + """Displays a parse error if warning is enabled""" + + if not self.show_warnings: + return + + msg = f"{fdata.fname}:{fdata.ln}: {msg}" + if extra: + msg += "\n\t\t" + extra + + self.log.warning(msg) + + def add_symbol(self, what, fname, ln=None, xref=None): + """Create a reference table describing where each 'what' is located""" + + if what not in self.what_symbols: + self.what_symbols[what] = {"file": {}} + + if fname not in self.what_symbols[what]["file"]: + self.what_symbols[what]["file"][fname] = [] + + if ln and ln not in self.what_symbols[what]["file"][fname]: + self.what_symbols[what]["file"][fname].append(ln) + + if xref: + self.what_symbols[what]["xref"] = xref + + def _parse_line(self, fdata, line): + """Parse a single line of an ABI file""" + + new_what = False + new_tag = False + content = None + + match = self.re_tag.match(line) + if match: + new = match.group(1).lower() + sep = match.group(2) + content = match.group(3) + + match = self.re_valid.search(new) + if match: + new_tag = match.group(1) + else: + if fdata.tag == "description": + # New "tag" is actually part of description. 
+ # Don't consider it a tag + new_tag = False + elif fdata.tag != "": + self.warn(fdata, f"tag '{fdata.tag}' is invalid", line) + + if new_tag: + # "where" is Invalid, but was a common mistake. Warn if found + if new_tag == "where": + self.warn(fdata, "tag 'Where' is invalid. Should be 'What:' instead") + new_tag = "what" + + if new_tag == "what": + fdata.space = None + + if content not in self.what_symbols: + self.add_symbol(what=content, fname=fdata.fname, ln=fdata.ln) + + if fdata.tag == "what": + fdata.what.append(content.strip("\n")) + else: + if fdata.key: + if "description" not in self.data.get(fdata.key, {}): + self.warn(fdata, f"{fdata.key} doesn't have a description") + + for w in fdata.what: + self.add_symbol(what=w, fname=fdata.fname, + ln=fdata.what_ln, xref=fdata.key) + + fdata.label = content + new_what = True + + key = "abi_" + content.lower() + fdata.key = self.re_unprintable.sub("_", key).strip("_") + + # Avoid duplicated keys but using a defined seed, to make + # the namespace identical if there aren't changes at the + # ABI symbols + seed(42) + + while fdata.key in self.data: + char = randrange(0, 51) + ord("A") + if char > ord("Z"): + char += ord("a") - ord("Z") - 1 + + fdata.key += chr(char) + + if fdata.key and fdata.key not in self.data: + self.data[fdata.key] = { + "what": [content], + "file": [fdata.file_ref], + "path": fdata.ftype, + "line_no": fdata.ln, + } + + fdata.what = self.data[fdata.key]["what"] + + self.what_refs[content] = fdata.key + fdata.tag = new_tag + fdata.what_ln = fdata.ln + + if fdata.nametag["what"]: + t = (content, fdata.key) + if t not in fdata.nametag["symbols"]: + fdata.nametag["symbols"].append(t) + + return + + if fdata.tag and new_tag: + fdata.tag = new_tag + + if new_what: + fdata.label = "" + + if "description" in self.data[fdata.key]: + self.data[fdata.key]["description"] += "\n\n" + + if fdata.file_ref not in self.data[fdata.key]["file"]: + self.data[fdata.key]["file"].append(fdata.file_ref) + + if self.debug == AbiDebug.WHAT_PARSING: + self.log.debug("what: %s", fdata.what) + + if not fdata.what: + self.warn(fdata, "'What:' should come first:", line) + return + + if new_tag == "description": + fdata.space = None + + if content: + sep = sep.replace(":", " ") + + c = " " * len(new_tag) + sep + content + c = c.expandtabs() + + match = self.re_start_spc.match(c) + if match: + # Preserve initial spaces for the first line + fdata.space = match.group(1) + content = match.group(2) + "\n" + + self.data[fdata.key][fdata.tag] = content + + return + + # Store any contents before tags at the database + if not fdata.tag and "what" in fdata.nametag: + fdata.nametag["description"] += line + return + + if fdata.tag == "description": + content = line.expandtabs() + + if self.re_whitespace.sub("", content) == "": + self.data[fdata.key][fdata.tag] += "\n" + return + + if fdata.space is None: + match = self.re_start_spc.match(content) + if match: + # Preserve initial spaces for the first line + fdata.space = match.group(1) + + content = match.group(2) + "\n" + else: + if content.startswith(fdata.space): + content = content[len(fdata.space):] + + else: + fdata.space = "" + + if fdata.tag == "what": + w = content.strip("\n") + if w: + self.data[fdata.key][fdata.tag].append(w) + else: + self.data[fdata.key][fdata.tag] += content + return + + content = line.strip() + if fdata.tag: + if fdata.tag == "what": + w = content.strip("\n") + if w: + self.data[fdata.key][fdata.tag].append(w) + else: + self.data[fdata.key][fdata.tag] += "\n" + content.rstrip("\n") + 
return + + # Everything else is error + if content: + self.warn(fdata, "Unexpected content", line) + + def parse_readme(self, nametag, fname): + """Parse ABI README file""" + + nametag["what"] = ["Introduction"] + nametag["path"] = "README" + with open(fname, "r", encoding="utf8", errors="backslashreplace") as fp: + for line in fp: + match = self.re_tag.match(line) + if match: + new = match.group(1).lower() + + match = self.re_valid.search(new) + if match: + nametag["description"] += "\n:" + line + continue + + nametag["description"] += line + + def parse_file(self, fname, path, basename): + """Parse a single file""" + + ref = f"abi_file_{path}_{basename}" + ref = self.re_unprintable.sub("_", ref).strip("_") + + # Store per-file state into a namespace variable. This will be used + # by the per-line parser state machine and by the warning function. + fdata = Namespace + + fdata.fname = fname + fdata.name = basename + + pos = fname.find(ABI_DIR) + if pos > 0: + f = fname[pos:] + else: + f = fname + + fdata.file_ref = (f, ref) + self.file_refs[f] = ref + + fdata.ln = 0 + fdata.what_ln = 0 + fdata.tag = "" + fdata.label = "" + fdata.what = [] + fdata.key = None + fdata.xrefs = None + fdata.space = None + fdata.ftype = path.split("/")[0] + + fdata.nametag = {} + fdata.nametag["what"] = [f"ABI file {path}/{basename}"] + fdata.nametag["type"] = "File" + fdata.nametag["path"] = fdata.ftype + fdata.nametag["file"] = [fdata.file_ref] + fdata.nametag["line_no"] = 1 + fdata.nametag["description"] = "" + fdata.nametag["symbols"] = [] + + self.data[ref] = fdata.nametag + + if self.debug & AbiDebug.WHAT_OPEN: + self.log.debug("Opening file %s", fname) + + if basename == "README": + self.parse_readme(fdata.nametag, fname) + return + + with open(fname, "r", encoding="utf8", errors="backslashreplace") as fp: + for line in fp: + fdata.ln += 1 + + self._parse_line(fdata, line) + + if "description" in fdata.nametag: + fdata.nametag["description"] = fdata.nametag["description"].lstrip("\n") + + if fdata.key: + if "description" not in self.data.get(fdata.key, {}): + self.warn(fdata, f"{fdata.key} doesn't have a description") + + for w in fdata.what: + self.add_symbol(what=w, fname=fname, xref=fdata.key) + + def _parse_abi(self, root=None): + """Internal function to parse documentation ABI recursively""" + + if not root: + root = self.directory + + with os.scandir(root) as obj: + for entry in obj: + name = os.path.join(root, entry.name) + + if entry.is_dir(): + self._parse_abi(name) + continue + + if not entry.is_file(): + continue + + basename = os.path.basename(name) + + if basename.startswith("."): + continue + + if basename.endswith(self.ignore_suffixes): + continue + + path = self.re_abi_dir.sub("", os.path.dirname(name)) + + self.parse_file(name, path, basename) + + def parse_abi(self, root=None): + """Parse documentation ABI""" + + self._parse_abi(root) + + if self.debug & AbiDebug.DUMP_ABI_STRUCTS: + self.log.debug(pformat(self.data)) + + def desc_txt(self, desc): + """Print description as found inside ABI files""" + + desc = desc.strip(" \t\n") + + return desc + "\n\n" + + def xref(self, fname): + """ + Converts a Documentation/ABI + basename into a ReST cross-reference + """ + + xref = self.file_refs.get(fname) + if not xref: + return None + else: + return xref + + def desc_rst(self, desc): + """Enrich ReST output by creating cross-references""" + + # Remove title markups from the description + # Having titles inside ABI files will only work if extra + # care would be taken in order to strictly follow 
the same + # level order for each markup. + desc = self.re_title_mark.sub("\n\n", "\n" + desc) + desc = desc.rstrip(" \t\n").lstrip("\n") + + # Python's regex performance for non-compiled expressions is a lot + # than Perl, as Perl automatically caches them at their + # first usage. Here, we'll need to do the same, as otherwise the + # performance penalty is be high + + new_desc = "" + for d in desc.split("\n"): + if d == "": + new_desc += "\n" + continue + + # Use cross-references for doc files where needed + d = self.re_doc.sub(r":doc:`/\1`", d) + + # Use cross-references for ABI generated docs where needed + matches = self.re_abi.findall(d) + for m in matches: + abi = m[0] + m[1] + + xref = self.file_refs.get(abi) + if not xref: + # This may happen if ABI is on a separate directory, + # like parsing ABI testing and symbol is at stable. + # The proper solution is to move this part of the code + # for it to be inside sphinx/kernel_abi.py + self.log.info("Didn't find ABI reference for '%s'", abi) + else: + new = self.re_escape.sub(r"\\\1", m[1]) + d = re.sub(fr"\b{abi}\b", f":ref:`{new} <{xref}>`", d) + + # Seek for cross reference symbols like /sys/... + # Need to be careful to avoid doing it on a code block + if d[0] not in [" ", "\t"]: + matches = self.re_xref_node.findall(d) + for m in matches: + # Finding ABI here is more complex due to wildcards + xref = self.what_refs.get(m) + if xref: + new = self.re_escape.sub(r"\\\1", m) + d = re.sub(fr"\b{m}\b", f":ref:`{new} <{xref}>`", d) + + new_desc += d + "\n" + + return new_desc + "\n\n" + + def doc(self, output_in_txt=False, show_symbols=True, show_file=True, + filter_path=None): + """Print ABI at stdout""" + + part = None + for key, v in sorted(self.data.items(), + key=lambda x: (x[1].get("type", ""), + x[1].get("what"))): + + wtype = v.get("type", "Symbol") + file_ref = v.get("file") + names = v.get("what", [""]) + + if wtype == "File": + if not show_file: + continue + else: + if not show_symbols: + continue + + if filter_path: + if v.get("path") != filter_path: + continue + + msg = "" + + if wtype != "File": + cur_part = names[0] + if cur_part.find("/") >= 0: + match = self.re_what.match(cur_part) + if match: + symbol = match.group(1).rstrip("/") + cur_part = "Symbols under " + symbol + + if cur_part and cur_part != part: + part = cur_part + msg += part + "\n"+ "-" * len(part) +"\n\n" + + msg += f".. _{key}:\n\n" + + max_len = 0 + for i in range(0, len(names)): # pylint: disable=C0200 + names[i] = "**" + self.re_escape.sub(r"\\\1", names[i]) + "**" + + max_len = max(max_len, len(names[i])) + + msg += "+-" + "-" * max_len + "-+\n" + for name in names: + msg += f"| {name}" + " " * (max_len - len(name)) + " |\n" + msg += "+-" + "-" * max_len + "-+\n" + msg += "\n" + + for ref in file_ref: + if wtype == "File": + msg += f".. 
_{ref[1]}:\n\n" + else: + base = os.path.basename(ref[0]) + msg += f"Defined on file :ref:`{base} <{ref[1]}>`\n\n" + + if wtype == "File": + msg += names[0] +"\n" + "-" * len(names[0]) +"\n\n" + + desc = v.get("description") + if not desc and wtype != "File": + msg += f"DESCRIPTION MISSING for {names[0]}\n\n" + + if desc: + if output_in_txt: + msg += self.desc_txt(desc) + else: + msg += self.desc_rst(desc) + + symbols = v.get("symbols") + if symbols: + msg += "Has the following ABI:\n\n" + + for w, label in symbols: + # Escape special chars from content + content = self.re_escape.sub(r"\\\1", w) + + msg += f"- :ref:`{content} <{label}>`\n\n" + + users = v.get("users") + if users and users.strip(" \t\n"): + users = users.strip("\n").replace('\n', '\n\t') + msg += f"Users:\n\t{users}\n\n" + + ln = v.get("line_no", 1) + + yield (msg, file_ref[0][0], ln) + + def check_issues(self): + """Warn about duplicated ABI entries""" + + for what, v in self.what_symbols.items(): + files = v.get("file") + if not files: + # Should never happen if the parser works properly + self.log.warning("%s doesn't have a file associated", what) + continue + + if len(files) == 1: + continue + + f = [] + for fname, lines in sorted(files.items()): + if not lines: + f.append(f"{fname}") + elif len(lines) == 1: + f.append(f"{fname}:{lines[0]}") + else: + m = fname + "lines " + m += ", ".join(str(x) for x in lines) + f.append(m) + + self.log.warning("%s is defined %d times: %s", what, len(f), "; ".join(f)) + + def search_symbols(self, expr): + """ Searches for ABI symbols """ + + regex = re.compile(expr, re.I) + + found_keys = 0 + for t in sorted(self.data.items(), key=lambda x: [0]): + v = t[1] + + wtype = v.get("type", "") + if wtype == "File": + continue + + for what in v.get("what", [""]): + if regex.search(what): + found_keys += 1 + + kernelversion = v.get("kernelversion", "").strip(" \t\n") + date = v.get("date", "").strip(" \t\n") + contact = v.get("contact", "").strip(" \t\n") + users = v.get("users", "").strip(" \t\n") + desc = v.get("description", "").strip(" \t\n") + + files = [] + for f in v.get("file", ()): + files.append(f[0]) + + what = str(found_keys) + ". " + what + title_tag = "-" * len(what) + + print(f"\n{what}\n{title_tag}\n") + + if kernelversion: + print(f"Kernel version:\t\t{kernelversion}") + + if date: + print(f"Date:\t\t\t{date}") + + if contact: + print(f"Contact:\t\t{contact}") + + if users: + print(f"Users:\t\t\t{users}") + + print("Defined on file(s):\t" + ", ".join(files)) + + if desc: + desc = desc.strip("\n") + print(f"\n{desc}\n") + + if not found_keys: + print(f"Regular expression /{expr}/ not found.") diff --git a/scripts/lib/abi/abi_regex.py b/scripts/lib/abi/abi_regex.py new file mode 100644 index 000000000000..8a57846cbc69 --- /dev/null +++ b/scripts/lib/abi/abi_regex.py @@ -0,0 +1,234 @@ +#!/usr/bin/env python3 +# xxpylint: disable=R0903 +# Copyright(c) 2025: Mauro Carvalho Chehab <mchehab@kernel.org>. 
+# SPDX-License-Identifier: GPL-2.0 + +""" +Convert ABI what into regular expressions +""" + +import re +import sys + +from pprint import pformat + +from abi_parser import AbiParser +from helpers import AbiDebug + +class AbiRegex(AbiParser): + """Extends AbiParser to search ABI nodes with regular expressions""" + + # Escape only ASCII visible characters + escape_symbols = r"([\x21-\x29\x2b-\x2d\x3a-\x40\x5c\x60\x7b-\x7e])" + leave_others = "others" + + # Tuples with regular expressions to be compiled and replacement data + re_whats = [ + # Drop escape characters that might exist + (re.compile("\\\\"), ""), + + # Temporarily escape dot characters + (re.compile(r"\."), "\xf6"), + + # Temporarily change [0-9]+ type of patterns + (re.compile(r"\[0\-9\]\+"), "\xff"), + + # Temporarily change [\d+-\d+] type of patterns + (re.compile(r"\[0\-\d+\]"), "\xff"), + (re.compile(r"\[0:\d+\]"), "\xff"), + (re.compile(r"\[(\d+)\]"), "\xf4\\\\d+\xf5"), + + # Temporarily change [0-9] type of patterns + (re.compile(r"\[(\d)\-(\d)\]"), "\xf4\1-\2\xf5"), + + # Handle multiple option patterns + (re.compile(r"[\{\<\[]([\w_]+)(?:[,|]+([\w_]+)){1,}[\}\>\]]"), r"(\1|\2)"), + + # Handle wildcards + (re.compile(r"([^\/])\*"), "\\1\\\\w\xf7"), + (re.compile(r"/\*/"), "/.*/"), + (re.compile(r"/\xf6\xf6\xf6"), "/.*"), + (re.compile(r"\<[^\>]+\>"), "\\\\w\xf7"), + (re.compile(r"\{[^\}]+\}"), "\\\\w\xf7"), + (re.compile(r"\[[^\]]+\]"), "\\\\w\xf7"), + + (re.compile(r"XX+"), "\\\\w\xf7"), + (re.compile(r"([^A-Z])[XYZ]([^A-Z])"), "\\1\\\\w\xf7\\2"), + (re.compile(r"([^A-Z])[XYZ]$"), "\\1\\\\w\xf7"), + (re.compile(r"_[AB]_"), "_\\\\w\xf7_"), + + # Recover [0-9] type of patterns + (re.compile(r"\xf4"), "["), + (re.compile(r"\xf5"), "]"), + + # Remove duplicated spaces + (re.compile(r"\s+"), r" "), + + # Special case: drop comparison as in: + # What: foo = <something> + # (this happens on a few IIO definitions) + (re.compile(r"\s*\=.*$"), ""), + + # Escape all other symbols + (re.compile(escape_symbols), r"\\\1"), + (re.compile(r"\\\\"), r"\\"), + (re.compile(r"\\([\[\]\(\)\|])"), r"\1"), + (re.compile(r"(\d+)\\(-\d+)"), r"\1\2"), + + (re.compile(r"\xff"), r"\\d+"), + + # Special case: IIO ABI which a parenthesis. + (re.compile(r"sqrt(.*)"), r"sqrt(.*)"), + + # Simplify regexes with multiple .* + (re.compile(r"(?:\.\*){2,}"), ""), + + # Recover dot characters + (re.compile(r"\xf6"), "\\."), + # Recover plus characters + (re.compile(r"\xf7"), "+"), + ] + re_has_num = re.compile(r"\\d") + + # Symbol name after escape_chars that are considered a devnode basename + re_symbol_name = re.compile(r"(\w|\\[\.\-\:])+$") + + # List of popular group names to be skipped to minimize regex group size + # Use AbiDebug.SUBGROUP_SIZE to detect those + skip_names = set(["devices", "hwmon"]) + + def regex_append(self, what, new): + """ + Get a search group for a subset of regular expressions. + + As ABI may have thousands of symbols, using a for to search all + regular expressions is at least O(n^2). When there are wildcards, + the complexity increases substantially, eventually becoming exponential. + + To avoid spending too much time on them, use a logic to split + them into groups. The smaller the group, the better, as it would + mean that searches will be confined to a small number of regular + expressions. + + The conversion to a regex subset is tricky, as we need something + that can be easily obtained from the sysfs symbol and from the + regular expression. So, we need to discard nodes that have + wildcards. 
+ + If it can't obtain a subgroup, place the regular expression inside + a special group (self.leave_others). + """ + + search_group = None + + for search_group in reversed(new.split("/")): + if not search_group or search_group in self.skip_names: + continue + if self.re_symbol_name.match(search_group): + break + + if not search_group: + search_group = self.leave_others + + if self.debug & AbiDebug.SUBGROUP_MAP: + self.log.debug("%s: mapped as %s", what, search_group) + + try: + if search_group not in self.regex_group: + self.regex_group[search_group] = [] + + self.regex_group[search_group].append(re.compile(new)) + if self.search_string: + if what.find(self.search_string) >= 0: + print(f"What: {what}") + except re.PatternError: + self.log.warning("Ignoring '%s' as it produced an invalid regex:\n" + " '%s'", what, new) + + def get_regexes(self, what): + """ + Given an ABI devnode, return a list of all regular expressions that + may match it, based on the sub-groups created by regex_append() + """ + + re_list = [] + + patches = what.split("/") + patches.reverse() + patches.append(self.leave_others) + + for search_group in patches: + if search_group in self.regex_group: + re_list += self.regex_group[search_group] + + return re_list + + def __init__(self, *args, **kwargs): + """ + Override init method to get verbose argument + """ + + self.regex_group = None + self.search_string = None + self.re_string = None + + if "search_string" in kwargs: + self.search_string = kwargs.get("search_string") + del kwargs["search_string"] + + if self.search_string: + + try: + self.re_string = re.compile(self.search_string) + except re.PatternError as e: + msg = f"{self.search_string} is not a valid regular expression" + raise ValueError(msg) from e + + super().__init__(*args, **kwargs) + + def parse_abi(self, *args, **kwargs): + + super().parse_abi(*args, **kwargs) + + self.regex_group = {} + + print("Converting ABI What fields into regexes...", file=sys.stderr) + + for t in sorted(self.data.items(), key=lambda x: x[0]): + v = t[1] + if v.get("type") == "File": + continue + + v["regex"] = [] + + for what in v.get("what", []): + if not what.startswith("/sys"): + continue + + new = what + for r, s in self.re_whats: + try: + new = r.sub(s, new) + except re.PatternError as e: + # Help debugging troubles with new regexes + raise re.PatternError(f"{e}\nwhile re.sub('{r.pattern}', {s}, str)") from e + + v["regex"].append(new) + + if self.debug & AbiDebug.REGEX: + self.log.debug("%-90s <== %s", new, what) + + # Store regex into a subgroup to speedup searches + self.regex_append(what, new) + + if self.debug & AbiDebug.SUBGROUP_DICT: + self.log.debug("%s", pformat(self.regex_group)) + + if self.debug & AbiDebug.SUBGROUP_SIZE: + biggestd_keys = sorted(self.regex_group.keys(), + key= lambda k: len(self.regex_group[k]), + reverse=True) + + print("Top regex subgroups:", file=sys.stderr) + for k in biggestd_keys[:10]: + print(f"{k} has {len(self.regex_group[k])} elements", file=sys.stderr) diff --git a/scripts/lib/abi/helpers.py b/scripts/lib/abi/helpers.py new file mode 100644 index 000000000000..639b23e4ca33 --- /dev/null +++ b/scripts/lib/abi/helpers.py @@ -0,0 +1,38 @@ +#!/usr/bin/env python3 +# Copyright(c) 2025: Mauro Carvalho Chehab <mchehab@kernel.org>. 
+# pylint: disable=R0903 +# SPDX-License-Identifier: GPL-2.0 + +""" +Helper classes for ABI parser +""" + +ABI_DIR = "Documentation/ABI/" + + +class AbiDebug: + """Debug levels""" + + WHAT_PARSING = 1 + WHAT_OPEN = 2 + DUMP_ABI_STRUCTS = 4 + UNDEFINED = 8 + REGEX = 16 + SUBGROUP_MAP = 32 + SUBGROUP_DICT = 64 + SUBGROUP_SIZE = 128 + GRAPH = 256 + + +DEBUG_HELP = """ +1 - enable debug parsing logic +2 - enable debug messages on file open +4 - enable debug for ABI parse data +8 - enable extra debug information to identify troubles + with ABI symbols found at the local machine that + weren't found on ABI documentation (used only for + undefined subcommand) +16 - enable debug for what to regex conversion +32 - enable debug for symbol regex subgroups +64 - enable debug for sysfs graph tree variable +""" diff --git a/scripts/lib/abi/system_symbols.py b/scripts/lib/abi/system_symbols.py new file mode 100644 index 000000000000..f15c94a6e33c --- /dev/null +++ b/scripts/lib/abi/system_symbols.py @@ -0,0 +1,378 @@ +#!/usr/bin/env python3 +# pylint: disable=R0902,R0912,R0914,R0915,R1702 +# Copyright(c) 2025: Mauro Carvalho Chehab <mchehab@kernel.org>. +# SPDX-License-Identifier: GPL-2.0 + +""" +Parse ABI documentation and produce results from it. +""" + +import os +import re +import sys + +from concurrent import futures +from datetime import datetime +from random import shuffle + +from helpers import AbiDebug + +class SystemSymbols: + """Stores arguments for the class and initialize class vars""" + + def graph_add_file(self, path, link=None): + """ + add a file path to the sysfs graph stored at self.root + """ + + if path in self.files: + return + + name = "" + ref = self.root + for edge in path.split("/"): + name += edge + "/" + if edge not in ref: + ref[edge] = {"__name": [name.rstrip("/")]} + + ref = ref[edge] + + if link and link not in ref["__name"]: + ref["__name"].append(link.rstrip("/")) + + self.files.add(path) + + def print_graph(self, root_prefix="", root=None, level=0): + """Prints a reference tree graph using UTF-8 characters""" + + if not root: + root = self.root + level = 0 + + # Prevent endless traverse + if level > 5: + return + + if level > 0: + prefix = "├──" + last_prefix = "└──" + else: + prefix = "" + last_prefix = "" + + items = list(root.items()) + + names = root.get("__name", []) + for k, edge in items: + if k == "__name": + continue + + if not k: + k = "/" + + if len(names) > 1: + k += " links: " + ",".join(names[1:]) + + if edge == items[-1][1]: + print(root_prefix + last_prefix + k) + p = root_prefix + if level > 0: + p += " " + self.print_graph(p, edge, level + 1) + else: + print(root_prefix + prefix + k) + p = root_prefix + "│ " + self.print_graph(p, edge, level + 1) + + def _walk(self, root): + """ + Walk through sysfs to get all devnodes that aren't ignored. + + By default, uses /sys as sysfs mounting point. If another + directory is used, it replaces them to /sys at the patches. 
+ """ + + with os.scandir(root) as obj: + for entry in obj: + path = os.path.join(root, entry.name) + if self.sysfs: + p = path.replace(self.sysfs, "/sys", count=1) + else: + p = path + + if self.re_ignore.search(p): + return + + # Handle link first to avoid directory recursion + if entry.is_symlink(): + real = os.path.realpath(path) + if not self.sysfs: + self.aliases[path] = real + else: + real = real.replace(self.sysfs, "/sys", count=1) + + # Add absfile location to graph if it doesn't exist + if not self.re_ignore.search(real): + # Add link to the graph + self.graph_add_file(real, p) + + elif entry.is_file(): + self.graph_add_file(p) + + elif entry.is_dir(): + self._walk(path) + + def __init__(self, abi, sysfs="/sys", hints=False): + """ + Initialize internal variables and get a list of all files inside + sysfs that can currently be parsed. + + Please notice that there are several entries on sysfs that aren't + documented as ABI. Ignore those. + + The real paths will be stored under self.files. Aliases will be + stored in separate, as self.aliases. + """ + + self.abi = abi + self.log = abi.log + + if sysfs != "/sys": + self.sysfs = sysfs.rstrip("/") + else: + self.sysfs = None + + self.hints = hints + + self.root = {} + self.aliases = {} + self.files = set() + + dont_walk = [ + # Those require root access and aren't documented at ABI + f"^{sysfs}/kernel/debug", + f"^{sysfs}/kernel/tracing", + f"^{sysfs}/fs/pstore", + f"^{sysfs}/fs/bpf", + f"^{sysfs}/fs/fuse", + + # This is not documented at ABI + f"^{sysfs}/module", + + f"^{sysfs}/fs/cgroup", # this is big and has zero docs under ABI + f"^{sysfs}/firmware", # documented elsewhere: ACPI, DT bindings + "sections|notes", # aren't actually part of ABI + + # kernel-parameters.txt - not easy to parse + "parameters", + ] + + self.re_ignore = re.compile("|".join(dont_walk)) + + print(f"Reading {sysfs} directory contents...", file=sys.stderr) + self._walk(sysfs) + + def check_file(self, refs, found): + """Check missing ABI symbols for a given sysfs file""" + + res_list = [] + + try: + for names in refs: + fname = names[0] + + res = { + "found": False, + "fname": fname, + "msg": "", + } + res_list.append(res) + + re_what = self.abi.get_regexes(fname) + if not re_what: + self.abi.log.warning(f"missing rules for {fname}") + continue + + for name in names: + for r in re_what: + if self.abi.debug & AbiDebug.UNDEFINED: + self.log.debug("check if %s matches '%s'", name, r.pattern) + if r.match(name): + res["found"] = True + if found: + res["msg"] += f" {fname}: regex:\n\t" + continue + + if self.hints and not res["found"]: + res["msg"] += f" {fname} not found. 
Tested regexes:\n"
+                    for r in re_what:
+                        res["msg"] += "    " + r.pattern + "\n"
+
+        except KeyboardInterrupt:
+            pass
+
+        return res_list
+
+    def _ref_interactor(self, root):
+        """Recursive generator that iterates over the sysfs tree"""
+
+        for k, v in root.items():
+            if isinstance(v, dict):
+                yield from self._ref_interactor(v)
+
+            if root == self.root or k == "__name":
+                continue
+
+            if self.abi.re_string:
+                fname = v["__name"][0]
+                if self.abi.re_string.search(fname):
+                    yield v
+            else:
+                yield v
+
+
+    def get_fileref(self, all_refs, chunk_size):
+        """Generator that groups refs into chunks"""
+
+        n = 0
+        refs = []
+
+        for ref in all_refs:
+            refs.append(ref)
+
+            n += 1
+            if n >= chunk_size:
+                yield refs
+                n = 0
+                refs = []
+
+        yield refs
+
+    def check_undefined_symbols(self, max_workers=None, chunk_size=50,
+                                found=None, dry_run=None):
+        """Search the ABI for sysfs symbols that lack documentation"""
+
+        self.abi.parse_abi()
+
+        if self.abi.debug & AbiDebug.GRAPH:
+            self.print_graph()
+
+        all_refs = []
+        for ref in self._ref_interactor(self.root):
+            all_refs.append(ref["__name"])
+
+        if dry_run:
+            print("Would check", file=sys.stderr)
+            for ref in all_refs:
+                print(", ".join(ref))
+
+            return
+
+        print("Starting to search symbols (it may take several minutes):",
+              file=sys.stderr)
+        start = datetime.now()
+        old_elapsed = None
+
+        # Python doesn't support real multithreading due to its global
+        # interpreter lock (GIL). While Python 3.13 finally made the GIL
+        # optional, there are still issues related to it, and we also want
+        # to keep backward compatibility with older versions of Python.
+        #
+        # So, use multiple processes instead. However, Python is very slow
+        # at passing data from/to multiple processes, and it may consume
+        # lots of memory if the shared data is not small. So, we need to
+        # group the workload into chunks that are big enough to produce
+        # performance gains while not being so big that they would cause
+        # out-of-memory conditions.
+
+        num_refs = len(all_refs)
+        print(f"Number of references to parse: {num_refs}", file=sys.stderr)
+
+        if not max_workers:
+            max_workers = os.cpu_count()
+        elif max_workers > os.cpu_count():
+            max_workers = os.cpu_count()
+
+        max_workers = max(max_workers, 1)
+
+        max_chunk_size = int((num_refs + max_workers - 1) / max_workers)
+        chunk_size = min(chunk_size, max_chunk_size)
+        chunk_size = max(1, chunk_size)
+
+        if max_workers > 1:
+            executor = futures.ProcessPoolExecutor
+
+            # Place references in a random order. This may help to improve
+            # performance by mixing complex and simple expressions when
+            # creating chunks.
+            shuffle(all_refs)
+        else:
+            # Python has a high overhead with processes. When there's just
+            # one worker, it is faster not to create a new process.
+            # Yet, the user still deserves a progress report. So, use
+            # Python's "threads", which actually run inside a single
+            # process with an internal scheduler to switch between tasks.
+            # There are no performance gains for non-IO tasks, but the work
+            # can be quickly interrupted from time to time to display
+            # progress.
+            executor = futures.ThreadPoolExecutor
+
+        not_found = []
+        f_list = []
+        with executor(max_workers=max_workers) as exe:
+            for refs in self.get_fileref(all_refs, chunk_size):
+                if refs:
+                    try:
+                        f_list.append(exe.submit(self.check_file, refs, found))
+
+                    except KeyboardInterrupt:
+                        return
+
+        total = len(f_list)
+
+        if not total:
+            if self.abi.re_string:
+                print(f"No ABI symbol matches {self.abi.search_string}")
+            else:
+                self.abi.log.warning("No ABI symbols found")
+            return
+
+        print(f"{len(f_list):6d} jobs queued on {max_workers} workers",
+              file=sys.stderr)
+
+        while f_list:
+            try:
+                t = futures.wait(f_list, timeout=1,
+                                 return_when=futures.FIRST_COMPLETED)
+
+                done = t[0]
+
+                for fut in done:
+                    res_list = fut.result()
+
+                    for res in res_list:
+                        if not res["found"]:
+                            not_found.append(res["fname"])
+                        if res["msg"]:
+                            print(res["msg"])
+
+                    f_list.remove(fut)
+            except KeyboardInterrupt:
+                return
+
+            except RuntimeError as e:
+                self.abi.log.warning(f"Future: {e}")
+                break
+
+            if sys.stderr.isatty():
+                elapsed = str(datetime.now() - start).split(".", maxsplit=1)[0]
+                if len(f_list) < total:
+                    elapsed += f" ({total - len(f_list)}/{total} jobs completed). "
+                if elapsed != old_elapsed:
+                    print(elapsed + "\r", end="", flush=True,
+                          file=sys.stderr)
+                    old_elapsed = elapsed
+
+        elapsed = str(datetime.now() - start).split(".", maxsplit=1)[0]
+        print(elapsed, file=sys.stderr)
+
+        for f in sorted(not_found):
+            print(f"{f} not found.")
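
For illustration only (not part of this commit), a minimal sketch of how the new scripts/lib/abi modules are meant to be driven, based on the methods visible in this diff. The directory argument to AbiParser()/AbiRegex() is an assumption (their full __init__ is outside the hunks shown here), and get_abi.py remains the real entry point:

    # Sketch only: assumes scripts/lib/abi/ is on the Python path and that
    # AbiParser()/AbiRegex() take the ABI directory as their first argument.
    import sys

    sys.path.insert(0, "scripts/lib/abi")

    from abi_parser import AbiParser
    from abi_regex import AbiRegex
    from system_symbols import SystemSymbols

    # 1) Emit ReST for Documentation/ABI, roughly what "get_abi.py rest" does.
    abi = AbiParser("Documentation/ABI")    # directory argument is assumed
    abi.parse_abi()
    abi.check_issues()                      # warn about duplicated What: entries
    for msg, fname, ln in abi.doc():        # doc() yields (text, file, line)
        print(msg)

    # 2) Compare a running system against the documented ABI, roughly what
    #    "get_abi.py undefined" does; check_undefined_symbols() calls
    #    parse_abi() itself, so no explicit parse is needed here.
    regex_abi = AbiRegex("Documentation/ABI")
    syms = SystemSymbols(abi=regex_abi, sysfs="/sys", hints=True)
    syms.check_undefined_symbols(max_workers=4, chunk_size=50)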