summaryrefslogtreecommitdiff
path: root/tools/lib/python
diff options
context:
space:
mode:
Diffstat (limited to 'tools/lib/python')
-rw-r--r--tools/lib/python/abi/abi_parser.py33
-rw-r--r--tools/lib/python/abi/abi_regex.py26
-rw-r--r--tools/lib/python/abi/helpers.py42
-rw-r--r--tools/lib/python/abi/system_symbols.py14
-rwxr-xr-xtools/lib/python/feat/parse_features.py27
-rwxr-xr-xtools/lib/python/jobserver.py158
-rw-r--r--tools/lib/python/kdoc/enrich_formatter.py20
-rw-r--r--tools/lib/python/kdoc/kdoc_files.py23
-rw-r--r--tools/lib/python/kdoc/kdoc_item.py18
-rw-r--r--tools/lib/python/kdoc/kdoc_output.py104
-rw-r--r--tools/lib/python/kdoc/kdoc_parser.py287
-rw-r--r--tools/lib/python/kdoc/kdoc_re.py28
-rwxr-xr-xtools/lib/python/kdoc/latex_fonts.py95
-rwxr-xr-xtools/lib/python/kdoc/parse_data_structs.py62
-rw-r--r--tools/lib/python/kdoc/python_version.py20
15 files changed, 643 insertions, 314 deletions
diff --git a/tools/lib/python/abi/abi_parser.py b/tools/lib/python/abi/abi_parser.py
index 9b8db70067ef..d7bb20ef3acc 100644
--- a/tools/lib/python/abi/abi_parser.py
+++ b/tools/lib/python/abi/abi_parser.py
@@ -21,14 +21,17 @@ from abi.helpers import AbiDebug, ABI_DIR
class AbiParser:
- """Main class to parse ABI files"""
+ """Main class to parse ABI files."""
+ #: Valid tags at Documentation/ABI.
TAGS = r"(what|where|date|kernelversion|contact|description|users)"
+
+ #: ABI elements that will auto-generate cross-references.
XREF = r"(?:^|\s|\()(\/(?:sys|config|proc|dev|kvd)\/[^,.:;\)\s]+)(?:[,.:;\)\s]|\Z)"
def __init__(self, directory, logger=None,
enable_lineno=False, show_warnings=True, debug=0):
- """Stores arguments for the class and initialize class vars"""
+ """Stores arguments for the class and initialize class vars."""
self.directory = directory
self.enable_lineno = enable_lineno
@@ -65,7 +68,7 @@ class AbiParser:
self.re_xref_node = re.compile(self.XREF)
def warn(self, fdata, msg, extra=None):
- """Displays a parse error if warning is enabled"""
+ """Displays a parse error if warning is enabled."""
if not self.show_warnings:
return
@@ -77,7 +80,7 @@ class AbiParser:
self.log.warning(msg)
def add_symbol(self, what, fname, ln=None, xref=None):
- """Create a reference table describing where each 'what' is located"""
+ """Create a reference table describing where each 'what' is located."""
if what not in self.what_symbols:
self.what_symbols[what] = {"file": {}}
@@ -92,7 +95,7 @@ class AbiParser:
self.what_symbols[what]["xref"] = xref
def _parse_line(self, fdata, line):
- """Parse a single line of an ABI file"""
+ """Parse a single line of an ABI file."""
new_what = False
new_tag = False
@@ -264,7 +267,7 @@ class AbiParser:
self.warn(fdata, "Unexpected content", line)
def parse_readme(self, nametag, fname):
- """Parse ABI README file"""
+ """Parse ABI README file."""
nametag["what"] = ["Introduction"]
nametag["path"] = "README"
@@ -282,7 +285,7 @@ class AbiParser:
nametag["description"] += line
def parse_file(self, fname, path, basename):
- """Parse a single file"""
+ """Parse a single file."""
ref = f"abi_file_{path}_{basename}"
ref = self.re_unprintable.sub("_", ref).strip("_")
@@ -348,7 +351,7 @@ class AbiParser:
self.add_symbol(what=w, fname=fname, xref=fdata.key)
def _parse_abi(self, root=None):
- """Internal function to parse documentation ABI recursively"""
+ """Internal function to parse documentation ABI recursively."""
if not root:
root = self.directory
@@ -377,7 +380,7 @@ class AbiParser:
self.parse_file(name, path, basename)
def parse_abi(self, root=None):
- """Parse documentation ABI"""
+ """Parse documentation ABI."""
self._parse_abi(root)
@@ -385,7 +388,7 @@ class AbiParser:
self.log.debug(pformat(self.data))
def desc_txt(self, desc):
- """Print description as found inside ABI files"""
+ """Print description as found inside ABI files."""
desc = desc.strip(" \t\n")
@@ -393,7 +396,7 @@ class AbiParser:
def xref(self, fname):
"""
- Converts a Documentation/ABI + basename into a ReST cross-reference
+ Converts a Documentation/ABI + basename into a ReST cross-reference.
"""
xref = self.file_refs.get(fname)
@@ -403,7 +406,7 @@ class AbiParser:
return xref
def desc_rst(self, desc):
- """Enrich ReST output by creating cross-references"""
+ """Enrich ReST output by creating cross-references."""
# Remove title markups from the description
# Having titles inside ABI files will only work if extra
@@ -459,7 +462,7 @@ class AbiParser:
def doc(self, output_in_txt=False, show_symbols=True, show_file=True,
filter_path=None):
- """Print ABI at stdout"""
+ """Print ABI at stdout."""
part = None
for key, v in sorted(self.data.items(),
@@ -549,7 +552,7 @@ class AbiParser:
yield (msg, file_ref[0][0], ln)
def check_issues(self):
- """Warn about duplicated ABI entries"""
+ """Warn about duplicated ABI entries."""
for what, v in self.what_symbols.items():
files = v.get("file")
@@ -575,7 +578,7 @@ class AbiParser:
self.log.warning("%s is defined %d times: %s", what, len(f), "; ".join(f))
def search_symbols(self, expr):
- """ Searches for ABI symbols """
+ """Searches for ABI symbols."""
regex = re.compile(expr, re.I)
diff --git a/tools/lib/python/abi/abi_regex.py b/tools/lib/python/abi/abi_regex.py
index d5553206de3c..d0c5e3ede6b5 100644
--- a/tools/lib/python/abi/abi_regex.py
+++ b/tools/lib/python/abi/abi_regex.py
@@ -16,10 +16,22 @@ from abi.abi_parser import AbiParser
from abi.helpers import AbiDebug
class AbiRegex(AbiParser):
- """Extends AbiParser to search ABI nodes with regular expressions"""
+ """
+ Extends AbiParser to search ABI nodes with regular expressions.
- # Escape only ASCII visible characters
+ There are some optimizations here to allow a quick symbol search:
+ instead of trying to place all symbols altogether and doing a linear
+ search which is very time consuming, create a tree with one depth,
+ grouping similar symbols altogether.
+
+ Yet, sometimes a full search will be needed, so we have a special branch
+ on such group tree where other symbols are placed.
+ """
+
+ #: Escape only ASCII visible characters.
escape_symbols = r"([\x21-\x29\x2b-\x2d\x3a-\x40\x5c\x60\x7b-\x7e])"
+
+ #: Special group for other nodes.
leave_others = "others"
# Tuples with regular expressions to be compiled and replacement data
@@ -88,13 +100,15 @@ class AbiRegex(AbiParser):
# Recover plus characters
(re.compile(r"\xf7"), "+"),
]
+
+ #: Regex to check if the symbol name has a number on it.
re_has_num = re.compile(r"\\d")
- # Symbol name after escape_chars that are considered a devnode basename
+ #: Symbol name after escape_chars that are considered a devnode basename.
re_symbol_name = re.compile(r"(\w|\\[\.\-\:])+$")
- # List of popular group names to be skipped to minimize regex group size
- # Use AbiDebug.SUBGROUP_SIZE to detect those
+ #: List of popular group names to be skipped to minimize regex group size
+ #: Use AbiDebug.SUBGROUP_SIZE to detect those.
skip_names = set(["devices", "hwmon"])
def regex_append(self, what, new):
@@ -148,7 +162,7 @@ class AbiRegex(AbiParser):
def get_regexes(self, what):
"""
Given an ABI devnode, return a list of all regular expressions that
- may match it, based on the sub-groups created by regex_append()
+ may match it, based on the sub-groups created by regex_append().
"""
re_list = []
diff --git a/tools/lib/python/abi/helpers.py b/tools/lib/python/abi/helpers.py
index 639b23e4ca33..2a378d780d3c 100644
--- a/tools/lib/python/abi/helpers.py
+++ b/tools/lib/python/abi/helpers.py
@@ -13,26 +13,28 @@ ABI_DIR = "Documentation/ABI/"
class AbiDebug:
"""Debug levels"""
- WHAT_PARSING = 1
- WHAT_OPEN = 2
- DUMP_ABI_STRUCTS = 4
- UNDEFINED = 8
- REGEX = 16
- SUBGROUP_MAP = 32
- SUBGROUP_DICT = 64
- SUBGROUP_SIZE = 128
- GRAPH = 256
-
+ WHAT_PARSING = 1 #: Enable debug parsing logic.
+ WHAT_OPEN = 2 #: Enable debug messages on file open.
+ DUMP_ABI_STRUCTS = 4 #: Enable debug for ABI parse data.
+ UNDEFINED = 8 #: Enable extra undefined symbol data.
+ REGEX = 16 #: Enable debug for what to regex conversion.
+ SUBGROUP_MAP = 32 #: Enable debug for symbol regex subgroups.
+ SUBGROUP_DICT = 64 #: Enable debug for sysfs graph tree variable.
+ SUBGROUP_SIZE = 128 #: Enable debug of search groups.
+ GRAPH = 256 #: Display ref tree graph for undefined symbols.
+#: Helper messages for each debug variable
DEBUG_HELP = """
-1 - enable debug parsing logic
-2 - enable debug messages on file open
-4 - enable debug for ABI parse data
-8 - enable extra debug information to identify troubles
- with ABI symbols found at the local machine that
- weren't found on ABI documentation (used only for
- undefined subcommand)
-16 - enable debug for what to regex conversion
-32 - enable debug for symbol regex subgroups
-64 - enable debug for sysfs graph tree variable
+1 - enable debug parsing logic
+2 - enable debug messages on file open
+4 - enable debug for ABI parse data
+8 - enable extra debug information to identify troubles
+ with ABI symbols found at the local machine that
+ weren't found on ABI documentation (used only for
+ undefined subcommand)
+16 - enable debug for what to regex conversion
+32 - enable debug for symbol regex subgroups
+64 - enable debug for sysfs graph tree variable
+128 - enable debug of search groups
+256 - enable displaying reference tree graphs for undefined symbols.
"""
diff --git a/tools/lib/python/abi/system_symbols.py b/tools/lib/python/abi/system_symbols.py
index 4a2554da217b..7bbefd274ea2 100644
--- a/tools/lib/python/abi/system_symbols.py
+++ b/tools/lib/python/abi/system_symbols.py
@@ -18,11 +18,11 @@ from random import shuffle
from abi.helpers import AbiDebug
class SystemSymbols:
- """Stores arguments for the class and initialize class vars"""
+ """Stores arguments for the class and initialize class vars."""
def graph_add_file(self, path, link=None):
"""
- add a file path to the sysfs graph stored at self.root
+ Add a file path to the sysfs graph stored at self.root.
"""
if path in self.files:
@@ -43,7 +43,7 @@ class SystemSymbols:
self.files.add(path)
def print_graph(self, root_prefix="", root=None, level=0):
- """Prints a reference tree graph using UTF-8 characters"""
+ """Prints a reference tree graph using UTF-8 characters."""
if not root:
root = self.root
@@ -173,7 +173,7 @@ class SystemSymbols:
self._walk(sysfs)
def check_file(self, refs, found):
- """Check missing ABI symbols for a given sysfs file"""
+ """Check missing ABI symbols for a given sysfs file."""
res_list = []
@@ -214,7 +214,7 @@ class SystemSymbols:
return res_list
def _ref_interactor(self, root):
- """Recursive function to interact over the sysfs tree"""
+ """Recursive function to interact over the sysfs tree."""
for k, v in root.items():
if isinstance(v, dict):
@@ -232,7 +232,7 @@ class SystemSymbols:
def get_fileref(self, all_refs, chunk_size):
- """Interactor to group refs into chunks"""
+ """Interactor to group refs into chunks."""
n = 0
refs = []
@@ -250,7 +250,7 @@ class SystemSymbols:
def check_undefined_symbols(self, max_workers=None, chunk_size=50,
found=None, dry_run=None):
- """Seach ABI for sysfs symbols missing documentation"""
+ """Search ABI for sysfs symbols missing documentation."""
self.abi.parse_abi()
diff --git a/tools/lib/python/feat/parse_features.py b/tools/lib/python/feat/parse_features.py
index b88c04d3e2fe..41a51d9d6f62 100755
--- a/tools/lib/python/feat/parse_features.py
+++ b/tools/lib/python/feat/parse_features.py
@@ -21,14 +21,25 @@ class ParseFeature:
from it.
"""
+ #: feature header string.
h_name = "Feature"
+
+ #: Kernel config header string.
h_kconfig = "Kconfig"
+
+ #: description header string.
h_description = "Description"
+
+ #: subsystem header string.
h_subsys = "Subsystem"
+
+ #: status header string.
h_status = "Status"
+
+ #: architecture header string.
h_arch = "Architecture"
- # Sort order for status. Others will be mapped at the end.
+ #: Sort order for status. Others will be mapped at the end.
status_map = {
"ok": 0,
"TODO": 1,
@@ -40,7 +51,7 @@ class ParseFeature:
def __init__(self, prefix, debug=0, enable_fname=False):
"""
- Sets internal variables
+ Sets internal variables.
"""
self.prefix = prefix
@@ -63,11 +74,13 @@ class ParseFeature:
self.msg = ""
def emit(self, msg="", end="\n"):
+ """Helper function to append a new message for feature output."""
+
self.msg += msg + end
def parse_error(self, fname, ln, msg, data=None):
"""
- Displays an error message, printing file name and line
+ Displays an error message, printing file name and line.
"""
if ln:
@@ -82,7 +95,7 @@ class ParseFeature:
print("", file=sys.stderr)
def parse_feat_file(self, fname):
- """Parses a single arch-support.txt feature file"""
+ """Parses a single arch-support.txt feature file."""
if os.path.isdir(fname):
return
@@ -204,7 +217,7 @@ class ParseFeature:
self.max_size_arch_with_header = self.max_size_arch + len(self.h_arch)
def parse(self):
- """Parses all arch-support.txt feature files inside self.prefix"""
+ """Parses all arch-support.txt feature files inside self.prefix."""
path = os.path.expanduser(self.prefix)
@@ -281,7 +294,7 @@ class ParseFeature:
def output_feature(self, feat):
"""
- Output a feature on all architectures
+ Output a feature on all architectures.
"""
title = f"Feature {feat}"
@@ -331,7 +344,7 @@ class ParseFeature:
def matrix_lines(self, desc_size, max_size_status, header):
"""
- Helper function to split element tables at the output matrix
+ Helper function to split element tables at the output matrix.
"""
if header:
diff --git a/tools/lib/python/jobserver.py b/tools/lib/python/jobserver.py
index a24f30ef4fa8..aba22c33393d 100755
--- a/tools/lib/python/jobserver.py
+++ b/tools/lib/python/jobserver.py
@@ -11,20 +11,23 @@ Interacts with the POSIX jobserver during the Kernel build time.
A "normal" jobserver task, like the one initiated by a make subrocess would do:
- open read/write file descriptors to communicate with the job server;
- - ask for one slot by calling:
+ - ask for one slot by calling::
+
claim = os.read(reader, 1)
- - when the job finshes, call:
+
+ - when the job finishes, call::
+
os.write(writer, b"+") # os.write(writer, claim)
Here, the goal is different: This script aims to get the remaining number
of slots available, using all of them to run a command which handle tasks in
parallel. To to that, it has a loop that ends only after there are no
slots left. It then increments the number by one, in order to allow a
-call equivalent to make -j$((claim+1)), e.g. having a parent make creating
+call equivalent to ``make -j$((claim+1))``, e.g. having a parent make creating
$claim child to do the actual work.
The end goal here is to keep the total number of build tasks under the
-limit established by the initial make -j$n_proc call.
+limit established by the initial ``make -j$n_proc`` call.
See:
https://www.gnu.org/software/make/manual/html_node/POSIX-Jobserver.html#POSIX-Jobserver
@@ -35,18 +38,22 @@ import os
import subprocess
import sys
+def warn(text, *args):
+ print(f'WARNING: {text}', *args, file = sys.stderr)
+
class JobserverExec:
"""
Claim all slots from make using POSIX Jobserver.
The main methods here are:
+
- open(): reserves all slots;
- close(): method returns all used slots back to make;
- - run(): executes a command setting PARALLELISM=<available slots jobs + 1>
+ - run(): executes a command setting PARALLELISM=<available slots jobs + 1>.
"""
def __init__(self):
- """Initialize internal vars"""
+ """Initialize internal vars."""
self.claim = 0
self.jobs = b""
self.reader = None
@@ -54,66 +61,105 @@ class JobserverExec:
self.is_open = False
def open(self):
- """Reserve all available slots to be claimed later on"""
+ """Reserve all available slots to be claimed later on."""
if self.is_open:
return
-
- try:
- # Fetch the make environment options.
- flags = os.environ["MAKEFLAGS"]
- # Look for "--jobserver=R,W"
- # Note that GNU Make has used --jobserver-fds and --jobserver-auth
- # so this handles all of them.
- opts = [x for x in flags.split(" ") if x.startswith("--jobserver")]
-
- # Parse out R,W file descriptor numbers and set them nonblocking.
- # If the MAKEFLAGS variable contains multiple instances of the
- # --jobserver-auth= option, the last one is relevant.
- fds = opts[-1].split("=", 1)[1]
-
- # Starting with GNU Make 4.4, named pipes are used for reader
- # and writer.
- # Example argument: --jobserver-auth=fifo:/tmp/GMfifo8134
- _, _, path = fds.partition("fifo:")
-
- if path:
+ self.is_open = True # We only try once
+ self.claim = None
+ #
+ # Check the make flags for "--jobserver=R,W"
+ # Note that GNU Make has used --jobserver-fds and --jobserver-auth
+ # so this handles all of them.
+ #
+ flags = os.environ.get('MAKEFLAGS', '')
+ opts = [x for x in flags.split(" ") if x.startswith("--jobserver")]
+ if not opts:
+ return
+ #
+ # Separate out the provided file descriptors
+ #
+ split_opt = opts[-1].split('=', 1)
+ if len(split_opt) != 2:
+ warn('unparseable option:', opts[-1])
+ return
+ fds = split_opt[1]
+ #
+ # As of GNU Make 4.4, we'll be looking for a named pipe
+ # identified as fifo:path
+ #
+ if fds.startswith('fifo:'):
+ path = fds[len('fifo:'):]
+ try:
self.reader = os.open(path, os.O_RDONLY | os.O_NONBLOCK)
self.writer = os.open(path, os.O_WRONLY)
- else:
- self.reader, self.writer = [int(x) for x in fds.split(",", 1)]
+ except (OSError, IOError):
+ warn('unable to open jobserver pipe', path)
+ return
+ #
+ # Otherwise look for integer file-descriptor numbers.
+ #
+ else:
+ split_fds = fds.split(',')
+ if len(split_fds) != 2:
+ warn('malformed jobserver file descriptors:', fds)
+ return
+ try:
+ self.reader = int(split_fds[0])
+ self.writer = int(split_fds[1])
+ except ValueError:
+ warn('non-integer jobserver file-descriptors:', fds)
+ return
+ try:
+ #
# Open a private copy of reader to avoid setting nonblocking
# on an unexpecting process with the same reader fd.
- self.reader = os.open("/proc/self/fd/%d" % (self.reader),
+ #
+ self.reader = os.open(f"/proc/self/fd/{self.reader}",
os.O_RDONLY | os.O_NONBLOCK)
-
- # Read out as many jobserver slots as possible
- while True:
- try:
- slot = os.read(self.reader, 8)
- self.jobs += slot
- except (OSError, IOError) as e:
- if e.errno == errno.EWOULDBLOCK:
- # Stop at the end of the jobserver queue.
- break
- # If something went wrong, give back the jobs.
- if self.jobs:
- os.write(self.writer, self.jobs)
- raise e
-
- # Add a bump for our caller's reserveration, since we're just going
- # to sit here blocked on our child.
- self.claim = len(self.jobs) + 1
-
- except (KeyError, IndexError, ValueError, OSError, IOError):
- # Any missing environment strings or bad fds should result in just
- # not being parallel.
- self.claim = None
-
- self.is_open = True
+ except (IOError, OSError) as e:
+ warn('Unable to reopen jobserver read-side pipe:', repr(e))
+ return
+ #
+ # OK, we have the channel to the job server; read out as many jobserver
+ # slots as possible.
+ #
+ while True:
+ try:
+ slot = os.read(self.reader, 8)
+ if not slot:
+ #
+ # Something went wrong. Clear self.jobs to avoid writing
+ # weirdness back to the jobserver and give up.
+ self.jobs = b""
+ warn("unexpected empty token from jobserver;"
+ " possible invalid '--jobserver-auth=' setting")
+ self.claim = None
+ return
+ except (OSError, IOError) as e:
+ #
+ # If there is nothing more to read then we are done.
+ #
+ if e.errno == errno.EWOULDBLOCK:
+ break
+ #
+ # Anything else says that something went weird; give back
+ # the jobs and give up.
+ #
+ if self.jobs:
+ os.write(self.writer, self.jobs)
+ self.claim = None
+ warn('error reading from jobserver pipe', repr(e))
+ return
+ self.jobs += slot
+ #
+ # Add a bump for our caller's reservation, since we're just going
+ # to sit here blocked on our child.
+ #
+ self.claim = len(self.jobs) + 1
def close(self):
- """Return all reserved slots to Jobserver"""
+ """Return all reserved slots to Jobserver."""
if not self.is_open:
return
diff --git a/tools/lib/python/kdoc/enrich_formatter.py b/tools/lib/python/kdoc/enrich_formatter.py
index bb171567a4ca..d1be4e5e1962 100644
--- a/tools/lib/python/kdoc/enrich_formatter.py
+++ b/tools/lib/python/kdoc/enrich_formatter.py
@@ -26,12 +26,16 @@ class EnrichFormatter(argparse.HelpFormatter):
and how they're used at the __doc__ description.
"""
def __init__(self, *args, **kwargs):
- """Initialize class and check if is TTY"""
+ """
+ Initialize class and check if is TTY.
+ """
super().__init__(*args, **kwargs)
self._tty = sys.stdout.isatty()
def enrich_text(self, text):
- """Handle ReST markups (currently, only ``foo``)"""
+ r"""
+ Handle ReST markups (currently, only \`\`text\`\` markups).
+ """
if self._tty and text:
# Replace ``text`` with ANSI SGR (bold)
return re.sub(r'\`\`(.+?)\`\`',
@@ -39,12 +43,16 @@ class EnrichFormatter(argparse.HelpFormatter):
return text
def _fill_text(self, text, width, indent):
- """Enrich descriptions with markups on it"""
+ """
+ Enrich descriptions with markups on it.
+ """
enriched = self.enrich_text(text)
return "\n".join(indent + line for line in enriched.splitlines())
def _format_usage(self, usage, actions, groups, prefix):
- """Enrich positional arguments at usage: line"""
+ """
+ Enrich positional arguments at usage: line.
+ """
prog = self._prog
parts = []
@@ -63,7 +71,9 @@ class EnrichFormatter(argparse.HelpFormatter):
return usage_text
def _format_action_invocation(self, action):
- """Enrich argument names"""
+ """
+ Enrich argument names.
+ """
if not action.option_strings:
return self.enrich_text(f"``{action.dest.upper()}``")
diff --git a/tools/lib/python/kdoc/kdoc_files.py b/tools/lib/python/kdoc/kdoc_files.py
index bfe02baf1606..022487ea2cc6 100644
--- a/tools/lib/python/kdoc/kdoc_files.py
+++ b/tools/lib/python/kdoc/kdoc_files.py
@@ -5,7 +5,8 @@
# pylint: disable=R0903,R0913,R0914,R0917
"""
-Parse lernel-doc tags on multiple kernel source files.
+Classes for navigating through the files that kernel-doc needs to handle
+to generate documentation.
"""
import argparse
@@ -43,7 +44,7 @@ class GlobSourceFiles:
self.srctree = srctree
def _parse_dir(self, dirname):
- """Internal function to parse files recursively"""
+ """Internal function to parse files recursively."""
with os.scandir(dirname) as obj:
for entry in obj:
@@ -65,7 +66,7 @@ class GlobSourceFiles:
def parse_files(self, file_list, file_not_found_cb):
"""
Define an iterator to parse all source files from file_list,
- handling directories if any
+ handling directories if any.
"""
if not file_list:
@@ -91,18 +92,18 @@ class KernelFiles():
There are two type of parsers defined here:
- self.parse_file(): parses both kernel-doc markups and
- EXPORT_SYMBOL* macros;
- - self.process_export_file(): parses only EXPORT_SYMBOL* macros.
+ ``EXPORT_SYMBOL*`` macros;
+ - self.process_export_file(): parses only ``EXPORT_SYMBOL*`` macros.
"""
def warning(self, msg):
- """Ancillary routine to output a warning and increment error count"""
+ """Ancillary routine to output a warning and increment error count."""
self.config.log.warning(msg)
self.errors += 1
def error(self, msg):
- """Ancillary routine to output an error and increment error count"""
+ """Ancillary routine to output an error and increment error count."""
self.config.log.error(msg)
self.errors += 1
@@ -128,7 +129,7 @@ class KernelFiles():
def process_export_file(self, fname):
"""
- Parses EXPORT_SYMBOL* macros from a single Kernel source file.
+ Parses ``EXPORT_SYMBOL*`` macros from a single Kernel source file.
"""
# Prevent parsing the same file twice if results are cached
@@ -157,7 +158,7 @@ class KernelFiles():
wcontents_before_sections=False,
logger=None):
"""
- Initialize startup variables and parse all files
+ Initialize startup variables and parse all files.
"""
if not verbose:
@@ -213,7 +214,7 @@ class KernelFiles():
def parse(self, file_list, export_file=None):
"""
- Parse all files
+ Parse all files.
"""
glob = GlobSourceFiles(srctree=self.config.src_tree)
@@ -242,7 +243,7 @@ class KernelFiles():
filenames=None, export_file=None):
"""
Interacts over the kernel-doc results and output messages,
- returning kernel-doc markups on each interaction
+ returning kernel-doc markups on each interaction.
"""
self.out_style.set_config(self.config)
diff --git a/tools/lib/python/kdoc/kdoc_item.py b/tools/lib/python/kdoc/kdoc_item.py
index 19805301cb2c..2b8a93f79716 100644
--- a/tools/lib/python/kdoc/kdoc_item.py
+++ b/tools/lib/python/kdoc/kdoc_item.py
@@ -4,7 +4,16 @@
# then pass into the output modules.
#
+"""
+Data class to store a kernel-doc Item.
+"""
+
class KdocItem:
+ """
+ A class that will, eventually, encapsulate all of the parsed data that we
+ then pass into the output modules.
+ """
+
def __init__(self, name, fname, type, start_line, **other_stuff):
self.name = name
self.fname = fname
@@ -24,6 +33,9 @@ class KdocItem:
self.other_stuff = other_stuff
def get(self, key, default = None):
+ """
+ Get a value from optional keys.
+ """
return self.other_stuff.get(key, default)
def __getitem__(self, key):
@@ -33,10 +45,16 @@ class KdocItem:
# Tracking of section and parameter information.
#
def set_sections(self, sections, start_lines):
+ """
+ Set sections and start lines.
+ """
self.sections = sections
self.section_start_lines = start_lines
def set_params(self, names, descs, types, starts):
+ """
+ Set parameter list: names, descriptions, types and start lines.
+ """
self.parameterlist = names
self.parameterdescs = descs
self.parametertypes = types
diff --git a/tools/lib/python/kdoc/kdoc_output.py b/tools/lib/python/kdoc/kdoc_output.py
index b1aaa7fc3604..4210b91dde5f 100644
--- a/tools/lib/python/kdoc/kdoc_output.py
+++ b/tools/lib/python/kdoc/kdoc_output.py
@@ -5,14 +5,16 @@
# pylint: disable=C0301,R0902,R0911,R0912,R0913,R0914,R0915,R0917
"""
-Implement output filters to print kernel-doc documentation.
+Classes to implement output filters to print kernel-doc documentation.
-The implementation uses a virtual base class (OutputFormat) which
+The implementation uses a virtual base class ``OutputFormat``. It
contains dispatches to virtual methods, and some code to filter
out output messages.
The actual implementation is done on one separate class per each type
-of output. Currently, there are output classes for ReST and man/troff.
+of output, e.g. ``RestFormat`` and ``ManFormat`` classes.
+
+Currently, there are output classes for ReST and man/troff.
"""
import os
@@ -54,16 +56,19 @@ class OutputFormat:
"""
# output mode.
- OUTPUT_ALL = 0 # output all symbols and doc sections
- OUTPUT_INCLUDE = 1 # output only specified symbols
- OUTPUT_EXPORTED = 2 # output exported symbols
- OUTPUT_INTERNAL = 3 # output non-exported symbols
+ OUTPUT_ALL = 0 #: Output all symbols and doc sections.
+ OUTPUT_INCLUDE = 1 #: Output only specified symbols.
+ OUTPUT_EXPORTED = 2 #: Output exported symbols.
+ OUTPUT_INTERNAL = 3 #: Output non-exported symbols.
- # Virtual member to be overridden at the inherited classes
+ #: Highlights to be used in ReST format.
highlights = []
+ #: Blank line character.
+ blankline = ""
+
def __init__(self):
- """Declare internal vars and set mode to OUTPUT_ALL"""
+ """Declare internal vars and set mode to ``OUTPUT_ALL``."""
self.out_mode = self.OUTPUT_ALL
self.enable_lineno = None
@@ -128,7 +133,7 @@ class OutputFormat:
self.config.warning(log_msg)
def check_doc(self, name, args):
- """Check if DOC should be output"""
+ """Check if DOC should be output."""
if self.no_doc_sections:
return False
@@ -177,7 +182,7 @@ class OutputFormat:
def msg(self, fname, name, args):
"""
- Handles a single entry from kernel-doc parser
+ Handles a single entry from kernel-doc parser.
"""
self.data = ""
@@ -199,6 +204,10 @@ class OutputFormat:
self.out_enum(fname, name, args)
return self.data
+ if dtype == "var":
+ self.out_var(fname, name, args)
+ return self.data
+
if dtype == "typedef":
self.out_typedef(fname, name, args)
return self.data
@@ -216,27 +225,31 @@ class OutputFormat:
# Virtual methods to be overridden by inherited classes
# At the base class, those do nothing.
def set_symbols(self, symbols):
- """Get a list of all symbols from kernel_doc"""
+ """Get a list of all symbols from kernel_doc."""
def out_doc(self, fname, name, args):
- """Outputs a DOC block"""
+ """Outputs a DOC block."""
def out_function(self, fname, name, args):
- """Outputs a function"""
+ """Outputs a function."""
def out_enum(self, fname, name, args):
- """Outputs an enum"""
+ """Outputs an enum."""
+
+ def out_var(self, fname, name, args):
+ """Outputs a variable."""
def out_typedef(self, fname, name, args):
- """Outputs a typedef"""
+ """Outputs a typedef."""
def out_struct(self, fname, name, args):
- """Outputs a struct"""
+ """Outputs a struct."""
class RestFormat(OutputFormat):
- """Consts and functions used by ReST output"""
+ """Consts and functions used by ReST output."""
+ #: Highlights to be used in ReST format
highlights = [
(type_constant, r"``\1``"),
(type_constant2, r"``\1``"),
@@ -256,9 +269,13 @@ class RestFormat(OutputFormat):
(type_fallback, r":c:type:`\1`"),
(type_param_ref, r"**\1\2**")
]
+
blankline = "\n"
+ #: Sphinx literal block regex.
sphinx_literal = KernRe(r'^[^.].*::$', cache=False)
+
+ #: Sphinx code block regex.
sphinx_cblock = KernRe(r'^\.\.\ +code-block::', cache=False)
def __init__(self):
@@ -273,7 +290,7 @@ class RestFormat(OutputFormat):
self.lineprefix = ""
def print_lineno(self, ln):
- """Outputs a line number"""
+ """Outputs a line number."""
if self.enable_lineno and ln is not None:
ln += 1
@@ -282,7 +299,7 @@ class RestFormat(OutputFormat):
def output_highlight(self, args):
"""
Outputs a C symbol that may require being converted to ReST using
- the self.highlights variable
+ the self.highlights variable.
"""
input_text = args
@@ -472,6 +489,25 @@ class RestFormat(OutputFormat):
self.lineprefix = oldprefix
self.out_section(args)
+ def out_var(self, fname, name, args):
+ oldprefix = self.lineprefix
+ ln = args.declaration_start_line
+ full_proto = args.other_stuff["full_proto"]
+
+ self.lineprefix = " "
+
+ self.data += f"\n\n.. c:macro:: {name}\n\n{self.lineprefix}``{full_proto}``\n\n"
+
+ self.print_lineno(ln)
+ self.output_highlight(args.get('purpose', ''))
+ self.data += "\n"
+
+ if args.other_stuff["default_val"]:
+ self.data += f'{self.lineprefix}**Initialization**\n\n'
+ self.output_highlight(f'default: ``{args.other_stuff["default_val"]}``')
+
+ self.out_section(args)
+
def out_typedef(self, fname, name, args):
oldprefix = self.lineprefix
@@ -544,7 +580,7 @@ class RestFormat(OutputFormat):
class ManFormat(OutputFormat):
- """Consts and functions used by man pages output"""
+ """Consts and functions used by man pages output."""
highlights = (
(type_constant, r"\1"),
@@ -561,6 +597,7 @@ class ManFormat(OutputFormat):
)
blankline = ""
+ #: Allowed timestamp formats.
date_formats = [
"%a %b %d %H:%M:%S %Z %Y",
"%a %b %d %H:%M:%S %Y",
@@ -627,7 +664,7 @@ class ManFormat(OutputFormat):
self.symbols = symbols
def out_tail(self, fname, name, args):
- """Adds a tail for all man pages"""
+ """Adds a tail for all man pages."""
# SEE ALSO section
self.data += f'.SH "SEE ALSO"' + "\n.PP\n"
@@ -663,7 +700,7 @@ class ManFormat(OutputFormat):
def output_highlight(self, block):
"""
Outputs a C symbol that may require being highlighted with
- self.highlights variable using troff syntax
+ self.highlights variable using troff syntax.
"""
contents = self.highlight_block(block)
@@ -694,7 +731,6 @@ class ManFormat(OutputFormat):
self.output_highlight(text)
def out_function(self, fname, name, args):
- """output function in man"""
out_name = self.arg_name(args, name)
@@ -773,6 +809,26 @@ class ManFormat(OutputFormat):
self.data += f'.SH "{section}"' + "\n"
self.output_highlight(text)
+ def out_var(self, fname, name, args):
+ out_name = self.arg_name(args, name)
+ full_proto = args.other_stuff["full_proto"]
+
+ self.data += f'.TH "{self.modulename}" 9 "{out_name}" "{self.man_date}" "API Manual" LINUX' + "\n"
+
+ self.data += ".SH NAME\n"
+ self.data += f"{name} \\- {args['purpose']}\n"
+
+ self.data += ".SH SYNOPSIS\n"
+ self.data += f"{full_proto}\n"
+
+ if args.other_stuff["default_val"]:
+ self.data += f'.SH "Initialization"' + "\n"
+ self.output_highlight(f'default: {args.other_stuff["default_val"]}')
+
+ for section, text in args.sections.items():
+ self.data += f'.SH "{section}"' + "\n"
+ self.output_highlight(text)
+
def out_typedef(self, fname, name, args):
module = self.modulename
purpose = args.get('purpose')
diff --git a/tools/lib/python/kdoc/kdoc_parser.py b/tools/lib/python/kdoc/kdoc_parser.py
index 500aafc50032..fd57944ae907 100644
--- a/tools/lib/python/kdoc/kdoc_parser.py
+++ b/tools/lib/python/kdoc/kdoc_parser.py
@@ -5,11 +5,8 @@
# pylint: disable=C0301,C0302,R0904,R0912,R0913,R0914,R0915,R0917,R1702
"""
-kdoc_parser
-===========
-
-Read a C language source or header FILE and extract embedded
-documentation comments
+Classes and functions related to reading a C language source or header FILE
+and extract embedded documentation comments from it.
"""
import sys
@@ -53,7 +50,7 @@ doc_content = doc_com_body + KernRe(r'(.*)', cache=False)
doc_inline_start = KernRe(r'^\s*/\*\*\s*$', cache=False)
doc_inline_sect = KernRe(r'\s*\*\s*(@\s*[\w][\w\.]*\s*):(.*)', cache=False)
doc_inline_end = KernRe(r'^\s*\*/\s*$', cache=False)
-doc_inline_oneline = KernRe(r'^\s*/\*\*\s*(@[\w\s]+):\s*(.*)\s*\*/\s*$', cache=False)
+doc_inline_oneline = KernRe(r'^\s*/\*\*\s*(@\s*[\w][\w\.]*\s*):\s*(.*)\s*\*/\s*$', cache=False)
export_symbol = KernRe(r'^\s*EXPORT_SYMBOL(_GPL)?\s*\(\s*(\w+)\s*\)\s*', cache=False)
export_symbol_ns = KernRe(r'^\s*EXPORT_SYMBOL_NS(_GPL)?\s*\(\s*(\w+)\s*,\s*"\S+"\)\s*', cache=False)
@@ -64,7 +61,7 @@ type_param = KernRe(r"@(\w*((\.\w+)|(->\w+))*(\.\.\.)?)", cache=False)
# Tests for the beginning of a kerneldoc block in its various forms.
#
doc_block = doc_com + KernRe(r'DOC:\s*(.*)?', cache=False)
-doc_begin_data = KernRe(r"^\s*\*?\s*(struct|union|enum|typedef)\b\s*(\w*)", cache = False)
+doc_begin_data = KernRe(r"^\s*\*?\s*(struct|union|enum|typedef|var)\b\s*(\w*)", cache = False)
doc_begin_func = KernRe(str(doc_com) + # initial " * '
r"(?:\w+\s*\*\s*)?" + # type (not captured)
r'(?:define\s+)?' + # possible "define" (not captured)
@@ -195,25 +192,28 @@ function_xforms = [
]
#
-# Apply a set of transforms to a block of text.
+# Ancillary functions
#
+
def apply_transforms(xforms, text):
+ """
+ Apply a set of transforms to a block of text.
+ """
for search, subst in xforms:
text = search.sub(subst, text)
return text
-#
-# A little helper to get rid of excess white space
-#
multi_space = KernRe(r'\s\s+')
def trim_whitespace(s):
+ """
+ A little helper to get rid of excess white space.
+ """
return multi_space.sub(' ', s.strip())
-#
-# Remove struct/enum members that have been marked "private".
-#
def trim_private_members(text):
- #
+ """
+ Remove ``struct``/``enum`` members that have been marked "private".
+ """
# First look for a "public:" block that ends a private region, then
# handle the "private until the end" case.
#
@@ -226,20 +226,21 @@ def trim_private_members(text):
class state:
"""
- State machine enums
+ States used by the parser's state machine.
"""
# Parser states
- NORMAL = 0 # normal code
- NAME = 1 # looking for function name
- DECLARATION = 2 # We have seen a declaration which might not be done
- BODY = 3 # the body of the comment
- SPECIAL_SECTION = 4 # doc section ending with a blank line
- PROTO = 5 # scanning prototype
- DOCBLOCK = 6 # documentation block
- INLINE_NAME = 7 # gathering doc outside main block
- INLINE_TEXT = 8 # reading the body of inline docs
-
+ NORMAL = 0 #: Normal code.
+ NAME = 1 #: Looking for function name.
+ DECLARATION = 2 #: We have seen a declaration which might not be done.
+ BODY = 3 #: The body of the comment.
+ SPECIAL_SECTION = 4 #: Doc section ending with a blank line.
+ PROTO = 5 #: Scanning prototype.
+ DOCBLOCK = 6 #: Documentation block.
+ INLINE_NAME = 7 #: Gathering doc outside main block.
+ INLINE_TEXT = 8 #: Reading the body of inline docs.
+
+ #: Names for each parser state.
name = [
"NORMAL",
"NAME",
@@ -253,9 +254,12 @@ class state:
]
-SECTION_DEFAULT = "Description" # default section
+SECTION_DEFAULT = "Description" #: Default section.
class KernelEntry:
+ """
+ Encapsulates a Kernel documentation entry.
+ """
def __init__(self, config, fname, ln):
self.config = config
@@ -288,14 +292,16 @@ class KernelEntry:
# Management of section contents
#
def add_text(self, text):
+ """Add a new text to the entry contents list."""
self._contents.append(text)
def contents(self):
+ """Returns a string with all content texts that were added."""
return '\n'.join(self._contents) + '\n'
# TODO: rename to emit_message after removal of kernel-doc.pl
def emit_msg(self, ln, msg, *, warning=True):
- """Emit a message"""
+ """Emit a message."""
log_msg = f"{self.fname}:{ln} {msg}"
@@ -309,10 +315,10 @@ class KernelEntry:
self.warnings.append(log_msg)
return
- #
- # Begin a new section.
- #
def begin_section(self, line_no, title = SECTION_DEFAULT, dump = False):
+ """
+ Begin a new section.
+ """
if dump:
self.dump_section(start_new = True)
self.section = title
@@ -366,11 +372,13 @@ class KernelDoc:
documentation comments.
"""
- # Section names
-
+ #: Name of context section.
section_context = "Context"
+
+ #: Name of return section.
section_return = "Return"
+ #: String to write when a parameter is not described.
undescribed = "-- undescribed --"
def __init__(self, config, fname):
@@ -416,7 +424,7 @@ class KernelDoc:
def dump_section(self, start_new=True):
"""
- Dumps section contents to arrays/hashes intended for that purpose.
+ Dump section contents to arrays/hashes intended for that purpose.
"""
if self.entry:
@@ -425,9 +433,9 @@ class KernelDoc:
# TODO: rename it to store_declaration after removal of kernel-doc.pl
def output_declaration(self, dtype, name, **args):
"""
- Stores the entry into an entry array.
+ Store the entry into an entry array.
- The actual output and output filters will be handled elsewhere
+ The actual output and output filters will be handled elsewhere.
"""
item = KdocItem(name, self.fname, dtype,
@@ -448,18 +456,37 @@ class KernelDoc:
self.config.log.debug("Output: %s:%s = %s", dtype, name, pformat(args))
+ def emit_unused_warnings(self):
+ """
+ When the parser fails to produce a valid entry, it places some
+ warnings under `entry.warnings` that will be discarded when resetting
+ the state.
+
+ Ensure that those warnings are not lost.
+
+ .. note::
+
+ Because we are calling `config.warning()` here, those
+ warnings are not filtered by the `-W` parameters: they will all
+ be produced even when `-Wreturn`, `-Wshort-desc`, and/or
+ `-Wcontents-before-sections` are used.
+
+ Allowing those warnings to be filtered is complex, because it
+ would require storing them in a buffer and then filtering them
+ during the output step of the code, depending on the
+ selected symbols.
+ """
+ if self.entry and self.entry not in self.entries:
+ for log_msg in self.entry.warnings:
+ self.config.warning(log_msg)
+
def reset_state(self, ln):
"""
Ancillary routine to create a new entry. It initializes all
variables used by the state machine.
"""
- #
- # Flush the warnings out before we proceed further
- #
- if self.entry and self.entry not in self.entries:
- for log_msg in self.entry.warnings:
- self.config.log.warning(log_msg)
+ self.emit_unused_warnings()
self.entry = KernelEntry(self.config, self.fname, ln)
@@ -663,10 +690,12 @@ class KernelDoc:
self.emit_msg(ln,
f"No description found for return value of '{declaration_name}'")
- #
- # Split apart a structure prototype; returns (struct|union, name, members) or None
- #
def split_struct_proto(self, proto):
+ """
+ Split apart a structure prototype; returns (struct|union, name,
+ members) or ``None``.
+ """
+
type_pattern = r'(struct|union)'
qualifiers = [
"__attribute__",
@@ -685,21 +714,26 @@ class KernelDoc:
if r.search(proto):
return (r.group(1), r.group(3), r.group(2))
return None
- #
- # Rewrite the members of a structure or union for easier formatting later on.
- # Among other things, this function will turn a member like:
- #
- # struct { inner_members; } foo;
- #
- # into:
- #
- # struct foo; inner_members;
- #
+
def rewrite_struct_members(self, members):
+ """
+ Process ``struct``/``union`` members from the most deeply nested
+ outward.
+
+ Rewrite the members of a ``struct`` or ``union`` for easier formatting
+ later on. Among other things, this function will turn a member like::
+
+ struct { inner_members; } foo;
+
+ into::
+
+ struct foo; inner_members;
+ """
+
#
- # Process struct/union members from the most deeply nested outward. The
- # trick is in the ^{ below - it prevents a match of an outer struct/union
- # until the inner one has been munged (removing the "{" in the process).
+ # The trick is in the ``^{`` below - it prevents a match of an outer
+ # ``struct``/``union`` until the inner one has been munged
+ # (removing the ``{`` in the process).
#
struct_members = KernRe(r'(struct|union)' # 0: declaration type
r'([^\{\};]+)' # 1: possible name
@@ -777,11 +811,12 @@ class KernelDoc:
tuples = struct_members.findall(members)
return members
- #
- # Format the struct declaration into a standard form for inclusion in the
- # resulting docs.
- #
def format_struct_decl(self, declaration):
+ """
+ Format the ``struct`` declaration into a standard form for inclusion
+ in the resulting docs.
+ """
+
#
# Insert newlines, get rid of extra spaces.
#
@@ -815,7 +850,7 @@ class KernelDoc:
def dump_struct(self, ln, proto):
"""
- Store an entry for a struct or union
+        Store an entry for a ``struct`` or ``union``.
"""
#
# Do the basic parse to get the pieces of the declaration.
@@ -857,7 +892,7 @@ class KernelDoc:
def dump_enum(self, ln, proto):
"""
- Stores an enum inside self.entries array.
+ Store an ``enum`` inside self.entries array.
"""
#
# Strip preprocessor directives. Note that this depends on the
@@ -927,9 +962,84 @@ class KernelDoc:
self.output_declaration('enum', declaration_name,
purpose=self.entry.declaration_purpose)
+ def dump_var(self, ln, proto):
+ """
+ Store variables that are part of kAPI.
+ """
+ VAR_ATTRIBS = [
+ "extern",
+ ]
+ OPTIONAL_VAR_ATTR = "^(?:" + "|".join(VAR_ATTRIBS) + ")?"
+
+ sub_prefixes = [
+ (KernRe(r"__read_mostly"), ""),
+ (KernRe(r"__ro_after_init"), ""),
+ (KernRe(r"(?://.*)$"), ""),
+ (KernRe(r"(?:/\*.*\*/)"), ""),
+ (KernRe(r";$"), ""),
+ (KernRe(r"=.*"), ""),
+ ]
+
+ #
+ # Store the full prototype before modifying it
+ #
+ full_proto = proto
+ declaration_name = None
+
+ #
+ # Handle macro definitions
+ #
+ macro_prefixes = [
+ KernRe(r"DEFINE_[\w_]+\s*\(([\w_]+)\)"),
+ ]
+
+ for r in macro_prefixes:
+ match = r.search(proto)
+ if match:
+ declaration_name = match.group(1)
+ break
+
+ #
+ # Drop comments and macros to have a pure C prototype
+ #
+ if not declaration_name:
+ for r, sub in sub_prefixes:
+ proto = r.sub(sub, proto)
+
+ proto = proto.rstrip()
+
+ #
+ # Variable name is at the end of the declaration
+ #
+
+ default_val = None
+
+        r = KernRe(OPTIONAL_VAR_ATTR + r"\w.*\s+(?:\*+)?([\w_]+)\s*[\d\]\[]*\s*(=.*)?")
+ if r.match(proto):
+ if not declaration_name:
+ declaration_name = r.group(1)
+
+ default_val = r.group(2)
+ else:
+            r = KernRe(OPTIONAL_VAR_ATTR + r"(?:\w.*)?\s+(?:\*+)?(?:[\w_]+)\s*[\d\]\[]*\s*(=.*)?")
+ if r.match(proto):
+ default_val = r.group(1)
+
+ if not declaration_name:
+            self.emit_msg(ln, f"{proto}: can't parse variable")
+ return
+
+ if default_val:
+ default_val = default_val.lstrip("=").strip()
+
+ self.output_declaration("var", declaration_name,
+ full_proto=full_proto,
+ default_val=default_val,
+ purpose=self.entry.declaration_purpose)
+
def dump_declaration(self, ln, prototype):
"""
- Stores a data declaration inside self.entries array.
+ Store a data declaration inside self.entries array.
"""
if self.entry.decl_type == "enum":
@@ -938,13 +1048,15 @@ class KernelDoc:
self.dump_typedef(ln, prototype)
elif self.entry.decl_type in ["union", "struct"]:
self.dump_struct(ln, prototype)
+ elif self.entry.decl_type == "var":
+ self.dump_var(ln, prototype)
else:
# This would be a bug
self.emit_message(ln, f'Unknown declaration type: {self.entry.decl_type}')
def dump_function(self, ln, prototype):
"""
- Stores a function or function macro inside self.entries array.
+ Store a function or function macro inside self.entries array.
"""
found = func_macro = False
@@ -1045,7 +1157,7 @@ class KernelDoc:
def dump_typedef(self, ln, proto):
"""
- Stores a typedef inside self.entries array.
+ Store a ``typedef`` inside self.entries array.
"""
#
# We start by looking for function typedefs.
@@ -1099,7 +1211,7 @@ class KernelDoc:
@staticmethod
def process_export(function_set, line):
"""
- process EXPORT_SYMBOL* tags
+    Process ``EXPORT_SYMBOL*`` tags.
This method doesn't use any variable from the class, so declare it
with a staticmethod decorator.
@@ -1130,7 +1242,7 @@ class KernelDoc:
def process_normal(self, ln, line):
"""
- STATE_NORMAL: looking for the /** to begin everything.
+ STATE_NORMAL: looking for the ``/**`` to begin everything.
"""
if not doc_start.match(line):
@@ -1220,10 +1332,10 @@ class KernelDoc:
else:
self.emit_msg(ln, f"Cannot find identifier on line:\n{line}")
- #
- # Helper function to determine if a new section is being started.
- #
def is_new_section(self, ln, line):
+ """
+ Helper function to determine if a new section is being started.
+ """
if doc_sect.search(line):
self.state = state.BODY
#
@@ -1255,10 +1367,10 @@ class KernelDoc:
return True
return False
- #
- # Helper function to detect (and effect) the end of a kerneldoc comment.
- #
def is_comment_end(self, ln, line):
+ """
+ Helper function to detect (and effect) the end of a kerneldoc comment.
+ """
if doc_end.search(line):
self.dump_section()
@@ -1277,7 +1389,7 @@ class KernelDoc:
def process_decl(self, ln, line):
"""
- STATE_DECLARATION: We've seen the beginning of a declaration
+ STATE_DECLARATION: We've seen the beginning of a declaration.
"""
if self.is_new_section(ln, line) or self.is_comment_end(ln, line):
return
@@ -1306,7 +1418,7 @@ class KernelDoc:
def process_special(self, ln, line):
"""
- STATE_SPECIAL_SECTION: a section ending with a blank line
+ STATE_SPECIAL_SECTION: a section ending with a blank line.
"""
#
# If we have hit a blank line (only the " * " marker), then this
@@ -1396,7 +1508,7 @@ class KernelDoc:
def syscall_munge(self, ln, proto): # pylint: disable=W0613
"""
- Handle syscall definitions
+ Handle syscall definitions.
"""
is_void = False
@@ -1435,7 +1547,7 @@ class KernelDoc:
def tracepoint_munge(self, ln, proto):
"""
- Handle tracepoint definitions
+ Handle tracepoint definitions.
"""
tracepointname = None
@@ -1471,7 +1583,7 @@ class KernelDoc:
return proto
def process_proto_function(self, ln, line):
- """Ancillary routine to process a function prototype"""
+ """Ancillary routine to process a function prototype."""
# strip C99-style comments to end of line
line = KernRe(r"//.*$", re.S).sub('', line)
@@ -1516,7 +1628,9 @@ class KernelDoc:
self.reset_state(ln)
def process_proto_type(self, ln, line):
- """Ancillary routine to process a type"""
+ """
+ Ancillary routine to process a type.
+ """
# Strip C99-style comments and surrounding whitespace
line = KernRe(r"//.*$", re.S).sub('', line).strip()
@@ -1570,7 +1684,7 @@ class KernelDoc:
self.process_proto_type(ln, line)
def process_docblock(self, ln, line):
- """STATE_DOCBLOCK: within a DOC: block."""
+ """STATE_DOCBLOCK: within a ``DOC:`` block."""
if doc_end.search(line):
self.dump_section()
@@ -1582,7 +1696,7 @@ class KernelDoc:
def parse_export(self):
"""
- Parses EXPORT_SYMBOL* macros from a single Kernel source file.
+ Parses ``EXPORT_SYMBOL*`` macros from a single Kernel source file.
"""
export_table = set()
@@ -1599,10 +1713,7 @@ class KernelDoc:
return export_table
- #
- # The state/action table telling us which function to invoke in
- # each state.
- #
+ #: The state/action table telling us which function to invoke in each state.
state_actions = {
state.NORMAL: process_normal,
state.NAME: process_name,
@@ -1664,6 +1775,8 @@ class KernelDoc:
# Hand this line to the appropriate state handler
self.state_actions[self.state](self, ln, line)
+ self.emit_unused_warnings()
+
except OSError:
self.config.log.error(f"Error: Cannot open file {self.fname}")
diff --git a/tools/lib/python/kdoc/kdoc_re.py b/tools/lib/python/kdoc/kdoc_re.py
index 2dfa1bf83d64..0bf9e01cdc57 100644
--- a/tools/lib/python/kdoc/kdoc_re.py
+++ b/tools/lib/python/kdoc/kdoc_re.py
@@ -51,6 +51,9 @@ class KernRe:
"""
return self.regex.pattern
+ def __repr__(self):
+ return f're.compile("{self.regex.pattern}")'
+
def __add__(self, other):
"""
Allows adding two regular expressions into one.
@@ -61,7 +64,7 @@ class KernRe:
def match(self, string):
"""
- Handles a re.match storing its results
+ Handles a re.match storing its results.
"""
self.last_match = self.regex.match(string)
@@ -69,7 +72,7 @@ class KernRe:
def search(self, string):
"""
- Handles a re.search storing its results
+ Handles a re.search storing its results.
"""
self.last_match = self.regex.search(string)
@@ -77,28 +80,28 @@ class KernRe:
def findall(self, string):
"""
- Alias to re.findall
+ Alias to re.findall.
"""
return self.regex.findall(string)
def split(self, string):
"""
- Alias to re.split
+ Alias to re.split.
"""
return self.regex.split(string)
def sub(self, sub, string, count=0):
"""
- Alias to re.sub
+ Alias to re.sub.
"""
return self.regex.sub(sub, string, count=count)
def group(self, num):
"""
- Returns the group results of the last match
+ Returns the group results of the last match.
"""
return self.last_match.group(num)
@@ -110,7 +113,7 @@ class NestedMatch:
even harder on Python with its normal re module, as there are several
advanced regular expressions that are missing.
- This is the case of this pattern:
+ This is the case of this pattern::
'\\bSTRUCT_GROUP(\\(((?:(?>[^)(]+)|(?1))*)\\))[^;]*;'
@@ -121,6 +124,7 @@ class NestedMatch:
replace nested expressions.
The original approach was suggested by:
+
https://stackoverflow.com/questions/5454322/python-how-to-match-nested-parentheses-with-regex
Although I re-implemented it to make it more generic and match 3 types
@@ -224,14 +228,18 @@ class NestedMatch:
yield line[t[0]:t[2]]
def sub(self, regex, sub, line, count=0):
- """
+ r"""
This is similar to re.sub:
It matches a regex that it is followed by a delimiter,
replacing occurrences only if all delimiters are paired.
- if r'\1' is used, it works just like re: it places there the
- matched paired data with the delimiter stripped.
+ if the sub argument contains::
+
+ r'\1'
+
+ it will work just like re: it places there the matched paired data
+ with the delimiter stripped.
If count is different than zero, it will replace at most count
items.
diff --git a/tools/lib/python/kdoc/latex_fonts.py b/tools/lib/python/kdoc/latex_fonts.py
index 29317f8006ea..1d04cbda169f 100755
--- a/tools/lib/python/kdoc/latex_fonts.py
+++ b/tools/lib/python/kdoc/latex_fonts.py
@@ -5,12 +5,13 @@
# Ported to Python by (c) Mauro Carvalho Chehab, 2025
"""
-Detect problematic Noto CJK variable fonts.
+Detect problematic Noto CJK variable fonts
+==========================================
-For "make pdfdocs", reports of build errors of translations.pdf started
-arriving early 2024 [1, 2]. It turned out that Fedora and openSUSE
-tumbleweed have started deploying variable-font [3] format of "Noto CJK"
-fonts [4, 5]. For PDF, a LaTeX package named xeCJK is used for CJK
+For ``make pdfdocs``, reports of build errors of translations.pdf started
+arriving early 2024 [1]_ [2]_. It turned out that Fedora and openSUSE
+tumbleweed have started deploying variable-font [3]_ format of "Noto CJK"
+fonts [4]_ [5]_. For PDF, a LaTeX package named xeCJK is used for CJK
(Chinese, Japanese, Korean) pages. xeCJK requires XeLaTeX/XeTeX, which
does not (and likely never will) understand variable fonts for historical
reasons.
@@ -25,68 +26,77 @@ This script is invoked from the error path of "make pdfdocs" and emits
suggestions if variable-font files of "Noto CJK" fonts are in the list of
fonts accessible from XeTeX.
-References:
-[1]: https://lore.kernel.org/r/8734tqsrt7.fsf@meer.lwn.net/
-[2]: https://lore.kernel.org/r/1708585803.600323099@f111.i.mail.ru/
-[3]: https://en.wikipedia.org/wiki/Variable_font
-[4]: https://fedoraproject.org/wiki/Changes/Noto_CJK_Variable_Fonts
-[5]: https://build.opensuse.org/request/show/1157217
+.. [1] https://lore.kernel.org/r/8734tqsrt7.fsf@meer.lwn.net/
+.. [2] https://lore.kernel.org/r/1708585803.600323099@f111.i.mail.ru/
+.. [3] https://en.wikipedia.org/wiki/Variable_font
+.. [4] https://fedoraproject.org/wiki/Changes/Noto_CJK_Variable_Fonts
+.. [5] https://build.opensuse.org/request/show/1157217
-#===========================================================================
Workarounds for building translations.pdf
-#===========================================================================
+-----------------------------------------
* Denylist "variable font" Noto CJK fonts.
+
- Create $HOME/deny-vf/fontconfig/fonts.conf from template below, with
tweaks if necessary. Remove leading "".
+
- Path of fontconfig/fonts.conf can be overridden by setting an env
variable FONTS_CONF_DENY_VF.
- * Template:
------------------------------------------------------------------
-<?xml version="1.0"?>
-<!DOCTYPE fontconfig SYSTEM "urn:fontconfig:fonts.dtd">
-<fontconfig>
-<!--
- Ignore variable-font glob (not to break xetex)
--->
- <selectfont>
- <rejectfont>
- <!--
- for Fedora
- -->
- <glob>/usr/share/fonts/google-noto-*-cjk-vf-fonts</glob>
- <!--
- for openSUSE tumbleweed
- -->
- <glob>/usr/share/fonts/truetype/Noto*CJK*-VF.otf</glob>
- </rejectfont>
- </selectfont>
-</fontconfig>
------------------------------------------------------------------
+ * Template::
+
+ <?xml version="1.0"?>
+ <!DOCTYPE fontconfig SYSTEM "urn:fontconfig:fonts.dtd">
+ <fontconfig>
+ <!--
+ Ignore variable-font glob (not to break xetex)
+ -->
+ <selectfont>
+ <rejectfont>
+ <!--
+ for Fedora
+ -->
+ <glob>/usr/share/fonts/google-noto-*-cjk-vf-fonts</glob>
+ <!--
+ for openSUSE tumbleweed
+ -->
+ <glob>/usr/share/fonts/truetype/Noto*CJK*-VF.otf</glob>
+ </rejectfont>
+ </selectfont>
+ </fontconfig>
The denylisting is activated for "make pdfdocs".
* For skipping CJK pages in PDF
+
- Uninstall texlive-xecjk.
Denylisting is not needed in this case.
* For printing CJK pages in PDF
+
- Need non-variable "Noto CJK" fonts.
+
* Fedora
+
- google-noto-sans-cjk-fonts
- google-noto-serif-cjk-fonts
+
* openSUSE tumbleweed
+
- Non-variable "Noto CJK" fonts are not available as distro packages
as of April, 2024. Fetch a set of font files from upstream Noto
CJK Font released at:
+
https://github.com/notofonts/noto-cjk/tree/main/Sans#super-otc
+
and at:
+
https://github.com/notofonts/noto-cjk/tree/main/Serif#super-otc
- , then uncompress and deploy them.
+
+ then uncompress and deploy them.
- Remember to update fontconfig cache by running fc-cache.
-!!! Caution !!!
+.. caution::
Uninstalling "variable font" packages can be dangerous.
They might be depended upon by other packages important for your work.
Denylisting should be less invasive, as it is effective only while
@@ -115,10 +125,15 @@ class LatexFontChecker:
self.re_cjk = re.compile(r"([^:]+):\s*Noto\s+(Sans|Sans Mono|Serif) CJK")
def description(self):
+ """
+ Returns module description.
+ """
return __doc__
def get_noto_cjk_vf_fonts(self):
- """Get Noto CJK fonts"""
+ """
+ Get Noto CJK fonts.
+ """
cjk_fonts = set()
cmd = ["fc-list", ":", "file", "family", "variable"]
@@ -143,7 +158,9 @@ class LatexFontChecker:
return sorted(cjk_fonts)
def check(self):
- """Check for problems with CJK fonts"""
+ """
+ Check for problems with CJK fonts.
+ """
fonts = textwrap.indent("\n".join(self.get_noto_cjk_vf_fonts()), " ")
if not fonts:
diff --git a/tools/lib/python/kdoc/parse_data_structs.py b/tools/lib/python/kdoc/parse_data_structs.py
index 25361996cd20..9941cd19032e 100755
--- a/tools/lib/python/kdoc/parse_data_structs.py
+++ b/tools/lib/python/kdoc/parse_data_structs.py
@@ -9,12 +9,12 @@ Parse a source file or header, creating ReStructured Text cross references.
It accepts an optional file to change the default symbol reference or to
suppress symbols from the output.
-It is capable of identifying defines, functions, structs, typedefs,
-enums and enum symbols and create cross-references for all of them.
+It is capable of identifying ``define``, function, ``struct``, ``typedef``,
+``enum`` and ``enum`` symbols and create cross-references for all of them.
It is also capable of distinguish #define used for specifying a Linux
ioctl.
-The optional rules file contains a set of rules like:
+The optional rules file contains a set of rules like::
ignore ioctl VIDIOC_ENUM_FMT
replace ioctl VIDIOC_DQBUF vidioc_qbuf
@@ -34,8 +34,8 @@ class ParseDataStructs:
It is meant to allow having a more comprehensive documentation, where
uAPI headers will create cross-reference links to the code.
- It is capable of identifying defines, functions, structs, typedefs,
- enums and enum symbols and create cross-references for all of them.
+ It is capable of identifying ``define``, function, ``struct``, ``typedef``,
+ ``enum`` and ``enum`` symbols and create cross-references for all of them.
It is also capable of distinguish #define used for specifying a Linux
ioctl.
@@ -43,13 +43,13 @@ class ParseDataStructs:
allows parsing an exception file. Such file contains a set of rules
using the syntax below:
- 1. Ignore rules:
+ 1. Ignore rules::
ignore <type> <symbol>`
Removes the symbol from reference generation.
- 2. Replace rules:
+ 2. Replace rules::
replace <type> <old_symbol> <new_reference>
@@ -58,22 +58,22 @@ class ParseDataStructs:
- A simple symbol name;
- A full Sphinx reference.
- 3. Namespace rules
+ 3. Namespace rules::
namespace <namespace>
Sets C namespace to be used during cross-reference generation. Can
be overridden by replace rules.
- On ignore and replace rules, <type> can be:
- - ioctl: for defines that end with _IO*, e.g. ioctl definitions
- - define: for other defines
- - symbol: for symbols defined within enums;
- - typedef: for typedefs;
- - enum: for the name of a non-anonymous enum;
- - struct: for structs.
+ On ignore and replace rules, ``<type>`` can be:
+ - ``ioctl``: for defines that end with ``_IO*``, e.g. ioctl definitions
+ - ``define``: for other defines
+ - ``symbol``: for symbols defined within enums;
+ - ``typedef``: for typedefs;
+ - ``enum``: for the name of a non-anonymous enum;
+ - ``struct``: for structs.
- Examples:
+ Examples::
ignore define __LINUX_MEDIA_H
ignore ioctl VIDIOC_ENUM_FMT
@@ -83,13 +83,15 @@ class ParseDataStructs:
namespace MC
"""
- # Parser regexes with multiple ways to capture enums and structs
+ #: Parser regex with multiple ways to capture enums.
RE_ENUMS = [
re.compile(r"^\s*enum\s+([\w_]+)\s*\{"),
re.compile(r"^\s*enum\s+([\w_]+)\s*$"),
re.compile(r"^\s*typedef\s*enum\s+([\w_]+)\s*\{"),
re.compile(r"^\s*typedef\s*enum\s+([\w_]+)\s*$"),
]
+
+ #: Parser regex with multiple ways to capture structs.
RE_STRUCTS = [
re.compile(r"^\s*struct\s+([_\w][\w\d_]+)\s*\{"),
re.compile(r"^\s*struct\s+([_\w][\w\d_]+)$"),
@@ -97,11 +99,13 @@ class ParseDataStructs:
re.compile(r"^\s*typedef\s*struct\s+([_\w][\w\d_]+)$"),
]
- # FIXME: the original code was written a long time before Sphinx C
+ # NOTE: the original code was written a long time before Sphinx C
# domain to have multiple namespaces. To avoid to much turn at the
# existing hyperlinks, the code kept using "c:type" instead of the
# right types. To change that, we need to change the types not only
# here, but also at the uAPI media documentation.
+
+ #: Dictionary containing C type identifiers to be transformed.
DEF_SYMBOL_TYPES = {
"ioctl": {
"prefix": "\\ ",
@@ -158,6 +162,10 @@ class ParseDataStructs:
self.symbols[symbol_type] = {}
def read_exceptions(self, fname: str):
+ """
+ Read an optional exceptions file, used to override defaults.
+ """
+
if not fname:
return
@@ -242,9 +250,9 @@ class ParseDataStructs:
def store_type(self, ln, symbol_type: str, symbol: str,
ref_name: str = None, replace_underscores: bool = True):
"""
- Stores a new symbol at self.symbols under symbol_type.
+ Store a new symbol at self.symbols under symbol_type.
- By default, underscores are replaced by "-"
+ By default, underscores are replaced by ``-``.
"""
defs = self.DEF_SYMBOL_TYPES[symbol_type]
@@ -276,12 +284,16 @@ class ParseDataStructs:
self.symbols[symbol_type][symbol] = (f"{prefix}{ref_link}{suffix}", ln)
def store_line(self, line):
- """Stores a line at self.data, properly indented"""
+ """
+ Store a line at self.data, properly indented.
+ """
line = " " + line.expandtabs()
self.data += line.rstrip(" ")
def parse_file(self, file_in: str, exceptions: str = None):
- """Reads a C source file and get identifiers"""
+ """
+ Read a C source file and get identifiers.
+ """
self.data = ""
is_enum = False
is_comment = False
@@ -433,7 +445,7 @@ class ParseDataStructs:
def gen_toc(self):
"""
- Create a list of symbols to be part of a TOC contents table
+ Create a list of symbols to be part of a TOC contents table.
"""
text = []
@@ -464,6 +476,10 @@ class ParseDataStructs:
return "\n".join(text)
def write_output(self, file_in: str, file_out: str, toc: bool):
+ """
+ Write a ReST output file.
+ """
+
title = os.path.basename(file_in)
if toc:
diff --git a/tools/lib/python/kdoc/python_version.py b/tools/lib/python/kdoc/python_version.py
index e83088013db2..4ddb7ead5f56 100644
--- a/tools/lib/python/kdoc/python_version.py
+++ b/tools/lib/python/kdoc/python_version.py
@@ -33,21 +33,31 @@ class PythonVersion:
"""
def __init__(self, version):
- """Ïnitialize self.version tuple from a version string"""
+ """
+        Initialize self.version tuple from a version string.
+ """
self.version = self.parse_version(version)
@staticmethod
def parse_version(version):
- """Convert a major.minor.patch version into a tuple"""
+ """
+ Convert a major.minor.patch version into a tuple.
+ """
return tuple(int(x) for x in version.split("."))
@staticmethod
def ver_str(version):
- """Returns a version tuple as major.minor.patch"""
+ """
+        Return a version tuple formatted as a "major.minor.patch" string.
+ """
return ".".join([str(x) for x in version])
@staticmethod
def cmd_print(cmd, max_len=80):
+ """
+        Outputs a command line, respecting maximum width.
+ """
+
cmd_line = []
for w in cmd:
@@ -66,7 +76,9 @@ class PythonVersion:
return "\n ".join(cmd_line)
def __str__(self):
- """Returns a version tuple as major.minor.patch from self.version"""
+ """
+        Return self.version formatted as a "major.minor.patch" string.
+ """
return self.ver_str(self.version)
@staticmethod